From 6488f1a661505b86633de11699e0453a6afe3fb5 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Wed, 18 Dec 2024 13:08:02 -0700 Subject: [PATCH 01/28] remove logic for olaf/openmp --- weis/glue_code/mpi_tools.py | 57 +++++++++---------------------------- weis/glue_code/runWEIS.py | 12 ++------ 2 files changed, 16 insertions(+), 53 deletions(-) diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index e53fcc852..ca664931d 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -35,54 +35,23 @@ def debug(*msg): # pragma: no cover MPI = None -def map_comm_heirarchical(n_DV, n_OF, openmp=False): +def map_comm_heirarchical(n_DV, n_OF): """ Heirarchical parallelization communicator mapping. Assumes a number of top level processes - equal to the number of design variables (x2 if central finite differencing is used), each + equal to the number of finite differences, each with its associated number of openfast simulations. - When openmp flag is turned on, the code spreads the openfast simulations across nodes to - lavereage the opnemp parallelization of OpenFAST. The cores that will run under openmp, are marked - in the color map as 1000000. The ones handling python and the DV are marked as 0, and - finally the master ones for each openfast run are marked with a 1. 
""" - if openmp: - n_procs_per_node = 36 # Number of - num_procs = MPI.COMM_WORLD.Get_size() - n_nodes = num_procs / n_procs_per_node - - comm_map_down = {} - comm_map_up = {} - color_map = [1000000] * num_procs - - n_DV_per_node = n_DV / n_nodes - - # for m in range(n_DV_per_node): - for nn in range(int(n_nodes)): - for n_dv in range(int(n_DV_per_node)): - comm_map_down[nn * n_procs_per_node + n_dv] = [ - int(n_DV_per_node) + n_dv * n_OF + nn * (n_procs_per_node) + j for j in range(n_OF) - ] - - # This core handles python, so in the colormap the entry is 0 - color_map[nn * n_procs_per_node + n_dv] = int(0) - # These cores handles openfast, so in the colormap the entry is 1 - for k in comm_map_down[nn * n_procs_per_node + n_dv]: - color_map[k] = int(1) - - for j in comm_map_down[nn * n_procs_per_node + n_dv]: - comm_map_up[j] = nn * n_procs_per_node + n_dv - else: - N = n_DV + n_DV * n_OF - comm_map_down = {} - comm_map_up = {} - color_map = [0] * n_DV - - for i in range(n_DV): - comm_map_down[i] = [n_DV + j + i * n_OF for j in range(n_OF)] - color_map.extend([i + 1] * n_OF) - - for j in comm_map_down[i]: - comm_map_up[j] = i + N = n_DV + n_DV * n_OF + comm_map_down = {} + comm_map_up = {} + color_map = [0] * n_DV + + for i in range(n_DV): + comm_map_down[i] = [n_DV + j + i * n_OF for j in range(n_OF)] + color_map.extend([i + 1] * n_OF) + + for j in comm_map_down[i]: + comm_map_up[j] = i return comm_map_down, comm_map_up, color_map diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index 92e6d9176..bac7f0a4d 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -96,11 +96,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry color_i = 0 else: n_FD = max([n_FD, 1]) - if modeling_options['Level3']['flag'] == True and modeling_options['Level3']['AeroDyn']['WakeMod'] == 3: - olaf = True - else: - olaf = False - comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_runs_parallel, 
openmp=olaf) + comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_runs_parallel) rank = MPI.COMM_WORLD.Get_rank() if rank < len(color_map): try: @@ -238,10 +234,8 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry if MPI and \ (modeling_options['Level3']['flag'] or modeling_options['Level2']['flag']) and \ - (not opt_options['driver']['design_of_experiments']['flag']) and \ - color_i < 1000000: + (not opt_options['driver']['design_of_experiments']['flag']): # subprocessor ranks spin, waiting for FAST simulations to run. - # Only true for cores actually in use, not the ones supporting openfast openmp (marked as color_i = 1000000) sys.stdout.flush() if rank in comm_map_up.keys(): subprocessor_loop(comm_map_up) @@ -252,7 +246,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry sys.stdout.flush() # Send each core in use to a barrier synchronization - if MPI and color_i < 1000000: + if MPI: MPI.COMM_WORLD.Barrier() if rank == 0: From a49cc3b2b688c79bf12d06b89df0b6b0fc6db0d7 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Wed, 18 Dec 2024 20:59:33 -0700 Subject: [PATCH 02/28] support in setting mpi parameters --- .../03_NREL5MW_OC3_spar/analysis_options.yaml | 2 +- .../03_NREL5MW_OC3_spar/modeling_options.yaml | 2 +- examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh | 26 +++ examples/03_NREL5MW_OC3_spar/weis_driver.py | 33 ++- weis/glue_code/gc_PoseOptimization.py | 199 +----------------- weis/glue_code/mpi_tools.py | 88 +++++++- weis/glue_code/runWEIS.py | 74 ++----- weis/glue_code/sbatch_eagle.sh | 23 -- 8 files changed, 158 insertions(+), 289 deletions(-) create mode 100644 examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh delete mode 100644 weis/glue_code/sbatch_eagle.sh diff --git a/examples/03_NREL5MW_OC3_spar/analysis_options.yaml b/examples/03_NREL5MW_OC3_spar/analysis_options.yaml index bffee6b70..8b5f4338f 100644 --- a/examples/03_NREL5MW_OC3_spar/analysis_options.yaml +++ 
b/examples/03_NREL5MW_OC3_spar/analysis_options.yaml @@ -42,7 +42,7 @@ driver: max_major_iter: 10 # Maximum number of major design iterations (SNOPT) max_minor_iter: 100 # Maximum number of minor design iterations (SNOPT) max_iter: 2 # Maximum number of iterations (SLSQP) - solver: LN_COBYLA # Optimization solver. Other options are 'SLSQP' - 'CONMIN' + solver: SLSQP # Optimization solver. Other options are 'SLSQP' - 'CONMIN' step_size: 1.e-3 # Step size for finite differencing form: central # Finite differencing mode, either forward or central diff --git a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml index be01c1533..0aba5d9c4 100644 --- a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml +++ b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml @@ -97,7 +97,7 @@ DLC_driver: DLCs: - DLC: "1.1" ws_bin_size: 2 - wind_speed: [14.] + wind_speed: [14.,16., 18., 20., 22.] wave_height: [7.] wave_period: [1.] n_seeds: 1 diff --git a/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh b/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh new file mode 100644 index 000000000..a05564551 --- /dev/null +++ b/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh @@ -0,0 +1,26 @@ +#!/bin/bash +#SBATCH --account=allocation_name +#SBATCH --time=01:00:00 +#SBATCH --job-name=Design1 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=104 +#SBATCH --mail-user user@nrel.gov +#SBATCH --mail-type BEGIN,END,FAIL +#SBATCH --output=Case1.%j.out +####SBATCH --qos=high +####SBATCH --partition=debug + +module purge +module load comp-intel intel-mpi mkl +module unload gcc + +source activate weis-env + +output=$(python weis_driver.py --preMPIflag=True --maxCores=104) + +# Extract the values from the output (adjust based on actual format) +nC=$(echo "$output" | grep 'nC=' | awk -F'=' '{print $2}') +n_FD=$(echo "$output" | grep 'n_FD=' | awk -F'=' '{print $2}') +n_OFp=$(echo "$output" | grep 'n_OFp=' | awk -F'=' '{print $2}') + +mpirun -np $nC python runWEIS.py --n_FD=$n_FD 
--n_OF_parallel=$n_OFp diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index 34cd41975..2b675da50 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -4,21 +4,44 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI +from weis.glue_code.mpi_tools import compute_optimal_nC + +import argparse +# Set up argument parser +parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") +# Add the flag +parser.add_argument("--preMPIflag", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") +parser.add_argument("--maxCores", type=int, default=0, help="Maximum number of cores available.") +parser.add_argument("--n_FD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") +parser.add_argument("--n_OF_parallel", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") +# Parse the arguments +args = parser.parse_args() +# Use the flag in your script +if args.preMPIflag: + print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") +else: + print("Preprocessor flag is set to False. 
Run WEIS now.") + ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = run_dir + os.sep + "nrel5mw-spar_oc3.yaml" -fname_modeling_options = run_dir + os.sep + 'modeling_options.yaml' -fname_analysis_options = run_dir + os.sep + 'analysis_options_noopt.yaml' +fname_wt_input = os.path.join(run_dir, "nrel5mw-spar_oc3.yaml") +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +# Use the flag in your script +if args.preMPIflag: + _, modeling_options, opt_options, n_FD, n_OF_runs = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True) + compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = args.maxCores) +else: + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, n_FD = args.n_FD, n_OF_parallel = args.n_OF_parallel) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPIflag == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/weis/glue_code/gc_PoseOptimization.py b/weis/glue_code/gc_PoseOptimization.py index 030da6331..57c39cd71 100644 --- a/weis/glue_code/gc_PoseOptimization.py +++ b/weis/glue_code/gc_PoseOptimization.py @@ -25,199 +25,12 @@ def __init__(self, wt_init, modeling_options, analysis_options): else: self.floating_period_solve_component = 'floatingse' - - def get_number_design_variables(self): - # Determine the number of design variables - n_DV = 0 - - rotorD_opt = self.opt["design_variables"]["rotor_diameter"] - blade_opt = self.opt["design_variables"]["blade"] - tower_opt = self.opt["design_variables"]["tower"] - mono_opt = self.opt["design_variables"]["monopile"] - jacket_opt = 
self.opt["design_variables"]["jacket"] - hub_opt = self.opt["design_variables"]["hub"] - drive_opt = self.opt["design_variables"]["drivetrain"] - float_opt = self.opt["design_variables"]["floating"] - mooring_opt = self.opt["design_variables"]["mooring"] - - if rotorD_opt["flag"]: - n_DV += 1 - if blade_opt["aero_shape"]["twist"]["flag"]: - if blade_opt["aero_shape"]["twist"]["index_end"] > blade_opt["aero_shape"]["twist"]["n_opt"]: - raise Exception( - "Check the analysis options yaml, index_end of the blade twist is higher than the number of DVs n_opt" - ) - elif blade_opt["aero_shape"]["twist"]["index_end"] == 0: - blade_opt["aero_shape"]["twist"]["index_end"] = blade_opt["aero_shape"]["twist"]["n_opt"] - n_DV += blade_opt["aero_shape"]["twist"]["index_end"] - blade_opt["aero_shape"]["twist"]["index_start"] - if blade_opt["aero_shape"]["chord"]["flag"]: - if blade_opt["aero_shape"]["chord"]["index_end"] > blade_opt["aero_shape"]["chord"]["n_opt"]: - raise Exception( - "Check the analysis options yaml, index_end of the blade chord is higher than the number of DVs n_opt" - ) - elif blade_opt["aero_shape"]["chord"]["index_end"] == 0: - blade_opt["aero_shape"]["chord"]["index_end"] = blade_opt["aero_shape"]["chord"]["n_opt"] - n_DV += blade_opt["aero_shape"]["chord"]["index_end"] - blade_opt["aero_shape"]["chord"]["index_start"] - if blade_opt["aero_shape"]["af_positions"]["flag"]: - n_DV += ( - self.modeling["WISDEM"]["RotorSE"]["n_af_span"] - - blade_opt["aero_shape"]["af_positions"]["af_start"] - - 1 - ) - if "structure" in blade_opt: - if len(blade_opt["structure"])>0: - for i in range(len(blade_opt["structure"])): - if blade_opt["structure"][i]["index_end"] > blade_opt["structure"][i]["n_opt"]: - raise Exception( - "Check the analysis options yaml, the index_end of a blade layer is higher than the number of DVs n_opt" - ) - elif blade_opt["structure"][i]["index_end"] == 0: - blade_opt["structure"][i]["index_end"] = blade_opt["structure"][i]["n_opt"] - n_DV += ( 
- blade_opt["structure"][i]["index_end"] - - blade_opt["structure"][i]["index_start"] - ) - if self.opt["design_variables"]["control"]["tsr"]["flag"]: - n_DV += 1 - - if tower_opt["outer_diameter"]["flag"]: - n_DV += self.modeling["WISDEM"]["TowerSE"]["n_height"] - if tower_opt["layer_thickness"]["flag"]: - n_DV += self.modeling["WISDEM"]["TowerSE"]["n_height"] * self.modeling["WISDEM"]["TowerSE"]["n_layers"] - if mono_opt["outer_diameter"]["flag"]: - n_DV += self.modeling["WISDEM"]["FixedBottomSE"]["n_height"] - if mono_opt["layer_thickness"]["flag"]: - n_DV += ( - self.modeling["WISDEM"]["FixedBottomSE"]["n_height"] - * self.modeling["WISDEM"]["FixedBottomSE"]["n_layers"] - ) - # TODO: FIX THIS - # if jacket_opt["outer_diameter"]["flag"]: - # n_DV += self.modeling["WISDEM"]["FixedBottomSE"]["n_height"] - # if jacket_opt["layer_thickness"]["flag"]: - # n_DV += ( - # self.modeling["WISDEM"]["FixedBottomSE"]["n_height"] - # * self.modeling["WISDEM"]["FixedBottomSE"]["n_layers"] - # ) - if hub_opt["cone"]["flag"]: - n_DV += 1 - if hub_opt["hub_diameter"]["flag"]: - n_DV += 1 - for k in [ - "uptilt", - "overhang", - "distance_tt_hub", - "distance_hub_mb", - "distance_mb_mb", - "generator_length", - "gear_ratio", - "generator_length", - "bedplate_web_thickness", - "bedplate_flange_thickness", - "bedplate_flange_width", - ]: - if drive_opt[k]["flag"]: - n_DV += 1 - for k in [ - "lss_diameter", - "lss_wall_thickness", - "hss_diameter", - "hss_wall_thickness", - "nose_diameter", - "nose_wall_thickness", - ]: - if drive_opt[k]["flag"]: - n_DV += 2 - if drive_opt["bedplate_wall_thickness"]["flag"]: - n_DV += 4 - - if float_opt["joints"]["flag"]: - n_DV += len(float_opt["joints"]["z_coordinate"]) + len(float_opt["joints"]["r_coordinate"]) - - if float_opt["members"]["flag"]: - for k, kgrp in enumerate(float_opt["members"]["groups"]): - memname = kgrp["names"][0] - memidx = self.modeling["floating"]["members"]["name"].index(memname) - n_grid = 
len(self.modeling["floating"]["members"]["grid_member_" + memname]) - n_layers = self.modeling["floating"]["members"]["n_layers"][memidx] - if "diameter" in kgrp: - if "constant" in kgrp["diameter"]: - n_DV += 1 - else: - n_DV += n_grid - if "thickness" in kgrp: - n_DV += n_grid * n_layers - if "ballast" in kgrp: - n_DV += self.modeling["floating"]["members"]["ballast_flag_member_" + memname].count(False) - if "stiffeners" in kgrp: - if "ring" in kgrp["stiffeners"]: - if "size" in kgrp["stiffeners"]["ring"]: - pass - if "spacing" in kgrp["stiffeners"]["ring"]: - n_DV += 1 - if "longitudinal" in kgrp["stiffeners"]: - if "size" in kgrp["stiffeners"]["longitudinal"]: - pass - if "spacing" in kgrp["stiffeners"]["longitudinal"]: - n_DV += 1 - if "axial_joints" in kgrp: - n_DV += len(kgrp["axial_joints"]) - if self.modeling["flags"]["mooring"]: - n_design = 1 if self.modeling["mooring"]["symmetric"] else self.modeling["mooring"]["n_lines"] - if mooring_opt["line_length"]["flag"]: - n_DV += n_design - if mooring_opt["line_diameter"]["flag"]: - n_DV += n_design - - # Count and add design variables from WEIS - if self.opt['design_variables']['control']['servo']['pitch_control']['omega']['flag']: - if hasattr(self.modeling['ROSCO']['omega_pc'],'__len__'): - n_add += len(self.modeling['ROSCO']['omega_pc']) - else: - n_add += 1 - if self.opt['design_variables']['control']['servo']['pitch_control']['zeta']['flag']: - if hasattr(self.modeling['ROSCO']['zeta_pc'],'__len__'): - n_add += len(self.modeling['ROSCO']['zeta_pc']) - else: - n_add += 1 - if self.opt['design_variables']['control']['servo']['pitch_control']['Kp_float']['flag']: - n_DV += 1 - if self.opt['design_variables']['control']['servo']['pitch_control']['ptfm_freq']['flag']: - n_DV += 1 - if self.opt['design_variables']['control']['servo']['torque_control']['omega']['flag']: - n_DV += 1 - if self.opt['design_variables']['control']['servo']['torque_control']['zeta']['flag']: - n_DV += 1 - if 
self.opt['design_variables']['control']['servo']['flap_control']['flp_kp_norm']['flag']: - n_DV += 1 - if self.opt['design_variables']['control']['servo']['flap_control']['flp_tau']['flag']: - n_DV += 1 - if self.opt['design_variables']['control']['flaps']['te_flap_end']['flag']: - n_DV += self.modeling['WISDEM']['RotorSE']['n_te_flaps'] - if self.opt['design_variables']['control']['flaps']['te_flap_ext']['flag']: - n_DV += self.modeling['WISDEM']['RotorSE']['n_te_flaps'] - if self.opt['design_variables']['control']['ps_percent']['flag']: - n_DV += 1 - - if self.opt['driver']['optimization']['form'] == 'central': - n_DV *= 2 - - # TMD DVs - if self.opt['design_variables']['TMDs']['flag']: - TMD_opt = self.opt['design_variables']['TMDs'] - - # We only support one TMD for now - for tmd_group in TMD_opt['groups']: - if 'mass' in tmd_group: - n_DV += 1 - if 'stiffness' in tmd_group: - n_DV += 1 - if 'damping' in tmd_group: - n_DV += 1 - - return n_DV - - + if modeling_options['Level3']['flag']: + self.n_OF_runs = modeling_options['DLC_driver']['n_cases'] + elif modeling_options['Level2']['flag']: + self.n_OF_runs = modeling_options['Level2']['linearization']['NLinTimes'] + else: + self.n_OF_runs = 0 def set_objective(self, wt_opt): # Set merit figure. Each objective has its own scaling. 
Check first for user override diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index ca664931d..28e36f376 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -1,8 +1,84 @@ import os import sys - +import numpy as np from openmdao.utils.mpi import MPI +def compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores=0): + + + fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] + evolutionary_methods = ['DE', 'NSGA2'] + + print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(n_FD, n_OF_runs)) + + if modeling_options['Level3']['flag']: + # If we are running an optimization method that doesn't use finite differencing, set the number of DVs to 1 + if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods): + print("You are running an optimization based on an evolutionary solver. The number of parallel finite differencing is now multiplied by 5\n") + n_FD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take + elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): + print("You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1\n") + n_FD = 1 + elif modeling_options['Level2']['flag']: + if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): + print("You are not running a design of experiment or your optimizer is not gradient based. 
The number of parallel finite differencing is now set to 1\n") + n_FD = 1 + # # if we're doing a GA or such, "FD" means "entities in epoch" + # if opt_options['driver']['optimization']['solver'] in evolutionary_methods: + # n_FD = max_cores + + + print("To run the code in parallel with MPI, execute one of the following commands\n") + + if max_cores != 0: + print("You have access to %d cores. Please call WEIS as:"%max_cores) + # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) + # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. + if modeling_options['Level3']['flag']: + # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs, + # otherwise it is half of the number of DV (rounded to the lower integer). + # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs. + if max_cores > 2. * n_FD: + n_FD = n_FD + else: + n_FD = int(np.floor(max_cores / 2)) + # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources + # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and + # the number of cores available (minus the number of DV, which sit and wait for OF to complete) + n_FD = max([n_FD, 1]) + max_parallel_OF_runs = max([int(np.floor((max_cores - n_FD) / n_FD)), 1]) + n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) + elif modeling_options['Level2']['flag']: + if max_cores > 2. 
* n_FD: + n_FD = n_FD + else: + n_FD = int(np.floor(max_cores / 2)) + n_FD = max([n_FD, 1]) + max_parallel_OF_runs = max([int(np.floor((max_cores - n_FD) / n_FD)), 1]) + n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) + else: + # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV + n_FD = min([max_cores, n_FD]) + n_OF_runs_parallel = 1 + n_C = n_FD + n_FD * n_OF_runs_parallel + + else: + n_OF_runs_parallel = n_OF_runs + n_C = n_FD + n_FD * n_OF_runs_parallel + print("If you have access to (at least) %d cores, please call WEIS as:"%n_C) + + print("mpirun -np %d python weis_driver.py --n_FD=%d --n_OF_parallel=%d\n"%(n_C, n_FD, n_OF_runs_parallel)) + + if max_cores == 0: + print("\nIf you do not have access to %d cores"%n_C) + print("please provide your maximum available number of cores by typing:") + print("python weis_driver.py --maxCore=xx") + print("And substitute xx with your number of cores\n") + + print(f"nC={n_C}") + print(f"n_FD={n_FD}") + print(f"n_OFp={n_OF_runs_parallel}") + return None def under_mpirun(): """Return True if we're being executed under mpirun.""" @@ -35,19 +111,19 @@ def debug(*msg): # pragma: no cover MPI = None -def map_comm_heirarchical(n_DV, n_OF): +def map_comm_heirarchical(n_FD, n_OF): """ Heirarchical parallelization communicator mapping. Assumes a number of top level processes equal to the number of finite differences, each with its associated number of openfast simulations. 
""" - N = n_DV + n_DV * n_OF + N = n_FD + n_FD * n_OF comm_map_down = {} comm_map_up = {} - color_map = [0] * n_DV + color_map = [0] * n_FD - for i in range(n_DV): - comm_map_down[i] = [n_DV + j + i * n_OF for j in range(n_OF)] + for i in range(n_FD): + comm_map_down[i] = [n_FD + j + i * n_OF for j in range(n_OF)] color_map.extend([i + 1] * n_OF) for j in comm_map_down[i]: diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index bac7f0a4d..2b949e365 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -12,13 +12,14 @@ from weis.aeroelasticse.FileTools import save_yaml from wisdem.inputs.validation import simple_types -fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] -evolutionary_methods = ['DE', 'NSGA2'] + if MPI: from weis.glue_code.mpi_tools import map_comm_heirarchical, subprocessor_loop, subprocessor_stop -def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry_override=None, modeling_override=None, analysis_override=None): +def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, + geometry_override=None, modeling_override=None, analysis_override=None, + prepMPI=False, n_FD=1, n_OF_parallel=0): # Load all yaml inputs and validate (also fills in defaults) wt_initial = WindTurbineOntologyPythonWEIS( fname_wt_input, @@ -32,63 +33,12 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry # Initialize openmdao problem. If running with multiple processors in MPI, use parallel finite differencing equal to the number of cores used. # Otherwise, initialize the WindPark system normally. Get the rank number for parallelization. We only print output files using the root processor. 
myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options) - + if MPI: - n_DV = myopt.get_number_design_variables() - # Extract the number of cores available - max_cores = MPI.COMM_WORLD.Get_size() - - # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) - # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. - if modeling_options['Level3']['flag']: - - # If we are running an optimization method that doesn't use finite differencing, set the number of DVs to 1 - if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods): - n_DV *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take - elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - n_DV = 1 - - - # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs, - # otherwise it is half of the number of DV (rounded to the lower integer). - # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs. - if max_cores > 2. 
* n_DV: - n_FD = n_DV - else: - n_FD = int(np.floor(max_cores / 2)) - # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources - # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and - # the number of cores available (minus the number of DV, which sit and wait for OF to complete) - n_OF_runs = modeling_options['DLC_driver']['n_cases'] - n_DV = max([n_DV, 1]) - max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1]) - n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) - elif modeling_options['Level2']['flag']: - - if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - n_DV = 1 - - - if max_cores > 2. * n_DV: - n_FD = n_DV - else: - n_FD = int(np.floor(max_cores / 2)) - n_OF_runs = modeling_options['Level2']['linearization']['NLinTimes'] - n_DV = max([n_DV, 1]) - max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1]) - n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) - else: - # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV - n_FD = min([max_cores, n_DV]) - n_OF_runs_parallel = 1 - # if we're doing a GA or such, "FD" means "entities in epoch" - if opt_options['driver']['optimization']['solver'] in evolutionary_methods: - n_FD = max_cores - # Define the color map for the cores (how these are distributed between finite differencing and openfast runs) if opt_options['driver']['design_of_experiments']['flag']: n_FD = MPI.COMM_WORLD.Get_size() - n_OF_runs_parallel = 1 + n_OF_parallel = 1 rank = MPI.COMM_WORLD.Get_rank() comm_map_up = comm_map_down = {} for r in range(MPI.COMM_WORLD.Get_size()): @@ -96,7 +46,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry color_i = 0 else: n_FD = max([n_FD, 1]) - 
comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_runs_parallel) + comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_parallel) rank = MPI.COMM_WORLD.Get_rank() if rank < len(color_map): try: @@ -129,7 +79,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry if opt_options['driver']['design_of_experiments']['flag']: modeling_options['General']['openfast_configuration']['cores'] = 1 else: - modeling_options['General']['openfast_configuration']['cores'] = n_OF_runs_parallel + modeling_options['General']['openfast_configuration']['cores'] = n_OF_parallel # Parallel settings for OpenMDAO if opt_options['driver']['design_of_experiments']['flag']: @@ -152,8 +102,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry if opt_options['driver']['design_of_experiments']['flag']: wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs'] - wt_opt.driver.options['procs_per_model'] = 1 # n_OF_runs_parallel # int(max_cores / np.floor(max_cores/n_OF_runs)) - + wt_opt.driver.options['procs_per_model'] = 1 wt_opt = myopt.set_recorders(wt_opt) wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs','totals'] @@ -165,6 +114,11 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry # memory for the derivative arrays. 
wt_opt.setup(derivatives=False) + # If WEIS is called simply to prep for an MPI call, return the number of finite differences and OpenFAST calls, and stop + if prepMPI: + n_FD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None)) + return wt_opt, modeling_options, opt_options, n_FD, myopt.n_OF_runs + # Load initial wind turbine data from wt_initial to the openmdao problem wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options) wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options) diff --git a/weis/glue_code/sbatch_eagle.sh b/weis/glue_code/sbatch_eagle.sh deleted file mode 100644 index f4cf6d340..000000000 --- a/weis/glue_code/sbatch_eagle.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -#SBATCH --account=allocation_name -#SBATCH --time=01:00:00 -#SBATCH --job-name=Design1 -#SBATCH --nodes=4 -#SBATCH --ntasks-per-node=36 -#SBATCH --mail-user user@nrel.gov -#SBATCH --mail-type BEGIN,END,FAIL -#SBATCH --output=Case1.%j.out -####SBATCH --qos=high -####SBATCH --partition=debug - -nDV=11 # Number of design variables (x2 for central difference) -nOF=100 # Number of openfast runs per finite-difference evaluation -nC=$(( nDV + nDV * nOF )) # Number of cores needed. 
Make sure to request an appropriate number of nodes = N / 36 - -module purge -module load comp-intel intel-mpi mkl -module unload gcc - -source activate weis-env - -mpirun -np $nC python runWEIS.py From f62be3f4b3bf0f5ad0e627ef166b6f5e89c94398 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Thu, 19 Dec 2024 13:48:19 -0700 Subject: [PATCH 03/28] add documentation page --- docs/index.rst | 1 + docs/run_in_parallel.rst | 59 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 docs/run_in_parallel.rst diff --git a/docs/index.rst b/docs/index.rst index c21030311..da4a85245 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,6 +20,7 @@ Using WEIS installation how_weis_works inputs/yaml_inputs + run_in_parallel WEIS Visualization APP diff --git a/docs/run_in_parallel.rst b/docs/run_in_parallel.rst new file mode 100644 index 000000000..22693a404 --- /dev/null +++ b/docs/run_in_parallel.rst @@ -0,0 +1,59 @@ +Run in parallel +-------------- + +WEIS can be run sequentially on a single processor. WEIS can however also be parallelized to handle larger problems in a timely manner. + + +Background +------------------------------------ + +The parallelization of WEIS leverages the [MPI library](https://mpi4py.readthedocs.io/en/stable/). Before proceeding, please make sure that your WEIS environemnt includes the library mpi4py as discussed in the README.md at the root level of this repo, or on the `WEIS GitHub page `_. + +Parallelization in WEIS happens at two levels: +* The first level is triggered when an optimization is run and a gradient-based optimizer is called. In this case, the OpenMDAO library will execute the finite differences in parallel. OpenMDAO then assembles all partial derivatives in a single Jacobian and iterations progress. +* The second level is triggered when multiple OpenFAST runs are specified. These are executed in parallel. 
+The two levels of parallelization are integrated and can co-exist as long as sufficient computational resources are available. + +WEIS helps you set up the right call to MPI given the available computational resources, see the section below. + + +How to run WEIS in parallel +------------------------------------ + +Running WEIS in parallel is slightly more elaborated than running it sequentially. An example of a parallel call to WEIS is provided in example `03_NREL5MW_OC3 `_ + +The first consists of a pre-processing call to WEIS. This step returns the number of finite differences that are specified given the analysis_options.yaml file. To execute this step, in a terminal window type: + +.. code-block:: bash + + python weis_driver.py --preMPIflag=True + +In the terminal the code will return the best setup for an optimal MPI call given your problem. + +If you are resource constrained, you can pass the keyword argument `--maxCores` and set the maximum number of processors that you have available. If you have 20, type: + +.. code-block:: bash + + python weis_driver.py --preMPIflag=True --maxCores=20 + +These two commands will help you set the three keyword arguments `nC`, `n_FD`, and `n_OFp`. + +At this point, you are ready to launch WEIS in parallel. Assume that `nC=12`, `n_FD=2`, and `n_OFp=5`, your call to WEIS will look like: + +.. code-block:: bash + + mpirun -np 12 python runWEIS.py --n_FD=2 --n_OF_parallel=5 + +Lastly, note that the two steps can be coded up in a batch script that automatically reads `nC`, `n_FD`, and `n_OFp` from the preprocessing of WEIS and passes to the full call of WEIS. +You can do this by listing these lines of code in a batch script: +.. 
code-block:: bash + + source activate weis-env + output=$(python weis_driver.py --preMPIflag=True --maxCores=104) + nC=$(echo "$output" | grep 'nC=' | awk -F'=' '{print $2}') + n_FD=$(echo "$output" | grep 'n_FD=' | awk -F'=' '{print $2}') + n_OFp=$(echo "$output" | grep 'n_OFp=' | awk -F'=' '{print $2}') + mpirun -np $nC python runWEIS.py --n_FD=$n_FD --n_OF_parallel=$n_OFp + + +You can see this example in the `example sbatch script for NREL's HPC system Kestrel `. \ No newline at end of file From 00f9713068a0b2a924d587bbfaec8da504029984 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 20 Dec 2024 13:52:56 -0700 Subject: [PATCH 04/28] example for simple call to openfast with mpi --- docs/run_in_parallel.rst | 53 +++++++++++++++++-- .../weis_driver_loads.py | 27 +++++++++- examples/03_NREL5MW_OC3_spar/weis_driver.py | 14 +++-- weis/control/tune_rosco.py | 2 +- 4 files changed, 82 insertions(+), 14 deletions(-) diff --git a/docs/run_in_parallel.rst b/docs/run_in_parallel.rst index 22693a404..09478ce85 100644 --- a/docs/run_in_parallel.rst +++ b/docs/run_in_parallel.rst @@ -20,9 +20,13 @@ WEIS helps you set up the right call to MPI given the available computational re How to run WEIS in parallel ------------------------------------ -Running WEIS in parallel is slightly more elaborated than running it sequentially. An example of a parallel call to WEIS is provided in example `03_NREL5MW_OC3 `_ +Running WEIS in parallel is slightly more elaborated than running it sequentially. A first example of a parallel call for a design optimization in WEIS is provided in example `03_NREL5MW_OC3 `_. A second example runs an OpenFAST model in parallel, see example `02_run_openfast_cases `_. -The first consists of a pre-processing call to WEIS. This step returns the number of finite differences that are specified given the analysis_options.yaml file. 
To execute this step, in a terminal window type: + +Design optimization in parallel +------------------------------------ + +These instructions follow example 03 `03_NREL5MW_OC3 `_. To run the design optimization in parallel, the first step consists of a pre-processing call to WEIS. This step returns the number of finite differences that are specified given the analysis_options.yaml file. To execute this step, in a terminal window navigate to example 03 and type: .. code-block:: bash @@ -56,4 +60,47 @@ You can do this by listing these lines of code in a batch script: mpirun -np $nC python runWEIS.py --n_FD=$n_FD --n_OF_parallel=$n_OFp -You can see this example in the `example sbatch script for NREL's HPC system Kestrel `. \ No newline at end of file +You can see this example in the `example sbatch script for NREL's HPC system Kestrel `. + + +Parallelize calls to OpenFAST +------------------------------------ + +WEIS can be used to run OpenFAST simulations, such as design load cases. +[More information about setting DLCs can be found here](https://github.com/WISDEM/WEIS/blob/docs/docs/dlc_generator.rst) + +To do so, WEIS is run with a single function evaluation that fires off a number of OpenFAST simulations. This can be done by setting `n_FD=1`, `n_OFp` to the number of OpenFAST simulations, and `nC=n_FD+n_OFp`. + +Let's look at an example in `02_run_openfast_cases `_. + +In a terminal, navigate to example 02 and type: + +.. code-block:: bash + python weis_driver_loads.py --preMPIflag=True + +The terminal should return this message + +.. code-block:: bash + Your problem has 0 design variable(s) and 7 OpenFAST run(s) + + You are not running a design of experiment or your optimizer is not gradient based. 
The number of parallel finite differencing is now set to 1 + + To run the code in parallel with MPI, execute one of the following commands + + If you have access to (at least) 8 cores, please call WEIS as: + mpirun -np 8 python weis_driver.py --n_FD=1 --n_OF_parallel=7 + + + If you do not have access to 8 cores + please provide your maximum available number of cores by typing: + python weis_driver.py --maxCore=xx + And substitute xx with your number of cores + + nC=8 + n_FD=1 + n_OFp=7 + +You are now ready to execute your script by typing + +.. code-block:: bash + mpirun -np 8 python weis_driver.py --n_FD=1 --n_OF_parallel=7 \ No newline at end of file diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index 435feef47..af57006ea 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -4,6 +4,7 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI +from weis.glue_code.mpi_tools import compute_optimal_nC ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) @@ -12,13 +13,35 @@ fname_analysis_options = run_dir + os.sep + 'analysis_options_loads.yaml' +import argparse +# Set up argument parser +parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") +# Add the flag +parser.add_argument("--preMPIflag", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") +parser.add_argument("--maxCores", type=int, default=0, help="Maximum number of cores available.") +parser.add_argument("--n_FD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") +parser.add_argument("--n_OF_parallel", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") +# Parse the arguments +args = parser.parse_args() +# Use the flag in your script +if 
args.preMPIflag: + print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") +else: + print("Preprocessor flag is set to False. Run WEIS now.") + tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +# Use the flag in your script +if args.preMPIflag: + _, modeling_options, opt_options, n_FD, n_OF_runs = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True) + compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = args.maxCores) +else: + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, n_FD = args.n_FD, n_OF_parallel = args.n_OF_parallel) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPIflag == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() + diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index 2b675da50..a31acc2df 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -6,6 +6,12 @@ from openmdao.utils.mpi import MPI from weis.glue_code.mpi_tools import compute_optimal_nC +## File management +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, "nrel5mw-spar_oc3.yaml") +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') + import argparse # Set up argument parser parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") @@ -22,14 +28,6 @@ else: print("Preprocessor flag is set to False. 
Run WEIS now.") - -## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = os.path.join(run_dir, "nrel5mw-spar_oc3.yaml") -fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') -fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') - - tt = time.time() # Use the flag in your script if args.preMPIflag: diff --git a/weis/control/tune_rosco.py b/weis/control/tune_rosco.py index 3e9094ac0..02779a909 100644 --- a/weis/control/tune_rosco.py +++ b/weis/control/tune_rosco.py @@ -522,7 +522,7 @@ def setup(self): if parameter_filename == 'none': raise Exception('A ROSCO tuning_yaml must be specified in the modeling_options if from_OpenFAST is True') - inps = load_rosco_yaml(parameter_filename) + inps = load_rosco_yaml(parameter_filename, rank_0=True) self.turbine_params = inps['turbine_params'] self.control_params = inps['controller_params'] From 9cb54d38eec63aac8449098defe8d2d379f6fae1 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sat, 21 Dec 2024 07:10:44 -0700 Subject: [PATCH 05/28] postpone return so that we can stack the preMPI call and the actual WEIS call --- examples/03_NREL5MW_OC3_spar/weis_driver.py | 2 +- weis/glue_code/mpi_tools.py | 2 +- weis/glue_code/runWEIS.py | 147 ++++++++++---------- 3 files changed, 78 insertions(+), 73 deletions(-) diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index a31acc2df..967f5494e 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -32,7 +32,7 @@ # Use the flag in your script if args.preMPIflag: _, modeling_options, opt_options, n_FD, n_OF_runs = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True) - compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = args.maxCores) + n_C, n_FD, n_OF_parallel= compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = 
args.maxCores) else: wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, n_FD = args.n_FD, n_OF_parallel = args.n_OF_parallel) diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 28e36f376..255a7d285 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -78,7 +78,7 @@ def compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores print(f"nC={n_C}") print(f"n_FD={n_FD}") print(f"n_OFp={n_OF_runs_parallel}") - return None + return n_C, n_FD, n_OF_runs_parallel def under_mpirun(): """Return True if we're being executed under mpirun.""" diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index 2b949e365..0df031968 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -114,77 +114,79 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, # memory for the derivative arrays. wt_opt.setup(derivatives=False) - # If WEIS is called simply to prep for an MPI call, return the number of finite differences and OpenFAST calls, and stop - if prepMPI: - n_FD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None)) - return wt_opt, modeling_options, opt_options, n_FD, myopt.n_OF_runs - - # Load initial wind turbine data from wt_initial to the openmdao problem - wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options) - wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options) - if modeling_options['flags']['TMDs']: - wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options) - - wt_opt = myopt.set_initial(wt_opt, wt_init) - if modeling_options['Level3']['flag']: - wt_opt = myopt.set_initial_weis(wt_opt) - - # If the user provides values in geometry_override, they overwrite - # whatever values have been set by the yaml files. - # This is useful for performing black-box wrapped optimization without - # needing to modify the yaml files. 
- # Some logic is used here if the user gives a smalller size for the - # design variable than expected to input the values into the end - # of the array. - # This is useful when optimizing twist, where the first few indices - # do not need to be optimized as they correspond to a circular cross-section. - if geometry_override is not None: - for key in geometry_override: - num_values = np.array(geometry_override[key]).size - key_size = wt_opt[key].size - idx_start = key_size - num_values - wt_opt[key][idx_start:] = geometry_override[key] - - # Place the last design variables from a previous run into the problem. - # This needs to occur after the above setup() and yaml2openmdao() calls - # so these values are correctly placed in the problem. - wt_opt = myopt.set_restart(wt_opt) - - if 'check_totals' in opt_options['driver']['optimization']: - if opt_options['driver']['optimization']['check_totals']: - wt_opt.run_model() - totals = wt_opt.compute_totals() - - if 'check_partials' in opt_options['driver']['optimization']: - if opt_options['driver']['optimization']['check_partials']: + # Return number of design variables, used to setup WEIS for an MPI run + n_FD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None)) + + # If WEIS is called simply to prep for an MPI call, no need to proceed and simply + # return the number of finite differences and OpenFAST calls, and stop + # Otherwise, keep going assigning inputs and running the OpenMDAO model/driver + if not prepMPI: + # Load initial wind turbine data from wt_initial to the openmdao problem + wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options) + wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options) + if modeling_options['flags']['TMDs']: + wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options) + + wt_opt = myopt.set_initial(wt_opt, wt_init) + if modeling_options['Level3']['flag']: + wt_opt = myopt.set_initial_weis(wt_opt) + + # If the user provides values in 
geometry_override, they overwrite + # whatever values have been set by the yaml files. + # This is useful for performing black-box wrapped optimization without + # needing to modify the yaml files. + # Some logic is used here if the user gives a smalller size for the + # design variable than expected to input the values into the end + # of the array. + # This is useful when optimizing twist, where the first few indices + # do not need to be optimized as they correspond to a circular cross-section. + if geometry_override is not None: + for key in geometry_override: + num_values = np.array(geometry_override[key]).size + key_size = wt_opt[key].size + idx_start = key_size - num_values + wt_opt[key][idx_start:] = geometry_override[key] + + # Place the last design variables from a previous run into the problem. + # This needs to occur after the above setup() and yaml2openmdao() calls + # so these values are correctly placed in the problem. + wt_opt = myopt.set_restart(wt_opt) + + if 'check_totals' in opt_options['driver']['optimization']: + if opt_options['driver']['optimization']['check_totals']: + wt_opt.run_model() + totals = wt_opt.compute_totals() + + if 'check_partials' in opt_options['driver']['optimization']: + if opt_options['driver']['optimization']['check_partials']: + wt_opt.run_model() + checks = wt_opt.check_partials(compact_print=True) + + sys.stdout.flush() + # Run openmdao problem + if opt_options['opt_flag']: + wt_opt.run_driver() + else: wt_opt.run_model() - checks = wt_opt.check_partials(compact_print=True) - sys.stdout.flush() - # Run openmdao problem - if opt_options['opt_flag']: - wt_opt.run_driver() - else: - wt_opt.run_model() - - if (not MPI) or (MPI and rank == 0): - # Save data coming from openmdao to an output yaml file - froot_out = os.path.join(folder_output, opt_options['general']['fname_output']) - # Remove the fst_vt key from the dictionary and write out the modeling options - 
modeling_options['General']['openfast_configuration']['fst_vt'] = {} - if not modeling_options['Level3']['from_openfast']: - wt_initial.write_ontology(wt_opt, froot_out) - wt_initial.write_options(froot_out) - - # openMDAO doesn't save constraint values, so we get them from this construction - problem_var_dict = wt_opt.list_driver_vars( - desvar_opts=["lower", "upper",], - cons_opts=["lower", "upper", "equals",], - ) - save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict)) - - # Save data to numpy and matlab arrays - fileIO.save_data(froot_out, wt_opt) + if (not MPI) or (MPI and rank == 0): + # Save data coming from openmdao to an output yaml file + froot_out = os.path.join(folder_output, opt_options['general']['fname_output']) + # Remove the fst_vt key from the dictionary and write out the modeling options + modeling_options['General']['openfast_configuration']['fst_vt'] = {} + if not modeling_options['Level3']['from_openfast']: + wt_initial.write_ontology(wt_opt, froot_out) + wt_initial.write_options(froot_out) + + # openMDAO doesn't save constraint values, so we get them from this construction + problem_var_dict = wt_opt.list_driver_vars( + desvar_opts=["lower", "upper",], + cons_opts=["lower", "upper", "equals",], + ) + save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict)) + + # Save data to numpy and matlab arrays + fileIO.save_data(froot_out, wt_opt) if MPI and \ (modeling_options['Level3']['flag'] or modeling_options['Level2']['flag']) and \ @@ -203,7 +205,10 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, if MPI: MPI.COMM_WORLD.Barrier() - if rank == 0: - return wt_opt, modeling_options, opt_options + if color_i == 0: + if prepMPI: + return wt_opt, modeling_options, opt_options, n_FD, myopt.n_OF_runs + else: + return wt_opt, modeling_options, opt_options else: return [], [], [] From 98824050e179ecfa4f193ce93a486ee1a8abb2d8 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sun, 22 Dec 
2024 07:21:05 -0700 Subject: [PATCH 06/28] work in progress, mpi settings moved into modeling options --- examples/03_NREL5MW_OC3_spar/weis_driver.py | 21 +++--- weis/glue_code/mpi_tools.py | 81 +++++++++++---------- weis/glue_code/runWEIS.py | 31 ++++---- 3 files changed, 69 insertions(+), 64 deletions(-) diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index 967f5494e..d6f7c3883 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -4,7 +4,6 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI -from weis.glue_code.mpi_tools import compute_optimal_nC ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) @@ -16,30 +15,30 @@ # Set up argument parser parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") # Add the flag -parser.add_argument("--preMPIflag", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxCores", type=int, default=0, help="Maximum number of cores available.") -parser.add_argument("--n_FD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") -parser.add_argument("--n_OF_parallel", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") +parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") +parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of cores available.") +parser.add_argument("--nFD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") +parser.add_argument("--nOFp", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") # Parse the arguments args = parser.parse_args() # Use the flag in your script -if 
args.preMPIflag: +if args.preMPI: print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") else: print("Preprocessor flag is set to False. Run WEIS now.") tt = time.time() # Use the flag in your script -if args.preMPIflag: - _, modeling_options, opt_options, n_FD, n_OF_runs = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True) - n_C, n_FD, n_OF_parallel= compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = args.maxCores) +if args.preMPI: + _, _, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = args.maxnP) else: - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, n_FD = args.n_FD, n_OF_parallel = args.n_OF_parallel) + _, modeling_options, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = args.maxnP) + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, modeling_options, fname_analysis_options) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0 and args.preMPIflag == False: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 255a7d285..c1771df09 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -3,82 +3,87 @@ import numpy as np from openmdao.utils.mpi import MPI -def compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores=0): +def compute_optimal_nC(nFD, nOF, modeling_options, opt_options, maxnP=0): fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] evolutionary_methods = ['DE', 'NSGA2'] - print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(n_FD, n_OF_runs)) + print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(nFD, nOF)) if modeling_options['Level3']['flag']: # If we are running an 
optimization method that doesn't use finite differencing, set the number of DVs to 1 if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods): print("You are running an optimization based on an evolutionary solver. The number of parallel finite differencing is now multiplied by 5\n") - n_FD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take + nFD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): print("You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1\n") - n_FD = 1 + nFD = 1 elif modeling_options['Level2']['flag']: if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): print("You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1\n") - n_FD = 1 + nFD = 1 # # if we're doing a GA or such, "FD" means "entities in epoch" # if opt_options['driver']['optimization']['solver'] in evolutionary_methods: - # n_FD = max_cores + # nFD = maxnP print("To run the code in parallel with MPI, execute one of the following commands\n") - if max_cores != 0: - print("You have access to %d cores. Please call WEIS as:"%max_cores) + if maxnP != 0: + print("You have access to %d cores. Please call WEIS as:"%maxnP) # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. 
if modeling_options['Level3']['flag']: # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs, # otherwise it is half of the number of DV (rounded to the lower integer). # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs. - if max_cores > 2. * n_FD: - n_FD = n_FD + if maxnP > 2. * nFD: + nFD = nFD else: - n_FD = int(np.floor(max_cores / 2)) + nFD = int(np.floor(maxnP / 2)) # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and # the number of cores available (minus the number of DV, which sit and wait for OF to complete) - n_FD = max([n_FD, 1]) - max_parallel_OF_runs = max([int(np.floor((max_cores - n_FD) / n_FD)), 1]) - n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) + nFD = max([nFD, 1]) + max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1]) + nOFp = min([int(nOF), max_parallel_OF_runs]) elif modeling_options['Level2']['flag']: - if max_cores > 2. * n_FD: - n_FD = n_FD + if maxnP > 2. 
* nFD: + nFD = nFD else: - n_FD = int(np.floor(max_cores / 2)) - n_FD = max([n_FD, 1]) - max_parallel_OF_runs = max([int(np.floor((max_cores - n_FD) / n_FD)), 1]) - n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs]) + nFD = int(np.floor(maxnP / 2)) + nFD = max([nFD, 1]) + max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1]) + nOFp = min([int(nOF), max_parallel_OF_runs]) else: # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV - n_FD = min([max_cores, n_FD]) - n_OF_runs_parallel = 1 - n_C = n_FD + n_FD * n_OF_runs_parallel + nFD = min([maxnP, nFD]) + nOFp = 1 + nC = nFD + nFD * nOFp else: - n_OF_runs_parallel = n_OF_runs - n_C = n_FD + n_FD * n_OF_runs_parallel - print("If you have access to (at least) %d cores, please call WEIS as:"%n_C) + nOFp = nOF + nC = nFD + nFD * nOFp + print("If you have access to (at least) %d cores, please call WEIS as:"%nC) - print("mpirun -np %d python weis_driver.py --n_FD=%d --n_OF_parallel=%d\n"%(n_C, n_FD, n_OF_runs_parallel)) + print("mpirun -np %d python weis_driver.py --nFD=%d --n_OF_parallel=%d\n"%(nC, nFD, nOFp)) - if max_cores == 0: - print("\nIf you do not have access to %d cores"%n_C) + if maxnP == 0: + print("\nIf you do not have access to %d cores"%nC) print("please provide your maximum available number of cores by typing:") print("python weis_driver.py --maxCore=xx") print("And substitute xx with your number of cores\n") - print(f"nC={n_C}") - print(f"n_FD={n_FD}") - print(f"n_OFp={n_OF_runs_parallel}") - return n_C, n_FD, n_OF_runs_parallel + print(f"nC={nC}") + print(f"nFD={nFD}") + print(f"n_OFp={nOFp}") + + modeling_options['General']['openfast_configuration']['nC'] = nC + modeling_options['General']['openfast_configuration']['nFD'] = nFD + modeling_options['General']['openfast_configuration']['nOFp'] = nOFp + + return modeling_options def under_mpirun(): """Return True if we're being executed under mpirun.""" 
@@ -111,19 +116,19 @@ def debug(*msg): # pragma: no cover MPI = None -def map_comm_heirarchical(n_FD, n_OF): +def map_comm_heirarchical(nFD, n_OF): """ Heirarchical parallelization communicator mapping. Assumes a number of top level processes equal to the number of finite differences, each with its associated number of openfast simulations. """ - N = n_FD + n_FD * n_OF + N = nFD + nFD * n_OF comm_map_down = {} comm_map_up = {} - color_map = [0] * n_FD + color_map = [0] * nFD - for i in range(n_FD): - comm_map_down[i] = [n_FD + j + i * n_OF for j in range(n_OF)] + for i in range(nFD): + comm_map_down[i] = [nFD + j + i * n_OF for j in range(n_OF)] color_map.extend([i + 1] * n_OF) for j in comm_map_down[i]: diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index 0df031968..620517baf 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -11,7 +11,7 @@ from weis.control.tmd import assign_TMD_values from weis.aeroelasticse.FileTools import save_yaml from wisdem.inputs.validation import simple_types - +from weis.glue_code.mpi_tools import compute_optimal_nC if MPI: @@ -19,7 +19,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry_override=None, modeling_override=None, analysis_override=None, - prepMPI=False, n_FD=1, n_OF_parallel=0): + prepMPI=False, maxnP=0): # Load all yaml inputs and validate (also fills in defaults) wt_initial = WindTurbineOntologyPythonWEIS( fname_wt_input, @@ -35,24 +35,26 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options) if MPI: + nFD = modeling_options['General']['openfast_configuration']['nFD'] + nOFp = modeling_options['General']['openfast_configuration']['nOFp'] # Define the color map for the cores (how these are distributed between finite differencing and openfast runs) if opt_options['driver']['design_of_experiments']['flag']: - n_FD = MPI.COMM_WORLD.Get_size() - n_OF_parallel = 1 
+ nFD = MPI.COMM_WORLD.Get_size() + nOFp = 1 rank = MPI.COMM_WORLD.Get_rank() comm_map_up = comm_map_down = {} for r in range(MPI.COMM_WORLD.Get_size()): comm_map_up[r] = [r] color_i = 0 else: - n_FD = max([n_FD, 1]) - comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_parallel) + nFD = max([nFD, 1]) + comm_map_down, comm_map_up, color_map = map_comm_heirarchical(nFD, nOFp) rank = MPI.COMM_WORLD.Get_rank() if rank < len(color_map): try: color_i = color_map[rank] except IndexError: - raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(n_FD)) + raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(nFD)) else: color_i = max(color_map) + 1 comm_i = MPI.COMM_WORLD.Split(color_i, 1) @@ -79,13 +81,13 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, if opt_options['driver']['design_of_experiments']['flag']: modeling_options['General']['openfast_configuration']['cores'] = 1 else: - modeling_options['General']['openfast_configuration']['cores'] = n_OF_parallel + modeling_options['General']['openfast_configuration']['cores'] = nOFp # Parallel settings for OpenMDAO if opt_options['driver']['design_of_experiments']['flag']: wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False) else: - wt_opt = om.Problem(model=om.Group(num_par_fd=n_FD), comm=comm_i, reports=False) + wt_opt = om.Problem(model=om.Group(num_par_fd=nFD), comm=comm_i, reports=False) wt_opt.model.add_subsystem('comp', WindPark(modeling_options = modeling_options, opt_options = opt_options), promotes=['*']) else: # Sequential finite differencing and openfast simulations @@ -114,8 +116,10 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, # memory for the derivative arrays. 
 wt_opt.setup(derivatives=False)
 
-    # Return number of design variables, used to setup WEIS for an MPI run
-    n_FD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None))
+    # Estimate number of design variables and parallel calls to OpenFAST given
+    # the computational resources available. This is used to setup WEIS for an MPI run
+    nFD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None))
+    modeling_options = compute_optimal_nC(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP)
 
     # If WEIS is called simply to prep for an MPI call, no need to proceed and simply
     # return the number of finite differences and OpenFAST calls, and stop
@@ -206,9 +210,6 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options,
         MPI.COMM_WORLD.Barrier()
 
     if color_i == 0:
-        if prepMPI:
-            return wt_opt, modeling_options, opt_options, n_FD, myopt.n_OF_runs
-        else:
-            return wt_opt, modeling_options, opt_options
+        return wt_opt, modeling_options, opt_options
     else:
         return [], [], []

From ca3a4d892d711c4b80894174b4603aba91a9a8b1 Mon Sep 17 00:00:00 2001
From: ptrbortolotti
Date: Sun, 22 Dec 2024 07:27:09 -0700
Subject: [PATCH 07/28] adjust if statements

---
 weis/glue_code/mpi_tools.py | 2 +-
 weis/glue_code/runWEIS.py   | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py
index c1771df09..b36dccb3b 100644
--- a/weis/glue_code/mpi_tools.py
+++ b/weis/glue_code/mpi_tools.py
@@ -67,7 +67,7 @@ def compute_optimal_nC(nFD, nOF, modeling_options, opt_options, maxnP=0):
         nC = nFD + nFD * nOFp
     print("If you have access to (at least) %d cores, please call WEIS as:"%nC)
 
-    print("mpirun -np %d python weis_driver.py --nFD=%d --n_OF_parallel=%d\n"%(nC, nFD, nOFp))
+    print("mpirun -np %d python weis_driver.py --nFD=%d --nOFp=%d\n"%(nC, nFD, nOFp))
 
     if maxnP == 0:
         print("\nIf you do not have access to %d cores"%nC)
diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py
index 620517baf..f0b281117 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -35,8 +35,12 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options) if MPI: - nFD = modeling_options['General']['openfast_configuration']['nFD'] - nOFp = modeling_options['General']['openfast_configuration']['nOFp'] + if not prepMPI: + nFD = modeling_options['General']['openfast_configuration']['nFD'] + nOFp = modeling_options['General']['openfast_configuration']['nOFp'] + else: + nFD = 1 + nOFp = 0 # Define the color map for the cores (how these are distributed between finite differencing and openfast runs) if opt_options['driver']['design_of_experiments']['flag']: nFD = MPI.COMM_WORLD.Get_size() From 139263a5404f18a0f14fe9fcea4a3ea9d7f83fba Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sun, 22 Dec 2024 07:48:04 -0700 Subject: [PATCH 08/28] broadcast mod and opt options --- weis/glue_code/runWEIS.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index f0b281117..df943e543 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -210,10 +210,22 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, sys.stdout.flush() # Send each core in use to a barrier synchronization + # Next, share WEIS outputs across all processors from rank=0 (root) if MPI: MPI.COMM_WORLD.Barrier() + rank = MPI.COMM_WORLD.Get_rank() + if rank != 0: + wt_opt = None + modeling_options = None + opt_options = None + + # MPI.COMM_WORLD.bcast cannot broadcast out a full OpenMDAO problem + # We don't need it for now, but this might become an issue if we start + # stacking multiple WEIS calls on top of each other and we need to + # reuse wt_opt from one call to the next + modeling_options = MPI.COMM_WORLD.bcast(modeling_options, root = 0) + opt_options = 
MPI.COMM_WORLD.bcast(opt_options, root = 0) + MPI.COMM_WORLD.Barrier() + + return wt_opt, modeling_options, opt_options - if color_i == 0: - return wt_opt, modeling_options, opt_options - else: - return [], [], [] From 77ad691bf272ff5eb34848b2af7cd62c3ba770ee Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sun, 22 Dec 2024 07:59:56 -0700 Subject: [PATCH 09/28] more progress, not there yet --- examples/03_NREL5MW_OC3_spar/weis_driver.py | 22 ++++++++++++++++++--- weis/inputs/modeling_schema.yaml | 8 ++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index d6f7c3883..6587f689d 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -30,10 +30,26 @@ tt = time.time() # Use the flag in your script if args.preMPI: - _, _, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = args.maxnP) + _, _, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = args.maxnP) else: - _, modeling_options, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = args.maxnP) - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, modeling_options, fname_analysis_options) + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = args.maxnP) + modeling_override = {} + modeling_override['General'] = {} + modeling_override['General']['openfast_configuration'] = {} + modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] + modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + 
fname_analysis_options, + modeling_override=modeling_options) if MPI: rank = MPI.COMM_WORLD.Get_rank() diff --git a/weis/inputs/modeling_schema.yaml b/weis/inputs/modeling_schema.yaml index 4c4957241..5698fc0a3 100644 --- a/weis/inputs/modeling_schema.yaml +++ b/weis/inputs/modeling_schema.yaml @@ -80,6 +80,14 @@ properties: type: boolean default: False description: Write standard output to own file. Output will not print to screen. + nFD: + type: integer + default: 1 + description: Number of parallel finite differencing performed by OpenMDAO. WEIS sets this number once the preMPI keyword argument is set + nOFp: + type: integer + default: 1 + description: Number of parallel OpenFAST runs performed by OpenMDAO. WEIS sets this number once the preMPI keyword argument is set goodman_correction: type: boolean default: False From ce527f69339d031ecd39cd8f1aceafe07284f67b Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Tue, 24 Dec 2024 12:22:48 -0700 Subject: [PATCH 10/28] sequential or preMPI and actual weis call now working --- examples/03_NREL5MW_OC3_spar/weis_driver.py | 4 +++- weis/glue_code/mpi_tools.py | 20 ++++++++------------ weis/glue_code/runWEIS.py | 4 ++-- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index 6587f689d..aadd6e8c0 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -41,15 +41,17 @@ fname_analysis_options, prepMPI=True, maxnP = args.maxnP) + modeling_override = {} modeling_override['General'] = {} modeling_override['General']['openfast_configuration'] = {} modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, 
fname_modeling_options, fname_analysis_options, - modeling_override=modeling_options) + modeling_override=modeling_override) if MPI: rank = MPI.COMM_WORLD.Get_rank() diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index b36dccb3b..c01155de1 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -3,7 +3,7 @@ import numpy as np from openmdao.utils.mpi import MPI -def compute_optimal_nC(nFD, nOF, modeling_options, opt_options, maxnP=0): +def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] @@ -60,26 +60,22 @@ def compute_optimal_nC(nFD, nOF, modeling_options, opt_options, maxnP=0): # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV nFD = min([maxnP, nFD]) nOFp = 1 - nC = nFD + nFD * nOFp + nP = nFD + nFD * nOFp else: nOFp = nOF - nC = nFD + nFD * nOFp - print("If you have access to (at least) %d cores, please call WEIS as:"%nC) + nP = nFD + nFD * nOFp + print("If you have access to (at least) %d cores, please call WEIS as:"%nP) - print("mpirun -np %d python weis_driver.py --nFD=%d --nOFp=%d\n"%(nC, nFD, nOFp)) + print("mpirun -np %d python weis_driver.py --nFD=%d --nOFp=%d\n"%(nP, nFD, nOFp)) if maxnP == 0: - print("\nIf you do not have access to %d cores"%nC) + print("\nIf you do not have access to %d cores"%nP) print("please provide your maximum available number of cores by typing:") - print("python weis_driver.py --maxCore=xx") + print("python weis_driver.py --maxnP=xx") print("And substitute xx with your number of cores\n") - - print(f"nC={nC}") - print(f"nFD={nFD}") - print(f"n_OFp={nOFp}") - modeling_options['General']['openfast_configuration']['nC'] = nC + modeling_options['General']['openfast_configuration']['nP'] = nP modeling_options['General']['openfast_configuration']['nFD'] = nFD modeling_options['General']['openfast_configuration']['nOFp'] = nOFp diff --git 
a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py
index df943e543..c92ac0d66 100644
--- a/weis/glue_code/runWEIS.py
+++ b/weis/glue_code/runWEIS.py
@@ -11,7 +11,7 @@ from weis.control.tmd import assign_TMD_values
 from weis.aeroelasticse.FileTools import save_yaml
 from wisdem.inputs.validation import simple_types
-from weis.glue_code.mpi_tools import compute_optimal_nC
+from weis.glue_code.mpi_tools import compute_optimal_nP
 
 if MPI:
@@ -123,7 +123,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options,
     # Estimate number of design variables and parallel calls to OpenFAST given
     # the computational resources available. This is used to setup WEIS for an MPI run
     nFD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None))
-    modeling_options = compute_optimal_nC(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP)
+    modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP)
 
     # If WEIS is called simply to prep for an MPI call, no need to proceed and simply
     # return the number of finite differences and OpenFAST calls, and stop

From 86fef371da8ec91864136be80dab5a5ccb4c4444 Mon Sep 17 00:00:00 2001
From: ptrbortolotti
Date: Tue, 24 Dec 2024 16:43:54 -0300
Subject: [PATCH 11/28] better, but MPI can still hang

---
 docs/run_in_parallel.rst                      | 57 +++++++------------
 .../weis_driver_loads.py                      | 42 ++++++++++----
 examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh    |  9 +--
 examples/03_NREL5MW_OC3_spar/weis_driver.py   | 26 +++++----
 weis/glue_code/mpi_tools.py                   | 24 ++++----
 5 files changed, 79 insertions(+), 79 deletions(-)

diff --git a/docs/run_in_parallel.rst b/docs/run_in_parallel.rst
index 09478ce85..d8c3155af 100644
--- a/docs/run_in_parallel.rst
+++ b/docs/run_in_parallel.rst
@@ -1,7 +1,7 @@
 Run in parallel
 --------------
 
-WEIS can be run sequentially on a single processor. WEIS can however also be parallelized to handle larger problems in a timely manner.
+WEIS can be run sequentially on a single processor. WEIS can also be parallelized to handle larger problems in a timely manner. Background @@ -30,37 +30,23 @@ These instructions follow example 03 `03_NREL5MW_OC3 `. + mpirun -np 20 python weis_driver.py Parallelize calls to OpenFAST @@ -69,38 +55,39 @@ Parallelize calls to OpenFAST WEIS can be used to run OpenFAST simulations, such as design load cases. [More information about setting DLCs can be found here](https://github.com/WISDEM/WEIS/blob/docs/docs/dlc_generator.rst) -To do so, WEIS is run with a single function evaluation that fires off a number of OpenFAST simulations. This can be done by setting `n_FD=1`, `n_OFp` to the number of OpenFAST simulations, and `nC=n_FD+n_OFp`. +To do so, WEIS is run with a single function evaluation that fires off a number of OpenFAST simulations. Let's look at an example in `02_run_openfast_cases `_. In a terminal, navigate to example 02 and type: .. code-block:: bash - python weis_driver_loads.py --preMPIflag=True + python weis_driver_loads.py --preMPI=True The terminal should return this message .. code-block:: bash Your problem has 0 design variable(s) and 7 OpenFAST run(s) - You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1 + You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. 
The number of parallel function evaluations is set to 1 To run the code in parallel with MPI, execute one of the following commands - If you have access to (at least) 8 cores, please call WEIS as: - mpirun -np 8 python weis_driver.py --n_FD=1 --n_OF_parallel=7 + If you have access to (at least) 8 processors, please call WEIS as: + mpirun -np 8 python weis_driver.py + + If you do not have access to 8 processors + please provide your maximum available number of processors by typing: + python weis_driver.py --maxnP=xx + And substitute xx with your number of processors - If you do not have access to 8 cores - please provide your maximum available number of cores by typing: - python weis_driver.py --maxCore=xx - And substitute xx with your number of cores +If you have access to 8 processors, you are now ready to execute your script by typing - nC=8 - n_FD=1 - n_OFp=7 +.. code-block:: bash + mpirun -np 8 python weis_driver_loads.py -You are now ready to execute your script by typing +If you have access to fewer processors, say 4, adjust the -np entry accordingly .. 
code-block:: bash - mpirun -np 8 python weis_driver.py --n_FD=1 --n_OF_parallel=7 \ No newline at end of file + mpirun -np 4 python weis_driver_loads.py \ No newline at end of file diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index af57006ea..cf74c65d0 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -4,7 +4,7 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI -from weis.glue_code.mpi_tools import compute_optimal_nC +from weis.glue_code.mpi_tools import compute_optimal_nP ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) @@ -17,31 +17,49 @@ # Set up argument parser parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") # Add the flag -parser.add_argument("--preMPIflag", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxCores", type=int, default=0, help="Maximum number of cores available.") -parser.add_argument("--n_FD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") -parser.add_argument("--n_OF_parallel", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") +parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") +parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") # Parse the arguments args = parser.parse_args() # Use the flag in your script -if args.preMPIflag: +if args.preMPI: print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") else: print("Preprocessor flag is set to False. 
Run WEIS now.") tt = time.time() -# Use the flag in your script -if args.preMPIflag: - _, modeling_options, opt_options, n_FD, n_OF_runs = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True) - compute_optimal_nC(n_FD, n_OF_runs, modeling_options, opt_options, max_cores = args.maxCores) + +if args.preMPI: + _, _, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = args.maxnP) else: - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, n_FD = args.n_FD, n_OF_parallel = args.n_OF_parallel) + if MPI: + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = args.maxnP) + + modeling_override = {} + modeling_override['General'] = {} + modeling_override['General']['openfast_configuration'] = {} + modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] + modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + else: + modeling_override = None + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0 and args.preMPIflag == False: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh b/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh index a05564551..c9880443e 100644 --- a/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh +++ b/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh @@ -16,11 +16,4 @@ module unload gcc source activate weis-env -output=$(python weis_driver.py --preMPIflag=True --maxCores=104) - -# Extract the values from the output (adjust based on actual format) 
-nC=$(echo "$output" | grep 'nC=' | awk -F'=' '{print $2}') -n_FD=$(echo "$output" | grep 'n_FD=' | awk -F'=' '{print $2}') -n_OFp=$(echo "$output" | grep 'n_OFp=' | awk -F'=' '{print $2}') - -mpirun -np $nC python runWEIS.py --n_FD=$n_FD --n_OF_parallel=$n_OFp +mpirun -np 12 python weis_driver.py diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index aadd6e8c0..54525eca6 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -36,18 +36,20 @@ prepMPI=True, maxnP = args.maxnP) else: - _, modeling_options, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = args.maxnP) - - modeling_override = {} - modeling_override['General'] = {} - modeling_override['General']['openfast_configuration'] = {} - modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] - modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] - + if MPI: + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = args.maxnP) + + modeling_override = {} + modeling_override['General'] = {} + modeling_override['General']['openfast_configuration'] = {} + modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] + modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + else: + modeling_override = None wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index c01155de1..ad1d983a8 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -17,11 +17,11 @@ def 
compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): print("You are running an optimization based on an evolutionary solver. The number of parallel finite differencing is now multiplied by 5\n") nFD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - print("You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1\n") + print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n") nFD = 1 elif modeling_options['Level2']['flag']: if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - print("You are not running a design of experiment or your optimizer is not gradient based. The number of parallel finite differencing is now set to 1\n") + print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n") nFD = 1 # # if we're doing a GA or such, "FD" means "entities in epoch" # if opt_options['driver']['optimization']['solver'] in evolutionary_methods: @@ -31,20 +31,20 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): print("To run the code in parallel with MPI, execute one of the following commands\n") if maxnP != 0: - print("You have access to %d cores. Please call WEIS as:"%maxnP) + print("You have access to %d processors. Please call WEIS as:"%maxnP) # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. 
if modeling_options['Level3']['flag']: - # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs, + # If openfast is called, the maximum number of FD is the number of DV, if we have the number of processors available that doubles the number of DVs, # otherwise it is half of the number of DV (rounded to the lower integer). - # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs. + # We need this because a top layer of processors calls a bottom set of processors where OpenFAST runs. if maxnP > 2. * nFD: nFD = nFD else: nFD = int(np.floor(maxnP / 2)) # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and - # the number of cores available (minus the number of DV, which sit and wait for OF to complete) + # the number of processors available (minus the number of DV, which sit and wait for OF to complete) nFD = max([nFD, 1]) max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1]) nOFp = min([int(nOF), max_parallel_OF_runs]) @@ -57,7 +57,7 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1]) nOFp = min([int(nOF), max_parallel_OF_runs]) else: - # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV + # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of processors available and DV nFD = min([maxnP, nFD]) nOFp = 1 nP = nFD + nFD * nOFp @@ -65,15 +65,15 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): else: nOFp = nOF nP = nFD + nFD * nOFp - print("If you have access to (at least) %d cores, please call WEIS as:"%nP) + print("If you 
have access to (at least) %d processors, please call WEIS as:"%nP) - print("mpirun -np %d python weis_driver.py --nFD=%d --nOFp=%d\n"%(nP, nFD, nOFp)) + print("mpirun -np %d python weis_driver.py\n"%nP) if maxnP == 0: - print("\nIf you do not have access to %d cores"%nP) - print("please provide your maximum available number of cores by typing:") + print("\nIf you do not have access to %d processors"%nP) + print("please provide your maximum available number of processors by typing:") print("python weis_driver.py --maxnP=xx") - print("And substitute xx with your number of cores\n") + print("And substitute xx with your number of processors\n") modeling_options['General']['openfast_configuration']['nP'] = nP modeling_options['General']['openfast_configuration']['nFD'] = nFD From cf4fcee9c06143ad5aca1265d182d40574716b2a Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:28:02 -0300 Subject: [PATCH 12/28] adjust if settings, things seem to be running fine now --- .../weis_driver_loads.py | 26 ++++++++++------- examples/03_NREL5MW_OC3_spar/weis_driver.py | 29 ++++++++++++------- weis/glue_code/mpi_tools.py | 6 ++-- weis/glue_code/runWEIS.py | 2 +- 4 files changed, 38 insertions(+), 25 deletions(-) diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index cf74c65d0..a9b95c85f 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -4,14 +4,12 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI -from weis.glue_code.mpi_tools import compute_optimal_nP ## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = run_dir + os.sep + 'IEA-15-240-RWT.yaml' -fname_modeling_options = run_dir + os.sep + 'modeling_options_loads.yaml' -fname_analysis_options = run_dir + os.sep + 'analysis_options_loads.yaml' - +run_dir = os.path.dirname( os.path.realpath(__file__) ) 
+fname_wt_input = os.path.join(run_dir, 'IEA-15-240-RWT.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options_loads.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options_loads.yaml') import argparse # Set up argument parser @@ -29,20 +27,29 @@ tt = time.time() +# Set max number of processes, either set by user or extracted from MPI +if args.preMPI: + maxnP = args.preMPI +else: + if MPI: + maxnP = MPI.COMM_WORLD.Get_size() + else: + maxnP = 1 + if args.preMPI: _, _, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, - maxnP = args.maxnP) + maxnP = maxnP) else: if MPI: _, modeling_options, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, - maxnP = args.maxnP) - + maxnP = maxnP) + modeling_override = {} modeling_override['General'] = {} modeling_override['General']['openfast_configuration'] = {} @@ -62,4 +69,3 @@ if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() - diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index 54525eca6..d699cc52d 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -6,19 +6,17 @@ from openmdao.utils.mpi import MPI ## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = os.path.join(run_dir, "nrel5mw-spar_oc3.yaml") -fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') -fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options_loads.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options_loads.yaml') import argparse # Set up argument parser parser = argparse.ArgumentParser(description="Run WEIS 
driver with flag prepping for MPI run.") # Add the flag parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of cores available.") -parser.add_argument("--nFD", type=int, default=0, help="Number of parallel finite differences, only used when MPI is turned ON.") -parser.add_argument("--nOFp", type=int, default=0, help="Number of OpenFAST calls run in parallel, only used when MPI is turned ON.") +parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") # Parse the arguments args = parser.parse_args() # Use the flag in your script @@ -28,21 +26,30 @@ print("Preprocessor flag is set to False. Run WEIS now.") tt = time.time() -# Use the flag in your script + +# Set max number of processes, either set by user or extracted from MPI +if args.preMPI: + maxnP = args.preMPI +else: + if MPI: + maxnP = MPI.COMM_WORLD.Get_size() + else: + maxnP = 1 + if args.preMPI: _, _, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, - maxnP = args.maxnP) + maxnP = maxnP) else: if MPI: _, modeling_options, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, - maxnP = args.maxnP) - + maxnP = maxnP) + modeling_override = {} modeling_override['General'] = {} modeling_override['General']['openfast_configuration'] = {} diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index ad1d983a8..948f40918 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -3,7 +3,7 @@ import numpy as np from openmdao.utils.mpi import MPI -def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): +def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] @@ -30,7 +30,7 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): 
print("To run the code in parallel with MPI, execute one of the following commands\n") - if maxnP != 0: + if maxnP != 1: print("You have access to %d processors. Please call WEIS as:"%maxnP) # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. @@ -69,7 +69,7 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=0): print("mpirun -np %d python weis_driver.py\n"%nP) - if maxnP == 0: + if maxnP == 1: print("\nIf you do not have access to %d processors"%nP) print("please provide your maximum available number of processors by typing:") print("python weis_driver.py --maxnP=xx") diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index c92ac0d66..a2ea587e3 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -19,7 +19,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry_override=None, modeling_override=None, analysis_override=None, - prepMPI=False, maxnP=0): + prepMPI=False, maxnP=1): # Load all yaml inputs and validate (also fills in defaults) wt_initial = WindTurbineOntologyPythonWEIS( fname_wt_input, From 97a6bb132a130492a7e2b531b91dc04f836847ca Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:33:13 -0300 Subject: [PATCH 13/28] fix last typos --- examples/02_run_openfast_cases/weis_driver_loads.py | 2 +- examples/03_NREL5MW_OC3_spar/weis_driver.py | 6 +++--- weis/glue_code/mpi_tools.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index a9b95c85f..f581950b3 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -29,7 +29,7 @@ # Set max number of processes, either set by user or extracted from MPI if args.preMPI: - 
maxnP = args.preMPI + maxnP = args.maxnP else: if MPI: maxnP = MPI.COMM_WORLD.Get_size() diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index d699cc52d..c7d0b2812 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -8,8 +8,8 @@ ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml') -fname_modeling_options = os.path.join(run_dir, 'modeling_options_loads.yaml') -fname_analysis_options = os.path.join(run_dir, 'analysis_options_loads.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') import argparse # Set up argument parser @@ -29,7 +29,7 @@ # Set max number of processes, either set by user or extracted from MPI if args.preMPI: - maxnP = args.preMPI + maxnP = args.maxnP else: if MPI: maxnP = MPI.COMM_WORLD.Get_size() diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 948f40918..8a1df2e96 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -72,7 +72,7 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): if maxnP == 1: print("\nIf you do not have access to %d processors"%nP) print("please provide your maximum available number of processors by typing:") - print("python weis_driver.py --maxnP=xx") + print("python weis_driver.py --preMPI=True --maxnP=xx") print("And substitute xx with your number of processors\n") modeling_options['General']['openfast_configuration']['nP'] = nP From ee0f6bce0af2843b8c19f61be36d437517fa60a8 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:43:43 -0300 Subject: [PATCH 14/28] add tests, switch to mpiexec --- .github/workflows/run_exhaustive_examples.yml | 14 ++- docs/run_in_parallel.rst | 14 +-- examples/05_IEA-3.4-130-RWT/weis_driver.py | 116 
+++++++++--------- weis/glue_code/mpi_tools.py | 2 +- 4 files changed, 81 insertions(+), 65 deletions(-) diff --git a/.github/workflows/run_exhaustive_examples.yml b/.github/workflows/run_exhaustive_examples.yml index 528c414df..30e766d2a 100644 --- a/.github/workflows/run_exhaustive_examples.yml +++ b/.github/workflows/run_exhaustive_examples.yml @@ -76,7 +76,19 @@ jobs: run: | cd weis/test python run_examples.py - + + # Run parallel script calling OpenFAST + - name: Run parallel OpenFAST + run: | + cd examples/02_run_openfast_cases + mpiexec -np 2 python weis_driver_loads.py + + # Run parallel script calling a simple optimization + - name: Run parallel OpenFAST + run: | + cd examples/03_NREL5MW_OC3_spar + mpiexec -np 2 python weis_driver.py + # Run scripts within rotor_opt folder with MPI - name: Run parallel examples rotor optimization run: | diff --git a/docs/run_in_parallel.rst b/docs/run_in_parallel.rst index d8c3155af..947d12a9c 100644 --- a/docs/run_in_parallel.rst +++ b/docs/run_in_parallel.rst @@ -7,14 +7,14 @@ WEIS can be run sequentially on a single processor. WEIS can also be parallelize Background ------------------------------------ -The parallelization of WEIS leverages the [MPI library](https://mpi4py.readthedocs.io/en/stable/). Before proceeding, please make sure that your WEIS environemnt includes the library mpi4py as discussed in the README.md at the root level of this repo, or on the `WEIS GitHub page `_. +The parallelization of WEIS leverages the [MPI library](https://mpi4py.readthedocs.io/en/stable/). Before proceeding, please make sure that your WEIS environment includes the library mpi4py as discussed in the README.md at the root level of this repo, or on the `WEIS GitHub page `_. Parallelization in WEIS happens at two levels: * The first level is triggered when an optimization is run and a gradient-based optimizer is called. In this case, the OpenMDAO library will execute the finite differences in parallel. 
OpenMDAO then assembles all partial derivatives in a single Jacobian and iterations progress. * The second level is triggered when multiple OpenFAST runs are specified. These are executed in parallel. The two levels of parallelization are integrated and can co-exist as long as sufficient computational resources are available. -WEIS helps you set up the right call to MPI given the available computational resources, see the section below. +WEIS helps you set up the right call to MPI given the available computational resources, see the sections below. How to run WEIS in parallel @@ -46,7 +46,7 @@ At this point, you are ready to launch WEIS in parallel. If you have 20 processo .. code-block:: bash - mpirun -np 20 python weis_driver.py + mpiexec -np 20 python weis_driver.py Parallelize calls to OpenFAST @@ -74,20 +74,20 @@ The terminal should return this message To run the code in parallel with MPI, execute one of the following commands If you have access to (at least) 8 processors, please call WEIS as: - mpirun -np 8 python weis_driver.py + mpiexec -np 8 python weis_driver.py If you do not have access to 8 processors please provide your maximum available number of processors by typing: - python weis_driver.py --maxnP=xx + python weis_driver.py --preMPI=True --maxnP=xx And substitute xx with your number of processors If you have access to 8 processors, you are now ready to execute your script by typing .. code-block:: bash - mpirun -np 8 python weis_driver_loads.py + mpiexec -np 8 python weis_driver_loads.py If you have access to fewer processors, say 4, adjust the -np entry accordingly .. 
code-block:: bash - mpirun -np 4 python weis_driver_loads.py \ No newline at end of file + mpiexec -np 4 python weis_driver_loads.py \ No newline at end of file diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver.py b/examples/05_IEA-3.4-130-RWT/weis_driver.py index 078e6f0f8..948550e1f 100644 --- a/examples/05_IEA-3.4-130-RWT/weis_driver.py +++ b/examples/05_IEA-3.4-130-RWT/weis_driver.py @@ -1,67 +1,71 @@ +import os +import time +import sys -from weis.glue_code.runWEIS import run_weis +from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI -import os, time, sys ## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) + os.sep -fname_wt_input = run_dir + os.sep + "IEA-3p4-130-RWT.yaml" -fname_modeling_options = run_dir + "modeling_options.yaml" -fname_analysis_options = run_dir + "analysis_options.yaml" +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'IEA-3p4-130-RWT.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') -plot_flag = False +import argparse +# Set up argument parser +parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") +# Add the flag +parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") +parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") +# Parse the arguments +args = parser.parse_args() +# Use the flag in your script +if args.preMPI: + print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") +else: + print("Preprocessor flag is set to False. 
Run WEIS now.") tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) -rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 +# Set max number of processes, either set by user or extracted from MPI +if args.preMPI: + maxnP = args.maxnP +else: + if MPI: + maxnP = MPI.COMM_WORLD.Get_size() + else: + maxnP = 1 -if rank == 0 and plot_flag: - print('Run time: %f'%(time.time()-tt)) - sys.stdout.flush() - - import matplotlib.pyplot as plt - import numpy as np - from matplotlib import cm - - X = wt_opt['sse_tune.aeroperf_tables.pitch_vector'] - Y = wt_opt['sse_tune.aeroperf_tables.tsr_vector'] - X, Y = np.meshgrid(X, Y) - pitch_schedule = wt_opt['rotorse.rp.powercurve.pitch'] - tsr_schedule = wt_opt['rotorse.rp.powercurve.Omega'] / 30. * np.pi * wt_opt['rotorse.Rtip'] / wt_opt['rotorse.rp.powercurve.V'] - - # Plot the Cp surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Cp'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno, levels = [0, 0.1, 0.2, 0.3, 0.4, 0.44, 0.47, 0.50, 0.53, 0.56]) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic power coefficient (-)', fontweight = 'bold') - plt.legend() +if args.preMPI: + _, _, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) +else: + if MPI: + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) - # Plot the Ct surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Ct'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - 
ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic thrust coefficient (-)', fontweight = 'bold') - plt.legend() - - # Plot the Cq surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Cq'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic torque coefficient (-)', fontweight = 'bold') - plt.legend() - - plt.show() + modeling_override = {} + modeling_override['General'] = {} + modeling_override['General']['openfast_configuration'] = {} + modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] + modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + else: + modeling_override = None + wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override) +if MPI: + rank = MPI.COMM_WORLD.Get_rank() +else: + rank = 0 +if rank == 0 and args.preMPI == False: + print("Run time: %f"%(time.time()-tt)) + sys.stdout.flush() diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 8a1df2e96..95903123b 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -67,7 +67,7 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): nP = nFD + nFD * nOFp print("If you have access to (at least) %d processors, please call WEIS as:"%nP) - print("mpirun -np %d python weis_driver.py\n"%nP) + print("mpiexec -np %d python weis_driver.py\n"%nP) if maxnP == 1: print("\nIf you do not have access to %d processors"%nP) From 36d1653949be1040608a9254ce0d4620367f0934 Mon Sep 17 
00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:53:01 -0300 Subject: [PATCH 15/28] remove sbatch kestrel (can't mantain...) and shorten OF sims --- .../03_NREL5MW_OC3_spar/modeling_options.yaml | 2 +- examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh | 19 ------------------- 2 files changed, 1 insertion(+), 20 deletions(-) delete mode 100644 examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh diff --git a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml index 0aba5d9c4..729e31ca6 100644 --- a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml +++ b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml @@ -101,7 +101,7 @@ DLC_driver: wave_height: [7.] wave_period: [1.] n_seeds: 1 - analysis_time: 20. + analysis_time: 10. transient_time: 0. turbulent_wind: HubHt: 90.0 diff --git a/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh b/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh deleted file mode 100644 index c9880443e..000000000 --- a/examples/03_NREL5MW_OC3_spar/sbatch_hpc.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#SBATCH --account=allocation_name -#SBATCH --time=01:00:00 -#SBATCH --job-name=Design1 -#SBATCH --nodes=1 -#SBATCH --ntasks-per-node=104 -#SBATCH --mail-user user@nrel.gov -#SBATCH --mail-type BEGIN,END,FAIL -#SBATCH --output=Case1.%j.out -####SBATCH --qos=high -####SBATCH --partition=debug - -module purge -module load comp-intel intel-mpi mkl -module unload gcc - -source activate weis-env - -mpirun -np 12 python weis_driver.py From 9ad56b54d6026686c4b002b4e8e90f9cd52b9d35 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:59:23 -0300 Subject: [PATCH 16/28] remove outdated py file --- .../weis_driver_model_only.py | 73 ------------------- 1 file changed, 73 deletions(-) delete mode 100644 examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py b/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py deleted file mode 100644 index 
ff207b286..000000000 --- a/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py +++ /dev/null @@ -1,73 +0,0 @@ - -from weis.glue_code.runWEIS import run_weis -from openmdao.utils.mpi import MPI -import os, time, sys - -## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) + os.sep -fname_wt_input = run_dir + os.sep + "IEA-3p4-130-RWT.yaml" -fname_modeling_options = run_dir + "modeling_options.yaml" -fname_analysis_options = run_dir + "analysis_options.yaml" - -plot_flag = False - -# Test model_only flag here -modeling_override = {} -modeling_override['General'] = {} -modeling_override['General']['openfast_configuration'] = {} -modeling_override['General']['openfast_configuration']['model_only'] = True - -tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, modeling_override=modeling_override) - -rank = MPI.COMM_WORLD.Get_rank() if MPI else 0 - -if rank == 0 and plot_flag: - print('Run time: %f'%(time.time()-tt)) - sys.stdout.flush() - - import matplotlib.pyplot as plt - import numpy as np - from matplotlib import cm - - X = wt_opt['sse_tune.aeroperf_tables.pitch_vector'] - Y = wt_opt['sse_tune.aeroperf_tables.tsr_vector'] - X, Y = np.meshgrid(X, Y) - pitch_schedule = wt_opt['rotorse.rp.powercurve.pitch'] - tsr_schedule = wt_opt['rotorse.rp.powercurve.Omega'] / 30. 
* np.pi * wt_opt['rotorse.Rtip'] / wt_opt['rotorse.rp.powercurve.V'] - - # Plot the Cp surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Cp'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno, levels = [0, 0.1, 0.2, 0.3, 0.4, 0.44, 0.47, 0.50, 0.53, 0.56]) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic power coefficient (-)', fontweight = 'bold') - plt.legend() - - # Plot the Ct surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Ct'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic thrust coefficient (-)', fontweight = 'bold') - plt.legend() - - # Plot the Cq surface - fig, ax = plt.subplots() - Z = wt_opt['sse_tune.aeroperf_tables.Cq'] - cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno) - ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory') - ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold') - ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold') - cbar = fig.colorbar(cs) - cbar.ax.set_ylabel('Aerodynamic torque coefficient (-)', fontweight = 'bold') - plt.legend() - - plt.show() - From e9aa7914312394f0c9b5d36160fd15d30daad970 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 11:59:32 -0300 Subject: [PATCH 17/28] suppress print statements when not needed --- weis/glue_code/mpi_tools.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 95903123b..3df578c9a 100644 --- 
a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -8,30 +8,35 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): fd_methods = ['SLSQP','SNOPT', 'LD_MMA'] evolutionary_methods = ['DE', 'NSGA2'] - - print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(nFD, nOF)) + + if not MPI: + print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(nFD, nOF)) if modeling_options['Level3']['flag']: # If we are running an optimization method that doesn't use finite differencing, set the number of DVs to 1 if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods): - print("You are running an optimization based on an evolutionary solver. The number of parallel finite differencing is now multiplied by 5\n") + if not MPI: + print("You are running an optimization based on an evolutionary solver. The number of parallel finite differencing is now multiplied by 5\n") nFD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n") + if not MPI: + print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n") nFD = 1 elif modeling_options['Level2']['flag']: if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods): - print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. 
The number of parallel function evaluations is set to 1\n") + if not MPI: + print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n") nFD = 1 # # if we're doing a GA or such, "FD" means "entities in epoch" # if opt_options['driver']['optimization']['solver'] in evolutionary_methods: # nFD = maxnP - - print("To run the code in parallel with MPI, execute one of the following commands\n") + if not MPI: + print("To run the code in parallel with MPI, execute one of the following commands\n") if maxnP != 1: - print("You have access to %d processors. Please call WEIS as:"%maxnP) + if not MPI: + print("You have access to %d processors. Please call WEIS as:"%maxnP) # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD) # evaluations based on the number of design variables (DV). OpenFAST on/off changes things. if modeling_options['Level3']['flag']: @@ -65,11 +70,13 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): else: nOFp = nOF nP = nFD + nFD * nOFp - print("If you have access to (at least) %d processors, please call WEIS as:"%nP) + if not MPI: + print("If you have access to (at least) %d processors, please call WEIS as:"%nP) - print("mpiexec -np %d python weis_driver.py\n"%nP) + if not MPI: + print("mpiexec -np %d python weis_driver.py\n"%nP) - if maxnP == 1: + if maxnP == 1 and not MPI: print("\nIf you do not have access to %d processors"%nP) print("please provide your maximum available number of processors by typing:") print("python weis_driver.py --preMPI=True --maxnP=xx") From a5a71cfb174fb97695f92a7ddf769d2c48fa1ca9 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 18:06:38 -0300 Subject: [PATCH 18/28] adjust if condition --- weis/glue_code/runWEIS.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/weis/glue_code/runWEIS.py 
b/weis/glue_code/runWEIS.py index a2ea587e3..92fd15ae1 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -122,8 +122,9 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, # Estimate number of design variables and parallel calls to OpenFASRT given # the computational resources available. This is used to setup WEIS for an MPI run - nFD = len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None)) - modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP) + if prepMPI: + nFD = max([1,len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None))]) + modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP) # If WEIS is called simply to prep for an MPI call, no need to proceed and simply # return the number of finite differences and OpenFAST calls, and stop From fcae5826d791d05545293770e09e6543e3b9b80c Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Fri, 27 Dec 2024 19:26:17 -0300 Subject: [PATCH 19/28] lock openfast wisdem and rosco --- environment.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/environment.yml b/environment.yml index 373f1bd01..77767da95 100644 --- a/environment.yml +++ b/environment.yml @@ -9,16 +9,16 @@ dependencies: - mat4py - nlopt - numpydoc - - openfast>=3.5.3 + - openfast=3.5.5 - openraft>=1.2.4 - osqp - pcrunch - pip - pyhams>=1.3 #- pyoptsparse - - rosco>=2.9.4 + - rosco=2.9.5 - smt - - wisdem>=3.16.4 + - wisdem=3.18.1 - pip: - dash-bootstrap-components - dash-mantine-components From 005cd35bdc8c6c8523cb1d7525b3a0be7133f528 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sun, 29 Dec 2024 14:29:40 -0300 Subject: [PATCH 20/28] adjust list of examples run during testing --- weis/test/run_examples.py | 2 +- weis/test/test_examples_skinny.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weis/test/run_examples.py 
b/weis/test/run_examples.py index f04d1ed34..bb8d87b78 100644 --- a/weis/test/run_examples.py +++ b/weis/test/run_examples.py @@ -10,7 +10,7 @@ # "03_NREL5MW_OC3_spar/weis_freq_driver", # executed in examples_skinny # "04_NREL5MW_OC4_semi/weis_driver", # skipping until we resolve multiple variable ballasts "04_NREL5MW_OC4_semi/weis_freq_driver", - "05_IEA-3.4-130-RWT/weis_driver", # also executed via mpi in the gitthub workflow + # "05_IEA-3.4-130-RWT/weis_driver", # also executed via mpi in the gitthub workflow #"06_IEA-15-240-RWT/weis_driver", # executed in the test_IEA15.py # "07_te_flaps/dac_driver", "08_OLAF/weis_driver", diff --git a/weis/test/test_examples_skinny.py b/weis/test/test_examples_skinny.py index ae74a7d77..b41513c1e 100644 --- a/weis/test/test_examples_skinny.py +++ b/weis/test/test_examples_skinny.py @@ -6,7 +6,7 @@ "02_run_openfast_cases/weis_driver_sm", #Not as fast as weis_driver, but not too bad (120 sec. locally) #"03_NREL5MW_OC3_spar/weis_driver", "03_NREL5MW_OC3_spar/weis_freq_driver", - "05_IEA-3.4-130-RWT/weis_driver_model_only", + "05_IEA-3.4-130-RWT/weis_driver", "06_IEA-15-240-RWT/weis_driver_monopile", "06_IEA-15-240-RWT/weis_driver_TMDs", "09_design_of_experiments/DOE_openfast", From 58b697cbbacd074570b8c4979a714d3c933f8d62 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Sun, 29 Dec 2024 22:50:50 -0300 Subject: [PATCH 21/28] try again --- examples/02_run_openfast_cases/weis_driver_loads.py | 4 ++-- examples/03_NREL5MW_OC3_spar/weis_driver.py | 4 ++-- examples/05_IEA-3.4-130-RWT/weis_driver.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index f581950b3..69ea39484 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -16,9 +16,9 @@ parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") # 
Add the flag parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") +parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") # Parse the arguments -args = parser.parse_args() +args, _ = parser.parse_known_args() # Use the flag in your script if args.preMPI: print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index c7d0b2812..e626dbb45 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -16,9 +16,9 @@ parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") # Add the flag parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") +parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") # Parse the arguments -args = parser.parse_args() +args, _ = parser.parse_known_args() # Use the flag in your script if args.preMPI: print("Preprocessor flag is set to True. 
Running preprocessing setting up MPI run.") diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver.py b/examples/05_IEA-3.4-130-RWT/weis_driver.py index 948550e1f..e63be3a3d 100644 --- a/examples/05_IEA-3.4-130-RWT/weis_driver.py +++ b/examples/05_IEA-3.4-130-RWT/weis_driver.py @@ -16,9 +16,9 @@ parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") # Add the flag parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=0, help="Maximum number of processors available.") +parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") # Parse the arguments -args = parser.parse_args() +args, _ = parser.parse_known_args() # Use the flag in your script if args.preMPI: print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") From 5a9bc96dbd408ea696b403c0c8c79d9c0345b1ad Mon Sep 17 00:00:00 2001 From: dzalkind Date: Mon, 30 Dec 2024 11:33:28 -0700 Subject: [PATCH 22/28] Tidy up weis_driver_loads --- .../weis_driver_loads.py | 41 +++++-------------- weis/glue_code/weis_args.py | 38 +++++++++++++++++ 2 files changed, 49 insertions(+), 30 deletions(-) create mode 100644 weis/glue_code/weis_args.py diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index 69ea39484..71fb3bb36 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -3,60 +3,41 @@ import sys from weis.glue_code.runWEIS import run_weis +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from openmdao.utils.mpi import MPI +# Parse args +args = weis_args() + ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) fname_wt_input = os.path.join(run_dir, 'IEA-15-240-RWT.yaml') fname_modeling_options = os.path.join(run_dir, 
'modeling_options_loads.yaml') fname_analysis_options = os.path.join(run_dir, 'analysis_options_loads.yaml') -import argparse -# Set up argument parser -parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") -# Add the flag -parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") -# Parse the arguments -args, _ = parser.parse_known_args() -# Use the flag in your script -if args.preMPI: - print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") -else: - print("Preprocessor flag is set to False. Run WEIS now.") - tt = time.time() - -# Set max number of processes, either set by user or extracted from MPI -if args.preMPI: - maxnP = args.maxnP -else: - if MPI: - maxnP = MPI.COMM_WORLD.Get_size() - else: - maxnP = 1 +maxnP = get_max_procs(args) if args.preMPI: + # Do a dry-run to compute number of procs _, _, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = maxnP) else: + modeling_override = None if MPI: + # Pre-compute number of cores needed in this run _, modeling_options, _ = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, prepMPI=True, maxnP = maxnP) - modeling_override = {} - modeling_override['General'] = {} - modeling_override['General']['openfast_configuration'] = {} - modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] - modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] - else: - modeling_override = None + modeling_override = set_modopt_procs(modeling_options) + + # Run WEIS for real now wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, diff --git 
a/weis/glue_code/weis_args.py b/weis/glue_code/weis_args.py new file mode 100644 index 000000000..0db86d272 --- /dev/null +++ b/weis/glue_code/weis_args.py @@ -0,0 +1,38 @@ +import argparse +from openmdao.utils.mpi import MPI + +def weis_args(): + + # Set up argument parser + parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") + # Add the flag + parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") + parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") + # Parse the arguments + args, _ = parser.parse_known_args() + # Use the flag in your script + if args.preMPI: + print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") + else: + print("Preprocessor flag is set to False. Run WEIS now.") + + return args + +def get_max_procs(args): + # Set max number of processes, either set by user or extracted from MPI + if args.preMPI: + maxnP = args.maxnP + else: + if MPI: + maxnP = MPI.COMM_WORLD.Get_size() + else: + maxnP = 1 + return maxnP + +def set_modopt_procs(modeling_options): + modeling_override = {} + modeling_override['General'] = {} + modeling_override['General']['openfast_configuration'] = {} + modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] + modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] + return modeling_override \ No newline at end of file From 08ebbf34d20c89f1840d70f72115c37818b92825 Mon Sep 17 00:00:00 2001 From: dzalkind Date: Mon, 30 Dec 2024 11:53:46 -0700 Subject: [PATCH 23/28] Print information about modeling options to user --- weis/aeroelasticse/FileTools.py | 7 +++++++ weis/glue_code/mpi_tools.py | 17 ++++++++++++++--- weis/glue_code/weis_args.py | 1 + 3 files changed, 22 insertions(+), 3 deletions(-) diff --git 
a/weis/aeroelasticse/FileTools.py b/weis/aeroelasticse/FileTools.py index 9477400c4..9d691901a 100644 --- a/weis/aeroelasticse/FileTools.py +++ b/weis/aeroelasticse/FileTools.py @@ -3,6 +3,7 @@ import operator import numpy as np import yaml +import sys from functools import reduce try: import ruamel_yaml as ry @@ -136,6 +137,12 @@ def save_yaml(outdir, fname, data_out): yaml.dump(data_out, f) f.close() +def print_yaml(data_struct): + data_struct = remove_numpy(data_struct) + yaml=ry.YAML() + yaml.indent(mapping=4, sequence=6, offset=3) + yaml.dump(data_struct,sys.stdout) + def select_cases(cases, var_sel, val_sel): # Find a variable value from the AeroelasticSE case_matrix diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py index 3df578c9a..603fc2355 100644 --- a/weis/glue_code/mpi_tools.py +++ b/weis/glue_code/mpi_tools.py @@ -2,6 +2,8 @@ import sys import numpy as np from openmdao.utils.mpi import MPI +from weis.aeroelasticse.FileTools import print_yaml + def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): @@ -82,9 +84,18 @@ def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1): print("python weis_driver.py --preMPI=True --maxnP=xx") print("And substitute xx with your number of processors\n") - modeling_options['General']['openfast_configuration']['nP'] = nP - modeling_options['General']['openfast_configuration']['nFD'] = nFD - modeling_options['General']['openfast_configuration']['nOFp'] = nOFp + modeling_updates = {} + modeling_updates['General'] = {} + modeling_updates['General']['openfast_configuration'] = {} + modeling_updates['General']['openfast_configuration']['nP'] = nP + modeling_updates['General']['openfast_configuration']['nFD'] = nFD + modeling_updates['General']['openfast_configuration']['nOFp'] = nOFp + + print('The following changes should be made to the modeling options:') + print_yaml(modeling_updates) + + # Apply updates + 
modeling_options['General']['openfast_configuration'].update(modeling_updates['General']['openfast_configuration']) return modeling_options diff --git a/weis/glue_code/weis_args.py b/weis/glue_code/weis_args.py index 0db86d272..cc1dbdeb9 100644 --- a/weis/glue_code/weis_args.py +++ b/weis/glue_code/weis_args.py @@ -30,6 +30,7 @@ def get_max_procs(args): return maxnP def set_modopt_procs(modeling_options): + print('Applying the modeling option updates as overrides.') modeling_override = {} modeling_override['General'] = {} modeling_override['General']['openfast_configuration'] = {} From b903ba1ea1ae98d4ffa0544fc3075df88cc09423 Mon Sep 17 00:00:00 2001 From: dzalkind Date: Mon, 30 Dec 2024 12:58:03 -0700 Subject: [PATCH 24/28] Simplify weis_driver_loads more --- .../weis_driver_loads.py | 41 ++++++++----------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py index 71fb3bb36..41aaa976e 100644 --- a/examples/02_run_openfast_cases/weis_driver_loads.py +++ b/examples/02_run_openfast_cases/weis_driver_loads.py @@ -18,30 +18,23 @@ tt = time.time() maxnP = get_max_procs(args) -if args.preMPI: - # Do a dry-run to compute number of procs - _, _, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) -else: - modeling_override = None - if MPI: - # Pre-compute number of cores needed in this run - _, modeling_options, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) - - modeling_override = set_modopt_procs(modeling_options) - - # Run WEIS for real now - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - modeling_override=modeling_override) +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = 
run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() From fc4cf7cee7768f017f3833cff4368a56191c8ea6 Mon Sep 17 00:00:00 2001 From: dzalkind Date: Mon, 30 Dec 2024 12:58:16 -0700 Subject: [PATCH 25/28] Make control example case --- .../modeling_options.yaml | 3 +++ .../weis_driver_rosco_opt.py | 27 +++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/examples/02_run_openfast_cases/modeling_options.yaml b/examples/02_run_openfast_cases/modeling_options.yaml index 28d925def..bd9e42933 100644 --- a/examples/02_run_openfast_cases/modeling_options.yaml +++ b/examples/02_run_openfast_cases/modeling_options.yaml @@ -32,6 +32,9 @@ ROSCO: flag: True tuning_yaml: ../01_aeroelasticse/OpenFAST_models/IEA-15-240-RWT/IEA-15-240-RWT-UMaineSemi/IEA15MW-UMaineSemi.yaml Kp_float: -10 + U_pc: [12, 18, 24] + omega_pc: [.1, .1, .1] + zeta_pc: [1.,1.,1.] 
DLC_driver: diff --git a/examples/02_run_openfast_cases/weis_driver_rosco_opt.py b/examples/02_run_openfast_cases/weis_driver_rosco_opt.py index 80af35b38..8d501ee76 100644 --- a/examples/02_run_openfast_cases/weis_driver_rosco_opt.py +++ b/examples/02_run_openfast_cases/weis_driver_rosco_opt.py @@ -4,6 +4,11 @@ from weis.glue_code.runWEIS import run_weis from openmdao.utils.mpi import MPI +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs + +# Parse args +args = weis_args() + ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) @@ -13,12 +18,30 @@ tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +maxnP = get_max_procs(args) + +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() From 7aedb7c83275c0dd0777de3c081bab07497baf89 Mon Sep 17 00:00:00 2001 From: dzalkind Date: Mon, 30 Dec 2024 13:42:48 -0700 Subject: [PATCH 26/28] Count elements in each design variable --- weis/glue_code/runWEIS.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py index 92fd15ae1..b7d735414 100644 --- a/weis/glue_code/runWEIS.py +++ b/weis/glue_code/runWEIS.py @@ -123,7 +123,15 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, # Estimate number of design 
variables and parallel calls to OpenFASRT given # the computational resources available. This is used to setup WEIS for an MPI run if prepMPI: - nFD = max([1,len(wt_opt.model.list_outputs(is_design_var=True, out_stream=None))]) + nFD = 0 + for dv in wt_opt.model.list_outputs(is_design_var=True, out_stream=None): + # dv is a tuple with (name, info) + nFD += len(dv[1]['val']) + + # number of finite differences should be at least 1 + nFD = max([1,nFD]) + + # Compute number of processors modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP) # If WEIS is called simply to prep for an MPI call, no need to proceed and simply From 9a99005467951af777db620a3a3f01302496db60 Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Thu, 2 Jan 2025 13:03:06 -0300 Subject: [PATCH 27/28] bring back weis_driver_model_only --- .../weis_driver_model_only.py | 22 +++++++++++++++++++ weis/test/run_examples.py | 2 +- weis/test/test_examples_skinny.py | 2 +- 3 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py b/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py new file mode 100644 index 000000000..4c6330d6e --- /dev/null +++ b/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py @@ -0,0 +1,22 @@ +import os +import time +import sys + +from weis.glue_code.runWEIS import run_weis +from openmdao.utils.mpi import MPI + +## File management +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'IEA-3p4-130-RWT.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') + +modeling_override = {} +modeling_override['General'] = {} +modeling_override['General']['openfast_configuration'] = {} +modeling_override['General']['openfast_configuration']['model_only'] = True + +wt_opt, 
modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override) diff --git a/weis/test/run_examples.py b/weis/test/run_examples.py index bb8d87b78..f04d1ed34 100644 --- a/weis/test/run_examples.py +++ b/weis/test/run_examples.py @@ -10,7 +10,7 @@ # "03_NREL5MW_OC3_spar/weis_freq_driver", # executed in examples_skinny # "04_NREL5MW_OC4_semi/weis_driver", # skipping until we resolve multiple variable ballasts "04_NREL5MW_OC4_semi/weis_freq_driver", - # "05_IEA-3.4-130-RWT/weis_driver", # also executed via mpi in the gitthub workflow + "05_IEA-3.4-130-RWT/weis_driver", # also executed via mpi in the gitthub workflow #"06_IEA-15-240-RWT/weis_driver", # executed in the test_IEA15.py # "07_te_flaps/dac_driver", "08_OLAF/weis_driver", diff --git a/weis/test/test_examples_skinny.py b/weis/test/test_examples_skinny.py index b41513c1e..ae74a7d77 100644 --- a/weis/test/test_examples_skinny.py +++ b/weis/test/test_examples_skinny.py @@ -6,7 +6,7 @@ "02_run_openfast_cases/weis_driver_sm", #Not as fast as weis_driver, but not too bad (120 sec. 
locally) #"03_NREL5MW_OC3_spar/weis_driver", "03_NREL5MW_OC3_spar/weis_freq_driver", - "05_IEA-3.4-130-RWT/weis_driver", + "05_IEA-3.4-130-RWT/weis_driver_model_only", "06_IEA-15-240-RWT/weis_driver_monopile", "06_IEA-15-240-RWT/weis_driver_TMDs", "09_design_of_experiments/DOE_openfast", From 00f02c71d637518bbd487da878611f99dc82cf2e Mon Sep 17 00:00:00 2001 From: ptrbortolotti Date: Thu, 2 Jan 2025 13:10:48 -0300 Subject: [PATCH 28/28] update front scripts examples 3 4 5 --- examples/03_NREL5MW_OC3_spar/weis_driver.py | 70 ++++++------------ .../03_NREL5MW_OC3_spar/weis_freq_driver.py | 35 +++++++-- examples/04_NREL5MW_OC4_semi/weis_driver.py | 35 +++++++-- .../04_NREL5MW_OC4_semi/weis_freq_driver.py | 35 +++++++-- examples/05_IEA-3.4-130-RWT/weis_driver.py | 72 ++++++------------- 5 files changed, 129 insertions(+), 118 deletions(-) diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py index e626dbb45..cd618976f 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py @@ -3,64 +3,38 @@ import sys from weis.glue_code.runWEIS import run_weis +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from openmdao.utils.mpi import MPI +# Parse args +args = weis_args() + ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml') fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') -import argparse -# Set up argument parser -parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") -# Add the flag -parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") -# Parse the arguments -args, _ 
= parser.parse_known_args() -# Use the flag in your script -if args.preMPI: - print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") -else: - print("Preprocessor flag is set to False. Run WEIS now.") - tt = time.time() +maxnP = get_max_procs(args) -# Set max number of processes, either set by user or extracted from MPI -if args.preMPI: - maxnP = args.maxnP -else: - if MPI: - maxnP = MPI.COMM_WORLD.Get_size() - else: - maxnP = 1 - -if args.preMPI: - _, _, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) -else: - if MPI: - _, modeling_options, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) - - modeling_override = {} - modeling_override['General'] = {} - modeling_override['General']['openfast_configuration'] = {} - modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] - modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] - else: - modeling_override = None - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - modeling_override=modeling_override) +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() diff --git a/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py b/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py index 
08b1e9d1c..896efd801 100644 --- a/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py +++ b/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py @@ -3,22 +3,43 @@ import sys from weis.glue_code.runWEIS import run_weis +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from openmdao.utils.mpi import MPI -## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = run_dir + os.sep + "nrel5mw-spar_oc3.yaml" -fname_modeling_options = run_dir + os.sep + "modeling_options_freq.yaml" -fname_analysis_options = run_dir + os.sep + "analysis_options_noopt.yaml" +# Parse args +args = weis_args() +## File management +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options_freq.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options_noopt.yaml') tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +maxnP = get_max_procs(args) + +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/examples/04_NREL5MW_OC4_semi/weis_driver.py b/examples/04_NREL5MW_OC4_semi/weis_driver.py index 3c9ebf63d..c1c04f081 100644 --- a/examples/04_NREL5MW_OC4_semi/weis_driver.py +++ 
b/examples/04_NREL5MW_OC4_semi/weis_driver.py @@ -3,22 +3,43 @@ import sys from weis.glue_code.runWEIS import run_weis +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from openmdao.utils.mpi import MPI -## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = run_dir + os.sep + "nrel5mw-semi_oc4.yaml" -fname_modeling_options = run_dir + os.sep + "modeling_options.yaml" -fname_analysis_options = run_dir + os.sep + "analysis_options.yaml" +# Parse args +args = weis_args() +## File management +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'nrel5mw-semi_oc4.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +maxnP = get_max_procs(args) + +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py b/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py index 55cee8a4a..ff02ad531 100644 --- a/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py +++ b/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py @@ -3,22 +3,43 @@ import sys from weis.glue_code.runWEIS import run_weis +from 
weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from openmdao.utils.mpi import MPI -## File management -run_dir = os.path.dirname( os.path.realpath(__file__) ) -fname_wt_input = run_dir + os.sep + "nrel5mw-semi_oc4.yaml" -fname_modeling_options = run_dir + os.sep + "modeling_options_freq.yaml" -fname_analysis_options = run_dir + os.sep + "analysis_options.yaml" +# Parse args +args = weis_args() +## File management +run_dir = os.path.dirname( os.path.realpath(__file__) ) +fname_wt_input = os.path.join(run_dir, 'nrel5mw-semi_oc4.yaml') +fname_modeling_options = os.path.join(run_dir, 'modeling_options_freq.yaml') +fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') tt = time.time() -wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options) +maxnP = get_max_procs(args) + +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank() else: rank = 0 -if rank == 0: +if rank == 0 and args.preMPI == False: print("Run time: %f"%(time.time()-tt)) sys.stdout.flush() diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver.py b/examples/05_IEA-3.4-130-RWT/weis_driver.py index e63be3a3d..2db7a45ae 100644 --- a/examples/05_IEA-3.4-130-RWT/weis_driver.py +++ b/examples/05_IEA-3.4-130-RWT/weis_driver.py @@ -2,65 +2,39 @@ import time import sys -from weis.glue_code.runWEIS import run_weis +from weis.glue_code.runWEIS import run_weis +from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs from 
openmdao.utils.mpi import MPI +# Parse args +args = weis_args() + ## File management run_dir = os.path.dirname( os.path.realpath(__file__) ) fname_wt_input = os.path.join(run_dir, 'IEA-3p4-130-RWT.yaml') fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml') fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml') -import argparse -# Set up argument parser -parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.") -# Add the flag -parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).") -parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.") -# Parse the arguments -args, _ = parser.parse_known_args() -# Use the flag in your script -if args.preMPI: - print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.") -else: - print("Preprocessor flag is set to False. Run WEIS now.") - tt = time.time() +maxnP = get_max_procs(args) -# Set max number of processes, either set by user or extracted from MPI -if args.preMPI: - maxnP = args.maxnP -else: - if MPI: - maxnP = MPI.COMM_WORLD.Get_size() - else: - maxnP = 1 - -if args.preMPI: - _, _, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) -else: - if MPI: - _, modeling_options, _ = run_weis(fname_wt_input, - fname_modeling_options, - fname_analysis_options, - prepMPI=True, - maxnP = maxnP) - - modeling_override = {} - modeling_override['General'] = {} - modeling_override['General']['openfast_configuration'] = {} - modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD'] - modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp'] - else: - modeling_override = None - wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, - 
fname_modeling_options, - fname_analysis_options, - modeling_override=modeling_override) +modeling_override = None +if MPI: + # Pre-compute number of cores needed in this run + _, modeling_options, _ = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + prepMPI=True, + maxnP = maxnP) + + modeling_override = set_modopt_procs(modeling_options) + +# Run WEIS for real now +wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, + fname_modeling_options, + fname_analysis_options, + modeling_override=modeling_override, + prepMPI=args.preMPI) if MPI: rank = MPI.COMM_WORLD.Get_rank()