diff --git a/.github/workflows/run_exhaustive_examples.yml b/.github/workflows/run_exhaustive_examples.yml
index 528c414df..30e766d2a 100644
--- a/.github/workflows/run_exhaustive_examples.yml
+++ b/.github/workflows/run_exhaustive_examples.yml
@@ -76,7 +76,19 @@ jobs:
run: |
cd weis/test
python run_examples.py
-
+
+ # Run parallel script calling OpenFAST
+ - name: Run parallel OpenFAST
+ run: |
+ cd examples/02_run_openfast_cases
+ mpiexec -np 2 python weis_driver_loads.py
+
+ # Run parallel script calling a simple optimization
+      - name: Run parallel simple optimization
+ run: |
+ cd examples/03_NREL5MW_OC3_spar
+ mpiexec -np 2 python weis_driver.py
+
# Run scripts within rotor_opt folder with MPI
- name: Run parallel examples rotor optimization
run: |
diff --git a/docs/index.rst b/docs/index.rst
index c21030311..da4a85245 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -20,6 +20,7 @@ Using WEIS
installation
how_weis_works
inputs/yaml_inputs
+ run_in_parallel
WEIS Visualization APP
diff --git a/docs/run_in_parallel.rst b/docs/run_in_parallel.rst
new file mode 100644
index 000000000..947d12a9c
--- /dev/null
+++ b/docs/run_in_parallel.rst
@@ -0,0 +1,93 @@
+Run in parallel
+---------------
+
+WEIS can be run sequentially on a single processor. WEIS can also be parallelized to handle larger problems in a timely manner.
+
+
+Background
+------------------------------------
+
+The parallelization of WEIS leverages the `MPI library <https://mpi4py.readthedocs.io/en/stable/>`_. Before proceeding, please make sure that your WEIS environment includes the library mpi4py as discussed in the README.md at the root level of this repo, or on the `WEIS GitHub page <https://github.com/WISDEM/WEIS>`_.
+
+Parallelization in WEIS happens at two levels:
+* The first level is triggered when an optimization is run and a gradient-based optimizer is called. In this case, the OpenMDAO library will execute the finite differences in parallel. OpenMDAO then assembles all partial derivatives in a single Jacobian and iterations progress.
+* The second level is triggered when multiple OpenFAST runs are specified. These are executed in parallel.
+The two levels of parallelization are integrated and can co-exist as long as sufficient computational resources are available.
+
+WEIS helps you set up the right call to MPI given the available computational resources, see the sections below.
+
+
+How to run WEIS in parallel
+------------------------------------
+
+Running WEIS in parallel is slightly more elaborated than running it sequentially. A first example of a parallel call for a design optimization in WEIS is provided in example `03_NREL5MW_OC3 `_. A second example runs an OpenFAST model in parallel, see example `02_run_openfast_cases `_.
+
+
+Design optimization in parallel
+------------------------------------
+
+These instructions follow example 03 `03_NREL5MW_OC3 `_. To run the design optimization in parallel, the first step consists of a pre-processing call to WEIS. This step returns the number of finite differences that are specified given the analysis_options.yaml file. To execute this step, in a terminal window navigate to example 03 and type:
+
+.. code-block:: bash
+
+ python weis_driver.py --preMPI=True
+
+In the terminal the code will return the best setup for an optimal MPI call given your problem.
+
+If you are resource constrained, you can pass the keyword argument `--maxnP` and set the maximum number of processors that you have available. If you have 20, type:
+
+.. code-block:: bash
+
+ python weis_driver.py --preMPI=True --maxnP=20
+
+These two commands will help you set up the appropriate computational resources.
+
+At this point, you are ready to launch WEIS in parallel. If you have 20 processors, your call to WEIS will look like:
+
+.. code-block:: bash
+
+ mpiexec -np 20 python weis_driver.py
+
+
+Parallelize calls to OpenFAST
+------------------------------------
+
+WEIS can be used to run OpenFAST simulations, such as design load cases.
+More information about setting DLCs can be found in the `DLC driver documentation <https://github.com/WISDEM/WEIS/blob/docs/docs/dlc_generator.rst>`_.
+
+To do so, WEIS is run with a single function evaluation that fires off a number of OpenFAST simulations.
+
+Let's look at an example in `02_run_openfast_cases `_.
+
+In a terminal, navigate to example 02 and type:
+
+.. code-block:: bash
+ python weis_driver_loads.py --preMPI=True
+
+The terminal should return this message
+
+.. code-block:: bash
+ Your problem has 0 design variable(s) and 7 OpenFAST run(s)
+
+ You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1
+
+ To run the code in parallel with MPI, execute one of the following commands
+
+ If you have access to (at least) 8 processors, please call WEIS as:
+ mpiexec -np 8 python weis_driver.py
+
+
+ If you do not have access to 8 processors
+ please provide your maximum available number of processors by typing:
+ python weis_driver.py --preMPI=True --maxnP=xx
+ And substitute xx with your number of processors
+
+If you have access to 8 processors, you are now ready to execute your script by typing
+
+.. code-block:: bash
+ mpiexec -np 8 python weis_driver_loads.py
+
+If you have access to fewer processors, say 4, adjust the -np entry accordingly
+
+.. code-block:: bash
+ mpiexec -np 4 python weis_driver_loads.py
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
index 373f1bd01..77767da95 100644
--- a/environment.yml
+++ b/environment.yml
@@ -9,16 +9,16 @@ dependencies:
- mat4py
- nlopt
- numpydoc
- - openfast>=3.5.3
+ - openfast=3.5.5
- openraft>=1.2.4
- osqp
- pcrunch
- pip
- pyhams>=1.3
#- pyoptsparse
- - rosco>=2.9.4
+ - rosco=2.9.5
- smt
- - wisdem>=3.16.4
+ - wisdem=3.18.1
- pip:
- dash-bootstrap-components
- dash-mantine-components
diff --git a/examples/02_run_openfast_cases/modeling_options.yaml b/examples/02_run_openfast_cases/modeling_options.yaml
index 28d925def..bd9e42933 100644
--- a/examples/02_run_openfast_cases/modeling_options.yaml
+++ b/examples/02_run_openfast_cases/modeling_options.yaml
@@ -32,6 +32,9 @@ ROSCO:
flag: True
tuning_yaml: ../01_aeroelasticse/OpenFAST_models/IEA-15-240-RWT/IEA-15-240-RWT-UMaineSemi/IEA15MW-UMaineSemi.yaml
Kp_float: -10
+ U_pc: [12, 18, 24]
+ omega_pc: [.1, .1, .1]
+ zeta_pc: [1.,1.,1.]
DLC_driver:
diff --git a/examples/02_run_openfast_cases/weis_driver_loads.py b/examples/02_run_openfast_cases/weis_driver_loads.py
index 435feef47..41aaa976e 100644
--- a/examples/02_run_openfast_cases/weis_driver_loads.py
+++ b/examples/02_run_openfast_cases/weis_driver_loads.py
@@ -3,22 +3,43 @@
import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) )
-fname_wt_input = run_dir + os.sep + 'IEA-15-240-RWT.yaml'
-fname_modeling_options = run_dir + os.sep + 'modeling_options_loads.yaml'
-fname_analysis_options = run_dir + os.sep + 'analysis_options_loads.yaml'
+# Parse args
+args = weis_args()
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'IEA-15-240-RWT.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options_loads.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options_loads.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/02_run_openfast_cases/weis_driver_rosco_opt.py b/examples/02_run_openfast_cases/weis_driver_rosco_opt.py
index 80af35b38..8d501ee76 100644
--- a/examples/02_run_openfast_cases/weis_driver_rosco_opt.py
+++ b/examples/02_run_openfast_cases/weis_driver_rosco_opt.py
@@ -4,6 +4,11 @@
from weis.glue_code.runWEIS import run_weis
from openmdao.utils.mpi import MPI
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
+
+# Parse args
+args = weis_args()
+
## File management
run_dir = os.path.dirname( os.path.realpath(__file__) )
@@ -13,12 +18,30 @@
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/03_NREL5MW_OC3_spar/analysis_options.yaml b/examples/03_NREL5MW_OC3_spar/analysis_options.yaml
index bffee6b70..8b5f4338f 100644
--- a/examples/03_NREL5MW_OC3_spar/analysis_options.yaml
+++ b/examples/03_NREL5MW_OC3_spar/analysis_options.yaml
@@ -42,7 +42,7 @@ driver:
max_major_iter: 10 # Maximum number of major design iterations (SNOPT)
max_minor_iter: 100 # Maximum number of minor design iterations (SNOPT)
max_iter: 2 # Maximum number of iterations (SLSQP)
- solver: LN_COBYLA # Optimization solver. Other options are 'SLSQP' - 'CONMIN'
+    solver: SLSQP         # Optimization solver. Other options are 'LN_COBYLA' - 'CONMIN'
step_size: 1.e-3 # Step size for finite differencing
form: central # Finite differencing mode, either forward or central
diff --git a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml
index be01c1533..729e31ca6 100644
--- a/examples/03_NREL5MW_OC3_spar/modeling_options.yaml
+++ b/examples/03_NREL5MW_OC3_spar/modeling_options.yaml
@@ -97,11 +97,11 @@ DLC_driver:
DLCs:
- DLC: "1.1"
ws_bin_size: 2
- wind_speed: [14.]
+ wind_speed: [14.,16., 18., 20., 22.]
wave_height: [7.]
wave_period: [1.]
n_seeds: 1
- analysis_time: 20.
+ analysis_time: 10.
transient_time: 0.
turbulent_wind:
HubHt: 90.0
diff --git a/examples/03_NREL5MW_OC3_spar/weis_driver.py b/examples/03_NREL5MW_OC3_spar/weis_driver.py
index 34cd41975..cd618976f 100644
--- a/examples/03_NREL5MW_OC3_spar/weis_driver.py
+++ b/examples/03_NREL5MW_OC3_spar/weis_driver.py
@@ -3,22 +3,43 @@
import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) )
-fname_wt_input = run_dir + os.sep + "nrel5mw-spar_oc3.yaml"
-fname_modeling_options = run_dir + os.sep + 'modeling_options.yaml'
-fname_analysis_options = run_dir + os.sep + 'analysis_options_noopt.yaml'
+# Parse args
+args = weis_args()
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py b/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py
index 08b1e9d1c..896efd801 100644
--- a/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py
+++ b/examples/03_NREL5MW_OC3_spar/weis_freq_driver.py
@@ -3,22 +3,43 @@
import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) )
-fname_wt_input = run_dir + os.sep + "nrel5mw-spar_oc3.yaml"
-fname_modeling_options = run_dir + os.sep + "modeling_options_freq.yaml"
-fname_analysis_options = run_dir + os.sep + "analysis_options_noopt.yaml"
+# Parse args
+args = weis_args()
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'nrel5mw-spar_oc3.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options_freq.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options_noopt.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/04_NREL5MW_OC4_semi/weis_driver.py b/examples/04_NREL5MW_OC4_semi/weis_driver.py
index 3c9ebf63d..c1c04f081 100644
--- a/examples/04_NREL5MW_OC4_semi/weis_driver.py
+++ b/examples/04_NREL5MW_OC4_semi/weis_driver.py
@@ -3,22 +3,43 @@
import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) )
-fname_wt_input = run_dir + os.sep + "nrel5mw-semi_oc4.yaml"
-fname_modeling_options = run_dir + os.sep + "modeling_options.yaml"
-fname_analysis_options = run_dir + os.sep + "analysis_options.yaml"
+# Parse args
+args = weis_args()
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'nrel5mw-semi_oc4.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py b/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py
index 55cee8a4a..ff02ad531 100644
--- a/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py
+++ b/examples/04_NREL5MW_OC4_semi/weis_freq_driver.py
@@ -3,22 +3,43 @@
import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) )
-fname_wt_input = run_dir + os.sep + "nrel5mw-semi_oc4.yaml"
-fname_modeling_options = run_dir + os.sep + "modeling_options_freq.yaml"
-fname_analysis_options = run_dir + os.sep + "analysis_options.yaml"
+# Parse args
+args = weis_args()
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'nrel5mw-semi_oc4.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options_freq.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
if MPI:
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
-if rank == 0:
+if rank == 0 and args.preMPI == False:
print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver.py b/examples/05_IEA-3.4-130-RWT/weis_driver.py
index 078e6f0f8..2db7a45ae 100644
--- a/examples/05_IEA-3.4-130-RWT/weis_driver.py
+++ b/examples/05_IEA-3.4-130-RWT/weis_driver.py
@@ -1,67 +1,45 @@
+import os
+import time
+import sys
from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.weis_args import weis_args, get_max_procs, set_modopt_procs
from openmdao.utils.mpi import MPI
-import os, time, sys
-## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) ) + os.sep
-fname_wt_input = run_dir + os.sep + "IEA-3p4-130-RWT.yaml"
-fname_modeling_options = run_dir + "modeling_options.yaml"
-fname_analysis_options = run_dir + "analysis_options.yaml"
+# Parse args
+args = weis_args()
-plot_flag = False
+## File management
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'IEA-3p4-130-RWT.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml')
tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options)
-
-rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
-
-if rank == 0 and plot_flag:
- print('Run time: %f'%(time.time()-tt))
+maxnP = get_max_procs(args)
+
+modeling_override = None
+if MPI:
+ # Pre-compute number of cores needed in this run
+ _, modeling_options, _ = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ prepMPI=True,
+ maxnP = maxnP)
+
+ modeling_override = set_modopt_procs(modeling_options)
+
+# Run WEIS for real now
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override,
+ prepMPI=args.preMPI)
+
+if MPI:
+ rank = MPI.COMM_WORLD.Get_rank()
+else:
+ rank = 0
+if rank == 0 and args.preMPI == False:
+ print("Run time: %f"%(time.time()-tt))
sys.stdout.flush()
-
- import matplotlib.pyplot as plt
- import numpy as np
- from matplotlib import cm
-
- X = wt_opt['sse_tune.aeroperf_tables.pitch_vector']
- Y = wt_opt['sse_tune.aeroperf_tables.tsr_vector']
- X, Y = np.meshgrid(X, Y)
- pitch_schedule = wt_opt['rotorse.rp.powercurve.pitch']
- tsr_schedule = wt_opt['rotorse.rp.powercurve.Omega'] / 30. * np.pi * wt_opt['rotorse.Rtip'] / wt_opt['rotorse.rp.powercurve.V']
-
- # Plot the Cp surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Cp']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno, levels = [0, 0.1, 0.2, 0.3, 0.4, 0.44, 0.47, 0.50, 0.53, 0.56])
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic power coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- # Plot the Ct surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Ct']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno)
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic thrust coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- # Plot the Cq surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Cq']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno)
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic torque coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- plt.show()
-
diff --git a/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py b/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py
index ff207b286..4c6330d6e 100644
--- a/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py
+++ b/examples/05_IEA-3.4-130-RWT/weis_driver_model_only.py
@@ -1,73 +1,22 @@
+import os
+import time
+import sys
-from weis.glue_code.runWEIS import run_weis
+from weis.glue_code.runWEIS import run_weis
from openmdao.utils.mpi import MPI
-import os, time, sys
## File management
-run_dir = os.path.dirname( os.path.realpath(__file__) ) + os.sep
-fname_wt_input = run_dir + os.sep + "IEA-3p4-130-RWT.yaml"
-fname_modeling_options = run_dir + "modeling_options.yaml"
-fname_analysis_options = run_dir + "analysis_options.yaml"
+run_dir = os.path.dirname( os.path.realpath(__file__) )
+fname_wt_input = os.path.join(run_dir, 'IEA-3p4-130-RWT.yaml')
+fname_modeling_options = os.path.join(run_dir, 'modeling_options.yaml')
+fname_analysis_options = os.path.join(run_dir, 'analysis_options.yaml')
-plot_flag = False
-
-# Test model_only flag here
modeling_override = {}
modeling_override['General'] = {}
modeling_override['General']['openfast_configuration'] = {}
modeling_override['General']['openfast_configuration']['model_only'] = True
-tt = time.time()
-wt_opt, modeling_options, opt_options = run_weis(fname_wt_input, fname_modeling_options, fname_analysis_options, modeling_override=modeling_override)
-
-rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
-
-if rank == 0 and plot_flag:
- print('Run time: %f'%(time.time()-tt))
- sys.stdout.flush()
-
- import matplotlib.pyplot as plt
- import numpy as np
- from matplotlib import cm
-
- X = wt_opt['sse_tune.aeroperf_tables.pitch_vector']
- Y = wt_opt['sse_tune.aeroperf_tables.tsr_vector']
- X, Y = np.meshgrid(X, Y)
- pitch_schedule = wt_opt['rotorse.rp.powercurve.pitch']
- tsr_schedule = wt_opt['rotorse.rp.powercurve.Omega'] / 30. * np.pi * wt_opt['rotorse.Rtip'] / wt_opt['rotorse.rp.powercurve.V']
-
- # Plot the Cp surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Cp']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno, levels = [0, 0.1, 0.2, 0.3, 0.4, 0.44, 0.47, 0.50, 0.53, 0.56])
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic power coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- # Plot the Ct surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Ct']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno)
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic thrust coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- # Plot the Cq surface
- fig, ax = plt.subplots()
- Z = wt_opt['sse_tune.aeroperf_tables.Cq']
- cs = ax.contourf(X, Y, Z[:,:,0], cmap=cm.inferno)
- ax.plot(pitch_schedule, tsr_schedule, 'w--', label = 'Regulation trajectory')
- ax.set_xlabel('Pitch angle (deg)', fontweight = 'bold')
- ax.set_ylabel('Tip speed ratio (-)', fontweight = 'bold')
- cbar = fig.colorbar(cs)
- cbar.ax.set_ylabel('Aerodynamic torque coefficient (-)', fontweight = 'bold')
- plt.legend()
-
- plt.show()
-
+wt_opt, modeling_options, opt_options = run_weis(fname_wt_input,
+ fname_modeling_options,
+ fname_analysis_options,
+ modeling_override=modeling_override)
diff --git a/weis/aeroelasticse/FileTools.py b/weis/aeroelasticse/FileTools.py
index 9477400c4..9d691901a 100644
--- a/weis/aeroelasticse/FileTools.py
+++ b/weis/aeroelasticse/FileTools.py
@@ -3,6 +3,7 @@
import operator
import numpy as np
import yaml
+import sys
from functools import reduce
try:
import ruamel_yaml as ry
@@ -136,6 +137,12 @@ def save_yaml(outdir, fname, data_out):
yaml.dump(data_out, f)
f.close()
+def print_yaml(data_struct):
+ data_struct = remove_numpy(data_struct)
+ yaml=ry.YAML()
+ yaml.indent(mapping=4, sequence=6, offset=3)
+ yaml.dump(data_struct,sys.stdout)
+
def select_cases(cases, var_sel, val_sel):
# Find a variable value from the AeroelasticSE case_matrix
diff --git a/weis/control/tune_rosco.py b/weis/control/tune_rosco.py
index 3e9094ac0..02779a909 100644
--- a/weis/control/tune_rosco.py
+++ b/weis/control/tune_rosco.py
@@ -522,7 +522,7 @@ def setup(self):
if parameter_filename == 'none':
raise Exception('A ROSCO tuning_yaml must be specified in the modeling_options if from_OpenFAST is True')
- inps = load_rosco_yaml(parameter_filename)
+ inps = load_rosco_yaml(parameter_filename, rank_0=True)
self.turbine_params = inps['turbine_params']
self.control_params = inps['controller_params']
diff --git a/weis/glue_code/gc_PoseOptimization.py b/weis/glue_code/gc_PoseOptimization.py
index 030da6331..57c39cd71 100644
--- a/weis/glue_code/gc_PoseOptimization.py
+++ b/weis/glue_code/gc_PoseOptimization.py
@@ -25,199 +25,12 @@ def __init__(self, wt_init, modeling_options, analysis_options):
else:
self.floating_period_solve_component = 'floatingse'
-
- def get_number_design_variables(self):
- # Determine the number of design variables
- n_DV = 0
-
- rotorD_opt = self.opt["design_variables"]["rotor_diameter"]
- blade_opt = self.opt["design_variables"]["blade"]
- tower_opt = self.opt["design_variables"]["tower"]
- mono_opt = self.opt["design_variables"]["monopile"]
- jacket_opt = self.opt["design_variables"]["jacket"]
- hub_opt = self.opt["design_variables"]["hub"]
- drive_opt = self.opt["design_variables"]["drivetrain"]
- float_opt = self.opt["design_variables"]["floating"]
- mooring_opt = self.opt["design_variables"]["mooring"]
-
- if rotorD_opt["flag"]:
- n_DV += 1
- if blade_opt["aero_shape"]["twist"]["flag"]:
- if blade_opt["aero_shape"]["twist"]["index_end"] > blade_opt["aero_shape"]["twist"]["n_opt"]:
- raise Exception(
- "Check the analysis options yaml, index_end of the blade twist is higher than the number of DVs n_opt"
- )
- elif blade_opt["aero_shape"]["twist"]["index_end"] == 0:
- blade_opt["aero_shape"]["twist"]["index_end"] = blade_opt["aero_shape"]["twist"]["n_opt"]
- n_DV += blade_opt["aero_shape"]["twist"]["index_end"] - blade_opt["aero_shape"]["twist"]["index_start"]
- if blade_opt["aero_shape"]["chord"]["flag"]:
- if blade_opt["aero_shape"]["chord"]["index_end"] > blade_opt["aero_shape"]["chord"]["n_opt"]:
- raise Exception(
- "Check the analysis options yaml, index_end of the blade chord is higher than the number of DVs n_opt"
- )
- elif blade_opt["aero_shape"]["chord"]["index_end"] == 0:
- blade_opt["aero_shape"]["chord"]["index_end"] = blade_opt["aero_shape"]["chord"]["n_opt"]
- n_DV += blade_opt["aero_shape"]["chord"]["index_end"] - blade_opt["aero_shape"]["chord"]["index_start"]
- if blade_opt["aero_shape"]["af_positions"]["flag"]:
- n_DV += (
- self.modeling["WISDEM"]["RotorSE"]["n_af_span"]
- - blade_opt["aero_shape"]["af_positions"]["af_start"]
- - 1
- )
- if "structure" in blade_opt:
- if len(blade_opt["structure"])>0:
- for i in range(len(blade_opt["structure"])):
- if blade_opt["structure"][i]["index_end"] > blade_opt["structure"][i]["n_opt"]:
- raise Exception(
- "Check the analysis options yaml, the index_end of a blade layer is higher than the number of DVs n_opt"
- )
- elif blade_opt["structure"][i]["index_end"] == 0:
- blade_opt["structure"][i]["index_end"] = blade_opt["structure"][i]["n_opt"]
- n_DV += (
- blade_opt["structure"][i]["index_end"]
- - blade_opt["structure"][i]["index_start"]
- )
- if self.opt["design_variables"]["control"]["tsr"]["flag"]:
- n_DV += 1
-
- if tower_opt["outer_diameter"]["flag"]:
- n_DV += self.modeling["WISDEM"]["TowerSE"]["n_height"]
- if tower_opt["layer_thickness"]["flag"]:
- n_DV += self.modeling["WISDEM"]["TowerSE"]["n_height"] * self.modeling["WISDEM"]["TowerSE"]["n_layers"]
- if mono_opt["outer_diameter"]["flag"]:
- n_DV += self.modeling["WISDEM"]["FixedBottomSE"]["n_height"]
- if mono_opt["layer_thickness"]["flag"]:
- n_DV += (
- self.modeling["WISDEM"]["FixedBottomSE"]["n_height"]
- * self.modeling["WISDEM"]["FixedBottomSE"]["n_layers"]
- )
- # TODO: FIX THIS
- # if jacket_opt["outer_diameter"]["flag"]:
- # n_DV += self.modeling["WISDEM"]["FixedBottomSE"]["n_height"]
- # if jacket_opt["layer_thickness"]["flag"]:
- # n_DV += (
- # self.modeling["WISDEM"]["FixedBottomSE"]["n_height"]
- # * self.modeling["WISDEM"]["FixedBottomSE"]["n_layers"]
- # )
- if hub_opt["cone"]["flag"]:
- n_DV += 1
- if hub_opt["hub_diameter"]["flag"]:
- n_DV += 1
- for k in [
- "uptilt",
- "overhang",
- "distance_tt_hub",
- "distance_hub_mb",
- "distance_mb_mb",
- "generator_length",
- "gear_ratio",
- "generator_length",
- "bedplate_web_thickness",
- "bedplate_flange_thickness",
- "bedplate_flange_width",
- ]:
- if drive_opt[k]["flag"]:
- n_DV += 1
- for k in [
- "lss_diameter",
- "lss_wall_thickness",
- "hss_diameter",
- "hss_wall_thickness",
- "nose_diameter",
- "nose_wall_thickness",
- ]:
- if drive_opt[k]["flag"]:
- n_DV += 2
- if drive_opt["bedplate_wall_thickness"]["flag"]:
- n_DV += 4
-
- if float_opt["joints"]["flag"]:
- n_DV += len(float_opt["joints"]["z_coordinate"]) + len(float_opt["joints"]["r_coordinate"])
-
- if float_opt["members"]["flag"]:
- for k, kgrp in enumerate(float_opt["members"]["groups"]):
- memname = kgrp["names"][0]
- memidx = self.modeling["floating"]["members"]["name"].index(memname)
- n_grid = len(self.modeling["floating"]["members"]["grid_member_" + memname])
- n_layers = self.modeling["floating"]["members"]["n_layers"][memidx]
- if "diameter" in kgrp:
- if "constant" in kgrp["diameter"]:
- n_DV += 1
- else:
- n_DV += n_grid
- if "thickness" in kgrp:
- n_DV += n_grid * n_layers
- if "ballast" in kgrp:
- n_DV += self.modeling["floating"]["members"]["ballast_flag_member_" + memname].count(False)
- if "stiffeners" in kgrp:
- if "ring" in kgrp["stiffeners"]:
- if "size" in kgrp["stiffeners"]["ring"]:
- pass
- if "spacing" in kgrp["stiffeners"]["ring"]:
- n_DV += 1
- if "longitudinal" in kgrp["stiffeners"]:
- if "size" in kgrp["stiffeners"]["longitudinal"]:
- pass
- if "spacing" in kgrp["stiffeners"]["longitudinal"]:
- n_DV += 1
- if "axial_joints" in kgrp:
- n_DV += len(kgrp["axial_joints"])
- if self.modeling["flags"]["mooring"]:
- n_design = 1 if self.modeling["mooring"]["symmetric"] else self.modeling["mooring"]["n_lines"]
- if mooring_opt["line_length"]["flag"]:
- n_DV += n_design
- if mooring_opt["line_diameter"]["flag"]:
- n_DV += n_design
-
- # Count and add design variables from WEIS
- if self.opt['design_variables']['control']['servo']['pitch_control']['omega']['flag']:
- if hasattr(self.modeling['ROSCO']['omega_pc'],'__len__'):
- n_add += len(self.modeling['ROSCO']['omega_pc'])
- else:
- n_add += 1
- if self.opt['design_variables']['control']['servo']['pitch_control']['zeta']['flag']:
- if hasattr(self.modeling['ROSCO']['zeta_pc'],'__len__'):
- n_add += len(self.modeling['ROSCO']['zeta_pc'])
- else:
- n_add += 1
- if self.opt['design_variables']['control']['servo']['pitch_control']['Kp_float']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['servo']['pitch_control']['ptfm_freq']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['servo']['torque_control']['omega']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['servo']['torque_control']['zeta']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['servo']['flap_control']['flp_kp_norm']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['servo']['flap_control']['flp_tau']['flag']:
- n_DV += 1
- if self.opt['design_variables']['control']['flaps']['te_flap_end']['flag']:
- n_DV += self.modeling['WISDEM']['RotorSE']['n_te_flaps']
- if self.opt['design_variables']['control']['flaps']['te_flap_ext']['flag']:
- n_DV += self.modeling['WISDEM']['RotorSE']['n_te_flaps']
- if self.opt['design_variables']['control']['ps_percent']['flag']:
- n_DV += 1
-
- if self.opt['driver']['optimization']['form'] == 'central':
- n_DV *= 2
-
- # TMD DVs
- if self.opt['design_variables']['TMDs']['flag']:
- TMD_opt = self.opt['design_variables']['TMDs']
-
- # We only support one TMD for now
- for tmd_group in TMD_opt['groups']:
- if 'mass' in tmd_group:
- n_DV += 1
- if 'stiffness' in tmd_group:
- n_DV += 1
- if 'damping' in tmd_group:
- n_DV += 1
-
- return n_DV
-
-
+ if modeling_options['Level3']['flag']:
+ self.n_OF_runs = modeling_options['DLC_driver']['n_cases']
+ elif modeling_options['Level2']['flag']:
+ self.n_OF_runs = modeling_options['Level2']['linearization']['NLinTimes']
+ else:
+ self.n_OF_runs = 0
def set_objective(self, wt_opt):
# Set merit figure. Each objective has its own scaling. Check first for user override
diff --git a/weis/glue_code/mpi_tools.py b/weis/glue_code/mpi_tools.py
index e53fcc852..603fc2355 100644
--- a/weis/glue_code/mpi_tools.py
+++ b/weis/glue_code/mpi_tools.py
@@ -1,8 +1,103 @@
import os
import sys
-
+import numpy as np
from openmdao.utils.mpi import MPI
-
+from weis.aeroelasticse.FileTools import print_yaml
+
+
+def compute_optimal_nP(nFD, nOF, modeling_options, opt_options, maxnP=1):
+
+
+ fd_methods = ['SLSQP','SNOPT', 'LD_MMA']
+ evolutionary_methods = ['DE', 'NSGA2']
+
+ if not MPI:
+ print("\nYour problem has %d design variable(s) and %d OpenFAST run(s)\n"%(nFD, nOF))
+
+ if modeling_options['Level3']['flag']:
+        # Adjust the number of parallel evaluations: evolutionary solvers get a 5x multiplier; non-DOE, non-gradient-based runs drop to 1
+ if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods):
+ if not MPI:
+                print("You are running an optimization based on an evolutionary solver. The number of parallel finite differences is now multiplied by 5\n")
+ nFD *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take
+ elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
+ if not MPI:
+ print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n")
+ nFD = 1
+ elif modeling_options['Level2']['flag']:
+ if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
+ if not MPI:
+ print("You are not running a design optimization, a design of experiment, or your optimizer is not gradient based. The number of parallel function evaluations is set to 1\n")
+ nFD = 1
+ # # if we're doing a GA or such, "FD" means "entities in epoch"
+ # if opt_options['driver']['optimization']['solver'] in evolutionary_methods:
+ # nFD = maxnP
+
+ if not MPI:
+ print("To run the code in parallel with MPI, execute one of the following commands\n")
+
+ if maxnP != 1:
+ if not MPI:
+ print("You have access to %d processors. Please call WEIS as:"%maxnP)
+ # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD)
+ # evaluations based on the number of design variables (DV). OpenFAST on/off changes things.
+ if modeling_options['Level3']['flag']:
+ # If openfast is called, the maximum number of FD is the number of DV, if we have the number of processors available that doubles the number of DVs,
+ # otherwise it is half of the number of DV (rounded to the lower integer).
+ # We need this because a top layer of processors calls a bottom set of processors where OpenFAST runs.
+ if maxnP > 2. * nFD:
+ nFD = nFD
+ else:
+ nFD = int(np.floor(maxnP / 2))
+ # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources
+ # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and
+ # the number of processors available (minus the number of DV, which sit and wait for OF to complete)
+ nFD = max([nFD, 1])
+ max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1])
+ nOFp = min([int(nOF), max_parallel_OF_runs])
+ elif modeling_options['Level2']['flag']:
+ if maxnP > 2. * nFD:
+ nFD = nFD
+ else:
+ nFD = int(np.floor(maxnP / 2))
+ nFD = max([nFD, 1])
+ max_parallel_OF_runs = max([int(np.floor((maxnP - nFD) / nFD)), 1])
+ nOFp = min([int(nOF), max_parallel_OF_runs])
+ else:
+ # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of processors available and DV
+ nFD = min([maxnP, nFD])
+ nOFp = 1
+ nP = nFD + nFD * nOFp
+
+ else:
+ nOFp = nOF
+ nP = nFD + nFD * nOFp
+ if not MPI:
+ print("If you have access to (at least) %d processors, please call WEIS as:"%nP)
+
+ if not MPI:
+ print("mpiexec -np %d python weis_driver.py\n"%nP)
+
+ if maxnP == 1 and not MPI:
+ print("\nIf you do not have access to %d processors"%nP)
+ print("please provide your maximum available number of processors by typing:")
+ print("python weis_driver.py --preMPI=True --maxnP=xx")
+ print("And substitute xx with your number of processors\n")
+
+ modeling_updates = {}
+ modeling_updates['General'] = {}
+ modeling_updates['General']['openfast_configuration'] = {}
+ modeling_updates['General']['openfast_configuration']['nP'] = nP
+ modeling_updates['General']['openfast_configuration']['nFD'] = nFD
+ modeling_updates['General']['openfast_configuration']['nOFp'] = nOFp
+
+ print('The following changes should be made to the modeling options:')
+ print_yaml(modeling_updates)
+
+ # Apply updates
+ modeling_options['General']['openfast_configuration'].update(modeling_updates['General']['openfast_configuration'])
+
+ return modeling_options
def under_mpirun():
"""Return True if we're being executed under mpirun."""
@@ -35,54 +130,23 @@ def debug(*msg): # pragma: no cover
MPI = None
-def map_comm_heirarchical(n_DV, n_OF, openmp=False):
+def map_comm_heirarchical(nFD, n_OF):
"""
Heirarchical parallelization communicator mapping. Assumes a number of top level processes
- equal to the number of design variables (x2 if central finite differencing is used), each
+ equal to the number of finite differences, each
with its associated number of openfast simulations.
- When openmp flag is turned on, the code spreads the openfast simulations across nodes to
- lavereage the opnemp parallelization of OpenFAST. The cores that will run under openmp, are marked
- in the color map as 1000000. The ones handling python and the DV are marked as 0, and
- finally the master ones for each openfast run are marked with a 1.
"""
- if openmp:
- n_procs_per_node = 36 # Number of
- num_procs = MPI.COMM_WORLD.Get_size()
- n_nodes = num_procs / n_procs_per_node
-
- comm_map_down = {}
- comm_map_up = {}
- color_map = [1000000] * num_procs
-
- n_DV_per_node = n_DV / n_nodes
-
- # for m in range(n_DV_per_node):
- for nn in range(int(n_nodes)):
- for n_dv in range(int(n_DV_per_node)):
- comm_map_down[nn * n_procs_per_node + n_dv] = [
- int(n_DV_per_node) + n_dv * n_OF + nn * (n_procs_per_node) + j for j in range(n_OF)
- ]
-
- # This core handles python, so in the colormap the entry is 0
- color_map[nn * n_procs_per_node + n_dv] = int(0)
- # These cores handles openfast, so in the colormap the entry is 1
- for k in comm_map_down[nn * n_procs_per_node + n_dv]:
- color_map[k] = int(1)
-
- for j in comm_map_down[nn * n_procs_per_node + n_dv]:
- comm_map_up[j] = nn * n_procs_per_node + n_dv
- else:
- N = n_DV + n_DV * n_OF
- comm_map_down = {}
- comm_map_up = {}
- color_map = [0] * n_DV
+ N = nFD + nFD * n_OF
+ comm_map_down = {}
+ comm_map_up = {}
+ color_map = [0] * nFD
- for i in range(n_DV):
- comm_map_down[i] = [n_DV + j + i * n_OF for j in range(n_OF)]
- color_map.extend([i + 1] * n_OF)
+ for i in range(nFD):
+ comm_map_down[i] = [nFD + j + i * n_OF for j in range(n_OF)]
+ color_map.extend([i + 1] * n_OF)
- for j in comm_map_down[i]:
- comm_map_up[j] = i
+ for j in comm_map_down[i]:
+ comm_map_up[j] = i
return comm_map_down, comm_map_up, color_map
diff --git a/weis/glue_code/runWEIS.py b/weis/glue_code/runWEIS.py
index 92e6d9176..b7d735414 100644
--- a/weis/glue_code/runWEIS.py
+++ b/weis/glue_code/runWEIS.py
@@ -11,14 +11,15 @@
from weis.control.tmd import assign_TMD_values
from weis.aeroelasticse.FileTools import save_yaml
from wisdem.inputs.validation import simple_types
+from weis.glue_code.mpi_tools import compute_optimal_nP
-fd_methods = ['SLSQP','SNOPT', 'LD_MMA']
-evolutionary_methods = ['DE', 'NSGA2']
if MPI:
from weis.glue_code.mpi_tools import map_comm_heirarchical, subprocessor_loop, subprocessor_stop
-def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry_override=None, modeling_override=None, analysis_override=None):
+def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options,
+ geometry_override=None, modeling_override=None, analysis_override=None,
+ prepMPI=False, maxnP=1):
# Load all yaml inputs and validate (also fills in defaults)
wt_initial = WindTurbineOntologyPythonWEIS(
fname_wt_input,
@@ -32,81 +33,32 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry
# Initialize openmdao problem. If running with multiple processors in MPI, use parallel finite differencing equal to the number of cores used.
# Otherwise, initialize the WindPark system normally. Get the rank number for parallelization. We only print output files using the root processor.
myopt = PoseOptimizationWEIS(wt_init, modeling_options, opt_options)
-
+
if MPI:
- n_DV = myopt.get_number_design_variables()
- # Extract the number of cores available
- max_cores = MPI.COMM_WORLD.Get_size()
-
- # Define the color map for the parallelization, determining the maximum number of parallel finite difference (FD)
- # evaluations based on the number of design variables (DV). OpenFAST on/off changes things.
- if modeling_options['Level3']['flag']:
-
- # If we are running an optimization method that doesn't use finite differencing, set the number of DVs to 1
- if not (opt_options['driver']['design_of_experiments']['flag']) and (opt_options['driver']['optimization']['solver'] in evolutionary_methods):
- n_DV *= 5 # targeting 10*n_DV population size... this is what the equivalent FD coloring would take
- elif not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
- n_DV = 1
-
-
- # If openfast is called, the maximum number of FD is the number of DV, if we have the number of cores available that doubles the number of DVs,
- # otherwise it is half of the number of DV (rounded to the lower integer).
- # We need this because a top layer of cores calls a bottom set of cores where OpenFAST runs.
- if max_cores > 2. * n_DV:
- n_FD = n_DV
- else:
- n_FD = int(np.floor(max_cores / 2))
- # Get the number of OpenFAST runs from the user input and the max that can run in parallel given the resources
- # The number of OpenFAST runs is the minimum between the actual number of requested OpenFAST simulations, and
- # the number of cores available (minus the number of DV, which sit and wait for OF to complete)
- n_OF_runs = modeling_options['DLC_driver']['n_cases']
- n_DV = max([n_DV, 1])
- max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1])
- n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs])
- elif modeling_options['Level2']['flag']:
-
- if not (opt_options['driver']['design_of_experiments']['flag'] or opt_options['driver']['optimization']['solver'] in fd_methods):
- n_DV = 1
-
-
- if max_cores > 2. * n_DV:
- n_FD = n_DV
- else:
- n_FD = int(np.floor(max_cores / 2))
- n_OF_runs = modeling_options['Level2']['linearization']['NLinTimes']
- n_DV = max([n_DV, 1])
- max_parallel_OF_runs = max([int(np.floor((max_cores - n_DV) / n_DV)), 1])
- n_OF_runs_parallel = min([int(n_OF_runs), max_parallel_OF_runs])
+ if not prepMPI:
+ nFD = modeling_options['General']['openfast_configuration']['nFD']
+ nOFp = modeling_options['General']['openfast_configuration']['nOFp']
else:
- # If OpenFAST is not called, the number of parallel calls to compute the FDs is just equal to the minimum of cores available and DV
- n_FD = min([max_cores, n_DV])
- n_OF_runs_parallel = 1
- # if we're doing a GA or such, "FD" means "entities in epoch"
- if opt_options['driver']['optimization']['solver'] in evolutionary_methods:
- n_FD = max_cores
-
+ nFD = 1
+ nOFp = 0
# Define the color map for the cores (how these are distributed between finite differencing and openfast runs)
if opt_options['driver']['design_of_experiments']['flag']:
- n_FD = MPI.COMM_WORLD.Get_size()
- n_OF_runs_parallel = 1
+ nFD = MPI.COMM_WORLD.Get_size()
+ nOFp = 1
rank = MPI.COMM_WORLD.Get_rank()
comm_map_up = comm_map_down = {}
for r in range(MPI.COMM_WORLD.Get_size()):
comm_map_up[r] = [r]
color_i = 0
else:
- n_FD = max([n_FD, 1])
- if modeling_options['Level3']['flag'] == True and modeling_options['Level3']['AeroDyn']['WakeMod'] == 3:
- olaf = True
- else:
- olaf = False
- comm_map_down, comm_map_up, color_map = map_comm_heirarchical(n_FD, n_OF_runs_parallel, openmp=olaf)
+ nFD = max([nFD, 1])
+ comm_map_down, comm_map_up, color_map = map_comm_heirarchical(nFD, nOFp)
rank = MPI.COMM_WORLD.Get_rank()
if rank < len(color_map):
try:
color_i = color_map[rank]
except IndexError:
- raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(n_FD))
+ raise ValueError('The number of finite differencing variables is {} and the correct number of cores were not allocated'.format(nFD))
else:
color_i = max(color_map) + 1
comm_i = MPI.COMM_WORLD.Split(color_i, 1)
@@ -133,13 +85,13 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry
if opt_options['driver']['design_of_experiments']['flag']:
modeling_options['General']['openfast_configuration']['cores'] = 1
else:
- modeling_options['General']['openfast_configuration']['cores'] = n_OF_runs_parallel
+ modeling_options['General']['openfast_configuration']['cores'] = nOFp
# Parallel settings for OpenMDAO
if opt_options['driver']['design_of_experiments']['flag']:
wt_opt = om.Problem(model=WindPark(modeling_options = modeling_options, opt_options = opt_options), reports=False)
else:
- wt_opt = om.Problem(model=om.Group(num_par_fd=n_FD), comm=comm_i, reports=False)
+ wt_opt = om.Problem(model=om.Group(num_par_fd=nFD), comm=comm_i, reports=False)
wt_opt.model.add_subsystem('comp', WindPark(modeling_options = modeling_options, opt_options = opt_options), promotes=['*'])
else:
# Sequential finite differencing and openfast simulations
@@ -156,8 +108,7 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry
if opt_options['driver']['design_of_experiments']['flag']:
wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs']
- wt_opt.driver.options['procs_per_model'] = 1 # n_OF_runs_parallel # int(max_cores / np.floor(max_cores/n_OF_runs))
-
+ wt_opt.driver.options['procs_per_model'] = 1
wt_opt = myopt.set_recorders(wt_opt)
wt_opt.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs','totals']
@@ -169,79 +120,95 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry
# memory for the derivative arrays.
wt_opt.setup(derivatives=False)
- # Load initial wind turbine data from wt_initial to the openmdao problem
- wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options)
- wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options)
- if modeling_options['flags']['TMDs']:
- wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options)
-
- wt_opt = myopt.set_initial(wt_opt, wt_init)
- if modeling_options['Level3']['flag']:
- wt_opt = myopt.set_initial_weis(wt_opt)
-
- # If the user provides values in geometry_override, they overwrite
- # whatever values have been set by the yaml files.
- # This is useful for performing black-box wrapped optimization without
- # needing to modify the yaml files.
- # Some logic is used here if the user gives a smalller size for the
- # design variable than expected to input the values into the end
- # of the array.
- # This is useful when optimizing twist, where the first few indices
- # do not need to be optimized as they correspond to a circular cross-section.
- if geometry_override is not None:
- for key in geometry_override:
- num_values = np.array(geometry_override[key]).size
- key_size = wt_opt[key].size
- idx_start = key_size - num_values
- wt_opt[key][idx_start:] = geometry_override[key]
-
- # Place the last design variables from a previous run into the problem.
- # This needs to occur after the above setup() and yaml2openmdao() calls
- # so these values are correctly placed in the problem.
- wt_opt = myopt.set_restart(wt_opt)
-
- if 'check_totals' in opt_options['driver']['optimization']:
- if opt_options['driver']['optimization']['check_totals']:
- wt_opt.run_model()
- totals = wt_opt.compute_totals()
-
- if 'check_partials' in opt_options['driver']['optimization']:
- if opt_options['driver']['optimization']['check_partials']:
+    # Estimate number of design variables and parallel calls to OpenFAST given
+ # the computational resources available. This is used to setup WEIS for an MPI run
+ if prepMPI:
+ nFD = 0
+ for dv in wt_opt.model.list_outputs(is_design_var=True, out_stream=None):
+ # dv is a tuple with (name, info)
+ nFD += len(dv[1]['val'])
+
+ # number of finite differences should be at least 1
+ nFD = max([1,nFD])
+
+ # Compute number of processors
+ modeling_options = compute_optimal_nP(nFD, myopt.n_OF_runs, modeling_options, opt_options, maxnP = maxnP)
+
+ # If WEIS is called simply to prep for an MPI call, no need to proceed and simply
+ # return the number of finite differences and OpenFAST calls, and stop
+ # Otherwise, keep going assigning inputs and running the OpenMDAO model/driver
+ if not prepMPI:
+ # Load initial wind turbine data from wt_initial to the openmdao problem
+ wt_opt = yaml2openmdao(wt_opt, modeling_options, wt_init, opt_options)
+ wt_opt = assign_ROSCO_values(wt_opt, modeling_options, opt_options)
+ if modeling_options['flags']['TMDs']:
+ wt_opt = assign_TMD_values(wt_opt, wt_init, opt_options)
+
+ wt_opt = myopt.set_initial(wt_opt, wt_init)
+ if modeling_options['Level3']['flag']:
+ wt_opt = myopt.set_initial_weis(wt_opt)
+
+ # If the user provides values in geometry_override, they overwrite
+ # whatever values have been set by the yaml files.
+ # This is useful for performing black-box wrapped optimization without
+ # needing to modify the yaml files.
+        # Some logic is used here if the user gives a smaller size for the
+ # design variable than expected to input the values into the end
+ # of the array.
+ # This is useful when optimizing twist, where the first few indices
+ # do not need to be optimized as they correspond to a circular cross-section.
+ if geometry_override is not None:
+ for key in geometry_override:
+ num_values = np.array(geometry_override[key]).size
+ key_size = wt_opt[key].size
+ idx_start = key_size - num_values
+ wt_opt[key][idx_start:] = geometry_override[key]
+
+ # Place the last design variables from a previous run into the problem.
+ # This needs to occur after the above setup() and yaml2openmdao() calls
+ # so these values are correctly placed in the problem.
+ wt_opt = myopt.set_restart(wt_opt)
+
+ if 'check_totals' in opt_options['driver']['optimization']:
+ if opt_options['driver']['optimization']['check_totals']:
+ wt_opt.run_model()
+ totals = wt_opt.compute_totals()
+
+ if 'check_partials' in opt_options['driver']['optimization']:
+ if opt_options['driver']['optimization']['check_partials']:
+ wt_opt.run_model()
+ checks = wt_opt.check_partials(compact_print=True)
+
+ sys.stdout.flush()
+ # Run openmdao problem
+ if opt_options['opt_flag']:
+ wt_opt.run_driver()
+ else:
wt_opt.run_model()
- checks = wt_opt.check_partials(compact_print=True)
- sys.stdout.flush()
- # Run openmdao problem
- if opt_options['opt_flag']:
- wt_opt.run_driver()
- else:
- wt_opt.run_model()
-
- if (not MPI) or (MPI and rank == 0):
- # Save data coming from openmdao to an output yaml file
- froot_out = os.path.join(folder_output, opt_options['general']['fname_output'])
- # Remove the fst_vt key from the dictionary and write out the modeling options
- modeling_options['General']['openfast_configuration']['fst_vt'] = {}
- if not modeling_options['Level3']['from_openfast']:
- wt_initial.write_ontology(wt_opt, froot_out)
- wt_initial.write_options(froot_out)
-
- # openMDAO doesn't save constraint values, so we get them from this construction
- problem_var_dict = wt_opt.list_driver_vars(
- desvar_opts=["lower", "upper",],
- cons_opts=["lower", "upper", "equals",],
- )
- save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict))
-
- # Save data to numpy and matlab arrays
- fileIO.save_data(froot_out, wt_opt)
+ if (not MPI) or (MPI and rank == 0):
+ # Save data coming from openmdao to an output yaml file
+ froot_out = os.path.join(folder_output, opt_options['general']['fname_output'])
+ # Remove the fst_vt key from the dictionary and write out the modeling options
+ modeling_options['General']['openfast_configuration']['fst_vt'] = {}
+ if not modeling_options['Level3']['from_openfast']:
+ wt_initial.write_ontology(wt_opt, froot_out)
+ wt_initial.write_options(froot_out)
+
+ # openMDAO doesn't save constraint values, so we get them from this construction
+ problem_var_dict = wt_opt.list_driver_vars(
+ desvar_opts=["lower", "upper",],
+ cons_opts=["lower", "upper", "equals",],
+ )
+ save_yaml(folder_output, "problem_vars.yaml", simple_types(problem_var_dict))
+
+ # Save data to numpy and matlab arrays
+ fileIO.save_data(froot_out, wt_opt)
if MPI and \
(modeling_options['Level3']['flag'] or modeling_options['Level2']['flag']) and \
- (not opt_options['driver']['design_of_experiments']['flag']) and \
- color_i < 1000000:
+ (not opt_options['driver']['design_of_experiments']['flag']):
# subprocessor ranks spin, waiting for FAST simulations to run.
- # Only true for cores actually in use, not the ones supporting openfast openmp (marked as color_i = 1000000)
sys.stdout.flush()
if rank in comm_map_up.keys():
subprocessor_loop(comm_map_up)
@@ -252,10 +219,22 @@ def run_weis(fname_wt_input, fname_modeling_options, fname_opt_options, geometry
sys.stdout.flush()
# Send each core in use to a barrier synchronization
- if MPI and color_i < 1000000:
+ # Next, share WEIS outputs across all processors from rank=0 (root)
+ if MPI:
+ MPI.COMM_WORLD.Barrier()
+ rank = MPI.COMM_WORLD.Get_rank()
+ if rank != 0:
+ wt_opt = None
+ modeling_options = None
+ opt_options = None
+
+ # MPI.COMM_WORLD.bcast cannot broadcast out a full OpenMDAO problem
+ # We don't need it for now, but this might become an issue if we start
+ # stacking multiple WEIS calls on top of each other and we need to
+ # reuse wt_opt from one call to the next
+ modeling_options = MPI.COMM_WORLD.bcast(modeling_options, root = 0)
+ opt_options = MPI.COMM_WORLD.bcast(opt_options, root = 0)
MPI.COMM_WORLD.Barrier()
- if rank == 0:
- return wt_opt, modeling_options, opt_options
- else:
- return [], [], []
+ return wt_opt, modeling_options, opt_options
+
diff --git a/weis/glue_code/sbatch_eagle.sh b/weis/glue_code/sbatch_eagle.sh
deleted file mode 100644
index f4cf6d340..000000000
--- a/weis/glue_code/sbatch_eagle.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-#SBATCH --account=allocation_name
-#SBATCH --time=01:00:00
-#SBATCH --job-name=Design1
-#SBATCH --nodes=4
-#SBATCH --ntasks-per-node=36
-#SBATCH --mail-user user@nrel.gov
-#SBATCH --mail-type BEGIN,END,FAIL
-#SBATCH --output=Case1.%j.out
-####SBATCH --qos=high
-####SBATCH --partition=debug
-
-nDV=11 # Number of design variables (x2 for central difference)
-nOF=100 # Number of openfast runs per finite-difference evaluation
-nC=$(( nDV + nDV * nOF )) # Number of cores needed. Make sure to request an appropriate number of nodes = N / 36
-
-module purge
-module load comp-intel intel-mpi mkl
-module unload gcc
-
-source activate weis-env
-
-mpirun -np $nC python runWEIS.py
diff --git a/weis/glue_code/weis_args.py b/weis/glue_code/weis_args.py
new file mode 100644
index 000000000..cc1dbdeb9
--- /dev/null
+++ b/weis/glue_code/weis_args.py
@@ -0,0 +1,39 @@
+import argparse
+from openmdao.utils.mpi import MPI
+
+def weis_args():
+
+ # Set up argument parser
+ parser = argparse.ArgumentParser(description="Run WEIS driver with flag prepping for MPI run.")
+ # Add the flag
+ parser.add_argument("--preMPI", type=bool, default=False, help="Flag for preprocessing MPI settings (True or False).")
+ parser.add_argument("--maxnP", type=int, default=1, help="Maximum number of processors available.")
+ # Parse the arguments
+ args, _ = parser.parse_known_args()
+ # Use the flag in your script
+ if args.preMPI:
+ print("Preprocessor flag is set to True. Running preprocessing setting up MPI run.")
+ else:
+ print("Preprocessor flag is set to False. Run WEIS now.")
+
+ return args
+
+def get_max_procs(args):
+ # Set max number of processes, either set by user or extracted from MPI
+ if args.preMPI:
+ maxnP = args.maxnP
+ else:
+ if MPI:
+ maxnP = MPI.COMM_WORLD.Get_size()
+ else:
+ maxnP = 1
+ return maxnP
+
+def set_modopt_procs(modeling_options):
+ print('Applying the modeling option updates as overrides.')
+ modeling_override = {}
+ modeling_override['General'] = {}
+ modeling_override['General']['openfast_configuration'] = {}
+ modeling_override['General']['openfast_configuration']['nFD'] = modeling_options['General']['openfast_configuration']['nFD']
+ modeling_override['General']['openfast_configuration']['nOFp'] = modeling_options['General']['openfast_configuration']['nOFp']
+ return modeling_override
\ No newline at end of file
diff --git a/weis/inputs/modeling_schema.yaml b/weis/inputs/modeling_schema.yaml
index 4c4957241..5698fc0a3 100644
--- a/weis/inputs/modeling_schema.yaml
+++ b/weis/inputs/modeling_schema.yaml
@@ -80,6 +80,14 @@ properties:
type: boolean
default: False
description: Write standard output to own file. Output will not print to screen.
+ nFD:
+ type: integer
+ default: 1
+ description: Number of parallel finite differencing performed by OpenMDAO. WEIS sets this number once the preMPI keyword argument is set
+ nOFp:
+ type: integer
+ default: 1
+ description: Number of parallel OpenFAST runs performed by OpenMDAO. WEIS sets this number once the preMPI keyword argument is set
goodman_correction:
type: boolean
default: False