diff --git a/.github/workflows/development-pipeline.yml b/.github/workflows/development-pipeline.yml index 5d3c290..d8dcc8b 100644 --- a/.github/workflows/development-pipeline.yml +++ b/.github/workflows/development-pipeline.yml @@ -21,14 +21,6 @@ jobs: strategy: matrix: include: - - os: ubuntu-latest - python-version: 3.7 - python: xvfb-run python3 - pip_arg: "" - - os: ubuntu-latest - python-version: 3.8 - python: xvfb-run python3 - pip_arg: "" - os: ubuntu-latest python-version: 3.9 python: xvfb-run python3 @@ -41,11 +33,11 @@ jobs: python-version: 3.12 python: xvfb-run python3 pip_arg: "" - - os: macos-11 - python-version: 3.11 + - os: macos-13 + python-version: 3.12 python: python3 pip_arg: "" - - os: windows-2019 + - os: windows-2022 python-version: 3.11 python: python pip_arg: --user diff --git a/.gitignore b/.gitignore index 72d4adf..da24737 100644 --- a/.gitignore +++ b/.gitignore @@ -119,3 +119,5 @@ venv.bak/ # Hidding files starting with underscore except init _* !__init__.py +*.xml +*.iml diff --git a/data/example_files/AeroDyn_v3.0.dat b/data/example_files/AeroDyn_v3.0.dat new file mode 100644 index 0000000..e4c5964 --- /dev/null +++ b/data/example_files/AeroDyn_v3.0.dat @@ -0,0 +1,104 @@ +------- AERODYN v15 for OpenFAST INPUT FILE ----------------------------------------------- +NREL 5.0 MW offshore baseline aerodynamic input properties. +====== General Options ============================================================================ +False Echo - Echo the input to ".AD.ech"? (flag) +"default" DTAero - Time interval for aerodynamic calculations {or "default"} (s) + 2 WakeMod - Type of wake/induction model (switch) {0=none, 1=BEMT, 2=DBEMT, 3=OLAF} [WakeMod cannot be 2 or 3 when linearizing] + 2 AFAeroMod - Type of blade airfoil aerodynamics model (switch) {1=steady model, 2=Beddoes-Leishman unsteady model} [AFAeroMod must be 1 when linearizing] + 0 TwrPotent - Type tower influence on wind based on potential flow around the tower (switch) {0=none, 1=baseline potential flow, 2=potential flow with Bak correction} + 0 TwrShadow - Calculate tower influence on wind based on downstream tower shadow (switch) {0=none, 1=Powles model, 2=Eames model} +False TwrAero - Calculate tower aerodynamic loads? (flag) +False FrozenWake - Assume frozen wake during linearization? (flag) [used only when WakeMod=1 and when linearizing] +False CavitCheck - Perform cavitation check? (flag) [AFAeroMod must be 1 when CavitCheck=true] +False Buoyancy - Include buoyancy effects? 
(flag) +False CompAA - Flag to compute AeroAcoustics calculation [used only when WakeMod = 1 or 2] +"unused" AA_InputFile - AeroAcoustics input file [used only when CompAA=true] +====== Environmental Conditions =================================================================== +"default" AirDens - Air density (kg/m^3) +"default" KinVisc - Kinematic viscosity of working fluid (m^2/s) +"default" SpdSound - Speed of sound in working fluid (m/s) +"default" Patm - Atmospheric pressure (Pa) [used only when CavitCheck=True] +"default" Pvap - Vapour pressure of working fluid (Pa) [used only when CavitCheck=True] +====== Blade-Element/Momentum Theory Options ====================================================== [unused when WakeMod=0 or 3] + 2 SkewMod - Type of skewed-wake correction model (switch) {1=uncoupled, 2=Pitt/Peters, 3=coupled} [unused when WakeMod=0 or 3] +"default" SkewModFactor - Constant used in Pitt/Peters skewed wake model {or "default" is 15/32*pi} (-) [used only when SkewMod=2; unused when WakeMod=0 or 3] +True TipLoss - Use the Prandtl tip-loss model? (flag) [unused when WakeMod=0 or 3] +True HubLoss - Use the Prandtl hub-loss model? (flag) [unused when WakeMod=0 or 3] +True TanInd - Include tangential induction in BEMT calculations? (flag) [unused when WakeMod=0 or 3] +False AIDrag - Include the drag term in the axial-induction calculation? (flag) [unused when WakeMod=0 or 3] +False TIDrag - Include the drag term in the tangential-induction calculation? (flag) [unused when WakeMod=0,3 or TanInd=FALSE] +"Default" IndToler - Convergence tolerance for BEMT nonlinear solve residual equation {or "default"} (-) [unused when WakeMod=0 or 3] + 100 MaxIter - Maximum number of iteration steps (-) [unused when WakeMod=0] +====== Dynamic Blade-Element/Momentum Theory Options ============================================== [used only when WakeMod=2] + 2 DBEMT_Mod - Type of dynamic BEMT (DBEMT) model {1=constant tau1, 2=time-dependent tau1} (-) [used only when WakeMod=2] + 4 tau1_const - Time constant for DBEMT (s) [used only when WakeMod=2 and DBEMT_Mod=1] +====== OLAF -- cOnvecting LAgrangian Filaments (Free Vortex Wake) Theory Options ================== [used only when WakeMod=3] +"unused" OLAFInputFileName - Input file for OLAF [used only when WakeMod=3] +====== Beddoes-Leishman Unsteady Airfoil Aerodynamics Options ===================================== [used only when AFAeroMod=2] + 3 UAMod - Unsteady Aero Model Switch (switch) {1=Baseline model (Original), 2=Gonzalez’s variant (changes in Cn,Cc,Cm), 3=Minemma/Pierce variant (changes in Cc and Cm)} [used only when AFAeroMod=2] +True FLookup - Flag to indicate whether a lookup for f' will be calculated (TRUE) or whether best-fit exponential equations will be used (FALSE); if FALSE S1-S4 must be provided in airfoil input files (flag) [used only when AFAeroMod=2] +0.15 UAStartRad - Starting radius for dynamic stall (fraction of rotor radius) [used only when AFAeroMod=2] +1.0 UAEndRad - Ending radius for dynamic stall (fraction of rotor radius) [used only when AFAeroMod=2] +====== Airfoil Information ========================================================================= + 1 AFTabMod - Interpolation method for multiple airfoil tables {1=1D interpolation on AoA (first table only); 2=2D interpolation on AoA and Re; 3=2D interpolation on AoA and UserProp} (-) + 1 InCol_Alfa - The column in the airfoil tables that contains the angle of attack (-) + 2 InCol_Cl - The column in the airfoil tables that contains the lift coefficient (-) + 3 
InCol_Cd - The column in the airfoil tables that contains the drag coefficient (-) + 4 InCol_Cm - The column in the airfoil tables that contains the pitching-moment coefficient; use zero if there is no Cm column (-) + 0 InCol_Cpmin - The column in the airfoil tables that contains the Cpmin coefficient; use zero if there is no Cpmin column (-) + 8 NumAFfiles - Number of airfoil files used (-) +"../5MW_Baseline/Airfoils/Cylinder1.dat" AFNames - Airfoil file names (NumAFfiles lines) (quoted strings) +"../5MW_Baseline/Airfoils/Cylinder2.dat" +"../5MW_Baseline/Airfoils/DU40_A17.dat" +"../5MW_Baseline/Airfoils/DU35_A17.dat" +"../5MW_Baseline/Airfoils/DU30_A17.dat" +"../5MW_Baseline/Airfoils/DU25_A17.dat" +"../5MW_Baseline/Airfoils/DU21_A17.dat" +"../5MW_Baseline/Airfoils/NACA64_A17.dat" +====== Rotor/Blade Properties ===================================================================== +True UseBlCm - Include aerodynamic pitching moment in calculations? (flag) +"../5MW_Baseline/NRELOffshrBsline5MW_AeroDyn_blade.dat" ADBlFile(1) - Name of file containing distributed aerodynamic properties for Blade #1 (-) +"../5MW_Baseline/NRELOffshrBsline5MW_AeroDyn_blade.dat" ADBlFile(2) - Name of file containing distributed aerodynamic properties for Blade #2 (-) [unused if NumBl < 2] +"../5MW_Baseline/NRELOffshrBsline5MW_AeroDyn_blade.dat" ADBlFile(3) - Name of file containing distributed aerodynamic properties for Blade #3 (-) [unused if NumBl < 3] +====== Hub Properties ============================================================================== [used only when Buoyancy=True] +0.0 VolHub - Hub volume (m^3) +0.0 HubCenBx - Hub center of buoyancy x direction offset (m) +====== Nacelle Properties ========================================================================== [used only when Buoyancy=True] +0.0 VolNac - Nacelle volume (m^3) +0,0,0 NacCenB - Position of nacelle center of buoyancy from yaw bearing in nacelle coordinates (m) +====== Tail fin Aerodynamics ======================================================================== +False TFinAero - Calculate tail fin aerodynamics model (flag) +"unused" TFinFile - Input file for tail fin aerodynamics [used only when TFinAero=True] +====== Tower Influence and Aerodynamics ============================================================ [used only when TwrPotent/=0, TwrShadow/=0, TwrAero=True, or Buoyancy=True] + 12 NumTwrNds - Number of tower nodes used in the analysis (-) [used only when TwrPotent/=0, TwrShadow/=0, TwrAero=True, or Buoyancy=True] +TwrElev TwrDiam TwrCd TwrTI TwrCb !TwrTI used only with TwrShadow=2, TwrCb used only with Buoyancy=True +(m) (m) (-) (-) (-) +0.0000000E+00 6.0000000E+00 1.0000000E+00 1.0000000E-01 0.0 +8.5261000E+00 5.7870000E+00 1.0000000E+00 1.0000000E-01 0.0 +1.7053000E+01 5.5740000E+00 1.0000000E+00 1.0000000E-01 0.0 +2.5579000E+01 5.3610000E+00 1.0000000E+00 1.0000000E-01 0.0 +3.4105000E+01 5.1480000E+00 1.0000000E+00 1.0000000E-01 0.0 +4.2633000E+01 4.9350000E+00 1.0000000E+00 1.0000000E-01 0.0 +5.1158000E+01 4.7220000E+00 1.0000000E+00 1.0000000E-01 0.0 +5.9685000E+01 4.5090000E+00 1.0000000E+00 1.0000000E-01 0.0 +6.8211000E+01 4.2960000E+00 1.0000000E+00 1.0000000E-01 0.0 +7.6738000E+01 4.0830000E+00 1.0000000E+00 1.0000000E-01 0.0 +8.5268000E+01 3.8700000E+00 1.0000000E+00 1.0000000E-01 0.0 +8.7600000E+01 3.8700000E+00 1.0000000E+00 1.0000000E-01 0.0 +====== Outputs ==================================================================================== +False SumPrint - Generate a summary file listing input options and 
interpolated properties to ".AD.sum"? (flag) + 0 NBlOuts - Number of blade node outputs [0 - 9] (-) + 1 BlOutNd - Blade nodes whose values will be output (-) + 0 NTwOuts - Number of tower node outputs [0 - 9] (-) + 1 TwOutNd - Tower nodes whose values will be output (-) + OutList - The next line(s) contains a list of output parameters. See OutListParameters.xlsx for a listing of available output channels, (-) +"RtAeroPwr" +"RtAeroCp" +"RtAeroCt" +END of input file (the word "END" must appear in the first 3 columns of this last OutList line) +====== Outputs for all blade stations (same ending as above for B1N1.... =========================== [optional section] + 0 BldNd_BladesOut - Number of blades to output all node information at. Up to number of blades on turbine. (-) + "All" BldNd_BlOutNd - Future feature will allow selecting a portion of the nodes to output. Not implemented yet. (-) + OutListAD - The next line(s) contains a list of output parameters. See OutListParameters.xlsx for a listing of available output channels, (-) +END of input file (the word "END" must appear in the first 3 columns of this last OutList line) +--------------------------------------------------------------------------------------- diff --git a/openfast_toolbox/airfoils/Polar.py b/openfast_toolbox/airfoils/Polar.py index 3fb7038..a2edc7e 100644 --- a/openfast_toolbox/airfoils/Polar.py +++ b/openfast_toolbox/airfoils/Polar.py @@ -84,15 +84,18 @@ def __init__(self, filename=None, alpha=None, cl=None, cd=None, cm=None, Re=None cd = df['Cd'].values cm = df['Cm'].values if 'fs' in df.keys(): - print('[INFO] Using separating function from input file.') + if verbose: + print('[INFO] Using separating function from input file.') self.fs = df['fs'].values self._fs_lock = True if 'Cl_fs' in df.keys(): - print('[INFO] Using Cl fully separated from input file.') + if verbose: + print('[INFO] Using Cl fully separated from input file.') self.cl_fs =df['Cl_fs'].values self._cl_fs_lock = True if 'Cl_inv' in df.keys(): - print('[INFO] Using Cl inviscid from input file.') + if verbose: + print('[INFO] Using Cl inviscid from input file.') self.cl_inv = df['Cl_inv'].values self._cl_inv_lock = True # TODO we need a trigger if cl_inv provided, we should get alpha0 and slope from it @@ -667,7 +670,7 @@ def __getCM(self, i, cmCoef, alpha, cl_ext, cd_ext, alpha_low_deg, alpha_high_de print("Angle encountered for which there is no CM table value " "(near +/-180 deg). Program will stop.") return cm_new - def unsteadyParams(self, window_offset=None, nMin=720): + def unsteadyParams(self, window_offset=None, nMin=720, verbose=False): """compute unsteady aero parameters used in AeroDyn input file TODO Questions to solve: @@ -737,7 +740,8 @@ def unsteadyParams(self, window_offset=None, nMin=720): try: alpha0cn = _find_alpha0(alpha, cn, window, direction='up', value_if_constant = 0.) 
except NoCrossingException: - print("[WARN] Polar: Cn unsteady, cannot find zero crossing with up direction, trying down direction") + if verbose: + print("[WARN] Polar: Cn unsteady, cannot find zero crossing with up direction, trying down direction") alpha0cn = _find_alpha0(alpha, cn, window, direction='down') # checks for inppropriate data (like cylinders) @@ -751,7 +755,8 @@ def unsteadyParams(self, window_offset=None, nMin=720): try: a_MaxUpp, cn_MaxUpp, a_MaxLow, cn_MaxLow = _find_max_points(alpha, cn, alpha0, method="inflections") except NoStallDetectedException: - print('[WARN] Polar: Cn unsteady, cannot find stall based on inflections, using min and max') + if verbose: + print('[WARN] Polar: Cn unsteady, cannot find stall based on inflections, using min and max') a_MaxUpp, cn_MaxUpp, a_MaxLow, cn_MaxLow = _find_max_points(alpha, cn, alpha0, method="minmax") # --- cn slope @@ -790,7 +795,8 @@ def unsteadyParams(self, window_offset=None, nMin=720): a_f07_Upp = xInter[2] a_f07_Low = xInter[0] else: - print('[WARN] Polar: Cn unsteady, cn_f does not intersect cn 3 times. Intersections:{}.'.format(xInter)) + if verbose: + print('[WARN] Polar: Cn unsteady, cn_f does not intersect cn 3 times. Intersections:{}.'.format(xInter)) a_f07_Upp = abs(xInter[0]) a_f07_Low = -abs(xInter[0]) @@ -1745,6 +1751,8 @@ def _find_linear_region(x, y, nMin, x0=None): iEnd = j + nMin if x0 is not None: sl = np.linalg.lstsq(x[iStart:iEnd], y[iStart:iEnd], rcond=None)[0][0] + if not np.isscalar(sl): + sl = sl[0] slp[iStart, j] = sl off[iStart, j] = x0 y_lin = x[iStart:iEnd] * sl diff --git a/openfast_toolbox/airfoils/tests/test_run_Examples.py b/openfast_toolbox/airfoils/tests/test_run_Examples.py index c705186..887e8dd 100644 --- a/openfast_toolbox/airfoils/tests/test_run_Examples.py +++ b/openfast_toolbox/airfoils/tests/test_run_Examples.py @@ -19,14 +19,17 @@ def test_run_examples(self): # Add tests to class MyDir=os.path.dirname(__file__) files = glob.glob(os.path.join(MyDir,'../examples/[a-zA-Z]*.py')) - import matplotlib.pyplot as plt print('\n--------------------------------------------------------------') + import matplotlib.pyplot as plt for f in files: - print('Running example script: {}'.format(f)) + print('Running: {}'.format(os.path.relpath(f, MyDir))) if hasattr(self,'subTest'): with self.subTest(filename=os.path.basename(f)): execfile(f, {'__name__': '__test__', 'print': lambda *_:None}) - plt.close('all') + try: + plt.close('all') + except: + pass if __name__ == '__main__': diff --git a/openfast_toolbox/converters/examples/Main_AD30_AD40.py b/openfast_toolbox/converters/examples/Main_AD30_AD40.py new file mode 100644 index 0000000..f8ec668 --- /dev/null +++ b/openfast_toolbox/converters/examples/Main_AD30_AD40.py @@ -0,0 +1,23 @@ +""" +Converts a AD3.x file to AD4.0 +""" +import os +import numpy as np +from openfast_toolbox.converters.versions.aerodyn import ad30_to_ad40 + +# Get current directory so this script can be called from any location +scriptDir=os.path.dirname(__file__) + +fileold = os.path.join(scriptDir, '../../../data/example_files/AeroDyn_v3.0.dat') +filenew = '_aerodyn_temp.dat' + +if __name__ == '__main__': + ad = ad30_to_ad40(fileold, filenew, verbose=True, overwrite=False) + +if __name__ == '__test__': + ad = ad30_to_ad40(fileold, filenew, verbose=False, overwrite=False) + # small tests + np.testing.assert_equal(ad['Wake_Mod'], 1) + np.testing.assert_equal(ad['UA_Mod'], 3) + np.testing.assert_equal(ad['DBEMT_Mod'], 2) + os.remove(filenew) diff --git 
a/openfast_toolbox/converters/versions/__init__.py b/openfast_toolbox/converters/versions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openfast_toolbox/converters/versions/aerodyn.py b/openfast_toolbox/converters/versions/aerodyn.py new file mode 100644 index 0000000..fcd001a --- /dev/null +++ b/openfast_toolbox/converters/versions/aerodyn.py @@ -0,0 +1,138 @@ +from openfast_toolbox.io.fast_input_file import FASTInputFile + +def ad30_to_ad40(fileold, filenew=None, verbose=True, overwrite=False): + """ + Convert AD 3.0 file to AD 4.0 + NOTE: beta version. + + INPUTS: + - fileold: filepath to old input file + - filenew: filepath to new input file (can be the same as old) + If None, and overwrite is false, a file with '_new' is written + - overwrite: if True, filenew = fileold + """ + if filenew is None: + if overwrite: + filenew = fileold + else: + filenew = fileold[:-4]+'_new.dat' + if verbose: + print('> Converting:', fileold) + print('> to :', filenew) + # --- + f = FASTInputFile(fileold, IComment=[1]) + + # --- Checking if conversion already done + try: + iExisting = [f.getID('Wake_Mod'), f.getID('BEM_Mod'), f.getID('Skew_Mod'), f.getID('SectAvg'), f.getID('AoA34')] + print('[WARN] File already converted, skipping: ', fileold) + # We still write + f.write(filenew) + return + except KeyError: + pass + + # --- Extracting old data + iSkewMod = f.getID('SkewMod') + WakeMod = f.pop('WakeMod')['value'] + SkewMod = f.pop('SkewMod')['value'] + AFAeroMod = f.pop('AFAeroMod')['value'] + UAMod = f.pop('UAMod')['value'] + FrozenWake = f.pop('FrozenWake')['value'] + DBEMTMod = f.pop('DBEMT_Mod')['value'] + SkewModFactor = f.pop('SkewModFactor')['value'] + + # --- Default + BEM_Mod = 1 + DBEMT_Mod = 0 + AoA34 = True + SectAvg = False + SectAvgWeighting = 1 + SectAvgNPoints = 'default' + Skew_Mod = 0 + SkewMomCorr = False + SkewRedistr_Mod = 1 + AoA34 = False + UA_Mod = 0 + + # --- Logic + Wake_Mod = {None:1, 0:0, 1:1, 2:1, 3:3}[WakeMod] + Skew_Mod = {None:1, 0:-1, 1:0, 2:1}[SkewMod] + UA_Mod = {None:0, 1:0, 2:UAMod}[AFAeroMod] + if WakeMod==1: + DBEMT_Mod=0 + BEM_Mod=1 + if WakeMod==2: + DBEMT_Mod = DBEMTMod + if FrozenWake is not None and FrozenWake: + DBEMT_Mod=-1 + AoA34 = UA_Mod>0 # Legacy behavior if UA is on, use AoA34, if it's off use AoA QC + + + # --- Changing file + f.data[0]['value'] = '------- AERODYN INPUT FILE --------------------------------------------------------------------------' +# f.data[1]['isComment'] = True +# f.data[1]['value'] = (str(f.data[1]['value']) +' '+ str(f.data[1]['label']) + ' '+ str(f.data[1]['descr'])).strip() + f.insertKeyValAfter('DTAero', 'Wake_Mod', Wake_Mod, 'Wake/induction model (switch) {0=none, 1=BEMT, 3=OLAF} [WakeMod cannot be 2 or 3 when linearizing]') + + #i = f.getID('Pvap') # NOTE might not exist for old files.. 
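The conversion above is driven by a set of new in-place editing helpers that this diff adds to FASTInputFile further below (pop, getID, insertComment, insertKeyVal, insertKeyValAfter). A minimal sketch of that editing pattern, using hypothetical file names; the keys mirror the AeroDyn example file added above:

    from openfast_toolbox.io.fast_input_file import FASTInputFile

    f = FASTInputFile('AeroDyn_old.dat', IComment=[1])   # hypothetical AD 3.x input file
    WakeMod = f.pop('WakeMod')['value']                   # drop a legacy key, keep its value
    f.insertKeyValAfter('DTAero', 'Wake_Mod', 1,          # add the replacement key after DTAero
                        'Wake/induction model (switch)')
    i = f.getID('TipLoss')                                # index of an existing key
    f.insertComment(i, '--- Example section comment')     # comment inserted before TipLoss
    f.insertKeyVal(i+1, 'BEM_Mod', 1, 'BEM model (switch)')
    f.write('AeroDyn_edited.dat')                         # hypothetical output file
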
+ i = f.getID('TipLoss')-2 + f.pop(i+1) # Remove 'BEMT comment' + f.insertComment(i+1,'====== Blade-Element/Momentum Theory Options ====================================================== [unused when WakeMod=0 or 3, except for BEM_Mod]') + f.insertKeyVal (i+2, 'BEM_Mod', BEM_Mod, 'BEM model {1=legacy NoSweepPitchTwist, 2=polar} (switch) [used for all Wake_Mod to determine output coordinate system]') + f.insertComment(i+3, '--- Skew correction') + f.insertKeyVal (i+4, 'Skew_Mod' , Skew_Mod ,'Skew model {0=No skew model, -1=Remove non-normal component for linearization, 1=skew model active}') + f.insertKeyVal (i+5, 'SkewMomCorr' , False ,'Turn the skew momentum correction on or off [used only when Skew_Mod=1]') + f.insertKeyVal (i+6, 'SkewRedistr_Mod' , 'default' ,'Type of skewed-wake correction model (switch) {0=no redistribution, 1=Glauert/Pitt/Peters, default=1} [used only when Skew_Mod=1]') + f.insertKeyVal (i+7, 'SkewRedistrFactor', SkewModFactor,'Constant used in Pitt/Peters skewed wake model {or "default" is 15/32*pi} (-) [used only when Skew_Mod=1 and SkewRedistr_Mod=1]') + f.insertComment(i+8, '--- BEM algorithm ') + + i = f.getID('MaxIter') + f.pop(i+1) # Remove 'DBEMT comment' + f.insertComment(i+1, '--- Shear correction') + f.insertKeyVal(i+2, 'SectAvg' , SectAvg , 'Use sector averaging (flag)') + f.insertKeyVal(i+3, 'SectAvgWeighting' , SectAvgWeighting , 'Weighting function for sector average {1=Uniform, default=1} within a sector centered on the blade (switch) [used only when SectAvg=True] ') + f.insertKeyVal(i+4, 'SectAvgNPoints' , SectAvgNPoints , 'Number of points per sectors (-) {default=5} [used only when SectAvg=True] ') + f.insertKeyVal(i+5, 'SectAvgPsiBwd' , 'default' , 'Backward azimuth relative to blade where the sector starts (<=0) {default=-60} (deg) [used only when SectAvg=True]') + f.insertKeyVal(i+6, 'SectAvgPsiFwd' , 'default' , 'Forward azimuth relative to blade where the sector ends (>=0) {default=60} (deg) [used only when SectAvg=True]') + f.insertComment(i+7, '--- Dynamic wake/inflow') + f.insertKeyVal (i+8, 'DBEMT_Mod', DBEMT_Mod, 'Type of dynamic BEMT (DBEMT) model {0=No Dynamic Wake, -1=Frozen Wake for linearization, 1:constant tau1, 2=time-dependent tau1, 3=constant tau1 with continuous formulation} (-)') + f.data[f.getID('tau1_const')]['descr']= 'Time constant for DBEMT (s) [used only when DBEMT_Mod=1 or 3]' + + #i = f.getID('OLAFInputFileName') + i = f.getID('FLookup')-2 + f.pop(i+1) # Remove 'BEDDOES comment' + f.insertComment(i+1, '====== Unsteady Airfoil Aerodynamics Options ====================================================') + f.insertKeyVal (i+2, 'AoA34' , AoA34, 'Sample the angle of attack (AoA) at the 3/4 chord or the AC point {default=True} [always used]') + f.insertKeyVal (i+3, 'UA_Mod', UA_Mod, 'Unsteady Aero Model Switch (switch) {0=Quasi-steady (no UA), 2=B-L Gonzalez, 3=B-L Minnema/Pierce, 4=B-L HGM 4-states, 5=B-L HGM+vortex 5 states, 6=Oye, 7=Boeing-Vertol}') + if verbose: + print(' -------------- New AeroDyn inputs (with new meaning):') + print('Wake_Mod : {}'.format(Wake_Mod )) + print('BEM_Mod : {}'.format(BEM_Mod )) + print('SectAvg : {}'.format(SectAvg )) + print('SectAvgWeighting : {}'.format(SectAvgWeighting )) + print('SectAvgNPoints : {}'.format(SectAvgNPoints )) + print('DBEMT_Mod : {}'.format(DBEMT_Mod )) + print('Skew_Mod : {}'.format(Skew_Mod )) + print('SkewMomCorr : {}'.format(SkewMomCorr )) + print('SkewRedistr_Mod : {}'.format(SkewRedistr_Mod )) + print('AoA34 : {}'.format(AoA34 )) + print('UA_Mod : {}'.format(UA_Mod 
)) + print('--------------- Old AeroDyn inputs:') + print('WakeMod: {}'.format(WakeMod)) + print('SkewMod: {}'.format(SkewMod)) + print('AFAeroMod: {}'.format(AFAeroMod)) + print('FrozenWake:{}'.format(FrozenWake)) + print('DBEMT_Mod {}'.format(DBEMTMod)) + print('UAMod: {}'.format(UAMod)) + print('-----------------------------------------------------') + + # --- Write new file + f.write(filenew) + return f + + +if __name__ == '__main__': + fileold='C:/W0/Work-Old/2018-NREL/BAR-Cone-BEM/openfast-ad-neo/reg_tests/r-test/modules/aerodyn/py_ad_B1n2_OLAF/AD_old.dat' + filenew='C:/W0/Work-Old/2018-NREL/BAR-Cone-BEM/openfast-ad-neo/reg_tests/r-test/modules/aerodyn/py_ad_B1n2_OLAF/AD_conv.dat' + ad30_to_ad40(fileold, filenew) diff --git a/openfast_toolbox/io/__init__.py b/openfast_toolbox/io/__init__.py index b86f3a6..7d2d95c 100644 --- a/openfast_toolbox/io/__init__.py +++ b/openfast_toolbox/io/__init__.py @@ -85,6 +85,7 @@ def fileFormats(userpath=None, ignoreErrors=False, verbose=False): from .hawcstab2_pwr_file import HAWCStab2PwrFile from .hawcstab2_ind_file import HAWCStab2IndFile from .hawcstab2_cmb_file import HAWCStab2CmbFile + from .gnuplot_file import GNUPlotFile from .mannbox_file import MannBoxFile from .flex_blade_file import FLEXBladeFile from .flex_profile_file import FLEXProfileFile @@ -141,6 +142,7 @@ def addFormat(priority, fmt): addFormat(60, FileFormat(NetCDFFile)) addFormat(60, FileFormat(VTKFile)) addFormat(60, FileFormat(TDMSFile)) + addFormat(60, FileFormat(GNUPlotFile)) addFormat(60, FileFormat(ParquetFile)) addFormat(60, FileFormat(PickleFile)) addFormat(70, FileFormat(CactusFile)) diff --git a/openfast_toolbox/io/bladed_out_file.py b/openfast_toolbox/io/bladed_out_file.py index d1f0dcd..a7b1564 100644 --- a/openfast_toolbox/io/bladed_out_file.py +++ b/openfast_toolbox/io/bladed_out_file.py @@ -72,12 +72,11 @@ def read_bladed_sensor_file(sensorfile): try: # Combine the strings into one string combined_string = ''.join(sensorLines) - - # Search everything betwee AXITICK and AXISLAB with a regex pattern - t_line = re.search(r'(?<=AXITICK).+?(?=AXISLAB)', combined_string, flags=re.DOTALL) - t_line=t_line.group(0) + # Search for a regex pattern that spans across multiple strings + line = re.search(r'(?<=AXITICK).+?(?=(AXISLAB|NVARS))', combined_string, flags=re.DOTALL) + line=line.group(0) # Replace consecutive whitespace characters with a single space - t_line = re.sub(r'\s+', ' ', t_line) + t_line = re.sub(r'\s+', ' ', line) except: pass @@ -107,6 +106,7 @@ def read_bladed_sensor_file(sensorfile): except: pass def repUnits(s): + s = s.replace('[[','[').replace(']]',']') s = s.replace('TT','s^2').replace('T','s').replace('A','rad') s = s.replace('P','W').replace('L','m').replace('F','N').replace('M','kg') return s @@ -184,6 +184,10 @@ def read_bladed_output(sensorFilename, readTimeFilesOnly=False): data = np.fromfile(fid_2, sensorInfo['Precision']) try: + if nMajor==0: + nMajor=int(np.floor(len(data)/nSections/nSensors)) + data=data[0:nMajor*nSections*nSensors] + sensorInfo['nMajor']=nMajor if sensorInfo['NDIMENS'] == 3: data = np.reshape(data,(nMajor, nSections, nSensors), order='C') diff --git a/openfast_toolbox/io/csv_file.py b/openfast_toolbox/io/csv_file.py index 5621f03..5bb398e 100644 --- a/openfast_toolbox/io/csv_file.py +++ b/openfast_toolbox/io/csv_file.py @@ -1,8 +1,12 @@ import os - -from .file import File, WrongFormatError import pandas as pd +try: + from .file import File, WrongFormatError +except: + File=dict + WrongFormatError = type('WrongFormatError', 
(Exception,),{}) + class CSVFile(File): """ Read/write a CSV file. @@ -51,7 +55,23 @@ def __init__(self, filename=None, sep=None, colNames=None, commentChar=None, com raise Exception('Provide either `commentChar` or `commentLines` for CSV file types') if (len(self.colNames)>0) and (self.colNamesLine is not None): raise Exception('Provide either `colNames` or `colNamesLine` for CSV file types') - super(CSVFile, self).__init__(filename=filename,**kwargs) + if filename: + self.read(filename, **kwargs) + else: + self.filename = None + + def read(self, filename=None, **kwargs): + if filename: + self.filename = filename + if not self.filename: + raise Exception('No filename provided') + if not os.path.isfile(self.filename): + raise OSError(2,'File not found:',self.filename) + if os.stat(self.filename).st_size == 0: + raise EmptyFileError('File is empty:',self.filename) + # Calling children function + self._read(**kwargs) + def _read(self): COMMENT_CHAR=['#','!',';'] @@ -283,3 +303,47 @@ def __repr__(self): def _toDataFrame(self): return self.data + def to2DFields(self, **kwargs): + import xarray as xr + if len(kwargs.keys())>0: + print('[WARN] CSVFile: to2DFields: ignored keys: ',kwargs.keys()) + if len(self.data)==0: + return None + M = self.data.values + if self.data.columns[0].lower()=='index': + M = M[:,1:] + s1 = 'rows' + s2 = 'columns' + ds = xr.Dataset(coords={s1: range(M.shape[0]), s2: range(M.shape[1])}) + ds['data'] = ([s1, s2], M) + return ds + + # -------------------------------------------------------------------------------- + # --- Properties NOTE: copy pasted from file.py to make this file standalone.. + # -------------------------------------------------------------------------------- + @property + def size(self): + return os.path.getsize(self.filename) + @property + def encoding(self): + import codecs + import chardet + """ Detects encoding""" + try: + byts = min(32, self.size) + except TypeError: + return None + with open(self.filename, 'rb') as f: + raw = f.read(byts) + if raw.startswith(codecs.BOM_UTF8): + return 'utf-8-sig' + else: + result = chardet.detect(raw) + return result['encoding'] + +if __name__ == '__main__': + f = CSVFile('C:/Work/Courses/440/project_solution/data/CFD_u.dat') + print(f) + ds = f.to2DFields() + print(ds) + diff --git a/openfast_toolbox/io/fast_input_file.py b/openfast_toolbox/io/fast_input_file.py index 97ff369..21a5ae4 100644 --- a/openfast_toolbox/io/fast_input_file.py +++ b/openfast_toolbox/io/fast_input_file.py @@ -19,6 +19,7 @@ class BrokenFormatError(Exception): pass TABTYPE_NUM_SUBDYNOUT = 7 TABTYPE_MIX_WITH_HEADER = 6 TABTYPE_FIL = 3 +TABTYPE_OUTLIST = 33 TABTYPE_FMT = 9999 # TODO @@ -140,10 +141,10 @@ def __iter__(self): def __next__(self): return self.fixedfile.__next__() - def __setitem__(self,key,item): - return self.fixedfile.__setitem__(key,item) + def __setitem__(self, key, item): + return self.fixedfile.__setitem__(key, item) - def __getitem__(self,key): + def __getitem__(self, key): return self.fixedfile.__getitem__(key) def __repr__(self): @@ -151,10 +152,61 @@ def __repr__(self): #s ='Fast input file: {}\n'.format(self.filename) #return s+'\n'.join(['{:15s}: {}'.format(d['label'],d['value']) for i,d in enumerate(self.data)]) + def delete(self, key, error=False): + self.pop(key, error=error) + + def pop(self, key, error=False): + if isinstance(key, int): + i = key + else: + i = self.fixedfile.getIDSafe(key) + if i>=0: + d = self.data[i] + del self.data[i] + return d + else: + if error: + raise Exception('Key `{}` not found in 
file:{}'.format(key, self.filename)) + else: + print('[WARN] Key `{}` not found in file:{}'.format(key, self.filename)) + return None + + def insertComment(self, i, comment='', error=False): + d = getDict() + d['value'] = comment + d['label'] = '' + d['descr'] = '' + d['isComment'] = True + try: + self.data.insert(i, d) + except: + import pdb; pdb.set_trace() + + def insertKeyVal(self, i, key, value, description='', error=False): + d = getDict() + d['value'] = value + d['label'] = key + d['descr'] = description + d['isComment'] = False + self.data.insert(i, d) + + def insertKeyValAfter(self, key_prev, key, value, description, error=False): + i = self.fixedfile.getIDSafe(key_prev) + if i<0: + if error: + raise Exception('Key `{}` not found in file:{}'.format(key_prev, self.filename)) + else: + print('[WARN] Key `{}` not found in file:{}'.format(key_prev, self.filename)) + self.insertKeyVal(i+1, key, value, description, error=error) + + + + # --------------------------------------------------------------------------------} # --- BASE INPUT FILE # --------------------------------------------------------------------------------{ + class FASTInputFileBase(File): """ Read/write an OpenFAST input file. The object behaves like a dictionary. @@ -185,12 +237,12 @@ def defaultExtensions(): def formatName(): return 'FAST input file Base' - def __init__(self, filename=None, **kwargs): + def __init__(self, filename=None, IComment=None, **kwargs): self._size=None self.setData() # Init data if filename: self.filename = filename - self.read() + self.read(IComment=IComment) def setData(self, filename=None, data=None, hasNodal=False, module=None): """ Set the data of this object. This object shouldn't store anything else. """ @@ -260,12 +312,14 @@ def __setitem__(self, key, item): pass self.data[i]['value'] = item - def __getitem__(self,key): + def __getitem__(self, key): i = self.getID(key) return self.data[i]['value'] def __repr__(self): - s ='Fast input file base: {}\n'.format(self.filename) + s='<{} object - Base> with attributes:\n'.format(type(self).__name__) + s =' - filename: {}\n'.format(self.filename) + s =' - dict keys/values: \n' return s+'\n'.join(['{:15s}: {}'.format(d['label'],d['value']) for i,d in enumerate(self.data)]) def addKeyVal(self, key, val, descr=None): @@ -319,7 +373,7 @@ def _IComment(self): return [1] # Typical OpenFAST files have comment on second line [1] - def read(self, filename=None): + def read(self, filename=None, IComment=None): if filename: self.filename = filename if self.filename: @@ -327,11 +381,13 @@ def read(self, filename=None): raise OSError(2,'File not found:',self.filename) if os.stat(self.filename).st_size == 0: raise EmptyFileError('File is empty:',self.filename) - self._read() + self._read(IComment=IComment) else: raise Exception('No filename provided') - def _read(self): + def _read(self, IComment=None): + if IComment is None: + IComment=[] # --- Tables that can be detected based on the "Value" (first entry on line) # TODO members for BeamDyn with mutliple key point ####### TODO PropSetID is Duplicate SubDyn and used in HydroDyn @@ -422,6 +478,11 @@ def _read(self): labOffset='' while i0 \ @@ -442,20 +503,19 @@ def _read(self): else: d['label'] = firstword d['descr'] = remainer - d['tabType'] = TABTYPE_FIL # TODO + d['tabType'] = TABTYPE_OUTLIST # TODO d['value'] = ['']+OutList self.data.append(d) if i>=len(lines): + self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)') + 
self.addComment('---------------------------------------------------------------------------------------') break - # --- Here we cheat and force an exit of the input file # The reason for this is that some files have a lot of things after the END, which will result in the file being intepreted as a wrong format due to too many comments if i+20 or lines[i+2].lower().find('bldnd_bloutnd')>0): self.hasNodal=True else: - self.data.append(parseFASTInputLine('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)',i+1)) - self.data.append(parseFASTInputLine('---------------------------------------------------------------------------------------',i+2)) - break + pass elif line.upper().find('SSOUTLIST' )>0 or line.upper().find('SDOUTLIST' )>0: # SUBDYN Outlist doesn not follow regular format self.data.append(parseFASTInputLine(line,i)) @@ -468,8 +528,8 @@ def _read(self): d['value']=o self.data.append(d) # --- Here we cheat and force an exit of the input file - self.data.append(parseFASTInputLine('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)',i+1)) - self.data.append(parseFASTInputLine('---------------------------------------------------------------------------------------',i+2)) + self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)') + self.addComment('---------------------------------------------------------------------------------------') break elif line.upper().find('ADDITIONAL STIFFNESS')>0: # TODO, lazy implementation so far, MAKE SUB FUNCTION @@ -491,18 +551,33 @@ def _read(self): i+=1; self.readBeamDynProps(lines,i) return + elif line.upper().find('GLBDCM')>0: + # BeamDyn DCM has no label..... + self.addComment(lines[i]); i+=1 + self.addComment(lines[i]); i+=1 + d = getDict() + d['label'] = 'GlbDCM' + d['tabType'] = TABTYPE_NUM_NO_HEADER + nTabLines = 3 + nHeaders = 0 + d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename, lines[i:i+nTabLines],nTabLines, i, nHeaders, tableType='num', nOffset=0) + i=i+3 + self.data.append(d) + continue + + #---The following 3 by 3 matrix is the direction cosine matirx ,GlbDCM(3,3), elif line.upper().find('OUTPUTS')>0: if 'Points' in self.keys() and 'dtM' in self.keys(): OutList,i = parseFASTOutList(lines,i+1) d = getDict() d['label'] = 'Outlist' d['descr'] = '' - d['tabType'] = TABTYPE_FIL # TODO + d['tabType'] = TABTYPE_OUTLIST d['value'] = OutList self.addComment('------------------------ OUTPUTS --------------------------------------------') self.data.append(d) - self.addComment('END') - self.addComment('------------------------- need this line --------------------------------------') + self.addComment('END of input file (the word "END" must appear in the first 3 columns of this last OutList line)') + self.addComment('---------------------------------------------------------------------------------------') return # --- Parsing of standard lines: value(s) key comment @@ -553,6 +628,7 @@ def _read(self): d['tabType'] = TABTYPE_NUM_WITH_HEADERCOM nTabLines = self[d['tabDimVar']]-1 # SOMEHOW ONE DATA POINT LESS d['value'], d['tabColumnNames'],_ = parseFASTNumTable(self.filename,lines[i:i+nTabLines+1],nTabLines,i,1) + d['descr'] = '' # d['tabUnits'] = ['(-)','(-)'] self.data.append(d) break @@ -574,7 +650,6 @@ def _read(self): #print(isStr(d['value'])) #if isStr(d['value']): # print(d['value'].lower() in NUMTAB_FROM_VAL_DETECT_L) - # --- Handling of tables if isStr(d['value']) and 
d['value'].lower() in NUMTAB_FROM_VAL_DETECT_L: @@ -595,6 +670,7 @@ def _read(self): nTabLines = self[d['tabDimVar']] #print('Reading table {} Dimension {} (based on {})'.format(d['label'],nTabLines,d['tabDimVar'])); d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename,lines[i:i+nTabLines+nHeaders], nTabLines, i, nHeaders, tableType=tab_type, varNumLines=d['tabDimVar']) + _, d['descr'] = splitAfterChar(lines[i], '!') i += nTabLines+nHeaders-1 # --- Temporary hack for e.g. SubDyn, that has duplicate table, impossible to detect in the current way... @@ -661,6 +737,7 @@ def _read(self): d['label'] += labOffset #print('Reading table {} Dimension {} (based on {})'.format(d['label'],nTabLines,d['tabDimVar'])); d['value'], d['tabColumnNames'], d['tabUnits'] = parseFASTNumTable(self.filename,lines[i:i+nTabLines+nHeaders+nOffset],nTabLines,i, nHeaders, tableType=tab_type, nOffset=nOffset, varNumLines=d['tabDimVar']) + d['descr'] = '' # i += nTabLines+1-nOffset # --- Temporary hack for e.g. SubDyn, that has duplicate table, impossible to detect in the current way... @@ -732,10 +809,11 @@ def toString(self): def toStringVLD(val,lab,descr): val='{}'.format(val) lab='{}'.format(lab) - if len(val)<13: - val='{:13s}'.format(val) - if len(lab)<13: - lab='{:13s}'.format(lab) + # Trying to reproduce WISDEM format + if len(val)<22: + val='{:22s}'.format(val) + if len(lab)<11: + lab='{:11s}'.format(lab) return val+' '+lab+' - '+descr.strip().lstrip('-').lstrip() def toStringIntFloatStr(x): @@ -762,7 +840,6 @@ def mat_tostring(M,fmt='24.16e'): s+='\n' s+='\n' return s - for i in range(len(self.data)): d=self.data[i] if d['isComment']: @@ -774,32 +851,42 @@ def mat_tostring(M,fmt='24.16e'): else: s+=toStringVLD(d['value'],d['label'],d['descr']) elif d['tabType']==TABTYPE_NUM_WITH_HEADER: + PrettyCols= d['label']!='TowProp' # Temporary hack for AeroDyn tower table if d['tabColumnNames'] is not None: - s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']])) - #s+=d['descr'] # Not ready for that + if PrettyCols: + s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']])) + else: + s+='{}'.format(' '.join(['{:14s}'.format(s) for s in d['tabColumnNames']])) + if len(d['descr'])>0 and d['descr'][0]=='!': + s+=d['descr'] if d['tabUnits'] is not None: s+='\n' - s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']])) + if PrettyCols: + s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']])) + else: + s+='{}'.format(' '.join(['{:14s}'.format(s) for s in d['tabUnits']])) newline='\n' else: newline='' if np.size(d['value'],0) > 0 : s+=newline - s+='\n'.join('\t'.join( ('{:15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value']) + if PrettyCols: + s+='\n'.join('\t'.join( ('{:^15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value']) + else: + s+='\n'.join(' '.join( ('{:13.7E}'.format(x) ) for x in y) for y in d['value']) elif d['tabType']==TABTYPE_MIX_WITH_HEADER: s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']])) if d['tabUnits'] is not None: s+='\n' - s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']])) + s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']])) if np.size(d['value'],0) > 0 : s+='\n' s+='\n'.join('\t'.join(toStringIntFloatStr(x) for x in y) for y in d['value']) elif d['tabType']==TABTYPE_NUM_WITH_HEADERCOM: - s+='! 
{}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']])) - s+='! {}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']])) + s+='! {}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']])) + s+='! {}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']])) s+='\n'.join('\t'.join('{:15.8e}'.format(x) for x in y) for y in d['value']) elif d['tabType']==TABTYPE_FIL: - #f.write('{} {} {}\n'.format(d['value'][0],d['tabDetect'],d['descr'])) label = d['label'] if 'kbot' in self.keys(): # Moordyn has no 'OutList' label.. label='' @@ -808,6 +895,10 @@ def mat_tostring(M,fmt='24.16e'): else: s+='{} {} {}\n'.format(d['value'][0], label, d['descr']) # TODO? s+='\n'.join(fil for fil in d['value'][1:]) + elif d['tabType']==TABTYPE_OUTLIST: + label = d['label'] + s+='{:22s} {:11s} - {}\n'.format('', label, d['descr']) + s+='\n'.join(fil for fil in d['value'][1:]) elif d['tabType']==TABTYPE_NUM_BEAMDYN: # TODO use dedicated sub-class data = d['value'] @@ -821,11 +912,14 @@ def mat_tostring(M,fmt='24.16e'): s += beamdyn_section_mat_tostring(x,K,M) elif d['tabType']==TABTYPE_NUM_SUBDYNOUT: data = d['value'] - s+='{}\n'.format(' '.join(['{:15s}'.format(s) for s in d['tabColumnNames']])) - s+='{}'.format(' '.join(['{:15s}'.format(s) for s in d['tabUnits']])) + s+='{}\n'.format(' '.join(['{:^15s}'.format(s) for s in d['tabColumnNames']])) + s+='{}'.format(' '.join(['{:^15s}'.format(s) for s in d['tabUnits']])) if np.size(d['value'],0) > 0 : s+='\n' s+='\n'.join('\t'.join('{:15.0f}'.format(x) for x in y) for y in data) + elif d['tabType']==TABTYPE_NUM_NO_HEADER: + data = d['value'] + s+='\n'.join('\t'.join( ('{:15.0f}'.format(x) if int(x)==x else '{:15.8e}'.format(x) ) for x in y) for y in d['value']) else: raise Exception('Unknown table type for variable {}'.format(d)) if i0: return l[:n] else: return l +def splitAfterChar(l, c): + n = l.find(c); + if n>0: + return l[:n], l[n:] + else: + return l, '' def getDict(): return {'value':None, 'label':'', 'isComment':False, 'descr':'', 'tabType':TABTYPE_NOT_A_TAB} @@ -1073,6 +1172,12 @@ def _merge_value(splits): def parseFASTInputLine(line_raw,i,allowSpaceSeparatedList=False): d = getDict() + line_low = line_raw.lower() + if line_low=='end' or line_low.startswith('end of') or line_low.startswith('end ('): + d['isComment'] = True + d['value'] = line_raw + d['label'] = 'END' + return d #print(line_raw) try: # preliminary cleaning (Note: loss of formatting) @@ -1185,7 +1290,7 @@ def parseFASTOutList(lines,iStart): if i>=len(lines): print('[WARN] End of file reached while reading Outlist') #i=min(i+1,len(lines)) - return OutList,iStart+len(OutList) + return OutList, iStart+len(OutList) def extractWithinParenthesis(s): @@ -1238,6 +1343,7 @@ def parseFASTNumTable(filename,lines,n,iStart,nHeaders=2,tableType='num',nOffset Units = None + i = 0 if len(lines)!=n+nHeaders+nOffset: raise BrokenFormatError('Not enough lines in table: {} lines instead of {}\nFile:{}'.format(len(lines)-nHeaders,n,filename)) try: @@ -2026,7 +2132,7 @@ def __init__(self, filename=None, **kwargs): self.module='ExtPtfm' - def _read(self): + def _read(self, IComment=None): with open(self.filename, 'r', errors="surrogateescape") as f: lines=f.read().splitlines() detectAndReadExtPtfmSE(self, lines) diff --git a/openfast_toolbox/io/fast_input_file_graph.py b/openfast_toolbox/io/fast_input_file_graph.py index 9d2c848..82fdf76 100644 --- a/openfast_toolbox/io/fast_input_file_graph.py +++ b/openfast_toolbox/io/fast_input_file_graph.py @@ -134,7 +134,7 @@ 
def subdynToGraph(sd, propToNodes=False, propToElem=False): # --------------------------------------------------------------------------------} # --- HydroDyn # --------------------------------------------------------------------------------{ -def hydrodynToGraph(hd, propToNodes=False, propToElem=False): +def hydrodynToGraph(hd, propToNodes=False, propToElem=False, verbose=False): """ hd: dict-like object as returned by weio @@ -196,7 +196,8 @@ def type2Color(Pot): if 'FillGroups' in hd.keys(): # Filled members Graph.addMiscPropertySet('FillGroups') - print('>>> TODO Filled Groups') + if verbose: + print('>>> TODO Filled Groups') #for ip,P in enumerate(hd['FillGroups']): # # FillNumM FillMList FillFSLoc FillDens # raise NotImplementedError('hydroDynToGraph, Fill List might not be properly set, verify below') diff --git a/openfast_toolbox/io/fast_output_file.py b/openfast_toolbox/io/fast_output_file.py index df03aac..234cfcf 100644 --- a/openfast_toolbox/io/fast_output_file.py +++ b/openfast_toolbox/io/fast_output_file.py @@ -81,7 +81,10 @@ def formatName(): def __init__(self, filename=None, **kwargs): """ Class constructor. If a `filename` is given, the file is read. """ - self.filename = filename + # Data + self.filename = filename + self.data = None # pandas.DataFrame + self.description = '' # string if filename: self.read(**kwargs) @@ -97,10 +100,8 @@ def read(self, filename=None, **kwargs): raise OSError(2,'File not found:',self.filename) if os.stat(self.filename).st_size == 0: raise EmptyFileError('File is empty:',self.filename) - # --- Calling (children) function to read - self._read(**kwargs) - def _read(self): + # --- Actual reading def readline(iLine): with open(self.filename) as f: for i, line in enumerate(f): @@ -110,26 +111,26 @@ def readline(iLine): break ext = os.path.splitext(self.filename.lower())[1] - self.info={} + info={} self['binary']=False try: if ext in ['.out','.elev','.dbg','.dbg2']: - self.data, self.info = load_ascii_output(self.filename) + self.data, info = load_ascii_output(self.filename, **kwargs) elif ext=='.outb': - self.data, self.info = load_binary_output(self.filename) + self.data, info = load_binary_output(self.filename, **kwargs) self['binary']=True elif ext=='.elm': F=CSVFile(filename=self.filename, sep=' ', commentLines=[0,2],colNamesLine=1) self.data = F.data del F - self.info['attribute_units']=readline(3).replace('sec','s').split() - self.info['attribute_names']=self.data.columns.values + info['attribute_units']=readline(3).replace('sec','s').split() + info['attribute_names']=self.data.columns.values else: if isBinary(self.filename): - self.data, self.info = load_binary_output(self.filename) + self.data, info = load_binary_output(self.filename, **kwargs) self['binary']=True else: - self.data, self.info = load_ascii_output(self.filename) + self.data, info = load_ascii_output(self.filename, **kwargs) self['binary']=False except MemoryError as e: raise BrokenReaderError('FAST Out File {}: Memory error encountered\n{}'.format(self.filename,e)) @@ -138,11 +139,34 @@ def readline(iLine): if self.data.shape[0]==0: raise EmptyFileError('This FAST output file contains no data: {}'.format(self.filename)) - if self.info['attribute_units'] is not None: - self.info['attribute_units'] = [re.sub(r'[()\[\]]','',u) for u in self.info['attribute_units']] - def _write(self, binary=None, fileID=4): + # --- Convert to DataFrame + if info['attribute_units'] is not None: + info['attribute_units'] = [re.sub(r'[()\[\]]','',u) for u in info['attribute_units']] + if 
len(info['attribute_names'])!=len(info['attribute_units']): + cols=info['attribute_names'] + print('[WARN] not all columns have units! Skipping units') + else: + cols=[n+'_['+u.replace('sec','s')+']' for n,u in zip(info['attribute_names'], info['attribute_units'])] + else: + cols=info['attribute_names'] + self.description = info.get('description', '') + self.description = ''.join(self.description) if isinstance(self.description,list) else self.description + if isinstance(self.data, pd.DataFrame): + self.data.columns = cols + else: + if len(cols)!=self.data.shape[1]: + raise BrokenFormatError('Inconstistent number of columns between headers ({}) and data ({}) for file {}'.format(len(cols), self.data.shape[1], self.filename)) + self.data = pd.DataFrame(data=self.data, columns=cols) + + + def write(self, filename=None, binary=None, fileID=4): + if filename: + self.filename = filename + if not self.filename: + raise Exception('No filename provided') + # Calling children function if binary is None: binary = self['binary'] @@ -152,43 +176,151 @@ def _write(self, binary=None, fileID=4): else: # ascii output with open(self.filename,'w') as f: - f.write('\t'.join(['{:>10s}'.format(c) for c in self.info['attribute_names']])+'\n') - f.write('\t'.join(['{:>10s}'.format('('+u+')') for u in self.info['attribute_units']])+'\n') + f.write(self.description) # add description to the begining of the file + f.write('\t'.join(['{:>10s}'.format(c) for c in self.channels])+'\n') + f.write('\t'.join(['{:>10s}'.format('('+u+')') for u in self.units])+'\n') # TODO better.. - f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y[0])]+['{:10.3e}'.format(x) for x in y[1:]]) for y in self.data])) + if self.data is not None: + if isinstance(self.data, pd.DataFrame) and not self.data.empty: + f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y.iloc[0])]+['{: .5e}'.format(x) for x in y.iloc[1:]]) for _, y in self.data.iterrows()])) + else: # in case data beeing array or list of list. + f.write('\n'.join(['\t'.join(['{:10.4f}'.format(y)]+['{: .5e}'.format(x) for x in y]) for y in self.data])) + + @property + def channels(self): + if self.data is None: + return [] + def no_unit(s): + s=s.replace('(','[').replace(')',']').replace(' [','_[').strip(']') + try: + return s.split('_[')[0].strip() + except: + return s.strip() + channels = [no_unit(c) for c in self.data.columns] + return channels + + @property + def units(self): + if self.data is None: + return [] + def unit(s): + s=s.replace('(','[').replace(')',']').replace(' [','_[').strip(']') + try: + return s.split('_[')[1].strip() + except: + return s.strip() + units = [unit(c) for c in self.data.columns] + return units def toDataFrame(self): """ Returns object into one DataFrame, or a dictionary of DataFrames""" - # --- Example (returning one DataFrame): - # return pd.DataFrame(data=np.zeros((10,2)),columns=['Col1','Col2']) - if self.info['attribute_units'] is not None: - if len(self.info['attribute_names'])!=len(self.info['attribute_units']): - cols=self.info['attribute_names'] - print('[WARN] not all columns have units! 
Skipping units') - else: - cols=[n+'_['+u.replace('sec','s')+']' for n,u in zip(self.info['attribute_names'],self.info['attribute_units'])] - else: - cols=self.info['attribute_names'] - if isinstance(self.data, pd.DataFrame): - df= self.data - df.columns=cols - else: - if len(cols)!=self.data.shape[1]: - raise BrokenFormatError('Inconstistent number of columns between headers ({}) and data ({}) for file {}'.format(len(cols), self.data.shape[1], self.filename)) - df = pd.DataFrame(data=self.data,columns=cols) - - return df + return self.data def writeDataFrame(self, df, filename, binary=True): writeDataFrame(df, filename, binary=binary) def __repr__(self): s='<{} object> with attributes:\n'.format(type(self).__name__) - s+=' - info ({})\n'.format(type(self.info)) + s+=' - filename: {}\n'.format(self.filename) s+=' - data ({})\n'.format(type(self.data)) + s+=' - description: {}\n'.format(self.description) s+='and keys: {}\n'.format(self.keys()) return s + # -------------------------------------------------------------------------------- + # --- 2D fields + # -------------------------------------------------------------------------------- + @property + def driverFile(self): + try: + driver, FFKind = findDriverFile(self.filename, df=None) + return driver + except: + return None + + def to2DFields(self, DeltaAzi=5, nPeriods=3, rcoords=None, kinds=['(t,r)','(psi,r)'], **kwargs): + import welib.fast.postpro as fastlib + + def insertName(ds, name, dims): + for var in ds.variables: + # Create a new name, for example, add "_new" to each variable name + if ds[var].dims == dims: + #chan = var.split('_[')[0] + #unit = var.split('_[')[1] + #new_var = chan+name+'_['+unit + new_var = name+'_'+var + ds = ds.rename({var: new_var}) + return ds + + driverFile = self.driverFile + + if len(kwargs.keys())>0: + print('[WARN] FASTOutputFile: to2DFields: ignored keys: ',kwargs.keys()) + # Sanity check + DeltaAzi=float(DeltaAzi) + df = self.toDataFrame() + ds_AD1 = None + ds_AD2 = None + # --- Radial coordinate + def get_rvals(ds, rcoords): + n = len(ds['i/n_[-]'].values) + if rcoords is not None: # priority to user input + runit = 'm' + rvals = rcoords + if len(rcoords)!=n: + raise Exception('Length of rcoords should be: {}'.format(n)) + elif 'r_[m]' in ds.keys(): + rvals = ds['r_[m]'].values + runit = 'm' + else: + rvals = ds['i/n_[-]'].values + runit = '-' + return rvals, runit + # --- Time wise + if '(t,r)' in kinds: + ds_AD1, ds_ED, ds_BD = fastlib.spanwisePostProRows(df, driverFile, si1='t', sir='r') + if ds_AD1 is None: + return None # No Hope + rvals, runit = get_rvals(ds_AD1, rcoords) + # Rename columns to make them unique + ds_AD1 = insertName(ds_AD1, '(t,r)', ('t','r')) + try: + ds_AD1.coords['t'] = ('t', df['Time_[s]'].values) + except: + pass + ds_AD1.coords['r'] = ('r', rvals) + ds_AD1.r.attrs['unit'] = runit + ds_AD1.t.attrs['unit'] = 's' + + # --- Azimuthal Radial postpro + if '(psi,r)' in kinds: + psi = np.arange(0, 360+DeltaAzi/10, DeltaAzi) + dfPsi = fastlib.azimuthal_average_DF(df, psiBin=psi, periodic=True, nPeriods=nPeriods) #, tStart = time[-1]-20) + ds_AD2, ds_ED2, ds_BD2 = fastlib.spanwisePostProRows(dfPsi, driverFile, si1='psi', sir='r') + rvals, runit = get_rvals(ds_AD2, rcoords) + ds_AD2.coords['psi'] = ('psi', psi) # TODO hack from bin to bin edges... 
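With the reworked reader, FASTOutputFile keeps its data as a pandas DataFrame with 'Name_[unit]' column labels, and channel names and units are derived from those labels instead of a separate info dict. A minimal usage sketch, with a hypothetical output file path:

    from openfast_toolbox.io.fast_output_file import FASTOutputFile

    f  = FASTOutputFile('Main_Turbine.outb')         # hypothetical OpenFAST binary output
    df = f.toDataFrame()                             # now simply returns f.data
    print(f.channels[:3], f.units[:3])               # names/units split from the column labels
    f.write('Main_Turbine_ascii.out', binary=False)  # re-write the same data as ASCII
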
+ ds_AD2.coords['r'] = ('r', rvals) + # Rename columns to make them unique + ds_AD2= insertName(ds_AD2, '(psi,r)', ('psi','r')) + ds_AD2.psi.attrs['unit'] = 'deg' + ds_AD2.r.attrs['unit'] = runit + + # --- Combine into one field (need unique variables and dimension) + if ds_AD1 is not None and ds_AD2 is not None: + ds_AD = ds_AD1.merge(ds_AD2, compat='override') + elif ds_AD1 is not None: + ds_AD = ds_AD1 + else: + ds_AD = ds_AD2 + + # --- +# # ds_AD = ds_AD.swap_dims(dims_dict={'it': 't'}) +# # ds_AD = ds_AD.drop_vars('it') +# # ds_AD.coords['r'] = ('ir', rcoords) +# # ds_AD = ds_AD.swap_dims(dims_dict={'ir': 'r'}) +# # ds_AD = ds_AD.drop_vars('ir') + return ds_AD + # -------------------------------------------------------------------------------- # --- Converters # -------------------------------------------------------------------------------- @@ -206,9 +338,9 @@ def toOUTB(self, filename=None, extension='.outb', fileID=4, noOverWrite=True, * # NOTE: fileID=2 will chop the channels name of long channels use fileID4 instead channels = self.data - chanNames = self.info['attribute_names'] - chanUnits = self.info['attribute_units'] - descStr = self.info['description'] + chanNames = self.channels + chanUnits = self.units + descStr = self.description if isinstance(descStr, list): descStr=(''.join(descStr[:2])).replace('\n','') writeBinary(filename, channels, chanNames, chanUnits, fileID=fileID, descStr=descStr) @@ -235,7 +367,7 @@ def isBinary(filename): -def load_ascii_output(filename, method='numpy', encoding='ascii'): +def load_ascii_output(filename, method='numpy', encoding='ascii', **kwargs): if method in ['forLoop','pandas']: @@ -270,7 +402,7 @@ def load_ascii_output(filename, method='numpy', encoding='ascii'): headerRead=True break if not headerRead: - raise WrongFormatError('Could not find the keyword "Time" or "Alpha" in the first {} lines of the file'.format(maxHeaderLines)) + raise WrongFormatError('Could not find the keyword "Time" or "Alpha" in the first {} lines of the file {}'.format(maxHeaderLines, filename)) nHeader = len(header)+1 nCols = len(info['attribute_names']) @@ -309,29 +441,53 @@ def load_ascii_output(filename, method='numpy', encoding='ascii'): return data, info -def load_binary_output(filename, use_buffer=True): +def load_binary_output(filename, use_buffer=False, method='mix', **kwargs): """ 03/09/15: Ported from ReadFASTbinary.m by Mads M Pedersen, DTU Wind 24/10/18: Low memory/buffered version by E. Branlard, NREL - 18/01/19: New file format for exctended channels, by E. Branlard, NREL - - Info about ReadFASTbinary.m: - % Author: Bonnie Jonkman, National Renewable Energy Laboratory - % (c) 2012, National Renewable Energy Laboratory - % - % Edited for FAST v7.02.00b-bjj 22-Oct-2012 + 18/01/19: New file format for extended channels, by E. Branlard, NREL + 20/11/23: Improved performances using np.fromfile, by E. 
Branlard, NREL """ StructDict = { - 'uint8': ('B', 1, np.uint8), - 'int16':('h', 2, np.int16), - 'int32':('i', 4, np.int32), - 'float32':('f', 4, np.float32), - 'float64':('d', 8, np.float64)} - def fread(fid, n, dtype): + 'uint8': ('B', 1, np.uint8), + 'int16': ('h', 2, np.int16), + 'int32': ('i', 4, np.int32), + 'float32': ('f', 4, np.float32), + 'float64': ('d', 8, np.float64) + } + def getFileSizeMB(filename): + return os.path.getsize(filename)/(1024.0**2) + + def freadStruct(fid, n, dtype): fmt, nbytes, npdtype = StructDict[dtype] - #return np.array(struct.unpack(fmt * n, fid.read(nbytes * n)), dtype=npdtype) return struct.unpack(fmt * n, fid.read(nbytes * n)) + def freadStructArray(fid, n, dtype): + fmt, nbytes, npdtype = StructDict[dtype] + return np.array(struct.unpack(fmt * n, fid.read(nbytes * n))) + + def freadNumpy(fid, n, dtype): + fmt, nbytes, npdtype = StructDict[dtype] + return np.fromfile(fid, count=n, dtype=npdtype) # Improved performances + + if method=='numpy': + fread = freadNumpy + freadLarge = freadNumpy + elif method=='struct': + fread = freadStruct + freadLarge = freadStructArray + elif method=='mix': + fread = freadStruct + freadLarge = freadNumpy + elif method=='optim': + # Decide on method on the fly + #MB = getFileSizeMB(filename) + use_buffer = False + fread = freadStruct + freadLarge = freadNumpy + else: + raise NotImplementedError + def freadRowOrderTableBuffered(fid, n, type_in, nCols, nOff=0, type_out='float64'): """ Reads of row-ordered table from a binary file. @@ -346,7 +502,7 @@ def freadRowOrderTableBuffered(fid, n, type_in, nCols, nOff=0, type_out='float64 @author E.Branlard, NREL """ - fmt, nbytes = {'uint8': ('B', 1), 'int16':('h', 2), 'int32':('i', 4), 'float32':('f', 4), 'float64':('d', 8)}[type_in] + fmt, nbytes = StructDict[type_in][:2] nLines = int(n/nCols) GoodBufferSize = 4096*40 nLinesPerBuffer = int(GoodBufferSize/nCols) @@ -361,21 +517,20 @@ def freadRowOrderTableBuffered(fid, n, type_in, nCols, nOff=0, type_out='float64 while nIntRead0: + return Files[0] + return None + +def findDriverFileFASTFarm(base): + Files=[base+ext for ext in ['.fstf','.FSTF','.Fstf','.fmas','.FMAS','.Fmas'] if os.path.exists(base+ext)] + if len(Files)>0: + return Files[0] + return None + +def findDriverFile(filename, df=None): + """ + OUTPUTS: + - driver : filename of driver + - FASTFarm: logical + """ + base,out_ext = os.path.splitext(filename) + # HACK for AD file to find the right .fst file + iDotAD = base.lower().find('.ad') + if iDotAD>1: + base=base[:iDotAD] + + #FASTFarmKind = None + if df is not None: + sCols = ''.join(df.columns) + FASTFarmKind = sCols.find('WkDf')>1 or sCols.find('CtT')>0 + if FASTFarmKind: + return findDriverFileFASTFarm(base), FASTFarmKind + else: + return findDriverFileFAST(base), FASTFarmKind + else: + driver = findDriverFileFASTFarm(base) + if driver: + return driver, True + else: + driver = findDriverFileFAST(base) + return driver, False + + + + if __name__ == "__main__": - B=FASTOutputFile('tests/example_files/FASTOutBin.outb') + scriptDir = os.path.dirname(__file__) + B=FASTOutputFile(os.path.join(scriptDir, 'tests/example_files/FASTOutBin.outb')) + B.to2DFields() df=B.toDataFrame() - B.writeDataFrame(df, 'tests/example_files/FASTOutBin_OUT.outb') + B.writeDataFrame(df, os.path.join(scriptDir, 'tests/example_files/FASTOutBin_OUT.outb')) B.toOUTB(extension='.dat.outb') B.toParquet() B.toCSV() diff --git a/openfast_toolbox/io/fast_summary_file.py b/openfast_toolbox/io/fast_summary_file.py index 2fbcb86..dce3f0a 100644 --- 
a/openfast_toolbox/io/fast_summary_file.py +++ b/openfast_toolbox/io/fast_summary_file.py @@ -56,15 +56,38 @@ def read(self, filename=None, header_only=False): raise OSError(2,'File not found:',self.filename) if os.stat(self.filename).st_size == 0: raise EmptyFileError('File is empty:',self.filename) - + # Children class + self._read() + + def _read(self): + def readFirstLines(fid, nLines): + lines=[] + for i, line in enumerate(fid): + lines.append(line.strip()) + if i==nLines: + break + return lines with open(self.filename, 'r', errors="surrogateescape") as fid: header= readFirstLines(fid, 4) if any(['subdyn' in s.lower() for s in header]): self['module']='SubDyn' - readSubDynSum(self) + fsum = SubDynSummaryFile(self.filename) + self.update_from_child(fsum, attrList=fsum.attributes()) + elif any(['beamdyn' in s.lower() for s in header]): + self['module']='BeamDyn' + fsum = BeamDynSummaryFile(self.filename) + self.update_from_child(fsum, attrList=fsum.attributes()) else: raise NotImplementedError('This summary file format is not yet supported') + def update_from_child(self, child, attrList=None): + self.update(child) + if attrList is None: + attrList = [c for c in child.__dir__() if not c.startswith('_')] + for attr in attrList: + print('FASTSummaryFile from child {}, setting `{}`'.format(type(child).__name__, attr)) + setattr(self, attr, getattr(child, attr)) + def toDataFrame(self): if 'module' not in self.keys(): raise Exception(''); @@ -82,38 +105,46 @@ def toGraph(self): # --------------------------------------------------------------------------------} -# --- Helper functions +# --- SubDyn Summary File # --------------------------------------------------------------------------------{ -def readFirstLines(fid, nLines): - lines=[] - for i, line in enumerate(fid): - lines.append(line.strip()) - if i==nLines: - break - return lines +class SubDynSummaryFile(FASTSummaryFile): -# --------------------------------------------------------------------------------} -# --- Sub-reader/class for SubDyn summary files -# --------------------------------------------------------------------------------{ -def readSubDynSum(self): - - # Read data - #T=yaml.load(fid, Loader=yaml.SafeLoader) - yaml_read(self.filename, self) - - # --- Treatement of useful data - if self['DOF2Nodes'].shape[1]==3: - self['DOF2Nodes']=np.column_stack((np.arange(self['DOF2Nodes'].shape[0])+1,self['DOF2Nodes'])) - # NOTE: DOFs are reindexed to start at 0 - self['DOF2Nodes'][:,0]-=1 - self['DOF___L'] -=1 # internal DOFs - self['DOF___B'] -=1 # internal - self['DOF___F'] -=1 # fixed DOFs - - self['CB_frequencies']=self['CB_frequencies'].ravel() - self['X'] = self['Nodes'][:,1].astype(float) - self['Y'] = self['Nodes'][:,2].astype(float) - self['Z'] = self['Nodes'][:,3].astype(float) + @staticmethod + def formatName(): + return 'SubDyn summary file' + + @staticmethod + def attributes(): + attr = ['formatName'] + attr += ['_read'] + attr += ['toDataFrame'] + attr += ['NodesDisp' ] + attr += ['toDataFrame'] + attr += ['toJSON' ] + attr += ['getModes' ] + return attr + + def _read(self): + """ """ + #T=yaml.load(fid, Loader=yaml.SafeLoader) + yaml_read(self.filename, self) + + # --- Treatement of useful data + if self['DOF2Nodes'].shape[1]==3: + self['DOF2Nodes']=np.column_stack((np.arange(self['DOF2Nodes'].shape[0])+1,self['DOF2Nodes'])) + # NOTE: DOFs are reindexed to start at 0 + self['DOF2Nodes'][:,0]-=1 + self['DOF___L'] -=1 # internal DOFs + self['DOF___B'] -=1 # internal + self['DOF___F'] -=1 # fixed DOFs + + 
self['CB_frequencies']=self['CB_frequencies'].ravel() + self['X'] = self['Nodes'][:,1].astype(float) + self['Y'] = self['Nodes'][:,2].astype(float) + self['Z'] = self['Nodes'][:,3].astype(float) + + def toDataFrame(self): + return None # --- Useful methods that will be added to the class def NodesDisp(self, IDOF, UDOF, maxDisp=None, sortDim=None): @@ -151,7 +182,7 @@ def NodesDisp(self, IDOF, UDOF, maxDisp=None, sortDim=None): pos = pos[I,:] return disp, pos, INodes - def getModes(data, maxDisp=None, sortDim=None): + def getModes(self, maxDisp=None, sortDim=None): """ return Guyan and CB modes""" if maxDisp is None: #compute max disp such as it's 10% of maxdimension @@ -161,38 +192,38 @@ def getModes(data, maxDisp=None, sortDim=None): maxDisp = np.max([dx,dy,dz])*0.1 # NOTE: DOF have been reindexed -1 - DOF_B = data['DOF___B'].ravel() - DOF_F = data['DOF___F'].ravel() - DOF_K = (np.concatenate((DOF_B,data['DOF___L'].ravel(), DOF_F))).astype(int) + DOF_B = self['DOF___B'].ravel() + DOF_F = self['DOF___F'].ravel() + DOF_K = (np.concatenate((DOF_B,self['DOF___L'].ravel(), DOF_F))).astype(int) # CB modes - PhiM = data['PhiM'] + PhiM = self['PhiM'] Phi_CB = np.vstack((np.zeros((len(DOF_B),PhiM.shape[1])),PhiM, np.zeros((len(DOF_F),PhiM.shape[1])))) - dispCB, posCB, INodesCB = data.NodesDisp(DOF_K, Phi_CB, maxDisp=maxDisp, sortDim=sortDim) + dispCB, posCB, INodesCB = self.NodesDisp(DOF_K, Phi_CB, maxDisp=maxDisp, sortDim=sortDim) # Guyan modes - PhiR = data['PhiR'] + PhiR = self['PhiR'] Phi_Guyan = np.vstack((np.eye(len(DOF_B)),PhiR, np.zeros((len(DOF_F),PhiR.shape[1])))) - dispGy, posGy, INodesGy = data.NodesDisp(DOF_K, Phi_Guyan, maxDisp=maxDisp, sortDim=sortDim) + dispGy, posGy, INodesGy = self.NodesDisp(DOF_K, Phi_Guyan, maxDisp=maxDisp, sortDim=sortDim) return dispGy, posGy, INodesGy, dispCB, posCB, INodesCB - def subDynToJson(data, outfile=None): + def toJSON(self, outfile=None): """ Convert to a "JSON" format TODO: convert to graph and use graph.toJSON """ - #return data.toGraph().toJSON(outfile) + #return self.toGraph().toJSON(outfile) - dispGy, posGy, _, dispCB, posCB, _ = data.getModes(sortDim=None) # Sorting mess things up + dispGy, posGy, _, dispCB, posCB, _ = self.getModes(sortDim=None) # Sorting mess things up Nodes = self['Nodes'].copy() Elements = self['Elements'].copy() Elements[:,0]-=1 Elements[:,1]-=1 Elements[:,2]-=1 - CB_freq = data['CB_frequencies'].ravel() + CB_freq = self['CB_frequencies'].ravel() d=dict(); d['Connectivity']=Elements[:,[1,2]].astype(int).tolist(); @@ -212,7 +243,7 @@ def subDynToJson(data, outfile=None): 'omega':CB_freq[iMode]*2*np.pi, #in [rad/s] 'Displ':dispCB[:,:,iMode].tolist() } for iMode in range(dispCB.shape[2]) ] - d['groundLevel']=np.min(data['Z']) # TODO + d['groundLevel']=np.min(self['Z']) # TODO if outfile is not None: import json @@ -224,7 +255,7 @@ def subDynToJson(data, outfile=None): return d - def subDynToDataFrame(data, sortDim=2, removeZero=True): + def toDataFrame(self, sortDim=2, removeZero=True): """ Convert to DataFrame containing nodal displacements """ def toDF(pos,disp,preffix=''): disp[np.isnan(disp)]=0 @@ -245,7 +276,7 @@ def toDF(pos,disp,preffix=''): dfDisp.columns = [c.replace('Mode','Disp') for c in dfDisp.columns.values] return df, dfDisp - dispGy, posGy, _, dispCB, posCB, _ = data.getModes(sortDim=sortDim) + dispGy, posGy, _, dispCB, posCB, _ = self.getModes(sortDim=sortDim) columns = ['z_[m]','x_[m]','y_[m]'] dataZXY = np.column_stack((posGy[:,2],posGy[:,0],posGy[:,1])) @@ -255,13 +286,42 @@ def 
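# --- Illustrative sketch (not from the toolbox) of how getModes above assembles the
# full Guyan and Craig-Bampton mode shapes from the summary-file matrices:
# leader DOFs (B) first, internal DOFs (L) next, fixed DOFs (F) last.
# All sizes and values below are made up.
import numpy as np
nB, nL, nF, nModes = 6, 12, 6, 4
PhiM = np.random.rand(nL, nModes)   # CB modes on internal DOFs (stands in for 'PhiM')
PhiR = np.random.rand(nL, nB)       # static/Guyan modes on internal DOFs (stands in for 'PhiR')
Phi_CB    = np.vstack([np.zeros((nB, nModes)), PhiM, np.zeros((nF, nModes))])
Phi_Guyan = np.vstack([np.eye(nB),             PhiR, np.zeros((nF, nB))])
assert Phi_CB.shape    == (nB + nL + nF, nModes)
assert Phi_Guyan.shape == (nB + nL + nF, nB)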
toDF(pos,disp,preffix=''): df = pd.concat((dfZXY, df1, df2, df1d, df2d), axis=1) return df - # adding method to class dynamically to give it a "SubDyn Summary flavor" - setattr(FASTSummaryFile, 'NodesDisp' , NodesDisp) - setattr(FASTSummaryFile, 'toDataFrame', subDynToDataFrame) - setattr(FASTSummaryFile, 'toJSON' , subDynToJson) - setattr(FASTSummaryFile, 'getModes' , getModes) - return self + + +# --------------------------------------------------------------------------------} +# --- BeamDyn +# --------------------------------------------------------------------------------{ +class BeamDynSummaryFile(FASTSummaryFile): + + @staticmethod + def formatName(): + return 'BeamDyn summary file' + + @staticmethod + def attributes(): + attr = ['formatName'] + attr += ['_read'] + attr += ['toDataFrame'] + return attr + + def _read(self, filename=None): + """ """ + if not filename: + filename = self.filename + # Put data into self dict + yaml_read(filename, self) + + def toDataFrame(self): + #self['Init_QP_E1'] + #self['M_IEC'] + #self['K_IEC'] + df = pd.DataFrame(data=self['Init_Nodes_E1']) + + + return df + + if __name__=='__main__': diff --git a/openfast_toolbox/io/gnuplot_file.py b/openfast_toolbox/io/gnuplot_file.py new file mode 100644 index 0000000..29c8a32 --- /dev/null +++ b/openfast_toolbox/io/gnuplot_file.py @@ -0,0 +1,373 @@ +""" +Input/output class for the fileformat supported by GNU Plot +""" +import numpy as np +import pandas as pd +import os + + +try: + from .file import File, WrongFormatError, BrokenFormatError +except: + File=dict + EmptyFileError = type('EmptyFileError', (Exception,),{}) + WrongFormatError = type('WrongFormatError', (Exception,),{}) + BrokenFormatError = type('BrokenFormatError', (Exception,),{}) + +class GNUPlotFile(File): + """ + Read/write a GnuPlot file. The object behaves as a dictionary. + + Main methods + ------------ + - read, write, toDataFrame, to2DFields, keys + + Examples + -------- + f = GNUPlotFile('file.dat') + print(f.keys()) + print(f.toDataFrame().columns) + + """ + + @staticmethod + def defaultExtensions(): + """ List of file extensions expected for this fileformat""" + return ['.dat','.raw'] + + @staticmethod + def formatName(): + """ Short string (~100 char) identifying the file format""" + return 'GNUPlot file' + + @staticmethod + def priority(): return 60 # Priority in weio.read fileformat list between 0=high and 100:low + + def __init__(self, filename=None, **kwargs): + """ Class constructor. If a `filename` is given, the file is read. """ + self.filename = filename + if filename: + self.read(**kwargs) + + def read(self, filename=None, **kwargs): + """ Reads the file self.filename, or `filename` if provided """ + + # --- Standard tests and exceptions (generic code) + if filename: + self.filename = filename + if not self.filename: + raise Exception('No filename provided') + if not os.path.isfile(self.filename): + raise OSError(2,'File not found:',self.filename) + if os.stat(self.filename).st_size == 0: + raise EmptyFileError('File is empty:',self.filename) + # --- Calling (children) function to read + self._read(**kwargs) + + def write(self, filename=None): + """ Rewrite object to file, or write object to `filename` if provided """ + if filename: + self.filename = filename + if not self.filename: + raise Exception('No filename provided') + # Calling (children) function to write + self._write() + + def _read(self): + """ Reads self.filename and stores data into self. 
Self is (or behaves like) a dictionary""" + # NOTE: for ascii files only + with open(self.filename, 'r') as fid: + data = [] + column_names = None + headers = [] + current_dataset = [] + + for line in fid: + line = line.strip() + # header + if line.startswith('#'): + headers.append(line[1:].strip()) + continue + if not line: + # a new line triggers GNUPlot to "lift the pen" + if current_dataset: + data.append(np.array(current_dataset)) + current_dataset = [] + continue + current_dataset.append(np.array(line.split()).astype(float)) + + if current_dataset: + data.append(np.array(current_dataset)) + + # Try to detect column names from header + same_columns, same_rows, n_cols, n_rows = check_same_shape(data) + if same_columns: + column_names = find_string_with_n_splits(headers, n_cols) + + self['data'] = data + self['headers'] = headers + self['column_names']= column_names + + def plot(self): + import matplotlib.pyplot as plt + data, column_names = self['data'], self['column_names'] + + if not data: + print("No data found in the file.") + return + + shapes = [dataset.shape for dataset in data] + if len(set(shapes)) != 1: + print("Datasets have different shapes.") + return + + data = data[0] # Assuming all datasets have the same shape, take the first one + x = data[:, 0] + y = data[:, 1] + z_columns = data[:, 2:] + + xu = np.unique(x) + yu = np.unique(y) + + if len(xu) * len(yu) != len(x): + print("The data does not form a proper grid.") + return + + X, Y = np.meshgrid(xu, yu) + + for i, z_column in enumerate(z_columns.T): + Z = z_column.reshape(len(yu), len(xu)) + plt.figure() + cp = plt.contourf(X, Y, Z) + plt.colorbar(cp) + plt.title(f'Plot for column {i + 3}' + (f' ({column_names[i + 2]})' if column_names else '')) + plt.xlabel('X') + plt.ylabel('Y') + plt.show() + + + def _write(self): + """ Writes to self.filename""" + # --- Example: + #with open(self.filename,'w') as f: + # f.write(self.toString) + raise NotImplementedError() + + + # -------------------------------------------------------------------------------- + # --- Convenient properties + # -------------------------------------------------------------------------------- + @property + def common_shape(self): + same_columns, same_rows, n_cols, n_rows = check_same_shape(self['data']) + if same_columns and same_rows: + return n_rows, n_cols + else: + return None + + @property + def is_meshgrid(self): + b1 = self.common_shape + b2 = check_first_column_same(self['data']) + b3 = check_second_column_unique(self['data']) + return b1 is not None and b2 is not None and b3 is not None + + @property + def x_values(self): + if len(self['data'])==1: + x = self['data'][0][:,0] + else: + x = check_first_column_same(self['data']) + if x is None: + # TODO, potential concat but this will make the check above unclear + pass + return x + + @property + def y_values(self): + if len(self['data'])==1: + if self['data'][0].shape[1]<2: + return None + y= self['data'][0][:,1] + else: + y = check_second_column_unique(self['data']) + if y is None: + # TODO, potential concat but this will make the check above unclear + pass + return y + + def toDataFrame(self): + """ Returns object into one DataFrame, or a dictionary of DataFrames""" + data = self['data'] + shape = self.common_shape + x = self.x_values + y = self.y_values + colNames=self['column_names'] + if self['column_names'] is None: + colNames = default_colnames(self['data'][0].shape[1]) + + # --- Only one data set + if len(data)==1: + M = self['data'][0] + return pd.DataFrame(data=M, columns=colNames) + + # 
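# --- Standalone sketch (not from the toolbox) of the parsing convention implemented
# in _read above: '#' lines are headers, and a blank line "lifts the pen" and starts
# a new dataset. The sample text is made up.
import numpy as np
text = "# x y z\n0 0 1\n1 0 2\n\n0 1 3\n1 1 4\n"
datasets, current = [], []
for line in text.splitlines():
    line = line.strip()
    if line.startswith('#'):
        continue                      # header line
    if not line:                      # blank line: close the current block
        if current:
            datasets.append(np.array(current))
            current = []
        continue
    current.append([float(v) for v in line.split()])
if current:
    datasets.append(np.array(current))
assert len(datasets) == 2 and datasets[0].shape == (2, 3)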
--- Mesh grid dataset + if self.is_meshgrid: + # We concatenate.. + M = np.vstack(self['data']) + return pd.DataFrame(data=M, columns=colNames) + + # --- A bunch a different lines + dfs={} + same_columns, same_rows, n_cols, n_rows = check_same_shape(self['data']) + for i in range(len(self['data'])): + if same_columns: + cols = colNames + else: + colNames = default_colnames(self['data'][i].shape[1]) + dfs['set'+str(i)] = pd.DataFrame(data=self['data'][i], columns=cols) + return dfs + + def to2DFields(self, **kwargs): + import xarray as xr + is_meshgrid = self.is_meshgrid + shape = self.common_shape + x = self.x_values + y = self.y_values + colNames=self['column_names'] + if self['column_names'] is None: + colNames = default_colnames(self['data'][0].shape[1]) + + if len(kwargs.keys())>0: + print('[WARN] GNUPlotFile: to2DFields: ignored keys: ',kwargs.keys()) + + if not self.is_meshgrid: + if len(self['data'])>1: + # Probably hard to do, or have to provide different sets + return None + # --- Single table..., likely dubious results + M = self['data'][0] + if M.shape[1]<2: + return None + s1 = 'rows' + s2 = 'columns' + ds = xr.Dataset(coords={s1: range(M.shape[0]), s2: range(M.shape[1])}) + ds['data'] = ([s1, s2], M) + return ds + + # --- Mesh grid + ds = xr.Dataset() + ds = xr.Dataset(coords={colNames[0]: x, colNames[1]: y}) + data = np.array(self['data']) + #X, Y = np.meshgrid(x,y) + for i,c in enumerate(colNames[2:]): + ds[c] = ((colNames[0],colNames[1]), np.squeeze(data[:,:,i+2]).T) + return ds + + # --- Optional functions + def __repr__(self): + """ String that is written to screen when the user calls `print()` on the object. + Provide short and relevant information to save time for the user. + """ + def axisvec_to_string(v, s=''): + if v is None: + return 'None' + else: + deltas = np.diff(v) + if len(v)==0: + return '[], n:0' + elif len(v)==1: + return '[{}], n:1'.format(v[0]) + elif len(v)==2: + return '[{}, {}], n:2'.format(v[0], v[1]) + else: # len(np.unique(deltas))==1: + # TODO improve me + return '[{} ... {}], d{}:{}, n:{}'.format(v[0], v[-1], s, v[1]-v[0], len(v)) + #else: + # return '[{} {} ... 
{}], d{}:{}, n:{}'.format(v[0], v[1], v[-1], s, v[-1]-v[-2], len(v)) + + s='<{} object>:\n'.format(type(self).__name__) + s+='|Main attributes:\n' + s+='| - filename: {}\n'.format(self.filename) + + common_shape = self.common_shape + s+='| * common_shape: {}\n'.format(common_shape) + s+='| * is_meshgrid: {}\n'.format(self.is_meshgrid) + s+='| * x_values: {}\n'.format(axisvec_to_string(self.x_values, 'x')) + s+='| * y_values: {}\n'.format(axisvec_to_string(self.y_values, 'y')) + s+='|Main keys:\n' + if common_shape: + s+='| - data : length {}, shape {}\n'.format(len(self['data']), common_shape) + else: + s+='| - data : length {}, shapes {}\n'.format(len(self['data']), [x.shape for x in self['data']]) + s+='| - headers : {}\n'.format(self['headers']) + s+='| - column_names : {}\n'.format(self['column_names']) + s+='|Main methods:\n' + s+='| - read, write, toDataFrame, to2DFields, keys' + return s + + def toString(self): + """ """ + s='' + return s + +# --------------------------------------------------------------------------------} +# --- Some hlper functions +# --------------------------------------------------------------------------------{ +def check_same_shape(arrays): + if not arrays: + return False, False + num_columns = arrays[0].shape[1] + num_rows = arrays[0].shape[0] + same_columns = all(array.shape[1] == num_columns for array in arrays) + same_rows = all(array.shape[0] == num_rows for array in arrays) + return same_columns, same_rows, num_columns, num_rows + +def check_first_column_same(arrays): + if not arrays: + return None + first_column = arrays[0][:, 0] + for array in arrays[1:]: + if not np.array_equal(array[:, 0], first_column): + return None + return first_column + +def check_second_column_unique(arrays): + if not arrays: + return None + unique_values = [] + if len(arrays[0].shape)!=2: + return None + for array in arrays: + if array.shape[1]<2: + return None + second_column = array[:, 1] + unique_value = np.unique(second_column) + if len(unique_value) != 1: + return None + unique_values.append(unique_value[0]) + return np.array(unique_values) + +def find_string_with_n_splits(strings, n): + for string in strings: + splits = string.split() + if len(splits) == n: + return splits + return None + +def default_colnames(n): + colNames=['C{}'.format(i) for i in range(n)] + colNames[0] = 'x' + if len(colNames)>1: + colNames[1] = 'y' + return colNames + +if __name__ == '__main__': + from welib.essentials import * + #plt = GNUPlotFile('diff.000000.dat') + plt = GNUPlotFile('C:/Work/Courses/440/project_solution/data/CFD_x.dat') + print(plt) + df =plt.toDataFrame() + print(df) + ds = plt.to2DFields() + print(ds) diff --git a/openfast_toolbox/io/hawc2_dat_file.py b/openfast_toolbox/io/hawc2_dat_file.py index 7ee37f6..4a6ba4a 100644 --- a/openfast_toolbox/io/hawc2_dat_file.py +++ b/openfast_toolbox/io/hawc2_dat_file.py @@ -103,7 +103,7 @@ def _write(self): nChannels = self.data.shape[1] SimTime = self.data[-1,0] #-self.data[0,0] # --- dat file - np.savetxt(datfilename, self.data, fmt=b'%16.8e') + np.savetxt(datfilename, self.data, fmt='%16.8e') # --- Sel file with open(selfilename, 'w') as f: if self.bHawc: diff --git a/openfast_toolbox/io/hawc2_pc_file.py b/openfast_toolbox/io/hawc2_pc_file.py index 74e69bd..e7a75c5 100644 --- a/openfast_toolbox/io/hawc2_pc_file.py +++ b/openfast_toolbox/io/hawc2_pc_file.py @@ -77,7 +77,7 @@ def add_set(self, set_label, thicknesses, profiles): @property def sets(self): - return self.pc_sets + return self.data.pc_sets def __repr__(self): 
cols=['Alpha_[deg]','Cl_[-]','Cd_[-]','Cm_[-]'] diff --git a/openfast_toolbox/io/mini_yaml.py b/openfast_toolbox/io/mini_yaml.py index 0650c0e..db10ee5 100644 --- a/openfast_toolbox/io/mini_yaml.py +++ b/openfast_toolbox/io/mini_yaml.py @@ -54,14 +54,50 @@ def yaml_read(filename=None, dictIn=None, lines=None, text=None): d[key]=float(val) except: d[key]=val.strip() # string + elif len(sp)>=2 and sp[1].strip()[0]=='{': + key = sp[0] + i1=l.find('{') + i2=l.find('}') + LL = l[i1:i2+1] + D=_str2dict(LL) + d[key] = D + elif len(sp)>=2: + key = sp[0] + value = ':'.join(sp[1:]) + d[key] = value + print('mini_yaml: Setting {}={}'.format(key, value)) + else: - raise Exception('Line {:d} has colon, number of splits is {}, which is not supported'.format(len(sp))) + raise Exception('Line {:d} has colon, number of splits is {}, which is not supported'.format(i,len(sp))) return d def _cleanComment(l): """ remove comments from a line""" return l.split('#')[0].strip() + +def _str2dict(string): + string = string.strip('{}') + pairs = string.split(', ') + d={} + for pair in pairs: + print(pair) + sp = pair.split(':') + key = sp[0].strip() + value = sp[1].strip() + try: + d[key] = int(value) + continue + except ValueError: + pass + try: + d[key] = float(value) + continue + except ValueError: + pass + d[key] = value + return d + def _readDashList(iStart, lines): """ """ i=iStart diff --git a/openfast_toolbox/io/pickle_file.py b/openfast_toolbox/io/pickle_file.py index 97bd289..908cbde 100644 --- a/openfast_toolbox/io/pickle_file.py +++ b/openfast_toolbox/io/pickle_file.py @@ -50,7 +50,7 @@ def __init__(self, filename=None, data=None, **kwargs): self.filename = filename if filename and not data: self.read(**kwargs) - if data: + if data is not None: self._setData(data) if filename: self.write() diff --git a/openfast_toolbox/io/rosco_performance_file.py b/openfast_toolbox/io/rosco_performance_file.py index c075ef3..5c433b1 100644 --- a/openfast_toolbox/io/rosco_performance_file.py +++ b/openfast_toolbox/io/rosco_performance_file.py @@ -19,7 +19,7 @@ class ROSCOPerformanceFile(File): Main methods ------------ - - read, write, toDataFrame, keys + - read, write, toDataFrame, to2DFields, keys Examples -------- @@ -104,7 +104,7 @@ def write(self, filename=None): # Write write_rotor_performance(self.filename, self['pitch'], self['TSR'], self['CP'],self['CT'], self['CQ'], self['WS'], TurbineName=self.name) - def checkConsistency(self): + def checkConsistency(self, verbose=False): """ Check that data makes sense. 
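# --- Short illustration (not from the toolbox) of why the pickle_file change from
# `if data:` to `if data is not None:` above matters: legitimately empty containers
# are falsy and would previously be skipped silently.
data = {}                        # empty but valid data that should still be written
written_old = bool(data)         # False -> the old check would skip the write
written_new = data is not None   # True  -> the new check writes it
assert written_old is False and written_new is True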
in particular, check if CP=lambda CQ @@ -119,10 +119,12 @@ def checkConsistency(self): TSR = np.tile(tsr.flatten(), (len(self['pitch']),1)).T if CQ is None and CP is not None: CQ = CP/TSR - print('[INFO] Computing CQ from CP') + if verbose: + print('[INFO] Computing CQ from CP') elif CQ is not None and CP is None: CP = CQ*TSR - print('[INFO] Computing CP from CQ') + if verbose: + print('[INFO] Computing CP from CQ') elif CQ is not None and CP is not None: pass else: @@ -144,6 +146,22 @@ def toDataFrame(self): dfs['CQ'] = pd.DataFrame(np.column_stack((self['TSR'], self['CQ'])), columns=columns) return dfs + def to2DFields(self, **kwargs): + import xarray as xr + if len(kwargs.keys())>0: + print('[WARN] RoscoPerformance_CpCtCq: to2DFields: ignored keys: ',kwargs.keys()) + s1 = 'TSR' + s2 = 'pitch' + ds = xr.Dataset(coords={s1: self['TSR'], s2: self['pitch']}) + ds[s1].attrs['unit'] = '-' + ds[s2].attrs['unit'] = 'deg' + for var in ['CP','CT','CQ']: + M = self[var].copy() + M[M<0] = 0 + ds[var] = ([s1, s2], M) + ds[var].attrs['unit'] = '-' + return ds + # --- Optional functions def toAeroDisc(self, filename, R, csv=False, WS=None, omegaRPM=10): """ Convert to AeroDisc Format @@ -250,8 +268,7 @@ def plotCP3D(self, plotMax=True, trajectory=None): CP[CP<0]=0 # CP_max, tsr_max, pitch_max = self.CPmax() # plot - fig = plt.figure() - ax = fig.gca(projection='3d') + fig, ax = plt.subplots(subplot_kw={'projection': '3d'}) surf = ax.plot_surface(LAMBDA, PITCH, np.transpose(CP), cmap=cm.coolwarm, linewidth=0, antialiased=True,alpha=0.8, label='$C_p$') if plotMax: ax.scatter(tsr_max, pitch_max, CP_max, c='k', marker='o', s=50, label=r'$C_{p,max}$') @@ -406,7 +423,7 @@ def write_rotor_performance(txt_filename, pitch, TSR, CP, CT, CQ, WS=None, Turbi file.close() -def interp2d_pairs(*args,**kwargs): +def interp2d_pairs(X,Y,Z,**kwargs): """ Same interface as interp2d but the returned interpolant will evaluate its inputs as pairs of values. Inputs can therefore be arrays @@ -422,12 +439,20 @@ def interp2d_pairs(*args,**kwargs): author: E. 
Branlard """ import scipy.interpolate as si + # Wrapping the scipy interp2 function to call out interpolant instead + # --- OLD # Internal function, that evaluates pairs of values, output has the same shape as input - def interpolant(x,y,f): + #def interpolant(x,y,f): + # x,y = np.asarray(x), np.asarray(y) + # return (si.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], x.ravel(), y.ravel())[0]).reshape(x.shape) + #return lambda x,y: interpolant(x,y,si.interp2d(*args,**kwargs)) + # --- NEW + Finterp = si.RegularGridInterpolator((X,Y), Z.T, **kwargs) + #r = si.RectBivariateSpline(X, Y, Z.T) + def interpolant(x,y): x,y = np.asarray(x), np.asarray(y) - return (si.dfitpack.bispeu(f.tck[0], f.tck[1], f.tck[2], f.tck[3], f.tck[4], x.ravel(), y.ravel())[0]).reshape(x.shape) - # Wrapping the scipy interp2 function to call out interpolant instead - return lambda x,y: interpolant(x,y,si.interp2d(*args,**kwargs)) + return Finterp((x,y)) + return interpolant if __name__ == '__main__': diff --git a/openfast_toolbox/io/tests/test_fast_output.py b/openfast_toolbox/io/tests/test_fast_output.py index 91d14e7..7b527e4 100644 --- a/openfast_toolbox/io/tests/test_fast_output.py +++ b/openfast_toolbox/io/tests/test_fast_output.py @@ -28,11 +28,11 @@ def test_FASTOutBin(self): # Read written file F2= FASTOutputFile(tempFilename) # Test that read data match - np.testing.assert_almost_equal(F.data,F2.data, 4) - np.testing.assert_almost_equal(F.data[-1,-1] ,40.57663190807828, 10) - np.testing.assert_almost_equal(F2.data[-1,-1],40.57663190807828, 10) - self.assertEqual(F2.info['attribute_names'][-1],'GenPwr') - self.assertEqual(F2.info['attribute_units'][-1],'kW') + np.testing.assert_almost_equal(F.data.values,F2.data.values, 4) + np.testing.assert_almost_equal(F.data.values[-1,-1] ,40.57663190807828, 10) + np.testing.assert_almost_equal(F2.data.values[-1,-1],40.57663190807828, 10) + self.assertEqual(F2.channels[-1],'GenPwr') + self.assertEqual(F2.units[-1],'kW') # cleanup try: os.remove(tempFilename) diff --git a/openfast_toolbox/io/turbsim_file.py b/openfast_toolbox/io/turbsim_file.py index 0259c7d..e3e7d55 100644 --- a/openfast_toolbox/io/turbsim_file.py +++ b/openfast_toolbox/io/turbsim_file.py @@ -263,9 +263,9 @@ def _longiline(ts, iy0=None, iz0=None, removeMean=False): """ if iy0 is None: iy0,iz0 = ts.iMid - u = ts['u'][0,:,iy0,iz0] - v = ts['u'][1,:,iy0,iz0] - w = ts['u'][2,:,iy0,iz0] + u = ts['u'][0,:,iy0,iz0].copy() + v = ts['u'][1,:,iy0,iz0].copy() + w = ts['u'][2,:,iy0,iz0].copy() if removeMean: u -= np.mean(u) v -= np.mean(v) @@ -279,9 +279,9 @@ def _latline(ts, ix0=None, iz0=None, removeMean=False): if ix0 is None: iy0,iz0 = ts.iMid ix0=int(len(ts['t'])/2) - u = ts['u'][0,ix0,:,iz0] - v = ts['u'][1,ix0,:,iz0] - w = ts['u'][2,ix0,:,iz0] + u = ts['u'][0,ix0,:,iz0].copy() + v = ts['u'][1,ix0,:,iz0].copy() + w = ts['u'][2,ix0,:,iz0].copy() if removeMean: u -= np.mean(u) v -= np.mean(v) @@ -295,9 +295,9 @@ def _vertline(ts, ix0=None, iy0=None, removeMean=False): if ix0 is None: iy0,iz0 = ts.iMid ix0=int(len(ts['t'])/2) - u = ts['u'][0,ix0,iy0,:] - v = ts['u'][1,ix0,iy0,:] - w = ts['u'][2,ix0,iy0,:] + u = ts['u'][0,ix0,iy0,:].copy() + v = ts['u'][1,ix0,iy0,:].copy() + w = ts['u'][2,ix0,iy0,:].copy() if removeMean: u -= np.mean(u) v -= np.mean(v) @@ -308,7 +308,7 @@ def _vertline(ts, ix0=None, iy0=None, removeMean=False): # --- Extracting plane data at one point # --------------------------------------------------------------------------------{ def horizontalPlane(ts, z=None, iz0=None, 
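# --- Quick standalone check (synthetic values) of the pair-evaluating interpolant
# pattern introduced in interp2d_pairs above, using scipy's RegularGridInterpolator
# in place of the removed interp2d. Grid names and values are made up.
import numpy as np
from scipy.interpolate import RegularGridInterpolator

X = np.linspace(0, 10, 11)          # e.g. TSR values
Y = np.linspace(0, 20, 21)          # e.g. pitch values
Z = np.add.outer(Y, X)              # shape (len(Y), len(X)), as interp2d expected
f = RegularGridInterpolator((X, Y), Z.T)
x = np.array([1.5, 2.5]); y = np.array([3.0, 4.0])
print(f((x, y)))                    # -> [4.5, 6.5], inputs evaluated as pairs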
removeMean=False): - """ return velocity components on a horizontal plane + """ return velocity components on a horizontal plane z=cst, (time x ny) If no z value is provided, returned at mid box """ if z is None and iz0 is None: @@ -316,9 +316,9 @@ def horizontalPlane(ts, z=None, iz0=None, removeMean=False): elif z is not None: _, iz0 = ts.closestPoint(ts.y[0], z) - u = ts['u'][0,:,:,iz0] - v = ts['u'][1,:,:,iz0] - w = ts['u'][2,:,:,iz0] + u = ts['u'][0,:,:,iz0].copy() + v = ts['u'][1,:,:,iz0].copy() + w = ts['u'][2,:,:,iz0].copy() if removeMean: u -= np.mean(u) v -= np.mean(v) @@ -326,7 +326,7 @@ def horizontalPlane(ts, z=None, iz0=None, removeMean=False): return u, v, w def verticalPlane(ts, y=None, iy0=None, removeMean=False): - """ return velocity components on a vertical plane + """ return velocity components on a vertical plane y=cst, (time x nz) If no y value is provided, returned at mid box """ if y is None and iy0 is None: @@ -334,15 +334,33 @@ def verticalPlane(ts, y=None, iy0=None, removeMean=False): elif y is not None: iy0, _ = ts.closestPoint(y, ts.z[0]) - u = ts['u'][0,:,iy0,:] - v = ts['u'][1,:,iy0,:] - w = ts['u'][2,:,iy0,:] + u = ts['u'][0,:,iy0,:].copy() + v = ts['u'][1,:,iy0,:].copy() + w = ts['u'][2,:,iy0,:].copy() if removeMean: u -= np.mean(u) v -= np.mean(v) w -= np.mean(w) return u, v, w + def normalPlane(ts, t=None, it0=None, removeMean=False): + """ return velocity components on a normal plane t=cst, (ny x nz) + """ + if t is None and it0 is None: + it0 = 0 + elif t is not None: + it0 = np.argmin(np.abs(self['t']-t)) + + u = ts['u'][0,it0,:,:].copy() + v = ts['u'][1,it0,:,:].copy() + w = ts['u'][2,it0,:,:].copy() + if removeMean: + u -= np.mean(u) + v -= np.mean(v) + w -= np.mean(w) + return u, v, w + + # --------------------------------------------------------------------------------} # --- Extracting average data # --------------------------------------------------------------------------------{ @@ -353,8 +371,9 @@ def vertProfile(self, y_span='full'): if 'mid', average the vertical profile at the middle y value """ if y_span=='full': + # Compute statistics with respect to time first, then average over "y" m = np.mean(np.mean(self['u'][:,:,:,:], axis=1), axis=1) - s = np.std( np.std( self['u'][:,:,:,:], axis=1), axis=1) + s = np.mean(np.std( self['u'][:,:,:,:], axis=1), axis=1) elif y_span=='mid': iy, iz = self.iMid m = np.mean(self['u'][:,:,iy,:], axis=1) @@ -647,6 +666,9 @@ def toDataFrame(self): # Mid csd try: + import warnings + with warnings.catch_warnings(): + warnings.filterwarnings('ignore') #, category=DeprecationWarning) fc, chi_uu, chi_vv, chi_ww = self.csd_longi() cols = ['f_[Hz]','chi_uu_[-]', 'chi_vv_[-]','chi_ww_[-]'] data = np.column_stack((fc, chi_uu, chi_vv, chi_ww)) @@ -681,6 +703,63 @@ def toDataFrame(self): # pass return dfs + def to2DFields(self, nTOut=10, nYOut=3, nZOut=3, **kwargs): + import xarray as xr + if len(kwargs.keys())>0: + print('[WARN] TurbSimFile: to2DFields: ignored keys: ',kwargs.keys()) + # Sanity check + nTOut = int(nTOut) + nYOut = int(nYOut) + nZOut = int(nZOut) + # 'u': velocity field, shape (3 x nt x ny x nz) + IT = list(np.arange(nTOut)) + [len(self.t)-1] + IT = np.unique(IT) + + IY = np.unique(np.linspace(0,len(self['y'])-1, nYOut).astype(int)) + IZ = np.unique(np.linspace(0,len(self['z'])-1, nZOut).astype(int)) + Fields=[] + # --- YZ planes + s1 = 'TSR' + s2 = 'pitch' + ds = xr.Dataset(coords={'t': self.t, 'y': self.y, 'z': self.z}) + ds['t'].attrs['unit'] = 's' + ds['y'].attrs['unit'] = 'm' + ds['z'].attrs['unit'] 
= 'm' + + ds['(y,z)_u_avg_[m/s]']= (['y','z'], np.squeeze(np.mean(self['u'][0,:,:,:], axis=0))) + ds['(y,z)_v_avg_[m/s]']= (['y','z'], np.squeeze(np.mean(self['u'][1,:,:,:], axis=0))) + ds['(y,z)_w_avg_[m/s]']= (['y','z'], np.squeeze(np.mean(self['u'][2,:,:,:], axis=0))) + + ds['(t,y)_u_avg_[m/s]']= (['t','y'], np.squeeze(np.mean(self['u'][0,:,:,:], axis=2))) + ds['(t,y)_v_avg_[m/s]']= (['t','y'], np.squeeze(np.mean(self['u'][1,:,:,:], axis=2))) + ds['(t,y)_w_avg_[m/s]']= (['t','y'], np.squeeze(np.mean(self['u'][2,:,:,:], axis=2))) + + ds['(t,z)_u_avg_[m/s]']= (['t','z'], np.squeeze(np.mean(self['u'][0,:,:,:], axis=1))) + ds['(t,z)_v_avg_[m/s]']= (['t','z'], np.squeeze(np.mean(self['u'][1,:,:,:], axis=1))) + ds['(t,z)_w_avg_[m/s]']= (['t','z'], np.squeeze(np.mean(self['u'][2,:,:,:], axis=1))) + + for it in IT: + ds['(y,z)_u_t={:.2f}_[m/s]'.format(self.t[it])] = (['y','z'], np.squeeze(self['u'][0,it,:,:])) + for it in IT: + ds['(y,z)_v_t={:.2f}_[m/s]'.format(self.t[it])] = (['y','z'], np.squeeze(self['u'][1,it,:,:])) + for it in IT: + ds['(y,z)_w_t={:.2f}_[m/s]'.format(self.t[it])] = (['y','z'], np.squeeze(self['u'][2,it,:,:])) + # --- TZ planes + for iy in IY: + ds['(t,z)_u_y={:.2f}_[m/s]'.format(self['y'][iy])] = (['t','z'], np.squeeze(self['u'][0,:,iy,:])) + for iy in IY: + ds['(t,z)_v_y={:.2f}_[m/s]'.format(self['y'][iy])] = (['t','z'], np.squeeze(self['u'][1,:,iy,:])) + for iy in IY: + ds['(t,z)_w_y={:.2f}_[m/s]'.format(self['y'][iy])] = (['t','z'], np.squeeze(self['u'][2,:,iy,:])) + # --- TY planes + for iz in IZ: + ds['(t,y)_u_z={:.2f}_[m/s]'.format(self['z'][iz])] = (['t','y'], np.squeeze(self['u'][0,:,:,iz])) + for iz in IZ: + ds['(t,y)_v_z={:.2f}_[m/s]'.format(self['z'][iz])] = (['t','y'], np.squeeze(self['u'][1,:,:,iz])) + for iz in IZ: + ds['(t,y)_w_z={:.2f}_[m/s]'.format(self['z'][iz])] = (['t','y'], np.squeeze(self['u'][2,:,:,iz])) + return ds + def toDataset(self): """ Convert the data that was read in into a xarray Dataset @@ -771,7 +850,7 @@ def fromAMRWind(self, filename, timestep, output_frequency, sampling_identifier, height to be written to turbsim as the reference height. if none is given, it is taken as the vertical centerpoint of the slice """ try: - from openfast_toolbox.io.amrwind_file import AMRWindFile + from welib.wio.amrwind_file import AMRWindFile except: try: from .amrwind_file import AMRWindFile @@ -812,7 +891,6 @@ def fromAMRWind(self, filename, timestep, output_frequency, sampling_identifier, if verbose: print("===> {0}".format(fileout)) self.write(fileout) - def fromAMRWind_legacy(self, filename, dt, nt, y, z, sampling_identifier='p_sw2'): """ @@ -821,7 +899,6 @@ def fromAMRWind_legacy(self, filename, dt, nt, y, z, sampling_identifier='p_sw2' -- u, v, w (nt, nx * ny * nz) -- u is aligned with x-axis (flow is not rotated) - this consideration needs to be added - INPUTS: - filename: (string) full path to .nc sampling data file - sampling_identifier: (string) name of sampling plane group from .inp file (e.g. 
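# --- Minimal sketch (synthetic data, not from the toolbox) of the pattern used in
# to2DFields above: average a (3, nt, ny, nz) velocity field over one axis and store
# the resulting planes in an xarray Dataset.
import numpy as np
import xarray as xr

nt, ny, nz = 100, 5, 7
t = np.linspace(0, 10, nt); y = np.linspace(-10, 10, ny); z = np.linspace(0, 20, nz)
u = np.random.rand(3, nt, ny, nz)                            # placeholder for ts['u']
ds = xr.Dataset(coords={'t': t, 'y': y, 'z': z})
ds['(y,z)_u_avg_[m/s]'] = (['y', 'z'], u[0].mean(axis=0))    # time-averaged u on the YZ plane
ds['(t,z)_u_avg_[m/s]'] = (['t', 'z'], u[0].mean(axis=1))    # y-averaged u as a function of t, z
print(ds['(y,z)_u_avg_[m/s]'].shape)                          # (5, 7)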
"p_sw2") diff --git a/openfast_toolbox/io/vtk_file.py b/openfast_toolbox/io/vtk_file.py index e3ed2f7..6c4ab85 100644 --- a/openfast_toolbox/io/vtk_file.py +++ b/openfast_toolbox/io/vtk_file.py @@ -35,6 +35,13 @@ class VTKFile(File): - cells - cell_data + Main attributes for polydata: + --------- + - points + - point_data + - polygons + - cell_data + Main methods ------------ - read, write @@ -42,8 +49,8 @@ class VTKFile(File): Examples -------- vtk = VTKFile('DisXZ1.vtk') - x = vtk.x_grid - z = vtk.z_grid + x = vtk.xp_grid + z = vtk.zp_grid Ux = vtk.point_data_grid['DisXZ'][:,0,:,0] """ @@ -61,13 +68,17 @@ def __init__(self,filename=None,**kwargs): self.xp_grid=None # location of points self.yp_grid=None self.zp_grid=None - self.point_data_grid = None + self.point_data_grid = {} # Main Data - self.points = None + self.header = '' + self.points = None + self.polygons = None + self.cells = None + self.cell_data = None self.field_data = {} - self.point_data = {} - self.dataset = {} + self.point_data = {} + self.dataset = {'type':None} @@ -86,7 +97,7 @@ def __init__(self,filename=None,**kwargs): self.read(filename=filename,**kwargs) - def read(self, filename=None): + def read(self, filename=None, verbose=False): """ read a VTK file """ if filename: self.filename = filename @@ -97,47 +108,10 @@ def read(self, filename=None): if os.stat(self.filename).st_size == 0: raise EmptyFileError('File is empty:',self.filename) - with open(filename, "rb") as f: - # initialize output data - # skip header and title - f.readline() - f.readline() - - data_type = f.readline().decode("utf-8").strip().upper() - if data_type not in ["ASCII", "BINARY"]: - raise ReadError('Unknown VTK data type ',data_type) - self.is_ascii = data_type == "ASCII" - - while True: - line = f.readline().decode("utf-8") - if not line: - # EOF - break - - line = line.strip() - if len(line) == 0: - continue - - self.split = line.split() - self.section = self.split[0].upper() - - if self.section in vtk_sections: - _read_section(f, self) - else: - _read_subsection(f, self) - - # --- Postpro - _check_mesh(self) # generate points if needed - cells, cell_data = translate_cells(self.c, self.ct, self.cell_data_raw) - self.cells = cells - self.cell_data = cell_data - - if self.dataset['type']=='STRUCTURED_POINTS': - self.point_data_grid = {} - # We provide point_data_grid, corresponds to point_data but reshaped - for k,PD in self.point_data.items(): - # NOTE: tested foe len(y)=1, len(z)=1 - self.point_data_grid[k]=PD.reshape(len(self.xp_grid), len(self.yp_grid), len(self.zp_grid),PD.shape[1], order='F') + # Calling wrapped function + read_vtk(filename, self, verbose=verbose) + if verbose: + print('Dataset',self.dataset) def write(self, filename=None, binary=True): @@ -151,54 +125,7 @@ def write(self, filename=None, binary=True): if not self.filename: raise Exception('No filename provided') - - def pad(array): - return np.pad(array, ((0, 0), (0, 1)), "constant") - - if self.points.shape[1] == 2: - points = pad(self.points) - else: - points = self.points - - if self.point_data: - for name, values in self.point_data.items(): - if len(values.shape) == 2 and values.shape[1] == 2: - self.point_data[name] = pad(values) - - for name, data in self.cell_data.items(): - for k, values in enumerate(data): - if len(values.shape) == 2 and values.shape[1] == 2: - data[k] = pad(data[k]) - - with open(filename, "wb") as f: - f.write(b"# vtk DataFile Version 4.2\n") - f.write("written \n".encode("utf-8")) - f.write(("BINARY\n" if binary else 
"ASCII\n").encode("utf-8")) - f.write(b"DATASET UNSTRUCTURED_GRID\n") - - # write points and cells - _write_points(f, points, binary) - _write_cells(f, self.cells, binary) - - # write point data - if self.point_data: - num_points = self.points.shape[0] - f.write("POINT_DATA {}\n".format(num_points).encode("utf-8")) - _write_field_data(f, self.point_data, binary) - - # write cell data - if self.cell_data: - total_num_cells = sum(len(c.data) for c in self.cells) - f.write("CELL_DATA {}\n".format(total_num_cells).encode("utf-8")) - _write_field_data(f, self.cell_data, binary) - - - def __repr__(self): - s='<{} object> with keys:\n'.format(type(self).__name__) - for k,v in self.items(): - s+=' - {}: {}\n'.format(k,v) - return s - + write_vtk(self.filename, self) def __repr__(self): """ print function """ @@ -213,255 +140,246 @@ def show_grid(v,s): lines.append('- {}: [{} ... {}], dx: {}, n: {}'.format(s,v[0],v[-1],v[1]-v[0],len(v))) lines = ['<{} object> with attributes:'.format(type(self).__name__)] - show_grid(self.xp_grid, 'xp_grid') - show_grid(self.yp_grid, 'yp_grid') - show_grid(self.zp_grid, 'zp_grid') - if self.point_data_grid: - lines.append('- point_data_grid:') - for k,v in self.point_data_grid.items(): - lines.append(' "{}" : {}'.format(k,v.shape)) + try: + lines.append('- dataset: {}'.format(self.dataset)) + except: + pass + + # grid + try: + show_grid(self.xp_grid, 'xp_grid') + show_grid(self.yp_grid, 'yp_grid') + show_grid(self.zp_grid, 'zp_grid') + except: + pass + + lines.append('- point_data_grid:') + for k,v in self.point_data_grid.items(): + lines.append(' "{}" : {}'.format(k,v.shape)) lines.append('- points {}'.format(len(self.points))) - if len(self.cells) > 0: + if self.cells is not None and len(self.cells) > 0: lines.append("- cells:") for tpe, elems in self.cells: lines.append(" {}: {}".format(tpe,len(elems))) else: - lines.append(" No cells.") + lines.append("- cells: None") + if self.polygons is not None: + lines.append("- polygons: {}, consiting of {} points".format(len(self.polygons), len(self.polygons[0]))) + else: + lines.append("- polygons: None") - if self.point_data: - lines.append('- point_data:') - for k,v in self.point_data.items(): - lines.append(' "{}" : {}'.format(k,v.shape)) + lines.append('- point_data:') + for k,v in self.point_data.items(): + lines.append(' "{}" : {}'.format(k,v.shape)) - if self.cell_data: - names = ", ".join(self.cell_data.keys()) - lines.append(" Cell data: {}".format(names)) + names = ", ".join(self.cell_data.keys()) + lines.append("- cell_data: {}".format(names)) return "\n".join(lines) def toDataFrame(self): - return None + if self.dataset['type'] == 'STRUCTURED_POINTS': + data = np.zeros((2,3)) + data[:,0] = [np.min(self.xp_grid), np.max(self.xp_grid)] + data[:,1] = [np.min(self.yp_grid), np.max(self.yp_grid)] + data[:,2] = [np.min(self.zp_grid), np.max(self.zp_grid)] + return pd.DataFrame(data=data, columns=['xRange', 'yRange', 'zRange']) + #if self.point_data_grid is not None: + # for k,v in self.point_data_grid.items(): + + else: + print('[WARN] vtk_file: toDataFrame not implemented for dataset type: {}'.format(self.dataset['type'])) + data = np.zeros((2,2)) + data[:,0] = [0,1] + return pd.DataFrame(data=data, columns=['Dummy1', 'Dummy2']) + return None + + def to2DFields(self, **kwargs): + import xarray as xr + if len(kwargs.keys())>0: + print('[WARN] VTKFile: to2DFields: ignored keys: ',kwargs.keys()) + + ds = None + if self.dataset['type'] == 'STRUCTURED_POINTS': + I = 
np.where(np.asarray(self.dataset['DIMENSIONS'])==1)[0] + if len(I) == 1: + icst = I[0] + # --- 2D velocity fields + ds = xr.Dataset(coords={'x': self.xp_grid, 'y': self.yp_grid, 'z': self.zp_grid}) + dims = {0:['y','z'], 1:['x','z'], 2:['x','y']}[icst] + for k,v in self.point_data_grid.items(): + if v.shape[-1]==3: + ds[k + '_x'] = (dims, np.squeeze(v[:,:,:,0])) + ds[k + '_y'] = (dims, np.squeeze(v[:,:,:,1])) + ds[k + '_z'] = (dims, np.squeeze(v[:,:,:,2])) + else: + ds[k] = (dims, np.squeeze(v[:,:,:,0])) + else: + print('[WARN] VTKFile: field dimension not supported: {}'.format(self.dataset['DIMENSIONS'])) + return None + else: + print('[WARN] VTKFile: datatype not implemented/suitable for 2D fields: {}'.format(self.dataset['type'])) + return None + return ds + +# -------------------------------------------------------------------------------- +# --- Simple/dedicated reader and writers +# -------------------------------------------------------------------------------- +def write_dataset_unstructured_grid(filename, points, cells, point_data=None, cell_data=None, header='', binary=False): + def pad(array): + return np.pad(array, ((0, 0), (0, 1)), "constant") + + if points.shape[1] == 2: + points = pad(points) + else: + points = points + + if point_data: + for name, values in point_data.items(): + if len(values.shape) == 2 and values.shape[1] == 2: + point_data[name] = pad(values) + + for name, data in cell_data.items(): + for k, values in enumerate(data): + if len(values.shape) == 2 and values.shape[1] == 2: + data[k] = pad(data[k]) + + with open(filename, "wb") as f: + f.write(b"# vtk DataFile Version 4.2\n") + f.write((header.strip()+"\n").encode("utf-8")) + f.write(("BINARY\n" if binary else "ASCII\n").encode("utf-8")) + f.write(b"DATASET UNSTRUCTURED_GRID\n") + + # write points and cells + _write_points(f, points, binary) + _write_cells(f, cells, binary) + + # write point data + if point_data is not None: + num_points = points.shape[0] + f.write("POINT_DATA {}\n".format(num_points).encode("utf-8")) + _write_field_data(f, point_data, binary) + + # write cell data + if cell_data is not None: + total_num_cells = sum(len(c.data) for c in cells) + f.write("CELL_DATA {}\n".format(total_num_cells).encode("utf-8")) + _write_field_data(f, cell_data, binary) +def write_dataset_polydata(filename, points, polygons, point_data=None, cell_data=None, uniquePoints=False, header='', binary=False): + if uniquePoints: + # NOTE: this will completed change the order + # Create list of points per polygon + polygons_points = [ [tuple(points[i,:]) for i in polygon] for polygon in polygons] + + # Create a unique list of points + points_list = [tuple(row) for row in points] + points = list(set(points_list)) + # Convert the array to a view with a structured dtype + #pview = np.ascontiguousarray(points).view(np.dtype((np.void, points.dtype.itemsize * points.shape[1]))) + #points = np.unique(pview).view(points.dtype).reshape(-1, points.shape[1]) + + # Update indices in the connectivity table + polygons = [[points.index(point) for point in poly_points] for poly_points in polygons_points] + with open(filename, "wb") as f: + f.write(b"# vtk DataFile Version 2.0\n") + f.write((header.strip()+"\n").encode("utf-8")) + f.write(("BINARY\n" if binary else "ASCII\n").encode("utf-8")) + f.write(b"DATASET POLYDATA\n") + _write_points(f, points, binary) + _write_polygons(f, polygons, binary) + if point_data: + num_points = points.shape[0] + f.write("POINT_DATA {}\n".format(num_points).encode("utf-8")) + _write_field_data(f, 
point_data, binary) -# Save FlowData Object to vtk -# """ -# n_points = self.dimensions.x1 * self.dimensions.x2 * self.dimensions.x3 -# vtk_file = Output(filename) - #self.file = open(self.filename, "w") - #self.ln = "\n" -# vtk_file.write_line('# vtk DataFile Version 3.0') -# vtk_file.write_line('array.mean0D') -# vtk_file.write_line('ASCII') -# vtk_file.write_line('DATASET STRUCTURED_POINTS') -# vtk_file.write_line('DIMENSIONS {}'.format(self.dimensions)) -# vtk_file.write_line('ORIGIN {}'.format(self.origin)) -# vtk_file.write_line('SPACING {}'.format(self.spacing)) -# vtk_file.write_line('POINT_DATA {}'.format(n_points)) -# vtk_file.write_line('FIELD attributes 1') -# vtk_file.write_line('UAvg 3 {} float'.format(n_points)) -# for u, v, w in zip(self.u, self.v, self.w): -# vtk_file.write_line('{}'.format(Vec3(u, v, w))) - -# --- Paraview -# except: from paraview.simple import * -# sliceFile = sliceDir + '/' + tStartStr + '/' + sliceName -# print ' Slice file 1a: ' + sliceFile -# slice_1a_vtk = LegacyVTKReader( FileNames=[sliceFile] ) -# sliceFile = sliceDir + '/' + tEndStr + '/' + sliceName -# DataRepresentation3 = GetDisplayProperties(slice_1a_vtk) -# DataRepresentation3.Visibility = 0 -# SetActiveSource(slice_1a_vtk) - -# --- VTK -# import np -# from vtk import vtkStructuredPointsReader -# from vtk.util import np as VN -# -# reader = vtkStructuredPointsReader() -# reader.SetFileName(filename) -# reader.ReadAllVectorsOn() -# reader.ReadAllScalarsOn() -# reader.Update() -# -# data = reader.GetOutput() -# -# dim = data.GetDimensions() -# vec = list(dim) -# vec = [i-1 for i in dim] -# vec.append(3) -# -# u = VN.vtk_to_np(data.GetCellData().GetArray('velocity')) -# b = VN.vtk_to_numpy(data.GetCellData().GetArray('cell_centered_B')) -# -# u = u.reshape(vec,order='F') -# b = b.reshape(vec,order='F') -# -# x = zeros(data.GetNumberOfPoints()) -# y = zeros(data.GetNumberOfPoints()) -# z = zeros(data.GetNumberOfPoints()) -# -# for i in range(data.GetNumberOfPoints()): -# x[i],y[i],z[i] = data.GetPoint(i) -# -# x = x.reshape(dim,order='F') -# y = y.reshape(dim,order='F') -# z = z.reshape(dim,order='F') - -# --- vtk -# import vtk -# import numpy -# import vtk_demo.version as version -# -# -# def main(): -# """ -# :return: The render window interactor. -# """ -# -# chessboard_resolution = 5 -# n_lut_colors = 256 -# data_max_value = 1 -# -# # Provide some geometry -# chessboard = vtk.vtkPlaneSource() -# chessboard.SetXResolution(chessboard_resolution) -# chessboard.SetYResolution(chessboard_resolution) -# num_squares = chessboard_resolution * chessboard_resolution -# # Force an update so we can set cell data -# chessboard.Update() -# -# # Make some arbitrary data to show on the chessboard geometry -# data = vtk.vtkFloatArray() -# for i in range(num_squares): -# if i == 4: -# # This square should in principle light up with color given by SetNanColor below -# data.InsertNextTuple1(numpy.nan) -# else: -# thing = (i * data_max_value) / (num_squares - 1) -# data.InsertNextTuple1(thing) -# -# # Make a LookupTable -# lut = vtk.vtkLookupTable() -# lut.SetNumberOfColors(n_lut_colors) -# lut.Build() -# lut.SetTableRange(0, data_max_value) -# lut.SetNanColor(.1, .5, .99, 1.0) # <------ This color gets used -# for i in range(n_lut_colors): -# # Fill it with arbitrary colors, e.g. grayscale -# x = data_max_value*i/(n_lut_colors-1) -# lut.SetTableValue(i, x, x, x, 1.0) -# lut.SetNanColor(.99, .99, .1, 1.0) # <----- This color gets ignored! 
...except by GetNanColor -# -# print(lut.GetNanColor()) # <-- Prints the color set by the last SetNanColor call above! -# -# chessboard.GetOutput().GetCellData().SetScalars(data) -# -# mapper = vtk.vtkPolyDataMapper() -# mapper.SetInputConnection(chessboard.GetOutputPort()) -# mapper.SetScalarRange(0, data_max_value) -# mapper.SetLookupTable(lut) -# mapper.Update() -# -# actor = vtk.vtkActor() -# actor.SetMapper(mapper) -# -# renderer = vtk.vtkRenderer() -# ren_win = vtk.vtkRenderWindow() -# ren_win.AddRenderer(renderer) -# renderer.SetBackground(vtk.vtkNamedColors().GetColor3d('MidnightBlue')) -# renderer.AddActor(actor) -# -# iren = vtk.vtkRenderWindowInteractor() - -# --- pyvtk -# """Read vtk-file stored previously with tovtk.""" -# p = pyvtk.VtkData(filename) -# xn = array(p.structure.points) -# dims = p.structure.dimensions -# try: -# N = eval(p.header.split(" ")[-1]) - - # --- Extract - # # Convert the center of the turbine coordinate in m to High-Resolution domains left most corner in (i,j) - # xe_index = int(origin_at_precusr[0]/10) - # ye_index = int(origin_at_precusr[1]/10) - # - # # Read the full domain from VTK - # reader = vtk.vtkStructuredPointsReader() - # reader.SetFileName(in_vtk) - # reader.Update() - # - # # Extract the High Resolution domain at same spacial spacing by specifying the (i,i+14),(j,j+14),(k,k+20) tuples - # extract = vtk.vtkExtractVOI() - # extract.SetInputConnection(reader.GetOutputPort()) - # extract.SetVOI(xe_index, xe_index+14, ye_index, ye_index+14, 0, 26) - # extract.SetSampleRate(1, 1, 1) - # extract.Update() - # - # # Write the extract as VTK - # points = extract.GetOutput() - # vec = points.GetPointData().GetVectors('Amb') - # - # with open(out_vtk, 'a') as the_file: - # the_file.write('# vtk DataFile Version 3.0\n') - # the_file.write('High\n') - # the_file.write('ASCII\n') - # the_file.write('DATASET STRUCTURED_POINTS\n') - # the_file.write('DIMENSIONS %d %d %d\n' % points.GetDimensions()) - # the_file.write('ORIGIN %f %f %f\n' % origin_at_stitch) - # the_file.write('SPACING %f %f %f\n' % points.GetSpacing()) - # the_file.write('POINT_DATA %d\n' % points.GetNumberOfPoints()) - # the_file.write('VECTORS Amb float\n') - # for i in range(points.GetNumberOfPoints()): - # the_file.write('%f %f %f\n' % vec.GetTuple(i) ) - - # --- Stitch - # reader = vtk.vtkStructuredPointsReader() - # reader.SetFileName(in_vtk) - # reader.Update() - # - # hAppend = vtk.vtkImageAppend() - # hAppend.SetAppendAxis(0) - # for i in range(nx): - # hAppend.AddInputData(reader.GetOutput()) - # hAppend.Update() - # - # vAppend = vtk.vtkImageAppend() - # vAppend.SetAppendAxis(1) - # for i in range(ny): - # vAppend.AddInputData(hAppend.GetOutput()) - # vAppend.Update() - # - # points = vAppend.GetOutput() - # vec = points.GetPointData().GetVectors('Amb') - # - # with open(out_vtk, 'a') as the_file: - # the_file.write('# vtk DataFile Version 3.0\n') - # the_file.write('Low\n') - # the_file.write('ASCII\n') - # the_file.write('DATASET STRUCTURED_POINTS\n') - # the_file.write('DIMENSIONS %d %d %d\n' % points.GetDimensions()) - # the_file.write('ORIGIN %f %f %f\n' % points.GetOrigin()) - # the_file.write('SPACING %f %f %f\n' % points.GetSpacing()) - # the_file.write('POINT_DATA %d\n' % points.GetNumberOfPoints()) - # the_file.write('VECTORS Amb float\n') - # for i in range(points.GetNumberOfPoints()): - # the_file.write('%f %f %f\n' % vec.GetTuple(i) ) - - - # + ## write cell data + if cell_data: + total_num_cells = len(polygons) + f.write("CELL_DATA 
{}\n".format(total_num_cells).encode("utf-8")) + _write_field_data(f, cell_data, binary) +def read_dataset_polydata(file_path): + """ Simple dedicated function to read a ASCII VTK file with polygons and field data""" + points = [] + polygons = [] + cell_data = {} + point_data = {} + + with open(file_path, 'r') as file: + lines = file.readlines() + + data_section = 'header' + header=[] + + for line in lines: + if len(line.strip())==0: + continue + if line.startswith("POINTS"): + data_section = "points" + continue + elif line.startswith("POLYGONS"): + data_section = "polygons" + continue + elif line.startswith("POINT_DATA") or line.startswith("CELL_DATA"): + data_section = "cell_data" + continue + elif line.startswith("SCALARS") : + sp = line.split() + field_name = sp[1] + field_type = sp[2] + data_section = "cell_data_scalar" + cell_data[field_name] = [] + continue + elif line.startswith("VECTORS"): + sp = line.split() + field_name = sp[1] + field_type = sp[2] + data_section = "cell_data_vector" + cell_data[field_name] = [] + continue + elif line.startswith("LOOKUP_TABLE"): + continue + + if data_section == "points": + # Extract points + point = list(map(float, line.split()[:])) + points.append(point) + elif data_section == "polygons": + # Extract polygons + polygon_data = list(map(int, line.split()[1:])) + polygons.append(polygon_data[:]) # Ignore the first value (number of vertices) + elif data_section == "cell_data_scalar": + field_values = list(map(float, line.split())) + cell_data[field_name].extend(field_values) + elif data_section == "cell_data_vector": + if line.startswith("LOOKUP_TABLE"): + continue + field_values = list(map(float, line.split())) + cell_data[field_name].append(field_values) + elif data_section == "header": + header.append(line) + else: + print('>>> Unknown data_section', data_section) + raise NotImplementedError() + points = np.asarray(points) + polygons = np.asarray(polygons) + for k,v in cell_data.items(): + cell_data[k] = np.asarray(v) -# --------------------------------------------------------------------------------} -# --- The code below is taken from meshio + return header, points, polygons, point_data, cell_data + +# -------------------------------------------------------------------------------- +# --- The code below was taken from meshio # https://github.com/nschloe/meshio # The MIT License (MIT) # Copyright (c) 2015-2020 meshio developers -# --------------------------------------------------------------------------------{ +# Adapted by E. 
Branlard +# -------------------------------------------------------------------------------- ReadError = BrokenFormatError WriteError = BrokenFormatError @@ -622,6 +540,7 @@ def __repr__(self): # supported vtk dataset types vtk_dataset_types = [ + "POLYDATA", "UNSTRUCTURED_GRID", "STRUCTURED_POINTS", "STRUCTURED_GRID", @@ -629,6 +548,7 @@ def __repr__(self): ] # additional infos per dataset type vtk_dataset_infos = { + "POLYDATA": [], "UNSTRUCTURED_GRID": [], "STRUCTURED_POINTS": [ "DIMENSIONS", @@ -650,6 +570,7 @@ def __repr__(self): "METADATA", "DATASET", "POINTS", + "POLYGONS", "CELLS", "CELL_TYPES", "POINT_DATA", @@ -659,23 +580,31 @@ def __repr__(self): ] - - - -def read(filename): +def read_vtk(filename, info, **kwargs): """Reads a VTK vtk file.""" + # initialize output data + if info is None: + info = VTKFile() + with open(filename, "rb") as f: - out = read_buffer(f) - return out + out = read_buffer(f, info, **kwargs) + # --- Postpro + _check_mesh(info) # generate points if needed + if info.polygons is not None: + info.cell_data = info.cell_data_raw + else: + cells, cell_data = translate_cells(info.c, info.ct, info.cell_data_raw) + info.cells = cells + info.cell_data = cell_data -def read_buffer(f): - # initialize output data - info = VTKFile() + return out + +def read_buffer(f, info, verbose=False): # skip header and title f.readline() - f.readline() + info.header = f.readline().decode("utf-8").strip() data_type = f.readline().decode("utf-8").strip().upper() if data_type not in ["ASCII", "BINARY"]: @@ -694,18 +623,14 @@ def read_buffer(f): info.split = line.split() info.section = info.split[0].upper() - + if verbose: + print('Section', info.section, info.section in vtk_sections) if info.section in vtk_sections: + # Sections: METADATA, DATASET, POINTS, POLYGONS, CELLS, CELL_TYPES, POINT_DATA, CELL_DATA, LOOKUP_TABLE, COLOR_SCALARS _read_section(f, info) else: + # SubSections: POINT_DATA, CELL_DATA, DATASET, SCALARS, VECTORS, TENSORS, FIELD _read_subsection(f, info) - - _check_mesh(info) - cells, cell_data = translate_cells(info.c, info.ct, info.cell_data_raw) - - info.cells = cells - info.cell_data = cell_data - return info @@ -715,6 +640,7 @@ def _read_section(f, info): elif info.section == "DATASET": info.active = "DATASET" + # Dataset types: POLYDATA, UNSTRUCTURED_GRID, STRUCTURED_POINTS, STRUCTURED_GRID, RECTILINEAR_GRID info.dataset["type"] = info.split[1].upper() if info.dataset["type"] not in vtk_dataset_types: raise BrokenFormatError( @@ -770,7 +696,7 @@ def _read_section(f, info): elif info.section == "LOOKUP_TABLE": info.num_items = int(info.split[2]) - numpy.fromfile(f, count=info.num_items * 4, sep=" ", dtype=float) + c = numpy.fromfile(f, count=info.num_items * 4, sep=" ", dtype=float) # rgba = data.reshape((info.num_items, 4)) elif info.section == "COLOR_SCALARS": @@ -780,7 +706,25 @@ def _read_section(f, info): dtype = numpy.ubyte if info.is_ascii: dtype = float - numpy.fromfile(f, count=num_items * nValues, dtype=dtype) + c = numpy.fromfile(f, count=num_items * nValues, dtype=dtype) + + elif info.section == "POLYGONS": + info.num_items = int(info.split[1]) + num_floats = int(info.split[2]) + nPerLine = int(num_floats/info.num_items) + if np.mod(num_floats, info.num_items): + raise NotImplementedError('Polygons with varying number of edges') + if info.is_ascii: + dtype = int + poly = numpy.fromfile(f, count=num_floats, sep=" ", dtype=float) + poly = poly.reshape(-1,nPerLine).astype(int) + poly = poly[:,1:] # We remove Ns. 
Put it back if it's an important data + info.polygons = poly + else: + raise NotImplementedError('Polygons binary') + + else: + raise NotImplementedError('Section not implemented `{}`'.format(info.section)) def _read_subsection(f, info): @@ -847,6 +791,13 @@ def _check_mesh(info): info.points = _generate_points(axis) info.c, info.ct = _generate_cells(dim=info.dataset["DIMENSIONS"]) + # --- point_data_grid added for convenience, TODO, make it a property + info.point_data_grid = {} + # We provide point_data_grid, corresponds to point_data but reshaped + for k,PD in info.point_data.items(): + # NOTE: tested for len(y)=1, len(z)=1 only + info.point_data_grid[k]=PD.reshape(len(info.xp_grid), len(info.yp_grid), len(info.zp_grid),PD.shape[1], order='F') + elif info.dataset["type"] == "RECTILINEAR_GRID": axis = [ info.dataset["X_COORDINATES"], @@ -1159,50 +1110,16 @@ def translate_cells(data, types, cell_data_raw): return cells, cell_data - -def write(filename, mesh, binary=True): - def pad(array): - return numpy.pad(array, ((0, 0), (0, 1)), "constant") - - if mesh.points.shape[1] == 2: - points = pad(mesh.points) +def write_vtk(filename, info, binary=False): + if info.dataset['type']=='POLYDATA': + write_dataset_polydata(info.filename, points=info.points, polygons=info.polygons, point_data=info.point_data, cell_data=info.cell_data, binary=binary, header=info.header) + elif info.dataset['type']=='UNSTRUCTURED_GRID': + write_dataset_unstructured_grid(info.filename, points=info.points, cells=info.cells, point_data=info.point_data, cell_data=info.cell_data, binary=binary) else: - points = mesh.points - - if mesh.point_data: - for name, values in mesh.point_data.items(): - if len(values.shape) == 2 and values.shape[1] == 2: - mesh.point_data[name] = pad(values) - - for name, data in mesh.cell_data.items(): - for k, values in enumerate(data): - if len(values.shape) == 2 and values.shape[1] == 2: - data[k] = pad(data[k]) - - with open(filename, "wb") as f: - f.write(b"# vtk DataFile Version 4.2\n") - f.write("written \n".encode("utf-8")) - f.write(("BINARY\n" if binary else "ASCII\n").encode("utf-8")) - f.write(b"DATASET UNSTRUCTURED_GRID\n") - - # write points and cells - _write_points(f, points, binary) - _write_cells(f, mesh.cells, binary) - - # write point data - if mesh.point_data: - num_points = mesh.points.shape[0] - f.write("POINT_DATA {}\n".format(num_points).encode("utf-8")) - _write_field_data(f, mesh.point_data, binary) - - # write cell data - if mesh.cell_data: - total_num_cells = sum(len(c.data) for c in mesh.cells) - f.write("CELL_DATA {}\n".format(total_num_cells).encode("utf-8")) - _write_field_data(f, mesh.cell_data, binary) - + raise NotImplementedError('Write function for DATASET TYPE: `{}`'.format(info.dataset['type'])) def _write_points(f, points, binary): + points = np.asarray(points) f.write( "POINTS {} {}\n".format( len(points), numpy_to_vtk_dtype[points.dtype.name] @@ -1218,7 +1135,10 @@ def _write_points(f, points, binary): points.astype(points.dtype.newbyteorder(">")).tofile(f, sep="") else: # ascii - points.tofile(f, sep=" ") + #points.tofile(f, sep=" ") + for point in points: + np.asarray(point).tofile(f, sep=" ") + f.write(b"\n") f.write(b"\n") @@ -1274,6 +1194,24 @@ def _write_cells(f, cells, binary): f.write(b"\n") +def _write_polygons(f, polygons, binary): + + polyN = [len(p) for p in polygons] + nFloats = np.sum(polyN) + len(polygons) + f.write(("POLYGONS {} {}\n".format(len(polygons), nFloats)).encode("utf-8")) + poly_list = [ [pn]+ list(polygon) for pn,polygon in 
zip(polyN, polygons)] + if binary: + print('>>>') +#> endian +# points.astype(points.dtype.newbyteorder(">")).tofile(f, sep="") + else: + # ascii + for poly in poly_list: + np.asarray(poly).tofile(f, sep=" ") + f.write(b"\n") + f.write(b"\n") + + def _write_field_data(f, data, binary): f.write(("FIELD FieldData {}\n".format(len(data))).encode("utf-8")) for name, values in data.items(): diff --git a/openfast_toolbox/linearization/campbell.py b/openfast_toolbox/linearization/campbell.py index 4b17a3f..541b82f 100644 --- a/openfast_toolbox/linearization/campbell.py +++ b/openfast_toolbox/linearization/campbell.py @@ -197,7 +197,7 @@ def postproMBC(xlsFile=None, csvModesIDFile=None, xlssheet=None, verbose=True, W else: raise Exception('Provide either an Excel file or a csv (ModesID) file') # --- Mode Identification - ID.iloc[:,0].fillna('Unknown', inplace=True) # replace nan + ID.iloc[:,0] = ID.iloc[:,0].fillna('Unknown') # replace nan ModeNames = ID.iloc[2: ,0].values ModeIDs = ID.iloc[2: ,1:].values nModesIDd = len(ModeNames) # Number of modes identified in the ID file diff --git a/openfast_toolbox/linearization/campbell_data.py b/openfast_toolbox/linearization/campbell_data.py index 3fb569d..ea6bd59 100644 --- a/openfast_toolbox/linearization/campbell_data.py +++ b/openfast_toolbox/linearization/campbell_data.py @@ -84,7 +84,7 @@ def campbell_diagram_data_oneOP(mbc_data, BladeLen=None, TowerLen=None): PhaseIndx = CData['PhaseDiff'] > 180; CData['PhaseDiff'][PhaseIndx] = CData['PhaseDiff'][PhaseIndx] - 360; # move to range (-180, 180] - if ~usePercent: + if not usePercent: PhaseIndx = CData['PhaseDiff'] > 90; CData['MagnitudePhase'][PhaseIndx] = -1*CData['MagnitudePhase'][PhaseIndx]; CData['PhaseDiff'][PhaseIndx] = CData['PhaseDiff'][PhaseIndx] - 180; diff --git a/openfast_toolbox/postpro/postpro.py b/openfast_toolbox/postpro/postpro.py index c5ebf56..c03485d 100644 --- a/openfast_toolbox/postpro/postpro.py +++ b/openfast_toolbox/postpro/postpro.py @@ -3,6 +3,10 @@ import pandas as pd import numpy as np import re +try: + from scipy.integrate import cumulative_trapezoid +except: + from scipy.integrate import cumtrapz as cumulative_trapezoid import openfast_toolbox.io as weio from openfast_toolbox.common import PYFASTException as WELIBException @@ -331,34 +335,47 @@ def _HarmonizeSpanwiseData(Name, Columns, vr, R, IR=None) : return dfRad, nrMax, ValidRow -def insert_spanwise_columns(df, vr=None, R=None, IR=None, sspan='r', sspan_bar='r/R'): - """ - Add some columns to the radial data - df: dataframe - """ +def compute_spanwise_columns(df, vr=None, R=None, IR=None, sspan='r', sspan_bar='r/R'): if df is None: return df if df.shape[1]==0: return None - nrMax=len(df) - ids=np.arange(nrMax) + nrMax = len(df) + ids = np.arange(nrMax) + + Columns={} if vr is None or R is None: # Radial position unknown vr_bar = ids/(nrMax-1) - df.insert(0, 'i/n_[-]', vr_bar) + Columns['i/n_[-]'] = vr_bar else: vr_bar=vr/R if (nrMax)<=len(vr_bar): vr_bar=vr_bar[:nrMax] elif (nrMax)>len(vr_bar): raise Exception('Inconsistent length between radial stations ({:d}) and max index present in output chanels ({:d})'.format(len(vr_bar),nrMax)) - df.insert(0, sspan_bar+'_[-]', vr_bar) + Columns[sspan_bar+'_[-]'] = vr_bar if IR is not None: - df['Node_[#]']=IR[:nrMax] - df['i_[#]']=ids+1 + Columns['Node_[#]']=IR[:nrMax] + Columns['i_[#]']=ids+1 if vr is not None: - df[sspan+'_[m]'] = vr[:nrMax] + Columns[sspan+'_[m]'] = vr[:nrMax] + return Columns + +def insert_spanwise_columns(df, vr=None, R=None, IR=None, sspan='r', 
sspan_bar='r/R'): + """ + Add some columns to the radial data + df: dataframe + """ + Columns = compute_spanwise_columns(df=df, vr=vr, R=R, IR=IR, sspan=sspan, sspan_bar=sspan_bar) + if Columns is None: + return None + for i, (k,v) in enumerate(Columns.items()): + if i==0: + df.insert(0, k, v) + else: + df[k] = v return df def find_matching_columns(Cols, PatternMap): @@ -379,36 +396,73 @@ def find_matching_columns(Cols, PatternMap): ColsInfo.append(col) return ColsInfo,nrMax -def extract_spanwise_data(ColsInfo, nrMax, df=None,ts=None): +def extract_spanwise_data(ColsInfo, nrMax, df=None, ts=None): """ Extract spanwise data based on some column info ColsInfo: see find_matching_columns """ nCols = len(ColsInfo) - if nCols==0: + if nCols == 0: return None if ts is not None: - Values = np.zeros((nrMax,nCols)) - Values[:] = np.nan + Values = np.full((nrMax, nCols), np.nan) elif df is not None: raise NotImplementedError() - ColNames =[c['name'] for c in ColsInfo] + ColNames = [c['name'] for c in ColsInfo] - for ic,c in enumerate(ColsInfo): + for ic, c in enumerate(ColsInfo): Idx, cols, colname = c['Idx'], c['cols'], c['name'] - for idx,col in zip(Idx,cols): - Values[idx-1,ic]=ts[col] - nMissing = np.sum(np.isnan(Values[:,ic])) - if len(cols)nrMax: - print('[WARN] More values found for {}, found {}/{}'.format(colname,len(cols),nrMax)) + Values[Idx - 1, ic] = ts[cols].values + + nMissing = np.sum(np.isnan(Values[:, ic])) + if len(cols) < nrMax: + print('[WARN] Not all values found for {}, missing {}/{}'.format(colname, nMissing, nrMax)) + if len(cols) > nrMax: + print('[WARN] More values found for {}, found {}/{}'.format(colname, len(cols), nrMax)) df = pd.DataFrame(data=Values, columns=ColNames) df = df.reindex(sorted(df.columns), axis=1) return df +def extract_spanwise_data_timeSeries(ColsInfo, nrMax, df, vr=None, R=None, IR=None, si1='i1', sir='ir'): + """ + Extract spanwise variables for a DataFrame, returns them in an xarray Dataset + Spanwise variables are then of the form: + (nFirstDim x nSpan) + Return an xarray + + ColsInfo: see find_matching_columns + """ + import xarray as xr + nCols = len(ColsInfo) + if nCols == 0: + return None + n1 = len(df) + + # --- Lazy implementation to figure out the radial positions + dfRad = extract_spanwise_data(ColsInfo, nrMax, ts=df.iloc[0]) + spanColumns = compute_spanwise_columns(dfRad, vr=vr, R=R, IR=IR) + k0 = list(spanColumns.keys())[0] + # --- Populate a xarray + # Create xarray dataset + i1 = np.arange(0, n1) + ir = np.arange(0, len(spanColumns[k0])) + ds = xr.Dataset(coords={si1: i1, sir: ir}) + ColNames = [c['name'] for c in ColsInfo] + for ic, c in enumerate(ColsInfo): + Idx, cols, colname = c['Idx'], c['cols'], c['name'] + Values = np.full((n1, nrMax), np.nan) + Values[:, Idx-1] = df[cols].values + # Store in Dataset + ds[colname] = ([si1, sir], Values[:, :]) + if len(cols) < nrMax: + print('[WARN] Not all values found for {}, found {}/{}'.format(colname, len(cols), nrMax)) + if len(cols) > nrMax: + print('[WARN] More values found for {}, found {}/{}'.format(colname, len(cols), nrMax)) + # --- Add spanColumns to Dataset + for ic, (c,v) in enumerate(spanColumns.items()): + ds[c] = ([sir], v) + return ds def _BDSpanMap(): BDSpanMap=dict() @@ -635,16 +689,62 @@ def spanwiseColEDTwr(Cols): def spanwiseColAD(Cols): """ Return column info, available columns and indices that contain AD spanwise data""" ADSpanMap=dict() +# From AeroDyn_AllBldNd: TODO Use it directly.. 
+# "ALPHA ","AXIND ","AXIND_QS ","BEM_CT_QS","BEM_F_QS ","BEM_KP_QS","BEM_K_QS ","CD ", & +# "CD_QS ","CHI ","CL ","CLRNC ","CL_QS ","CM ","CMA ","CM_QS ", & +# "CN ","CPMIN ","CT ","CURVE ","CX ","CXA ","CY ","DEBUG1 ", & +# "DEBUG2 ","DEBUG3 ","DYNP ","FBN ","FBS ","FBT ","FBXA ","FBXI ", & +# "FBXL ","FBXP ","FBYA ","FBYI ","FBYL ","FBYP ","FBZA ","FBZI ", & +# "FBZL ","FBZP ","FD ","FL ","FN ","FT ","FX ","FXA ", & +# "FXI ","FXL ","FXP ","FY ","FYI ","FYL ","FYP ","FZI ", & +# "FZL ","FZP ","GAM ","GEOMPHI ","M ","MBN ","MBS ","MBT ", & +# "MBXA ","MBXI ","MBXL ","MBXP ","MBYA ","MBYI ","MBYL ","MBYP ", & +# "MBZA ","MBZI ","MBZL ","MBZP ","MM ","MXI ","MXL ","MXP ", & +# "MYI ","MYL ","MYP ","MZA ","MZI ","MZL ","MZP ","PHI ", & +# "RE ","SGCAV ","SIGCR ","STVX ","STVXA ","STVXI ","STVXL ","STVXP ", & +# "STVY ","STVYA ","STVYI ","STVYL ","STVYP ","STVZ ","STVZA ","STVZI ", & +# "STVZL ","STVZP ","THETA ","TNIND ","TNIND_QS ","TOE ","UA_FLAG ","UA_X1 ", & +# "UA_X2 ","UA_X3 ","UA_X4 ","UA_X5 ","UIN ","UIR ","UIT ","VDISX ", & +# "VDISXA ","VDISXI ","VDISXL ","VDISXP ","VDISY ","VDISYA ","VDISYI ","VDISYL ", & +# "VDISYP ","VDISZ ","VDISZA ","VDISZI ","VDISZL ","VDISZP ","VINDX ","VINDXA ", & +# "VINDXI ","VINDXL ","VINDXP ","VINDY ","VINDYA ","VINDYI ","VINDYL ","VINDYP ", & +# "VINDZA ","VINDZI ","VINDZL ","VINDZP ","VREL ","VUNDX ","VUNDXA ","VUNDXI ", & +# "VUNDXL ","VUNDXP ","VUNDY ","VUNDYA ","VUNDYI ","VUNDYL ","VUNDYP ","VUNDZ ", & +# "VUNDZA ","VUNDZI ","VUNDZL ","VUNDZP ","VX ","VY "/) +# CHARACTER(ChanLen), PARAMETER :: ParamUnitsAry(166) = (/ character(ChanLen) :: & ! This lists the units corresponding to the allowed parameters +# "(deg) ","(-) ","(-) ","(-) ","(-) ","(-) ","(-) ","(-) ", & +# "(-) ","(deg) ","(-) ","(m) ","(-) ","(-) ","(-) ","(-) ", & +# "(-) ","(-) ","(-) ","(deg) ","(-) ","(-) ","(-) ","(-) ", & +# "(-) ","(-) ","(Pa) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ", & +# "(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ", & +# "(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ", & +# "(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ","(N/m) ", & +# "(N/m) ","(N/m) ","(m^2/s)","(1/0) ","(-) ","(N-m/m)","(N-m/m)","(N-m/m)", & +# "(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)", & +# "(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)", & +# "(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(N-m/m)","(deg) ", & +# "(-) ","(-) ","(-) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(deg) ","(-) ","(-) ","(deg) ","(-) ","(rad) ", & +# "(rad) ","(-) ","(-) ","(-) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ", & +# "(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) ","(m/s) "/) + + for sB in ['B1','B2','B3']: ADSpanMap['^[A]*'+sB+r'N(\d*)Alpha_\[deg\]'] =sB+'Alpha_[deg]' ADSpanMap['^[A]*'+sB+r'N(\d*)AxInd_\[-\]' ] =sB+'AxInd_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)TnInd_\[-\]' ] =sB+'TnInd_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)AxInd_qs_\[-\]' ]=sB+'AxInd_qs_[-]' 
ADSpanMap['^[A]*'+sB+r'N(\d*)TnInd_qs_\[-\]' ]=sB+'TnInd_qs_[-]' - ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_k_\[-\]' ]=sB+'BEM_k_[-]' - ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_kp_\[-\]' ]=sB+'BEM_kp_[-]' - ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_F_\[-\]' ]=sB+'BEM_F_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_kp_qs\[-\]' ]=sB+'BEM_kp_qs_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_F_qs\[-\]' ]=sB+'BEM_F_qs_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_CT_qs_\[-\]' ]=sB+'BEM_CT_qs_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Chi_\[deg\]' ]=sB+'Chi_[deg]' ADSpanMap['^[A]*'+sB+r'N(\d*)Cl_\[-\]' ] =sB+'Cl_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)Cd_\[-\]' ] =sB+'Cd_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)Cm_\[-\]' ] =sB+'Cm_[-]' @@ -668,14 +768,23 @@ def spanwiseColAD(Cols): ADSpanMap['^[A]*'+sB+r'N(\d*)Vindxp_\[m/s\]'] =sB+'Vindxp_[m/s]' ADSpanMap['^[A]*'+sB+r'N(\d*)Vindyp_\[m/s\]'] =sB+'Vindyp_[m/s]' ADSpanMap['^[A]*'+sB+r'N(\d*)Vindzp_\[m/s\]'] =sB+'Vindzp_[m/s]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Vindxa_\[m/s\]'] =sB+'Vindxa_[m/s]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Vindya_\[m/s\]'] =sB+'Vindya_[m/s]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Vindza_\[m/s\]'] =sB+'Vindza_[m/s]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fx_\[N/m\]' ] =sB+'Fx_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fy_\[N/m\]' ] =sB+'Fy_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fxi_\[N/m\]' ] =sB+'Fxi_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fyi_\[N/m\]' ] =sB+'Fyi_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fzi_\[N/m\]' ] =sB+'Fzi_[N/m]' - ADSpanMap['^[A]*'+sB+r'N(\d*)Mxi_\[N-m/m\]' ] =sB+'Mxi_[N-m/m]' - ADSpanMap['^[A]*'+sB+r'N(\d*)Myi_\[N-m/m\]' ] =sB+'Myi_[N-m/m]' - ADSpanMap['^[A]*'+sB+r'N(\d*)Mzi_\[N-m/m\]' ] =sB+'Mzi_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Mxi_\[N-m/m\]' ] =sB+'Mxi_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Myi_\[N-m/m\]' ] =sB+'Myi_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Mzi_\[N-m/m\]' ] =sB+'Mzi_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Fxp_\[N/m\]' ] =sB+'Fxp_[N/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Fyp_\[N/m\]' ] =sB+'Fyp_[N/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Fzp_\[N/m\]' ] =sB+'Fzp_[N/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Mxp_\[N-m/m\]' ] =sB+'Mxp_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Myp_\[N-m/m\]' ] =sB+'Myp_[N-m/m]' + ADSpanMap['^[A]*'+sB+r'N(\d*)Mzp_\[N-m/m\]' ] =sB+'Mzp_[N-m/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fl_\[N/m\]' ] =sB+'Fl_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fd_\[N/m\]' ] =sB+'Fd_[N/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Fn_\[N/m\]' ] =sB+'Fn_[N/m]' @@ -718,6 +827,10 @@ def spanwiseColAD(Cols): ADSpanMap['^[A]*'+sB+r'N(\d*)Mm_\[N-m/m\]' ] =sB+'Mm_[N-m/m]' ADSpanMap['^[A]*'+sB+r'N(\d*)Gam_\[' ] =sB+'Gam_[m^2/s]' #DBGOuts # DEPRECIATED + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_k_\[-\]' ] =sB+'BEM_k_qs_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_kp_\[-\]' ] =sB+'BEM_kp_qs_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_F_\[-\]' ] =sB+'BEM_F_qs_[-]' + ADSpanMap['^[A]*'+sB+r'N(\d*)BEM_k_qs\[-\]'] =sB+'BEM_k_qs_[-]' ADSpanMap['^[A]*'+sB+r'N(\d*)AOA_\[deg\]' ] =sB+'Alpha_[deg]' # DBGOuts ADSpanMap['^[A]*'+sB+r'N(\d*)AIn_\[deg\]' ] =sB+'AxInd_[-]' # DBGOuts NOTE BUG Unit ADSpanMap['^[A]*'+sB+r'N(\d*)ApI_\[deg\]' ] =sB+'TnInd_[-]' # DBGOuts NOTE BUG Unit @@ -782,7 +895,10 @@ def insert_extra_columns_AD(dfRad, tsAvg, vr=None, rho=None, R=None, nB=None, ch Ct=nB*Fx/(0.5 * rho * 2 * U0**2 * np.pi * vr) Ct[vr<0.01*R] = 0 dfRad[sB+'Ctloc_[-]'] = Ct - CT=2*np.trapz(vr_bar*Ct,vr_bar) + try: + CT=2*np.trapezoid(vr_bar*Ct,vr_bar) + except: + CT=2*np.trapz(vr_bar*Ct,vr_bar) dfRad[sB+'CtAvg_[-]']= CT*np.ones(vr.shape) except: pass @@ -959,88 +1075,27 @@ def radialAvg(filename, avgMethod, avgParam, raw_name='', df=None, raiseExceptio names_new=[''] 
return dfs_new, names_new -def spanwisePostProRows(df, FST_In=None): +def spanwisePostProRows(df, FST_In=None, si1='i1', sir='ir'): """ Returns a 3D matrix: n x nSpan x nColumn where df is of size n x mColumn NOTE: this is really not optimal. Spanwise columns should be extracted only once.. """ - # --- Extract info (e.g. radial positions) from Fast input file - # We don't have a .fst input file, so we'll rely on some default values for "r" - rho = 1.225 - chord = None # --- Extract radial positions of output channels d = FASTSpanwiseOutputs(FST_In, OutputCols=df.columns.values) - r_AD = d['r_AD'] - r_ED_bld = d['r_ED_bld'] - r_ED_twr = d['r_ED_twr'] - r_BD = d['r_BD'] - IR_AD = d['IR_AD'] - IR_ED_bld = d['IR_ED_bld'] - IR_ED_twr = d['IR_ED_twr'] - IR_BD = d['IR_BD'] - TwrLen = d['TwrLen'] - R = d['R'] - r_hub = d['r_hub'] - fst = d['fst'] - #print('r_AD:', r_AD) - #print('r_ED:', r_ED) - #print('r_BD:', r_BD) - if R is None: - R=1 - try: - chord = fst.AD.Bld1['BldAeroNodes'][:,5] # Full span - except: - pass - try: - rho = fst.AD['Rho'] - except: - try: - rho = fst.AD['AirDens'] - except: - print('[WARN] Using default air density (1.225)') - pass - # --- Extract radial data for each azimuthal average - M_AD=None - M_ED=None - M_BD=None - Col_AD=None - Col_ED=None - Col_BD=None - v = df.index.values - + R = d['R'] if d['R'] is not None else 1 # --- Getting Column info Cols=df.columns.values - if r_AD is not None: - ColsInfoAD, nrMaxAD = spanwiseColAD(Cols) - if r_ED_bld is not None: - ColsInfoED, nrMaxED = spanwiseColED(Cols) - if r_BD is not None: - ColsInfoBD, nrMaxBD = spanwiseColBD(Cols) - for i,val in enumerate(v): - if r_AD is not None: - dfRad_AD = extract_spanwise_data(ColsInfoAD, nrMaxAD, df=None, ts=df.iloc[i]) - dfRad_AD = insert_extra_columns_AD(dfRad_AD, df.iloc[i], vr=r_AD, rho=rho, R=R, nB=3, chord=chord) - dfRad_AD = insert_spanwise_columns(dfRad_AD, r_AD, R=R, IR=IR_AD) - if i==0: - M_AD = np.zeros((len(v), len(dfRad_AD), len(dfRad_AD.columns))) - Col_AD=dfRad_AD.columns.values - M_AD[i, :, : ] = dfRad_AD.values - if r_ED_bld is not None and len(r_ED_bld)>0: - dfRad_ED = extract_spanwise_data(ColsInfoED, nrMaxED, df=None, ts=df.iloc[i]) - dfRad_ED = insert_spanwise_columns(dfRad_ED, r_ED_bld, R=R, IR=IR_ED) - if i==0: - M_ED = np.zeros((len(v), len(dfRad_ED), len(dfRad_ED.columns))) - Col_ED=dfRad_ED.columns.values - M_ED[i, :, : ] = dfRad_ED.values - if r_BD is not None and len(r_BD)>0: - dfRad_BD = extract_spanwise_data(ColsInfoBD, nrMaxBD, df=None, ts=df.iloc[i]) - dfRad_BD = insert_spanwise_columns(dfRad_BD, r_BD, R=R, IR=IR_BD) - if i==0: - M_BD = np.zeros((len(v), len(dfRad_BD), len(dfRad_BD.columns))) - Col_BD=dfRad_BD.columns.values - M_BD[i, :, : ] = dfRad_BD.values - return M_AD, Col_AD, M_ED, Col_ED, M_BD, Col_BD + ColsInfoAD, nrMaxAD = spanwiseColAD(Cols) + ColsInfoED, nrMaxED = spanwiseColED(Cols) + ColsInfoBD, nrMaxBD = spanwiseColBD(Cols) + + # --- Extract data (nt x nSpan) for each variables + ds_AD = extract_spanwise_data_timeSeries(ColsInfoAD, nrMaxAD, df, vr=d['r_AD'] , R=R, IR=d['IR_AD'] , si1=si1, sir=sir) + ds_ED = extract_spanwise_data_timeSeries(ColsInfoED, nrMaxED, df, vr=d['r_ED_bld'], R=R, IR=d['IR_ED_bld'], si1=si1, sir=sir) + ds_BD = extract_spanwise_data_timeSeries(ColsInfoBD, nrMaxBD, df, vr=d['r_BD'] , R=R, IR=d['IR_BD'] , si1=si1, sir=sir) + + return ds_AD, ds_ED, ds_BD def FASTSpanwiseOutputs(FST_In, OutputCols=None, verbose=False): @@ -1064,7 +1119,7 @@ def FASTSpanwiseOutputs(FST_In, OutputCols=None, verbose=False): IR_BD = None fst=None 
if FST_In is not None: - fst = FASTInputDeck(FST_In, readlist=['AD','ADbld','ED','BD','BDbld','SD']) + fst = FASTInputDeck(FST_In, readlist=['AD','ADbld','ED','BD','BDbld','SD'], verbose=False) # NOTE: all this below should be in FASTInputDeck if fst.version == 'F7': # --- FAST7 @@ -1566,16 +1621,16 @@ def bin_mean_DF(df, xbins, colBin ): raise Exception('The column `{}` does not appear to be in the dataframe'.format(colBin)) xmid = (xbins[:-1]+xbins[1:])/2 df['Bin'] = pd.cut(df[colBin], bins=xbins, labels=xmid ) # Adding a column that has bin attribute - df2 = df.groupby('Bin').mean() # Average by bin + df2 = df.groupby('Bin', observed=False).mean() # Average by bin # also counting df['Counts'] = 1 - dfCount=df[['Counts','Bin']].groupby('Bin').sum() + dfCount=df[['Counts','Bin']].groupby('Bin', observed=False).sum() df2['Counts'] = dfCount['Counts'] # Just in case some bins are missing (will be nan) df2 = df2.reindex(xmid) return df2 -def azimuthal_average_DF(df, psiBin=None, colPsi='Azimuth_[deg]', tStart=None, colTime='Time_[s]'): +def azimuthal_average_DF(df, psiBin=None, colPsi='Azimuth_[deg]', tStart=None, colTime='Time_[s]', periodic=False, nPeriods=None): """ Average a dataframe based on azimuthal value Returns a dataframe with same amount of columns as input, and azimuthal values as index @@ -1583,18 +1638,75 @@ def azimuthal_average_DF(df, psiBin=None, colPsi='Azimuth_[deg]', tStart=None, c if psiBin is None: psiBin = np.arange(0,360+1,10) - if tStart is not None: - if colTime not in df.columns.values: - raise Exception('The column `{}` does not appear to be in the dataframe'.format(colTime)) - df=df[ df[colTime]>tStart].copy() + if nPeriods is not None: + tStart, tEnd = findPeriodTimeRange(df, nPeriods=nPeriods, filename='', colPsi=colPsi, colTime=colTime) + df=df[ np.logical_and( df[colTime]>tStart, df[colTime]<=tEnd) ].copy() + else: + if tStart is not None: + if colTime not in df.columns.values: + raise Exception('The column `{}` does not appear to be in the dataframe'.format(colTime)) + df=df[ df[colTime]>tStart].copy() dfPsi= bin_mean_DF(df, psiBin, colPsi) if np.any(dfPsi['Counts']<1): print('[WARN] some bins have no data! 
Increase the bin size.') + if periodic: + # TODO, I should probably figure out a better way to do that + I = dfPsi.index.values + DI = I[1]-I[0] + dfEnd = pd.DataFrame(data=dfPsi.loc[I[0:1]].values, columns=dfPsi.columns, index=[I[-1]+DI]) + dfPsi = pd.concat([dfPsi, dfEnd], axis=0) + return dfPsi +def findPeriodTimeRange(df, nPeriods=3, filename='', colPsi=None, colTime=None): + + if colTime is None: + sTAllowed = ['Time_[s]','Time [s]', 'Time'] + sT = [s for s in sTAllowed if s in df.columns] + if len(sT)==0: + raise WELIBException('The dataframe must contain one of the following column: {}\n{}'.format(','.join(sTAllowed)),filename) + colTime = sT[0] + + if colPsi is None: + sAAllowed = ['Azimuth_[deg]','Azimuth [deg]', 'Azimuth', 'Psi'] + sA = [s for s in sAAllowed if s in df.columns] + if len(sA)==0: + raise WELIBException('The dataframe must contain one of the following columns: {}\n{}.'.format(','.join(sAAllowed),filename)) + colPsi = sA[0] + + + time = df[colTime].values + timenoNA = time[~np.isnan(time)] + + # NOTE: potentially we could average over each period and then average + psi=df[colPsi].values + _,iBef = _zero_crossings(psi-psi[-2],direction='up') + if len(iBef)==0: + _,iBef = _zero_crossings(psi-180,direction='up') + if len(iBef)==0: + print('[WARN] Not able to find a zero crossing!{}'.format(filename)) + tEnd = time[-1] + iBef=[0] + else: + tEnd = time[iBef[-1]] + + if nPeriods is None: + tStart=time[iBef[0]] + else: + nPeriods=int(nPeriods) + if len(iBef)-1 ' + Style.BRIGHT + '{0}', + } + + + backtrace.hook( + reverse=False, + align=False, + strip_path=True, + enable_on_envvar_only=False, + on_tty=False, + conservative=False, + styles=STYLES) +except: + print('Cannot use clean_exceptions, module `backtrace` not installed') + + # more code... + +# # if you wanna restore the default hook... +# backtrace.unhook() + + +# def MyExceptionHook(etype, value, trace): +# """ +# Handler for all unhandled exceptions. +# :param `etype`: the exception type (`SyntaxError`, `ZeroDivisionError`, etc...); +# :type `etype`: `Exception` +# :param string `value`: the exception error message; +# :param string `trace`: the traceback header, if any (otherwise, it prints the +# standard Python header: ``Traceback (most recent call last)``. +# +# +# Documentaion of traceback.print_exceptions +# This differs from print_tb() in the following ways: (1) if +# traceback is not None, it prints a header "Traceback (most recent +# call last):"; (2) it prints the exception type and value after the +# stack trace; (3) if type is SyntaxError and value has the +# appropriate format, it prints the line where the syntax error +# occurred with a caret on the next line indicating the approximate +# position of the error. 
+# +# """ +# outputstream = sys.stderr +# # Printing exception +# print(traceback.__file__) +# print('>>> Trace:') +# #traceback.print_exception(etype, value, trace) +# +# +# +# for line in traceback.TracebackException(type(value), value, trace, limit=None).format(chain=True): +# # print(line, file=sys.stderr, end="") +# print(type(line)) +# +# +# +# # Then showing to user the last error +# #frame = wx.GetApp().GetTopWindow() +# tmp = traceback.format_exception(etype, value, trace) +# print('>>> Exception:') +# print('The following exception occured:\n\n'+ tmp[-1] + '\n'+tmp[-2].strip()) +# +# sys.excepthook = MyExceptionHook diff --git a/openfast_toolbox/tools/curve_fitting.py b/openfast_toolbox/tools/curve_fitting.py index 17eab9f..27c89b0 100644 --- a/openfast_toolbox/tools/curve_fitting.py +++ b/openfast_toolbox/tools/curve_fitting.py @@ -4,6 +4,13 @@ The quality of a fit is usually a strong function of the initial guess. Because of this this package contains different kind of "helpers" and "wrapper" tools. +INTERFACE FOR MODEL FUNCTIONS: + def func(x, p, **const_kwargs): + # p a list, p[0] is the first coefficient + return y + + + FUNCTIONS --------- @@ -494,101 +501,135 @@ def model_fit(func, x, y, p0=None, bounds=None, **fun_kwargs): # --------------------------------------------------------------------------------} -# --- Main Class +# --- Model Info Class to store a Model, not its data # --------------------------------------------------------------------------------{ -class ModelFitter(): - def __init__(self,func=None, x=None, y=None, p0=None, bounds=None, **fun_kwargs): +class ModelInfo(dict): + def __init__(self): + # model signature + self['name'] = None + self['model_function'] = None + self['consts'] = None + self['formula'] = 'unavailable' + # Model fitting + self['coeffs'] = None + self['formula_num'] = 'unavailable' + self['fitted_function'] = None + self['coeffs_init'] = None + self['bounds'] = None + self['R2'] = None - self.model={ - 'name':None, 'model_function':None, 'consts':fun_kwargs, 'formula': 'unavailable', # model signature - 'coeffs':None, 'formula_num':'unavailable', 'fitted_function':None, 'coeffs_init':p0, 'bounds':bounds, # model fitting - 'R2':None, - } - self.data={'x':x,'y':y,'y_fit':None} - - if func is None: +# --------------------------------------------------------------------------------} +# --- Main Class +# --------------------------------------------------------------------------------{ +class FunctionFitter(): + """ """ + def __init__(self, func=None, coeffs=None, p0=None, bounds=None, x=None, y=None, name='', verbose=False, **consts): + + self.verbose = verbose + # We defer the validation of these variables to the fitting + self._p0 = p0 + self._bounds = bounds + self._varnames = coeffs + + self.model = ModelInfo() + self.model['name'] = name + self.model['model_function'] = func + self.model['consts'] = consts + #self.model['coeffs'] = None # deferred + + self.data = {'x':x, 'y':y, 'y_fit':None} + + D = {'varnames':coeffs, 'p0':p0, 'bounds':bounds} + if all([d is None for dname,d in D.items()]): + # We do not affect enough information to perform a fit return - self.set_model(func, **fun_kwargs) + #self.set_default_dict(varnames, p0, bounds) + #assert(self.model['coeffs'] is not None) # Initialize function if present # Perform fit if data and function is present if x is not None and y is not None: - self.fit_data(x,y,p0,bounds) + self.fit_data(x, y, p0, bounds) - def set_model(self,func, **fun_kwargs): - if callable(func): - # We don't have 
much additional info - self.model['model_function'] = func - self.model['name'] = func.__name__ - pass - - elif isinstance(func,six.string_types): - if func.find('predef:')==0: - # --- Minimization from a predefined function - predef_models=[m['id'] for m in MODELS] - if func not in predef_models: - raise Exception('Predefined function `{}` not defined in curve_fitting module\n Available functions: {}'.format(func,predef_models)) - i = predef_models.index(func) - ModelDict = MODELS[i] - self.model['model_function'] = ModelDict['handle'] - self.model['name'] = ModelDict['label'] - self.model['formula'] = ModelDict['formula'] - self.model['coeffs'] = extract_key_num(ModelDict['coeffs']) - self.model['coeffs_init'] = self.model['coeffs'].copy() - self.model['consts'] = extract_key_num(ModelDict['consts']) - self.model['bounds'] = extract_key_tuples(ModelDict['bounds']) - - elif func.find('eval:')==0: - # --- Minimization from a eval string - formula=func[5:] - # Extract coeffs {a} {b} {c}, replace by p[0] - variables, formula_eval = extract_variables(formula) - nParams=len(variables) - if nParams==0: - raise Exception('Formula should contains parameters in curly brackets, e.g.: {a}, {b}, {u_1}. No parameters found in {}'.format(formula)) - - # Check that the formula evaluates - x=np.array([1,2,5])*np.sqrt(2) # some random evaluation vector.. - p=[np.sqrt(2)/4]*nParams # some random initial conditions - try: - y=eval(formula_eval) - y=np.asarray(y) - if y.shape!=x.shape: - raise Exception('The formula does not return an array of same size as the input variable x. The formula must include `x`: {}'.format(formula_eval)) - except SyntaxError: - raise Exception('The formula does not evaluate, syntax error raised: {}'.format(formula_eval)) - except ZeroDivisionError: - pass - # Creating the actual function - def func(x, p): - return eval(formula_eval) + @property + def nParams(self): + return len(self.model['coeffs']) - self.model['model_function'] = func - self.model['name'] = 'user function' - self.model['formula'] = formula - self.model['coeffs'] = OrderedDict([(k,v) for k,v in zip(variables,p)]) - self.model['coeffs_init'] = self.model['coeffs'].copy() - self.model['consts'] = {} - self.model['bounds'] = None + def __repr__(self): + s='<{} object> with fields:\n'.format(type(self).__name__) + s+=' - data, dictionary with keys: \n' + if self.data['x'] is not None: + s+=' - x: [{} ... {}], n: {} \n'.format(self.data['x'][0],self.data['x'][-1],len(self.data['x'])) + s+=' - y: [{} ... {}], n: {} \n'.format(self.data['y'][0],self.data['y'][-1],len(self.data['y'])) + s+=' - model, dictionary with keys: \n' + for k,v in self.model.items(): + s=s+' - {:15s}: {}\n'.format(k,v) + return s - else: - raise Exception('func string needs to start with `eval:` of `predef:`, func: {}'.format(func)) + + def set_default_dict(self, varnames, p0, bounds): + D = {'varnames':varnames, 'p0':p0, 'bounds':bounds} + if all([d is None for dname,d in D.items()]): + raise Exception('Provide at least one of the following arguments: p0, bounds, or varnames') + # --- Infer key names if inputs are dictionaries + D = {'p0':p0, 'bounds':bounds} + D = {k: v for k,v in D.items() if v is not None} + if varnames is not None: + if isinstance(varnames, dict): + print('[WARN] varnames is a dictionary, it should be a list. 
Legacy warning.') + varnames = list(varnames.keys()) + if not isinstance(varnames, list): + raise Exception('varnames should be a list') + keys = varnames else: - raise Exception('func should be string or callable') - - if fun_kwargs is None: - return - if len(fun_kwargs)==0: - return - if self.model['consts'] is None: - raise Exception('Fun_kwargs provided, but no function constants were defined') + keys = None + for dn, d in D.items(): + if isinstance(d, dict): + if keys is None: + if len(d.keys())>0: + keys = list(d.keys()) + dn_key = dn + else: + new_keys = list(d.keys()) + if set(d.keys())!=set(keys) and len(new_keys)>0: + raise Exception(f'Argument `{dn}` has keys: `{new_keys}` but argument `{dn_key}` has keys: `{keys}`') + # --- Allow for bounds to be a tuple of just two falues + if bounds is not None: + bounds2 = np.asarray(bounds).flatten() + if len(bounds2)==2: + del D['bounds'] # Do not test on the size of bounds + # --- Infer dimensions (Number of variables) + nDim = None + for dn, d in D.items(): + if nDim is None: + if len(d)>0: + nDim = len(d) + dn_dim = dn + else: + nDim_new = len(d) + if nDim != nDim_new and nDim_new>0: + raise Exception(f'Argument `{dn}` has {nDim_new} values but Input `{dn_dim}` has {keys} values.') + assert(nDim is not None) # Programming error + # --- Set keys + if keys is None: + keys=['p{}'.format(i) for i in range(nDim)] + self.model['coeffs'] = OrderedDict([(k,np.nan) for k in keys]) - self.model['consts'], missing = set_common_keys(self.model['consts'], fun_kwargs ) - if len(missing)>0: - raise Exception('Curve fitting with function `{}` requires the following arguments {}. Missing: {}'.format(func.__name__,consts.keys(),missing)) - def setup_bounds(self, bounds, nParams): + def setup_bounds(self, bounds=None): + """ + Bounds can be provided in various format: + - string: e.g. 
' key=(mi,mx) , key2=(mi2,mx2) ' + - dictionary: {'key':(mi,mx), 'key2' :(mi2,mx2)} + - 2-array: + - (mi, mx) -> ([mi, mi , mi ], [mx, mx, mx]) + - [(mi, mi2, m3), (mx, mx2, mx3)] -> ([mi, mi2, mi3], [mx, mx2, mx3]) + - None: will be -np.inf, np.inf + OUTPUTS: + - bounds: ([mi, mi2, mi3], [mx, mx2, mx3]) + """ + nParams = self.nParams if bounds is not None: self.model['bounds']=bounds # store in model bounds=self.model['bounds'] # usemodel bounds as default @@ -599,7 +640,7 @@ def setup_bounds(self, bounds, nParams): if isinstance(bounds ,dict): if len(bounds)==0 or 'all' in bounds.keys(): bounds=([-np.inf]*nParams,[np.inf]*nParams) - elif self.model['coeffs'] is not None: + else: b1=[] b2=[] for k in self.model['coeffs'].keys(): @@ -610,8 +651,6 @@ def setup_bounds(self, bounds, nParams): # TODO merge default bounds raise Exception('Bounds dictionary is missing the key: `{}`'.format(k)) bounds=(b1,b2) - else: - raise NotImplementedError('Bounds dictionary with no known model coeffs.') else: # so.curve_fit needs a 2-tuple b1,b2=bounds[0],bounds[1] @@ -623,9 +662,11 @@ def setup_bounds(self, bounds, nParams): else: bounds=([-np.inf]*nParams,[np.inf]*nParams) - self.model['bounds']=bounds # store in model + self.model['bounds'] = bounds # store in model + if self.verbose: + print('Setting bounds to:',bounds) - def setup_guess(self, p0, bounds, nParams): + def setup_guess(self, p0, bounds): """ Setup initial parameter values for the fit, based on what the user provided, and potentially the bounds @@ -640,10 +681,11 @@ def setup_guess(self, p0, bounds, nParams): We can assume that the bounds are set """ + nParams = self.nParams def middleOfBounds(i): """ return middle of bounds for parameter `i`""" bLow = bounds[0][i] - bHigh = bounds[0][2] + bHigh = bounds[1][i] if (bLow,bHigh)==(-np.inf,np.inf): p_i=0 elif bLow==-np.inf: @@ -661,10 +703,10 @@ def middleOfBounds(i): if p0 is None: # There is some tricky logic here between the priority of bounds and coeffs - if self.model['coeffs'] is not None: + if self.model['coeffs_init'] is not None: # We rely on function to give us decent init coefficients - p0 = ([v for _,v in self.model['coeffs'].items()]) - elif bounds is None: + p0 = ([v for _,v in self.model['coeffs_init'].items()]) + if bounds is None: p0 = ([0]*nParams) else: # use middle of bounds @@ -677,7 +719,7 @@ def middleOfBounds(i): p0_dict=p0.copy() if self.model['coeffs'] is not None: p0=[] - for k in self.model['coeffs'].keys(): + for k in self.model['coeffs']: if k in p0_dict.keys(): p0.append(p0_dict[k]) else: @@ -691,30 +733,28 @@ def middleOfBounds(i): # --- Last check that p0 is within bounds if bounds is not None: - for p,k,lb,ub in zip(p0, self.model['coeffs'].keys(), bounds[0], bounds[1]): - if pub: - raise Exception('Parameter `{}` has the guess value {}, which is larger than the upper bound ({})'.format(k,p,ub)) - # TODO potentially set it as middle of bounds + if self.model['coeffs'] is not None: + for p,k,lb,ub in zip(p0, self.model['coeffs'], bounds[0], bounds[1]): + if pub: + raise Exception('Parameter `{}` has the guess value {}, which is larger than the upper bound ({})'.format(k,p,ub)) + # TODO potentially set it as middle of bounds # --- Finally, store the initial guesses in the model self.model['coeffs_init'] = p0 - def fit(self, func, x, y, p0=None, bounds=None, **fun_kwargs): - """ Fit model defined by a function to data (x,y) """ - # Setup function - self.set_model(func, **fun_kwargs) - # Fit data to model - self.fit_data(x, y, p0, bounds) - def 
clean_data(self,x,y): x=np.asarray(x) y=np.asarray(y) bNaN=~np.isnan(y) + if np.sum(bNaN)0: + raise Exception('Curve fitting with function `{}` requires the following arguments {}. Missing: {}'.format(func.__name__,consts.keys(),missing)) # --------------------------------------------------------------------------------} # --- Wrapper for predefined fitters @@ -1016,11 +1146,12 @@ def fit_data(self, x, y, p0=None, bounds=None): # Cleaning data x,y=self.clean_data(x,y) - nParams=self.order+1 + #nParams=self.order+1 + #self.setup_coeffs(nParams) # TODO # Bounds - self.setup_bounds(bounds, nParams) # TODO + self.setup_bounds(bounds) # TODO # Initial conditions - self.setup_guess(p0, bounds, nParams) # TODO + self.setup_guess(p0, bounds) # TODO # Fitting pfit = np.polyfit(x,y,self.order) @@ -1056,11 +1187,11 @@ def fit_data(self, x, y, p0=None, bounds=None): # Cleaning data, and store it in object x,y=self.clean_data(x,y) - nParams=len(self.exponents) + #nParams=len(self.exponents) # Bounds - self.setup_bounds(bounds, nParams) # TODO + self.setup_bounds(bounds) # Initial conditions - self.setup_guess(p0, bounds, nParams) # TODO + self.setup_guess(p0, bounds) X_poly=np.array([]) for i,e in enumerate(self.exponents): @@ -1298,7 +1429,7 @@ def isint(s): if text is None: return {} - sp=re.compile('([\w]+)=').split(text.replace(' ','')) + sp=re.compile(r'([\w]+)=').split(text.replace(' ','')) if len(sp)<3: return {} sp=sp[1:] diff --git a/openfast_toolbox/tools/eva.py b/openfast_toolbox/tools/eva.py index a135ecd..e362839 100644 --- a/openfast_toolbox/tools/eva.py +++ b/openfast_toolbox/tools/eva.py @@ -73,7 +73,7 @@ def polyeig(*A, sort=False, normQ=None): return X, e -def eig(K, M=None, freq_out=False, sort=True, normQ=None, discardIm=False, massScaling=True): +def eig(K, M=None, freq_out=False, sort=True, normQ=None, discardIm=False, massScaling=True, verbose=False): """ performs eigenvalue analysis and return same values as matlab returns: @@ -92,7 +92,8 @@ def eig(K, M=None, freq_out=False, sort=True, normQ=None, discardIm=False, massS for j in range(M.shape[1]): q_j = Q[:,j] modalmass_j = np.dot(q_j.T,M).dot(q_j) - Q[:,j]= Q[:,j]/np.sqrt(modalmass_j) + if modalmass_j>0: + Q[:,j]= Q[:,j]/np.sqrt(modalmass_j) Lambda=np.dot(Q.T,K).dot(Q) else: D,Q = linalg.eig(K) @@ -125,7 +126,8 @@ def eig(K, M=None, freq_out=False, sort=True, normQ=None, discardIm=False, massS bb = imm>0 if sum(bb)>0: W=list(np.where(bb)[0]) - print('[WARN] Found {:d} complex eigenvectors at positions {}/{}'.format(sum(bb),W,Q.shape[0])) + if verbose: + print('[WARN] Found {:d} complex eigenvectors at positions {}/{}'.format(sum(bb),W,Q.shape[0])) Lambda = np.real(Lambda) @@ -200,7 +202,7 @@ def eigA(A, nq=None, nq1=None, fullEV=False, normQ=None, sort=True): -def eigMK(M, K, sort=True, normQ=None, discardIm=False, freq_out=True, massScaling=True): +def eigMK(M, K, sort=True, normQ=None, discardIm=False, freq_out=True, massScaling=True, Imodes=None): """ Eigenvalue analysis of a mechanical system M, K: mass, and stiffness matrices respectively @@ -209,10 +211,17 @@ def eigMK(M, K, sort=True, normQ=None, discardIm=False, freq_out=True, massScali except that frequencies are returned instead of "Lambda" OUTPUTS: - Q, freq_0 if freq_out - Q, Lambda otherwise + - Q, freq_0 if freq_out + - Q, Lambda otherwise """ - return eig(K, M, sort=sort, normQ=normQ, discardIm=discardIm, freq_out=freq_out, massScaling=massScaling) + Q,l = eig(K, M, sort=sort, normQ=normQ, discardIm=discardIm, freq_out=freq_out, massScaling=massScaling) + if 
Imodes is not None: + Q=Q[:,Imodes] + if len(l.shape)==2: + l=l[np.ix_(Imodes, Imodes)] # TODO + else: + l=l[Imodes] + return Q,l def eigMCK(M, C, K, method='full_matrix', sort=True, normQ=None): @@ -240,7 +249,7 @@ def eigMCK(M, C, K, method='full_matrix', sort=True, normQ=None): freq_0 = np.sqrt(np.diag(Lambda))/(2*np.pi) betaMat = np.dot(Q,C).dot(Q.T) xi = (np.diag(betaMat)*np.pi/(2*np.pi*freq_0)) - xi[xi>2*np.pi] = np.NAN + xi[xi>2*np.pi] = np.nan zeta = xi/(2*np.pi) freq_d = freq_0*np.sqrt(1-zeta**2) elif method.lower()=='full_matrix': diff --git a/openfast_toolbox/tools/fatigue.py b/openfast_toolbox/tools/fatigue.py index e88936d..e0a9577 100644 --- a/openfast_toolbox/tools/fatigue.py +++ b/openfast_toolbox/tools/fatigue.py @@ -98,6 +98,8 @@ def equivalent_load(time, signal, m=3, Teq=1, bins=100, method='rainflow_windap' DELi = S**m * N / neq Leq = DELi.sum() ** (1/m) # See e.g. eq. (30) of [1] + except ImportError: + raise except: if outputMore: return np.nan, np.nan, np.nan, np.nan, np.nan @@ -213,9 +215,9 @@ def bin_count(x, bins, meanBin=True): df = pd.DataFrame(data=x, columns=['x']) xmid = (bins[:-1]+bins[1:])/2 df['x_mid']= pd.cut(df['x'], bins= bins, labels = xmid ) # Adding a column that has bin attribute - df2 = df.groupby('x_mid').mean() # Average by bin + df2 = df.groupby('x_mid', observed=False).mean() # Average by bin df['N'] = 1 - dfCount = df[['N','x_mid']].groupby('x_mid').sum() + dfCount = df[['N','x_mid']].groupby('x_mid', observed=False).sum() df2['N'] = dfCount['N'] # Just in case some bins are missing (will be nan) df2 = df2.reindex(xmid) diff --git a/openfast_toolbox/tools/pandalib.py b/openfast_toolbox/tools/pandalib.py index 35ce9eb..e12f649 100644 --- a/openfast_toolbox/tools/pandalib.py +++ b/openfast_toolbox/tools/pandalib.py @@ -134,32 +134,54 @@ def changeUnits(df, flavor='SI', inPlace=True): # TODO harmonize with dfToSIunits in welib.fast.tools.lin.py ! """ - def splitunit(s): + def splitunit(s0): + """ + return (variable, unit, previous character, brackets) + e.g. 
( 'Time' , 's', '_' , '[]') + """ + s=s0.replace('(',' [').replace(')',']') iu=s.rfind('[') - if iu>0: - return s[:iu], s[iu+1:].replace(']','') + if iu>1: + bracket = s0[iu:iu+1] + if bracket=='(': + brackets='()' + else: + brackets='[]' + if iu>1: + prev_char=s0[(iu-1):iu] + if prev_char not in [' ', '_']: + prev_char='' + svar=s[:iu] + else: + svar=s[:iu-1] + return svar, s[iu+1:].replace(']',''), prev_char, brackets else: - return s, '' + return s, '', '', '' def change_units_to_WE(s, c): """ Change units to wind energy units s: channel name (string) containing units, typically 'speed_[rad/s]' c: channel (array) """ - svar, u = splitunit(s) + svar, u, prev, brackets = splitunit(s) u=u.lower() scalings = {} # OLD = NEW scalings['rad/s'] = (30/np.pi,'rpm') # TODO decide scalings['rad' ] = (180/np.pi,'deg') scalings['n'] = (1e-3, 'kN') + scalings['mn'] = (1e+3, 'kN') scalings['nm'] = (1e-3, 'kNm') scalings['n-m'] = (1e-3, 'kNm') scalings['n*m'] = (1e-3, 'kNm') + scalings['mnm'] = (1e+3, 'kNm') + scalings['mn-m'] = (1e+3, 'kNm') + scalings['mn*m'] = (1e+3, 'kNm') scalings['w'] = (1e-3, 'kW') + scalings['mw'] = (1e+3, 'kW') if u in scalings.keys(): scale, new_unit = scalings[u] - s = svar+'['+new_unit+']' + s = svar+prev+brackets[0]+new_unit+brackets[1] c *= scale return s, c @@ -170,21 +192,27 @@ def change_units_to_SI(s, c): s: channel name (string) containing units, typically 'speed_[rad/s]' c: channel (array) """ - svar, u = splitunit(s) + svar, u, prev, brackets = splitunit(s) u=u.lower() scalings = {} # OLD = NEW scalings['rpm'] = (np.pi/30,'rad/s') scalings['rad' ] = (180/np.pi,'deg') scalings['deg/s' ] = (np.pi/180,'rad/s') + scalings['mn'] = (1e6, 'N') scalings['kn'] = (1e3, 'N') + scalings['mnm'] = (1e6, 'Nm') + scalings['mnm'] = (1e6, 'Nm') + scalings['mn-m'] = (1e6, 'Nm') + scalings['mn*m'] = (1e6, 'Nm') scalings['knm'] = (1e3, 'Nm') scalings['kn-m'] = (1e3, 'Nm') scalings['kn*m'] = (1e3, 'Nm') scalings['kw'] = (1e3, 'W') + scalings['mw'] = (1e6 ,'W') if u in scalings.keys(): scale, new_unit = scalings[u] - s = svar+'['+new_unit+']' + s = svar+prev+brackets[0]+new_unit+brackets[1] c *= scale return s, c @@ -200,7 +228,9 @@ def change_units_to_SI(s, c): elif flavor == 'SI': cols = [] for i, colname in enumerate(df.columns): - colname_new, df.iloc[:,i] = change_units_to_SI(colname, df.iloc[:,i]) + colname_new, col_new = change_units_to_SI(colname, df.iloc[:,i]) + df[colname] = df[colname].astype(col_new.dtype) # Need to cast to new type if type changed.. + df.iloc[:,i] = col_new cols.append(colname_new) df.columns = cols else: diff --git a/openfast_toolbox/tools/signal_analysis.py b/openfast_toolbox/tools/signal_analysis.py index 1cce7de..7e065bc 100644 --- a/openfast_toolbox/tools/signal_analysis.py +++ b/openfast_toolbox/tools/signal_analysis.py @@ -259,16 +259,21 @@ def applySampler(x_old, y_old, sampDict, df_old=None): sample_time = float(param[0]) if sample_time <= 0: raise Exception('Error: sample time must be positive') + # --- Version dependency... 
+ pdVer = [int(s) for s in pd.__version__.split('.')] + sSample = "{:f}s".format(sample_time) + if pdVer[0]<=1 or (pdVer[0]<=2 and pdVer[1]<2): + sSample = "{:f}S".format(sample_time) - time_index = pd.TimedeltaIndex(x_old, unit="S") - x_new = pd.Series(x_old, index=time_index).resample("{:f}S".format(sample_time)).mean().interpolate().values + time_index = pd.to_timedelta(x_old, unit="s") + x_new = pd.Series(x_old, index=time_index).resample(sSample).mean().interpolate().values if df_old is not None: - df_new = df_old.set_index(time_index, inplace=False).resample("{:f}S".format(sample_time)).mean() + df_new = df_old.set_index(time_index, inplace=False).resample(sSample).mean() df_new = df_new.interpolate().reset_index(drop=True) return x_new, df_new if y_old is not None: - y_new = pd.Series(y_old, index=time_index).resample("{:f}S".format(sample_time)).mean() + y_new = pd.Series(y_old, index=time_index).resample(sSample).mean() y_new = y_new.interpolate().values return x_new, y_new @@ -285,9 +290,16 @@ def applySampler(x_old, y_old, sampDict, df_old=None): # #nw=400 # #u_new = moving_average(np.floor(np.linspace(0,3,nt+nw-1))*3+3.5, nw) # return np.convolve(x, np.ones(w), 'valid') / w -# def moving_average(x,N,mode='same'): -# y=np.convolve(x, np.ones((N,))/N, mode=mode) -# return y +def moving_average_conv(x, n,mode='same'): + x = np.asarray(x) + #n0 = len(x) + xaug = np.concatenate(([x[0]]*(n-1),x, [x[-1]]*(n-1))) # repeating first and last values + # TODO verify this + y = np.convolve(xaug, np.ones((n,))/n, mode=mode) + y = y [n-1:-n+1] + #y=np.convolve(x, np.ones((n,))/n, mode=mode) + return y + def moving_average(a, n=3) : """ perform moving average, return a vector of same length as input diff --git a/openfast_toolbox/tools/spectral.py b/openfast_toolbox/tools/spectral.py index e7d6857..c54dc14 100644 --- a/openfast_toolbox/tools/spectral.py +++ b/openfast_toolbox/tools/spectral.py @@ -31,7 +31,7 @@ # --------------------------------------------------------------------------------} # --- FFT wrap # --------------------------------------------------------------------------------{ -def fft_wrap(t,y,dt=None, output_type='amplitude',averaging='None',averaging_window='hamming',detrend=False,nExp=None, nPerDecade=None): +def fft_wrap(t,y,dt=None, output_type='amplitude',averaging='None',averaging_window='hamming',detrend=False,nExp=None, nPerDecade=None, verbose=False): """ Wrapper to compute FFT amplitude or power spectra, with averaging. INPUTS: @@ -65,7 +65,8 @@ def fft_wrap(t,y,dt=None, output_type='amplitude',averaging='None',averaging_win relDiff = abs(dtDelta0-dt)/dt*100 #if dtDelta0 !=dt: if relDiff>0.01: - print('[WARN] dt from tmax-tmin different from dt from t2-t1 {} {}'.format(dt, dtDelta0) ) + if verbose: + print('[WARN] dt from tmax-tmin different from dt from t2-t1 {} {}'.format(dt, dtDelta0) ) Fs = 1/dt if averaging =='none': frq, PSD, Info = psd(y, fs=Fs, detrend=detrend, return_onesided=True) @@ -80,7 +81,8 @@ def fft_wrap(t,y,dt=None, output_type='amplitude',averaging='None',averaging_win nExp=int(np.log(nFFTAll)/np.log(2))-1 nPerSeg=2**nExp if nPerSeg>n: - print('[WARN] Power of 2 value was too high and was reduced. Disable averaging to use the full spectrum.'); + if verbose: + print('[WARN] Power of 2 value was too high and was reduced. 
Disable averaging to use the full spectrum.'); nExp=int(np.log(nFFTAll)/np.log(2))-1 nPerSeg=2**nExp if averaging_window=='hamming': diff --git a/openfast_toolbox/tools/stats.py b/openfast_toolbox/tools/stats.py index ec9cc17..8aa035b 100644 --- a/openfast_toolbox/tools/stats.py +++ b/openfast_toolbox/tools/stats.py @@ -46,7 +46,7 @@ def comparison_stats(t1, y1, t2, y2, stats='sigRatio,eps,R2', method='mean', abs # Mean relative error eps = float(mean_rel_err(t1, y1, t2, y2, method=method, absVal=absVal)) stats['eps'] = eps - sStats+=['$\epsilon=$'+r'{:.1f}%'.format(eps)] + sStats+=[r'$\epsilon=$'+r'{:.1f}%'.format(eps)] elif s=='r2': # Rsquare @@ -102,9 +102,17 @@ def rsquare(y, f, c = True): y = y[tmp] f = f[tmp] if c: - r2 = max(0,1-np.sum((y-f)**2)/np.sum((y-np.mean(y))** 2)) + denom = np.sum((y-np.mean(y))** 2) + if abs(denom)>0: + r2 = max(0,1-np.sum((y-f)**2)/denom) + else: + r2 = np.inf else: - r2 = 1 - np.sum((y - f) ** 2) / np.sum((y) ** 2) + denom = np.sum((y) ** 2) + if abs(denom)>0: + r2 = 1 - np.sum((y - f) ** 2) /denom + else: + r2 = np.inf if r2 < 0: import warnings warnings.warn('Consider adding a constant term to your model') @@ -138,10 +146,16 @@ def myabs(y): if method=='mean': # Method 1 relative to mean ref_val = np.nanmean(y1) - meanrelerr = np.nanmean(myabs(y2-y1)/ref_val)*100 + if abs(ref_val)>0: + meanrelerr = np.nanmean(myabs(y2-y1)/ref_val)*100 + else: + meanrelerr = np.nan elif method=='meanabs': ref_val = np.nanmean(abs(y1)) - meanrelerr = np.nanmean(myabs(y2-y1)/ref_val)*100 + if abs(ref_val)>0: + meanrelerr = np.nanmean(myabs(y2-y1)/ref_val)*100 + else: + meanrelerr = np.nan elif method=='loc': meanrelerr = np.nanmean(myabs(y2-y1)/abs(y1))*100 elif method=='minmax': @@ -197,7 +211,10 @@ def pdf_histogram(y,nBins=50, norm=True, count=False): else: yh = yh / (nBins*dx) if norm: - yh=yh/np.trapz(yh,xh) + try: + yh=yh/np.trapezoid(yh,xh) + except: + yh=yh/np.trapz(yh,xh) return xh,yh def pdf_gaussian_kde(data, bw='scott', nOut=100, cut=3, clip=(-np.inf,np.inf)):
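
A minimal sketch of the np.trapz / np.trapezoid fallback that the postpro.py and stats.py hunks above apply per call site (NumPy 2.0 renamed the function); writing the alias once at import time is one way to express the same compatibility guard. This is an illustration only, assuming nothing beyond NumPy itself:

# Compatibility alias for the np.trapz -> np.trapezoid rename (NumPy >= 2.0),
# mirroring the try/except pattern used in the hunks above but resolved once.
import numpy as np

try:
    trapezoid = np.trapezoid   # NumPy >= 2.0
except AttributeError:
    trapezoid = np.trapz       # older NumPy releases

x = np.linspace(0.0, 1.0, 11)
y = x**2
print(trapezoid(y, x))         # ~0.335, same result on any NumPy version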