diff --git a/demos/JWST/S1_nirx_template.ecf b/demos/JWST/S1_nirx_template.ecf index c54225333..9c23704ab 100644 --- a/demos/JWST/S1_nirx_template.ecf +++ b/demos/JWST/S1_nirx_template.ecf @@ -65,7 +65,7 @@ expand_mask 8 ignore_low 600 ignore_hi None -# Manual reference pixel correction for NIRSpec PRISM +# Manual reference pixel correction for NIRSpec PRISM when not subtracting BG refpix_corr False npix_top 8 npix_bot 8 diff --git a/demos/JWST/S4_template.ecf b/demos/JWST/S4_template.ecf index 7200fee11..418390695 100644 --- a/demos/JWST/S4_template.ecf +++ b/demos/JWST/S4_template.ecf @@ -9,7 +9,7 @@ wave_min 1.5 # Minimum wavelength. Set to None to use the shortes wave_max 4.5 # Maximum wavelength. Set to None to use the longest extracted wavelength from Stage 3. allapers False # Run S4 on all of the apertures considered in S3? Otherwise will use newest output in the inputdir -# Mannualy mask pixel columns by index number +# Manually mask pixel columns by index number # mask_columns [] # Parameters for drift correction of 1D spectra diff --git a/docs/media/S1_template.ecf b/docs/media/S1_template.ecf index 0d0baf497..30f472083 100644 --- a/docs/media/S1_template.ecf +++ b/docs/media/S1_template.ecf @@ -62,7 +62,7 @@ expand_mask 8 ignore_low 600 ignore_hi None -# Manual reference pixel correction for NIRSpec PRISM +# Manual reference pixel correction for NIRSpec PRISM when not subtracting BG refpix_corr False npix_top 8 npix_bot 8 diff --git a/docs/media/S4_template.ecf b/docs/media/S4_template.ecf index 2b7e6df17..c59c928b0 100644 --- a/docs/media/S4_template.ecf +++ b/docs/media/S4_template.ecf @@ -9,7 +9,7 @@ wave_min 1.5 # Minimum wavelength. Set to None to use the shortes wave_max 4.5 # Maximum wavelength. Set to None to use the longest extracted wavelength from Stage 3. allapers False # Run S4 on all of the apertures considered in S3? Otherwise will use newest output in the inputdir -# Mannualy mask pixel columns by index number +# Manually mask pixel columns by index number # mask_columns [] # Parameters for drift correction of 1D spectra diff --git a/docs/source/index.rst b/docs/source/index.rst index ad12ca889..94f274426 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -11,7 +11,7 @@ Welcome to Eureka!'s documentation! =================================== ``Eureka!`` is a data reduction and analysis pipeline for exoplanet time-series observations, with a particular focus on JWST data. -``Eureka!`` is capable of of reducing JWST time-series data (starting from raw, uncalibrated FITS files) and turning it into precise exoplanet transmission and emission spectra. +``Eureka!`` is capable of reducing JWST time-series data (starting from raw, uncalibrated FITS files) and turning it into precise exoplanet transmission and emission spectra. The package is continually being improved, so be sure to check back often for new updates. The code is not officially associated with JWST or the ERS team. diff --git a/docs/source/installation.rst b/docs/source/installation.rst index ad47e151d..f7de150b2 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -6,8 +6,8 @@ Installation methods -------------------- In order to have consistent, repeatable results across the ``Eureka!`` user community, we recommend that all general users install -the most recent stable release of ``Eureka!``, v0.9. The following installation instructions are written with this in mind, -and the most recent stable release is also available as a zipped archive `here `_.
+the most recent stable release of ``Eureka!``, v0.10. The following installation instructions are written with this in mind, +and the most recent stable release is also available as a zipped archive `here `_. Also note that if you are using a macOS device with an M1 processor, you will need to use the ``conda`` environment.yml file installation instructions below as the pip dependencies fail to build on the M1 processor. @@ -31,7 +31,7 @@ Once in your new conda environment, you can install ``Eureka!`` directly from so .. code-block:: bash - git clone -b v0.9 https://github.com/kevin218/Eureka.git + git clone -b v0.10 https://github.com/kevin218/Eureka.git cd Eureka pip install -e '.[jwst]' @@ -49,7 +49,7 @@ Once in your new conda environment, you can install the ``Eureka!`` package with .. code-block:: bash - pip install 'eureka[jwst]@git+https://github.com/kevin218/Eureka.git@v0.9' + pip install 'eureka[jwst]@git+https://github.com/kevin218/Eureka.git@v0.10' Other specific branches can be installed using: @@ -89,7 +89,7 @@ To install using conda: .. code-block:: bash - git clone -b v0.9 https://github.com/kevin218/Eureka.git + git clone -b v0.10 https://github.com/kevin218/Eureka.git cd Eureka conda env create --file environment.yml --force conda activate eureka diff --git a/environment.yml b/environment.yml index 5fc18cf49..1c0f5da70 100644 --- a/environment.yml +++ b/environment.yml @@ -10,7 +10,6 @@ dependencies: - ccdproc - celerite # Needed for GP - corner - - cython - dynesty[version='>1.0'] # Lower limit needed for specific arguments - emcee[version='>3.0.0'] # Lower limit needed for specific arguments - flake8 # Needed for testing @@ -40,7 +39,7 @@ dependencies: - crds - exotic-ld - image_registration@git+https://github.com/keflavich/image_registration@master # Need GitHub version to avoid np.float issue - - jwst==1.10.2 + - jwst==1.11.4 - myst-parser # Needed for documentation - setuptools_scm # Needed for version number - sphinx-rtd-theme # Needed for documentation diff --git a/environment_pymc3.yml b/environment_pymc3.yml index 16bcf8cc6..2b148743a 100644 --- a/environment_pymc3.yml +++ b/environment_pymc3.yml @@ -11,7 +11,6 @@ dependencies: - ccdproc - celerite # Needed for GP - corner - - cython - dynesty[version='>1.0'] # Lower limit needed for specific arguments - emcee[version='>3.0.0'] # Lower limit needed for specific arguments - flake8 # Needed for testing @@ -43,7 +42,7 @@ dependencies: - exoplanet - exotic-ld - image_registration@git+https://github.com/keflavich/image_registration@master # Need GitHub version to avoid np.float issue - - jwst==1.10.2 + - jwst==1.11.4 - myst-parser # Needed for documentation - opencv-python<4.8 # Upper limit needed for numpy<1.22 - opencv-python-headless<4.8 # Upper limit needed for numpy<1.22 diff --git a/pyproject.toml b/pyproject.toml index a64829ed3..99cb092e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=42", "wheel", "cython", "setuptools_scm[toml]>=6.2"] +requires = ["setuptools>=42", "wheel", "setuptools_scm[toml]>=6.2"] build-backend = "setuptools.build_meta" [tool.setuptools_scm] diff --git a/setup.cfg b/setup.cfg index 75db4a57b..0ed5c3261 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,11 +2,11 @@ name = eureka author = Eureka! pipeline developers author_email = kbstevenson@gmail.com -description = A data reduction and analysis pipeline intended for time-series observations with the JWST.
+description = A data reduction and analysis pipeline intended for time-series observations with JWST. long_description = file: README.md long_description_content_type = text/markdown license = MIT License -license_file = LICENSE +license_files = [LICENSE,] url = https://github.com/kevin218/Eureka project_urls = Bug Tracker = https://github.com/kevin218/Eureka/issues @@ -23,8 +23,6 @@ package_dir = packages = find: zip_safe = True python_requires= >=3.8 -setup_requires = - cython install_requires = astraeus@git+https://github.com/kevin218/Astraeus@main astropy @@ -35,7 +33,6 @@ install_requires = celerite # Needed for GP corner crds - cython dynesty>1.0 # Lower limit needed for specific arguments emcee>3.0.0 # Lower limit needed for specific arguments exotic-ld @@ -60,7 +57,7 @@ where = src [options.extras_require] jwst = - jwst==1.10.2 + jwst==1.11.4 stcal>=1.0.0 # Lower limit needed for our create_integration_model function hst = image_registration@git+https://github.com/keflavich/image_registration@master # Need GitHub version to avoid np.float issue @@ -87,6 +84,7 @@ pymc3 = opencv-python<4.8 # Upper limit needed for numpy<1.22 opencv-python-headless<4.8 # Upper limit needed for numpy<1.22 scipy>=1.4.0,<1.8.0 # Lower limit needed for scipy.fft, upper limit needed for theano, starry, and pymc3 + xarray<2023.10.0 # Upper limit needed for numpy<1.22 jupyter = jupyter ipykernel diff --git a/setup.py b/setup.py index 0dd39fe3c..bebf1f027 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,6 @@ #!/usr/bin/env python from setuptools import setup -from Cython.Build import cythonize if __name__ == "__main__": - setup( - ext_modules=cythonize( - ["src/eureka/S3_data_reduction/niriss_cython.pyx"]), - ) + setup() diff --git a/src/eureka/S1_detector_processing/group_level.py b/src/eureka/S1_detector_processing/group_level.py index 2382e61e7..cbfc9126a 100644 --- a/src/eureka/S1_detector_processing/group_level.py +++ b/src/eureka/S1_detector_processing/group_level.py @@ -195,7 +195,7 @@ def custom_ref_pixel(input_model, log, meta): intgrp_data[dqmask] = np.nan if hasattr(meta, 'masktrace') and meta.masktrace: - intgrp_data *= meta.trace_mask + intgrp_data *= input_model.trace_mask odd_med = np.nanmedian(intgrp_data[odd_row_ref, :]) evn_med = np.nanmedian(intgrp_data[evn_row_ref, :]) diff --git a/src/eureka/S1_detector_processing/ramp_fitting.py b/src/eureka/S1_detector_processing/ramp_fitting.py index 2730e1f38..220ec58b3 100644 --- a/src/eureka/S1_detector_processing/ramp_fitting.py +++ b/src/eureka/S1_detector_processing/ramp_fitting.py @@ -371,7 +371,7 @@ def calc_opt_sums_uniform_weight(rn_sect, gain_sect, data_masked, mask_2d, # Return 'empty' sums if there is no more data to fit if data_masked.size == 0: - return np.array([]), np.array([]), np.array([]), np.array([]),\ + return np.array([]), np.array([]), np.array([]), np.array([]), \ np.array([]), np.array([]) # get initial group for each good pixel for this semiramp diff --git a/src/eureka/S3_data_reduction/__init__.py b/src/eureka/S3_data_reduction/__init__.py index 2c7da9f13..9139fe594 100644 --- a/src/eureka/S3_data_reduction/__init__.py +++ b/src/eureka/S3_data_reduction/__init__.py @@ -5,6 +5,7 @@ from . import miri from . import nircam from . import niriss_profiles +from . import niriss_python from . import niriss from . import nirspec from . 
import optspex diff --git a/src/eureka/S3_data_reduction/background.py b/src/eureka/S3_data_reduction/background.py index f5496ad69..57cbb7bac 100644 --- a/src/eureka/S3_data_reduction/background.py +++ b/src/eureka/S3_data_reduction/background.py @@ -205,9 +205,9 @@ def fitbg(dataim, meta, mask, x1, x2, deg=1, threshold=5, isrotate=False, # Convert x1 and x2 to array, if need be ny, nx = np.shape(dataim) - if type(x1) == int or type(x1) == np.int64: + if isinstance(x1, (int, np.int64)): x1 = np.zeros(ny, dtype=int)+x1 - if type(x2) == int or type(x2) == np.int64: + if isinstance(x2, (int, np.int64)): x2 = np.zeros(ny, dtype=int)+x2 if deg < 0: diff --git a/src/eureka/S3_data_reduction/niriss.py b/src/eureka/S3_data_reduction/niriss.py index a8de1d087..6d737e64f 100644 --- a/src/eureka/S3_data_reduction/niriss.py +++ b/src/eureka/S3_data_reduction/niriss.py @@ -21,11 +21,7 @@ from .background import fitbg3 from .niriss_profiles import * - -# some cute cython code -import pyximport -pyximport.install() -from . import niriss_cython +from . import niriss_python __all__ = ['read', 'simplify_niriss_img', 'image_filtering', @@ -606,7 +602,7 @@ def construct_guesses(A, B, sig, length=10): combos = construct_guesses([0.1,30], [0.1,30], [1,40]) # generates length x length x length number of images and fits to the data - img1, sigout1 = niriss_cython.build_image_models(data.median, + img1, sigout1 = niriss_python.build_image_models(data.median, combos[:,0], combos[:,1], combos[:,2], pos1, pos2) @@ -619,7 +615,7 @@ def construct_guesses(A, B, sig, length=10): # generates length x length x length number of images centered around the previous # guess to optimize the image fit - img2, sigout2 = niriss_cython.build_image_models(data.median, + img2, sigout2 = niriss_python.build_image_models(data.median, combos[:,0], combos[:,1], combos[:,2], pos1, pos2) @@ -627,7 +623,7 @@ def construct_guesses(A, B, sig, length=10): # creates a 2D image for the first and second orders with the best-fit gaussian # profiles final_guess = combos[np.argmin(sigout2)] - ord1, ord2, _ = niriss_cython.build_image_models(data.median, + ord1, ord2, _ = niriss_python.build_image_models(data.median, [final_guess[0]], [final_guess[1]], [final_guess[2]], @@ -661,7 +657,7 @@ def residuals(params, data, y1_pos, y2_pos): """ Calcualtes residuals for best-fit profile. 
""" A, B, sig1 = params # Produce the model: - model,_ = niriss_cython.build_image_models(data, [A], [B], [sig1], y1_pos, y2_pos) + model,_ = niriss_python.build_image_models(data, [A], [B], [sig1], y1_pos, y2_pos) # Calculate residuals: res = (model[0] - data) return res.flatten() @@ -676,7 +672,7 @@ def residuals(params, data, y1_pos, y2_pos): ) # creates the final mask - out_img1,out_img2,_= niriss_cython.build_image_models(data.median, + out_img1,out_img2,_= niriss_python.build_image_models(data.median, results.x[0:1], results.x[1:2], results.x[2:3], diff --git a/src/eureka/S3_data_reduction/niriss_cython.pyx b/src/eureka/S3_data_reduction/niriss_cython.pyx deleted file mode 100644 index a84eee183..000000000 --- a/src/eureka/S3_data_reduction/niriss_cython.pyx +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - -def build_image_models(data, A, B, sig, mu1, mu2, return_together=True): - - y = np.transpose(np.full((len(A), data.shape[-1], data.shape[0]), - np.arange(0,data.shape[0],1)), axes=(0,2,1)) - - sig = np.full(y.T.shape,sig).T - - A = np.full(y.T.shape, A).T - exp = np.exp(-(y-mu1)**2/(2.0 * sig**2)) - gauss = 1.0/(2.0*np.pi*np.sqrt(sig)) * exp - f1x = mu1*gauss*A - - B = np.full(y.T.shape, B).T - exp = np.exp(-(y-mu2)**2/(2.0 * sig**2)) - gauss = 1.0/(2.0*np.pi*np.sqrt(sig)) * exp - f2x = mu2*gauss*B - - model = f1x+f2x - sigma = np.nansum( np.sqrt( (model-data)**2.0), axis=(1,2)) - - if return_together: - return model, sigma - else: - return f1x, f2x, sigma \ No newline at end of file diff --git a/src/eureka/S3_data_reduction/niriss_python.py b/src/eureka/S3_data_reduction/niriss_python.py new file mode 100644 index 000000000..703dcd92b --- /dev/null +++ b/src/eureka/S3_data_reduction/niriss_python.py @@ -0,0 +1,26 @@ +import numpy as np + +def build_image_models(data, A, B, sig, mu1, mu2, return_together=True): + + y = np.transpose(np.full((len(A), data.shape[-1], data.shape[0]), + np.arange(0,data.shape[0],1)), axes=(0,2,1)) + + sig = np.full(y.T.shape, sig).T + + A = np.full(y.T.shape, A).T + exp = np.exp(-(y-mu1)**2/(2.0*sig**2)) + gauss = 1.0/(2.0*np.pi*np.sqrt(sig))*exp + f1x = mu1*gauss*A + + B = np.full(y.T.shape, B).T + exp = np.exp(-(y-mu2)**2/(2.0*sig**2)) + gauss = 1.0/(2.0*np.pi*np.sqrt(sig))*exp + f2x = mu2*gauss*B + + model = f1x+f2x + sigma = np.nansum(np.sqrt((model-data)**2.0), axis=(1,2)) + + if return_together: + return model, sigma + else: + return f1x, f2x, sigma diff --git a/src/eureka/S3_data_reduction/plots_s3.py b/src/eureka/S3_data_reduction/plots_s3.py index abaaff00f..f15224fb6 100644 --- a/src/eureka/S3_data_reduction/plots_s3.py +++ b/src/eureka/S3_data_reduction/plots_s3.py @@ -533,7 +533,7 @@ def residualBackground(data, meta, m, vmin=-200, vmax=1000): cmap = plt.cm.plasma.copy() cmap.set_bad('k', 1.) 
- plt.figure(3304) + plt.figure(3304, figsize=(8, 4)) plt.clf() fig, (a0, a1) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [3, 1]}, num=3304, figsize=(8, 3.5)) diff --git a/src/eureka/S4_generate_lightcurves/generate_LD.py b/src/eureka/S4_generate_lightcurves/generate_LD.py index 3d9877d70..3aa982965 100644 --- a/src/eureka/S4_generate_lightcurves/generate_LD.py +++ b/src/eureka/S4_generate_lightcurves/generate_LD.py @@ -64,16 +64,17 @@ def exotic_ld(meta, spec, log, white=False): # Compute wavelength ranges if white: - wavelength_range = np.array([meta.wave_min, meta.wave_max]) + wavelength_range = np.array([meta.wave_min, meta.wave_max], + dtype=float) wavelength_range = np.repeat(wavelength_range[np.newaxis], meta.nspecchan, axis=0) else: - wsdata = np.array(meta.wave_low) + wsdata = np.array(meta.wave_low, dtype=float) wsdata = np.append(wsdata, meta.wave_hi[-1]) wavelength_range = [] for i in range(meta.nspecchan): wavelength_range.append([wsdata[i], wsdata[i+1]]) - wavelength_range = np.array(wavelength_range) + wavelength_range = np.array(wavelength_range, dtype=float) # wavelength range needs to be in Angstrom if spec.wave_1d.attrs['wave_units'] == 'microns': diff --git a/src/eureka/S4_generate_lightcurves/s4_genLC.py b/src/eureka/S4_generate_lightcurves/s4_genLC.py index 80a7ae6c9..873aa3d66 100644 --- a/src/eureka/S4_generate_lightcurves/s4_genLC.py +++ b/src/eureka/S4_generate_lightcurves/s4_genLC.py @@ -107,6 +107,8 @@ def genlc(eventlabel, ecf_path=None, s3_meta=None, input_meta=None): # Create directories for Stage 5 outputs meta.run_s4 = None + if not hasattr(meta, 'expand'): + meta.expand = 1 for spec_hw_val in meta.spec_hw_range: for bg_hw_val in meta.bg_hw_range: if not isinstance(bg_hw_val, str): diff --git a/src/eureka/S5_lightcurve_fitting/differentiable_models/PyMC3Models.py b/src/eureka/S5_lightcurve_fitting/differentiable_models/PyMC3Models.py index 36d4d0cd8..b2ddd3c6b 100644 --- a/src/eureka/S5_lightcurve_fitting/differentiable_models/PyMC3Models.py +++ b/src/eureka/S5_lightcurve_fitting/differentiable_models/PyMC3Models.py @@ -298,6 +298,11 @@ def __init__(self, components, **kwargs): # Add the PyMC3 model to each component component.model = self.model + self.GP = False + for component in self.components: + if component.modeltype == 'GP': + self.GP = True + # Setup PyMC3 model parameters with self.model: diff --git a/src/eureka/S5_lightcurve_fitting/differentiable_models/StarryModel.py b/src/eureka/S5_lightcurve_fitting/differentiable_models/StarryModel.py index 165ae3926..118a9d9f6 100644 --- a/src/eureka/S5_lightcurve_fitting/differentiable_models/StarryModel.py +++ b/src/eureka/S5_lightcurve_fitting/differentiable_models/StarryModel.py @@ -104,6 +104,7 @@ def setup(self): """Setup a model for evaluation and fitting. 
""" self.systems = [] + self.rps = [] for c in range(self.nchannel_fitted): # To save ourselves from tonnes of getattr lines, let's make a # new object without the _c parts of the parnames @@ -160,8 +161,9 @@ def setup(self): if hasattr(temp, f'Y{ell}{m}'): planet_map[ell, m] = getattr(temp, f'Y{ell}{m}') planet_map2[ell, m] = getattr(temp, f'Y{ell}{m}') - amp = temp.fp/planet_map2.flux(theta=0)[0] + amp = temp.fp/tt.abs_(planet_map2.flux(theta=0)[0]) planet_map.amp = amp + self.rps.append(temp.rp) # Initialize planet object planet = starry.Secondary( @@ -216,15 +218,12 @@ def eval(self, eval=True, channel=None, **kwargs): if eval: lib = np - lessthan = np.less systems = self.fit.systems - rps = [systems[chan].secondaries[0].r.eval() - for chan in range(nchan)] + rps = self.fit_rps else: lib = tt - lessthan = tt.lt systems = self.systems - rps = [systems[chan].secondaries[0].r for chan in range(nchan)] + rps = self.rps phys_flux = lib.zeros(0) for c in range(nchan): @@ -240,9 +239,11 @@ def eval(self, eval=True, channel=None, **kwargs): # Combine the planet and stellar flux (allowing negative rp) fstar, fp = systems[chan].flux(time, total=False) - if lessthan(rps[chan], 0): - fstar = 2-fstar - fp *= -1 + # Do some annoying math to allow theano functions to compile + # (correctly defined for -1 < rp < 1) + sign = (lib.ceil(rps[chan])+lib.floor(rps[chan])) + fstar = (fstar-1)*sign + 1 + fp = fp*sign lcpiece = fstar+fp if eval: @@ -286,6 +287,7 @@ def update(self, newparams, **kwargs): super().update(newparams, **kwargs) self.fit.systems = [] + self.fit_rps = [] for c in range(self.nchannel_fitted): # To save ourselves from tonnes of getattr lines, let's make a # new object without the _c parts of the parnames @@ -341,8 +343,9 @@ def update(self, newparams, **kwargs): if hasattr(temp, f'Y{ell}{m}'): planet_map[ell, m] = getattr(temp, f'Y{ell}{m}') planet_map2[ell, m] = getattr(temp, f'Y{ell}{m}') - amp = temp.fp/planet_map2.flux(theta=0)[0] + amp = temp.fp/np.abs(planet_map2.flux(theta=0)[0]) planet_map.amp = amp + self.fit_rps.append(temp.rp) # Initialize planet object planet = starry.Secondary( diff --git a/src/eureka/S5_lightcurve_fitting/fitters.py b/src/eureka/S5_lightcurve_fitting/fitters.py index caa4f9c19..96bf08d45 100644 --- a/src/eureka/S5_lightcurve_fitting/fitters.py +++ b/src/eureka/S5_lightcurve_fitting/fitters.py @@ -526,7 +526,7 @@ def start_from_oldchain_emcee(lc, meta, log, ndim, freenames): with h5py.File(full_fname, 'r') as hf: samples = hf['samples'][:] else: - samples = ds.to_array().T + samples = ds.to_array().T.values log.writelog(f'Old chain path: {full_fname}') # Initialize the walkers using samples from the old chain @@ -1267,7 +1267,13 @@ def save_fit(meta, lc, model, fitter, results_table, freenames, samples=[]): # Only divide if value is not a string (spectroscopic modes) bg_hw_val //= meta.expand # Save the S5 outputs in a human readable ecsv file - event_ap_bg = meta.eventlabel+"_ap"+str(spec_hw_val)+'_bg'+str(bg_hw_val) + if not isinstance(meta.bg_hw, str): + # Only divide if value is not a string (spectroscopic modes) + bg_hw = meta.bg_hw//meta.expand + else: + bg_hw = meta.bg_hw + event_ap_bg = meta.eventlabel+"_ap"+str(meta.spec_hw//meta.expand) + \ + '_bg'+str(bg_hw) meta.tab_filename_s5 = (meta.outputdir+'S5_'+event_ap_bg+"_Table_Save" + channel_tag+'.txt') wavelengths = np.mean(np.append(meta.wave_low.reshape(1, -1), diff --git a/src/eureka/S5_lightcurve_fitting/gradient_fitters.py b/src/eureka/S5_lightcurve_fitting/gradient_fitters.py index 
0c218dc83..01db74a9e 100644 --- a/src/eureka/S5_lightcurve_fitting/gradient_fitters.py +++ b/src/eureka/S5_lightcurve_fitting/gradient_fitters.py @@ -58,6 +58,16 @@ def exoplanetfitter(lc, model, meta, log, calling_function='exoplanet', for name, val in zip(freenames, freepars): start[name] = val + model.update(freepars) + # Plot starting point + if meta.isplots_S5 >= 1: + plots.plot_fit(lc, model, meta, + fitter=calling_function+'StartingPoint') + # Plot GP starting point + if model.GP: + plots.plot_GP_components(lc, model, meta, + fitter=calling_function+'StartingPoint') + log.writelog('Running exoplanet optimizer...') with model.model: map_soln = pmx.optimize(start=start) @@ -179,6 +189,16 @@ def nutsfitter(lc, model, meta, log, **kwargs): for name, val in zip(freenames, freepars): start[name] = val + model.update(freepars) + # Plot starting point + if meta.isplots_S5 >= 1: + plots.plot_fit(lc, model, meta, + fitter='nutsStartingPoint') + # Plot GP starting point + if model.GP: + plots.plot_GP_components(lc, model, meta, + fitter='nutsStartingPoint') + if not hasattr(meta, 'target_accept'): meta.target_accept = 0.85 diff --git a/src/eureka/S5_lightcurve_fitting/lightcurve.py b/src/eureka/S5_lightcurve_fitting/lightcurve.py index 88fbe69a2..85d906b75 100644 --- a/src/eureka/S5_lightcurve_fitting/lightcurve.py +++ b/src/eureka/S5_lightcurve_fitting/lightcurve.py @@ -96,7 +96,7 @@ def __init__(self, time, flux, channel, nchannel, log, longparamlist, # Set the data arrays if unc is not None: - if type(unc) == float or type(unc) == np.float64: + if isinstance(unc, (float, np.float64)): log.writelog('Warning: Only one uncertainty input, assuming ' 'constant uncertainty.') elif (len(time)*self.nchannel_fitted != len(unc) @@ -232,8 +232,14 @@ def plot(self, meta, fits=True): model.plot(ax=ax, color=next(plot_COLORS), zorder=np.inf, share=self.share, chan=channel) + # Determine wavelength + if meta.multwhite: + wave = meta.wave[0] + else: + wave = meta.wave[channel] # Format axes - ax.set_title(f'{meta.eventlabel} - Channel {channel}') + ax.set_title(f'{meta.eventlabel} - Channel {channel} ' + + f'- {wave} microns') ax.set_xlabel(str(self.time_units)) ax.set_ylabel('Normalized Flux', size=14) ax.legend(loc='best') diff --git a/src/eureka/S5_lightcurve_fitting/models/BatmanModels.py b/src/eureka/S5_lightcurve_fitting/models/BatmanModels.py index a9fccfd8d..01e136769 100644 --- a/src/eureka/S5_lightcurve_fitting/models/BatmanModels.py +++ b/src/eureka/S5_lightcurve_fitting/models/BatmanModels.py @@ -25,12 +25,14 @@ def __init__(self, **kwargs): Can pass in the parameters, longparamlist, nchan, and paramtitles arguments here. 
""" - # Inherit from Model calss + # Inherit from Model class super().__init__(**kwargs) # Define model type (physical, systematic, other) self.modeltype = 'physical' + log = kwargs.get('log') + # Store the ld_profile self.ld_from_S4 = kwargs.get('ld_from_S4') ld_func = ld_profile(self.parameters.limb_dark.value, @@ -42,17 +44,19 @@ def __init__(self, **kwargs): # Replace u parameters with generated limb-darkening values if self.ld_from_S4 or self.ld_from_file: + log.writelog("Using the following limb-darkening values:") self.ld_array = kwargs.get('ld_coeffs') - if self.ld_from_S4: - self.ld_array = self.ld_array[len_params-2] for c in range(self.nchannel_fitted): chan = self.fitted_channels[c] + if self.ld_from_S4: + ld_array = self.ld_array[len_params-2] for u in self.coeffs: index = np.where(np.array(self.paramtitles) == u)[0] if len(index) != 0: item = self.longparamlist[c][index[0]] param = int(item.split('_')[0][-1]) - ld_val = self.ld_array[chan][param-1] + ld_val = ld_array[chan][param-1] + log.writelog(f"{item}, {ld_val}") # Use the file value as the starting guess self.parameters.dict[item][0] = ld_val # In a normal prior, center at the file value @@ -277,7 +281,7 @@ def eval(self, channel=None, **kwargs): bm_params.u = [] if self.compute_ltt: - if c == 0 or not self.compute_ltt_once or self.multwhite: + if c == 0 or not self.compute_ltt_once: self.adjusted_time = correct_light_travel_time(time, bm_params) else: diff --git a/src/eureka/S5_lightcurve_fitting/models/ExpRampModel.py b/src/eureka/S5_lightcurve_fitting/models/ExpRampModel.py index bc619b5ee..4974b6ffb 100644 --- a/src/eureka/S5_lightcurve_fitting/models/ExpRampModel.py +++ b/src/eureka/S5_lightcurve_fitting/models/ExpRampModel.py @@ -124,7 +124,7 @@ def eval(self, channel=None, **kwargs): # Split the arrays that have lengths of the original time axis time = split([time, ], self.nints, chan)[0] - r0, r1, r2, r3, r4, r5 = self.coeffs[c] + r0, r1, r2, r3, r4, r5 = self.coeffs[chan] lcpiece = (1+r0*np.exp(-r1*time + r2) + r3*np.exp(-r4*time + r5)) lcfinal = np.append(lcfinal, lcpiece) diff --git a/src/eureka/S5_lightcurve_fitting/models/GPModel.py b/src/eureka/S5_lightcurve_fitting/models/GPModel.py index 1f871e4bb..f31599842 100644 --- a/src/eureka/S5_lightcurve_fitting/models/GPModel.py +++ b/src/eureka/S5_lightcurve_fitting/models/GPModel.py @@ -179,7 +179,7 @@ def eval(self, fit, channel=None, gp=None, **kwargs): return_cov=False) elif self.gp_code_name == 'celerite': gp.compute(self.kernel_inputs[chan][0], yerr=unc_fit) - mu = gp.predict(residuals, return_cov=False) + mu = gp.predict(residuals) elif self.gp_code_name == 'tinygp': cond_gp = gp.condition(residuals, noise=unc_fit).gp mu = cond_gp.loc @@ -315,8 +315,9 @@ def get_kernel(self, kernel_name, k, c=0): amp = self.coeffs[c, k, 0] metric = self.coeffs[c, k, 1] - kernel = celerite.terms.Matern32Term(log_sigma=amp, - log_rho=metric) + kernel = celerite.terms.Matern32Term(log_sigma=0, log_rho=metric) + # Setting the amplitude + kernel *= celerite.terms.RealTerm(log_a=amp, log_c=-10) elif self.gp_code_name == 'tinygp': # get metric and amplitude for the current kernel and channel amp = np.exp(self.coeffs[c, k, 0]*2) # Want exp(sigma)^2 diff --git a/src/eureka/S5_lightcurve_fitting/models/KeplerOrbit.py b/src/eureka/S5_lightcurve_fitting/models/KeplerOrbit.py index 0a6d4b52d..ebfa34478 100644 --- a/src/eureka/S5_lightcurve_fitting/models/KeplerOrbit.py +++ b/src/eureka/S5_lightcurve_fitting/models/KeplerOrbit.py @@ -342,7 +342,7 @@ def eccentric_anomaly(self, t, 
useFSSI=None, xtol=1e-10): ndarray The eccentric anomaly in radians. """ - if type(t) != np.ndarray: + if not isinstance(t, np.ndarray): t = np.array([t]) tShape = t.shape t = t.flatten() @@ -600,7 +600,7 @@ def get_ssp(self, t, TA=None): and latitude. Each ndarray is in the same shape as t. """ if self.e == 0. and self.Prot == self.Porb: - if type(t) != np.ndarray: + if not isinstance(t, np.ndarray): sspLon = np.zeros_like([t]) else: sspLon = np.zeros_like(t) @@ -614,7 +614,7 @@ def get_ssp(self, t, TA=None): (-180.)*(np.rint(np.floor(sspLon % 360./180.) > 0))) if self.obliq == 0.: - if type(t) != np.ndarray: + if not isinstance(t, np.ndarray): sspLat = np.zeros_like([t]) else: sspLat = np.zeros_like(t) diff --git a/src/eureka/S5_lightcurve_fitting/models/SinusoidPhaseCurve.py b/src/eureka/S5_lightcurve_fitting/models/SinusoidPhaseCurve.py index 85f2a4a78..7158ddc33 100644 --- a/src/eureka/S5_lightcurve_fitting/models/SinusoidPhaseCurve.py +++ b/src/eureka/S5_lightcurve_fitting/models/SinusoidPhaseCurve.py @@ -184,10 +184,10 @@ def eval(self, channel=None, **kwargs): if self.transit_model is None: transit = 1 else: - transit = self.transit_model.eval() + transit = self.transit_model.eval(channel=channel) if self.eclipse_model is None: eclipse = 1 else: - eclipse = self.eclipse_model.eval() + eclipse = self.eclipse_model.eval(channel=channel) return transit + lcfinal*(eclipse-1) diff --git a/src/eureka/S5_lightcurve_fitting/plots_s5.py b/src/eureka/S5_lightcurve_fitting/plots_s5.py index 3d120a03a..357e9544c 100644 --- a/src/eureka/S5_lightcurve_fitting/plots_s5.py +++ b/src/eureka/S5_lightcurve_fitting/plots_s5.py @@ -43,7 +43,7 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): - February 28-March 1, 2022 Caroline Piaulet Adding scatter_ppm parameter """ - if type(fitter) != str: + if not isinstance(fitter, str): raise ValueError(f'Expected type str for fitter, instead received a ' f'{type(fitter)}') @@ -52,12 +52,12 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): model.physeval(interp=meta.interp) model_noGP = model.eval(incl_GP=False) model_gp = model.GPeval(model_noGP) - model_lc = model_noGP+model_gp + model_eval = model_noGP+model_gp for i, channel in enumerate(lc.fitted_channels): flux = np.ma.copy(lc.flux) unc = np.ma.copy(lc.unc_fit) - model = np.ma.copy(model_lc) + model_lc = np.ma.copy(model_eval) gp = np.ma.copy(model_gp) model_sys = model_sys_full model_phys = model_phys_full @@ -68,8 +68,8 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): new_timet = new_time # Split the arrays that have lengths of the original time axis - flux, unc, model, model_sys, gp = \ - split([flux, unc, model, model_sys, gp], + flux, unc, model_lc, model_sys, gp = \ + split([flux, unc, model_lc, model_sys, gp], meta.nints, channel) # Split the arrays that have lengths of the new (potentially @@ -77,8 +77,8 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): model_phys = split([model_phys, ], nints_interp, channel)[0] elif meta.multwhite: # Split the arrays that have lengths of the original time axis - time, flux, unc, model, model_sys, gp = \ - split([lc.time, flux, unc, model, model_sys, gp], + time, flux, unc, model_lc, model_sys, gp = \ + split([lc.time, flux, unc, model_lc, model_sys, gp], meta.nints, channel) # Split the arrays that have lengths of the new (potentially @@ -89,7 +89,7 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): time = lc.time new_timet = new_time - residuals = flux - model + residuals = flux - model_lc # Get binned data and times if not 
hasattr(meta, 'nbin_plot') or meta.nbin_plot is None or \ @@ -109,7 +109,7 @@ def plot_fit(lc, model, meta, fitter, isTitle=True): ax = fig.subplots(3, 1) ax[0].errorbar(binned_time, binned_flux, yerr=binned_unc, fmt='.', color='w', ecolor=color, mec=color) - ax[0].plot(time, model, '.', ls='', ms=1, color='0.3', zorder=10) + ax[0].plot(time, model_lc, '.', ls='', ms=1, color='0.3', zorder=10) if isTitle: ax[0].set_title(f'{meta.eventlabel} - Channel {channel} - ' f'{fitter}') @@ -166,7 +166,7 @@ def plot_phase_variations(lc, model, meta, fitter, isTitle=True): - September 12, 2022 Taylor Bell Initial version. """ - if type(fitter) != str: + if not isinstance(fitter, str): raise ValueError(f'Expected type str for fitter, instead received a ' f'{type(fitter)}') @@ -200,7 +200,7 @@ def plot_phase_variations(lc, model, meta, fitter, isTitle=True): new_timet = new_time # Split the arrays that have lengths of the original time axis - flux, unc, model = split([flux, unc], meta.nints, channel) + flux, unc = split([flux, unc], meta.nints, channel) # Split the arrays that have lengths of the new (potentially # interpolated) time axis @@ -208,8 +208,8 @@ def plot_phase_variations(lc, model, meta, fitter, isTitle=True): nints_interp, channel)[0] elif meta.multwhite: # Split the arrays that have lengths of the original time axis - time, flux, unc, model = split([lc.time, flux, unc], - meta.nints, channel) + time, flux, unc = split([lc.time, flux, unc], + meta.nints, channel) # Split the arrays that have lengths of the new (potentially # interpolated) time axis @@ -251,7 +251,7 @@ def plot_phase_variations(lc, model, meta, fitter, isTitle=True): # Set nice axis limits sigma = np.ma.mean(binned_unc) max_astro = np.ma.max((model_phys-1)) - ax.set_ylim(-4*sigma, max_astro+6*sigma) + ax.set_ylim(-6*sigma, max_astro+6*sigma) ax.set_xlim(np.min(time), np.max(time)) # Save/show the figure @@ -284,7 +284,7 @@ def plot_phase_variations(lc, model, meta, fitter, isTitle=True): ax.errorbar(binned_time, binned_flux, yerr=binned_unc, fmt='.', color=color, zorder=1) # Plot the physical model - ax.plot(new_time, model_phys, '.', ls='', ms=2, color='0.3', + ax.plot(new_timet, model_phys, '.', ls='', ms=2, color='0.3', zorder=10) # Set nice axis limits @@ -328,29 +328,29 @@ def plot_rms(lc, model, meta, fitter): - January 7-22, 2022 Megan Mansfield Adding ability to do a single shared fit across all channels """ - if type(fitter) != str: + if not isinstance(fitter, str): raise ValueError(f'Expected type str for fitter, instead received a ' f'{type(fitter)}') - model_lc = model.eval(incl_GP=True) + model_eval = model.eval(incl_GP=True) for channel in lc.fitted_channels: flux = np.ma.copy(lc.flux) - model = np.ma.copy(model_lc) + model_lc = np.ma.copy(model_eval) if lc.share and not meta.multwhite: time = lc.time # Split the arrays that have lengths of the original time axis - flux, model = split([flux, model], meta.nints, channel) + flux, model_lc = split([flux, model_lc], meta.nints, channel) elif meta.multwhite: # Split the arrays that have lengths of the original time axis - time, flux, model = split([lc.time, flux, model], - meta.nints, channel) + time, flux, model_lc = split([lc.time, flux, model_lc], + meta.nints, channel) else: time = lc.time - residuals = flux - model + residuals = flux - model_lc residuals = residuals[np.argsort(time)] rms, stderr, binsz = computeRMS(residuals, binstep=1) @@ -636,11 +636,11 @@ def plot_res_distr(lc, model, meta, fitter): - February 18, 2022 Caroline Piaulet Created function """ 
- if type(fitter) != str: + if not isinstance(fitter, str): raise ValueError(f'Expected type str for fitter, instead received a ' f'{type(fitter)}') - model_lc = model.eval(incl_GP=True) + model_eval = model.eval(incl_GP=True) for channel in lc.fitted_channels: plt.figure(5302, figsize=(8, 6)) @@ -648,14 +648,14 @@ def plot_res_distr(lc, model, meta, fitter): flux = np.ma.copy(lc.flux) unc = np.ma.copy(np.array(lc.unc_fit)) - model = np.ma.copy(model_lc) + model_lc = np.ma.copy(model_eval) if lc.share or meta.multwhite: # Split the arrays that have lengths of the original time axis - flux, unc, model = split([flux, unc, model], - meta.nints, channel) + flux, unc, model_lc = split([flux, unc, model_lc], + meta.nints, channel) - residuals = flux - model + residuals = flux - model_lc hist_vals = residuals/unc hist_vals[~np.isfinite(hist_vals)] = np.nan # Mask out any infinities @@ -702,42 +702,42 @@ def plot_GP_components(lc, model, meta, fitter, isTitle=True): - March 9, 2022 Eva-Maria Ahrer Adapted with shared parameters """ - if type(fitter) != str: + if not isinstance(fitter, str): raise ValueError(f'Expected type str for fitter, instead received a ' f'{type(fitter)}') - model_lc = model.eval() - model_GP = model.GPeval(model_lc) - model_with_GP = model_lc + model_GP + model_eval = model.eval() + model_GP = model.GPeval(model_eval) + model_with_GP = model_eval + model_GP for i, channel in enumerate(lc.fitted_channels): flux = np.ma.copy(lc.flux) unc = np.ma.copy(lc.unc_fit) - model = np.ma.copy(model_with_GP) + model_lc = np.ma.copy(model_with_GP) model_GP_component = np.ma.copy(model_GP) color = lc.colors[i] if lc.share and not meta.multwhite: time = lc.time # Split the arrays that have lengths of the original time axis - flux, unc, model, model_GP_component = \ - split([flux, unc, model, model_GP_component], + flux, unc, model_lc, model_GP_component = \ + split([flux, unc, model_lc, model_GP_component], meta.nints, channel) elif meta.multwhite: # Split the arrays that have lengths of the original time axis - time, flux, unc, model, model_GP_component = \ - split([lc.time, flux, unc, model, model_GP_component], + time, flux, unc, model_lc, model_GP_component = \ + split([lc.time, flux, unc, model_lc, model_GP_component], meta.nints, channel) else: time = lc.time - residuals = flux - model + residuals = flux - model_lc fig = plt.figure(5102, figsize=(8, 6)) plt.clf() ax = fig.subplots(3, 1) ax[0].errorbar(time, flux, yerr=unc, fmt='.', color='w', ecolor=color, mec=color) - ax[0].plot(time, model, '.', ls='', ms=2, color='0.3', + ax[0].plot(time, model_lc, '.', ls='', ms=2, color='0.3', zorder=10) if isTitle: ax[0].set_title(f'{meta.eventlabel} - Channel {channel} - ' diff --git a/src/eureka/S5_lightcurve_fitting/s5_fit.py b/src/eureka/S5_lightcurve_fitting/s5_fit.py index 04145a4bd..15da512a9 100644 --- a/src/eureka/S5_lightcurve_fitting/s5_fit.py +++ b/src/eureka/S5_lightcurve_fitting/s5_fit.py @@ -105,6 +105,8 @@ def fitlc(eventlabel, ecf_path=None, s4_meta=None, input_meta=None): # Create directories for Stage 5 outputs meta.run_s5 = None + if not hasattr(meta, 'expand'): + meta.expand = 1 for spec_hw_val in meta.spec_hw_range: for bg_hw_val in meta.bg_hw_range: if not isinstance(bg_hw_val, str): @@ -299,15 +301,36 @@ def fitlc(eventlabel, ecf_path=None, s4_meta=None, input_meta=None): if meta.use_generate_ld: # Load limb-darkening coefficients made in Stage 4 ld_str = meta.use_generate_ld - if not hasattr(lc, ld_str + '_lin'): - raise Exception("Exotic-ld coefficients have not been " 
+ - "calculated in Stage 4") - log.writelog("\nUsing generated limb-darkening coefficients " + - f"with {ld_str} \n") - ld_coeffs = [lc[ld_str + '_lin'].values, - lc[ld_str + '_quad'].values, - lc[ld_str + '_nonlin_3para'].values, - lc[ld_str + '_nonlin_4para'].values] + if meta.multwhite: + nwhitechan = len(meta.inputdirlist) + 1 + lin_c1 = np.zeros((nwhitechan, 1)) + quad = np.zeros((nwhitechan, 2)) + nonlin_3 = np.zeros((nwhitechan, 3)) + nonlin_4 = np.zeros((nwhitechan, 4)) + # Load LD coefficient from each lc + for p in range(nwhitechan): + if not hasattr(lc_whites[p], ld_str + '_lin'): + raise Exception("Exotic-ld coefficients have not" + + " been calculated in Stage 4") + log.writelog("\nUsing generated limb-darkening " + + f"coefficients with {ld_str} \n") + lin_c1[p] = lc_whites[p][ld_str + '_lin'].values[0] + quad[p] = lc_whites[p][ld_str + '_quad'].values[0] + nonlin_3[p] = lc_whites[p][ld_str + '_nonlin_3para'] \ + .values[0] + nonlin_4[p] = lc_whites[p][ld_str + '_nonlin_4para'] \ + .values[0] + ld_coeffs = [lin_c1, quad, nonlin_3, nonlin_4] + else: + if not hasattr(lc, ld_str + '_lin'): + raise Exception("Exotic-ld coefficients have not" + + " been calculated in Stage 4") + log.writelog("\nUsing generated limb-darkening " + + f"coefficients with {ld_str} \n") + ld_coeffs = [lc[ld_str + '_lin'].values, + lc[ld_str + '_quad'].values, + lc[ld_str + '_nonlin_3para'].values, + lc[ld_str + '_nonlin_4para'].values] elif meta.ld_file: # Load limb-darkening coefficients from a custom file ld_fix_file = str(meta.ld_file) @@ -418,7 +441,7 @@ def fitlc(eventlabel, ecf_path=None, s4_meta=None, input_meta=None): "_Meta_Save"), save=[]) else: for channel in range(chanrng): - log.writelog(f"\nStarting Channel {channel+1} of " + log.writelog(f"\nStarting Channel {channel} of " f"{chanrng}\n") # Get the flux and error measurements for diff --git a/src/eureka/S6_planet_spectra/plots_s6.py b/src/eureka/S6_planet_spectra/plots_s6.py index 8a57e0a94..413fdc05b 100644 --- a/src/eureka/S6_planet_spectra/plots_s6.py +++ b/src/eureka/S6_planet_spectra/plots_s6.py @@ -46,6 +46,11 @@ def plot_spectrum(meta, model_x=None, model_y=None, model_x = deepcopy(model_x) model_y = deepcopy(model_y) + # Trim repeated wavelengths for multwhite fits + if len(set(wavelength)) == 1: + wavelength = wavelength[0] + wavelength_error = wavelength_error[0] + if np.all(np.isnan(err)): err = None diff --git a/src/eureka/S6_planet_spectra/s6_spectra.py b/src/eureka/S6_planet_spectra/s6_spectra.py index 5e12dabd3..e03360498 100644 --- a/src/eureka/S6_planet_spectra/s6_spectra.py +++ b/src/eureka/S6_planet_spectra/s6_spectra.py @@ -90,6 +90,8 @@ def plot_spectra(eventlabel, ecf_path=None, s5_meta=None, input_meta=None): # Create directories for Stage 6 outputs meta.run_s6 = None + if not hasattr(meta, 'expand'): + meta.expand = 1 for spec_hw_val in meta.spec_hw_range: for bg_hw_val in meta.bg_hw_range: if not isinstance(bg_hw_val, str): @@ -185,8 +187,8 @@ def plot_spectra(eventlabel, ecf_path=None, s5_meta=None, input_meta=None): log.writelog(f'Plotting {meta.y_param}...') - meta.spectrum_median = [] - meta.spectrum_err = [] + meta.spectrum_median = None + meta.spectrum_err = None # Read in S5 fitted values if meta.y_param == 'fn': @@ -205,7 +207,7 @@ def plot_spectra(eventlabel, ecf_path=None, s5_meta=None, input_meta=None): else: meta = parse_unshared_saves(meta, log, fit_methods) - if (len(meta.spectrum_median) == 0 + if ((meta.spectrum_median is None) or all(x is None for x in meta.spectrum_median)): # The parameter could 
not be found - skip it continue @@ -422,7 +424,7 @@ def parse_s5_saves(meta, log, fit_methods, channel_key='shared'): 'fitted parameters which includes:\n [' + ', '.join(full_keys)+']') log.writelog(f' Skipping {y_param}') - return None, None + return meta for i, key in enumerate(keys): ind = np.where(fitted_values["Parameter"] == key)[0][0] @@ -443,7 +445,7 @@ def parse_s5_saves(meta, log, fit_methods, channel_key='shared'): 'fitted parameters which includes:\n[' + ', '.join(full_keys)+']') log.writelog(f'Skipping {y_param}') - return None, None + return meta medians = [] for i, key in enumerate(keys): @@ -459,7 +461,7 @@ def parse_s5_saves(meta, log, fit_methods, channel_key='shared'): meta.spectrum_median, meta.spectrum_err = medians, errs - return medians, errs + return meta def parse_unshared_saves(meta, log, fit_methods): @@ -484,15 +486,15 @@ def parse_unshared_saves(meta, log, fit_methods): for channel in range(meta.nspecchan): ch_number = str(channel).zfill(len(str(meta.nspecchan))) channel_key = f'ch{ch_number}' - median, err = parse_s5_saves(meta, log, fit_methods, channel_key) - if median is None: + meta = parse_s5_saves(meta, log, fit_methods, channel_key) + if meta.spectrum_median is None: # Parameter was found, so don't keep looking for it meta.spectrum_median = np.array([None for _ in range(meta.nspecchan)]) meta.spectrum_err = np.array([None for _ in range(meta.nspecchan)]) return meta - spectrum_median.extend(median) - spectrum_err.extend(err.T) + spectrum_median.extend(meta.spectrum_median) + spectrum_err.extend(meta.spectrum_err.T) meta.spectrum_median = np.array(spectrum_median) meta.spectrum_err = np.array(spectrum_err).T @@ -514,13 +516,16 @@ def compute_transit_depth(meta): The updated meta data object. """ if not np.all(np.isnan(meta.spectrum_err)): - lower = np.abs((meta.spectrum_median-meta.spectrum_err[0, :])**2 - - meta.spectrum_median**2) - upper = np.abs((meta.spectrum_median+meta.spectrum_err[1, :])**2 - - meta.spectrum_median**2) + lower = meta.spectrum_median - meta.spectrum_err[0, :] + upper = meta.spectrum_median + meta.spectrum_err[1, :] + + meta.spectrum_median *= np.abs(meta.spectrum_median) + + if not np.all(np.isnan(meta.spectrum_err)): + lower = meta.spectrum_median - lower*np.abs(lower) + upper = upper*np.abs(upper) - meta.spectrum_median meta.spectrum_err = np.append(lower.reshape(1, -1), upper.reshape(1, -1), axis=0) - meta.spectrum_median *= meta.spectrum_median return meta @@ -1211,6 +1216,10 @@ def save_table(meta, log): meta.wave_hi.reshape(1, -1), axis=0), axis=0) wave_errs = (meta.wave_hi-meta.wave_low)/2 + # Trim repeated wavelengths for multwhite fits + if len(set(wavelengths)) == 1: + wavelengths = wavelengths[0] + wave_errs = wave_errs[0] astropytable.savetable_S6(meta.tab_filename_s6, meta.y_param, wavelengths, wave_errs, meta.spectrum_median, meta.spectrum_err) diff --git a/src/eureka/lib/gaussian.py b/src/eureka/lib/gaussian.py index 94dd34029..3bc0b6ca6 100644 --- a/src/eureka/lib/gaussian.py +++ b/src/eureka/lib/gaussian.py @@ -131,11 +131,11 @@ def gaussian(x, width=1.0, center=0.0, height=None, bgpars=[0.0, 0.0, 0.0]): x.shape = (1, x.shape[0]) # Make center a ndarray: - if type(center) != np.ndarray: + if not isinstance(center, np.ndarray): center += np.zeros(ndim) # Make width a ndarray: - if type(width) != np.ndarray: + if not isinstance(width, np.ndarray): width += np.zeros(ndim) r2pi = np.sqrt(2. 
* np.pi) @@ -591,9 +591,9 @@ def gaussians(x, param): width = param[k][:pdim] center = param[k][pdim:2*pdim] - if type(center) != np.ndarray: + if not isinstance(center, np.ndarray): center += np.zeros(ndim) - if type(width) != np.ndarray: + if not isinstance(width, np.ndarray): width += np.zeros(ndim) if height is None: height = np.product(1.0 / (width * np.sqrt(2.0 * np.pi))) diff --git a/src/eureka/lib/readEPF.py b/src/eureka/lib/readEPF.py index dfa63ff1f..b0e350b34 100644 --- a/src/eureka/lib/readEPF.py +++ b/src/eureka/lib/readEPF.py @@ -297,7 +297,7 @@ def __add__(self, other): The combined model """ # Make sure it is the right type - if not type(self) == type(other): + if not isinstance(self, type(other)): raise TypeError('Only another Parameters instance may be added.') # Combine the model parameters too
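A note on the type-comparison cleanups in the hunks above (background.py, lightcurve.py, KeplerOrbit.py, plots_s5.py, gaussian.py, readEPF.py): swapping type(x) == T for isinstance(x, T) is the usual fix for flake8's E721 check, but in readEPF.py it also loosens the semantics slightly, since isinstance(self, type(other)) accepts instances of subclasses of other's class whereas strict type equality does not. Below is a minimal standalone sketch of that difference; Parameters stands in for the class in readEPF.py, and SubParameters is purely illustrative and not part of the Eureka! codebase.

class Parameters:
    pass

class SubParameters(Parameters):
    pass

a = SubParameters()
b = Parameters()

# Old check: strict type equality rejects the subclass instance
print(type(a) == type(b))      # False

# New check: isinstance accepts instances of subclasses of type(b)
print(isinstance(a, type(b)))  # True

# The relaxed check is not symmetric
print(isinstance(b, type(a)))  # False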