From a3f3c7563cc218448951c25d99669488a407d15b Mon Sep 17 00:00:00 2001 From: Michele Vallisneri Date: Wed, 2 Mar 2022 16:55:17 -0800 Subject: [PATCH 01/42] Allow manual specification of Tspan for models 1/2/3 --- enterprise_extensions/models.py | 63 +++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 18 deletions(-) diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index 0f8b926c..551748ec 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -351,7 +351,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, upper_limit=False, bayesephem=False, tnequad=False, - be_type='orbel', is_wideband=False, use_dmdata=False, + be_type='orbel', is_wideband=False, use_dmdata=False, Tspan=None, select='backend', tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -389,6 +389,8 @@ def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -398,7 +400,8 @@ def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): @@ -465,7 +468,7 @@ def model_2a(psrs, psd='powerlaw', noisedict=None, components=30, n_rnfreqs=None, n_gwbfreqs=None, gamma_common=None, delta_common=None, upper_limit=False, bayesephem=False, be_type='setIII', white_vary=False, is_wideband=False, - use_dmdata=False, select='backend', tnequad=False, + use_dmdata=False, Tspan=None, select='backend', tnequad=False, pshift=False, pseed=None, psr_models=False, tm_marg=False, dense_like=False, tm_svd=False): """ @@ -511,6 +514,8 @@ def model_2a(psrs, psd='powerlaw', noisedict=None, components=30, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param psr_models: Return list of psr models rather than signal_base.PTA object. 
:param n_rnfreqs: @@ -531,7 +536,8 @@ def model_2a(psrs, psd='powerlaw', noisedict=None, components=30, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) if n_gwbfreqs is None: n_gwbfreqs = components @@ -932,7 +938,7 @@ def model_general(psrs, tm_var=False, tm_linear=False, tmparam_list=None, def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False, bayesephem=False, be_type='orbel', is_wideband=False, components=30, - use_dmdata=False, select='backend', pshift=False, tnequad=False, + use_dmdata=False, Tspan=None, select='backend', pshift=False, tnequad=False, tm_marg=False, dense_like=False, tm_svd=False, upper_limit=False, gamma_common=None): """ @@ -978,6 +984,8 @@ def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -987,7 +995,8 @@ def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): @@ -1057,7 +1066,7 @@ def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='orbel', is_wideband=False, - use_dmdata=False, select='backend', tm_marg=False, + use_dmdata=False, Tspan=None, select='backend', tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -1107,6 +1116,8 @@ def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -1116,7 +1127,8 @@ def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): @@ -1191,7 +1203,7 @@ def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='orbel', is_wideband=False, - use_dmdata=False, select='backend', pshift=False, + use_dmdata=False, Tspan=None, select='backend', pshift=False, tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -1236,6 +1248,8 @@ def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -1245,7 +1259,8 @@ def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): @@ -1316,7 +1331,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, n_rnfreqs=None, n_gwbfreqs=None, gamma_common=None, delta_common=None, upper_limit=False, bayesephem=False, be_type='setIII', is_wideband=False, - use_dmdata=False, select='backend', + use_dmdata=False, Tspan=None, select='backend', correlationsonly=False, tnequad=False, pshift=False, pseed=None, psr_models=False, tm_marg=False, dense_like=False, tm_svd=False): @@ -1367,6 +1382,8 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. 
+ :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param correlationsonly: Give infinite power (well, 1e40) to pulsar red noise, effectively canceling out also GW diagonal terms @@ -1386,7 +1403,8 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) if n_gwbfreqs is None: n_gwbfreqs = components @@ -1468,7 +1486,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='setIII', is_wideband=False, - use_dmdata=False, select='backend', tm_marg=False, + use_dmdata=False, Tspan=None, select='backend', tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -1516,6 +1534,8 @@ def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -1525,7 +1545,8 @@ def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): @@ -1600,7 +1621,7 @@ def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='orbel', is_wideband=False, - use_dmdata=False, select='backend', tm_marg=False, + use_dmdata=False, Tspan=None, select='backend', tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -1651,6 +1672,8 @@ def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -1660,7 +1683,8 @@ def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if is_wideband and use_dmdata: @@ -1740,7 +1764,7 @@ def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False, components=30, gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='orbel', is_wideband=False, - use_dmdata=False, select='backend', tm_marg=False, + use_dmdata=False, Tspan=None, select='backend', tm_marg=False, dense_like=False, tm_svd=False): """ Reads in list of enterprise Pulsar instance and returns a PTA @@ -1788,6 +1812,8 @@ def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False, noise model. :param use_dmdata: whether to use DM data (WidebandTimingModel) if is_wideband. + :param Tspan: time baseline used to determine Fourier GP frequencies; + derived from data if not specified :param tm_marg: Use marginalized timing model. In many cases this will speed up the likelihood calculation significantly. 
:param dense_like: Use dense or sparse functions to evalute lnlikelihood @@ -1797,7 +1823,8 @@ def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False, amp_prior = 'uniform' if upper_limit else 'log-uniform' # find the maximum time span to set GW frequency sampling - Tspan = model_utils.get_tspan(psrs) + if Tspan is None: + Tspan = model_utils.get_tspan(psrs) # timing model if (is_wideband and use_dmdata): From 6ee95a4f3ba9046d5140ec0bfd1aba9559ab98c5 Mon Sep 17 00:00:00 2001 From: Michele Vallisneri Date: Wed, 2 Mar 2022 17:00:47 -0800 Subject: [PATCH 02/42] Remove correlationsonly from model3 and infinitepower from red_noise_block --- enterprise_extensions/blocks.py | 4 +--- enterprise_extensions/models.py | 9 +++------ 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/enterprise_extensions/blocks.py b/enterprise_extensions/blocks.py index dad9ba17..e1a4b16e 100644 --- a/enterprise_extensions/blocks.py +++ b/enterprise_extensions/blocks.py @@ -146,7 +146,7 @@ def red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=None, """ # red noise parameters that are common if psd in ['powerlaw', 'powerlaw_genmodes', 'turnover', - 'tprocess', 'tprocess_adapt', 'infinitepower']: + 'tprocess', 'tprocess_adapt']: # parameters shared by PSD functions if logmin is not None and logmax is not None: if prior == 'uniform': @@ -194,8 +194,6 @@ def red_noise_block(psd='powerlaw', prior='log-uniform', Tspan=None, nfreq = parameter.Uniform(-0.5, 10-0.5) pl = gpp.t_process_adapt(log10_A=log10_A, gamma=gamma, alphas_adapt=alpha_adapt, nfreq=nfreq) - elif psd == 'infinitepower': - pl = gpp.infinitepower() if psd == 'spectrum': if prior == 'uniform': diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index 551748ec..3e9d787e 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -705,7 +705,7 @@ def model_general(psrs, tm_var=False, tm_linear=False, tmparam_list=None, :param red_var: boolean to switch 
on/off intrinsic red noise. [default = True] :param red_psd: psd of intrinsic red process. - ['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt', 'infinitepower'] + ['powerlaw', 'spectrum', 'turnover', 'tprocess', 'tprocess_adapt'] [default = 'powerlaw'] :param red_components: number of frequencies starting at 1/T for intrinsic red process. [default = 30] @@ -1332,7 +1332,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, gamma_common=None, delta_common=None, upper_limit=False, bayesephem=False, be_type='setIII', is_wideband=False, use_dmdata=False, Tspan=None, select='backend', - correlationsonly=False, tnequad=False, + tnequad=False, pshift=False, pseed=None, psr_models=False, tm_marg=False, dense_like=False, tm_svd=False): """ @@ -1384,9 +1384,6 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, is_wideband. :param Tspan: time baseline used to determine Fourier GP frequencies; derived from data if not specified - :param correlationsonly: - Give infinite power (well, 1e40) to pulsar red noise, effectively - canceling out also GW diagonal terms :param pshift: Option to use a random phase shift in design matrix. For testing the null hypothesis. 
@@ -1436,7 +1433,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, s = gp_signals.TimingModel(use_svd=tm_svd) # red noise - s += red_noise_block(psd='infinitepower' if correlationsonly else 'powerlaw', + s += red_noise_block(psd='powerlaw', prior=amp_prior, Tspan=Tspan, components=n_rnfreqs) From bba7c0ef171d073fc5560e99f878b5be847de680 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 26 Apr 2022 16:07:48 -0500 Subject: [PATCH 03/42] Update empirical_distr.py --- enterprise_extensions/empirical_distr.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/empirical_distr.py b/enterprise_extensions/empirical_distr.py index a3c76827..0ba002df 100644 --- a/enterprise_extensions/empirical_distr.py +++ b/enterprise_extensions/empirical_distr.py @@ -195,7 +195,7 @@ def logprob(self, params): return self._logpdf(*params)[0] -def make_empirical_distributions(pta, paramlist, params, chain, +def make_empirical_distributions(pta, paramlist, chain, burn=0, nbins=81, filename='distr.pkl', return_distribution=True, save_dists=True): @@ -227,6 +227,7 @@ def make_empirical_distributions(pta, paramlist, params, chain, if len(pl) == 1: idx = pta.param_names.index(pl[0]) + prior_min = pta.params[idx].prior._defaults['pmin'] prior_max = pta.params[idx].prior._defaults['pmax'] @@ -270,7 +271,7 @@ def make_empirical_distributions(pta, paramlist, params, chain, return distr -def make_empirical_distributions_KDE(pta, paramlist, params, chain, +def make_empirical_distributions_KDE(pta, paramlist, chain, burn=0, nbins=41, filename='distr.pkl', bandwidth=0.1, return_distribution=True, From 7aef51707977bc68c82208ff54cad6bb35f8cb95 Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 27 Apr 2022 15:44:38 -0500 Subject: [PATCH 04/42] Update sampler.py --- enterprise_extensions/sampler.py | 37 +++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/enterprise_extensions/sampler.py 
b/enterprise_extensions/sampler.py index b73f0a12..dc0aecff 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -16,7 +16,7 @@ EmpiricalDistribution2DKDE) -def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outdir='chains'): +def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outdir='./chains'): new_emp_dists = [] modified = False # check if anything was changed for emp_dist in emp_dists: @@ -29,8 +29,17 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd # check 2 conditions on both params to make sure that they cover their priors # skip if emp dist already covers the prior param_idx = pta.param_names.index(param) - prior_min = pta.params[param_idx].prior._defaults['pmin'] - prior_max = pta.params[param_idx].prior._defaults['pmax'] + if pta.params[param_idx].type not in ['uniform', 'normal']: + msg = 'This prior cannot be covered automatically by the empirical distribution\n' + msg += 'Please check that your prior is covered by the empirical distribution.\n' + print(msg) + continue + if pta.params[param_idx].type == 'uniform': + prior_min = pta.params[param_idx].prior._defaults['pmin'] + prior_max = pta.params[param_idx].prior._defaults['pmax'] + elif pta.params[param_idx].type == 'normal': + prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma'] + prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma'] # no need to extend if histogram edges are already prior min/max if isinstance(emp_dist, EmpiricalDistribution2D): @@ -54,8 +63,12 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd idxs_to_remove = [] for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)): param_idx = pta.param_names.index(param) - prior_min = pta.params[param_idx].prior._defaults['pmin'] - prior_max = 
pta.params[param_idx].prior._defaults['pmax'] + if pta.params[param_idx].type == 'uniform': + prior_min = pta.params[param_idx].prior._defaults['pmin'] + prior_max = pta.params[param_idx].prior._defaults['pmax'] + elif pta.params[param_idx].type == 'normal': + prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma'] + prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma'] # drop samples that are outside the prior range (in case prior is smaller than samples) if isinstance(emp_dist, EmpiricalDistribution2D): samples[(samples[:, ii] < prior_min) | (samples[:, ii] > prior_max), ii] = -np.inf @@ -77,8 +90,17 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd if emp_dist.param_name not in pta.param_names: continue param_idx = pta.param_names.index(emp_dist.param_name) - prior_min = pta.params[param_idx].prior._defaults['pmin'] - prior_max = pta.params[param_idx].prior._defaults['pmax'] + if pta.params[param_idx].type not in ['uniform', 'normal']: + msg = 'This prior cannot be covered automatically by the empirical distribution\n' + msg += 'Please check that your prior is covered by the empirical distribution.\n' + print(msg) + continue + if pta.params[param_idx].type == 'uniform': + prior_min = pta.params[param_idx].prior._defaults['pmin'] + prior_max = pta.params[param_idx].prior._defaults['pmax'] + elif pta.params[param_idx].type == 'normal': + prior_min = pta.params[param_idx].prior._defaults['mu'] - 10 * pta.params[param_idx].prior._defaults['sigma'] + prior_max = pta.params[param_idx].prior._defaults['mu'] + 10 * pta.params[param_idx].prior._defaults['sigma'] # check 2 conditions on param to make sure that it covers the prior # skip if emp dist already covers the prior if isinstance(emp_dist, EmpiricalDistribution1D): @@ -96,7 +118,6 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd new_bins
= [] idxs_to_remove = [] # drop samples that are outside the prior range (in case prior is smaller than samples) - if isinstance(emp_dist, EmpiricalDistribution1D): samples[(samples < prior_min) | (samples > prior_max)] = -np.inf elif isinstance(emp_dist, EmpiricalDistribution1DKDE): From 46724b82de9e031eb0510f67506222baf8431429 Mon Sep 17 00:00:00 2001 From: Aaron Date: Thu, 28 Apr 2022 12:57:49 -0500 Subject: [PATCH 05/42] cover priors with size!=None --- enterprise_extensions/sampler.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index dc0aecff..a5238de2 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -25,16 +25,20 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd prior_ok=True for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)): if param not in pta.param_names: # skip if one of the parameters isn't in our PTA object - continue + short_par = '_'.join(param.split('_')[:-1]) # make sure we aren't skipping priors with size!=None + if short_par in pta.param_names: + param = short_par + else: + continue # check 2 conditions on both params to make sure that they cover their priors # skip if emp dist already covers the prior param_idx = pta.param_names.index(param) if pta.params[param_idx].type not in ['uniform', 'normal']: - msg = 'This prior cannot be covered automatically by the empirical distribution\n' + msg = '{} cannot be covered automatically by the empirical distribution\n'.format(pta.params[param_idx].prior) msg += 'Please check that your prior is covered by the empirical distribution.\n' print(msg) continue - if pta.params[param_idx].type == 'uniform': + elif pta.params[param_idx].type == 'uniform': prior_min = pta.params[param_idx].prior._defaults['pmin'] prior_max = pta.params[param_idx].prior._defaults['pmax'] elif pta.params[param_idx].type == 
'normal': @@ -138,14 +142,15 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd new_emp_dists.append(emp_dist) continue - if save_ext_dists and modified: # if user wants to save them, and they have been modified... - pickle.dump(new_emp_dists, outdir + 'new_emp_dists.pkl') + if save_ext_dists and modified: # if user wants to save them, and they have been modified... + with open(outdir + '/new_emp_dists.pkl', 'wb') as f: + pickle.dump(new_emp_dists, f) return new_emp_dists class JumpProposal(object): - def __init__(self, pta, snames=None, empirical_distr=None, f_stat_file=None, save_ext_dists=False, outdir='chains'): + def __init__(self, pta, snames=None, empirical_distr=None, f_stat_file=None, save_ext_dists=False, outdir='./chains'): """Set up some custom jump proposals""" self.params = pta.params self.pnames = pta.param_names From b05191a7b149e0a9ed53d7ef2815dfee90244c6e Mon Sep 17 00:00:00 2001 From: Aaron Date: Thu, 28 Apr 2022 13:59:25 -0500 Subject: [PATCH 06/42] unremove unused parameter --- enterprise_extensions/empirical_distr.py | 4 ++-- enterprise_extensions/sampler.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/empirical_distr.py b/enterprise_extensions/empirical_distr.py index 0ba002df..d8d14030 100644 --- a/enterprise_extensions/empirical_distr.py +++ b/enterprise_extensions/empirical_distr.py @@ -195,7 +195,7 @@ def logprob(self, params): return self._logpdf(*params)[0] -def make_empirical_distributions(pta, paramlist, chain, +def make_empirical_distributions(pta, paramlist, params, chain, burn=0, nbins=81, filename='distr.pkl', return_distribution=True, save_dists=True): @@ -271,7 +271,7 @@ def make_empirical_distributions(pta, paramlist, chain, return distr -def make_empirical_distributions_KDE(pta, paramlist, chain, +def make_empirical_distributions_KDE(pta, paramlist, params, chain, burn=0, nbins=41, filename='distr.pkl', bandwidth=0.1, return_distribution=True, diff 
--git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index a5238de2..db55badc 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -136,6 +136,7 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd minval=prior_min, maxval=prior_max, bandwidth=emp_dist.bandwidth) new_emp_dists.append(new_emp) + print(new_emp_dists) else: print('Unable to extend class of unknown type to the edges of the priors.') From 3d47e4eae7586f25b5c4f0a848b9a7cf0836ae3e Mon Sep 17 00:00:00 2001 From: Aaron Date: Thu, 28 Apr 2022 14:01:42 -0500 Subject: [PATCH 07/42] Remove print statement --- enterprise_extensions/sampler.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index db55badc..e47c668a 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -136,8 +136,6 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd minval=prior_min, maxval=prior_max, bandwidth=emp_dist.bandwidth) new_emp_dists.append(new_emp) - print(new_emp_dists) - else: print('Unable to extend class of unknown type to the edges of the priors.') new_emp_dists.append(emp_dist) From 150b78e862a61d6488b57da5dbf214cb034d250a Mon Sep 17 00:00:00 2001 From: Aaron Date: Thu, 28 Apr 2022 15:51:18 -0500 Subject: [PATCH 08/42] take param names from pta.params --- enterprise_extensions/sampler.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index e47c668a..4b1fea21 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -24,15 +24,16 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd # check if we need to extend the distribution prior_ok=True for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)): - if param not in 
pta.param_names: # skip if one of the parameters isn't in our PTA object + param_names = [par.name for par in pta.params] + if param not in param_names: # skip if one of the parameters isn't in our PTA object short_par = '_'.join(param.split('_')[:-1]) # make sure we aren't skipping priors with size!=None - if short_par in pta.param_names: + if short_par in param_names: param = short_par else: continue # check 2 conditions on both params to make sure that they cover their priors # skip if emp dist already covers the prior - param_idx = pta.param_names.index(param) + param_idx = param_names.index(param) if pta.params[param_idx].type not in ['uniform', 'normal']: msg = '{} cannot be covered automatically by the empirical distribution\n'.format(pta.params[param_idx].prior) msg += 'Please check that your prior is covered by the empirical distribution.\n' @@ -66,7 +67,7 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd maxvals = [] idxs_to_remove = [] for ii, (param, nbins) in enumerate(zip(emp_dist.param_names, emp_dist._Nbins)): - param_idx = pta.param_names.index(param) + param_idx = param_names.index(param) if pta.params[param_idx].type == 'uniform': prior_min = pta.params[param_idx].prior._defaults['pmin'] prior_max = pta.params[param_idx].prior._defaults['pmax'] @@ -91,9 +92,14 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd new_emp_dists.append(new_emp) elif isinstance(emp_dist, EmpiricalDistribution1D) or isinstance(emp_dist, EmpiricalDistribution1DKDE): - if emp_dist.param_name not in pta.param_names: - continue - param_idx = pta.param_names.index(emp_dist.param_name) + param_names = [par.name for par in pta.params] + if param not in param_names: # skip if one of the parameters isn't in our PTA object + short_par = '_'.join(param.split('_')[:-1]) # make sure we aren't skipping priors with size!=None + if short_par in param_names: + param = short_par + else: + continue + param_idx = 
param_names.index(emp_dist.param_name) if pta.params[param_idx].type not in ['uniform', 'normal']: msg = 'This prior cannot be covered automatically by the empirical distribution\n' msg += 'Please check that your prior is covered by the empirical distribution.\n' From 77ccf1bbbfcc174a0b915966a054d7e4b6f43d84 Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 29 Apr 2022 14:56:38 -0500 Subject: [PATCH 09/42] fix 1D param name --- enterprise_extensions/sampler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index 4b1fea21..5e6f1e3a 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -93,8 +93,8 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd elif isinstance(emp_dist, EmpiricalDistribution1D) or isinstance(emp_dist, EmpiricalDistribution1DKDE): param_names = [par.name for par in pta.params] - if param not in param_names: # skip if one of the parameters isn't in our PTA object - short_par = '_'.join(param.split('_')[:-1]) # make sure we aren't skipping priors with size!=None + if emp_dist.param_name not in param_names: # skip if one of the parameters isn't in our PTA object + short_par = '_'.join(emp_dist.param_namem.split('_')[:-1]) # make sure we aren't skipping priors with size!=None if short_par in param_names: param = short_par else: From eb6bd2dc1ed65a8de1418f1716d3158d48d7e70d Mon Sep 17 00:00:00 2001 From: Hazboun6 Date: Fri, 29 Apr 2022 15:33:23 -0700 Subject: [PATCH 10/42] fixed typo and added param switch --- enterprise_extensions/sampler.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index 5e6f1e3a..b9a4d3d9 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -94,12 +94,14 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd elif 
isinstance(emp_dist, EmpiricalDistribution1D) or isinstance(emp_dist, EmpiricalDistribution1DKDE): param_names = [par.name for par in pta.params] if emp_dist.param_name not in param_names: # skip if one of the parameters isn't in our PTA object - short_par = '_'.join(emp_dist.param_namem.split('_')[:-1]) # make sure we aren't skipping priors with size!=None + short_par = '_'.join(emp_dist.param_name.split('_')[:-1]) # make sure we aren't skipping priors with size!=None if short_par in param_names: param = short_par else: continue - param_idx = param_names.index(emp_dist.param_name) + else: + param = emp_dist.param_name + param_idx = param_names.index(param) if pta.params[param_idx].type not in ['uniform', 'normal']: msg = 'This prior cannot be covered automatically by the empirical distribution\n' msg += 'Please check that your prior is covered by the empirical distribution.\n' From a368c8bcc90dcca3f733a557fd5e072ffa1e016a Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 11 May 2022 21:05:33 -0500 Subject: [PATCH 11/42] duplicate fix for hypermodel.setup_sampler --- enterprise_extensions/hypermodel.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/enterprise_extensions/hypermodel.py b/enterprise_extensions/hypermodel.py index c8b9390a..84664a03 100644 --- a/enterprise_extensions/hypermodel.py +++ b/enterprise_extensions/hypermodel.py @@ -180,8 +180,18 @@ def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True, ndim = len(self.param_names) # initial jump covariance matrix - if os.path.exists(outdir+'/cov.npy'): + if os.path.exists(outdir+'/cov.npy') and resume: cov = np.load(outdir+'/cov.npy') + + # check that the one we load is the same shape as our data + cov_new = np.diag(np.ones(ndim) * 0.1**2) + if cov.shape != cov_new.shape: + msg = 'The covariance matrix (cov.npy) in the output folder is ' + msg += 'the wrong shape for the parameters given. 
' + msg += 'Start with a different output directory or ' + msg += 'change resume to False to overwrite the run that exists.' + + raise ValueError(msg) else: cov = np.diag(np.ones(ndim) * 1.0**2) # used to be 0.1 From 12b3a79f51e6707ee17d6ec54fffe6912e9a1f37 Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 11 May 2022 21:09:57 -0500 Subject: [PATCH 12/42] Update hypermodel.py --- enterprise_extensions/hypermodel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise_extensions/hypermodel.py b/enterprise_extensions/hypermodel.py index 84664a03..7dd1f8e7 100644 --- a/enterprise_extensions/hypermodel.py +++ b/enterprise_extensions/hypermodel.py @@ -184,7 +184,7 @@ def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True, cov = np.load(outdir+'/cov.npy') # check that the one we load is the same shape as our data - cov_new = np.diag(np.ones(ndim) * 0.1**2) + cov_new = np.diag(np.ones(ndim) * 1.0**2) if cov.shape != cov_new.shape: msg = 'The covariance matrix (cov.npy) in the output folder is ' msg += 'the wrong shape for the parameters given. 
' From cd92cf15cacce03f72c37d07d41f1bc2a5bf91b6 Mon Sep 17 00:00:00 2001 From: Aaron Date: Mon, 8 Aug 2022 13:13:19 -0500 Subject: [PATCH 13/42] No longer store cov matrix --- .../frequentist/F_statistic.py | 130 ++++++------------ 1 file changed, 42 insertions(+), 88 deletions(-) diff --git a/enterprise_extensions/frequentist/F_statistic.py b/enterprise_extensions/frequentist/F_statistic.py index 855b5908..868fed2c 100644 --- a/enterprise_extensions/frequentist/F_statistic.py +++ b/enterprise_extensions/frequentist/F_statistic.py @@ -1,13 +1,34 @@ # -*- coding: utf-8 -*- import numpy as np -import scipy.linalg as sl import scipy.special from enterprise.signals import deterministic_signals, gp_signals, signal_base from enterprise_extensions import blocks, deterministic +def get_xCy(Nvec, T, sigmainv, x, y): + """Get x^T C^{-1} y""" + TNx = Nvec.solve(x, left_array=T) + TNy = Nvec.solve(y, left_array=T) + xNy = Nvec.solve(y, left_array=x) + return xNy - TNx @ sigmainv @ TNy + + +def get_TCy(Nvec, T, y, sigmainv, TNT): + """Get T^T C^{-1} y""" + TNy = Nvec.solve(y, left_array=T) + return TNy - TNT @ sigmainv @ TNy + + +def innerprod(Nvec, T, sigmainv, TNT, x, y): + """Get the inner product between x and y""" + xCy = get_xCy(Nvec, T, sigmainv, x, y) + TCy = get_TCy(Nvec, T, y, sigmainv, TNT) + TCx = get_TCy(Nvec, T, x, sigmainv, TNT) + return xCy - TCx.T @ sigmainv @ TCy + + class FpStat(object): """ Class for the Fp-statistic. @@ -18,7 +39,7 @@ class FpStat(object): :param bayesephem: Include BayesEphem model. 
Default=True """ - def __init__(self, psrs, params=None, + def __init__(self, psrs, noisedict=None, psrTerm=True, bayesephem=True, pta=None, tnequad=False): if pta is None: @@ -31,10 +52,9 @@ def __init__(self, psrs, params=None, tmin = np.min([p.toas.min() for p in psrs]) tmax = np.max([p.toas.max() for p in psrs]) Tspan = tmax - tmin - - s = deterministic.cw_block_circ(amp_prior='log-uniform', - psrTerm=psrTerm, tref=tmin, name='cw') - s += gp_signals.TimingModel() + s = gp_signals.TimingModel(use_svd=True) + s += deterministic.cw_block_circ(amp_prior='log-uniform', + psrTerm=psrTerm, tref=tmin, name='cw') s += blocks.red_noise_block(prior='log-uniform', psd='powerlaw', Tspan=Tspan, components=30) @@ -55,36 +75,28 @@ def __init__(self, psrs, params=None, pta = signal_base.PTA(models) # set white noise parameters - if params is None: + if noisedict is None: print('No noise dictionary provided!') else: - pta.set_default_params(params) + pta.set_default_params(noisedict) self.pta = pta else: - # user can specify their own pta object # if ECORR is included, use the implementation in gp_signals self.pta = pta self.psrs = psrs - self.params = params + self.noisedict = noisedict - self.Nmats = self.get_Nmats() - - def get_Nmats(self): - '''Makes the Nmatrix used in the fstatistic''' - TNTs = self.pta.get_TNT(self.params) - phiinvs = self.pta.get_phiinv(self.params, logdet=False, method='partition') - # Get noise parameters for pta toaerr**2 - Nvecs = self.pta.get_ndiag(self.params) - # Get the basis matrix - Ts = self.pta.get_basis(self.params) - - Nmats = [make_Nmat(phiinv, TNT, Nvec, T) for phiinv, TNT, Nvec, T in zip(phiinvs, TNTs, Nvecs, Ts)] - - return Nmats + # precompute important bits: + self.phiinvs = self.pta.get_phiinv(noisedict) + self.TNTs = self.pta.get_TNT(noisedict) + self.Nvecs = self.pta.get_ndiag(noisedict) + self.Ts = self.pta.get_basis(noisedict) + # self.cf_TNT = [sl.cho_factor(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)] 
+ self.sigmainvs = [np.linalg.pinv(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)] def compute_Fp(self, fgw): """ @@ -96,19 +108,10 @@ def compute_Fp(self, fgw): fstat: value of the Fp-statistic at the given frequency """ - - phiinvs = self.pta.get_phiinv(self.params, logdet=False) - TNTs = self.pta.get_TNT(self.params) - Ts = self.pta.get_basis() - N = np.zeros(2) M = np.zeros((2, 2)) fstat = 0 - - for psr, Nmat, TNT, phiinv, T in zip(self.psrs, self.Nmats, - TNTs, phiinvs, Ts): - - Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv) + for psr, Nvec, TNT, T, sigmainv in zip(self.psrs, self.Nvecs, self.TNTs, self.Ts, self.sigmainvs): ntoa = len(psr.toas) @@ -116,14 +119,16 @@ def compute_Fp(self, fgw): A[0, :] = 1 / fgw ** (1 / 3) * np.sin(2 * np.pi * fgw * psr.toas) A[1, :] = 1 / fgw ** (1 / 3) * np.cos(2 * np.pi * fgw * psr.toas) - ip1 = innerProduct_rr(A[0, :], psr.residuals, Nmat, T, Sigma) - ip2 = innerProduct_rr(A[1, :], psr.residuals, Nmat, T, Sigma) + ip1 = innerprod(Nvec, T, sigmainv, TNT, A[0, :], psr.residuals) + # logger.info(ip1) + ip2 = innerprod(Nvec, T, sigmainv, TNT, A[1, :], psr.residuals) + # logger.info(ip2) N = np.array([ip1, ip2]) # define M matrix M_ij=(A_i|A_j) for jj in range(2): for kk in range(2): - M[jj, kk] = innerProduct_rr(A[jj, :], A[kk, :], Nmat, T, Sigma) + M[jj, kk] = innerprod(Nvec, T, sigmainv, TNT, A[jj, :], A[kk, :]) # take inverse of M Minv = np.linalg.pinv(M) @@ -150,54 +155,3 @@ def compute_fap(self, fgw): n = np.arange(0, N) return np.sum(np.exp(n*np.log(fp0)-fp0-np.log(scipy.special.gamma(n+1)))) - - -def innerProduct_rr(x, y, Nmat, Tmat, Sigma, TNx=None, TNy=None): - r""" - Compute inner product using rank-reduced - approximations for red noise/jitter - Compute: x^T N^{-1} y - x^T N^{-1} T \Sigma^{-1} T^T N^{-1} y - - :param x: vector timeseries 1 - :param y: vector timeseries 2 - :param Nmat: white noise matrix - :param Tmat: Modified design matrix including red noise/jitter - :param 
Sigma: Sigma matrix (\varphi^{-1} + T^T N^{-1} T) - :param TNx: T^T N^{-1} x precomputed - :param TNy: T^T N^{-1} y precomputed - :return: inner product (x|y) - """ - - # white noise term - Ni = Nmat - xNy = np.dot(np.dot(x, Ni), y) - Nx, Ny = np.dot(Ni, x), np.dot(Ni, y) - - if TNx is None and TNy is None: - TNx = np.dot(Tmat.T, Nx) - TNy = np.dot(Tmat.T, Ny) - - cf = sl.cho_factor(Sigma) - SigmaTNy = sl.cho_solve(cf, TNy) - - ret = xNy - np.dot(TNx, SigmaTNy) - - return ret - - -def make_Nmat(phiinv, TNT, Nvec, T): - - Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv) - cf = sl.cho_factor(Sigma) - # Nshape = np.shape(T)[0] # Not currently used in code - - TtN = np.multiply((1/Nvec)[:, None], T).T - - # Put pulsar's autoerrors in a diagonal matrix - Ndiag = np.diag(1/Nvec) - - expval2 = sl.cho_solve(cf, TtN) - # TtNt = np.transpose(TtN) # Not currently used in code - - # An Ntoa by Ntoa noise matrix to be used in expand dense matrix calculations earlier - return Ndiag - np.dot(TtN.T, expval2) From b91568558b16005a8a1549f88ed4e3e8815c89dc Mon Sep 17 00:00:00 2001 From: Aaron Date: Sat, 13 Aug 2022 17:29:55 -0500 Subject: [PATCH 14/42] fix linting issues --- enterprise_extensions/sampler.py | 4 ++-- setup.cfg | 3 --- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index b73f0a12..8ca9e362 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -34,11 +34,11 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd # no need to extend if histogram edges are already prior min/max if isinstance(emp_dist, EmpiricalDistribution2D): - if not(emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max): + if not (emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max): prior_ok = False continue elif isinstance(emp_dist, EmpiricalDistribution2DKDE): - if not(emp_dist.minvals[ii] == 
prior_min and emp_dist.maxvals[ii] == prior_max): + if not (emp_dist.minvals[ii] == prior_min and emp_dist.maxvals[ii] == prior_max): prior_ok=False continue if prior_ok: diff --git a/setup.cfg b/setup.cfg index f9a47209..431ce871 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,9 +14,6 @@ replace = __version__ = '{new_version}' [bdist_wheel] universal = 1 -[flake8] -exclude = docs - [aliases] # Define setup.py command aliases here test = pytest From ab18e8b72753deb53caee874f90792d29ac385d0 Mon Sep 17 00:00:00 2001 From: Nihan Pol <33101514+NihanPol@users.noreply.github.com> Date: Mon, 7 Nov 2022 15:24:44 -0600 Subject: [PATCH 15/42] NANOGrav TWG WB noise setup Introduce the setup that the NANOGrav TWG uses for the wideband datasets. The only change is to place normal priors on DMEFAC and EFAC. This introduces two flags, `ng_twg_setup = False` and `wb_efac_sigma = 0.25`, which toggle on this mode and set the sigma for the normal prior on EFAC parameters respectively. There's also an assoicated change in `blocks.py` to allow for normal prior on the EFAC. --- enterprise_extensions/models.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index 6f5d0c63..da915063 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -58,7 +58,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, psr_model=False, factorized_like=False, Tspan=None, fact_like_gamma=13./3, gw_components=10, fact_like_logmin=None, fact_like_logmax=None, - select='backend', tm_marg=False, dense_like=False): + select='backend', tm_marg=False, dense_like=False, ng_twg_setup = False, wb_efac_sigma = 0.25): """ Single pulsar noise model. 
@@ -162,7 +162,10 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, else: dmjump = parameter.Constant() if white_vary: - dmefac = parameter.Uniform(pmin=0.1, pmax=10.0) + if ng_twg_setup: + dmefac = parameter.Normal(1.0, wb_efac_sigma) + else: + dmefac = parameter.Uniform(pmin=0.1, pmax=10.0) log10_dmequad = parameter.Uniform(pmin=-7.0, pmax=0.0) # dmjump = parameter.Uniform(pmin=-0.005, pmax=0.005) else: From beb6ace02bbad4c67fef9bd221cd420a2d696d15 Mon Sep 17 00:00:00 2001 From: Nihan Pol <33101514+NihanPol@users.noreply.github.com> Date: Mon, 7 Nov 2022 15:28:58 -0600 Subject: [PATCH 16/42] NANOGrav TWG WB noise setup cont'd Update WN block to place normal priors on EFAC when running noise analyses on WB dataset. --- enterprise_extensions/blocks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/enterprise_extensions/blocks.py b/enterprise_extensions/blocks.py index e1a4b16e..cd869229 100644 --- a/enterprise_extensions/blocks.py +++ b/enterprise_extensions/blocks.py @@ -37,7 +37,7 @@ def channelized_backends(backend_flags): def white_noise_block(vary=False, inc_ecorr=False, gp_ecorr=False, - efac1=False, select='backend', tnequad=False, name=None): + efac1=False, select='backend', tnequad=False, name=None, ng_twg_setup = False, wb_efac_sigma = 0.25): """ Returns the white noise block of the model: @@ -74,6 +74,8 @@ def white_noise_block(vary=False, inc_ecorr=False, gp_ecorr=False, if vary: if efac1: efac = parameter.Normal(1.0, 0.1) + elif ng_twg_setup: + efac = parameter.Normal(1.0, wb_efac_sigma) else: efac = parameter.Uniform(0.01, 10.0) equad = parameter.Uniform(-8.5, -5) From 64dc0daa741fe7b26ae0dd30fc806a33466cdc71 Mon Sep 17 00:00:00 2001 From: Nihan Pol <33101514+NihanPol@users.noreply.github.com> Date: Mon, 7 Nov 2022 15:30:38 -0600 Subject: [PATCH 17/42] NANOGrav TWG WB noise setup cont'd Added relevant flags to `white_noise_block` setup. 
--- enterprise_extensions/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index da915063..a0810890 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -327,7 +327,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, Model = s2 else: s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, - tnequad=tnequad, select=select) + tnequad=tnequad, select=select, ng_twg_setup = ng_twg_setup, wb_efac_sigma = wb_efac_sigma) model = s3(psr) if psr_model: Model = s3 From 2c66d060697604c6f4fa380f4c4ccc1874d809fa Mon Sep 17 00:00:00 2001 From: Nihan Pol Date: Wed, 9 Nov 2022 15:14:14 -0600 Subject: [PATCH 18/42] Update the WidebandTimingModel call --- enterprise_extensions/models.py | 60 +++++++++------------------------ 1 file changed, 15 insertions(+), 45 deletions(-) diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index a0810890..3b32047b 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -174,9 +174,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection( - selections.by_backend), - log10_dmequad_selection=selections.Selection( + selection=selections.Selection( selections.by_backend), dmjump_selection=selections.Selection( selections.by_frontend)) @@ -416,9 +414,7 @@ def model_1(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), 
dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -555,9 +551,7 @@ def model_2a(psrs, psd='powerlaw', noisedict=None, components=30, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -803,9 +797,7 @@ def model_general(psrs, tm_var=False, tm_linear=False, tmparam_list=None, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: # create new attribute for enterprise pulsar object @@ -1005,9 +997,7 @@ def model_2b(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1134,9 +1124,7 @@ def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + 
selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1263,9 +1251,7 @@ def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1407,9 +1393,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1538,9 +1522,7 @@ def model_3b(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1673,9 +1655,7 @@ def model_3c(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + 
selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1810,9 +1790,7 @@ def model_3d(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -1937,9 +1915,7 @@ def model_2a_drop_be(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -2056,9 +2032,7 @@ def model_2a_drop_crn(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -2208,9 +2182,7 @@ def model_chromatic(psrs, psd='powerlaw', noisedict=None, white_vary=False, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + 
selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: @@ -2785,9 +2757,7 @@ def model_cw(psrs, upper_limit=False, rn_psd='powerlaw', noisedict=None, # dmjump = parameter.Constant() s = gp_signals.WidebandTimingModel(dmefac=dmefac, log10_dmequad=log10_dmequad, dmjump=dmjump, - dmefac_selection=selections.Selection(selections.by_backend), - log10_dmequad_selection=selections.Selection( - selections.by_backend), + selection=selections.Selection(selections.by_backend), dmjump_selection=selections.Selection(selections.by_frontend)) else: if tm_marg: From 4643d154d04bb13de39a84e26b4b6925f9680ca6 Mon Sep 17 00:00:00 2001 From: William Lamb <60004701+astrolamb@users.noreply.github.com> Date: Thu, 10 Nov 2022 09:36:56 -0600 Subject: [PATCH 19/42] added ptmcmc version info --- enterprise_extensions/sampler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index b73f0a12..f32213d9 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -7,6 +7,7 @@ import healpy as hp import numpy as np +from PTMCMCSampler import __version__ as __vPTMCMC__ from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc from enterprise_extensions import __version__ @@ -1008,6 +1009,7 @@ def save_runtime_info(pta, outdir='chains', human=None): fout.write(field + " : " + data + "\n") fout.write("\n") fout.write("enterprise_extensions v" + __version__ +"\n") + fout.write("PTMCMCSampler v" + __vPTMCMC__ +"\n") fout.write(pta.summary()) # save paramter list From b8e2d26126b2b7c3ef8b8f91fc943af3f0684fc2 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 13 Nov 2022 18:14:22 -0800 Subject: [PATCH 20/42] fix linting --- enterprise_extensions/sampler.py | 4 ++-- setup.cfg | 3 --- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index 
b73f0a12..8ca9e362 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -34,11 +34,11 @@ def extend_emp_dists(pta, emp_dists, npoints=100_000, save_ext_dists=False, outd # no need to extend if histogram edges are already prior min/max if isinstance(emp_dist, EmpiricalDistribution2D): - if not(emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max): + if not (emp_dist._edges[ii][0] == prior_min and emp_dist._edges[ii][-1] == prior_max): prior_ok = False continue elif isinstance(emp_dist, EmpiricalDistribution2DKDE): - if not(emp_dist.minvals[ii] == prior_min and emp_dist.maxvals[ii] == prior_max): + if not (emp_dist.minvals[ii] == prior_min and emp_dist.maxvals[ii] == prior_max): prior_ok=False continue if prior_ok: diff --git a/setup.cfg b/setup.cfg index f9a47209..431ce871 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,9 +14,6 @@ replace = __version__ = '{new_version}' [bdist_wheel] universal = 1 -[flake8] -exclude = docs - [aliases] # Define setup.py command aliases here test = pytest From c46ce1ad2ee45911bc199e1f6ae9c36957769a49 Mon Sep 17 00:00:00 2001 From: Nihan Pol Date: Tue, 15 Nov 2022 09:15:08 -0600 Subject: [PATCH 21/42] fix linting issues --- enterprise_extensions/blocks.py | 2 +- enterprise_extensions/models.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/enterprise_extensions/blocks.py b/enterprise_extensions/blocks.py index cd869229..981872ae 100644 --- a/enterprise_extensions/blocks.py +++ b/enterprise_extensions/blocks.py @@ -37,7 +37,7 @@ def channelized_backends(backend_flags): def white_noise_block(vary=False, inc_ecorr=False, gp_ecorr=False, - efac1=False, select='backend', tnequad=False, name=None, ng_twg_setup = False, wb_efac_sigma = 0.25): + efac1=False, select='backend', tnequad=False, name=None, ng_twg_setup=False, wb_efac_sigma=0.25): """ Returns the white noise block of the model: diff --git a/enterprise_extensions/models.py 
b/enterprise_extensions/models.py index 3b32047b..33aac783 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -58,7 +58,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, psr_model=False, factorized_like=False, Tspan=None, fact_like_gamma=13./3, gw_components=10, fact_like_logmin=None, fact_like_logmax=None, - select='backend', tm_marg=False, dense_like=False, ng_twg_setup = False, wb_efac_sigma = 0.25): + select='backend', tm_marg=False, dense_like=False, ng_twg_setup=False, wb_efac_sigma=0.25): """ Single pulsar noise model. @@ -325,7 +325,7 @@ def model_singlepsr_noise(psr, tm_var=False, tm_linear=False, Model = s2 else: s3 = s + white_noise_block(vary=white_vary, inc_ecorr=False, - tnequad=tnequad, select=select, ng_twg_setup = ng_twg_setup, wb_efac_sigma = wb_efac_sigma) + tnequad=tnequad, select=select, ng_twg_setup=ng_twg_setup, wb_efac_sigma=wb_efac_sigma) model = s3(psr) if psr_model: Model = s3 From 0e36ef145089559ae0c091592ce10ffef382d291 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 15 Nov 2022 08:09:41 -0800 Subject: [PATCH 22/42] update minor version --- HISTORY.rst | 3 +++ enterprise_extensions/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index a9b42375..0db683e8 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,6 +1,9 @@ ======= History ======= +2.4.1 (2022-02-10) +Add support for new noise definitions in WidebandTimingModel. + 2.4.0 (2022-02-10) Use Timing Package (Tempo,Tempo2,Pint) definition of EQUAD. 
Enterprise has broken backwards compatibility, and here we use the `tnequad` flag to switch on diff --git a/enterprise_extensions/__init__.py b/enterprise_extensions/__init__.py index 3d67cd6b..54499df3 100644 --- a/enterprise_extensions/__init__.py +++ b/enterprise_extensions/__init__.py @@ -1 +1 @@ -__version__ = "2.4.0" +__version__ = "2.4.1" diff --git a/setup.cfg b/setup.cfg index 431ce871..37425600 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 2.4.0 +current_version = 2.4.1 commit = True tag = True From a9abf7fb8bd8ebfb210239b774e07bd5638e6da1 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 15 Nov 2022 21:32:30 -0800 Subject: [PATCH 23/42] Add Python 3.10 to CI --- .github/workflows/ci_test.yml | 6 +++--- CONTRIBUTING.rst | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 61b19e09..a70c5c0d 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9, 3.10] steps: - name: Checkout repository @@ -69,7 +69,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: "3.8" + python-version: "3.10" - name: Install non-python dependencies on linux run: | sudo apt-get install libsuitesparse-dev @@ -115,7 +115,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: '3.10' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cd74804b..0c543076 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -102,7 +102,7 @@ Before you submit a pull request, check that it meets these guidelines: 2. If the pull request adds functionality, the docs should be updated. 
Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. -3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check +3. The pull request should work for Python 3.7, 3.8, 3.9, and 3.10, and for PyPy. Check https://travis-ci.org/stevertaylor/enterprise_extensions/pull_requests and make sure that the tests pass for all supported Python versions. From 9d269d138a6f0ef09fe79d278c8e8e7c1ce2fd62 Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 18 Nov 2022 13:42:25 -0800 Subject: [PATCH 24/42] remove print statement --- enterprise_extensions/frequentist/optimal_statistic.py | 1 - 1 file changed, 1 deletion(-) diff --git a/enterprise_extensions/frequentist/optimal_statistic.py b/enterprise_extensions/frequentist/optimal_statistic.py index 9bb0d6be..6590f9c1 100644 --- a/enterprise_extensions/frequentist/optimal_statistic.py +++ b/enterprise_extensions/frequentist/optimal_statistic.py @@ -149,7 +149,6 @@ def compute_os(self, params=None, psd='powerlaw', fgw=None): if psd == 'powerlaw': if self.gamma_common is None and 'gw_gamma' in params.keys(): - print('{0:1.2}'.format(params['gw_gamma'])) phiIJ = utils.powerlaw(self.freqs, log10_A=0, gamma=params['gw_gamma']) else: From da4f30992957766884eae06a712f8a726e6d20fb Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 18 Nov 2022 13:45:35 -0800 Subject: [PATCH 25/42] Revert "remove print statement" This reverts commit 9d269d138a6f0ef09fe79d278c8e8e7c1ce2fd62. 
--- enterprise_extensions/frequentist/optimal_statistic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/enterprise_extensions/frequentist/optimal_statistic.py b/enterprise_extensions/frequentist/optimal_statistic.py index 6590f9c1..9bb0d6be 100644 --- a/enterprise_extensions/frequentist/optimal_statistic.py +++ b/enterprise_extensions/frequentist/optimal_statistic.py @@ -149,6 +149,7 @@ def compute_os(self, params=None, psd='powerlaw', fgw=None): if psd == 'powerlaw': if self.gamma_common is None and 'gw_gamma' in params.keys(): + print('{0:1.2}'.format(params['gw_gamma'])) phiIJ = utils.powerlaw(self.freqs, log10_A=0, gamma=params['gw_gamma']) else: From 70649b8877af2313417e419158733c714f07910b Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 18 Nov 2022 13:47:32 -0800 Subject: [PATCH 26/42] 3.10, not 3.1 --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index a70c5c0d..3267b6ad 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: [3.7, 3.8, 3.9, 3.10] + python-version: [3.7, 3.8, 3.9, '3.10'] steps: - name: Checkout repository From 21287e167417a6ef7eda662cbc090005ec1ac75b Mon Sep 17 00:00:00 2001 From: Aaron Date: Fri, 18 Nov 2022 13:49:54 -0800 Subject: [PATCH 27/42] remove print statement --- enterprise_extensions/frequentist/optimal_statistic.py | 1 - 1 file changed, 1 deletion(-) diff --git a/enterprise_extensions/frequentist/optimal_statistic.py b/enterprise_extensions/frequentist/optimal_statistic.py index 9bb0d6be..6590f9c1 100644 --- a/enterprise_extensions/frequentist/optimal_statistic.py +++ b/enterprise_extensions/frequentist/optimal_statistic.py @@ -149,7 +149,6 @@ def compute_os(self, params=None, psd='powerlaw', fgw=None): if psd == 'powerlaw': if self.gamma_common is None and 'gw_gamma' in params.keys(): - 
print('{0:1.2}'.format(params['gw_gamma'])) phiIJ = utils.powerlaw(self.freqs, log10_A=0, gamma=params['gw_gamma']) else: From 64efa49134e6d572454f2fae1fdf53e32f4c0554 Mon Sep 17 00:00:00 2001 From: Michele Vallisneri Date: Wed, 21 Dec 2022 09:34:40 -0800 Subject: [PATCH 28/42] Allow specifying different gwb and rn components in model_2d --- enterprise_extensions/models.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/enterprise_extensions/models.py b/enterprise_extensions/models.py index 3e9d787e..2f9bc077 100644 --- a/enterprise_extensions/models.py +++ b/enterprise_extensions/models.py @@ -1201,7 +1201,8 @@ def model_2c(psrs, psd='powerlaw', noisedict=None, white_vary=False, def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, - components=30, gamma_common=None, upper_limit=False, tnequad=False, + components=30, n_rnfreqs=None, n_gwbfreqs=None, + gamma_common=None, upper_limit=False, tnequad=False, bayesephem=False, be_type='orbel', is_wideband=False, use_dmdata=False, Tspan=None, select='backend', pshift=False, tm_marg=False, dense_like=False, tm_svd=False): @@ -1262,6 +1263,12 @@ def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, if Tspan is None: Tspan = model_utils.get_tspan(psrs) + if n_gwbfreqs is None: + n_gwbfreqs = components + + if n_rnfreqs is None: + n_rnfreqs = components + # timing model if (is_wideband and use_dmdata): dmjump = parameter.Constant() @@ -1286,11 +1293,11 @@ def model_2d(psrs, psd='powerlaw', noisedict=None, white_vary=False, s = gp_signals.TimingModel(use_svd=tm_svd) # red noise - s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=components) + s += red_noise_block(prior=amp_prior, Tspan=Tspan, components=n_rnfreqs) # monopole s += common_red_noise_block(psd=psd, prior=amp_prior, Tspan=Tspan, - components=components, gamma_val=gamma_common, + components=n_gwbfreqs, gamma_val=gamma_common, orf='monopole', name='monopole', pshift=pshift) # ephemeris 
model @@ -1364,7 +1371,7 @@ def model_3a(psrs, psd='powerlaw', noisedict=None, white_vary=False, :param gamma_common: Fixed common red process spectral index value. By default we vary the spectral index over the range [0, 7]. - :param gamma_common: + :param delta_common: Fixed common red process spectral index value for higher frequencies in broken power law model. By default we vary the spectral index over the range [0, 7]. From 066feb8be75b314d33d8580e3c42642006478dc3 Mon Sep 17 00:00:00 2001 From: Michele Vallisneri Date: Wed, 21 Dec 2022 09:37:54 -0800 Subject: [PATCH 29/42] Propagate GP combine setting in common_red_noise_block --- enterprise_extensions/blocks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/enterprise_extensions/blocks.py b/enterprise_extensions/blocks.py index e1a4b16e..d436a0eb 100644 --- a/enterprise_extensions/blocks.py +++ b/enterprise_extensions/blocks.py @@ -772,23 +772,23 @@ def common_red_noise_block(psd='powerlaw', prior='log-uniform', cpl = gpp.free_spectrum(log10_rho=log10_rho_gw) if orf is None: - crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, + crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine, components=components, Tspan=Tspan, name=name, pshift=pshift, pseed=pseed) elif orf in orfs.keys(): if orf == 'crn': - crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, + crn = gp_signals.FourierBasisGP(cpl, coefficients=coefficients, combine=combine, components=components, Tspan=Tspan, name=name, pshift=pshift, pseed=pseed) else: crn = gp_signals.FourierBasisCommonGP(cpl, orfs[orf], - components=components, + components=components, combine=combine, Tspan=Tspan, name=name, pshift=pshift, pseed=pseed) elif isinstance(orf, types.FunctionType): crn = gp_signals.FourierBasisCommonGP(cpl, orf, - components=components, + components=components, combine=combine, Tspan=Tspan, name=name, pshift=pshift, pseed=pseed) From b1149d389cc611cbd6ab2918f9f3a0f5b9b24735 
Mon Sep 17 00:00:00 2001 From: Michele Vallisneri Date: Wed, 21 Dec 2022 09:54:22 -0800 Subject: [PATCH 30/42] Fix flake8 configuration for new version; temporarily pin OS to avoid python 3.6 CI bug. We should really deprecate 3.6... --- .flake8 | 27 ++++++++++++++++++--------- .github/workflows/ci_test.yml | 2 +- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/.flake8 b/.flake8 index 892058f2..4c3f0d94 100644 --- a/.flake8 +++ b/.flake8 @@ -3,15 +3,24 @@ max-line-length = 120 max-complexity = 45 ignore = E203 - E225 # missing whitespace around operator - E226 # missing whitespace around arithmetic operator - W503 # line break before binary operator; conflicts with black - W504 # Don't want to change with code frozen... - E722 # bare except ok - E731 # lambda expressions ok - E266 # block comments - E741 # ambiguous variable names - E501 # lines + # missing whitespace around operator + E225 + # missing whitespace around arithmetic operator + E226 + # line break before binary operator; conflicts with black + W503 + # Don't want to change with code frozen... 
+ W504 + # bare except ok + E722 + # lambda expressions ok + E731 + # block comments + E266 + # ambiguous variable names + E741 + # lines + E501 exclude = .git .tox diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 61b19e09..017c7260 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-20.04, macos-latest] python-version: [3.6, 3.7, 3.8, 3.9] steps: From 345312f403ae41e53a5d671f0126561b34b23a1d Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 21 Dec 2022 13:21:56 -0600 Subject: [PATCH 31/42] reformat .flake8 file --- .flake8 | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/.flake8 b/.flake8 index 892058f2..e714a89c 100644 --- a/.flake8 +++ b/.flake8 @@ -2,21 +2,30 @@ max-line-length = 120 max-complexity = 45 ignore = - E203 - E225 # missing whitespace around operator - E226 # missing whitespace around arithmetic operator - W503 # line break before binary operator; conflicts with black - W504 # Don't want to change with code frozen... - E722 # bare except ok - E731 # lambda expressions ok - E266 # block comments - E741 # ambiguous variable names - E501 # lines + E203, + # missing whitespace around operator + E225, + # missing whitespace around arithmetic operator + E226, + # line break before binary operator; conflicts with black + W503, + # Don't want to change with code frozen... 
+ W504, + # bare except ok + E722, + # lambda expressions ok + E731, + # block comments + E266, + # ambiguous variable names + E741, + # lines + E501 exclude = - .git - .tox - __pycache__ - build - dist - docs/* + .git, + .tox, + __pycache__, + build, + dist, + docs/*, .enterprise_extensions/* From 63056b3659bee76f05023ba432e8f7b870ce470e Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 21 Dec 2022 14:00:24 -0600 Subject: [PATCH 32/42] fix hypermodel sampling groups --- enterprise_extensions/hypermodel.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/enterprise_extensions/hypermodel.py b/enterprise_extensions/hypermodel.py index 7dd1f8e7..059efa4d 100644 --- a/enterprise_extensions/hypermodel.py +++ b/enterprise_extensions/hypermodel.py @@ -112,14 +112,20 @@ def get_lnprior(self, x): def get_parameter_groups(self): - groups = [] + unique_groups = [] for p in self.models.values(): - groups.extend(get_parameter_groups(p)) - list(np.unique(groups)) - - groups.extend([[len(self.param_names)-1]]) # nmodel - - return groups + groups = get_parameter_groups(p) + # check for any duplicate groups + # e.g. 
the GWB may have different indices in model 1 and model 2 + for group in groups: + check_group = [] + for idx in group: + param_name = p.param_names[idx] + check_group.append(self.param_names.index(param_name)) + if check_group not in unique_groups: + unique_groups.append(check_group) + unique_groups.extend([[len(self.param_names) - 1]]) + return unique_groups def initial_sample(self): """ From 5b36b4126daeab444d6b64b9a640e2eb17d2bb4c Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 28 Dec 2022 10:58:11 -0600 Subject: [PATCH 33/42] increment version to 2.4.2 --- HISTORY.rst | 5 ++++- enterprise_extensions/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index 0db683e8..24cc4728 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,6 +1,9 @@ ======= History ======= +2.4.2 (2022-28-12) +Fix HyperModel to support newer versions of numpy. + 2.4.1 (2022-02-10) Add support for new noise definitions in WidebandTimingModel. @@ -22,7 +25,7 @@ Fix bug in HyperModel when using save_runtime_info. Fix bugs associated with recent function additions. Added linting and mild PEP8 rules. Also removed older Python functionality which is no longer supported. -2.3.0 (2021-09-15)* +2.3.0 (2021-09-15) Functionality added for NANOGrav 15yr dataset analyses. Outlier analysis software moved into separate package. 
diff --git a/enterprise_extensions/__init__.py b/enterprise_extensions/__init__.py index 54499df3..60be088d 100644 --- a/enterprise_extensions/__init__.py +++ b/enterprise_extensions/__init__.py @@ -1 +1 @@ -__version__ = "2.4.1" +__version__ = "2.4.2" diff --git a/setup.cfg b/setup.cfg index 37425600..bc2faeea 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 2.4.1 +current_version = 2.4.2 commit = True tag = True From ea67ef22245b5d8dabd3774d7bb0d91a901fd17e Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 28 Dec 2022 11:19:20 -0600 Subject: [PATCH 34/42] update CI versions --- .github/workflows/ci_test.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 3267b6ad..9dfe9b87 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -21,9 +21,9 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install non-python dependencies on mac @@ -65,9 +65,9 @@ jobs: if: github.event_name == 'release' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install non-python dependencies on linux @@ -111,9 +111,9 @@ jobs: runs-on: ubuntu-latest if: github.event_name == 'release' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: '3.10' - name: Install dependencies From d33e90db75e90c82e07796c0b2c157da94f58dd2 Mon Sep 17 00:00:00 2001 From: Aaron Date: Wed, 28 Dec 2022 12:28:15 -0600 Subject: [PATCH 35/42] update codecov to v3 --- 
.github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 9dfe9b87..9197fe92 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -53,7 +53,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v1 + uses: codecov/codecov-action@v3 #with: # fail_ci_if_error: true From a46be211cc9740a3ad6a45dd9d488864d32ca83b Mon Sep 17 00:00:00 2001 From: William G Lamb <60004701+astrolamb@users.noreply.github.com> Date: Thu, 5 Jan 2023 18:11:54 +0000 Subject: [PATCH 36/42] adding rho jump proposals for free spectra (#189) * adding rho jump proposals for free spectra * Update setup.cfg removing flake8 to fix linting issues, thanks @AaronDJohnson * Update sampler.py fixed linting issues --- enterprise_extensions/hypermodel.py | 5 +++++ enterprise_extensions/sampler.py | 35 +++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/enterprise_extensions/hypermodel.py b/enterprise_extensions/hypermodel.py index 059efa4d..1b5ed036 100644 --- a/enterprise_extensions/hypermodel.py +++ b/enterprise_extensions/hypermodel.py @@ -303,6 +303,11 @@ def setup_sampler(self, outdir='chains', resume=False, sample_nmodel=True, print('Adding CW prior draws...\n') sampler.addProposalToCycle(jp.draw_from_cw_log_uniform_distribution, 10) + # free spectrum prior draw + if np.any(['log10_rho' in par for par in self.param_names]): + print('Adding free spectrum prior draws...\n') + sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25) + # Prior distribution draw for parameters named GW if any([str(p).split(':')[0] for p in list(self.params) if 'gw' in str(p)]): print('Adding gw param prior draws...\n') diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index eeaaae98..700caece 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -709,6 +709,36 @@ 
def draw_from_dm_sw_prior(self, x, iter, beta): return q, float(lqxy) + def draw_from_gw_rho_prior(self, x, iter, beta): + """ + Jump proposals on free spec + """ + + q = x.copy() + lqxy = 0 + + # draw parameter from signal model + parnames = [par.name for par in self.params] + pname = [pnm for pnm in parnames + if ('gw' in pnm and 'rho' in pnm)][0] + + idx = parnames.index(pname) + param = self.params[idx] + + if param.size: + idx2 = np.random.randint(0, param.size) + q[self.pmap[str(param)]][idx2] = param.sample()[idx2] + + # scalar parameter + else: + q[self.pmap[str(param)]] = param.sample() + + # forward-backward jump probability + lqxy = (param.get_logpdf(x[self.pmap[str(param)]]) - + param.get_logpdf(q[self.pmap[str(param)]])) + + return q, float(lqxy) + def draw_from_signal_prior(self, x, iter, beta): q = x.copy() @@ -1198,4 +1228,9 @@ def setup_sampler(pta, outdir='chains', resume=False, print('Adding CW prior draws...\n') sampler.addProposalToCycle(jp.draw_from_cw_distribution, 10) + # free spectrum prior draw + if np.any(['log10_rho' in par for par in pta.param_names]): + print('Adding free spectrum prior draws...\n') + sampler.addProposalToCycle(jp.draw_from_gw_rho_prior, 25) + return sampler From 7fc8f2e0300cc5ca3fe482e90cbba7e99f34d5be Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 27 Jun 2023 20:04:33 -0700 Subject: [PATCH 37/42] update version to increment Zenodo --- README.md | 4 ++-- enterprise_extensions/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index bfef1c03..be9e0896 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ You can also install it by cloning the repo and running `pip install .` in the ` Did you use `enterprise_extensions`? Remember to cite it as: ->Taylor, S. R., Baker, P. T., Hazboun, J. S., Simon, J., & Vigeland, S. J. (2021). enterprise_extensions v2.3.1. https://github.com/nanograv/enterprise_extensions +>Taylor, S. R., Baker, P. T., Hazboun, J. 
S., Simon, J., & Vigeland, S. J. (2021). enterprise_extensions v2.4.3. https://github.com/nanograv/enterprise_extensions ```latex @misc{enterprise, @@ -24,7 +24,7 @@ Remember to cite it as: title = {enterprise_extensions}, year = {2021}, url = {https://github.com/nanograv/enterprise_extensions}, - note = {v2.3.3} + note = {v2.4.3} } ``` diff --git a/enterprise_extensions/__init__.py b/enterprise_extensions/__init__.py index 60be088d..5a8e0983 100644 --- a/enterprise_extensions/__init__.py +++ b/enterprise_extensions/__init__.py @@ -1 +1 @@ -__version__ = "2.4.2" +__version__ = "2.4.3" diff --git a/setup.cfg b/setup.cfg index bc2faeea..4336c221 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 2.4.2 +current_version = 2.4.3 commit = True tag = True From e629b6036f59eb85bc7bf6439c09aeb3c4ecfb65 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 27 Jun 2023 20:08:22 -0700 Subject: [PATCH 38/42] Update HISTORY.rst --- HISTORY.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/HISTORY.rst b/HISTORY.rst index 24cc4728..7b8bfcd0 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,6 +1,10 @@ ======= History ======= + +2.4.3 (2023-27-06) +Update Zenodo entry. + 2.4.2 (2022-28-12) Fix HyperModel to support newer versions of numpy. 
From fd36ea1b8b7b6eabf22988bd5e6b6063bc799cab Mon Sep 17 00:00:00 2001 From: Wang-weiYu <429682828@qq.com> Date: Wed, 20 Dec 2023 00:21:10 +0100 Subject: [PATCH 39/42] Fix to bug, add a parameter dictionary in JumpProposal Class --- enterprise_extensions/sampler.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index 700caece..9fe840b4 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -166,6 +166,15 @@ def __init__(self, pta, snames=None, empirical_distr=None, f_stat_file=None, sav self.ndim = sum(p.size or 1 for p in pta.params) self.plist = [p.name for p in pta.params] + # wangwei add + self.params_dict = {} + for p in self.params: + if p.size: + for ii in range(0, p.size): + self.params_dict.update({p.name + "_{}".format(ii): p}) + else: + self.params_dict.update({p.name: p}) + # parameter map self.pmap = {} ct = 0 @@ -530,8 +539,9 @@ def draw_from_gwb_log_uniform_distribution(self, x, iter, beta): # draw parameter from signal model signal_name = [par for par in self.pnames if ('gw' in par and 'log10_A' in par)][0] - idx = list(self.pnames).index(signal_name) - param = self.params[idx] + + # wangwei add + param = self.params_dict[signal_name] q[self.pmap[str(param)]] = np.random.uniform(param.prior._defaults['pmin'], param.prior._defaults['pmax']) From 155aaf9d76a566dc8f77252bac6cfdedc109bc9a Mon Sep 17 00:00:00 2001 From: Wang-weiYu <429682828@qq.com> Date: Wed, 20 Dec 2023 01:55:30 +0100 Subject: [PATCH 40/42] Fix to bug, add a parameter dictionary in JumpProposal class --- enterprise_extensions/sampler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index 9fe840b4..f80b4a5a 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -166,7 +166,7 @@ def __init__(self, pta, snames=None, 
empirical_distr=None, f_stat_file=None, sav self.ndim = sum(p.size or 1 for p in pta.params) self.plist = [p.name for p in pta.params] - # wangwei add + # parameter dictionary self.params_dict = {} for p in self.params: if p.size: @@ -540,7 +540,6 @@ def draw_from_gwb_log_uniform_distribution(self, x, iter, beta): signal_name = [par for par in self.pnames if ('gw' in par and 'log10_A' in par)][0] - # wangwei add param = self.params_dict[signal_name] q[self.pmap[str(param)]] = np.random.uniform(param.prior._defaults['pmin'], param.prior._defaults['pmax']) From 2b935506420eeedd405455b4322e7fb53d733ff8 Mon Sep 17 00:00:00 2001 From: Wang-weiYu <429682828@qq.com> Date: Thu, 21 Dec 2023 10:34:29 +0100 Subject: [PATCH 41/42] delete the whitespace --- enterprise_extensions/sampler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise_extensions/sampler.py b/enterprise_extensions/sampler.py index f80b4a5a..584ec86a 100644 --- a/enterprise_extensions/sampler.py +++ b/enterprise_extensions/sampler.py @@ -539,7 +539,7 @@ def draw_from_gwb_log_uniform_distribution(self, x, iter, beta): # draw parameter from signal model signal_name = [par for par in self.pnames if ('gw' in par and 'log10_A' in par)][0] - + param = self.params_dict[signal_name] q[self.pmap[str(param)]] = np.random.uniform(param.prior._defaults['pmin'], param.prior._defaults['pmax']) From 88072a90113461eb64d44e8feb48c4221f246b4b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Feb 2024 21:11:38 +0000 Subject: [PATCH 42/42] Bump scikit-learn from 0.24 to 1.0.1 Bumps [scikit-learn](https://github.com/scikit-learn/scikit-learn) from 0.24 to 1.0.1. - [Release notes](https://github.com/scikit-learn/scikit-learn/releases) - [Commits](https://github.com/scikit-learn/scikit-learn/compare/0.24.0...1.0.1) --- updated-dependencies: - dependency-name: scikit-learn dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e5167f5..60263cf9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ scikit-sparse>=0.4.5 pint-pulsar>=0.8.2 libstempo>=2.4.0 enterprise-pulsar>=3.3.0 -scikit-learn==0.24 +scikit-learn==1.0.1 emcee ptmcmcsampler numdifftools