Live cleanup combine single fits #4450

Merged
62 changes: 27 additions & 35 deletions bin/live/pycbc_live_combine_single_fits
@@ -12,12 +12,15 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.

"""Combine PyCBC Live single-detector trigger fitting parameters from several
different files."""

import h5py, numpy as np, argparse
import logging
import pycbc

parser = argparse.ArgumentParser(usage="",
    description="Combine fitting parameters from several different files")

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose", action="store_true",
                    help="Print extra debugging information", default=False)
parser.add_argument("--trfits-files", nargs="+", required=True,
@@ -30,7 +33,7 @@ parser.add_argument("--output", required=True,
parser.add_argument("--ifos", required=True, nargs="+",
help="list of ifos fo collect info for")

args=parser.parse_args()
args = parser.parse_args()

pycbc.init_logging(args.verbose)

@@ -42,8 +45,8 @@ if args.conservative_percentile < 50 or \
"otherwise it is either not a percentile, or not "
"conservative.")

counts_all = {ifo:[] for ifo in args.ifos}
alphas_all = {ifo:[] for ifo in args.ifos}
counts_all = {ifo: [] for ifo in args.ifos}
alphas_all = {ifo: [] for ifo in args.ifos}
analysis_dates = []

with h5py.File(args.trfits_files[0], 'r') as fit_f0:
@@ -58,7 +61,7 @@ with h5py.File(args.trfits_files[0], 'r') as fit_f0:
    fit_thresh = fit_f0.attrs['fit_threshold']
    fit_func = fit_f0.attrs['fit_function']

live_times = {ifo : [] for ifo in args.ifos}
live_times = {ifo: [] for ifo in args.ifos}

trigger_file_starts = []
trigger_file_ends = []
@@ -67,7 +70,6 @@ n_files = len(args.trfits_files)
logging.info("Checking through %d files", n_files)

for f in args.trfits_files:

    fits_f = h5py.File(f, 'r')
    # Check that the file uses the same setup as file 0, to make sure
    # all coefficients are comparable
@@ -84,10 +86,9 @@ for f in args.trfits_files:
    for ifo in args.ifos:
        if ifo not in fits_f:
            continue
        else:
            trig_times = fits_f[ifo]['triggers']['end_time'][:]
            gps_last = max(gps_last, trig_times.max())
            gps_first = min(gps_first, trig_times.min())
        trig_times = fits_f[ifo]['triggers']['end_time'][:]
        gps_last = max(gps_last, trig_times.max())
        gps_first = min(gps_first, trig_times.min())
    trigger_file_starts.append(gps_first)
    trigger_file_ends.append(gps_last)
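    # The first and last trigger times recorded above give the span of each
    # input file; they are indexed below (via ad_order) to order the
    # per-file fits in time when writing the output.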

@@ -101,7 +102,7 @@ for f in args.trfits_files:
        counts_all[ifo].append(fits_f[ifo + '/counts'][:])
        alphas_all[ifo].append(fits_f[ifo + '/fit_coeff'][:])
        if any(np.isnan(fits_f[ifo + '/fit_coeff'][:])):
            logging.info("nan in " + f + ", " + ifo)
            logging.info("nan in %s, %s", f, ifo)
            logging.info(fits_f[ifo + '/fit_coeff'][:])
    fits_f.close()

@@ -118,10 +119,10 @@ ad = trigger_file_ends[ad_order] - start_time_n
counts_bin = {ifo: [c for c in zip(*counts_all[ifo])] for ifo in args.ifos}
alphas_bin = {ifo: [a for a in zip(*alphas_all[ifo])] for ifo in args.ifos}
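# zip(*...) transposes the per-file lists: counts_bin[ifo][i] and
# alphas_bin[ifo][i] now collect, for bin i, the values from every input file.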

alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
counts_out = {ifo : np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos}
cons_alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
cons_counts_out = {ifo : np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos}
alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
counts_out = {ifo: np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos}
cons_alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
cons_counts_out = {ifo: np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos}

logging.info("Writing results")
fout = h5py.File(args.output, 'w')
@@ -132,23 +133,14 @@ fout['bins_edges'] = list(bl) + [bu[-1]]
fout['fits_dates'] = ad + start_time_n

for ifo in args.ifos:
    fout.create_group(ifo)
    fout[ifo].attrs['live_time'] = sum(live_times[ifo])

save_allmeanalpha = {}
for ifo in args.ifos:
    fout_ifo = fout[ifo]
    logging.info(ifo)
    fout_ifo = fout.create_group(ifo)
    l_times = np.array(live_times[ifo])
    count_all = np.sum(counts_bin[ifo], axis=0) / l_times
    invalphan = np.array(counts_bin[ifo]) / np.array(alphas_bin[ifo])
    invalphan_all = np.mean(invalphan, axis=0)
    alpha_all = np.mean(counts_bin[ifo], axis=0) / invalphan_all
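    # alpha_all is a count-weighted combination of the per-file fit
    # coefficients in each bin: mean(counts) divided by mean(counts / alpha).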
    meant = l_times.mean()
    fout_ifo.attrs['live_time'] = l_times.sum()

    fout_ifo[f'separate_fits/live_times'] = l_times[ad_order]
    fout_ifo[f'separate_fits/start_time'] = trigger_file_starts[ad_order]
    fout_ifo[f'separate_fits/end_time'] = trigger_file_ends[ad_order]
    fout_ifo['separate_fits/live_times'] = l_times[ad_order]
    fout_ifo['separate_fits/start_time'] = trigger_file_starts[ad_order]
    fout_ifo['separate_fits/end_time'] = trigger_file_ends[ad_order]

    for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo],
                                          counts_bin[ifo], bu, bl)):
@@ -181,15 +173,15 @@
    # Take some averages for plotting and summary values
    overall_invalphan = counts_out[ifo] / alphas_out[ifo]
    overall_meanalpha = counts_out[ifo].mean() / overall_invalphan.mean()
    sum_counts_out = counts_out[ifo].sum() / sum(live_times[ifo])
    save_allmeanalpha[ifo] = overall_meanalpha

    # For the fixed version, we just set this to 1
    fout_ifo['fixed/counts'] = [1 for c in counts_out[ifo]]
    fout_ifo['fixed/fit_coeff'] = [0 for a in alphas_out[ifo]]
    fout_ifo['fixed/counts'] = [1] * len(counts_out[ifo])
    fout_ifo['fixed/fit_coeff'] = [0] * len(alphas_out[ifo])

    # Add some useful info to the output file
    fout_ifo.attrs['mean_alpha'] = save_allmeanalpha[ifo]
    fout_ifo.attrs['mean_alpha'] = overall_meanalpha
    fout_ifo.attrs['total_counts'] = counts_out[ifo].sum()

fout.close()

logging.info('Done')
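For reference, a minimal sketch of how the combined fits file written by this script could be inspected. The invocation and the file and detector names (fits1.hdf, fits2.hdf, combined_fits.hdf, H1, L1) are placeholders for illustration; only options, attributes and dataset paths visible in this diff are used.

# Example invocation (option names from this script; file names are placeholders):
#   pycbc_live_combine_single_fits --trfits-files fits1.hdf fits2.hdf \
#       --output combined_fits.hdf --ifos H1 L1 --conservative-percentile 95

import h5py

with h5py.File('combined_fits.hdf', 'r') as f:
    print('bin edges:', f['bins_edges'][:])
    for ifo in ('H1', 'L1'):  # assumed detector names, matching --ifos above
        if ifo not in f:
            continue
        grp = f[ifo]
        print(ifo, 'total live time:', grp.attrs['live_time'])
        print(ifo, 'mean fit coefficient:', grp.attrs['mean_alpha'])
        print(ifo, 'total counts:', grp.attrs['total_counts'])
        print(ifo, 'per-file live times:', grp['separate_fits/live_times'][:])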