From 083523820e13aeaa95f48f8a75fe6ae41711d700 Mon Sep 17 00:00:00 2001
From: Mathieu Doucet
Date: Tue, 7 Jan 2025 16:53:57 -0500
Subject: [PATCH 1/5] user doc for workflow

---
 docs/user/workflow.md | 75 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
 create mode 100644 docs/user/workflow.md

diff --git a/docs/user/workflow.md b/docs/user/workflow.md
new file mode 100644
index 0000000..2a51fac
--- /dev/null
+++ b/docs/user/workflow.md
@@ -0,0 +1,75 @@
+# Specular reflectivity reduction workflow
+
+The specular reflectivity data reduction is built around the `event_reduction.EventReflectivity` class, which
+performs the reduction. A number of useful modules are available to handle the parts of the workflow around the actual reduction.
+
+## Data sets
+
+Specular reflectivity measurements at BL4B are done by combining several runs, taken at different
+scattering angles and wavelength bands. To allow for the automation of the reduction process, several
+metadata entries are stored in the data files. Two important log entries are used to identify which
+data files belong together in a single reflectivity measurement:
+
+- `sequence_id`: The sequence ID identifies a unique reflectivity curve. All data runs with a matching
+  sequence_id are put together to create a single reflectivity curve.
+- `sequence_number`: The sequence number identifies the location of a given run in the list of runs that define a full sequence. All sequences start at 1. For instance, a sequence number of 3 means that
+this run is the third of the complete set. This becomes important for storing reduction parameters.
+
+## Reduction parameters and templates
+
+The reduction parameters are managed using the `reduction_template_reader.ReductionParameters` class. This class allows users to define and store the parameters required for the reduction process. By using this class, you can easily save, load, and modify the parameters, ensuring consistency and reproducibility in your data reduction workflow.
+
+#### Compatibility with RefRed
+
+[RefRed](https://github.com/neutrons/RefRed) is the user interface that helps users define reduction
+parameters by selecting the data to process, peak and background regions, etc. A complete reflectivity
+curve generally comprises multiple runs, and RefRed allows one to save a so-called template file
+that contains all the information needed to reduce each run in the set. The reduction backend (this package) has utilities to read and write such templates, which are stored in XML format. A template
+consists of an ordered list of `ReductionParameters` objects, each corresponding to a specific `sequence_number`.
+
+To read a template and obtain a list of `ReductionParameters` objects:
+```python
+from lr_reduction import reduction_template_reader
+
+with open(template_file, "r") as fd:
+    xml_str = fd.read()
+data_sets = reduction_template_reader.from_xml(xml_str)
+```
+
+To write a template from a list of `ReductionParameters` objects:
+```python
+import os
+
+xml_str = reduction_template_reader.to_xml(data_sets)
+with open(os.path.join(output_dir, "template.xml"), "w") as fd:
+    fd.write(xml_str)
+```
+
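+For example, an existing template can be read, one of its parameters adjusted, and the result written
+back out. The following is only a sketch: it assumes `template_file` and `output_dir` are defined as in
+the snippets above, uses the `data_peak_range` attribute of `ReductionParameters` to illustrate the idea,
+and the output file name is arbitrary.
+```python
+import os
+
+from lr_reduction import reduction_template_reader
+
+# Read the template into a list of ReductionParameters objects
+with open(template_file, "r") as fd:
+    data_sets = reduction_template_reader.from_xml(fd.read())
+
+# Adjust the reflected-beam peak range of the first run in the sequence
+data_sets[0].data_peak_range = [120, 135]
+
+# Write the modified template back to disk
+with open(os.path.join(output_dir, "modified_template.xml"), "w") as fd:
+    fd.write(reduction_template_reader.to_xml(data_sets))
+```
+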
+## Reduction workflow
+
+The main reduction workflow, which will extract specular reflectivity from a data file given a
+reduction template, is found in the `workflow` module. This workflow is the one performed
+by the automated reduction system at BL4B:
+
+- Extract the correct reduction parameters from the provided template
+- Perform the reduction and compute the reflectivity curve for that data
+- Combine the reflectivity curve segment with those from other runs belonging to the same set
+- Write out the complete reflectivity curve in an output file
+- Write out a copy of the template with the run numbers replaced by those that were used
+
+Once you have a template, you can simply do:
+```python
+from lr_reduction import workflow
+from mantid.simpleapi import LoadEventNexus
+
+# Load the data from disk
+ws = LoadEventNexus(Filename='/SNS/REF_L/IPTS-XXXX/nexus/REFL_YYYY.h5')
+
+# The template file you want to use
+template_file = '/SNS/REF_L/IPTS-XXXX/autoreduce/template.xml'
+
+# The folder where you want your output
+output_dir = '/tmp'
+
+workflow.reduce(ws, template_file, output_dir)
+```
+
+This will produce output files in the specified output directory.
\ No newline at end of file

From 30c48ab5a26d42ba5d7cb522f5909590f9de40ca Mon Sep 17 00:00:00 2001
From: Mathieu Doucet
Date: Tue, 7 Jan 2025 17:06:55 -0500
Subject: [PATCH 2/5] Document template reader

---
 reduction/lr_reduction/event_reduction.py     |   2 +-
 .../lr_reduction/reduction_template_reader.py | 196 ++++++++++--------
 2 files changed, 115 insertions(+), 83 deletions(-)

diff --git a/reduction/lr_reduction/event_reduction.py b/reduction/lr_reduction/event_reduction.py
index 8ade183..12c5660 100644
--- a/reduction/lr_reduction/event_reduction.py
+++ b/reduction/lr_reduction/event_reduction.py
@@ -254,7 +254,7 @@ def apply_dead_time_correction(ws, template_data):
     return ws
 
 
-class EventReflectivity(object):
+class EventReflectivity():
     """
     Data reduction for the Liquids Reflectometer.
     List of items to be taken care of outside this class:
diff --git a/reduction/lr_reduction/reduction_template_reader.py b/reduction/lr_reduction/reduction_template_reader.py
index d8a064c..abe2bbe 100644
--- a/reduction/lr_reduction/reduction_template_reader.py
+++ b/reduction/lr_reduction/reduction_template_reader.py
@@ -1,7 +1,8 @@
 """
-    RefRed template reader.
-    Adapted from Mantid code.
+RefRed template reader.
+Adapted from Mantid code.
 """
+
 import time
 import xml.dom.minidom
 
@@ -10,12 +11,16 @@
 # Get the mantid version being used, if available
 try:
     import mantid
+
     MANTID_VERSION = mantid.__version__
 except:
     MANTID_VERSION = "None"
 
 
-class ReductionParameters(object):
+class ReductionParameters():
+    """
+    Class that holds the parameters for the reduction of a single data set.
+    """
 
     def __init__(self):
         # Signal selection
@@ -23,11 +28,11 @@ def __init__(self):
         self.subtract_background = True
         self.two_backgrounds: bool = False
         self.background_roi = [137, 153, 0, 0]
-        self.tof_range = [9600., 21600.]
+ self.tof_range = [9600.0, 21600.0] self.select_tof_range = True self.data_x_range_flag = True - self.data_x_range = [115,210] + self.data_x_range = [115, 210] # Normalization self.apply_normalization = True @@ -35,7 +40,7 @@ def __init__(self): self.subtract_norm_background = True self.norm_background_roi = [137, 153] self.norm_x_range_flag = True - self.norm_x_range = [115,210] + self.norm_x_range = [115, 210] # Data files self.data_files = [0] @@ -57,30 +62,30 @@ def __init__(self): self.angle_offset_error = 0.0 # Scaling factor file - self.scaling_factor_file = '' + self.scaling_factor_file = "" self.scaling_factor_flag = True self.slits_width_flag = True # Incident medium list and selected value - self.incident_medium_list = ['air'] + self.incident_medium_list = ["air"] self.incident_medium_index_selected = 0 # Dead time correction - self.dead_time:bool = False - self.paralyzable:bool = True + self.dead_time: bool = False + self.paralyzable: bool = True self.dead_time_value = 4.2 self.dead_time_tof_step = 100 # Calculate emission time delay instead of using an effective distance for all wavelengths - self.use_emission_time:bool = True + self.use_emission_time: bool = True def from_dict(self, data_dict, permissible=True): - r""" + """ Update object's attributes with a dictionary with entries of the type attribute_name: attribute_value. Parameters ---------- - permissible: bool + permissible : bool allow keys in data_dict that are not attribute names of ReductionParameters instances. Reading from `data_dict` will result in this instance having new attributes not defined in `__init__()` @@ -100,9 +105,9 @@ def from_dict(self, data_dict, permissible=True): def to_xml(self): """ - Create XML from the current data. + Create XML from the current data. 
""" - _xml = "\n" + _xml = "\n" _xml += "narrow\n" _xml += "%s\n" % str(self.data_peak_range[0]) _xml += "%s\n" % str(self.data_peak_range[1]) @@ -116,7 +121,7 @@ def to_xml(self): _xml += "%s\n" % str(self.select_tof_range) _xml += "%s\n" % str(self.tof_range[0]) _xml += "%s\n" % str(self.tof_range[1]) - _xml += "%s\n" % ','.join([str(i) for i in self.data_files]) + _xml += "%s\n" % ",".join([str(i) for i in self.data_files]) _xml += "%s\n" % str(self.data_x_range[0]) _xml += "%s\n" % str(self.data_x_range[1]) _xml += "%s\n" % str(self.data_x_range_flag) @@ -170,73 +175,71 @@ def to_xml(self): def from_xml_element(self, instrument_dom): """ - Read in data from XML - @param xml_str: text to read the data from + Read in data from XML + + Parameters + ---------- + instrument_dom : xml.dom.Document """ - #Peak from/to pixels - self.data_peak_range = [getIntElement(instrument_dom, "from_peak_pixels"), - getIntElement(instrument_dom, "to_peak_pixels")] + # Peak from/to pixels + self.data_peak_range = [getIntElement(instrument_dom, "from_peak_pixels"), getIntElement(instrument_dom, "to_peak_pixels")] - #data metadata + # data metadata _tthd_value = getStringElement(instrument_dom, "tthd_value") - if _tthd_value == '': - _tthd_value = 'N/A' + if _tthd_value == "": + _tthd_value = "N/A" self.tthd_value = _tthd_value _ths_value = getStringElement(instrument_dom, "ths_value") - if _ths_value == '': - _ths_value = 'N/A' + if _ths_value == "": + _ths_value = "N/A" self.ths_value = _ths_value - #low resolution range - self.data_x_range_flag = getBoolElement(instrument_dom, "x_range_flag", - default=self.data_x_range_flag) + # low resolution range + self.data_x_range_flag = getBoolElement(instrument_dom, "x_range_flag", default=self.data_x_range_flag) - self.data_x_range = [getIntElement(instrument_dom, "x_min_pixel"), - getIntElement(instrument_dom, "x_max_pixel")] + self.data_x_range = [getIntElement(instrument_dom, "x_min_pixel"), getIntElement(instrument_dom, "x_max_pixel")] - self.norm_x_range_flag = getBoolElement(instrument_dom, "norm_x_range_flag", - default=self.norm_x_range_flag) + self.norm_x_range_flag = getBoolElement(instrument_dom, "norm_x_range_flag", default=self.norm_x_range_flag) - self.norm_x_range = [getIntElement(instrument_dom, "norm_x_min"), - getIntElement(instrument_dom, "norm_x_max")] + self.norm_x_range = [getIntElement(instrument_dom, "norm_x_min"), getIntElement(instrument_dom, "norm_x_max")] # background flag - self.subtract_background = getBoolElement(instrument_dom, "background_flag", - default=self.subtract_background) + self.subtract_background = getBoolElement(instrument_dom, "background_flag", default=self.subtract_background) # use two backgrounds flag - self.two_backgrounds = getBoolElement(instrument_dom, "two_backgrounds", - default=self.two_backgrounds) + self.two_backgrounds = getBoolElement(instrument_dom, "two_backgrounds", default=self.two_backgrounds) # background from/to pixels - self.background_roi = [getIntElement(instrument_dom, "back_roi1_from"), - getIntElement(instrument_dom, "back_roi1_to"), - getIntElement(instrument_dom, "back_roi2_from"), - getIntElement(instrument_dom, "back_roi2_to")] + self.background_roi = [ + getIntElement(instrument_dom, "back_roi1_from"), + getIntElement(instrument_dom, "back_roi1_to"), + getIntElement(instrument_dom, "back_roi2_from"), + getIntElement(instrument_dom, "back_roi2_to"), + ] # TOF range - self.select_tof_range = getBoolElement(instrument_dom, "tof_range_flag", - default=self.select_tof_range) - self.tof_range 
= [getFloatElement(instrument_dom, "from_tof_range"), - getFloatElement(instrument_dom, "to_tof_range")] + self.select_tof_range = getBoolElement(instrument_dom, "tof_range_flag", default=self.select_tof_range) + self.tof_range = [getFloatElement(instrument_dom, "from_tof_range"), getFloatElement(instrument_dom, "to_tof_range")] self.data_files = getIntList(instrument_dom, "data_sets") - #with or without norm - self.apply_normalization = getBoolElement(instrument_dom, "norm_flag", - default=self.apply_normalization) + # with or without norm + self.apply_normalization = getBoolElement(instrument_dom, "norm_flag", default=self.apply_normalization) - #Peak from/to pixels - self.norm_peak_range = [getIntElement(instrument_dom, "norm_from_peak_pixels"), - getIntElement(instrument_dom, "norm_to_peak_pixels")] + # Peak from/to pixels + self.norm_peak_range = [ + getIntElement(instrument_dom, "norm_from_peak_pixels"), + getIntElement(instrument_dom, "norm_to_peak_pixels"), + ] # Background subtraction option - self.subtract_norm_background = getBoolElement(instrument_dom, "norm_background_flag", - default=self.subtract_norm_background) + self.subtract_norm_background = getBoolElement(instrument_dom, "norm_background_flag", default=self.subtract_norm_background) - self.norm_background_roi = [getIntElement(instrument_dom, "norm_from_back_pixels"), - getIntElement(instrument_dom, "norm_to_back_pixels")] + self.norm_background_roi = [ + getIntElement(instrument_dom, "norm_from_back_pixels"), + getIntElement(instrument_dom, "norm_to_back_pixels"), + ] self.norm_file = getIntElement(instrument_dom, "norm_dataset") @@ -249,8 +252,7 @@ def from_xml_element(self, instrument_dom): # Angle offset self.angle_offset = getFloatElement(instrument_dom, "angle_offset", default=self.angle_offset) - self.angle_offset_error = getFloatElement(instrument_dom, "angle_offset_error", - default=self.angle_offset_error) + self.angle_offset_error = getFloatElement(instrument_dom, "angle_offset_error", default=self.angle_offset_error) # Scaling factor file and options self.scaling_factor_file = getStringElement(instrument_dom, "scaling_factor_file") @@ -262,67 +264,74 @@ def from_xml_element(self, instrument_dom): self.incident_medium_list = getStringList(instrument_dom, "incident_medium_list") self.incident_medium_index_selected = getIntElement(instrument_dom, "incident_medium_index_selected") else: - self.incident_medium_list = ['H2O'] + self.incident_medium_list = ["H2O"] self.incident_medium_index_selected = 0 # Dead time correction - self.dead_time = getBoolElement(instrument_dom, "dead_time_correction", - default=self.dead_time) - self.paralyzable = getBoolElement(instrument_dom, "dead_time_paralyzable", - default=self.paralyzable) - self.dead_time_value = getFloatElement(instrument_dom, "dead_time_value", - default=self.dead_time_value) - self.dead_time_tof_step = getFloatElement(instrument_dom, "dead_time_tof_step", - default=self.dead_time_tof_step) + self.dead_time = getBoolElement(instrument_dom, "dead_time_correction", default=self.dead_time) + self.paralyzable = getBoolElement(instrument_dom, "dead_time_paralyzable", default=self.paralyzable) + self.dead_time_value = getFloatElement(instrument_dom, "dead_time_value", default=self.dead_time_value) + self.dead_time_tof_step = getFloatElement(instrument_dom, "dead_time_tof_step", default=self.dead_time_tof_step) # Emission time # Defaults to True, but will be skipped if the necessary meta data is not found - self.use_emission_time = getBoolElement(instrument_dom, 
"use_emission_time", - default=True) + self.use_emission_time = getBoolElement(instrument_dom, "use_emission_time", default=True) ###### Utility functions to read XML content ######################## def getText(nodelist): - """ - Utility method to extract text out of an XML node - """ + """Utility method to extract text out of an XML node""" rc = "" for node in nodelist: if node.nodeType == node.TEXT_NODE: rc = rc + node.data return rc + def getContent(dom, tag): + """Returns the content of a tag within a dom object""" element_list = dom.getElementsByTagName(tag) return getText(element_list[0].childNodes) if len(element_list) > 0 else None + def getIntElement(dom, tag, default=None): + """Parse an integer element from the dom object""" value = getContent(dom, tag) return int(value) if value is not None else default + def getIntList(dom, tag, default=[]): + """Parse a list of integers from the dom object""" value = getContent(dom, tag) if value is not None and len(value.strip()) > 0: - return list(map(int, value.split(','))) + return list(map(int, value.split(","))) else: return default + def getFloatElement(dom, tag, default=None): + """Parse a float element from the dom object""" value = getContent(dom, tag) return float(value) if value is not None else default + def getFloatList(dom, tag, default=[]): + """Parse a list of floats from the dom object""" value = getContent(dom, tag) if value is not None and len(value.strip()) > 0: - return list(map(float, value.split(','))) + return list(map(float, value.split(","))) else: return default -def getStringElement(dom, tag, default=''): + +def getStringElement(dom, tag, default=""): + """Parse a string element from the dom object""" value = getContent(dom, tag) return value if value is not None else default + def getStringList(dom, tag, _default=[]): + """Parse a list of strings from the dom object""" elem_list = [] element_list = dom.getElementsByTagName(tag) if len(element_list) > 0: @@ -330,7 +339,9 @@ def getStringList(dom, tag, _default=[]): elem_list.append(getText(l.childNodes).strip()) return elem_list -def getBoolElement(dom, tag, true_tag='true', default=False): + +def getBoolElement(dom, tag, true_tag="true", default=False): + """Parse a boolean element from the dom object""" value = getContent(dom, tag) return value.lower() == true_tag.lower() if value is not None else default @@ -338,7 +349,17 @@ def getBoolElement(dom, tag, true_tag='true', default=False): ###### Functions to read/write a template file ###################### def to_xml(data_sets): """ - Create XML from the current data. + Create XML from the current data. 
+ + Parameters + ---------- + data_sets : list + List of ReductionParameters instances + + Returns + ------- + str + XML string """ _xml = "\n" _xml += " REFL\n" @@ -346,25 +367,36 @@ def to_xml(data_sets): _xml += " %s\n" % VERSION _xml += " %s\n" % MANTID_VERSION _xml += " lr_reduction-%s\n" % VERSION - _xml += "\n" + _xml += "\n" for item in data_sets: _xml += item.to_xml() _xml += "\n" _xml += "\n" return _xml + def from_xml(xml_str): """ - Read in data from XML string + Read in data from XML string + + Parameters + ---------- + xml_str : str + String representation of a list of ReductionParameters instances + + Returns + ------- + list + List of ReductionParameters instances """ data_sets = [] dom = xml.dom.minidom.parseString(xml_str) element_list = dom.getElementsByTagName("Data") - if len(element_list)==0: + if len(element_list) == 0: element_list = dom.getElementsByTagName("RefLData") - if len(element_list)>0: + if len(element_list) > 0: for item in element_list: if item is not None: data_set = ReductionParameters() From 8c423197c1c20b87e98481d566ba155a67e9d783 Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Tue, 7 Jan 2025 19:25:38 -0500 Subject: [PATCH 3/5] Add docs to output and peak finding --- reduction/lr_reduction/output.py | 143 ++++++++++++++++--------- reduction/lr_reduction/peak_finding.py | 62 ++++++++--- 2 files changed, 143 insertions(+), 62 deletions(-) diff --git a/reduction/lr_reduction/output.py b/reduction/lr_reduction/output.py index a027475..8714b3f 100644 --- a/reduction/lr_reduction/output.py +++ b/reduction/lr_reduction/output.py @@ -1,6 +1,7 @@ """ - Write R(q) output +Write R(q) output """ + import json import numpy as np @@ -8,10 +9,11 @@ from . import __version__ as VERSION -class RunCollection(): +class RunCollection: """ - A collection of runs to assemble into a single R(Q) + A collection of runs to assemble into a single R(Q) """ + def __init__(self, average_overlap=False): self.collection = [] self.average_overlap = average_overlap @@ -22,16 +24,29 @@ def __init__(self, average_overlap=False): def add(self, q, r, dr, meta_data, dq=None): """ - Add a partial R(q) to the collection + Add a partial R(q) to the collection + + Parameters + ---------- + q : array + Q values + r : array + R values + dr : array + Error in R values + meta_data : dict + Meta data for the run + dq : array, optional + Q resolution """ if dq is None: - resolution = meta_data['dq_over_q'] + resolution = meta_data["dq_over_q"] dq = resolution * q self.collection.append(dict(q=q, r=r, dr=dr, dq=dq, info=meta_data)) def merge(self): """ - Merge the collection of runs + Merge the collection of runs """ qz_all = [] refl_all = [] @@ -39,11 +54,11 @@ def merge(self): d_qz_all = [] for item in self.collection: - for i in range(len(item['q'])): - qz_all.append(item['q'][i]) - refl_all.append(item['r'][i]) - d_refl_all.append(item['dr'][i]) - d_qz_all.append(item['dq'][i]) + for i in range(len(item["q"])): + qz_all.append(item["q"][i]) + refl_all.append(item["r"][i]) + d_refl_all.append(item["dr"][i]) + d_qz_all.append(item["dq"][i]) qz_all = np.asarray(qz_all) refl_all = np.asarray(refl_all) @@ -66,84 +81,109 @@ def merge(self): # Average information for groups of points qz = self.qz_all[0] total = self.refl_all[0] - err2 = self.d_refl_all[0]**2 + err2 = self.d_refl_all[0] ** 2 dq = self.d_qz_all[0] - npts = 1. 
+ npts = 1.0 for i in range(1, len(self.qz_all)): - if (self.qz_all[i] - qz)/qz > 0.000001: + if (self.qz_all[i] - qz) / qz > 0.000001: # Store the previous point qz_all.append(qz) - refl_all.append(total/npts) - d_refl_all.append(np.sqrt(err2)/npts) - d_qz_all .append(dq) + refl_all.append(total / npts) + d_refl_all.append(np.sqrt(err2) / npts) + d_qz_all.append(dq) # Start a new point qz = self.qz_all[i] total = self.refl_all[i] - err2 = self.d_refl_all[i]**2 + err2 = self.d_refl_all[i] ** 2 dq = self.d_qz_all[i] - npts = 1. + npts = 1.0 else: total += self.refl_all[i] - err2 += self.d_refl_all[i]**2 - npts += 1. + err2 += self.d_refl_all[i] ** 2 + npts += 1.0 self.qz_all = np.asarray(qz_all) self.refl_all = np.asarray(refl_all) self.d_refl_all = np.asarray(d_refl_all) self.d_qz_all = np.asarray(d_qz_all) def save_ascii(self, file_path, meta_as_json=False): + """ - Save R(Q) in ascii format + Save R(Q) in ASCII format. + This function merges the data before saving. It writes metadata and R(Q) data + to the specified file in ASCII format. The metadata includes experiment details, + reduction version, run title, start time, reduction time, and other optional + parameters. The R(Q) data includes Q, R, dR, and dQ values. + + Parameters + ---------- + file_path : str + The path to the file where the ASCII data will be saved. + meta_as_json : bool, optional + If True, metadata will be written in JSON format. Default is False. """ self.merge() - with open(file_path, 'w') as fd: + with open(file_path, "w") as fd: # Write meta data initial_entry_written = False for item in self.collection: - _meta = item['info'] + _meta = item["info"] if not initial_entry_written: - fd.write("# Experiment %s Run %s\n" % (_meta['experiment'], _meta['run_number'])) + fd.write("# Experiment %s Run %s\n" % (_meta["experiment"], _meta["run_number"])) fd.write("# Reduction %s\n" % VERSION) - fd.write("# Run title: %s\n" % _meta['run_title']) - fd.write("# Run start time: %s\n" % _meta['start_time']) - fd.write("# Reduction time: %s\n" % _meta['time']) - if 'q_summing' in _meta: - fd.write("# Q summing: %s\n" % _meta['q_summing']) - if 'tof_weighted' in _meta: - fd.write("# TOF weighted: %s\n" % _meta['tof_weighted']) - if 'bck_in_q' in _meta: - fd.write("# Bck in Q: %s\n" % _meta['bck_in_q']) - if 'theta_offset' in _meta: - fd.write("# Theta offset: %s\n" % _meta['theta_offset']) + fd.write("# Run title: %s\n" % _meta["run_title"]) + fd.write("# Run start time: %s\n" % _meta["start_time"]) + fd.write("# Reduction time: %s\n" % _meta["time"]) + if "q_summing" in _meta: + fd.write("# Q summing: %s\n" % _meta["q_summing"]) + if "tof_weighted" in _meta: + fd.write("# TOF weighted: %s\n" % _meta["tof_weighted"]) + if "bck_in_q" in _meta: + fd.write("# Bck in Q: %s\n" % _meta["bck_in_q"]) + if "theta_offset" in _meta: + fd.write("# Theta offset: %s\n" % _meta["theta_offset"]) if meta_as_json: fd.write("# Meta:%s\n" % json.dumps(_meta)) fd.write("# DataRun NormRun TwoTheta(deg) LambdaMin(A) ") fd.write("LambdaMax(A) Qmin(1/A) Qmax(1/A) SF_A SF_B\n") fd.write("") - if 'scaling_factors' in _meta: - a = _meta['scaling_factors']['a'] - b = _meta['scaling_factors']['b'] + if "scaling_factors" in _meta: + a = _meta["scaling_factors"]["a"] + b = _meta["scaling_factors"]["b"] else: a = 1 b = 0 - two_theta = _meta['theta'] * 360 / np.pi - value_list = (_meta['run_number'], _meta['norm_run'], two_theta, - _meta['wl_min'], _meta['wl_max'], _meta['q_min'], _meta['q_max'], - a, b) + two_theta = _meta["theta"] * 360 / np.pi + value_list = 
( + _meta["run_number"], + _meta["norm_run"], + two_theta, + _meta["wl_min"], + _meta["wl_max"], + _meta["q_min"], + _meta["q_max"], + a, + b, + ) fd.write("# %-9s %-9s %-14.6g %-14.6g %-12.6g %-12.6s %-12.6s %-12.6s %-12.6s\n" % value_list) initial_entry_written = True # Write R(q) - fd.write('# %-21s %-21s %-21s %-21s\n' % ('Q [1/Angstrom]', 'R', 'dR', 'dQ [FWHM]')) + fd.write("# %-21s %-21s %-21s %-21s\n" % ("Q [1/Angstrom]", "R", "dR", "dQ [FWHM]")) for i in range(len(self.qz_all)): - fd.write('%20.16f %20.16f %20.16f %20.16f\n' % (self.qz_all[i], self.refl_all[i], self.d_refl_all[i], self.d_qz_all[i])) + fd.write("%20.16f %20.16f %20.16f %20.16f\n" % (self.qz_all[i], self.refl_all[i], self.d_refl_all[i], self.d_qz_all[i])) def add_from_file(self, file_path): """ - Read a partial result file and add it to the collection + Read a partial result file and add it to the collection + + Parameters + ---------- + file_path : str + The path to the file to be read """ _q, _r, _dr, _dq, _meta = read_file(file_path) self.add(_q, _r, _dr, _meta, dq=_dq) @@ -151,13 +191,18 @@ def add_from_file(self, file_path): def read_file(file_path): """ - Read a data file and extract meta data + Read a data file and extract meta data + + Parameters + ---------- + file_path : str + The path to the file to be read """ _meta = dict() - with open(file_path, 'r') as fd: + with open(file_path, "r") as fd: for l in fd.readlines(): if l.startswith("# Meta:"): - _meta = json.loads(l[len("# Meta:"):-1]) + _meta = json.loads(l[len("# Meta:") : -1]) try: _q, _r, _dr, _dq = np.loadtxt(file_path).T except: diff --git a/reduction/lr_reduction/peak_finding.py b/reduction/lr_reduction/peak_finding.py index 2f776b5..2ff39a5 100644 --- a/reduction/lr_reduction/peak_finding.py +++ b/reduction/lr_reduction/peak_finding.py @@ -10,11 +10,28 @@ def process_data(workspace, summed=True, tof_step=200): - r""" + """ Process a Mantid workspace to extract counts vs pixel. - :param workspace: Mantid workspace - :param summed: if True, the x pixels with be summed - :param tof_step: TOF bin size + + Parameters + ---------- + workspace : Mantid workspace + The Mantid workspace to process. + summed : bool, optional + If True, the x pixels will be summed (default is True). + tof_step : int, optional + The TOF bin size (default is 200). + + Returns + ------- + tuple + A tuple containing: + - tof : numpy.ndarray + The time-of-flight values. + - _x : numpy.ndarray + The pixel indices. + - _y : numpy.ndarray + The summed counts for each pixel. """ tof_min = workspace.getTofMin() tof_max = workspace.getTofMax() @@ -35,15 +52,34 @@ def process_data(workspace, summed=True, tof_step=200): def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None, background=None): - r""" - Fit a Gaussian peak. - :param x: list of x values - :param y: list of y values - :param x_min: start index of the list of points - :param x_max: end index of the list of points - :param center: estimated center position - :param sigma: if provided, the sigma will be fixed to the given value - :param background: if provided, the value will be subtracted from y + """ + Fit a Gaussian peak. + + Parameters + ---------- + x : list + List of x values. + y : list + List of y values. + x_min : int, optional + Start index of the list of points, by default 110. + x_max : int, optional + End index of the list of points, by default 170. + center : float, optional + Estimated center position, by default None. 
+    sigma : float, optional
+        If provided, the sigma will be fixed to the given value, by default None.
+    background : float, optional
+        If provided, the value will be subtracted from y, by default None.
+
+    Returns
+    -------
+    c : float
+        Fitted center position of the Gaussian peak.
+    width : float
+        Fitted width (sigma) of the Gaussian peak.
+    fit : lmfit.model.ModelResult
+        The result of the fit.
     """
 
     gauss = GaussianModel(prefix='g_')

From c06a1a904ecb515f6482b2478ae3b36d89ce27ad Mon Sep 17 00:00:00 2001
From: Mathieu Doucet
Date: Wed, 8 Jan 2025 12:42:01 -0500
Subject: [PATCH 4/5] Add user docs

---
 docs/index.md                           |   1 +
 docs/user/event_processing.md           |  70 ++++++
 reduction/lr_reduction/template.py      | 232 +++++++++---------
 reduction/lr_reduction/time_resolved.py | 301 ++++++++++++++----------
 4 files changed, 372 insertions(+), 232 deletions(-)
 create mode 100644 docs/user/event_processing.md

diff --git a/docs/index.md b/docs/index.md
index 96d78d0..0b355fd 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -2,6 +2,7 @@
 
 ## User Guide
 
+- [Workflow overview](./user/workflow.md)
 - [Conda Environments](./user/conda_environments.md)
 - [Releases](./releases.md)
 
diff --git a/docs/user/event_processing.md b/docs/user/event_processing.md
new file mode 100644
index 0000000..eb625d7
--- /dev/null
+++ b/docs/user/event_processing.md
@@ -0,0 +1,70 @@
+# Event processing
+The BL4B instrument leverages the concept of weighted events for several aspects of
+the reduction process. Following this approach, each event is treated separately and is
+assigned a weight $w$ to account for various corrections. Summing events then becomes the
+sum of the weights for all events.
+
+## Loading events and dead time correction
+A dead time correction is available for rates above around 2000 counts/sec. Both
+paralyzing and non-paralyzing implementations are available. Paralyzing refers to a detector
+that extends its dead time period when events occur while the detector is already unavailable
+to process events, while non-paralyzing refers to a detector that always becomes available
+after the dead time period [1].
+
+The dead time correction to be multiplied by the measured detector counts is given by
+the following for the paralyzing case:
+
+$$
+C_{par} = -{\cal Re}W_0(-R\tau/\Delta_{TOF}) \, \Delta_{TOF}/(R\tau)
+$$
+where $R$ is the number of triggers per accelerator pulse within a time-of-flight bin $\Delta_{TOF}$.
+The dead time for the current BL4B detector is $\tau=4.2$ $\mu s$. In the equation above, ${\cal Re}W_0$ refers to the real part of the principal branch of the Lambert W function.
+
+The following is used for the non-paralyzing case:
+$$
+C_{non-par} = 1/(1-R\tau/\Delta_{TOF})
+$$
+
+By default, we use a paralyzing dead time correction with $\Delta_{TOF}=100$ $\mu s$. These parameters can be changed.
+
+The BL4B detector is a wire chamber with a detector readout that includes digitization of the
+position of each event. For a number of reasons, like event pileup, it is possible for the
+electronics to be unable to assign a coordinate to a particular trigger event. These events are
+labelled as error events and stored along with the good events. While only good events are used
+to compute reflectivity, error events are included in the $R$ value defined above. For clarity, we chose to define $R$ in terms of number of triggers as opposed to events.
+
+Once the dead time correction as a function of time-of-flight is computed, each event
+in the run being processed is assigned a weight according to the correction:
+
+$w_i = C(t_i)$
+
+where $t_i$ is the time-of-flight of event $i$. The value of $C$ is interpolated from the
+computed dead time correction distribution.
+
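+As an illustration, the paralyzing correction above can be evaluated per time-of-flight bin. This is
+only a minimal sketch, assuming `numpy` and `scipy` are available; the function name and arguments are
+illustrative and are not part of the package API.
+```python
+import numpy as np
+from scipy.special import lambertw
+
+def paralyzing_correction(triggers_per_bin, tau=4.2, tof_step=100.0):
+    """Dead time correction factor per TOF bin (times in microseconds)."""
+    x = np.asarray(triggers_per_bin, dtype=float) * tau / tof_step
+    corr = np.ones_like(x)
+    nonzero = x > 0
+    # C_par = -Re W0(-R tau / dt) * dt / (R tau), which tends to 1 at low rates
+    corr[nonzero] = -np.real(lambertw(-x[nonzero])) / x[nonzero]
+    return corr
+```
+Each event would then be weighted by interpolating this correction at its time of flight, as described above.
+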
+[1] V. Bécares, J. Blázquez, Detector Dead Time Determination and Optimal Counting Rate for a Detector Near a Spallation Source or a Subcritical Multiplying System, Science and Technology of Nuclear Installations, 2012, 240693, https://doi.org/10.1155/2012/240693
+
+
+### Correct for emission time
+Since neutrons of different wavelengths spend different amounts of time on average
+within the moderator, a linear approximation is used by the data acquisition system to
+account for emission time when phasing choppers.
+
+The time of flight for each event $i$ is corrected by a small value given by
+
+$\Delta t_i = -t_{off} + t_{mult} * t_i * h L / m_n$
+
+where $h$ is Planck's constant, $m_n$ is the mass of the neutron, and $L$ is the distance
+between the moderator and the detector.
+
+The $t_{off}$, $t_{mult}$, and $L$ parameters are process variables that are stored in the
+data file and can be changed in the data acquisition system.
+
+
+### Gravity correction
+
+
+### Q calculation
+
+### Constant-Q binning
+
+## Normalization options
\ No newline at end of file
diff --git a/reduction/lr_reduction/template.py b/reduction/lr_reduction/template.py
index 71adb77..b448148 100644
--- a/reduction/lr_reduction/template.py
+++ b/reduction/lr_reduction/template.py
@@ -1,6 +1,7 @@
 """
-    Reduce a data run using a template generated by RefRed
+Reduce a data run using a template generated by RefRed
 """
+
 import os
 from functools import reduce
 
@@ -14,10 +15,11 @@
 TOLERANCE = 0.07
 OUTPUT_NORM_DATA = False
 
+
 def read_template(template_file, sequence_number):
     """
-        Read template from file.
+ @param sequence_number: the ID of the data set within the sequence of runs """ fd = open(template_file, "r") xml_str = fd.read() @@ -33,15 +35,15 @@ def read_template(template_file, sequence_number): def scaling_factor(scaling_factor_file, workspace, match_slit_width=True): """ - Apply scaling factor from reference scaling data - @param workspace: Mantid workspace + Apply scaling factor from reference scaling data + @param workspace: Mantid workspace """ if not os.path.isfile(scaling_factor_file): print("Could not find scaling factor file: %s" % scaling_factor_file) return workspace # Get the wavelength - lr = workspace.getRun().getProperty('LambdaRequest').value[0] + lr = workspace.getRun().getProperty("LambdaRequest").value[0] lr_value = float("{0:.2f}".format(lr)) s1h = abs(workspace.getRun().getProperty("S1VHeight").value[0]) @@ -51,33 +53,32 @@ def scaling_factor(scaling_factor_file, workspace, match_slit_width=True): def _reduce(accumulation, item): """ - Reduce function that accumulates values in a dictionary + Reduce function that accumulates values in a dictionary """ - toks_item = item.split('=') - if len(toks_item)!=2: + toks_item = item.split("=") + if len(toks_item) != 2: return accumulation if isinstance(accumulation, dict): accumulation[toks_item[0].strip()] = toks_item[1].strip() else: - toks_accum = accumulation.split('=') - accumulation = {toks_item[0].strip(): toks_item[1].strip(), - toks_accum[0].strip(): toks_accum[1].strip()} + toks_accum = accumulation.split("=") + accumulation = {toks_item[0].strip(): toks_item[1].strip(), toks_accum[0].strip(): toks_accum[1].strip()} return accumulation def _value_check(key, data, reference): """ - Check an entry against a reference value + Check an entry against a reference value """ if key in data: return abs(abs(float(data[key])) - abs(float(reference))) <= TOLERANCE return False - with open(scaling_factor_file, 'r') as fd: + with open(scaling_factor_file, "r") as fd: file_content = fd.read() data_found = None - for line in file_content.split('\n'): - if line.startswith('#'): + for line in file_content.split("\n"): + if line.startswith("#"): continue # Parse the line of data and produce a dict @@ -87,65 +88,74 @@ def _value_check(key, data, reference): # Get ordered list of keys keys = [] for token in toks: - key_value = token.split('=') - if len(key_value)==2: + key_value = token.split("=") + if len(key_value) == 2: keys.append(key_value[0].strip()) # Skip empty lines - if len(keys)==0: + if len(keys) == 0: continue # Complain if the format is non-standard - elif len(keys)<10: + elif len(keys) < 10: print("Bad scaling factor entry\n %s" % line) continue # Sanity check - if keys[0] != 'IncidentMedium' and keys[1] != 'LambdaRequested' \ - and keys[2] != 'S1H': + if keys[0] != "IncidentMedium" and keys[1] != "LambdaRequested" and keys[2] != "S1H": print("The scaling factor file isn't standard: bad keywords") # The S2H key has been changing in the earlier version of REFL reduction. # Get the key from the data to make sure we are backward compatible. 
s2h_key = keys[3] s2w_key = keys[5] - if 'IncidentMedium' in data_dict \ - and _value_check('LambdaRequested', data_dict, lr_value) \ - and _value_check('S1H', data_dict, s1h) \ - and _value_check(s2h_key, data_dict, s2h): - - if not match_slit_width or (_value_check('S1W', data_dict, s1w) - and _value_check(s2w_key, data_dict, s2w)): + if ( + "IncidentMedium" in data_dict + and _value_check("LambdaRequested", data_dict, lr_value) + and _value_check("S1H", data_dict, s1h) + and _value_check(s2h_key, data_dict, s2h) + ): + if not match_slit_width or (_value_check("S1W", data_dict, s1w) and _value_check(s2w_key, data_dict, s2w)): data_found = data_dict break if data_found is not None: - a = float(data_found['a']) - b = float(data_found['b']) - a_error = float(data_found['error_a']) - b_error = float(data_found['error_b']) + a = float(data_found["a"]) + b = float(data_found["b"]) + a_error = float(data_found["error_a"]) + b_error = float(data_found["error_b"]) else: return 1, 0, 0, 0 return a, b, a_error, b_error -def process_from_template(run_number, template_path, q_summing=False, normalize=True, - tof_weighted=False, bck_in_q=False, clean=False, info=False): +def process_from_template( + run_number, template_path, q_summing=False, normalize=True, tof_weighted=False, bck_in_q=False, clean=False, info=False +): """ - The clean option removes leading zeros and the drop when doing q-summing + The clean option removes leading zeros and the drop when doing q-summing """ # For backward compatibility, consider the case of a list of run numbers to be added - if ',' in str(run_number): - list_of_runs = str(run_number).split(',') - run_number = '+'.join(list_of_runs) + if "," in str(run_number): + list_of_runs = str(run_number).split(",") + run_number = "+".join(list_of_runs) # Load data ws_sc = api.Load("REF_L_%s" % run_number, OutputWorkspace="REF_L_%s" % run_number) - return process_from_template_ws(ws_sc, template_path, q_summing=q_summing, - tof_weighted=tof_weighted, bck_in_q=bck_in_q, - clean=clean, info=info, normalize=normalize) - - -def process_from_template_ws(ws_sc, template_data, q_summing=False, - tof_weighted=False, bck_in_q=False, clean=False, - info=False, normalize=True, theta_value=None, ws_db=None): + return process_from_template_ws( + ws_sc, template_path, q_summing=q_summing, tof_weighted=tof_weighted, bck_in_q=bck_in_q, clean=clean, info=info, normalize=normalize + ) + + +def process_from_template_ws( + ws_sc, + template_data, + q_summing=False, + tof_weighted=False, + bck_in_q=False, + clean=False, + info=False, + normalize=True, + theta_value=None, + ws_db=None, +): # Get the sequence number sequence_number = 1 if ws_sc.getRun().hasProperty("sequence_number"): @@ -171,52 +181,51 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, ws_db = event_reduction.apply_dead_time_correction(ws_db, template_data) # If we run in theta-theta geometry, we'll need thi - thi_value = ws_sc.getRun()['thi'].value[0] - ths_value = ws_sc.getRun()['ths'].value[0] + thi_value = ws_sc.getRun()["thi"].value[0] + ths_value = ws_sc.getRun()["ths"].value[0] # NOTE: An offset is no longer used be default. To use itm we can use # the EventReflectivity directly. - _wl = ws_sc.getRun()['LambdaRequest'].value[0] + _wl = ws_sc.getRun()["LambdaRequest"].value[0] # Determine scattering angle. If a value was given, don't add the offset if theta_value is not None: - theta = theta_value * np.pi / 180. 
+ theta = theta_value * np.pi / 180.0 else: - if 'BL4B:CS:ExpPl:OperatingMode' in ws_sc.getRun() \ - and ws_sc.getRun().getProperty('BL4B:CS:ExpPl:OperatingMode').value[0] == 'Free Liquid': - theta = thi_value * np.pi / 180. + if ( + "BL4B:CS:ExpPl:OperatingMode" in ws_sc.getRun() + and ws_sc.getRun().getProperty("BL4B:CS:ExpPl:OperatingMode").value[0] == "Free Liquid" + ): + theta = thi_value * np.pi / 180.0 else: - theta = ths_value * np.pi / 180. + theta = ths_value * np.pi / 180.0 # Add offset - theta += template_data.angle_offset * np.pi / 180. + theta += template_data.angle_offset * np.pi / 180.0 theta_degrees = theta * 180 / np.pi - print('wl=%g; ths=%g; thi=%g; offset=%g; theta used=%g' % (_wl, ths_value, - thi_value, - template_data.angle_offset, - theta_degrees)) + print("wl=%g; ths=%g; thi=%g; offset=%g; theta used=%g" % (_wl, ths_value, thi_value, template_data.angle_offset, theta_degrees)) # Get the reduction parameters from the template peak = template_data.data_peak_range if template_data.subtract_background: peak_bck = template_data.background_roi if template_data.two_backgrounds is False: - peak_bck = peak_bck[0: 2] # retain only the first background + peak_bck = peak_bck[0:2] # retain only the first background else: peak_bck = None - peak_center = (peak[0]+peak[1])/2.0 + peak_center = (peak[0] + peak[1]) / 2.0 # Fit the reflected beam position, which may not be in the middle and is # used in the q-summing calculation if q_summing: - x_min=template_data.data_peak_range[0] - x_max=template_data.data_peak_range[1] + x_min = template_data.data_peak_range[0] + x_max = template_data.data_peak_range[1] _, _x, _y = peak_finding.process_data(ws_sc, summed=True, tof_step=200) peak_center = np.argmax(_y[x_min:x_max]) + x_min - peak_center, sc_width, _ = peak_finding.fit_signal_flat_bck(_x, _y, x_min=x_min, x_max=x_max, center=peak_center, sigma=3.) 
+ peak_center, sc_width, _ = peak_finding.fit_signal_flat_bck(_x, _y, x_min=x_min, x_max=x_max, center=peak_center, sigma=3.0) print("Peak center: %g" % peak_center) if template_data.data_x_range_flag: @@ -241,35 +250,44 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, q_step = -template_data.q_step # Perform the reduction - event_refl = event_reduction.EventReflectivity(ws_sc, ws_db, - signal_peak=peak, signal_bck=peak_bck, - norm_peak=norm_peak, norm_bck=norm_bck, - specular_pixel=peak_center, - signal_low_res=low_res, norm_low_res=norm_low_res, - q_min=q_min, q_step=q_step, q_max=None, - tof_range=[tof_min, tof_max], - theta=np.abs(theta), - dead_time=template_data.dead_time, - paralyzable=template_data.paralyzable, - dead_time_value=template_data.dead_time_value, - dead_time_tof_step=template_data.dead_time_tof_step, - use_emission_time=template_data.use_emission_time, - functional_background=template_data.two_backgrounds, - instrument=event_reduction.EventReflectivity.INSTRUMENT_4B) + event_refl = event_reduction.EventReflectivity( + ws_sc, + ws_db, + signal_peak=peak, + signal_bck=peak_bck, + norm_peak=norm_peak, + norm_bck=norm_bck, + specular_pixel=peak_center, + signal_low_res=low_res, + norm_low_res=norm_low_res, + q_min=q_min, + q_step=q_step, + q_max=None, + tof_range=[tof_min, tof_max], + theta=np.abs(theta), + dead_time=template_data.dead_time, + paralyzable=template_data.paralyzable, + dead_time_value=template_data.dead_time_value, + dead_time_tof_step=template_data.dead_time_tof_step, + use_emission_time=template_data.use_emission_time, + functional_background=template_data.two_backgrounds, + instrument=event_reduction.EventReflectivity.INSTRUMENT_4B, + ) print(event_refl) # R(Q) - qz, refl, d_refl = event_refl.specular(q_summing=q_summing, tof_weighted=tof_weighted, - bck_in_q=bck_in_q, clean=clean, normalize=normalize) - qz_mid = (qz[:-1] + qz[1:])/2.0 + qz, refl, d_refl = event_refl.specular( + q_summing=q_summing, tof_weighted=tof_weighted, bck_in_q=bck_in_q, clean=clean, normalize=normalize + ) + qz_mid = (qz[:-1] + qz[1:]) / 2.0 # When using composite direct beam, we don't need a scaling # factor file if the multiplier is in the logs - scaling_factor_PV = 'BL4B:CS:Autoreduce:ScaleMultiplier' + scaling_factor_PV = "BL4B:CS:Autoreduce:ScaleMultiplier" if normalize and scaling_factor_PV in ws_db.getRun(): # The standard changed during early implementation... 
if int(template_data.norm_file) > 212005: - a = ws_db.getRun()[scaling_factor_PV].value[0]**2 + a = ws_db.getRun()[scaling_factor_PV].value[0] ** 2 else: a = ws_db.getRun()[scaling_factor_PV].value[0] print("Composite scaling factor: %s" % a) @@ -281,13 +299,13 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, # Get the scaling factors a, b, err_a, err_b = scaling_factor(template_data.scaling_factor_file, ws_sc) - _tof = 4*np.pi*np.sin(event_refl.theta)*event_refl.constant/qz - _tof_mid = (_tof[1:] + _tof[:-1])/2.0 + _tof = 4 * np.pi * np.sin(event_refl.theta) * event_refl.constant / qz + _tof_mid = (_tof[1:] + _tof[:-1]) / 2.0 - a_q = _tof_mid*b + a + a_q = _tof_mid * b + a d_a_q = np.sqrt(_tof_mid**2 * err_b**2 + err_a**2) - d_refl = np.sqrt(d_refl**2/a_q**2 + refl**2*d_a_q**2/a_q**4) + d_refl = np.sqrt(d_refl**2 / a_q**2 + refl**2 * d_a_q**2 / a_q**4) refl /= a_q else: print("Skipping scaling factor") @@ -296,39 +314,39 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False, # Trim ends as needed npts = len(qz_mid) - qz_mid = qz_mid[template_data.pre_cut:npts-template_data.post_cut] - refl = refl[template_data.pre_cut:npts-template_data.post_cut] - d_refl = d_refl[template_data.pre_cut:npts-template_data.post_cut] + qz_mid = qz_mid[template_data.pre_cut : npts - template_data.post_cut] + refl = refl[template_data.pre_cut : npts - template_data.post_cut] + d_refl = d_refl[template_data.pre_cut : npts - template_data.post_cut] if normalize and OUTPUT_NORM_DATA: - lr = ws_sc.getRun().getProperty('LambdaRequest').value[0] + lr = ws_sc.getRun().getProperty("LambdaRequest").value[0] s1h = abs(ws_sc.getRun().getProperty("S1VHeight").value[0]) s1w = abs(ws_sc.getRun().getProperty("S1HWidth").value[0]) s2h = abs(ws_sc.getRun().getProperty("SiVHeight").value[0]) s2w = abs(ws_sc.getRun().getProperty("SiHWidth").value[0]) # Apply scaling factor - _norm = (a_q * event_refl.norm)[template_data.pre_cut:npts-template_data.post_cut] - _d_norm = (a_q * event_refl.d_norm)[template_data.pre_cut:npts-template_data.post_cut] + _norm = (a_q * event_refl.norm)[template_data.pre_cut : npts - template_data.post_cut] + _d_norm = (a_q * event_refl.d_norm)[template_data.pre_cut : npts - template_data.post_cut] - wl=4.0*np.pi/qz_mid * np.sin(np.abs(theta)) + wl = 4.0 * np.pi / qz_mid * np.sin(np.abs(theta)) - with open('%s_%s_%2.3g_counts.txt' % (template_data.norm_file, lr, np.abs(ths_value)), 'w') as fd: - fd.write('# Theta: %g\n' % np.abs(ths_value)) - fd.write('# Slit 1 (h x w): %g x %g\n' % (s1h, s1w)) - fd.write('# Slit 2 (h x w): %g x %g\n' % (s2h, s2w)) - fd.write('# Q, wavelength, counts/mC, error\n') + with open("%s_%s_%2.3g_counts.txt" % (template_data.norm_file, lr, np.abs(ths_value)), "w") as fd: + fd.write("# Theta: %g\n" % np.abs(ths_value)) + fd.write("# Slit 1 (h x w): %g x %g\n" % (s1h, s1w)) + fd.write("# Slit 2 (h x w): %g x %g\n" % (s2h, s2w)) + fd.write("# Q, wavelength, counts/mC, error\n") for i in range(len(_norm)): - fd.write('%g %g %g %g\n' % (qz_mid[i], wl[i], _norm[i], _d_norm[i])) + fd.write("%g %g %g %g\n" % (qz_mid[i], wl[i], _norm[i], _d_norm[i])) # We can optionally return details about the reduction process if info: meta_data = event_refl.to_dict() - meta_data['scaling_factors'] = dict(a=a, err_a=err_a, b=b, err_b=err_b) - meta_data['q_summing'] = q_summing - meta_data['tof_weighted'] = tof_weighted - meta_data['bck_in_q'] = bck_in_q - meta_data['theta_offset'] = template_data.angle_offset + meta_data["scaling_factors"] = dict(a=a, err_a=err_a, 
b=b, err_b=err_b) + meta_data["q_summing"] = q_summing + meta_data["tof_weighted"] = tof_weighted + meta_data["bck_in_q"] = bck_in_q + meta_data["theta_offset"] = template_data.angle_offset return qz_mid, refl, d_refl, meta_data return qz_mid, refl, d_refl diff --git a/reduction/lr_reduction/time_resolved.py b/reduction/lr_reduction/time_resolved.py index 86715ce..36b0883 100644 --- a/reduction/lr_reduction/time_resolved.py +++ b/reduction/lr_reduction/time_resolved.py @@ -1,6 +1,7 @@ """ - Time-resolved data reduction +Time-resolved data reduction """ + import json import os import sys @@ -18,62 +19,68 @@ from .event_reduction import apply_dead_time_correction, compute_resolution -def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, scan_index=1, # noqa ARG001 - template_reference=None, q_summing=False): +def reduce_30Hz_from_ws( + meas_ws_30Hz, + ref_ws_30Hz, + data_60Hz, + template_data, + scan_index=1, # noqa ARG001 + template_reference=None, + q_summing=False, +): """ - Perform 30Hz reduction - @param meas_ws_30Hz: Mantid workspace of the data we want to reduce - @param ref_ws_30Hz: Mantid workspace of the reference data, take with the same config - @param data_60Hz: reduced reference data at 60Hz - @param template_data: template data object (for 30Hz) - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws_30Hz: Mantid workspace of the data we want to reduce + @param ref_ws_30Hz: Mantid workspace of the reference data, take with the same config + @param data_60Hz: reduced reference data at 60Hz + @param template_data: template data object (for 30Hz) + @param scan_index: scan index to use within the template. """ # Reduce the reference at 30Hz if template_reference is None: - r_ref = template.process_from_template_ws(ref_ws_30Hz, template_data, - q_summing=q_summing, normalize=False) + r_ref = template.process_from_template_ws(ref_ws_30Hz, template_data, q_summing=q_summing, normalize=False) else: - r_ref = template.process_from_template_ws(ref_ws_30Hz, template_reference, - q_summing=q_summing, normalize=False) + r_ref = template.process_from_template_ws(ref_ws_30Hz, template_reference, q_summing=q_summing, normalize=False) # Reduce the sample data at 30Hz - r_meas = template.process_from_template_ws(meas_ws_30Hz, template_data, - q_summing=q_summing, normalize=False) + r_meas = template.process_from_template_ws(meas_ws_30Hz, template_data, q_summing=q_summing, normalize=False) # Identify the bins we need to overlap with the 30Hz measurement # The assumption is that the binning is the same _max_q = min(r_ref[0].max(), r_meas[0].max()) _min_q = max(r_ref[0].min(), r_meas[0].min()) - _binning_60hz = (data_60Hz[0][1]-data_60Hz[0][0])/data_60Hz[0][0] - _binning_ref = (r_ref[0][1]-r_ref[0][0])/r_ref[0][0] - _binning_meas = (r_meas[0][1]-r_meas[0][0])/r_meas[0][0] + _binning_60hz = (data_60Hz[0][1] - data_60Hz[0][0]) / data_60Hz[0][0] + _binning_ref = (r_ref[0][1] - r_ref[0][0]) / r_ref[0][0] + _binning_meas = (r_meas[0][1] - r_meas[0][0]) / r_meas[0][0] if not np.fabs(_binning_60hz - _binning_ref) < 1e-5: print("ERROR: The binning of the 60 Hz reference is not the same as the dynamic template") print(" %s <> %s" % (_binning_60hz, _binning_ref)) # The tolerance will be half a bin - _tolerance = _binning_ref/2.0 + _tolerance = _binning_ref / 2.0 print("60Hz: %g %g [%g]" % (data_60Hz[0].min(), data_60Hz[0].max(), _binning_60hz)) print("Ref 30Hz: %g %g [%g]" % (r_meas[0].min(), r_meas[0].max(), _binning_ref)) print("Meas 
30Hz: %g %g [%g]" % (r_ref[0].min(), r_ref[0].max(), _binning_meas)) - _q_idx_60 = np.asarray(np.where((data_60Hz[0] > _min_q*(1-_tolerance)) & (data_60Hz[0] < _max_q*(1+_tolerance))))[0] - _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q*(1-_tolerance)) & (r_meas[0] < _max_q*(1+_tolerance))))[0] - _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q*(1-_tolerance)) & (r_ref[0] < _max_q*(1+_tolerance))))[0] + _q_idx_60 = np.asarray(np.where((data_60Hz[0] > _min_q * (1 - _tolerance)) & (data_60Hz[0] < _max_q * (1 + _tolerance))))[0] + _q_idx_meas30 = np.asarray(np.where((r_meas[0] > _min_q * (1 - _tolerance)) & (r_meas[0] < _max_q * (1 + _tolerance))))[0] + _q_idx_ref30 = np.asarray(np.where((r_ref[0] > _min_q * (1 - _tolerance)) & (r_ref[0] < _max_q * (1 + _tolerance))))[0] if not data_60Hz[0][_q_idx_60].shape[0] == r_ref[0][_q_idx_ref30].shape[0]: print("\n\n60Hz reference may have been reduced with different binning!") print("Array sizes: %g %g %g" % (len(_q_idx_60), len(_q_idx_meas30), len(_q_idx_ref30))) print("Remember to average overlapping points!\n\n") - r_q_final = r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60] + r_q_final = r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60] - dr_q_final = np.sqrt((r_meas[2][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[1][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]*data_60Hz[2][_q_idx_60])**2 \ - +(r_meas[1][_q_idx_meas30]/r_ref[1][_q_idx_ref30]**2*data_60Hz[1][_q_idx_60]*r_ref[2][_q_idx_ref30])**2) + dr_q_final = np.sqrt( + (r_meas[2][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[1][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] * data_60Hz[2][_q_idx_60]) ** 2 + + (r_meas[1][_q_idx_meas30] / r_ref[1][_q_idx_ref30] ** 2 * data_60Hz[1][_q_idx_60] * r_ref[2][_q_idx_ref30]) ** 2 + ) print("Q range: %s - %s" % (r_meas[0][0], r_meas[0][_q_idx_meas30][-1])) print("Constant-Q binning: %s" % str(q_summing)) @@ -93,45 +100,76 @@ def reduce_30Hz_from_ws(meas_ws_30Hz, ref_ws_30Hz, data_60Hz, template_data, sca return np.asarray([q[_idx], r_q_final[_idx], dr_q_final[_idx], dq]) -def reduce_30Hz_slices(meas_run_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, - template_reference=None, q_summing=False): # noqa ARG001 - +def reduce_30Hz_slices( + meas_run_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, + q_summing=False, +): # noqa ARG001 meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) - return reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=scan_index, create_plot=create_plot, - template_reference=template_reference, q_summing=False) - -def reduce_slices(meas_run, template_file, - time_interval, output_dir, scan_index=1, - theta_offset=None, create_plot=True): - + return reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=scan_index, + create_plot=create_plot, + template_reference=template_reference, + q_summing=False, + ) + + +def reduce_slices(meas_run, template_file, time_interval, output_dir, scan_index=1, theta_offset=None, create_plot=True): meas_ws = api.LoadEventNexus("REF_L_%s" % meas_run) - return reduce_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=scan_index, - 
theta_offset=theta_offset, create_plot=create_plot) - -def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30Hz, - time_interval, output_dir, scan_index=1, create_plot=True, - template_reference=None, q_summing=False): + return reduce_slices_ws( + meas_ws, template_file, time_interval, output_dir, scan_index=scan_index, theta_offset=theta_offset, create_plot=create_plot + ) + + +def reduce_30Hz_slices_ws( + meas_ws_30Hz, + ref_run_30Hz, + ref_data_60Hz, + template_30Hz, + time_interval, + output_dir, + scan_index=1, + create_plot=True, + template_reference=None, + q_summing=False, +): """ - Perform 30Hz reduction - @param meas_ws_30Hz: workspace of the data we want to reduce - @param ref_ws_30Hz: workspace of the reference data, take with the same config - @param ref_data_60Hz: file path of the reduce data file at 60Hz - @param template_30Hz: file path of the template file for 30Hz - @param time_interval: time step in seconds - @param scan_index: scan index to use within the template. + Perform 30Hz reduction + @param meas_ws_30Hz: workspace of the data we want to reduce + @param ref_ws_30Hz: workspace of the reference data, take with the same config + @param ref_data_60Hz: file path of the reduce data file at 60Hz + @param template_30Hz: file path of the template file for 30Hz + @param time_interval: time step in seconds + @param scan_index: scan index to use within the template. """ # Save options - options = dict(meas_run_30Hz=meas_ws_30Hz.getRun()['run_number'].value, - ref_run_30Hz=ref_run_30Hz, ref_data_60Hz=ref_data_60Hz, - template_30Hz=template_30Hz, time_interval=time_interval, - output_dir=output_dir, scan_index=scan_index, - template_reference=template_reference, q_summing=q_summing) - with open(os.path.join(output_dir, 'options.json'), 'w') as fp: + options = dict( + meas_run_30Hz=meas_ws_30Hz.getRun()["run_number"].value, + ref_run_30Hz=ref_run_30Hz, + ref_data_60Hz=ref_data_60Hz, + template_30Hz=template_30Hz, + time_interval=time_interval, + output_dir=output_dir, + scan_index=scan_index, + template_reference=template_reference, + q_summing=q_summing, + ) + with open(os.path.join(output_dir, "options.json"), "w") as fp: json.dump(options, fp) # Load the template @@ -147,16 +185,16 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 # Reduce the sample data at 30Hz print("Reading sample data at 30Hz") - #meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) + # meas_ws_30Hz = api.LoadEventNexus("REF_L_%s" % meas_run_30Hz) # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws_30Hz.getRun()['duration'].value + duration = meas_ws_30Hz.getRun()["duration"].value except: duration = 0 try: - meas_run_30Hz = meas_ws_30Hz.getRun()['run_number'].value + meas_run_30Hz = meas_ws_30Hz.getRun()["run_number"].value except: meas_run_30Hz = 0 @@ -164,17 +202,19 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws_30Hz, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws_30Hz, + api.FilterEvents( + InputWorkspace=meas_ws_30Hz, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, 
- OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = wsgroup.getNames() @@ -188,50 +228,60 @@ def reduce_30Hz_slices_ws(meas_ws_30Hz, ref_run_30Hz, ref_data_60Hz, template_30 tmpws = api.mtd[name] print("workspace %s has %d events" % (name, tmpws.getNumberEvents())) try: - _reduced = reduce_30Hz_from_ws(tmpws, ref_ws_30Hz, data_60Hz, template_data, - scan_index=scan_index, template_reference=template_reference, - q_summing=q_summing) + _reduced = reduce_30Hz_from_ws( + tmpws, + ref_ws_30Hz, + data_60Hz, + template_data, + scan_index=scan_index, + template_reference=template_reference, + q_summing=q_summing, + ) # Remove first point reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run_30Hz, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run_30Hz, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print("reduce_30Hz_slices_ws: %s" % sys.exc_info()[0]) total_time += time_interval # Save output - output_file = os.path.join(output_dir, 'r%s-time-resolved.json' % meas_run_30Hz) + output_file = os.path.join(output_dir, "r%s-time-resolved.json" % meas_run_30Hz) print("Saving t-NR to %s" % output_file) package_json_data(meas_run_30Hz, output_dir, out_array=output_file) if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run_30Hz)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run_30Hz), + ) return reduced -def reduce_slices_ws(meas_ws, template_file, - time_interval, output_dir, scan_index=1, - theta_offset=0, theta_value=None, create_plot=True): + +def reduce_slices_ws(meas_ws, template_file, time_interval, output_dir, scan_index=1, theta_offset=0, theta_value=None, create_plot=True): """ - Perform time-resolved reduction - :param meas_ws: workspace of the data we want to reduce - :param template_file: autoreduction template file - :param time_interval: time step in seconds - :param scan_index: scan index to use within the template. - :param theta_value: force theta value - :param theta_offset: add a theta offset, defaults to zero + Perform time-resolved reduction + :param meas_ws: workspace of the data we want to reduce + :param template_file: autoreduction template file + :param time_interval: time step in seconds + :param scan_index: scan index to use within the template. 
+ :param theta_value: force theta value + :param theta_offset: add a theta offset, defaults to zero """ # Save options - options = dict(meas_run=meas_ws.getRun()['run_number'].value, - template=template_file, - time_interval=time_interval, - output_dir=output_dir, - scan_index=scan_index, - theta_value=theta_value, - theta_offset=theta_offset) - with open(os.path.join(output_dir, 'options.json'), 'w') as fp: + options = dict( + meas_run=meas_ws.getRun()["run_number"].value, + template=template_file, + time_interval=time_interval, + output_dir=output_dir, + scan_index=scan_index, + theta_value=theta_value, + theta_offset=theta_offset, + ) + with open(os.path.join(output_dir, "options.json"), "w") as fp: json.dump(options, fp) # Load the template @@ -246,11 +296,11 @@ def reduce_slices_ws(meas_ws, template_file, # Some meta-data are not filled in for the live data stream # Use dummy values for those try: - duration = meas_ws.getRun()['duration'].value + duration = meas_ws.getRun()["duration"].value except: duration = 0 try: - meas_run = meas_ws.getRun()['run_number'].value + meas_run = meas_ws.getRun()["run_number"].value except: meas_run = 0 @@ -263,17 +313,19 @@ def reduce_slices_ws(meas_ws, template_file, print("Slicing data") splitws, infows = api.GenerateEventsFilter(InputWorkspace=meas_ws, TimeInterval=time_interval) - api.FilterEvents(InputWorkspace=meas_ws, + api.FilterEvents( + InputWorkspace=meas_ws, SplitterWorkspace=splitws, InformationWorkspace=infows, - OutputWorkspaceBaseName='time_ws', + OutputWorkspaceBaseName="time_ws", GroupWorkspaces=True, - FilterByPulseTime = True, - OutputWorkspaceIndexedFrom1 = True, - CorrectionToSample = "None", - SpectrumWithoutDetector = "Skip", - SplitSampleLogs = False, - OutputTOFCorrectionWorkspace='mock') + FilterByPulseTime=True, + OutputWorkspaceIndexedFrom1=True, + CorrectionToSample="None", + SpectrumWithoutDetector="Skip", + SplitSampleLogs=False, + OutputTOFCorrectionWorkspace="mock", + ) wsgroup = api.mtd["time_ws"] wsnames = wsgroup.getNames() @@ -293,9 +345,7 @@ def reduce_slices_ws(meas_ws, template_file, tmpws = api.mtd[name] print("workspace %s has %d events" % (name, tmpws.getNumberEvents())) try: - _reduced = template.process_from_template_ws(tmpws, template_data, - theta_value=theta_value, - ws_db=ws_db) + _reduced = template.process_from_template_ws(tmpws, template_data, theta_value=theta_value, ws_db=ws_db) dq0 = 0 dq_slope = compute_resolution(tmpws) @@ -304,65 +354,66 @@ def reduce_slices_ws(meas_ws, template_file, _reduced = np.asarray(_reduced) reduced.append(_reduced) - _filename = 'r{0}_t{1:06d}.txt'.format(meas_run, int(total_time)) + _filename = "r{0}_t{1:06d}.txt".format(meas_run, int(total_time)) np.savetxt(os.path.join(output_dir, _filename), _reduced.T) except: print("reduce_slices_ws: %s" % sys.exc_info()[0]) total_time += time_interval if create_plot: - plot_slices(reduced, title='Duration: %g seconds' % duration, - time_interval=time_interval, - file_path=os.path.join(output_dir, 'r%s.png' % meas_run)) + plot_slices( + reduced, + title="Duration: %g seconds" % duration, + time_interval=time_interval, + file_path=os.path.join(output_dir, "r%s.png" % meas_run), + ) return reduced def plot_slices(reduced, title, time_interval, file_path, offset=10, show=True): - fig, ax = plt.subplots(figsize=(6,6)) + fig, ax = plt.subplots(figsize=(6, 6)) total_time = 0 - _running_offset = 1. 
+ _running_offset = 1.0 for _data in reduced: qz, refl, d_refl, _ = _data - plt.errorbar(qz, refl*_running_offset, yerr=d_refl*_running_offset, markersize=4, marker='o', - label='T=%g s' % total_time) + plt.errorbar(qz, refl * _running_offset, yerr=d_refl * _running_offset, markersize=4, marker="o", label="T=%g s" % total_time) total_time += time_interval _running_offset *= offset plt.legend() plt.title(title) - plt.xlabel(r'q [$1/\AA$]') - plt.ylabel('R(q)') - ax.set_yscale('log') - ax.set_xscale('log') + plt.xlabel(r"q [$1/\AA$]") + plt.ylabel("R(q)") + ax.set_yscale("log") + ax.set_xscale("log") if show: plt.show() plt.savefig(file_path) def package_json_data(dynamic_run, dyn_data_dir, out_array=None): - compiled_array = [] compiled_times = [] _file_list = sorted(os.listdir(dyn_data_dir)) # Get only the files for the run we're interested in - _good_files = [_f for _f in _file_list if _f.startswith('r%s_t' % dynamic_run)] + _good_files = [_f for _f in _file_list if _f.startswith("r%s_t" % dynamic_run)] for i, _file in enumerate(_good_files): - if _file.startswith('r%s_t' % dynamic_run): + if _file.startswith("r%s_t" % dynamic_run): _data = np.loadtxt(os.path.join(dyn_data_dir, _file)).T _data_name, _ = os.path.splitext(_file) - _time = int(_data_name.replace('r%s_t' % dynamic_run, '')) + _time = int(_data_name.replace("r%s_t" % dynamic_run, "")) compiled_array.append(_data.tolist()) compiled_times.append(_time) if out_array: - with open(out_array, 'w') as fp: + with open(out_array, "w") as fp: json.dump(dict(times=compiled_times, data=compiled_array), fp) return compiled_times, compiled_array From 122769d6920023ef7c23e51a11b9793b522b1186 Mon Sep 17 00:00:00 2001 From: Mathieu Doucet Date: Wed, 8 Jan 2025 17:00:54 -0500 Subject: [PATCH 5/5] Add reduction --- docs/index.md | 1 + docs/user/event_processing.md | 100 ++++++++++++++++++++++++++++++++-- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/docs/index.md b/docs/index.md index 0b355fd..90eb34e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -3,6 +3,7 @@ ## User Guide - [Workflow overview](./user/workflow.md) +- [Event processing](./user/event_processing.md) - [Conda Environments](./user/conda_environments.md) - [Releases](./releases.md) diff --git a/docs/user/event_processing.md b/docs/user/event_processing.md index eb625d7..910837f 100644 --- a/docs/user/event_processing.md +++ b/docs/user/event_processing.md @@ -43,7 +43,6 @@ computed dead time correction distribution. [1] V. Bécares, J. Blázquez, Detector Dead Time Determination and OptimalCounting Rate for a Detector Near a Spallation Source ora Subcritical Multiplying System, Science and Technology of Nuclear Installations, 2012, 240693, https://doi.org/10.1155/2012/240693 - ### Correct for emission time Since neutrons of different wavelength will spend different amount of time on average within the moderator, a linear approximation is used by the data acquisition system to @@ -51,20 +50,111 @@ account for emission time when phasing choppers. The time of flight for each event $i$ is corrected by an small value given by -$\Delta t_i = -t_{off} + t_{mult} * t_i * h L / m_n$ +$\Delta t_i = -t_{off} + \frac{h L}{m_n} A t_i $ where $h$ is Planck's constant, $m_n$ is the mass of the neutron, and $L$ is the distance between the moderator and the detector. 
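+As a rough illustration, the sketch below simply evaluates the expression above for an array of
+times of flight. It is not the reduction code: the function and parameter names are made up for
+the example, the units of $A$ must match what the data acquisition system stores, and the sign
+with which the offset is applied to the measured time of flight should be checked against the
+actual implementation.
+
+```python
+import numpy as np
+from scipy import constants
+
+
+def emission_time_offset(tof, t_off, a_coeff, moderator_to_detector):
+    """
+    Evaluate dt = -t_off + (h L / m_n) * A * t for an array of times of flight (illustrative only).
+
+    tof:                    times of flight [s]
+    t_off, a_coeff:         the t_off and A process variables read from the data file
+    moderator_to_detector:  the moderator-to-detector distance L
+    """
+    scale = constants.h * moderator_to_detector / constants.m_n
+    return -t_off + scale * a_coeff * np.asarray(tof)
+
+
+# Hypothetical usage, assuming the offset is subtracted from the measured time of flight
+tof = np.array([0.01, 0.02, 0.03])  # seconds
+tof_corrected = tof - emission_time_offset(tof, t_off=0.0, a_coeff=1.0, moderator_to_detector=15.0)
+```
+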
-The $t_{off}$, $t_{mult}$, and $L$ parameters are process variables that are stored in the
+The $t_{off}$, $A$, and $L$ parameters are process variables that are stored in the
 data file and can be changed in the data acquisition system.
-
 ### Gravity correction
+The reflected angle of each neutron is corrected for the effect of gravity following
+Campbell et al. [2]. This correction is applied individually to each neutron event according to its wavelength.
 
+[2] R.A. Campbell et al., Eur. Phys. J. Plus (2011) 126: 107. https://doi.org/10.1140/epjp/i2011-11107-8
+
+### Event selection
+Following the corrections described above, we are left with a list of events, each having
+a detector position ($p_x, p_y$) and a wavelength $\lambda$.
+As needed, regions of interest can be defined to identify the events to include in the specular
+reflectivity calculation and the events used to estimate and subtract the background.
+Event selection is performed before computing the reflectivity as described in the following sections.
 
 ### Q calculation
+The reflectivity $R(q)$ is computed by calculating the $q$ value for each event and histogramming
+the events in a predefined binning of the user's choice. This approach differs slightly from the
+traditional approach of binning events in TOF and then converting the TOF axis to $q$.
+The event-based approach allows us to bin directly into a $q$ binning of our choice and avoids
+the need for a final rebinning.
+
+The standard way of computing the reflected signal is simply to compute $q$ for each event $i$
+using the following equation:
+
+$q_{z, i} = \frac{4\pi}{\lambda_i}\sin(\theta - \delta_{g,i})$
+
+where $\delta_{g,i}$ is the angular offset caused by gravity.
+
+Once $q$ is computed for each neutron, the events can be histogrammed, taking into account the
+weight assigned to each event:
+
+$S(q_z) = \frac{1}{Q} \sum_{i \in q_z \pm \Delta{q_z}/2} w_i$
+
+where the sum is over all events falling in the $q_z$ bin of width $\Delta q_z$, $w_i$ is the
+weight of the $i^{th}$ event, and $Q$ is the integrated proton charge for the run. At this point
+we have an unnormalized $S(q_z)$, which remains to be corrected for the neutron flux.
 
 ### Constant-Q binning
+When using a divergent beam, or when measuring a warped sample, it may be beneficial to take
+into account where a neutron landed on the detector in order to recalculate its angle and its
+$q$ value.
+
+In this case, the $q_{z, i}$ equation above becomes:
+
+$q_{z, i} = \frac{4\pi}{\lambda_i}\sin(\theta + \delta_{f,i} - \delta_{g,i})$
+
+where $\delta_{f,i}$ is the angular offset between where the specular peak appears on the
+detector and where the neutron was detected:
+
+$\delta_{f,i} = \mathrm{sgn}(\theta)\arctan(d(p_i-p_{spec})/L_{det})/2$
+
+where $d$ is the size of a pixel, $p_i$ is the pixel where event $i$ was detected,
+$p_{spec}$ is the pixel at the center of the peak distribution, and $L_{det}$ is the distance
+between the sample and the detector. Care should be taken to assign the correct sign to
+the angle offset. For this reason, we include the sign of the scattering angle, $\mathrm{sgn}(\theta)$,
+in front of the previous equation to account for whether the beam is reflected up or down.
+
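+To make the event-based calculation concrete, the sketch below computes $q_{z,i}$ for each event
+and histograms the weighted events into a user-defined $q_z$ grid. This is only a schematic
+illustration, not the code used by the reduction package: the array and parameter names are
+assumptions for the example, and the instrument geometry (pixel size, sample-to-detector
+distance, peak position) must be supplied by the caller.
+
+```python
+import numpy as np
+
+
+def histogram_qz(wavelength, pixel, weights, theta, qz_edges,
+                 pixel_size, sample_det_distance, peak_pixel=None,
+                 delta_g=0.0, charge=1.0):
+    """
+    Histogram weighted events into a q_z grid (schematic).
+
+    wavelength: per-event wavelength [Angstrom]
+    pixel:      per-event pixel position along the reflection direction
+    weights:    per-event weights w_i
+    theta:      nominal scattering angle [rad]
+    qz_edges:   q_z bin edges [1/Angstrom]
+    delta_g:    per-event gravity offset [rad] (0 to ignore gravity)
+    charge:     integrated proton charge Q
+    """
+    if peak_pixel is None:
+        # Standard calculation: every event uses the nominal angle
+        delta_f = 0.0
+    else:
+        # Constant-q binning: per-event angle offset from the pixel position
+        delta_f = np.sign(theta) * np.arctan(pixel_size * (pixel - peak_pixel) / sample_det_distance) / 2.0
+    qz = 4.0 * np.pi / wavelength * np.sin(theta + delta_f - delta_g)
+    signal, _ = np.histogram(qz, bins=qz_edges, weights=weights)
+    return signal / charge
+```
+
+Passing `peak_pixel=None` reproduces the standard calculation, while providing the specular peak
+position switches to constant-$q$ binning as described above.
+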
-## Normalization options
\ No newline at end of file
+## Normalization options
+The scattering signal computed above needs to be normalized by the incoming flux in order
+to produce $R(q_z)$. For the simplest case, we follow the same procedure as above for the
+relevant direct beam run, using the same $q_z$ binning and replacing $\theta$ by the value at
+which the reflected beam was measured. We are then effectively computing what the measured
+signal would be if all neutrons from the beam reflected with a probability of 1. We refer to
+this distribution as $S_1(q_z)$.
+
+The measured reflectivity then becomes
+
+$$
+    R(q_z) = S(q_z) / S_1(q_z)
+$$
+
+This approach is equivalent to predetermining the TOF binning that would be needed to produce
+the $q_z$ binning we actually want, summing counts in TOF for both the scattered and direct beams,
+taking the ratio of the two, and finally converting TOF to $q_z$. The only difference is that we
+skip the intermediate TOF bins and assign events directly to the $q_z$ bins they will contribute
+to in the denominator used for normalization.
+
+### Normalization using weighted events
+An alternative approach to the normalization described above is also implemented at BL4B.
+It leverages the weighted-event approach. Using this approach, we simply histogram the direct
+beam events into a wavelength distribution. In such a histogram, each wavelength bin has
+a flux
+
+$$\phi(\lambda) = N_{\lambda} / Q / \Delta_{\lambda}$$
+
+where $N_{\lambda}$ is the number of neutrons in the bin centered at $\lambda$, $Q$ is the
+integrated proton charge, and $\Delta_{\lambda}$ is the wavelength bin width of the distribution.
+
+Coming back to the calculation of the reflected signal above, we can now add a new weight for
+each event according to the flux at its particular wavelength:
+
+$$
+w_i \rightarrow \frac{w_i}{\phi(\lambda_i)} \frac{q_{z,i}}{\lambda_i}
+$$
+
+where $\phi(\lambda)$ is interpolated from the distribution measured above. The $q_z/\lambda$
+term is the Jacobian accounting for the transformation from wavelength to $q$.
+With this new weight, we can compute the reflectivity directly from the $S(q_z)$ equation above:
+
+$$
+R(q_z) = \frac{1}{Q} \sum_{i \in q_z \pm \Delta{q_z}/2} \frac{w_i}{\phi(\lambda_i)} \frac{q_{z,i}}{\lambda_i}
+$$
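+
+As a final illustration, the sketch below assembles these pieces: it builds $\phi(\lambda)$ from
+the direct beam events, interpolates it at each reflected-beam event wavelength, and accumulates
+the reweighted events into the $q_z$ histogram. As before, the names and binning choices are
+assumptions made for the example rather than the package's actual implementation.
+
+```python
+import numpy as np
+
+
+def weighted_event_reflectivity(qz, wavelength, weights, charge,
+                                db_wavelength, db_charge, qz_edges, n_lambda_bins=60):
+    """
+    R(q_z) from reflected-beam events normalized by a direct-beam wavelength distribution (schematic).
+
+    qz, wavelength, weights: per-event q_z, wavelength and weight w_i for the reflected beam
+    charge:                  integrated proton charge Q of the reflected-beam run
+    db_wavelength:           per-event wavelengths of the direct beam run
+    db_charge:               integrated proton charge of the direct beam run
+    """
+    # Direct-beam flux per wavelength bin: phi = N_lambda / Q / delta_lambda
+    counts, lam_edges = np.histogram(db_wavelength, bins=n_lambda_bins)
+    delta_lambda = lam_edges[1] - lam_edges[0]
+    lam_centers = 0.5 * (lam_edges[:-1] + lam_edges[1:])
+    phi = counts / db_charge / delta_lambda
+
+    # Interpolate the flux at each reflected-beam event wavelength.
+    # In practice, events falling in wavelength bins with no direct-beam counts
+    # should be excluded to avoid dividing by zero.
+    phi_i = np.interp(wavelength, lam_centers, phi)
+
+    # Reweight each event by 1/phi and the q_z/lambda Jacobian, then histogram
+    new_weights = weights / phi_i * qz / wavelength
+    refl, _ = np.histogram(qz, bins=qz_edges, weights=new_weights)
+    return refl / charge
+```
\ No newline at end of file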