diff --git a/reduction/data/sf_197912_Si_dt_par_42_200.cfg b/reduction/data/sf_197912_Si_dt_par_42_200.cfg new file mode 100644 index 0000000..e751d6a --- /dev/null +++ b/reduction/data/sf_197912_Si_dt_par_42_200.cfg @@ -0,0 +1,14 @@ +# y=a+bx +# +# LambdaRequested[Angstroms] S1H[mm] (S2/Si)H[mm] S1W[mm] (S2/Si)W[mm] a b error_a error_b +# +# Medium=Si, runs: [197920, 197921, 197922, 197923, 197924, 197925, 197926, 197927, 197928, 197929, 197930, 197931] +IncidentMedium=Si LambdaRequested=9.74 S1H=0.391 S2iH=0.24999999999999978 S1W=20.005 S2iW=20.000084375 a=1.0888766996055554 b=-5.6872177686659006e-08 error_a=161.84564441183014 error_b=0.003988540742274152 +IncidentMedium=Si LambdaRequested=7.043 S1H=0.39 S2iH=0.24999999999999978 S1W=19.952000000000005 S2iW=19.950864375000002 a=7.341821040427503 b=-6.012040147643712e-06 error_a=741.2376390289155 error_b=0.024193504430316405 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.39 S2iH=0.24999999999999978 S1W=8.779 S2iW=8.780164375000002 a=9.320962216410319 b=-3.708412514404863e-05 error_a=606.2605886476512 error_b=0.03099199091035445 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.39 S2iH=0.24999999999999978 S1W=20.002 S2iW=19.999844375000002 a=30.370149781266015 b=9.763444187558825e-05 error_a=2680.8766315704106 error_b=0.1365883376903538 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.77 S2iH=0.49312 S1W=12.489000000000004 S2iW=12.485564375000003 a=62.89965009567662 b=0.0001557592235018875 error_a=5488.459056149113 error_b=0.27948760026302427 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.774 S2iH=0.4930399999999997 S1W=20.001000000000005 S2iW=20.000244375 a=120.9852469160573 b=0.00020691590164400054 error_a=11657.27959807004 error_b=0.5869489140907286 +IncidentMedium=Si LambdaRequested=4.25 S1H=1.525 S2iH=0.976 S1W=16.384000000000004 S2iW=16.395444375000004 a=356.40436234906264 b=0.0018275069497976878 error_a=36527.53453159627 error_b=1.8464457216875345 +IncidentMedium=Si LambdaRequested=4.25 S1H=1.528 S2iH=0.976 S1W=20.004 S2iW=20.000244375 a=455.2870337782051 b=0.0018152748579145265 error_a=50088.87968853836 error_b=2.508098158009919 +IncidentMedium=Si LambdaRequested=4.25 S1H=3.016 S2iH=1.9319199999999999 S1W=20.003000000000004 S2iW=20.000164375 a=2380.7392962875742 b=-0.03222957069862086 error_a=184998.5077522072 error_b=9.063057308166101 diff --git a/reduction/data/sf_197912_Si_dt_par_46_300.cfg b/reduction/data/sf_197912_Si_dt_par_46_300.cfg new file mode 100644 index 0000000..e751d6a --- /dev/null +++ b/reduction/data/sf_197912_Si_dt_par_46_300.cfg @@ -0,0 +1,14 @@ +# y=a+bx +# +# LambdaRequested[Angstroms] S1H[mm] (S2/Si)H[mm] S1W[mm] (S2/Si)W[mm] a b error_a error_b +# +# Medium=Si, runs: [197920, 197921, 197922, 197923, 197924, 197925, 197926, 197927, 197928, 197929, 197930, 197931] +IncidentMedium=Si LambdaRequested=9.74 S1H=0.391 S2iH=0.24999999999999978 S1W=20.005 S2iW=20.000084375 a=1.0888766996055554 b=-5.6872177686659006e-08 error_a=161.84564441183014 error_b=0.003988540742274152 +IncidentMedium=Si LambdaRequested=7.043 S1H=0.39 S2iH=0.24999999999999978 S1W=19.952000000000005 S2iW=19.950864375000002 a=7.341821040427503 b=-6.012040147643712e-06 error_a=741.2376390289155 error_b=0.024193504430316405 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.39 S2iH=0.24999999999999978 S1W=8.779 S2iW=8.780164375000002 a=9.320962216410319 b=-3.708412514404863e-05 error_a=606.2605886476512 error_b=0.03099199091035445 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.39 S2iH=0.24999999999999978 S1W=20.002 S2iW=19.999844375000002 a=30.370149781266015 
b=9.763444187558825e-05 error_a=2680.8766315704106 error_b=0.1365883376903538 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.77 S2iH=0.49312 S1W=12.489000000000004 S2iW=12.485564375000003 a=62.89965009567662 b=0.0001557592235018875 error_a=5488.459056149113 error_b=0.27948760026302427 +IncidentMedium=Si LambdaRequested=4.25 S1H=0.774 S2iH=0.4930399999999997 S1W=20.001000000000005 S2iW=20.000244375 a=120.9852469160573 b=0.00020691590164400054 error_a=11657.27959807004 error_b=0.5869489140907286 +IncidentMedium=Si LambdaRequested=4.25 S1H=1.525 S2iH=0.976 S1W=16.384000000000004 S2iW=16.395444375000004 a=356.40436234906264 b=0.0018275069497976878 error_a=36527.53453159627 error_b=1.8464457216875345 +IncidentMedium=Si LambdaRequested=4.25 S1H=1.528 S2iH=0.976 S1W=20.004 S2iW=20.000244375 a=455.2870337782051 b=0.0018152748579145265 error_a=50088.87968853836 error_b=2.508098158009919 +IncidentMedium=Si LambdaRequested=4.25 S1H=3.016 S2iH=1.9319199999999999 S1W=20.003000000000004 S2iW=20.000164375 a=2380.7392962875742 b=-0.03222957069862086 error_a=184998.5077522072 error_b=9.063057308166101 diff --git a/reduction/lr_reduction/scaling_factors/LRDirectBeamSort.py b/reduction/lr_reduction/scaling_factors/LRDirectBeamSort.py new file mode 100644 index 0000000..1629e66 --- /dev/null +++ b/reduction/lr_reduction/scaling_factors/LRDirectBeamSort.py @@ -0,0 +1,362 @@ +# Mantid Repository : https://github.com/mantidproject/mantid +# +# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI, +# NScD Oak Ridge National Laboratory, European Spallation Source, +# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS +# SPDX - License - Identifier: GPL - 3.0 + +# pylint: disable=no-init,invalid-name +from mantid.api import * +from mantid.simpleapi import * +from mantid.kernel import * +import functools +import numpy as np +from typing import List, Tuple +import datetime +from math import ceil + +THI_TOLERANCE = 0.002 + +from . 
import LRScalingFactors + + +class CompareTwoNXSDataForSFcalculator(object): + """ + will return -1, 0 or 1 according to the position of the nexusToPosition in relation to the + nexusToCompareWith based on the following criteria + #1: number of attenuators (ascending order) + #2: lambda requested (descending order) + #3: S2W (ascending order) + #4: S2H (descending order) + #5 if everything up to this point is identical, return 0 + """ + + nexusToCompareWithRun = None + nexusToPositionRun = None + resultComparison = 0 + + def __init__(self, nxsdataToCompareWith, nxsdataToPosition): + self.nexusToCompareWithRun = nxsdataToCompareWith.getRun() + self.nexusToPositionRun = nxsdataToPosition.getRun() + + compare = self.compareParameter("LambdaRequest", "descending") + if compare != 0: + self.resultComparison = compare + return + + compare = self.compareParameter("thi", "descending", tolerance=THI_TOLERANCE) + if compare != 0: + self.resultComparison = compare + return + + compare = self.compareParameter("vAtt", "ascending") + if compare != 0: + self.resultComparison = compare + return + + pcharge1 = self.nexusToCompareWithRun.getProperty("gd_prtn_chrg").value / nxsdataToCompareWith.getNEvents() + pcharge2 = self.nexusToPositionRun.getProperty("gd_prtn_chrg").value / nxsdataToPosition.getNEvents() + + self.resultComparison = -1 if pcharge1 < pcharge2 else 1 + + def compareParameter(self, param, order, tolerance=0.0): + """ + Compare parameters for the two runs + :param string param: name of the parameter to compare + :param string order: ascending or descending + :param float tolerance: tolerance to apply to the comparison [optional] + """ + _nexusToCompareWithRun = self.nexusToCompareWithRun + _nexusToPositionRun = self.nexusToPositionRun + + _paramNexusToCompareWith = float(_nexusToCompareWithRun.getProperty(param).value[0]) + _paramNexusToPosition = float(_nexusToPositionRun.getProperty(param).value[0]) + + if abs(_paramNexusToPosition - _paramNexusToCompareWith) <= tolerance: + return 0 + + if order == "ascending": + resultLessThan = -1 + resultMoreThan = 1 + else: + resultLessThan = 1 + resultMoreThan = -1 + + if _paramNexusToPosition < _paramNexusToCompareWith: + return resultLessThan + elif _paramNexusToPosition > _paramNexusToCompareWith: + return resultMoreThan + else: + return 0 + + def result(self): + return self.resultComparison + + +def sorter_function(r1, r2): + """ + Sorter function used by with the 'sorted' call to sort the direct beams. + """ + return CompareTwoNXSDataForSFcalculator(r2, r1).result() + + +class LRDirectBeamSort(PythonAlgorithm): + def category(self): + return "Reflectometry\\SNS" + + def name(self): + return "LRDirectBeamSort" + + def version(self): + return 2 + + def summary(self): + return "Sort a set of direct beams for the purpose of calculating scaling factors." 
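+    # Note (illustrative, names as used in PyExec below): when
+    # OrderDirectBeamsByRunNumber is False, the direct beams are ordered with
+    # the comparator defined above, i.e.
+    #     sorted(lr_data, key=functools.cmp_to_key(sorter_function))
+    # which applies the criteria of CompareTwoNXSDataForSFcalculator
+    # (LambdaRequest, thi within THI_TOLERANCE, vAtt, then proton charge per event).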
+ + def PyInit(self): + self.declareProperty( + IntArrayProperty("RunList", [], direction=Direction.Input), + "List of run numbers (integers) to be sorted - takes precedence over WorkspaceList", + ) + self.declareProperty(StringArrayProperty("WorkspaceList", [], direction=Direction.Input), "List of workspace names to be sorted") + self.declareProperty( + "UseLowResCut", False, direction=Direction.Input, doc="If True, an x-direction cut will be determined and used" + ) + self.declareProperty("ComputeScalingFactors", True, direction=Direction.Input, doc="If True, the scaling factors will be computed") + self.declareProperty("TOFSteps", 200.0, doc="TOF bin width") + self.declareProperty("WavelengthOffset", 0.0, doc="Wavelength offset used for TOF range determination") + self.declareProperty("IncidentMedium", "Air", doc="Name of the incident medium") + self.declareProperty("OrderDirectBeamsByRunNumber", False, "Force the sequence of direct beam files to be ordered by run number") + self.declareProperty( + FileProperty("ScalingFactorFile", "", action=FileAction.OptionalSave, extensions=["cfg"]), "Scaling factor file to be created" + ) + self.declareProperty(IntArrayProperty("OrderedRunList", [], direction=Direction.Output), "Ordered list of run numbers") + self.declareProperty( + StringArrayProperty("OrderedNameList", [], direction=Direction.Output), + "Ordered list of workspace names corresponding to the run list", + ) + self.declareProperty("SlitTolerance", 0.02, doc="Tolerance for matching slit positions") + self.declareProperty("UseDeadTimeCorrection", False, doc="If True, correct for dead time") + self.declareProperty("ParalyzableDeadTime", True, doc="Use paralyzable dead time correction") + self.declareProperty("DeadTime", 4.2, doc="Dead time value") + self.declareProperty("DeadTimeTOFStep", 200., doc="TOF step to bin into for dead time") + + def PyExec(self): + compute = self.getProperty("ComputeScalingFactors").value + lr_data = [] + run_list = self.getProperty("RunList").value + if len(run_list) > 0: + for run in run_list: + workspace = LoadEventNexus(Filename="REF_L_%s" % run, OutputWorkspace="__data_file_%s" % run, MetaDataOnly=not compute) + lr_data.append(workspace) + else: + ws_list = self.getProperty("WorkspaceList").value + for ws in ws_list: + lr_data.append(mtd[ws]) + + sort_by_runs = self.getProperty("OrderDirectBeamsByRunNumber").value + if sort_by_runs is True: + lr_data_sorted = sorted(lr_data, key=lambda r: r.getRunNumber()) + else: + lr_data_sorted = sorted(lr_data, key=functools.cmp_to_key(sorter_function)) + + # Set the output properties + run_numbers = [r.getRunNumber() for r in lr_data_sorted] + ws_names = [str(r) for r in lr_data_sorted] + self.setProperty("OrderedRunList", run_numbers) + self.setProperty("OrderedNameList", ws_names) + + # Compute the scaling factors if requested + if compute: + sf_file = self.getProperty("ScalingFactorFile").value + if len(sf_file) == 0: + logger.error("Scaling factors were requested but no output file was set") + else: + self._compute_scaling_factors(lr_data_sorted) + + def _compute_scaling_factors(self, lr_data_sorted): + """ + If we need to compute the scaling factors, group the runs by their wavelength request + @param lr_data_sorted: ordered list of workspaces + """ + group_list = [] + current_group = [] + _current_wl = None + _current_thi = None + for r in lr_data_sorted: + wl_ = r.getRun().getProperty("LambdaRequest").value[0] + thi = r.getRun().getProperty("thi").value[0] + + if _current_thi is None or abs(thi - 
_current_thi) > THI_TOLERANCE or not _current_wl == wl_: + # New group + _current_wl = wl_ + _current_thi = thi + if len(current_group) > 0: + group_list.append(current_group) + current_group = [] + + current_group.append(r) + + # Add in the last group + group_list.append(current_group) + + tof_steps = self.getProperty("TOFSteps").value + scaling_file = self.getProperty("ScalingFactorFile").value + # use_low_res_cut = self.getProperty("UseLowResCut").value + incident_medium = self.getProperty("IncidentMedium").value + summary = "" + for g in group_list: + if len(g) == 0: + continue + + direct_beam_runs = [] + peak_ranges = [] + x_ranges = [] + bck_ranges = [] + + for run in g: + + peak, low_res = self._find_peak(run) # , use_low_res_cut) + + att = run.getRun().getProperty("vAtt").value[0] - 1 + wl = run.getRun().getProperty("LambdaRequest").value[0] + thi = run.getRun().getProperty("thi").value[0] + direct_beam_runs.append(run.getRunNumber()) + peak_ranges.append(int(peak[0])) + peak_ranges.append(int(peak[1])) + x_ranges.append(int(low_res[0])) + x_ranges.append(int(low_res[1])) + bck_ranges.append(int(peak[0]) - 3) + bck_ranges.append(int(peak[1]) + 3) + + summary += "%10s wl=%5s thi=%5s att=%s %5s,%5s %5s,%5s\n" % ( + run.getRunNumber(), + wl, + thi, + att, + peak[0], + peak[1], + low_res[0], + low_res[1], + ) + + # Determine TOF range from first file + sample = g[0].getInstrument().getSample() + source = g[0].getInstrument().getSource() + source_sample_distance = sample.getDistance(source) + detector = g[0].getDetector(0) + sample_detector_distance = detector.getPos().getZ() + source_detector_distance = source_sample_distance + sample_detector_distance + h = 6.626e-34 # m^2 kg s^-1 + m = 1.675e-27 # kg + wl = g[0].getRun().getProperty("LambdaRequest").value[0] + chopper_speed = g[0].getRun().getProperty("SpeedRequest1").value[0] + wl_offset = self.getProperty("WavelengthOffset").value + tof_min = source_detector_distance / h * m * (wl + wl_offset * 60.0 / chopper_speed - 1.7 * 60.0 / chopper_speed) * 1e-4 + tof_max = source_detector_distance / h * m * (wl + wl_offset * 60.0 / chopper_speed + 1.7 * 60.0 / chopper_speed) * 1e-4 + tof_range = [tof_min, tof_max] + + summary += " TOF: %s\n\n" % tof_range + + # Compute the scaling factors + logger.notice("Computing scaling factors for %s" % str(direct_beam_runs)) + slit_tolerance = self.getProperty("SlitTolerance").value + + use_deadtime = self.getProperty("UseDeadTimeCorrection").value + paralyzable = self.getProperty("ParalyzableDeadTime").value + deadtime = self.getProperty("DeadTime").value + deadtime_step = self.getProperty("DeadTimeTOFStep").value + + algo = LRScalingFactors.LRScalingFactors() + algo.PyInit() + algo.setProperty("DirectBeamRuns", direct_beam_runs) + algo.setProperty("TOFRange", tof_range) + algo.setProperty("TOFSteps", tof_steps) + algo.setProperty("SignalPeakPixelRange", peak_ranges) + algo.setProperty("SignalBackgroundPixelRange", bck_ranges) + algo.setProperty("LowResolutionPixelRange", x_ranges) + algo.setProperty("IncidentMedium", incident_medium) + algo.setProperty("SlitTolerance", slit_tolerance) + algo.setProperty("ScalingFactorFile", scaling_file) + algo.setProperty("DirectBeamRuns", direct_beam_runs) + algo.setProperty("UseDeadTimeCorrection", use_deadtime) + algo.setProperty("ParalyzableDeadTime", paralyzable) + algo.setProperty("DeadTime", deadtime) + algo.setProperty("DeadTimeTOFStep", deadtime_step) + algo.PyExec() + + # log output summary + logger.notice(summary) + + @staticmethod + def _find_peak(ws, 
crop=25, factor=1.0) -> Tuple[List[int], List[int]]: + """Find peak by Mantid FindPeaks with Gaussian peak in the counts + summed from detector pixels on the same row. + + Assumption + 1. The maximum count is belonged to the real peak + + Parameters + ---------- + ws: MatrixWorkspace + workspace to find peak + crop: int + number of pixels to crop out at the edge of detector + factor: float + multiplier factor to extend from peak width to peak range + + Returns + ------- + tuple + peak range, low resolution range + + """ + # Sum detector counts into 1D + y = ws.extractY() + y = np.reshape(y, (256, 304, y.shape[1])) + p_vs_t = np.sum(y, axis=0) + signal = np.sum(p_vs_t, axis=1) + + # Max index as the "observed" peak center + max_index = np.argmax(signal) + + # Fit peak by Gaussian + # create workspace + now = datetime.datetime.now() + ws_name = f"REL{now.hour:02}{now.minute:02}{now.second:02}{now.microsecond:04}.dat" + CreateWorkspace(DataX=np.arange(len(signal)), DataY=signal, DataE=np.sqrt(signal), OutputWorkspace=ws_name) + + # prepare fitting + model_ws_name = f"{ws_name}_model" + param_ws_name = f"{ws_name}_parameter" + peak_ws_name = f"{ws_name}_peaks" + + FitPeaks( + InputWorkspace=ws_name, + OutputWorkspace=peak_ws_name, + PeakCenters=f"{max_index}", + FitWindowBoundaryList=f"{crop},{signal.shape[0]-crop}", + HighBackground=False, + ConstrainPeakPositions=False, + FittedPeaksWorkspace=model_ws_name, + OutputPeakParametersWorkspace=param_ws_name, + RawPeakParameters=False, + ) + + # Retrieve value + peak_width = mtd[param_ws_name].cell(0, 3) + peak_center = mtd[param_ws_name].cell(0, 2) + + info_str = f"{ws}: Max = {max_index}, Peak center = {peak_center}, Width = {peak_width}" + logger.notice(info_str) + + # Form output + peak = [int(peak_center - factor * peak_width), int(ceil(peak_center + factor * peak_width))] + + # Delete workspaces + for ws_name in [peak_ws_name, model_ws_name, param_ws_name]: + DeleteWorkspace(ws_name) + + return peak, [0, 255] + + +AlgorithmFactory.subscribe(LRDirectBeamSort) diff --git a/reduction/lr_reduction/scaling_factors/LRScalingFactors.py b/reduction/lr_reduction/scaling_factors/LRScalingFactors.py new file mode 100644 index 0000000..5a36b9c --- /dev/null +++ b/reduction/lr_reduction/scaling_factors/LRScalingFactors.py @@ -0,0 +1,540 @@ +# Mantid Repository : https://github.com/mantidproject/mantid +# +# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI, +# NScD Oak Ridge National Laboratory, European Spallation Source, +# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS +# SPDX - License - Identifier: GPL - 3.0 + +# pylint: disable=invalid-name, no-init +import os +import re +from mantid.api import * +from mantid.simpleapi import * +from mantid.kernel import * + +from lr_reduction import DeadTimeCorrection + + +class LRScalingFactors(PythonAlgorithm): + """ + This algorithm runs through a sequence of direct beam data sets + to extract scaling factors. The method was developed by J. Ankner (ORNL). + + As we loop through, we find matching data sets with the only + difference between the two is an attenuator. + The ratio of those matching data sets allows use to rescale + a direct beam run taken with a larger number of attenuators + to a standard data set taken with tighter slit settings and + no attenuators. 
+ + The normalization run for a data set taken in a given slit setting + configuration can then be expressed in terms of the standard 0-attenuator + data set with: + D_i = F_i D_0 + + Here's an example of runs and how they are related to F. + + run: 55889, att: 0, s1: 0.26, s2: 0.26 + run: 55890, att: 1, s1: 0.26, s2: 0.26 + run: 55891, att: 1, s1: 0.33, s2: 0.26 --> F = 55891 / 55890 + run: 55892, att: 1, s1: 0.45, s2: 0.26 --> F = 55892 / 55890 + run: 55895, att: 1, s1: 0.81, s2: 0.26 + run: 55896, att: 2, s1: 0.81, s2: 0.26 + run: 55897, att: 2, s1: 1.05, s2: 0.35 --> F = 55897 / 55896 * 55895 / 55890 + + """ + + tolerance = 0.0 + + def category(self): + return "Reflectometry\\SNS" + + def name(self): + return "LiquidsReflectometryScalingFactors" + + def version(self): + return 2 + + def summary(self): + return "Liquids Reflectometer (REFL) scaling factor calculation" + + def PyInit(self): + self.declareProperty(IntArrayProperty("DirectBeamRuns", []), "Run number of the signal run to use") + self.declareProperty(IntArrayProperty("Attenuators", []), "Number of attenuators for each run") + self.declareProperty( + FloatArrayProperty("TOFRange", [10000.0, 35000.0], FloatArrayLengthValidator(2), direction=Direction.Input), "TOF range to use" + ) + self.declareProperty(IntArrayProperty("SignalPeakPixelRange", [150, 160]), "Pixel range defining the data peak") + self.declareProperty(IntArrayProperty("SignalBackgroundPixelRange", [147, 163]), "Pixel range defining the background") + self.declareProperty( + IntArrayProperty("LowResolutionPixelRange", [Property.EMPTY_INT, Property.EMPTY_INT], direction=Direction.Input), + "Pixel range defining the region to use in the low-resolution direction", + ) + self.declareProperty("IncidentMedium", "Medium", doc="Name of the incident medium") + self.declareProperty("FrontSlitName", "S1", doc="Name of the front slit") + self.declareProperty("BackSlitName", "Si", doc="Name of the back slit") + self.declareProperty("TOFSteps", 500.0, doc="TOF step size") + self.declareProperty("SlitTolerance", 0.02, doc="Tolerance for matching slit positions") + self.declareProperty("UseDeadTimeCorrection", True, doc="If True, counts will be corrected for dead time") + self.declareProperty("ParalyzableDeadTime", True, + doc="If true, paralyzable correction will be applied, non-paralyzing otherwise") + self.declareProperty("DeadTime", 4.2, doc="Dead time value") + self.declareProperty("DeadTimeTOFStep", 200., doc="TOF step to bin into for dead time") + self.declareProperty(FileProperty("ScalingFactorFile", "", action=FileAction.Save, extensions=["cfg"])) + + # pylint: disable=too-many-locals,too-many-branches + def PyExec(self): + # Verify whether we have a sorted list of runs. + data_runs = self.getProperty("DirectBeamRuns").value + + # We will be rejecting prompt pulses. 
We will store pulses here: + self.prompt_pulses = None + + # Get a valid attenuator array + self.read_attenuators_property(len(data_runs)) + + # Get peak ranges + peak_range = self.get_valid_pixel_range("SignalPeakPixelRange", len(data_runs)) + background_range = self.get_valid_pixel_range("SignalBackgroundPixelRange", len(data_runs)) + lowres_range = self.get_valid_pixel_range("LowResolutionPixelRange", len(data_runs)) + + # Slit information for the previous run (see loop below) + previous_slits = None + + # Previous processed workspace + previous_ws = None + + # Number of attenuators for the run being considered + n_attenuator = 0 + + # Transition sreferences used to propagate the attenuation + self.references = {} + + # Current wavelength value + self.wavelength = None + self.wavelength_tolerance = 0.2 + + # Slit settings tolerance + self.tolerance = self.getProperty("SlitTolerance").value + + # Scaling factor output + self.scaling_factors = [] + + # Run through the runs + for i in range(len(data_runs)): + run = data_runs[i] + workspace_name = "REF_L_%s" % int(run) + workspace = LoadEventNexus("REF_L_%s" % run, OutputWorkspace=workspace_name) + + # Get S1H, S2H, S1W, S2W + s1h, s1w, s2h, s2w = self.get_slit_settings(workspace) + + # Get wavelength, to make sure they all match across runs + self.validate_wavelength(workspace) + + # Get attenuators + current_att = n_attenuator + n_attenuator = self.get_attenuators(workspace, i) + if current_att > n_attenuator: + raise RuntimeError("Runs were not supplied in increasing number of attenuators.") + + self.process_data( + workspace, + peak_range=[int(peak_range[2 * i]), int(peak_range[2 * i + 1])], + background_range=[int(background_range[2 * i]), int(background_range[2 * i + 1])], + low_res_range=[int(lowres_range[2 * i]), int(lowres_range[2 * i + 1])], + ) + + is_reference = False + + # Matching slits with the previous run signals a reference run + if ( + i > 0 + and previous_slits is not None + and abs(previous_slits[0] - s1h) < self.tolerance + and abs(previous_slits[1] - s1w) < self.tolerance + and abs(previous_slits[2] - s2h) < self.tolerance + and abs(previous_slits[3] - s2w) < self.tolerance + ): + is_reference = True + + previous_slits = [s1h, s1w, s2h, s2w] + + # If the number of attenuators is zero, skip. + if n_attenuator == 0: + if 0 in self.references: + raise RuntimeError("More than one run with zero attenuator was supplied.") + self.references[0] = {"index": i, "run": run, "ref_ws": workspace_name, "ratio_ws": None, "diagnostics": str(run)} + previous_ws = workspace_name + continue + + if is_reference is True: + self.references[n_attenuator] = {"index": i, "run": run, "ref_ws": workspace_name} + # Compute ratio of the run with fewer attenuators and the + # reference with the same number of attenuators as that run. 
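+                # e.g. for the runs listed in the class docstring: when 55896
+                # (2 attenuators) is flagged as a reference because its slits
+                # match those of 55895, ScalingRatio_2 becomes 55895 / 55890,
+                # so a later run such as 55897 is scaled as
+                # 55897 / 55896 * 55895 / 55890.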
+ Divide( + LHSWorkspace=previous_ws, + RHSWorkspace=self.references[n_attenuator - 1]["ref_ws"], + OutputWorkspace="ScalingRatio_%s" % n_attenuator, + ) + self.references[n_attenuator]["diagnostics"] = "%s / %s" % (str(data_runs[i - 1]), self.references[n_attenuator - 1]["run"]) + # Multiply the result by the ratio for that run, and store + if self.references[n_attenuator - 1]["ratio_ws"] is not None: + Multiply( + LHSWorkspace=self.references[n_attenuator - 1]["ratio_ws"], + RHSWorkspace="ScalingRatio_%s" % n_attenuator, + OutputWorkspace="ScalingRatio_%s" % n_attenuator, + ) + self.references[n_attenuator]["diagnostics"] += " * %s" % self.references[n_attenuator - 1]["diagnostics"] + self.references[n_attenuator]["ratio_ws"] = "ScalingRatio_%s" % n_attenuator + + # If this is not a reference run, compute F + else: + self.compute_scaling_factor(run, n_attenuator, workspace_name) + + previous_ws = workspace_name + + # Log some useful information to track what happened + for item in self.scaling_factors: + log_info = "LambdaRequested=%s " % item["LambdaRequested"] + log_info += "S1H=%s " % item["S1H"] + log_info += "S2iH=%s " % item["S2iH"] + log_info += "S1W=%s " % item["S1W"] + log_info += "S2iW=%s " % item["S2iW"] + log_info += "a=%s " % item["a"] + log_info += "b=%s " % item["b"] + log_info += " | %s" % item["diagnostics"] + logger.information(log_info) + # Save the output in a configuration file + self.save_scaling_factor_file() + + def validate_wavelength(self, workspace): + """ + All supplied runs should have the same wavelength band. + Verify that it is the case or raise an exception. + + @param workspace: data set we are checking + """ + # Get the wavelength for the workspace being considered + wl = workspace.getRun().getProperty("LambdaRequest").value[0] + + # If this is the first workspace we process, set the our + # internal reference value to be used with the following runs. + if self.wavelength is None: + self.wavelength = wl + # Check that the wavelength is the same as our reference within tolerence. + elif abs(wl - self.wavelength) > self.wavelength_tolerance: + raise RuntimeError("Supplied runs don't have matching wavelengths.") + + def read_attenuators_property(self, number_of_runs): + """ + Return the number of attenuators for each run as an array. + Check whether the supplied attenuation array is of the same length, + otherwise we will read the number of attenuators from the logs. + + @param number_of_runs: number of runs we are processing + """ + self.attenuators = self.getProperty("Attenuators").value + self.have_attenuator_info = False + if len(self.attenuators) == 0: + logger.notice("No attenuator information supplied: will be determined.") + elif not len(self.attenuators) == number_of_runs: + logger.error("Attenuation list should be of the same length as the list of runs") + else: + self.have_attenuator_info = True + + def get_attenuators(self, workspace, expInfoIndex): + """ + @param workspace: workspace we are determining the number of attenuators for + @param expInfoIndex: index of the run in case we are getting the attenuators from the input properties + """ + if self.have_attenuator_info: + return self.attenuators[expInfoIndex] + else: + return int(workspace.getRun().getProperty("vAtt").value[0] - 1) + + def get_valid_pixel_range(self, property_name, number_of_runs): + """ + Return a valid pixel range we can use in our calculations. + The output is a list of length 2*number_of_runs. 
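+        For example, a two-element range such as [150, 160] supplied for three
+        runs is expanded to [150, 160, 150, 160, 150, 160].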
+ + @param property_name: name of the algorithm property specifying a pixel range + @param number_of_runs: number of runs we are processing + """ + pixel_range = self.getProperty(property_name).value + if len(pixel_range) == 2: + x_min = int(pixel_range[0]) + x_max = int(pixel_range[1]) + pixel_range = 2 * number_of_runs * [0] + for i in range(number_of_runs): + pixel_range[2 * i] = x_min + pixel_range[2 * i + 1] = x_max + elif len(pixel_range) < 2: + raise RuntimeError("%s should have a length of at least 2." % property_name) + + # Check that the peak range arrays are of the proper length + if not len(pixel_range) == 2 * number_of_runs: + raise RuntimeError("Supplied peak/background arrays should be of the same length as the run array.") + + return pixel_range + + def get_slit_settings(self, workspace): + """ + Return the slit settings + @param workspace: workspace to get the information from + """ + # Get the slit information + front_slit = self.getProperty("FrontSlitName").value + back_slit = self.getProperty("BackSlitName").value + + # Get S1H, S2H, S1W, S2W + s1h = abs(workspace.getRun().getProperty("%sVHeight" % front_slit).value[0]) + s1w = abs(workspace.getRun().getProperty("%sHWidth" % front_slit).value[0]) + try: + s2h = abs(workspace.getRun().getProperty("%sVHeight" % back_slit).value[0]) + s2w = abs(workspace.getRun().getProperty("%sHWidth" % back_slit).value[0]) + except RuntimeError: + # For backward compatibility with old code + logger.error("Specified slit could not be found: %s Trying S2" % back_slit) + s2h = abs(workspace.getRun().getProperty("S2VHeight").value[0]) + s2w = abs(workspace.getRun().getProperty("S2HWidth").value[0]) + return s1h, s1w, s2h, s2w + + def compute_scaling_factor(self, run, n_attenuator, workspace_name): + """ + Compute the scaling factor for this run. + @param run: run number we are working with + @param n_attenuator: number of attenuators for this run + @param workspace_name: name of processed workspace + """ + # Divide by the reference for this number of attenuators + # and multiply by the reference ratio + if n_attenuator not in self.references: + raise RuntimeError("No reference for %s attenuators: check run ordering." % n_attenuator) + f_ws = "F_%s_%s" % (run, n_attenuator) + Divide(LHSWorkspace=workspace_name, RHSWorkspace=self.references[n_attenuator]["ref_ws"], OutputWorkspace=f_ws) + Multiply(LHSWorkspace=self.references[n_attenuator]["ratio_ws"], RHSWorkspace=f_ws, OutputWorkspace=f_ws) + # Store the final result for this setting + ReplaceSpecialValues( + InputWorkspace=f_ws, OutputWorkspace=f_ws, NaNValue=0.0, NaNError=1000.0, InfinityValue=0.0, InfinityError=1000.0 + ) + + # Remove prompt pulse bin, replace the y value by the + # average and give it a very large error. 
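+        # In practice, bins overlapping a prompt pulse, as well as bins with
+        # non-positive counts, are simply dropped from the cleaned arrays below
+        # before fitting y = a + b*x.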
+ x_values = mtd[f_ws].readX(0) + y_values = mtd[f_ws].dataY(0) + e_values = mtd[f_ws].dataE(0) + # We will create a cleaned up workspace without the bins + # corresponding to the prompt pulses + x_clean = [] + y_clean = [] + e_clean = [] + for i in range(len(y_values)): + has_prompt_pulse = self.is_prompt_pulse_in_range(mtd[f_ws], x_values[i], x_values[i + 1]) + if has_prompt_pulse: + logger.notice("Prompt pulse bin [%g, %g]" % (x_values[i], x_values[i + 1])) + elif y_values[i] > 0.0: + x_clean.append((x_values[i + 1] + x_values[i]) / 2.0) + y_clean.append(y_values[i]) + e_clean.append(e_values[i]) + + CreateWorkspace(OutputWorkspace=f_ws, DataX=x_clean, DataY=y_clean, DataE=e_clean, NSpec=1) + + Fit(InputWorkspace=f_ws, Function="name=UserFunction, Formula=a+b*x, a=1, b=0", Output="fit_result") + a = mtd["fit_result_Parameters"].cell(0, 1) + b = mtd["fit_result_Parameters"].cell(1, 1) + error_a = mtd["fit_result_Parameters"].cell(0, 2) + error_b = mtd["fit_result_Parameters"].cell(1, 2) + + medium = self.getProperty("IncidentMedium").value + wl = mtd[workspace_name].getRun().getProperty("LambdaRequest").value[0] + s1h, s1w, s2h, s2w = self.get_slit_settings(mtd[workspace_name]) + self.scaling_factors.append( + { + "IncidentMedium": medium, + "LambdaRequested": wl, + "S1H": s1h, + "S1W": s1w, + "S2iH": s2h, + "S2iW": s2w, + "a": a, + "error_a": error_a, + "b": b, + "error_b": error_b, + "diagnostics": "%s / %s * %s" % (run, self.references[n_attenuator]["run"], self.references[n_attenuator]["diagnostics"]), + } + ) + + def is_prompt_pulse_in_range(self, workspace, x_min, x_max): + """ + Returns True if a prompt pulse is in the given TOF range + @param workspace: a data workspace to get the frequency from + @param x_min: min TOF value + @param x_max: max TOD value + """ + # Initialize the prompt pulses if it hasn't been done + if self.prompt_pulses is None: + # Accelerator rep rate + # Use max here because the first entry can be zero + frequency = max(workspace.getRun().getProperty("frequency").value) + + # Go up to 4 frames - that should cover more than enough TOF + self.prompt_pulses = [1.0e6 / frequency * i for i in range(4)] + + for peak_x in self.prompt_pulses: + if peak_x > x_min and peak_x < x_max: + return True + return False + + def save_scaling_factor_file(self): + """ + Save the output. First see if the scaling factor file exists. + If it does, we need to update it. 
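+        The output is a plain-text .cfg file with a "# y=a+bx" header followed
+        by one space-separated key=value record per configuration, e.g.
+        IncidentMedium=Si LambdaRequested=4.25 S1H=... S2iH=... S1W=... S2iW=... a=... b=... error_a=... error_b=...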
+ """ + scaling_file = self.getPropertyValue("ScalingFactorFile") + # Extend the existing content of the scaling factor file + scaling_file_content, scaling_file_meta = self.read_scaling_factor_file(scaling_file) + scaling_file_content.extend(self.scaling_factors) + + direct_beams = list(self.getProperty("DirectBeamRuns").value) + medium = self.getProperty("IncidentMedium").value + scaling_file_meta[medium] = "# Medium=%s, runs: %s" % (medium, direct_beams) + + fd = open(scaling_file, "w") + fd.write("# y=a+bx\n#\n") + fd.write("# LambdaRequested[Angstroms] S1H[mm] (S2/Si)H[mm] S1W[mm] (S2/Si)W[mm] a b error_a error_b\n#\n") + + for k, v in scaling_file_meta.items(): + fd.write("%s\n" % v) + for item in scaling_file_content: + fd.write("IncidentMedium=%s " % item["IncidentMedium"]) + fd.write("LambdaRequested=%s " % item["LambdaRequested"]) + fd.write("S1H=%s " % item["S1H"]) + fd.write("S2iH=%s " % item["S2iH"]) + fd.write("S1W=%s " % item["S1W"]) + fd.write("S2iW=%s " % item["S2iW"]) + fd.write("a=%s " % item["a"]) + fd.write("b=%s " % item["b"]) + fd.write("error_a=%s " % item["error_a"]) + fd.write("error_b=%s\n" % item["error_b"]) + fd.close() + + def read_scaling_factor_file(self, scaling_file): + """ + Read in a scaling factor file and return its content as a list of entries + @param scaling_file: path of the scaling factor file to read + """ + scaling_file_content = [] + scaling_file_meta = {} + if os.path.isfile(scaling_file): + fd = open(scaling_file, "r") + content = fd.read() + fd.close() + for line in content.split("\n"): + if line.startswith("# Medium="): + m = re.search("# Medium=(.+), runs", line) + if m is not None: + scaling_file_meta[m.group(1)] = line + continue + elif line.startswith("#") or len(line.strip()) == 0: + continue + toks = line.split() + entry = {} + for token in toks: + pair = token.split("=") + entry[pair[0].strip()] = pair[1].strip() + # If we are about to update an entry, don't include it in the new file + add_this_entry = True + for new_entry in self.scaling_factors: + is_matching = entry["IncidentMedium"] == new_entry["IncidentMedium"] + for slit in ["LambdaRequested", "S1H", "S1W", "S2iH", "S2iW"]: + is_matching = is_matching and abs(float(entry[slit]) - float(new_entry[slit])) < self.tolerance + if is_matching: + add_this_entry = False + if add_this_entry: + scaling_file_content.append(entry) + return scaling_file_content, scaling_file_meta + + def compute_dead_time_correction(self, ws, tof_min, tof_max, tof_step): + """ + Compute dead time correction to be applied to the reflectivity curve. + """ + paralyzable = self.getProperty("ParalyzableDeadTime").value + deadtime = self.getProperty("DeadTime").value + deadtime_step = self.getProperty("DeadTimeTOFStep").value + error_ws = LoadErrorEventsNexus(ws.getRun().getProperty("run_number").value) + corr_ws = DeadTimeCorrection.call(InputWorkspace=ws, + InputErrorEventsWorkspace=error_ws, + Paralyzable=paralyzable, + TOFStep=tof_step, + TOFRange=[tof_min, tof_max], + OutputWorkspace="corr") + + return corr_ws + + def process_data(self, workspace, peak_range, background_range, low_res_range): + """ + Common processing for both sample data and normalization. 
+ @param workspace: name of the workspace to work with + @param peak_range: range of pixels defining the peak + @param background_range: range of pixels defining the background + @param low_res_range: range of pixels in the x-direction + """ + # Get dead time correction + # We compute this first in case the rest of the processing modifies the workspaces + tof_range = self.getProperty("TOFRange").value + tof_step = self.getProperty("TOFSteps").value + correct_for_deadtime = self.getProperty("UseDeadTimeCorrection").value + if correct_for_deadtime: + dead_time_correction = self.compute_dead_time_correction(workspace, + tof_range[0], + tof_range[1], + tof_step) + + # Check low-res axis + if low_res_range[0] == Property.EMPTY_INT: + low_res_range[0] = 0 + if low_res_range[1] == Property.EMPTY_INT: + low_res_range[1] = int(workspace.getInstrument().getNumberParameter("number-of-x-pixels")[0]) - 1 + + # Rebin TOF axis + workspace = Rebin( + InputWorkspace=workspace, Params=[tof_range[0], tof_step, tof_range[1]], PreserveEvents=False, OutputWorkspace=str(workspace) + ) + + # Subtract background + workspace = LRSubtractAverageBackground( + InputWorkspace=workspace, + PeakRange=peak_range, + BackgroundRange=background_range, + LowResolutionRange=low_res_range, + OutputWorkspace=str(workspace), + ErrorWeighting=True, + ) + + # Normalize by current proton charge + # Note that the background subtraction will use an error weighted mean + # and use 1 as the error on counts of zero. We normalize by the integrated + # current _after_ the background subtraction so that the 1 doesn't have + # to be changed to a 1/Charge. + workspace = NormaliseByCurrent(InputWorkspace=workspace, OutputWorkspace=str(workspace)) + + # Crop to only the selected peak region + workspace = CropWorkspace( + InputWorkspace=workspace, + StartWorkspaceIndex=int(peak_range[0]), + EndWorkspaceIndex=int(peak_range[1]), + OutputWorkspace=str(workspace), + ) + workspace = SumSpectra(InputWorkspace=workspace, OutputWorkspace=str(workspace)) + + if correct_for_deadtime: + Multiply( + LHSWorkspace=workspace, + RHSWorkspace=dead_time_correction, + OutputWorkspace=str(workspace) + ) + + return str(workspace) + + +AlgorithmFactory.subscribe(LRScalingFactors) diff --git a/reduction/lr_reduction/scaling_factors/__init__.py b/reduction/lr_reduction/scaling_factors/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/reduction/lr_reduction/scaling_factors/workflow.py b/reduction/lr_reduction/scaling_factors/workflow.py new file mode 100644 index 0000000..57ced23 --- /dev/null +++ b/reduction/lr_reduction/scaling_factors/workflow.py @@ -0,0 +1,59 @@ +""" + Scaling factors calculation workflow +""" +import os + +from . import LRDirectBeamSort + + +def process_scaling_factors(ws, output_dir, tof_step=200., order_by_runs=True, + incident_medium='air', slit_tolerance=0.06, wait=False, + postfix='_auto', use_deadtime=True, paralyzable=True, + deadtime=4.2, deadtime_tof_step=200): + """ + Compute scaling factors given a DB run, assumed to be the last + one of a set. + :param workspace ws: Mantid workspace for one of the direct beams to use. 
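+    :param str output_dir: directory where the sf_*.cfg output file is written
+    :param float tof_step: TOF bin width passed to the scaling factor calculation
+    :param bool order_by_runs: if True, order the direct beams by run number
+    :param str incident_medium: incident medium name, overridden by the
+        incident_medium log entry when it is present in the data file
+    :param float slit_tolerance: tolerance used when matching slit settings
+    :param bool wait: if True, return False until the whole direct beam sequence
+        has been acquired
+    :param str postfix: suffix appended to the output file name
+    :param bool use_deadtime: if True, apply the dead-time correction
+    :param bool paralyzable: if True, use the paralyzable dead-time model
+    :param float deadtime: detector dead time value
+    :param float deadtime_tof_step: TOF bin width used for the dead-time correction
+    :return: True if scaling factors were computed, False if still waiting for runs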
+ """ + # Read in the sequence information + meta_data_run = ws.getRun() + run_number = ws.getRunNumber() + sequence_number = meta_data_run.getProperty("sequence_number").value[0] + first_run_of_set = meta_data_run.getProperty("sequence_id").value[0] + sequence_total = meta_data_run.getProperty("sequence_total").value[0] + + print(f"Run {run_number} - Sequence {first_run_of_set} [{first_run_of_set}/{sequence_total}]") + + # For an automated reduction situation, we have to wait until all the direct + # beams are acquired. + if wait and sequence_number < sequence_total: + print(f"Waiting for at least {sequence_total} runs to compute scaling factors") + return False + + # The medium for these direct beam runs may not be what was set in the template, + # so either use the medium in the data file or a default name + + if meta_data_run.hasProperty("incident_medium"): + incident_medium = meta_data_run.getProperty("incident_medium").value[0] + + file_id = incident_medium.replace("medium", "") + + output_cfg = os.path.join(output_dir, "sf_%s_%s%s.cfg" % (first_run_of_set, file_id, postfix)) + + algo = LRDirectBeamSort.LRDirectBeamSort() + algo.PyInit() + algo.setProperty("RunList", list(range(first_run_of_set, first_run_of_set + sequence_total))) + algo.setProperty("UseLowResCut", True) + algo.setProperty("ComputeScalingFactors", True) + algo.setProperty("TOFSteps", tof_step) + algo.setProperty("IncidentMedium", incident_medium) + algo.setProperty("SlitTolerance", slit_tolerance) + algo.setProperty("OrderDirectBeamsByRunNumber", order_by_runs) + algo.setProperty("UseDeadTimeCorrection", use_deadtime) + algo.setProperty("ParalyzableDeadTime", paralyzable) + algo.setProperty("DeadTime", deadtime) + algo.setProperty("DeadTimeTOFStep", deadtime_tof_step) + algo.setProperty("ScalingFactorFile", output_cfg) + algo.PyExec() + + return True diff --git a/reduction/test/test_scaling_factors.py b/reduction/test/test_scaling_factors.py new file mode 100644 index 0000000..f09ae68 --- /dev/null +++ b/reduction/test/test_scaling_factors.py @@ -0,0 +1,117 @@ +import os +import numpy as np + +from lr_reduction.DeadTimeCorrection import SingleReadoutDeadTimeCorrection + +import mantid +import mantid.simpleapi as mtd_api +mtd_api.config["default.facility"] = "SNS" +mtd_api.config["default.instrument"] = "REF_L" + +from lr_reduction import event_reduction, template, workflow +from lr_reduction.utils import amend_config + +from lr_reduction.scaling_factors import workflow as sf_workflow + + +def check_results(data_file, reference): + """ + Check scaling factor file output against reference + """ + with open(data_file, 'r') as fd: + cfg_data = fd.readlines() + + with open(reference, 'r') as fd: + cfg_ref = fd.readlines() + + for i in range(5, len(cfg_ref)): + # Newly generated data + toks = cfg_data[i].split(' ') + for t in toks: + kv = t.split('=') + + # Reference data + toks = cfg_ref[i].split(' ') + for t in toks: + kv_ref = t.split('=') + + for j in range(len(kv_ref)): + v_calc = float(kv[1]) + v_ref = float(kv_ref[1]) + delta = np.fabs((v_ref - v_calc)/v_ref) + assert delta < 0.02 + + +def test_compute_sf(nexus_dir): + """ + Test the computation of scaling factors + """ + with amend_config(data_dir=nexus_dir): + ws = mtd_api.Load("REF_L_197912") + + output_dir = '/tmp' + + # We are passing the first run of the set. For the autoreduction, + # we would be missing runs from the complete set so we will want to + # wait for the whole set to be acquired. 
+ output = sf_workflow.process_scaling_factors(ws, output_dir, + use_deadtime=False, + wait=True, postfix='_test') + assert output is False + + output_cfg = os.path.join(output_dir, "sf_197912_Si_test.cfg") + if os.path.isfile(output_cfg): + os.remove(output_cfg) + + output = sf_workflow.process_scaling_factors(ws, output_dir, + use_deadtime=False, + wait=False, postfix='_test') + assert output is True + + check_results(output_cfg, 'data/sf_197912_Si_auto.cfg') + + +def test_compute_sf_with_deadtime(nexus_dir): + """ + Test the computation of scaling factors + """ + with amend_config(data_dir=nexus_dir): + ws = mtd_api.Load("REF_L_197912") + + output_dir = '/tmp' + + output_cfg = os.path.join(output_dir, "sf_197912_Si_test_dt.cfg") + if os.path.isfile(output_cfg): + os.remove(output_cfg) + + output = sf_workflow.process_scaling_factors(ws, output_dir, + use_deadtime=True, + wait=False, postfix='_test_dt') + assert output is True + + check_results(output_cfg, 'data/sf_197912_Si_dt_par_42_200.cfg') + + +def test_compute_sf_with_deadtime_tof_300(nexus_dir): + """ + Test the computation of scaling factors + """ + with amend_config(data_dir=nexus_dir): + ws = mtd_api.Load("REF_L_197912") + + output_dir = '/tmp' + + output_cfg = os.path.join(output_dir, "sf_197912_Si_test_dt.cfg") + if os.path.isfile(output_cfg): + os.remove(output_cfg) + + output = sf_workflow.process_scaling_factors(ws, output_dir, + use_deadtime=True, + deadtime=4.6, + deadtime_tof_step=300, + paralyzable=False, + wait=False, postfix='_test_dt') + assert output is True + + check_results(output_cfg, 'data/sf_197912_Si_dt_par_46_300.cfg') +
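+# Note: these tests rely on the pytest fixture nexus_dir to locate the REF_L
+# event files (e.g. REF_L_197912) and write their output under /tmp.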