From 95ad055a7aa0bf35fa54330853965db8b476cbcf Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Thu, 21 Nov 2024 15:28:25 +0000 Subject: [PATCH 1/9] Add tests for calculating y space + kinematics Restructured the functions of the analysis reduction and added two unit tests, one for calculating the kinematics and the other for calculating y spaces. --- src/mvesuvio/analysis_reduction.py | 148 ++++++------------ src/mvesuvio/util/analysis_helpers.py | 41 +++-- .../unit/analysis/test_analysis_reduction.py | 67 +++++++- 3 files changed, 140 insertions(+), 116 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index ead6a60f..ea8e710b 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -12,12 +12,16 @@ VesuvioThickness, Integration, Divide, Multiply, DeleteWorkspaces, \ CreateWorkspace, CreateSampleWorkspace -from mvesuvio.util.analysis_helpers import loadConstants, numericalThirdDerivative - - +from mvesuvio.util.analysis_helpers import numericalThirdDerivative, load_resolution, load_instrument_params np.set_printoptions(suppress=True, precision=4, linewidth=200) +NEUTRON_MASS = 1.008 # a.m.u. +ENERGY_FINAL = 4906.0 # meV +ENERGY_TO_VELOCITY = 4.3737 * 1.0e-4 +VELOCITY_FINAL = np.sqrt(ENERGY_FINAL) * ENERGY_TO_VELOCITY # m/us +H_BAR = 2.0445 + class VesuvioAnalysisRoutine(PythonAlgorithm): @@ -160,9 +164,11 @@ def _setup(self): self._save_figures_path = self.getProperty("FiguresPath").value self._h_ratio = self.getProperty("HRatioToLowestMass").value self._constraints = dill.loads(eval(self.getProperty("Constraints").value)) - self._profiles_table = self.getProperty("InputProfiles").value + self._instrument_params = load_instrument_params(self._ip_file, self.getProperty("InputWorkspace").value.getSpectrumNumbers()) + self._resolution_params = load_resolution(self._instrument_params) + # Need to transform profiles table into parameter array for optimize.minimize() self._initial_fit_parameters = [] for intensity, width, center in zip( @@ -192,14 +198,16 @@ def _setup(self): self._fit_profiles_workspaces = {} - def _update_workspace_data(self): self._dataX = self._workspace_being_fit.extractX() self._dataY = self._workspace_being_fit.extractY() self._dataE = self._workspace_being_fit.extractE() - self._set_up_kinematic_arrays() + v0, E0, delta_E, delta_Q = self._calculate_kinematics(self._dataX) + + self._kinematic_arrays = np.swapaxes([v0, E0, delta_E, delta_Q], 0, 1) + self._y_space_arrays = self._calculate_y_spaces(self._dataX, delta_Q, delta_E) self._fit_parameters = np.zeros((len(self._dataY), 3 * self._profiles_table.rowCount() + 3)) self._row_being_fit = 0 @@ -252,14 +260,6 @@ def _create_emtpy_ncp_workspace(self, suffix): ) - def _set_up_kinematic_arrays(self): - resolutionPars, instrPars, kinematicArrays, ySpacesForEachMass = self.prepareFitArgs() - self._resolution_params = resolutionPars - self._instrument_params = instrPars - self._kinematic_arrays = kinematicArrays - self._y_space_arrays = ySpacesForEachMass - - def run(self): assert self._profiles_table.rowCount() > 0, "Need at least one profile to run the routine!" 
@@ -329,94 +329,38 @@ def _fit_neutron_compton_profiles(self): return - def prepareFitArgs(self): - instrPars = self.loadInstrParsFileIntoArray() - resolutionPars = self.loadResolutionPars(instrPars) - - v0, E0, delta_E, delta_Q = self.calculateKinematicsArrays(instrPars) - kinematicArrays = np.array([v0, E0, delta_E, delta_Q]) - ySpacesForEachMass = self.convertDataXToYSpacesForEachMass( - self._dataX, delta_Q, delta_E - ) - kinematicArrays = np.swapaxes(kinematicArrays, 0, 1) - ySpacesForEachMass = np.swapaxes(ySpacesForEachMass, 0, 1) - return resolutionPars, instrPars, kinematicArrays, ySpacesForEachMass - - - def loadInstrParsFileIntoArray(self): - """Loads instrument parameters into array, from the file in the specified path""" + def _calculate_kinematics(self, dataX): + det, plick, angle, T0, L0, L1 = np.hsplit(self._instrument_params, 6) # each is of len(dataX) - data = np.loadtxt(self._ip_file, dtype=str)[1:].astype(float) - spectra = data[:, 0] - - workspace_spectrum_list = self._workspace_being_fit.getSpectrumNumbers() - first_spec = min(workspace_spectrum_list) - last_spec = max(workspace_spectrum_list) - - select_rows = np.where((spectra >= first_spec) & (spectra <= last_spec)) - instrPars = data[select_rows] - return instrPars - - - @staticmethod - def loadResolutionPars(instrPars): - """Resolution of parameters to propagate into TOF resolution - Output: matrix with each parameter in each column""" - spectrums = instrPars[:, 0] - L = len(spectrums) - # For spec no below 135, back scattering detectors, mode is double difference - # For spec no 135 or above, front scattering detectors, mode is single difference - dE1 = np.where(spectrums < 135, 88.7, 73) # meV, STD - dE1_lorz = np.where(spectrums < 135, 40.3, 24) # meV, HFHM - dTOF = np.repeat(0.37, L) # us - dTheta = np.repeat(0.016, L) # rad - dL0 = np.repeat(0.021, L) # meters - dL1 = np.repeat(0.023, L) # meters - - resolutionPars = np.vstack((dE1, dTOF, dTheta, dL0, dL1, dE1_lorz)).transpose() - return resolutionPars - - - def calculateKinematicsArrays(self, instrPars): - """Kinematics quantities calculated from TOF data""" - - dataX = self._dataX - - mN, Ef, en_to_vel, vf, hbar = loadConstants() - det, plick, angle, T0, L0, L1 = np.hsplit(instrPars, 6) # each is of len(dataX) - t_us = dataX - T0 # T0 is electronic delay due to instruments - v0 = vf * L0 / (vf * t_us - L1) - E0 = np.square( - v0 / en_to_vel - ) # en_to_vel is a factor used to easily change velocity to energy and vice-versa - - delta_E = E0 - Ef + # T0 is electronic delay due to instruments + t_us = dataX - T0 + v0 = VELOCITY_FINAL * L0 / (VELOCITY_FINAL * t_us - L1) + # en_to_vel is a factor used to easily change velocity to energy and vice-versa + E0 = np.square(v0 / ENERGY_TO_VELOCITY) + delta_E = E0 - ENERGY_FINAL delta_Q2 = ( 2.0 - * mN - / hbar**2 - * (E0 + Ef - 2.0 * np.sqrt(E0 * Ef) * np.cos(angle / 180.0 * np.pi)) + * NEUTRON_MASS + / H_BAR**2 + * (E0 + ENERGY_FINAL - 2.0 * np.sqrt(E0 * ENERGY_FINAL) * np.cos(angle / 180.0 * np.pi)) ) delta_Q = np.sqrt(delta_Q2) return v0, E0, delta_E, delta_Q # shape(no of spectrums, no of bins) - def convertDataXToYSpacesForEachMass(self, dataX, delta_Q, delta_E): - """"Calculates y spaces from TOF data, each row corresponds to one mass""" + def _calculate_y_spaces(self, dataX, delta_Q, delta_E): # Prepare arrays to broadcast dataX = dataX[np.newaxis, :, :] delta_Q = delta_Q[np.newaxis, :, :] delta_E = delta_E[np.newaxis, :, :] - - mN, Ef, en_to_vel, vf, hbar = loadConstants() masses = self._masses.reshape(-1, 1, 
1) - energyRecoil = np.square(hbar * delta_Q) / 2.0 / masses - ySpacesForEachMass = ( - masses / hbar**2 / delta_Q * (delta_E - energyRecoil) - ) # y-scaling - return ySpacesForEachMass + energy_recoil = np.square(H_BAR * delta_Q) / 2.0 / masses + y_spaces = masses / H_BAR**2 / delta_Q * (delta_E - energy_recoil) + + # First axis selects spectra + return np.swapaxes(y_spaces, 0, 1) def _save_plots(self): @@ -712,13 +656,12 @@ def calcGaussianResolution(self, centers): v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = self._resolution_params[self._row_being_fit] - mN, Ef, en_to_vel, vf, hbar = loadConstants() angle = angle * np.pi / 180 - dWdE1 = 1.0 + (E0 / Ef) ** 1.5 * (L1 / L0) + dWdE1 = 1.0 + (E0 / ENERGY_FINAL) ** 1.5 * (L1 / L0) dWdTOF = 2.0 * E0 * v0 / L0 - dWdL1 = 2.0 * E0**1.5 / Ef**0.5 / L0 + dWdL1 = 2.0 * E0**1.5 / ENERGY_FINAL**0.5 / L0 dWdL0 = 2.0 * E0 / L0 dW2 = ( @@ -728,25 +671,25 @@ def calcGaussianResolution(self, centers): + dWdL0**2 * dL0**2 ) # conversion from meV^2 to A^-2, dydW = (M/q)^2 - dW2 *= (masses / hbar**2 / delta_Q) ** 2 + dW2 *= (masses / H_BAR**2 / delta_Q) ** 2 dQdE1 = ( 1.0 - - (E0 / Ef) ** 1.5 * L1 / L0 - - np.cos(angle) * ((E0 / Ef) ** 0.5 - L1 / L0 * E0 / Ef) + - (E0 / ENERGY_FINAL) ** 1.5 * L1 / L0 + - np.cos(angle) * ((E0 / ENERGY_FINAL) ** 0.5 - L1 / L0 * E0 / ENERGY_FINAL) ) dQdTOF = 2.0 * E0 * v0 / L0 - dQdL1 = 2.0 * E0**1.5 / L0 / Ef**0.5 + dQdL1 = 2.0 * E0**1.5 / L0 / ENERGY_FINAL**0.5 dQdL0 = 2.0 * E0 / L0 - dQdTheta = 2.0 * np.sqrt(E0 * Ef) * np.sin(angle) + dQdTheta = 2.0 * np.sqrt(E0 * ENERGY_FINAL) * np.sin(angle) dQ2 = ( dQdE1**2 * dE1**2 + (dQdTOF**2 * dTOF**2 + dQdL1**2 * dL1**2 + dQdL0**2 * dL0**2) - * np.abs(Ef / E0 * np.cos(angle) - 1) + * np.abs(ENERGY_FINAL / E0 * np.cos(angle) - 1) + dQdTheta**2 * dTheta**2 ) - dQ2 *= (mN / hbar**2 / delta_Q) ** 2 + dQ2 *= (NEUTRON_MASS / H_BAR**2 / delta_Q) ** 2 # in A-1 #same as dy^2 = (dy/dw)^2*dw^2 + (dy/dq)^2*dq^2 gaussianResWidth = np.sqrt(dW2 + dQ2) @@ -758,20 +701,19 @@ def calcLorentzianResolution(self, centers): v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = self._resolution_params[self._row_being_fit] - mN, Ef, en_to_vel, vf, hbar = loadConstants() angle = angle * np.pi / 180 - dWdE1_lor = (1.0 + (E0 / Ef) ** 1.5 * (L1 / L0)) ** 2 + dWdE1_lor = (1.0 + (E0 / ENERGY_FINAL) ** 1.5 * (L1 / L0)) ** 2 # conversion from meV^2 to A^-2 - dWdE1_lor *= (masses / hbar**2 / delta_Q) ** 2 + dWdE1_lor *= (masses / H_BAR**2 / delta_Q) ** 2 dQdE1_lor = ( 1.0 - - (E0 / Ef) ** 1.5 * L1 / L0 - - np.cos(angle) * ((E0 / Ef) ** 0.5 + L1 / L0 * E0 / Ef) + - (E0 / ENERGY_FINAL) ** 1.5 * L1 / L0 + - np.cos(angle) * ((E0 / ENERGY_FINAL) ** 0.5 + L1 / L0 * E0 / ENERGY_FINAL) ) ** 2 - dQdE1_lor *= (mN / hbar**2 / delta_Q) ** 2 + dQdE1_lor *= (NEUTRON_MASS / H_BAR**2 / delta_Q) ** 2 lorentzianResWidth = np.sqrt(dWdE1_lor + dQdE1_lor) * dE1_lorz # in A-1 return lorentzianResWidth diff --git a/src/mvesuvio/util/analysis_helpers.py b/src/mvesuvio/util/analysis_helpers.py index fa2c974e..93b74709 100644 --- a/src/mvesuvio/util/analysis_helpers.py +++ b/src/mvesuvio/util/analysis_helpers.py @@ -237,18 +237,6 @@ def extractWS(ws): return ws.extractX(), ws.extractY(), ws.extractE() -def loadConstants(): - """Output: the mass of the neutron, final energy of neutrons 
(selected by gold foil), - factor to change energies into velocities, final velocity of neutron and hbar""" - mN = 1.008 # a.m.u. - Ef = 4906.0 # meV - en_to_vel = 4.3737 * 1.0e-4 - vf = np.sqrt(Ef) * en_to_vel # m/us - hbar = 2.0445 - constants = (mN, Ef, en_to_vel, vf, hbar) - return constants - - def numericalThirdDerivative(x, y): k6 = (- y[:, 12:] + y[:, :-12]) * 1 k5 = (+ y[:, 11:-1] - y[:, 1:-11]) * 24 @@ -267,6 +255,35 @@ def numericalThirdDerivative(x, y): return derivative +def load_resolution(instrument_params): + """Resolution of parameters to propagate into TOF resolution + Output: matrix with each parameter in each column""" + spectra = instrument_params[:, 0] + L = len(spectra) + # For spec no below 135, back scattering detectors, mode is double difference + # For spec no 135 or above, front scattering detectors, mode is single difference + dE1 = np.where(spectra < 135, 88.7, 73) # meV, STD + dE1_lorz = np.where(spectra < 135, 40.3, 24) # meV, HFHM + dTOF = np.repeat(0.37, L) # us + dTheta = np.repeat(0.016, L) # rad + dL0 = np.repeat(0.021, L) # meters + dL1 = np.repeat(0.023, L) # meters + + resolutionPars = np.vstack((dE1, dTOF, dTheta, dL0, dL1, dE1_lorz)).transpose() + return resolutionPars + + +def load_instrument_params(ip_file, spectrum_list): + + first_spec = min(spectrum_list) + last_spec = max(spectrum_list) + data = np.loadtxt(ip_file, dtype=str)[1:].astype(float) + spectra = data[:, 0] + + select_rows = np.where((spectra >= first_spec) & (spectra <= last_spec)) + return data[select_rows] + + def createWS(dataX, dataY, dataE, wsName, parentWorkspace=None): ws = CreateWorkspace( DataX=dataX.flatten(), diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 57d55b2a..4a945a1a 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -7,9 +7,74 @@ import inspect -class TestAnalysisFunctions(unittest.TestCase): +np.set_printoptions(suppress=True, precision=5, linewidth=200) + +class TestAnalysisReduction(unittest.TestCase): def setUp(self): pass + def test_calculate_kinematics(self): + alg = VesuvioAnalysisRoutine() + alg._instrument_params = np.array( + [[3, 3, 131.12, -0.2, 11.005, 0.6039], + [4, 4, 132.77, -0.2, 11.005, 0.5789], + [5, 5, 133.69, -0.2, 11.005, 0.5696], + ]) + dataX = np.array([ + [110.5, 111.5, 112.5, 113.5, 114.5], + [110.5, 111.5, 112.5, 113.5, 114.5], + [110.5, 111.5, 112.5, 113.5, 114.5], + ]) + v0, E0, delta_E, delta_Q = alg._calculate_kinematics(dataX) + np.testing.assert_allclose(v0, np.array([[0.12095, 0.11964, 0.11835, 0.11709, 0.11586], + [0.11988, 0.11858, 0.11732, 0.11608, 0.11487], + [0.11948, 0.1182 , 0.11694, 0.11571, 0.11451], + ]), atol=1e-4) + np.testing.assert_allclose(E0, np.array([[76475.65823, 74821.94729, 73221.30191, 71671.47572, 70170.33998], + [75122.06378, 73511.83023, 71952.81999, 70442.88326, 68979.98182], + [74627.68443, 73033.23536, 71489.34475, 69993.89741, 68544.88764], + ]), atol=1e-4) + np.testing.assert_allclose(delta_E, np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], + [70216.06378, 68605.83023, 67046.81999, 65536.88326, 64073.98182], + [69721.68443, 68127.23536, 66583.34475, 65087.89741, 63638.88764], + ]), atol=1e-4) + np.testing.assert_allclose(delta_Q, np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], + [226.21278, 224.18766, 222.20618, 220.26696, 218.36867], + [226.07138, 224.05877, 222.08939, 220.16185, 218.27485], + ]), atol=1e-4) + + def 
test_calculate_y_spaces(self): + alg = VesuvioAnalysisRoutine() + alg._masses = np.array([1, 12, 16]) + dataX = np.array([ + [110.5, 111.5, 112.5, 113.5, 114.5], + [110.5, 111.5, 112.5, 113.5, 114.5], + [110.5, 111.5, 112.5, 113.5, 114.5], + ]) + delta_Q = np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], + [226.21278, 224.18766, 222.20618, 220.26696, 218.36867], + [226.07138, 224.05877, 222.08939, 220.16185, 218.27485], + ]) + delta_E = np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], + [70216.06378, 68605.83023, 67046.81999, 65536.88326, 64073.98182], + [69721.68443, 68127.23536, 66583.34475, 65087.89741, 63638.88764], + ]) + y_spaces = alg._calculate_y_spaces(dataX, delta_Q, delta_E) + y_spaces_expected = np.array( + [[[-38.0885,-38.12637,-38.16414,-38.20185,-38.23948], + [791.54274,779.75738,768.21943,756.92089,745.85436], + [1093.22682,1077.16966,1061.44982,1046.05644,1030.97939]], + + [[-38.84807,-38.88304,-38.91795,-38.95279,-38.98756], + [777.99344,766.43564,755.11863,744.0348,733.17695], + [1075.02671,1059.2788,1043.8592,1028.75757,1013.96405]], + + [[-39.25409,-39.28749,-39.32085,-39.35414,-39.38735], + [772.34348,760.87331,749.64145,738.64055,727.86342], + [1067.46987,1051.84087,1036.53683,1021.54771,1006.8637,]]]) + np.testing.assert_allclose(y_spaces, y_spaces_expected, atol=1e-4) + + + if __name__ == "__main__": unittest.main() From d04875dc421ef1057983ea882e242d32f3b3f794 Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Fri, 22 Nov 2024 11:07:13 +0000 Subject: [PATCH 2/9] Add unit test for ncp fits Added test for 3 different spectra: one with normal data, one with a range in TOF masked with zeros and another fully masked spectra. Checked that the fitting function (total ncp) and fitting parameters match. 
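
The masking behaviour these tests rely on is that bins set to zero carry no
weight in the cost function, so a masked TOF range, or a fully masked spectrum,
does not bias the fit. A minimal standalone numpy sketch of that idea (the
function name and signature here are illustrative only, not the routine's
actual API):

    import numpy as np

    def masked_chi_squared(ncp_total, data_y, data_e):
        # Zero-valued bins (masked TOF range or fully masked spectrum)
        # are dropped before the residuals are computed.
        keep = data_y != 0
        if not keep.any():
            return 0.0
        residuals = ncp_total[keep] - data_y[keep]
        if np.all(data_e[keep] == 0):
            # No uncertainties available: plain least squares
            return float(np.sum(residuals**2))
        return float(np.sum(residuals**2 / data_e[keep]**2))
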
--- .../unit/analysis/test_analysis_reduction.py | 96 ++++++++++++++++++- 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 4a945a1a..08df0c63 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -1,13 +1,14 @@ import unittest import numpy as np import numpy.testing as nptest -from mock import MagicMock +from mock import MagicMock, patch, call from mvesuvio.analysis_reduction import VesuvioAnalysisRoutine +from mvesuvio.util.analysis_helpers import load_resolution from mantid.simpleapi import CreateWorkspace, DeleteWorkspace import inspect -np.set_printoptions(suppress=True, precision=5, linewidth=200) +np.set_printoptions(suppress=True, precision=6, linewidth=200) class TestAnalysisReduction(unittest.TestCase): def setUp(self): @@ -75,6 +76,97 @@ def test_calculate_y_spaces(self): np.testing.assert_allclose(y_spaces, y_spaces_expected, atol=1e-4) + def test_fit_neutron_compton_profiles_number_of_calls(self): + alg = VesuvioAnalysisRoutine() + alg._dataY = np.array([[1, 1], [2, 2], [3, 3]]) + alg._fit_parameters = np.ones(3) # To avoid assertion error + alg._fit_neutron_compton_profiles_to_row = MagicMock(return_value=None) + alg._fit_neutron_compton_profiles() + self.assertEqual(alg._fit_neutron_compton_profiles_to_row.call_count, 3) + + + def test_fit_neutron_compton_profiles_to_row(self): + # Test 3 spectra, one nornal, one with masked Tof and another fully masked + alg = VesuvioAnalysisRoutine() + alg._workspace_being_fit = MagicMock() + alg._workspace_being_fit.name.return_value = "test_workspace" + alg._workspace_being_fit.getNumberHistograms.return_value = 3 + alg._workspace_being_fit.extractY.return_value = np.array( + [[-0.0001, 0.0016, 0.0018, -0.0004, 0.0008, 0.002, 0.0025, 0.0033, 0.0012, 0.0012, 0.0024, 0.0035, 0.0019, 0.0069, 0.008, 0.0097, 0.0104, 0.0124, 0.0147, 0.0165, 0.0163, 0.0195, 0.0185, 0.0149, 0.0143, 0.0145, 0.0109, 0.0085, 0.0065, 0.0043, 0.0029, 0.0023, 0.001, -0.0001, 0.0009, 0.0004, -0., 0.0001, 0.0008, 0.0001, -0.0007, 0.0011, 0.0032, 0.0057, 0.0094, 0.0036, 0.0012, -0.0023, -0.0015, -0.0006, 0.0006, 0.0011, 0.0004, 0.0009], + [0.0008, 0., 0., 0., 0., 0., 0.0007, 0.0033, 0.0045, 0.0029, 0.0008, 0.0026, 0.0019, 0.0004, 0.0044, 0.0057, 0.0083, 0.0115, 0.012, 0.013, 0.0168, 0.0191, 0.0167, 0.0165, 0.0165, 0.018, 0.0131, 0.0131, 0.0111, 0.0069, 0.0045, 0.0049, 0.0008, 0.0022, 0.0017, -0., 0.0003, -0.0007, 0.0001, -0., 0.0009, 0.0017, 0.0033, 0.0061, 0.0097, 0.0044, 0.0016, 0.0003, -0.0002, 0.0008, -0.0009, 0.0004, 0.0001, 0.0025], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + ) + alg._workspace_being_fit.extractX.return_value = np.array( + [[113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], + [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 
425., 429.]] + ) + alg._workspace_being_fit.extractE.return_value = np.array( + [[0.0015, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0016], + [0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0016]] + ) + alg._instrument_params = np.array( + [[3, 3, 131.12, -0.2, 11.005, 0.6039], + [4, 4, 132.77, -0.2, 11.005, 0.5789], + ]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1, 12, 16, 27]) + alg._initial_fit_parameters = np.array([1, 4.7, 0, 1, 12.71, 0.0, 1, 8.76, 0.0, 1, 13.897, 0.0]) + alg._initial_fit_bounds = np.array([ + [0, None], [3, 6], [-3, 1], + [0, None], [12.71, 12.71], [-3, 1], + [0, None], [8.76, 8.76], [-3, 1], + [0, None], [13.897, 13.897], [-3, 1], + ]) + alg._constraints = () + + # Set up several fit arguments + alg._profiles_table = MagicMock() + alg._profiles_table.column.return_value = ['1', '12', '16', '27'] + alg._profiles_table.rowCount.return_value = 4 + alg._create_emtpy_ncp_workspace = MagicMock(return_value = None) + alg._update_workspace_data() + + # Create mock for storing ncp total result + ncp_total_array = np.zeros_like(alg._dataY) + + def pick_ncp_row_result(row): + return ncp_total_array[row] + + ncp_total_ws_mock = MagicMock() + ncp_total_ws_mock.dataY.side_effect = pick_ncp_row_result + + alg._fit_profiles_workspaces = { + "total": ncp_total_ws_mock, + "1": MagicMock(), + "12": MagicMock(), + "16": MagicMock(), + "27": MagicMock() + } + # Fit ncp + alg._row_being_fit = 0 + alg._fit_neutron_compton_profiles_to_row() + alg._row_being_fit = 1 + alg._fit_neutron_compton_profiles_to_row() + alg._row_being_fit = 2 + alg._fit_neutron_compton_profiles_to_row() + + # Compare results + expected_total_ncp_fits = np.array( + [[0.003246, 0.003334, 0.003416, 0.003492, 0.003562, 0.003628, 0.003633, 0.003679, 0.003721, 0.00376, 0.003796, 0.003829, 0.00386, 0.003888, 0.003914, 0.003937, 0.003959, 0.003978, 0.003996, 0.004012, 0.004026, 0.004038, 0.004049, 0.004059, 0.004067, 0.004074, 0.004079, 0.004083, 0.004086, 0.004088, 0.004088, 0.004087, 0.004085, 0.004082, 0.004078, 0.004073, 0.004067, 0.00406, 0.004052, 0.004043, 0.004033, 0.004022, 0.00401, 0.003996, 0.003982, 0.003967, 0.003951, 0.00384, 0.004628, 0.004635, 0.004642, 0.004649, 0.004655, 0.004659], + [0.003383, 0.003481, 0.003572, 0.003657, 0.003737, 0.003811, 0.003866, 0.003927, 0.003984, 0.004038, 0.004088, 0.004136, 0.00418, 0.004222, 0.004261, 0.004298, 0.004333, 0.004366, 0.004397, 0.004426, 0.004453, 0.004479, 0.004503, 0.004526, 0.004547, 0.004567, 0.004586, 0.004603, 0.00462, 0.004635, 0.004649, 0.004663, 0.004675, 0.004686, 0.004697, 0.004707, 0.004715, 0.004723, 0.004731, 0.004737, 0.004743, 0.004748, 0.004752, 0.004756, 0.004759, 0.004761, 0.004763, 0.004727, 0.005024, 0.005033, 0.005043, 0.005052, 0.00506, 0.005066], + [0., 0., 0., 
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., ]] + ) + np.testing.assert_allclose(ncp_total_array, expected_total_ncp_fits, atol=1e-6) + + expected_fit_parameters = np.array( + [[3., 5568.767144, 6., -3., 0., 12.71, 0.999934, 0., 8.76, -3., 0., 13.897, -2.999712, 26.483439, 16.], + [4., 6551.082529, 4.511378, -2.999998, 0., 12.71, 0.85686, 0., 8.76, -2.782142, 0., 13.897, -2.683993, 28.399091, 18.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]] + ) + np.testing.assert_allclose(alg._fit_parameters, expected_fit_parameters, atol=1e-6) + + + if __name__ == "__main__": unittest.main() From 912f46eee3fd986a7aa241c433aa4f6a8951ec3c Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Mon, 25 Nov 2024 11:40:57 +0000 Subject: [PATCH 3/9] Add unit test for masking tof range Added unit test that compares masking tof range with zeros with using a range that has been cut off (i.e. does not include the values with zeros). If the error function is successfully ignoring zeros then the result of both fits should be the same --- .../unit/analysis/test_analysis_reduction.py | 84 +++++++++++++++---- 1 file changed, 69 insertions(+), 15 deletions(-) diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 08df0c63..17a4fcf0 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -6,6 +6,8 @@ from mvesuvio.util.analysis_helpers import load_resolution from mantid.simpleapi import CreateWorkspace, DeleteWorkspace import inspect +import scipy +import re np.set_printoptions(suppress=True, precision=6, linewidth=200) @@ -86,7 +88,6 @@ def test_fit_neutron_compton_profiles_number_of_calls(self): def test_fit_neutron_compton_profiles_to_row(self): - # Test 3 spectra, one nornal, one with masked Tof and another fully masked alg = VesuvioAnalysisRoutine() alg._workspace_being_fit = MagicMock() alg._workspace_being_fit.name.return_value = "test_workspace" @@ -104,9 +105,9 @@ def test_fit_neutron_compton_profiles_to_row(self): [[0.0015, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0016], [0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0016]] ) - alg._instrument_params = np.array( - [[3, 3, 131.12, -0.2, 11.005, 0.6039], - [4, 4, 132.77, -0.2, 11.005, 0.5789], + alg._instrument_params = np.array([ + [144, 144, 54.1686, -0.41, 11.005, 0.720184], + [145, 145, 52.3407, -0.53, 11.005, 0.717311] ]) alg._resolution_params = load_resolution(alg._instrument_params) alg._masses = np.array([1, 12, 16, 27]) @@ -149,23 +150,76 @@ def pick_ncp_row_result(row): alg._fit_neutron_compton_profiles_to_row() alg._row_being_fit = 
2 alg._fit_neutron_compton_profiles_to_row() - # Compare results - expected_total_ncp_fits = np.array( - [[0.003246, 0.003334, 0.003416, 0.003492, 0.003562, 0.003628, 0.003633, 0.003679, 0.003721, 0.00376, 0.003796, 0.003829, 0.00386, 0.003888, 0.003914, 0.003937, 0.003959, 0.003978, 0.003996, 0.004012, 0.004026, 0.004038, 0.004049, 0.004059, 0.004067, 0.004074, 0.004079, 0.004083, 0.004086, 0.004088, 0.004088, 0.004087, 0.004085, 0.004082, 0.004078, 0.004073, 0.004067, 0.00406, 0.004052, 0.004043, 0.004033, 0.004022, 0.00401, 0.003996, 0.003982, 0.003967, 0.003951, 0.00384, 0.004628, 0.004635, 0.004642, 0.004649, 0.004655, 0.004659], - [0.003383, 0.003481, 0.003572, 0.003657, 0.003737, 0.003811, 0.003866, 0.003927, 0.003984, 0.004038, 0.004088, 0.004136, 0.00418, 0.004222, 0.004261, 0.004298, 0.004333, 0.004366, 0.004397, 0.004426, 0.004453, 0.004479, 0.004503, 0.004526, 0.004547, 0.004567, 0.004586, 0.004603, 0.00462, 0.004635, 0.004649, 0.004663, 0.004675, 0.004686, 0.004697, 0.004707, 0.004715, 0.004723, 0.004731, 0.004737, 0.004743, 0.004748, 0.004752, 0.004756, 0.004759, 0.004761, 0.004763, 0.004727, 0.005024, 0.005033, 0.005043, 0.005052, 0.00506, 0.005066], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., ]] - ) + expected_total_ncp_fits = np.array([ + [0.00004, 0.000059, 0.000089, 0.000138, 0.000215, 0.000335, 0.000647, 0.000943, 0.001347, 0.001888, 0.002593, 0.003486, 0.004589, 0.005913, 0.007453, 0.009182, 0.011037, 0.01292, 0.014694, 0.016196, 0.017254, 0.017722, 0.017509, 0.016606, 0.01509, 0.013115, 0.010885, 0.00861, 0.006475, 0.004615, 0.003101, 0.001949, 0.001128, 0.000583, 0.000251, 0.000068, -0.000016, -0.000041, -0.000005, 0.000119, 0.000355, 0.00105, 0.002667, 0.006238, 0.008899, 0.004131, 0.000602, -0.000026, 0.000111, 0.000078, 0.000061, 0.00005, 0.000043, 0.00004], + [0.000021, 0.000029, 0.000042, 0.000062, 0.000095, 0.000148, 0.000324, 0.000486, 0.00072, 0.001046, 0.001492, 0.002084, 0.002851, 0.003818, 0.005006, 0.006424, 0.008066, 0.009895, 0.011838, 0.013776, 0.015551, 0.016978, 0.017872, 0.018092, 0.017566, 0.016325, 0.014492, 0.012265, 0.009878, 0.007552, 0.005464, 0.003723, 0.002369, 0.001389, 0.00073, 0.000322, 0.000095, -0.000012, -0.000016, 0.000117, 0.000383, 0.001178, 0.003156, 0.006523, 0.009388, 0.00489, 0.00077, -0.000037, 0.000125, 0.000087, 0.000067, 0.000055, 0.000047, 0.000043], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] + ]) np.testing.assert_allclose(ncp_total_array, expected_total_ncp_fits, atol=1e-6) - expected_fit_parameters = np.array( - [[3., 5568.767144, 6., -3., 0., 12.71, 0.999934, 0., 8.76, -3., 0., 13.897, -2.999712, 26.483439, 16.], - [4., 6551.082529, 4.511378, -2.999998, 0., 12.71, 0.85686, 0., 8.76, -2.782142, 0., 13.897, -2.683993, 28.399091, 18.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]] - ) + expected_fit_parameters = np.array([ + [144., 7.095403, 5.051053, 0.015996, 0.218192, 12.71, 1., 0., 8.76, -1.091821, 0.29291, 13.897, 0.639245, 0.848425, 21.], + [145., 6.825532, 5.073835, -0.087401, 0.29456, 12.71, 1., 0., 8.76, -0.14256, 0.26776, 13.897, -2.563441, 1.246451, 20.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
+ ]) np.testing.assert_allclose(alg._fit_parameters, expected_fit_parameters, atol=1e-6) + def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): + alg = VesuvioAnalysisRoutine() + alg._workspace_being_fit = MagicMock() + alg._workspace_being_fit.name.return_value = "test_workspace" + alg._workspace_being_fit.getNumberHistograms.return_value = 1 + dataX = np.arange(113, 430).reshape(1, -1) + dataE = np.full_like(dataX, 0.0015) + dataY = scipy.special.voigt_profile(dataX - 235, 30, 0) + 0.005*(np.random.random_sample(dataX.shape)-0.5) + + # Mask TOF range + cut_off_idx = 100 + dataY[:, :cut_off_idx] = 0 + + alg._workspace_being_fit.extractY.return_value = dataY + alg._workspace_being_fit.extractX.return_value = dataX + alg._workspace_being_fit.extractE.return_value = dataE + alg._instrument_params = np.array([[144, 144, 54.1686, -0.41, 11.005, 0.720184]]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1]) + alg._initial_fit_parameters = np.array([1, 5.2, 0]) + alg._initial_fit_bounds = np.array([[0, None], [3, 6], [None, None]]) + alg._constraints = () + + # Set up several fit arguments + alg._profiles_table = MagicMock() + alg._profiles_table.column.return_value = ['1'] + alg._profiles_table.rowCount.return_value = 1 + alg._create_emtpy_ncp_workspace = MagicMock() + alg._update_workspace_data() + + # Create mock for storing ncp total result + ncp_array_masked = np.zeros_like(dataY) + alg._fit_profiles_workspaces = {"total": MagicMock(dataY=lambda arg: ncp_array_masked), "1": MagicMock()} + + # Fit ncp + alg._row_being_fit = 0 + alg._fit_neutron_compton_profiles_to_row() + fit_parameters_masked = alg._fit_parameters.copy() + + # Now cut range so that zeros are not part of dataY + # (Still need to leave a padding with 6 zeros due to numerical derivative in ncp) + alg._workspace_being_fit.extractY.return_value = dataY[:, cut_off_idx - 6:].reshape(1, -1) + alg._workspace_being_fit.extractX.return_value = dataX[:, cut_off_idx - 6:].reshape(1, -1) + alg._workspace_being_fit.extractE.return_value = dataE[:, cut_off_idx - 6:].reshape(1, -1) + alg._update_workspace_data() + + ncp_array_cut = ncp_array_masked[:, cut_off_idx - 6:].reshape(1, -1) + alg._fit_profiles_workspaces = {"total": MagicMock(dataY=lambda arg: ncp_array_cut), "1": MagicMock()} + + alg._fit_neutron_compton_profiles_to_row() + fit_parameters_cut = alg._fit_parameters.copy() + + np.testing.assert_allclose(fit_parameters_cut, fit_parameters_masked, atol=1e-6) + np.testing.assert_allclose(ncp_array_masked[:, cut_off_idx-6:], ncp_array_cut, atol=1e-6) if __name__ == "__main__": From 08dfe7aea813a6a91b74c6ae5f88e4fbe7665ca6 Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Tue, 26 Nov 2024 16:08:30 +0000 Subject: [PATCH 4/9] Change setting of kinematic arrays Cleaned the code for setting kinematic arrays and fixed unit tests --- src/mvesuvio/analysis_reduction.py | 46 ++++++++++--------- .../unit/analysis/test_analysis_reduction.py | 40 ++++++++-------- 2 files changed, 45 insertions(+), 41 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index ea8e710b..df2bff79 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -204,10 +204,9 @@ def _update_workspace_data(self): self._dataY = self._workspace_being_fit.extractY() self._dataE = self._workspace_being_fit.extractE() - v0, E0, delta_E, delta_Q = self._calculate_kinematics(self._dataX) - - self._kinematic_arrays = np.swapaxes([v0, E0, 
delta_E, delta_Q], 0, 1) - self._y_space_arrays = self._calculate_y_spaces(self._dataX, delta_Q, delta_E) + self._set_kinematic_arrays(self._dataX) + # Calculate y space after kinematics + self._y_space_arrays = self._calculate_y_spaces(self._dataX) self._fit_parameters = np.zeros((len(self._dataY), 3 * self._profiles_table.rowCount() + 3)) self._row_being_fit = 0 @@ -329,31 +328,31 @@ def _fit_neutron_compton_profiles(self): return - def _calculate_kinematics(self, dataX): + def _set_kinematic_arrays(self, dataX): det, plick, angle, T0, L0, L1 = np.hsplit(self._instrument_params, 6) # each is of len(dataX) # T0 is electronic delay due to instruments t_us = dataX - T0 - v0 = VELOCITY_FINAL * L0 / (VELOCITY_FINAL * t_us - L1) + self._v0 = VELOCITY_FINAL * L0 / (VELOCITY_FINAL * t_us - L1) # en_to_vel is a factor used to easily change velocity to energy and vice-versa - E0 = np.square(v0 / ENERGY_TO_VELOCITY) - delta_E = E0 - ENERGY_FINAL + self._E0 = np.square(self._v0 / ENERGY_TO_VELOCITY) + self._deltaE = self._E0 - ENERGY_FINAL delta_Q2 = ( 2.0 * NEUTRON_MASS / H_BAR**2 - * (E0 + ENERGY_FINAL - 2.0 * np.sqrt(E0 * ENERGY_FINAL) * np.cos(angle / 180.0 * np.pi)) + * (self._E0 + ENERGY_FINAL - 2.0 * np.sqrt(self._E0 * ENERGY_FINAL) * np.cos(angle / 180.0 * np.pi)) ) - delta_Q = np.sqrt(delta_Q2) - return v0, E0, delta_E, delta_Q # shape(no of spectrums, no of bins) + self._deltaQ = np.sqrt(delta_Q2) + return - def _calculate_y_spaces(self, dataX, delta_Q, delta_E): + def _calculate_y_spaces(self, dataX): # Prepare arrays to broadcast dataX = dataX[np.newaxis, :, :] - delta_Q = delta_Q[np.newaxis, :, :] - delta_E = delta_E[np.newaxis, :, :] + delta_Q = self._deltaQ[np.newaxis, :, :] + delta_E = self._deltaE[np.newaxis, :, :] masses = self._masses.reshape(-1, 1, 1) energy_recoil = np.square(H_BAR * delta_Q) / 2.0 / masses @@ -600,7 +599,10 @@ def _neutron_compton_profiles(self, pars): centers = pars[2::3].reshape(-1, 1) masses = self._masses.reshape(-1, 1) - v0, E0, deltaE, deltaQ = self._kinematic_arrays[self._row_being_fit] + # v0, E0, deltaE, deltaQ = self._kinematic_arrays[self._row_being_fit] + E0 = self._E0[self._row_being_fit] + deltaQ = self._deltaQ[self._row_being_fit] + gaussRes, lorzRes = self.caculateResolutionForEachMass(centers) totalGaussWidth = np.sqrt(widths**2 + gaussRes**2) @@ -613,10 +615,9 @@ def _neutron_compton_profiles(self, pars): / deltaQ * 0.72 ) - scaling_factor = intensities * E0 * E0 ** (-0.92) * masses / deltaQ - JOfY *= scaling_factor - FSE *= scaling_factor - return JOfY+FSE, FSE + NCP = intensities * (JOfY+FSE) * E0 * E0 ** (-0.92) * masses / deltaQ + FSE = intensities * FSE * E0 * E0 ** (-0.92) * masses / deltaQ + return NCP, FSE def caculateResolutionForEachMass(self, centers): @@ -633,10 +634,13 @@ def kinematicsAtYCenters(self, centers): shapeOfArrays = centers.shape proximityToYCenters = np.abs(self._y_space_arrays[self._row_being_fit] - centers) - yClosestToCenters = proximityToYCenters.min(axis=1).reshape(shapeOfArrays) + yClosestToCenters = proximityToYCenters.min(axis=1).reshape(-1, 1) yCentersMask = proximityToYCenters == yClosestToCenters - v0, E0, deltaE, deltaQ = self._kinematic_arrays[self._row_being_fit] + v0 = self._v0[self._row_being_fit] + E0 = self._E0[self._row_being_fit] + deltaE = self._deltaE[self._row_being_fit] + deltaQ = self._deltaQ[self._row_being_fit] # Expand arrays to match shape of yCentersMask v0 = v0 * np.ones(shapeOfArrays) diff --git a/tests/unit/analysis/test_analysis_reduction.py 
b/tests/unit/analysis/test_analysis_reduction.py index 17a4fcf0..59627f5a 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -28,20 +28,20 @@ def test_calculate_kinematics(self): [110.5, 111.5, 112.5, 113.5, 114.5], [110.5, 111.5, 112.5, 113.5, 114.5], ]) - v0, E0, delta_E, delta_Q = alg._calculate_kinematics(dataX) - np.testing.assert_allclose(v0, np.array([[0.12095, 0.11964, 0.11835, 0.11709, 0.11586], + alg._set_kinematic_arrays(dataX) + np.testing.assert_allclose(alg._v0, np.array([[0.12095, 0.11964, 0.11835, 0.11709, 0.11586], [0.11988, 0.11858, 0.11732, 0.11608, 0.11487], [0.11948, 0.1182 , 0.11694, 0.11571, 0.11451], ]), atol=1e-4) - np.testing.assert_allclose(E0, np.array([[76475.65823, 74821.94729, 73221.30191, 71671.47572, 70170.33998], + np.testing.assert_allclose(alg._E0, np.array([[76475.65823, 74821.94729, 73221.30191, 71671.47572, 70170.33998], [75122.06378, 73511.83023, 71952.81999, 70442.88326, 68979.98182], [74627.68443, 73033.23536, 71489.34475, 69993.89741, 68544.88764], ]), atol=1e-4) - np.testing.assert_allclose(delta_E, np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], + np.testing.assert_allclose(alg._deltaE, np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], [70216.06378, 68605.83023, 67046.81999, 65536.88326, 64073.98182], [69721.68443, 68127.23536, 66583.34475, 65087.89741, 63638.88764], ]), atol=1e-4) - np.testing.assert_allclose(delta_Q, np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], + np.testing.assert_allclose(alg._deltaQ, np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], [226.21278, 224.18766, 222.20618, 220.26696, 218.36867], [226.07138, 224.05877, 222.08939, 220.16185, 218.27485], ]), atol=1e-4) @@ -54,15 +54,15 @@ def test_calculate_y_spaces(self): [110.5, 111.5, 112.5, 113.5, 114.5], [110.5, 111.5, 112.5, 113.5, 114.5], ]) - delta_Q = np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], + alg._deltaQ = np.array([[227.01905, 224.95887, 222.94348, 220.97148, 219.04148], [226.21278, 224.18766, 222.20618, 220.26696, 218.36867], [226.07138, 224.05877, 222.08939, 220.16185, 218.27485], ]) - delta_E = np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], + alg._deltaE = np.array([[71569.65823, 69915.94729, 68315.30191, 66765.47572, 65264.33998], [70216.06378, 68605.83023, 67046.81999, 65536.88326, 64073.98182], [69721.68443, 68127.23536, 66583.34475, 65087.89741, 63638.88764], ]) - y_spaces = alg._calculate_y_spaces(dataX, delta_Q, delta_E) + y_spaces = alg._calculate_y_spaces(dataX) y_spaces_expected = np.array( [[[-38.0885,-38.12637,-38.16414,-38.20185,-38.23948], [791.54274,779.75738,768.21943,756.92089,745.85436], @@ -87,6 +87,8 @@ def test_fit_neutron_compton_profiles_number_of_calls(self): self.assertEqual(alg._fit_neutron_compton_profiles_to_row.call_count, 3) + # def test_neutron_compton_profile_fit_function(self): + def test_fit_neutron_compton_profiles_to_row(self): alg = VesuvioAnalysisRoutine() alg._workspace_being_fit = MagicMock() @@ -127,21 +129,17 @@ def test_fit_neutron_compton_profiles_to_row(self): alg._create_emtpy_ncp_workspace = MagicMock(return_value = None) alg._update_workspace_data() - # Create mock for storing ncp total result + # Create arrays for storing ncp and fse total result ncp_total_array = np.zeros_like(alg._dataY) - - def pick_ncp_row_result(row): - return ncp_total_array[row] - - ncp_total_ws_mock = MagicMock() - 
ncp_total_ws_mock.dataY.side_effect = pick_ncp_row_result + fse_total_array = np.zeros_like(alg._dataY) alg._fit_profiles_workspaces = { - "total": ncp_total_ws_mock, - "1": MagicMock(), - "12": MagicMock(), - "16": MagicMock(), - "27": MagicMock() + "total": MagicMock(dataY=lambda row: ncp_total_array[row]), + "1": MagicMock(), "12": MagicMock(), "16": MagicMock(), "27": MagicMock() + } + alg._fit_fse_workspaces = { + "total": MagicMock(dataY=lambda row: fse_total_array[row]), + "1": MagicMock(), "12": MagicMock(), "16": MagicMock(), "27": MagicMock() } # Fit ncp alg._row_being_fit = 0 @@ -164,6 +162,8 @@ def pick_ncp_row_result(row): [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] ]) np.testing.assert_allclose(alg._fit_parameters, expected_fit_parameters, atol=1e-6) + import re + print(re.sub(f"\s+", ", ", str(fse_total_array))) def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): From 035ba65068257f4975c35841300f08c425f0e7f9 Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Tue, 26 Nov 2024 16:40:28 +0000 Subject: [PATCH 5/9] Add unit test for NCP fitting function This unit test tests the fit function itself, rather than the fitting engine --- src/mvesuvio/analysis_reduction.py | 10 +++---- .../unit/analysis/test_analysis_reduction.py | 30 +++++++++++++++---- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index df2bff79..ed97abed 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -206,7 +206,7 @@ def _update_workspace_data(self): self._set_kinematic_arrays(self._dataX) # Calculate y space after kinematics - self._y_space_arrays = self._calculate_y_spaces(self._dataX) + self._set_y_space_arrays(self._dataX) self._fit_parameters = np.zeros((len(self._dataY), 3 * self._profiles_table.rowCount() + 3)) self._row_being_fit = 0 @@ -347,8 +347,7 @@ def _set_kinematic_arrays(self, dataX): return - def _calculate_y_spaces(self, dataX): - + def _set_y_space_arrays(self, dataX): # Prepare arrays to broadcast dataX = dataX[np.newaxis, :, :] delta_Q = self._deltaQ[np.newaxis, :, :] @@ -358,8 +357,9 @@ def _calculate_y_spaces(self, dataX): energy_recoil = np.square(H_BAR * delta_Q) / 2.0 / masses y_spaces = masses / H_BAR**2 / delta_Q * (delta_E - energy_recoil) - # First axis selects spectra - return np.swapaxes(y_spaces, 0, 1) + # Swap axis so that first axis selects spectra + self._y_space_arrays = np.swapaxes(y_spaces, 0, 1) + return def _save_plots(self): diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 59627f5a..2633fcd7 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -46,7 +46,7 @@ def test_calculate_kinematics(self): [226.07138, 224.05877, 222.08939, 220.16185, 218.27485], ]), atol=1e-4) - def test_calculate_y_spaces(self): + def test_set_y_space_arrays(self): alg = VesuvioAnalysisRoutine() alg._masses = np.array([1, 12, 16]) dataX = np.array([ @@ -62,7 +62,7 @@ def test_calculate_y_spaces(self): [70216.06378, 68605.83023, 67046.81999, 65536.88326, 64073.98182], [69721.68443, 68127.23536, 66583.34475, 65087.89741, 63638.88764], ]) - y_spaces = alg._calculate_y_spaces(dataX) + alg._set_y_space_arrays(dataX) y_spaces_expected = np.array( [[[-38.0885,-38.12637,-38.16414,-38.20185,-38.23948], [791.54274,779.75738,768.21943,756.92089,745.85436], @@ -75,7 +75,7 @@ def test_calculate_y_spaces(self): 
[[-39.25409,-39.28749,-39.32085,-39.35414,-39.38735], [772.34348,760.87331,749.64145,738.64055,727.86342], [1067.46987,1051.84087,1036.53683,1021.54771,1006.8637,]]]) - np.testing.assert_allclose(y_spaces, y_spaces_expected, atol=1e-4) + np.testing.assert_allclose(alg._y_space_arrays, y_spaces_expected, atol=1e-4) def test_fit_neutron_compton_profiles_number_of_calls(self): @@ -87,7 +87,27 @@ def test_fit_neutron_compton_profiles_number_of_calls(self): self.assertEqual(alg._fit_neutron_compton_profiles_to_row.call_count, 3) - # def test_neutron_compton_profile_fit_function(self): + def test_neutron_compton_profile_fit_function(self): + alg = VesuvioAnalysisRoutine() + alg._dataX = np.arange(113, 430, 6).reshape(1, -1) + alg._instrument_params = np.array([ + [144, 144, 54.1686, -0.41, 11.005, 0.720184] + ]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1, 12, 16, 27]) + alg._set_kinematic_arrays(alg._dataX) + alg._set_y_space_arrays(alg._dataX) + example_fit_parameters = np.array([7.1, 5.05, 0.02, 0.22, 12.71, 1.0, 0.0, 8.76, -1.1, 0.3, 13.897, 0.64]) + alg._row_being_fit = 0 + NCP, FSE = alg._neutron_compton_profiles(example_fit_parameters) + expected_NCP = np.array([ + [0.00004, 0.000059, 0.00009, 0.000138, 0.000216, 0.000336, 0.000648, 0.000944, 0.001349, 0.001891, 0.002596, 0.003491, 0.004595, 0.005921, 0.007464, 0.009194, 0.011051, 0.012936, 0.014711, 0.016212, 0.017269, 0.017734, 0.017518, 0.016611, 0.01509, 0.013112, 0.010879, 0.008601, 0.006465, 0.004605, 0.003092, 0.001939, 0.001119, 0.000574, 0.00024, 0.000055, -0.000033, -0.000064, -0.000064, -0.00005, -0.000033, -0.000017, -0.000004, 0.000005, 0.000012, 0.000016, 0.000018, 0.000027, 0.000025, 0.000023, 0.000022, 0.000021, 0.00002], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000002, 0.000002, 0.000003, 0.000004, 0.000005, 0.000006, 0.000008, 0.000011, 0.000043, 0.000149, 0.00034, 0.000926, 0.002246, 0.003149, 0.00241, 0.000926, 0.00007, 0.000067, 0.00003, 0.000019, 0.000014, 0.000011, 0.000009], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], + [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000001, 0.000002, 0.000002, 0.000002, 0.000003, 0.000004, 0.000005, 0.000006, 0.000008, 0.000011, 0.000017, 0.000021, 0.000052, 0.000152, 0.000455, 0.003186, 0.006654, 0.003274, 0.000527, 0.000117, 0.000058, 0.000037, 0.000026, 0.000019, 0.000015] + ]) + np.testing.assert_allclose(NCP, expected_NCP, atol=1e-6) + def test_fit_neutron_compton_profiles_to_row(self): alg = VesuvioAnalysisRoutine() @@ -162,8 +182,6 @@ def test_fit_neutron_compton_profiles_to_row(self): [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
]) np.testing.assert_allclose(alg._fit_parameters, expected_fit_parameters, atol=1e-6) - import re - print(re.sub(f"\s+", ", ", str(fse_total_array))) def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): From 4e4c830583652b967b2747e82dff0725d4878ad4 Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Wed, 27 Nov 2024 11:43:46 +0000 Subject: [PATCH 6/9] Improve unit test for fitting ncp Instead of hard-coded values it's better to compare the ncp fit to synthetic (original) ncp to make sure that optimizer is working correctly --- .../unit/analysis/test_analysis_reduction.py | 91 +++++++++---------- 1 file changed, 45 insertions(+), 46 deletions(-) diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 2633fcd7..59bbbb9a 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -9,8 +9,9 @@ import scipy import re - np.set_printoptions(suppress=True, precision=6, linewidth=200) +np.random.seed(4) + class TestAnalysisReduction(unittest.TestCase): def setUp(self): @@ -111,48 +112,51 @@ def test_neutron_compton_profile_fit_function(self): def test_fit_neutron_compton_profiles_to_row(self): alg = VesuvioAnalysisRoutine() - alg._workspace_being_fit = MagicMock() - alg._workspace_being_fit.name.return_value = "test_workspace" - alg._workspace_being_fit.getNumberHistograms.return_value = 3 - alg._workspace_being_fit.extractY.return_value = np.array( - [[-0.0001, 0.0016, 0.0018, -0.0004, 0.0008, 0.002, 0.0025, 0.0033, 0.0012, 0.0012, 0.0024, 0.0035, 0.0019, 0.0069, 0.008, 0.0097, 0.0104, 0.0124, 0.0147, 0.0165, 0.0163, 0.0195, 0.0185, 0.0149, 0.0143, 0.0145, 0.0109, 0.0085, 0.0065, 0.0043, 0.0029, 0.0023, 0.001, -0.0001, 0.0009, 0.0004, -0., 0.0001, 0.0008, 0.0001, -0.0007, 0.0011, 0.0032, 0.0057, 0.0094, 0.0036, 0.0012, -0.0023, -0.0015, -0.0006, 0.0006, 0.0011, 0.0004, 0.0009], - [0.0008, 0., 0., 0., 0., 0., 0.0007, 0.0033, 0.0045, 0.0029, 0.0008, 0.0026, 0.0019, 0.0004, 0.0044, 0.0057, 0.0083, 0.0115, 0.012, 0.013, 0.0168, 0.0191, 0.0167, 0.0165, 0.0165, 0.018, 0.0131, 0.0131, 0.0111, 0.0069, 0.0045, 0.0049, 0.0008, 0.0022, 0.0017, -0., 0.0003, -0.0007, 0.0001, -0., 0.0009, 0.0017, 0.0033, 0.0061, 0.0097, 0.0044, 0.0016, 0.0003, -0.0002, 0.0008, -0.0009, 0.0004, 0.0001, 0.0025], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] - ) - alg._workspace_being_fit.extractX.return_value = np.array( - [[113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], - [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.]] - ) - alg._workspace_being_fit.extractE.return_value = np.array( - [[0.0015, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 
0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0016], - [0.0014, 0.0014, 0.0014, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0012, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0012, 0.0012, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.001, 0.0011, 0.0011, 0.0011, 0.001, 0.001, 0.001, 0.001, 0.0009, 0.0009, 0.0009, 0.0009, 0.0009, 0.0016]] - ) alg._instrument_params = np.array([ [144, 144, 54.1686, -0.41, 11.005, 0.720184], - [145, 145, 52.3407, -0.53, 11.005, 0.717311] + [145, 145, 52.3407, -0.53, 11.005, 0.717311], + [146, 146, 50.7811, -1.14, 11.005, 0.742482] ]) alg._resolution_params = load_resolution(alg._instrument_params) alg._masses = np.array([1, 12, 16, 27]) - alg._initial_fit_parameters = np.array([1, 4.7, 0, 1, 12.71, 0.0, 1, 8.76, 0.0, 1, 13.897, 0.0]) + + alg._dataX = np.array([ + [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], + [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], + [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], + ]) + alg._set_kinematic_arrays(alg._dataX) + alg._set_y_space_arrays(alg._dataX) + + synthetic_ncp = np.zeros_like(alg._dataX) + example_fit_parameters = np.array([ + [7.1, 5.05, 0.02, 0.22, 12.71, 1.0, 0.0, 8.76, -1.1, 0.3, 13.897, 0.64], + [5, 4, -1, 0.5, 12, -2, 0.8, 8., -1.1, 0.6, 14., 0.5] + ]) + for i, pars in enumerate(example_fit_parameters): + alg._row_being_fit = i + NCP, FSE = alg._neutron_compton_profiles(pars) + synthetic_ncp[i] = NCP.sum(axis=0) + + alg._dataY = synthetic_ncp.copy() + # Add noise (except for third row that is masked) + alg._dataY[:2, :] += 0.002*(np.random.random_sample((2, synthetic_ncp.shape[-1]))-0.5) + # Mask tof range on second row + alg._dataY[1, 5:15] = np.zeros(10) + alg._dataE = np.full_like(alg._dataX, 0.0015) + + alg._initial_fit_parameters = np.array([1, 5.0, 0.1, 1, 12.0, 0.1, 1, 8.0, 0.1, 1, 13.0, 0.1]) alg._initial_fit_bounds = np.array([ [0, None], [3, 6], [-3, 1], - [0, None], [12.71, 12.71], [-3, 1], - [0, None], [8.76, 8.76], [-3, 1], - [0, None], [13.897, 13.897], [-3, 1], + [0, None], [9, 13], [-3, 1], + [0, None], [6, 11], [-3, 1], + [0, None], [11, 15], [-3, 1], ]) alg._constraints = () - # Set up several fit arguments - alg._profiles_table = MagicMock() - alg._profiles_table.column.return_value = ['1', '12', '16', '27'] - alg._profiles_table.rowCount.return_value = 4 - alg._create_emtpy_ncp_workspace = MagicMock(return_value = None) - alg._update_workspace_data() - - # Create arrays for storing ncp and fse total result + 
# Create arrays for storing results ncp_total_array = np.zeros_like(alg._dataY) fse_total_array = np.zeros_like(alg._dataY) - alg._fit_profiles_workspaces = { "total": MagicMock(dataY=lambda row: ncp_total_array[row]), "1": MagicMock(), "12": MagicMock(), "16": MagicMock(), "27": MagicMock() @@ -161,27 +165,22 @@ def test_fit_neutron_compton_profiles_to_row(self): "total": MagicMock(dataY=lambda row: fse_total_array[row]), "1": MagicMock(), "12": MagicMock(), "16": MagicMock(), "27": MagicMock() } - # Fit ncp + # Mock methods that are not used + alg._table_fit_results = MagicMock() + alg._profiles_table = MagicMock() + alg._fit_parameters = MagicMock() + + # Fit alg._row_being_fit = 0 alg._fit_neutron_compton_profiles_to_row() alg._row_being_fit = 1 alg._fit_neutron_compton_profiles_to_row() alg._row_being_fit = 2 alg._fit_neutron_compton_profiles_to_row() - # Compare results - expected_total_ncp_fits = np.array([ - [0.00004, 0.000059, 0.000089, 0.000138, 0.000215, 0.000335, 0.000647, 0.000943, 0.001347, 0.001888, 0.002593, 0.003486, 0.004589, 0.005913, 0.007453, 0.009182, 0.011037, 0.01292, 0.014694, 0.016196, 0.017254, 0.017722, 0.017509, 0.016606, 0.01509, 0.013115, 0.010885, 0.00861, 0.006475, 0.004615, 0.003101, 0.001949, 0.001128, 0.000583, 0.000251, 0.000068, -0.000016, -0.000041, -0.000005, 0.000119, 0.000355, 0.00105, 0.002667, 0.006238, 0.008899, 0.004131, 0.000602, -0.000026, 0.000111, 0.000078, 0.000061, 0.00005, 0.000043, 0.00004], - [0.000021, 0.000029, 0.000042, 0.000062, 0.000095, 0.000148, 0.000324, 0.000486, 0.00072, 0.001046, 0.001492, 0.002084, 0.002851, 0.003818, 0.005006, 0.006424, 0.008066, 0.009895, 0.011838, 0.013776, 0.015551, 0.016978, 0.017872, 0.018092, 0.017566, 0.016325, 0.014492, 0.012265, 0.009878, 0.007552, 0.005464, 0.003723, 0.002369, 0.001389, 0.00073, 0.000322, 0.000095, -0.000012, -0.000016, 0.000117, 0.000383, 0.001178, 0.003156, 0.006523, 0.009388, 0.00489, 0.00077, -0.000037, 0.000125, 0.000087, 0.000067, 0.000055, 0.000047, 0.000043], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] - ]) - np.testing.assert_allclose(ncp_total_array, expected_total_ncp_fits, atol=1e-6) - - expected_fit_parameters = np.array([ - [144., 7.095403, 5.051053, 0.015996, 0.218192, 12.71, 1., 0., 8.76, -1.091821, 0.29291, 13.897, 0.639245, 0.848425, 21.], - [145., 6.825532, 5.073835, -0.087401, 0.29456, 12.71, 1., 0., 8.76, -0.14256, 0.26776, 13.897, -2.563441, 1.246451, 20.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.] 
- ]) - np.testing.assert_allclose(alg._fit_parameters, expected_fit_parameters, atol=1e-6) + + np.testing.assert_allclose(synthetic_ncp, ncp_total_array, atol=1e-3) + # Check masked row is correctly ignored + np.testing.assert_allclose(synthetic_ncp[-1], np.zeros(synthetic_ncp.shape[-1])) def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): From 61c81b2cbe06d1726249faa9962d4ea81a2d31e5 Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Wed, 27 Nov 2024 16:10:31 +0000 Subject: [PATCH 7/9] Make calculation of resolution a bit clearer --- src/mvesuvio/analysis_reduction.py | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index ed97abed..c78307c5 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -599,15 +599,14 @@ def _neutron_compton_profiles(self, pars): centers = pars[2::3].reshape(-1, 1) masses = self._masses.reshape(-1, 1) - # v0, E0, deltaE, deltaQ = self._kinematic_arrays[self._row_being_fit] E0 = self._E0[self._row_being_fit] deltaQ = self._deltaQ[self._row_being_fit] + gaussian_width = self.calculate_gaussian_resolution(centers) + lorentzian_width = self.calculate_lorentzian_resolution(centers) + total_gaussian_width = np.sqrt(widths**2 + gaussian_width**2) - gaussRes, lorzRes = self.caculateResolutionForEachMass(centers) - totalGaussWidth = np.sqrt(widths**2 + gaussRes**2) - - JOfY = scipy.special.voigt_profile(self._y_space_arrays[self._row_being_fit] - centers, totalGaussWidth, lorzRes) + JOfY = scipy.special.voigt_profile(self._y_space_arrays[self._row_being_fit] - centers, total_gaussian_width, lorentzian_width) FSE = ( -numericalThirdDerivative(self._y_space_arrays[self._row_being_fit], JOfY) @@ -620,15 +619,6 @@ def _neutron_compton_profiles(self, pars): return NCP, FSE - def caculateResolutionForEachMass(self, centers): - """Calculates the gaussian and lorentzian resolution - output: two column vectors, each row corresponds to each mass""" - - gaussianResWidth = self.calcGaussianResolution(centers) - lorentzianResWidth = self.calcLorentzianResolution(centers) - return gaussianResWidth, lorentzianResWidth - - def kinematicsAtYCenters(self, centers): """v0, E0, deltaE, deltaQ at the peak of the ncpTotal for each mass""" @@ -655,7 +645,7 @@ def kinematicsAtYCenters(self, centers): return v0, E0, deltaE, deltaQ - def calcGaussianResolution(self, centers): + def calculate_gaussian_resolution(self, centers): masses = self._masses.reshape(-1, 1) v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] @@ -700,7 +690,7 @@ def calcGaussianResolution(self, centers): return gaussianResWidth - def calcLorentzianResolution(self, centers): + def calculate_lorentzian_resolution(self, centers): masses = self._masses.reshape(-1, 1) v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] From 91e635f4f77a52293fee31e6adf7eeafc3f9d86d Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Thu, 28 Nov 2024 18:41:18 +0000 Subject: [PATCH 8/9] Add unit test for resolution and error function Took out calculation of resolution function for lorentzian out of fit. Cleaned error function and added unit test that checks cases when errors are present and not present. 
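
With this change the resolution widths are precomputed per spectrum over the
whole TOF axis and only looked up at fit time, at the y bin closest to each
fitted centre. The lookup pattern, shown in isolation with made-up shapes and
values (illustrative sketch, not the production code):

    import numpy as np

    n_masses, n_bins = 4, 6
    # Precomputed width per (mass, y bin) for one spectrum
    resolution = np.arange(n_masses * n_bins, dtype=float).reshape(n_masses, n_bins)
    # y-space value of every bin for each mass, and the fitted peak centres
    y_spaces = np.linspace(-30.0, 30.0, n_bins) * np.ones((n_masses, 1))
    centers = np.array([[-1.0], [5.0], [12.0], [20.0]])

    # Index of the bin whose y value is closest to each centre (one per mass)
    closest = np.abs(y_spaces - centers).argmin(axis=1, keepdims=True)
    # Width evaluated at the peak of each mass profile, shape (n_masses, 1)
    width_at_centers = np.take_along_axis(resolution, closest, axis=1)
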
--- src/mvesuvio/analysis_reduction.py | 93 +++++----- .../unit/analysis/test_analysis_reduction.py | 168 +++++++++++++++--- 2 files changed, 184 insertions(+), 77 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index c78307c5..131eb3e4 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -205,6 +205,8 @@ def _update_workspace_data(self): self._dataE = self._workspace_being_fit.extractE() self._set_kinematic_arrays(self._dataX) + self._set_gaussian_resolution() + self._set_lorentzian_resolution() # Calculate y space after kinematics self._set_y_space_arrays(self._dataX) @@ -538,7 +540,7 @@ def _fit_neutron_compton_profiles_to_row(self): return result = scipy.optimize.minimize( - self.errorFunction, + self._error_function, self._initial_fit_parameters, method="SLSQP", bounds=self._initial_fit_bounds, @@ -570,19 +572,21 @@ def _fit_neutron_compton_profiles_to_row(self): return - def errorFunction(self, pars): + def _error_function(self, pars): """Error function to be minimized, in TOF space""" ncp_for_each_mass, fse_for_each_mass = self._neutron_compton_profiles(pars) + ncp_total = np.sum(ncp_for_each_mass, axis=0) + data_y = self._dataY[self._row_being_fit] + data_e = self._dataE[self._row_being_fit] - # Ignore any masked values from Jackknife or masked tof range - zerosMask = self._dataY[self._row_being_fit] == 0 - ncp_total = ncp_total[~zerosMask] - data_y = self._dataY[self._row_being_fit, ~zerosMask] - data_e = self._dataE[self._row_being_fit, ~zerosMask] + # Ignore any masked values on tof range + ncp_total = ncp_total[np.nonzero(data_y)] + data_y = data_y[np.nonzero(data_y)] + data_e = data_e[np.nonzero(data_y)] - if np.all(self._dataE[self._row_being_fit] == 0): # When errors not present + if np.all(data_e == 0): # When errors not present return np.sum((ncp_total - data_y) ** 2) return np.sum((ncp_total - data_y) ** 2 / data_e**2) @@ -602,8 +606,8 @@ def _neutron_compton_profiles(self, pars): E0 = self._E0[self._row_being_fit] deltaQ = self._deltaQ[self._row_being_fit] - gaussian_width = self.calculate_gaussian_resolution(centers) - lorentzian_width = self.calculate_lorentzian_resolution(centers) + gaussian_width = self._get_gaussian_resolution(centers) + lorentzian_width = self._get_lorentzian_resolution(centers) total_gaussian_width = np.sqrt(widths**2 + gaussian_width**2) JOfY = scipy.special.voigt_profile(self._y_space_arrays[self._row_being_fit] - centers, total_gaussian_width, lorentzian_width) @@ -619,37 +623,19 @@ def _neutron_compton_profiles(self, pars): return NCP, FSE - def kinematicsAtYCenters(self, centers): - """v0, E0, deltaE, deltaQ at the peak of the ncpTotal for each mass""" - - shapeOfArrays = centers.shape - proximityToYCenters = np.abs(self._y_space_arrays[self._row_being_fit] - centers) - yClosestToCenters = proximityToYCenters.min(axis=1).reshape(-1, 1) - yCentersMask = proximityToYCenters == yClosestToCenters - - v0 = self._v0[self._row_being_fit] - E0 = self._E0[self._row_being_fit] - deltaE = self._deltaE[self._row_being_fit] - deltaQ = self._deltaQ[self._row_being_fit] - - # Expand arrays to match shape of yCentersMask - v0 = v0 * np.ones(shapeOfArrays) - E0 = E0 * np.ones(shapeOfArrays) - deltaE = deltaE * np.ones(shapeOfArrays) - deltaQ = deltaQ * np.ones(shapeOfArrays) + def _get_gaussian_resolution(self, centers): + proximity_to_y_centers = np.abs(self._y_space_arrays[self._row_being_fit] - centers) + gauss_resolution = 
self._gaussian_resolution[self._row_being_fit] + return np.take_along_axis(gauss_resolution, proximity_to_y_centers.argmin(axis=1, keepdims=True), axis=1) - v0 = v0[yCentersMask].reshape(shapeOfArrays) - E0 = E0[yCentersMask].reshape(shapeOfArrays) - deltaE = deltaE[yCentersMask].reshape(shapeOfArrays) - deltaQ = deltaQ[yCentersMask].reshape(shapeOfArrays) - return v0, E0, deltaE, deltaQ - - def calculate_gaussian_resolution(self, centers): - masses = self._masses.reshape(-1, 1) - v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) - det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] - dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = self._resolution_params[self._row_being_fit] + def _set_gaussian_resolution(self): + masses = self._masses.reshape(-1, 1, 1) + v0 = self._v0 + E0 = self._E0 + delta_Q = self._deltaQ + det, plick, angle, T0, L0, L1 = np.hsplit(self._instrument_params, 6) + dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = np.hsplit(self._resolution_params, 6) angle = angle * np.pi / 180 @@ -663,7 +649,7 @@ def calculate_gaussian_resolution(self, centers): + dWdTOF**2 * dTOF**2 + dWdL1**2 * dL1**2 + dWdL0**2 * dL0**2 - ) + ) * np.ones((masses.size, 1, 1)) # conversion from meV^2 to A^-2, dydW = (M/q)^2 dW2 *= (masses / H_BAR**2 / delta_Q) ** 2 @@ -687,20 +673,28 @@ def calculate_gaussian_resolution(self, centers): # in A-1 #same as dy^2 = (dy/dw)^2*dw^2 + (dy/dq)^2*dq^2 gaussianResWidth = np.sqrt(dW2 + dQ2) - return gaussianResWidth + self._gaussian_resolution = np.swapaxes(gaussianResWidth, 0, 1) + return - def calculate_lorentzian_resolution(self, centers): - masses = self._masses.reshape(-1, 1) - v0, E0, delta_E, delta_Q = self.kinematicsAtYCenters(centers) - det, plick, angle, T0, L0, L1 = self._instrument_params[self._row_being_fit] - dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = self._resolution_params[self._row_being_fit] + def _get_lorentzian_resolution(self, centers): + proximity_to_y_centers = np.abs(self._y_space_arrays[self._row_being_fit] - centers) + lorentzian_resolution = self._lorentzian_resolution[self._row_being_fit] + return np.take_along_axis(lorentzian_resolution, proximity_to_y_centers.argmin(axis=1, keepdims=True), axis=1) + + + def _set_lorentzian_resolution(self): + masses = self._masses.reshape(-1, 1, 1) + E0 = self._E0 + delta_Q = self._deltaQ + det, plick, angle, T0, L0, L1 = np.hsplit(self._instrument_params, 6) + dE1, dTOF, dTheta, dL0, dL1, dE1_lorz = np.hsplit(self._resolution_params, 6) angle = angle * np.pi / 180 - dWdE1_lor = (1.0 + (E0 / ENERGY_FINAL) ** 1.5 * (L1 / L0)) ** 2 + dWdE1_lor = (1.0 + (E0 / ENERGY_FINAL) ** 1.5 * (L1 / L0)) ** 2 * np.ones((masses.size, 1, 1)) # conversion from meV^2 to A^-2 - dWdE1_lor *= (masses / H_BAR**2 / delta_Q) ** 2 + dWdE1_lor *= (masses / H_BAR**2 / delta_Q) ** 2 dQdE1_lor = ( 1.0 @@ -710,7 +704,8 @@ def calculate_lorentzian_resolution(self, centers): dQdE1_lor *= (NEUTRON_MASS / H_BAR**2 / delta_Q) ** 2 lorentzianResWidth = np.sqrt(dWdE1_lor + dQdE1_lor) * dE1_lorz # in A-1 - return lorentzianResWidth + self._lorentzian_resolution = np.swapaxes(lorentzianResWidth, 0, 1) + return def _get_parsed_constraints(self): diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 59bbbb9a..61bf3ce4 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -97,6 +97,8 @@ def test_neutron_compton_profile_fit_function(self): alg._resolution_params = load_resolution(alg._instrument_params) 
alg._masses = np.array([1, 12, 16, 27]) alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + alg._set_lorentzian_resolution() alg._set_y_space_arrays(alg._dataX) example_fit_parameters = np.array([7.1, 5.05, 0.02, 0.22, 12.71, 1.0, 0.0, 8.76, -1.1, 0.3, 13.897, 0.64]) alg._row_being_fit = 0 @@ -110,6 +112,39 @@ def test_neutron_compton_profile_fit_function(self): np.testing.assert_allclose(NCP, expected_NCP, atol=1e-6) + def test_error_function(self): + alg = VesuvioAnalysisRoutine() + alg._dataX = np.arange(113, 430, 6).reshape(1, -1) + alg._instrument_params = np.array([ + [144, 144, 54.1686, -0.41, 11.005, 0.720184] + ]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1, 12, 16, 27]) + alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + alg._set_lorentzian_resolution() + alg._set_y_space_arrays(alg._dataX) + example_fit_parameters = np.array([7.1, 5.05, 0.02, 0.22, 12.71, 1.0, 0.0, 8.76, -1.1, 0.3, 13.897, 0.64]) + alg._row_being_fit = 0 + + NCP, FSE = alg._neutron_compton_profiles(example_fit_parameters) + alg._dataY = NCP.sum(axis=0) + 0.002*(np.random.random_sample(alg._dataX.shape)-0.5) + + alg._dataE = np.zeros_like(alg._dataX) + chi2_without_errors = alg._error_function(example_fit_parameters) + + alg._dataE = np.full_like(alg._dataX, 0.0015, dtype=np.double) + chi2_with_errors = alg._error_function(example_fit_parameters) + + alg._dataY[:, 5:15] = 0 + chi2_with_errors_with_tof_masked = alg._error_function(example_fit_parameters) + + self.assertEqual(chi2_without_errors, chi2_with_errors * 0.0015**2) + self.assertEqual(chi2_without_errors, 1.762715850011478e-05) + self.assertEqual(chi2_with_errors, 7.834292666717679) + self.assertEqual(chi2_with_errors_with_tof_masked, 5.5864483595799195) + + def test_fit_neutron_compton_profiles_to_row(self): alg = VesuvioAnalysisRoutine() alg._instrument_params = np.array([ @@ -126,6 +161,8 @@ def test_fit_neutron_compton_profiles_to_row(self): [113., 119., 125., 131., 137., 143., 149., 155., 161., 167., 173., 179., 185., 191., 197., 203., 209., 215., 221., 227., 233., 239., 245., 251., 257., 263., 269., 275., 281., 287., 293., 299., 305., 311., 317., 323., 329., 335., 341., 347., 353., 359., 365., 371., 377., 383., 389., 395., 401., 407., 413., 419., 425., 429.], ]) alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + alg._set_lorentzian_resolution() alg._set_y_space_arrays(alg._dataX) synthetic_ncp = np.zeros_like(alg._dataX) @@ -143,7 +180,7 @@ def test_fit_neutron_compton_profiles_to_row(self): alg._dataY[:2, :] += 0.002*(np.random.random_sample((2, synthetic_ncp.shape[-1]))-0.5) # Mask tof range on second row alg._dataY[1, 5:15] = np.zeros(10) - alg._dataE = np.full_like(alg._dataX, 0.0015) + alg._dataE = np.full_like(alg._dataX, 0.0015, dtype=np.double) alg._initial_fit_parameters = np.array([1, 5.0, 0.1, 1, 12.0, 0.1, 1, 8.0, 0.1, 1, 13.0, 0.1]) alg._initial_fit_bounds = np.array([ @@ -185,37 +222,31 @@ def test_fit_neutron_compton_profiles_to_row(self): def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): alg = VesuvioAnalysisRoutine() - alg._workspace_being_fit = MagicMock() - alg._workspace_being_fit.name.return_value = "test_workspace" - alg._workspace_being_fit.getNumberHistograms.return_value = 1 - dataX = np.arange(113, 430).reshape(1, -1) - dataE = np.full_like(dataX, 0.0015) - dataY = scipy.special.voigt_profile(dataX - 235, 30, 0) + 0.005*(np.random.random_sample(dataX.shape)-0.5) - - # Mask TOF range - 
cut_off_idx = 100 - dataY[:, :cut_off_idx] = 0 - - alg._workspace_being_fit.extractY.return_value = dataY - alg._workspace_being_fit.extractX.return_value = dataX - alg._workspace_being_fit.extractE.return_value = dataE + alg._masses = np.array([1]) alg._instrument_params = np.array([[144, 144, 54.1686, -0.41, 11.005, 0.720184]]) alg._resolution_params = load_resolution(alg._instrument_params) - alg._masses = np.array([1]) alg._initial_fit_parameters = np.array([1, 5.2, 0]) alg._initial_fit_bounds = np.array([[0, None], [3, 6], [None, None]]) alg._constraints = () - - # Set up several fit arguments + alg._table_fit_results = MagicMock() alg._profiles_table = MagicMock() - alg._profiles_table.column.return_value = ['1'] - alg._profiles_table.rowCount.return_value = 1 - alg._create_emtpy_ncp_workspace = MagicMock() - alg._update_workspace_data() + alg._fit_parameters = MagicMock() + + alg._dataX = np.arange(113, 430).reshape(1, -1) + alg._dataE = np.full_like(alg._dataX, 0.0015, dtype=np.double) + alg._dataY = scipy.special.voigt_profile(alg._dataX - 235, 30, 0) + 0.005*(np.random.random_sample(alg._dataX.shape)-0.5) + # Mask TOF range + cut_off_idx = 100 + alg._dataY[:, :cut_off_idx] = 0 + alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + alg._set_lorentzian_resolution() + alg._set_y_space_arrays(alg._dataX) # Create mock for storing ncp total result - ncp_array_masked = np.zeros_like(dataY) + ncp_array_masked = np.zeros_like(alg._dataY) alg._fit_profiles_workspaces = {"total": MagicMock(dataY=lambda arg: ncp_array_masked), "1": MagicMock()} + alg._fit_fse_workspaces = {"total": MagicMock(), "1": MagicMock()} # Fit ncp alg._row_being_fit = 0 @@ -224,10 +255,15 @@ def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): # Now cut range so that zeros are not part of dataY # (Still need to leave a padding with 6 zeros due to numerical derivative in ncp) - alg._workspace_being_fit.extractY.return_value = dataY[:, cut_off_idx - 6:].reshape(1, -1) - alg._workspace_being_fit.extractX.return_value = dataX[:, cut_off_idx - 6:].reshape(1, -1) - alg._workspace_being_fit.extractE.return_value = dataE[:, cut_off_idx - 6:].reshape(1, -1) - alg._update_workspace_data() + alg._dataY = alg._dataY[:, cut_off_idx - 6:].reshape(1, -1) + alg._dataX = alg._dataX[:, cut_off_idx - 6:].reshape(1, -1) + alg._dataE = alg._dataE[:, cut_off_idx - 6:].reshape(1, -1) + + # Run methods that depend on dataX + alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + alg._set_lorentzian_resolution() + alg._set_y_space_arrays(alg._dataX) ncp_array_cut = ncp_array_masked[:, cut_off_idx - 6:].reshape(1, -1) alg._fit_profiles_workspaces = {"total": MagicMock(dataY=lambda arg: ncp_array_cut), "1": MagicMock()} @@ -235,9 +271,85 @@ def test_fit_neutron_compton_profiles_to_row_with_masked_tof(self): alg._fit_neutron_compton_profiles_to_row() fit_parameters_cut = alg._fit_parameters.copy() - np.testing.assert_allclose(fit_parameters_cut, fit_parameters_masked, atol=1e-6) + np.testing.assert_allclose(fit_parameters_cut[:, 1:-2], fit_parameters_masked[:, 1:-2], atol=1e-6) np.testing.assert_allclose(ncp_array_masked[:, cut_off_idx-6:], ncp_array_cut, atol=1e-6) + def test_set_gaussian_resolution(self): + alg = VesuvioAnalysisRoutine() + alg._instrument_params = np.array([ + [144, 144, 54.1686, -0.41, 11.005, 0.720184], + [145, 145, 52.3407, -0.53, 11.005, 0.717311], + ]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1, 12]) + + alg._dataX = 
np.array([ + [227., 233., 239.], + [227., 233., 239.], + ]) + alg._set_kinematic_arrays(alg._dataX) + alg._set_gaussian_resolution() + expected_gaussian_resolution = np.array( + [[[1.066538, 1.046284, 1.028226], + [7.070845, 6.854225, 6.662274]], + [[1.076188, 1.055946, 1.037921], + [7.219413, 7.003598, 6.812526]]] + ) + np.testing.assert_allclose(expected_gaussian_resolution, alg._gaussian_resolution, atol=1e-6) + + + def test_set_lorentzian_resolution(self): + alg = VesuvioAnalysisRoutine() + alg._instrument_params = np.array([ + [144, 144, 54.1686, -0.41, 11.005, 0.720184], + [145, 145, 52.3407, -0.53, 11.005, 0.717311], + ]) + alg._resolution_params = load_resolution(alg._instrument_params) + alg._masses = np.array([1, 12]) + + alg._dataX = np.array([ + [227., 233., 239.], + [227., 233., 239.], + ]) + alg._set_kinematic_arrays(alg._dataX) + alg._set_lorentzian_resolution() + expected_lorentzian_resolution = np.array( + [[[0.119899, 0.119167, 0.118724], + [1.346864, 1.356234, 1.366692]], + [[0.124002, 0.123212, 0.122712], + [1.376647, 1.387298, 1.399038]]] + ) + np.testing.assert_allclose(expected_lorentzian_resolution, alg._lorentzian_resolution, atol=1e-6) + + + def test_get_gaussian_resolution(self): + alg = VesuvioAnalysisRoutine() + alg._gaussian_resolution = np.arange(18).reshape(2, 3, 3) + alg._y_space_arrays = np.arange(18).reshape(2, 3, 3) + + alg._row_being_fit = 0 + centers = np.array([0.5, 3.6, 7.6]).reshape(-1, 1) + np.testing.assert_allclose(alg._get_gaussian_resolution(centers), np.array([0, 4, 8]).reshape(-1, 1)) + + alg._row_being_fit = 1 + centers = np.array([11.3, 9.6, 14.7]).reshape(-1, 1) + np.testing.assert_allclose(alg._get_gaussian_resolution(centers), np.array([11, 12, 15]).reshape(-1, 1)) + + + def test_get_lorentzian_resolution(self): + alg = VesuvioAnalysisRoutine() + alg._lorentzian_resolution = np.arange(18).reshape(2, 3, 3) + alg._y_space_arrays = np.arange(18).reshape(2, 3, 3) + + alg._row_being_fit = 0 + centers = np.array([0.5, 3.6, 7.6]).reshape(-1, 1) + np.testing.assert_allclose(alg._get_lorentzian_resolution(centers), np.array([0, 4, 8]).reshape(-1, 1)) + + alg._row_being_fit = 1 + centers = np.array([11.3, 9.6, 14.7]).reshape(-1, 1) + np.testing.assert_allclose(alg._get_lorentzian_resolution(centers), np.array([11, 12, 15]).reshape(-1, 1)) + + if __name__ == "__main__": unittest.main() From 52d741706a2b206b1b268b90d02a4bbc9103965e Mon Sep 17 00:00:00 2001 From: GuiMacielPereira Date: Fri, 29 Nov 2024 16:11:34 +0000 Subject: [PATCH 9/9] Add test for calculating means Cleaned up the function for calculating means in the reduction analysis and added a unit test to check that the filtering of widths and intensities works. I removed all of the assertions because, instead of stopping the code, it is better to keep going and produce the means table so that the user at least has a clue about what happened.
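Editor's note: the filtering that the new test exercises can be sketched on the test's own numbers (the variable names below are local to this example, not the routine's attributes). Spectra where every width is zero are dropped, widths further than one standard deviation from the mean are flagged with NaN together with their intensities, and the intensity ratios are normalised with a plain sum so that a NaN in any mass propagates to the whole spectrum before the nan-aware means are taken.

import numpy as np

# One row per mass profile, one column per fitted spectrum;
# zeros mark failed fits or masked spectra.
widths = np.array([[5.6, 5.1, 0.0, 2.0, 5.4],
                   [2.1, 1.0, 0.0, 2.3, 1.9]])
intensities = np.array([[7.8, 7.6, 0.0, 5.0, 7.3],
                        [3.1, 2.0, 0.0, 3.2, 3.1]])

# Drop spectra where every width is zero.
keep = np.any(widths != 0, axis=0)
widths, intensities = widths[:, keep], intensities[:, keep]

# Flag width outliers (more than one std from the mean) with NaN,
# and discard the corresponding intensities as well.
deviations = np.abs(widths - widths.mean(axis=1, keepdims=True))
outliers = deviations > widths.std(axis=1, keepdims=True)
widths[outliers] = np.nan
intensities[outliers] = np.nan

# Plain sum (not nansum) so a NaN in any mass removes that spectrum
# from the intensity ratios of every mass.
intensity_ratios = intensities / intensities.sum(axis=0)

mean_widths = np.nanmean(widths, axis=1)                      # ~[5.367, 2.1]
std_widths = np.nanstd(widths, axis=1)
mean_intensity_ratios = np.nanmean(intensity_ratios, axis=1)
std_intensity_ratios = np.nanstd(intensity_ratios, axis=1)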
--- src/mvesuvio/analysis_reduction.py | 114 +++++------------- .../unit/analysis/test_analysis_reduction.py | 31 ++++- 2 files changed, 60 insertions(+), 85 deletions(-) diff --git a/src/mvesuvio/analysis_reduction.py b/src/mvesuvio/analysis_reduction.py index 131eb3e4..930b3284 100644 --- a/src/mvesuvio/analysis_reduction.py +++ b/src/mvesuvio/analysis_reduction.py @@ -413,34 +413,43 @@ def _create_summed_workspaces(self): ) def _set_means_and_std(self): - """Calculate mean widths and intensities from tableWorkspace""" - - fitParsTable = self._table_fit_results - widths = np.zeros((self._profiles_table.rowCount(), fitParsTable.rowCount())) + widths = np.zeros((self._profiles_table.rowCount(), self._table_fit_results.rowCount())) intensities = np.zeros(widths.shape) + for i, label in enumerate(self._profiles_table.column("label")): - widths[i] = fitParsTable.column(f"{label} width") - intensities[i] = fitParsTable.column(f"{label} intensity") - ( - meanWidths, - stdWidths, - meanIntensityRatios, - stdIntensityRatios, - ) = self.calculateMeansAndStds(widths, intensities) - - assert ( - len(meanWidths) == self._profiles_table.rowCount() - ), "Number of mean widths must match number of profiles!" - - self._mean_widths = meanWidths - self._std_widths = stdWidths - self._mean_intensity_ratios = meanIntensityRatios - self._std_intensity_ratios = stdIntensityRatios + widths[i] = self._table_fit_results.column(f"{label} width") + intensities[i] = self._table_fit_results.column(f"{label} intensity") + self._set_means_and_std_arrays(widths, intensities) self._create_means_table() return + def _set_means_and_std_arrays(self, widths, intensities): + # Remove failed fits and masked spectra + non_zero_columns = np.any(widths!=0, axis=0) + widths = widths[:, non_zero_columns] + intensities = intensities[:, non_zero_columns] + + widths_mean = np.mean(widths, axis=1).reshape(-1, 1) + widths_std = np.std(widths, axis=1).reshape(-1, 1) + + widths_deviations = np.abs(widths - widths_mean) + + # Remove width outliers + widths[widths_deviations > widths_std] = np.nan + intensities[widths_deviations > widths_std] = np.nan + + # Use sum instead of nansum to propagate nans + intensities = intensities / intensities.sum(axis=0) + + self._mean_widths = np.nanmean(widths, axis=1) + self._std_widths = np.nanstd(widths, axis=1) + self._mean_intensity_ratios = np.nanmean(intensities, axis=1) + self._std_intensity_ratios = np.nanstd(intensities, axis=1) + return + + def _create_means_table(self): table = CreateEmptyTableWorkspace( OutputWorkspace=self._workspace_being_fit.name() + "_means" @@ -470,69 +479,6 @@ def _create_means_table(self): return table - def calculateMeansAndStds(self, widthsIn, intensitiesIn): - betterWidths, betterIntensities = self.filterWidthsAndIntensities(widthsIn, intensitiesIn) - - meanWidths = np.nanmean(betterWidths, axis=1) - stdWidths = np.nanstd(betterWidths, axis=1) - - meanIntensityRatios = np.nanmean(betterIntensities, axis=1) - stdIntensityRatios = np.nanstd(betterIntensities, axis=1) - - return meanWidths, stdWidths, meanIntensityRatios, stdIntensityRatios - - - def filterWidthsAndIntensities(self, widthsIn, intensitiesIn): - """Puts nans in places to be ignored""" - - widths = widthsIn.copy() # Copy to avoid accidental changes in arrays - intensities = intensitiesIn.copy() - - zeroSpecs = np.all( - widths == 0, axis=0 - ) # Catches all failed fits, not just masked spectra - widths[:, zeroSpecs] = np.nan - intensities[:, zeroSpecs] = np.nan - - meanWidths = np.nanmean(widths, 
axis=1)[:, np.newaxis] - - widthDeviation = np.abs(widths - meanWidths) - stdWidths = np.nanstd(widths, axis=1)[:, np.newaxis] - - # Put nan in places where width deviation is bigger than std - filterMask = widthDeviation > stdWidths - betterWidths = np.where(filterMask, np.nan, widths) - - maskedIntensities = np.where(filterMask, np.nan, intensities) - betterIntensities = maskedIntensities / np.sum( - maskedIntensities, axis=0 - ) # Not nansum() - - # Keep this around in case it is needed again - # When trying to estimate HToMassIdxRatio and normalization fails, skip normalization - # if np.all(np.isnan(betterIntensities)) & IC.runningPreliminary: - # assert IC.noOfMSIterations == 0, ( - # "Calculation of mean intensities failed, cannot proceed with MS correction." - # "Try to run again with noOfMSIterations=0." - # ) - # betterIntensities = maskedIntensities - # else: - # pass - - assert np.all(meanWidths != np.nan), "At least one mean of widths is nan!" - assert np.sum(filterMask) >= 1, "No widths survive filtering condition" - assert not (np.all(np.isnan(betterWidths))), "All filtered widths are nan" - assert not (np.all(np.isnan(betterIntensities))), "All filtered intensities are nan" - assert np.nanmax(betterWidths) != np.nanmin( - betterWidths - ), f"All filtered widths have the same value: {np.nanmin(betterWidths)}" - assert np.nanmax(betterIntensities) != np.nanmin( - betterIntensities - ), f"All filtered intensities have the same value: {np.nanmin(betterIntensities)}" - - return betterWidths, betterIntensities - - def _fit_neutron_compton_profiles_to_row(self): if np.all(self._dataY[self._row_being_fit] == 0): diff --git a/tests/unit/analysis/test_analysis_reduction.py b/tests/unit/analysis/test_analysis_reduction.py index 61bf3ce4..f93c8fd1 100644 --- a/tests/unit/analysis/test_analysis_reduction.py +++ b/tests/unit/analysis/test_analysis_reduction.py @@ -350,6 +350,35 @@ def test_get_lorentzian_resolution(self): centers = np.array([11.3, 9.6, 14.7]).reshape(-1, 1) np.testing.assert_allclose(alg._get_lorentzian_resolution(centers), np.array([11, 12, 15]).reshape(-1, 1)) - + + def test_set_means_and_std(self): + alg = VesuvioAnalysisRoutine() + alg._create_means_table = MagicMock() + alg._profiles_table = MagicMock(rowCount=MagicMock(return_value=2), column=MagicMock(return_value=['1.0', '12.0'])) + + def pick_column(arg): + table = { + '1.0 width': [5.6, 5.1, 0, 2, 5.4], + '12.0 width': [2.1, 1, 0, 2.3, 1.9], + '1.0 intensity': [7.8, 7.6, 0, 5, 7.3], + '12.0 intensity': [3.1, 2, 0, 3.2, 3.1], + } + return table[arg] + + alg._table_fit_results = MagicMock(rowCount=MagicMock(return_value=5), column=MagicMock(side_effect=pick_column)) + + alg._set_means_and_std() + + self.assertEqual(alg._table_fit_results.column.call_count, 4) + self.assertEqual(alg._mean_widths[0], np.mean([5.6, 5.1, 5.4])) + self.assertEqual(alg._std_widths[0], np.std([5.6, 5.1, 5.4])) + self.assertEqual(alg._mean_widths[1], np.mean([2.1, 2.3, 1.9])) + self.assertEqual(alg._std_widths[1], np.std([2.1, 2.3, 1.9])) + self.assertEqual(alg._mean_intensity_ratios[0], np.nanmean(np.array([7.8, 7.6, np.nan, np.nan, 7.3]) / np.array([7.8+3.1, np.nan, np.nan, np.nan, 7.3+3.1]))) + self.assertEqual(alg._std_intensity_ratios[0], np.nanstd(np.array([7.8, 7.6, np.nan, np.nan, 7.3]) / np.array([7.8+3.1, np.nan, np.nan, np.nan, 7.3+3.1]))) + self.assertEqual(alg._mean_intensity_ratios[1], np.nanmean(np.array([3.1, np.nan, np.nan, 3.2, 3.1]) / np.array([7.8+3.1, np.nan, np.nan, np.nan, 7.3+3.1]))) + 
self.assertEqual(alg._std_intensity_ratios[1], np.nanstd(np.array([3.1, np.nan, np.nan, 3.2, 3.1]) / np.array([7.8+3.1, np.nan, np.nan, np.nan, 7.3+3.1]))) + + if __name__ == "__main__": unittest.main()
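Editor's note: the lookup exercised by the two _get_*_resolution tests reduces to an argmin over |y - centre| followed by np.take_along_axis. A small self-contained sketch with the same toy numbers as test_get_gaussian_resolution (names are local to this example):

import numpy as np

# Per-mass, per-bin resolution widths and y-space values for one spectrum,
# i.e. the slices taken at self._row_being_fit in the routine.
resolution = np.arange(9, dtype=float).reshape(3, 3)
y_space = np.arange(9, dtype=float).reshape(3, 3)

# One fitted peak centre per mass (column vector).
centers = np.array([0.5, 3.6, 7.6]).reshape(-1, 1)

# Index of the y bin closest to each centre, then pick the resolution
# width stored at that bin for each mass.
closest_bin = np.abs(y_space - centers).argmin(axis=1, keepdims=True)
width_at_peak = np.take_along_axis(resolution, closest_bin, axis=1)

print(width_at_peak.ravel())  # -> [0. 4. 8.], matching the test expectation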