diff --git a/D47crunch/__init__.py b/D47crunch/__init__.py index 80ac114..03651fb 100755 --- a/D47crunch/__init__.py +++ b/D47crunch/__init__.py @@ -20,8 +20,8 @@ __contact__ = 'daeron@lsce.ipsl.fr' __copyright__ = 'Copyright (c) 2023 Mathieu Daëron' __license__ = 'Modified BSD License - https://opensource.org/licenses/BSD-3-Clause' -__date__ = '2023-05-11' -__version__ = '2.0.5' +__date__ = '2023-05-13' +__version__ = '2.0.6' import os import numpy as np @@ -855,16 +855,11 @@ def _fullcovar(minresult, epsilon = 0.01, named = False): def f(values): interp = asteval.Interpreter() - print(minresult.var_names, values) for n,v in zip(minresult.var_names, values): interp(f'{n} = {v}') - print(f'{n} = {v}') for q in minresult.params: - print(q, minresult.params[q].expr) if minresult.params[q].expr: interp(f'{q} = {minresult.params[q].expr}') - print(f'{q} = {minresult.params[q].expr}') - print() return np.array([interp.symtable[q] for q in minresult.params]) # construct Jacobian diff --git a/changelog.md b/changelog.md index 9600c7c..a2f209d 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,11 @@ # Changelog +## v2.0.6 +*Released on 2023-05-13* + +### Bugfix +* Eliminate some spurious debugging messages in `_fullcovar()` + ## v2.0.5 *Released on 2023-05-11* diff --git a/docs/index.html b/docs/index.html index b571380..32a6933 100644 --- a/docs/index.html +++ b/docs/index.html @@ -818,8 +818,8 @@
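For context, the `_fullcovar()` helper patched above propagates the covariance matrix of the free fit parameters onto the full parameter set (including parameters tied to others through lmfit `expr` constraints), by building a central-difference Jacobian and computing `J.T @ covar @ J`. Below is a minimal sketch of that propagation step in plain numpy, with hypothetical names and invented numbers rather than the library's API:

```py
# Editorial sketch (not part of the patch): numerical covariance propagation
# in the style of _fullcovar(), with hypothetical names and invented numbers.
import numpy as np

def propagate_covar(f, X, sX, covar, epsilon = 0.01):
	'''
	Propagate `covar` (covariance of the free variables `X`, with standard
	errors `sX`) onto the derived parameter vector returned by `f`, using
	a central-difference Jacobian with steps of `epsilon * sX`.
	'''
	X = np.asarray(X, dtype = float)
	J = np.zeros((X.size, f(X).size))
	for j in range(X.size):
		x1, x2 = X.copy(), X.copy()
		x1[j] += epsilon * sX[j]
		x2[j] -= epsilon * sX[j]
		J[j,:] = (f(x1) - f(x2)) / (2 * epsilon * sX[j])
	return J.T @ covar @ J

# two free variables (a, b) plus one derived parameter c = a * b:
f = lambda x: np.array([x[0], x[1], x[0] * x[1]])
full_covar = propagate_covar(f, [1., 2.], [.1, .2], np.diag([.1, .2])**2)
print(np.diag(full_covar)**.5)  # standard errors of a, b, c -> [0.1  0.2  0.283]
```

Scaling each finite-difference step by `epsilon * sX[j]` keeps the perturbations proportional to each variable's uncertainty, which is the same choice made in `_fullcovar()`.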
20__contact__ = 'daeron@lsce.ipsl.fr' 21__copyright__ = 'Copyright (c) 2023 Mathieu Daëron' 22__license__ = 'Modified BSD License - https://opensource.org/licenses/BSD-3-Clause' - 23__date__ = '2023-05-11' - 24__version__ = '2.0.5' + 23__date__ = '2023-05-13' + 24__version__ = '2.0.6' 25 26import os 27import numpy as np @@ -1653,2273 +1653,2268 @@
855 856 def f(values): 857 interp = asteval.Interpreter() - 858 print(minresult.var_names, values) - 859 for n,v in zip(minresult.var_names, values): - 860 interp(f'{n} = {v}') - 861 print(f'{n} = {v}') - 862 for q in minresult.params: - 863 print(q, minresult.params[q].expr) - 864 if minresult.params[q].expr: - 865 interp(f'{q} = {minresult.params[q].expr}') - 866 print(f'{q} = {minresult.params[q].expr}') - 867 print() - 868 return np.array([interp.symtable[q] for q in minresult.params]) + 858 for n,v in zip(minresult.var_names, values): + 859 interp(f'{n} = {v}') + 860 for q in minresult.params: + 861 if minresult.params[q].expr: + 862 interp(f'{q} = {minresult.params[q].expr}') + 863 return np.array([interp.symtable[q] for q in minresult.params]) + 864 + 865 # construct Jacobian + 866 J = np.zeros((minresult.nvarys, len(minresult.params))) + 867 X = np.array([minresult.params[p].value for p in minresult.var_names]) + 868 sX = np.array([minresult.params[p].stderr for p in minresult.var_names]) 869 - 870 # construct Jacobian - 871 J = np.zeros((minresult.nvarys, len(minresult.params))) - 872 X = np.array([minresult.params[p].value for p in minresult.var_names]) - 873 sX = np.array([minresult.params[p].stderr for p in minresult.var_names]) - 874 - 875 for j in range(minresult.nvarys): - 876 x1 = [_ for _ in X] - 877 x1[j] += epsilon * sX[j] - 878 x2 = [_ for _ in X] - 879 x2[j] -= epsilon * sX[j] - 880 J[j,:] = (f(x1) - f(x2)) / (2 * epsilon * sX[j]) - 881 - 882 _names = [q for q in minresult.params] - 883 _covar = J.T @ minresult.covar @ J - 884 _se = np.diag(_covar)**.5 - 885 _correl = _covar.copy() - 886 for k,s in enumerate(_se): - 887 if s: - 888 _correl[k,:] /= s - 889 _correl[:,k] /= s + 870 for j in range(minresult.nvarys): + 871 x1 = [_ for _ in X] + 872 x1[j] += epsilon * sX[j] + 873 x2 = [_ for _ in X] + 874 x2[j] -= epsilon * sX[j] + 875 J[j,:] = (f(x1) - f(x2)) / (2 * epsilon * sX[j]) + 876 + 877 _names = [q for q in minresult.params] + 878 _covar = J.T @ minresult.covar @ J + 879 _se = np.diag(_covar)**.5 + 880 _correl = _covar.copy() + 881 for k,s in enumerate(_se): + 882 if s: + 883 _correl[k,:] /= s + 884 _correl[:,k] /= s + 885 + 886 if named: + 887 _covar = {i: {j:_covar[i,j] for j in minresult.params} for i in minresult.params} + 888 _se = {i: _se[i] for i in minresult.params} + 889 _correl = {i: {j:_correl[i,j] for j in minresult.params} for i in minresult.params} 890 - 891 if named: - 892 _covar = {i: {j:_covar[i,j] for j in minresult.params} for i in minresult.params} - 893 _se = {i: _se[i] for i in minresult.params} - 894 _correl = {i: {j:_correl[i,j] for j in minresult.params} for i in minresult.params} - 895 - 896 return _names, _covar, _se, _correl - 897 - 898 - 899class D4xdata(list): - 900 ''' - 901 Store and process data for a large set of Δ47 and/or Δ48 - 902 analyses, usually comprising more than one analytical session. - 903 ''' - 904 - 905 ### 17O CORRECTION PARAMETERS - 906 R13_VPDB = 0.01118 # (Chang & Li, 1990) - 907 ''' - 908 Absolute (13C/12C) ratio of VPDB. - 909 By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm)) - 910 ''' - 911 - 912 R18_VSMOW = 0.0020052 # (Baertschi, 1976) - 913 ''' - 914 Absolute (18O/16O) ratio of VSMOW. - 915 By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1)) - 916 ''' - 917 - 918 LAMBDA_17 = 0.528 # (Barkan & Luz, 2005) - 919 ''' - 920 Mass-dependent exponent for triple oxygen isotopes. 
- 921 By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250)) - 922 ''' - 923 - 924 R17_VSMOW = 0.00038475 # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB) - 925 ''' - 926 Absolute (17O/16O) ratio of VSMOW. - 927 By default equal to 0.00038475 - 928 ([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011), - 929 rescaled to `R13_VPDB`) - 930 ''' - 931 - 932 R18_VPDB = R18_VSMOW * 1.03092 - 933 ''' - 934 Absolute (18O/16O) ratio of VPDB. - 935 By definition equal to `R18_VSMOW * 1.03092`. - 936 ''' - 937 - 938 R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17 - 939 ''' - 940 Absolute (17O/16O) ratio of VPDB. - 941 By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`. - 942 ''' - 943 - 944 LEVENE_REF_SAMPLE = 'ETH-3' - 945 ''' - 946 After the Δ4x standardization step, each sample is tested to - 947 assess whether the Δ4x variance within all analyses for that - 948 sample differs significantly from that observed for a given reference - 949 sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test), - 950 which yields a p-value corresponding to the null hypothesis that the - 951 underlying variances are equal). - 952 - 953 `LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which - 954 sample should be used as a reference for this test. - 955 ''' - 956 - 957 ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6) # (Kim et al., 2007, calcite) - 958 ''' - 959 Specifies the 18O/16O fractionation factor generally applicable - 960 to acid reactions in the dataset. Currently used by `D4xdata.wg()`, - 961 `D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`. - 962 - 963 By default equal to 1.008129 (calcite reacted at 90 °C, - 964 [Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)). - 965 ''' - 966 - 967 Nominal_d13C_VPDB = { - 968 'ETH-1': 2.02, - 969 'ETH-2': -10.17, - 970 'ETH-3': 1.71, - 971 } # (Bernasconi et al., 2018) - 972 ''' - 973 Nominal δ13C_VPDB values assigned to carbonate standards, used by - 974 `D4xdata.standardize_d13C()`. - 975 - 976 By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after - 977 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). - 978 ''' - 979 - 980 Nominal_d18O_VPDB = { - 981 'ETH-1': -2.19, - 982 'ETH-2': -18.69, - 983 'ETH-3': -1.78, - 984 } # (Bernasconi et al., 2018) - 985 ''' - 986 Nominal δ18O_VPDB values assigned to carbonate standards, used by - 987 `D4xdata.standardize_d18O()`. - 988 - 989 By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after - 990 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). - 991 ''' - 992 - 993 d13C_STANDARDIZATION_METHOD = '2pt' - 994 ''' - 995 Method by which to standardize δ13C values: - 996 - 997 + `none`: do not apply any δ13C standardization. - 998 + `'1pt'`: within each session, offset all initial δ13C values so as to - 999 minimize the difference between final δ13C_VPDB values and -1000 `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined). -1001 + `'2pt'`: within each session, apply an affine transformation to all δ13C -1002 values so as to minimize the difference between final δ13C_VPDB -1003 values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` -1004 is defined). -1005 ''' -1006 -1007 d18O_STANDARDIZATION_METHOD = '2pt' -1008 ''' -1009 Method by which to standardize δ18O values: -1010 -1011 + `none`: do not apply any δ18O standardization. 
-1012 + `'1pt'`: within each session, offset all initial δ18O values so as to -1013 minimize the difference between final δ18O_VPDB values and -1014 `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined). -1015 + `'2pt'`: within each session, apply an affine transformation to all δ18O -1016 values so as to minimize the difference between final δ18O_VPDB -1017 values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` -1018 is defined). -1019 ''' -1020 -1021 def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False): -1022 ''' -1023 **Parameters** -1024 -1025 + `l`: a list of dictionaries, with each dictionary including at least the keys -1026 `Sample`, `d45`, `d46`, and `d47` or `d48`. -1027 + `mass`: `'47'` or `'48'` -1028 + `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods. -1029 + `session`: define session name for analyses without a `Session` key -1030 + `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods. -1031 -1032 Returns a `D4xdata` object derived from `list`. -1033 ''' -1034 self._4x = mass -1035 self.verbose = verbose -1036 self.prefix = 'D4xdata' -1037 self.logfile = logfile -1038 list.__init__(self, l) -1039 self.Nf = None -1040 self.repeatability = {} -1041 self.refresh(session = session) -1042 -1043 -1044 def make_verbal(oldfun): -1045 ''' -1046 Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`. -1047 ''' -1048 @wraps(oldfun) -1049 def newfun(*args, verbose = '', **kwargs): -1050 myself = args[0] -1051 oldprefix = myself.prefix -1052 myself.prefix = oldfun.__name__ + 891 return _names, _covar, _se, _correl + 892 + 893 + 894class D4xdata(list): + 895 ''' + 896 Store and process data for a large set of Δ47 and/or Δ48 + 897 analyses, usually comprising more than one analytical session. + 898 ''' + 899 + 900 ### 17O CORRECTION PARAMETERS + 901 R13_VPDB = 0.01118 # (Chang & Li, 1990) + 902 ''' + 903 Absolute (13C/12C) ratio of VPDB. + 904 By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm)) + 905 ''' + 906 + 907 R18_VSMOW = 0.0020052 # (Baertschi, 1976) + 908 ''' + 909 Absolute (18O/16O) ratio of VSMOW. + 910 By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1)) + 911 ''' + 912 + 913 LAMBDA_17 = 0.528 # (Barkan & Luz, 2005) + 914 ''' + 915 Mass-dependent exponent for triple oxygen isotopes. + 916 By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250)) + 917 ''' + 918 + 919 R17_VSMOW = 0.00038475 # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB) + 920 ''' + 921 Absolute (17O/16O) ratio of VSMOW. + 922 By default equal to 0.00038475 + 923 ([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011), + 924 rescaled to `R13_VPDB`) + 925 ''' + 926 + 927 R18_VPDB = R18_VSMOW * 1.03092 + 928 ''' + 929 Absolute (18O/16O) ratio of VPDB. + 930 By definition equal to `R18_VSMOW * 1.03092`. + 931 ''' + 932 + 933 R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17 + 934 ''' + 935 Absolute (17O/16O) ratio of VPDB. + 936 By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`. 
+ 937 ''' + 938 + 939 LEVENE_REF_SAMPLE = 'ETH-3' + 940 ''' + 941 After the Δ4x standardization step, each sample is tested to + 942 assess whether the Δ4x variance within all analyses for that + 943 sample differs significantly from that observed for a given reference + 944 sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test), + 945 which yields a p-value corresponding to the null hypothesis that the + 946 underlying variances are equal). + 947 + 948 `LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which + 949 sample should be used as a reference for this test. + 950 ''' + 951 + 952 ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6) # (Kim et al., 2007, calcite) + 953 ''' + 954 Specifies the 18O/16O fractionation factor generally applicable + 955 to acid reactions in the dataset. Currently used by `D4xdata.wg()`, + 956 `D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`. + 957 + 958 By default equal to 1.008129 (calcite reacted at 90 °C, + 959 [Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)). + 960 ''' + 961 + 962 Nominal_d13C_VPDB = { + 963 'ETH-1': 2.02, + 964 'ETH-2': -10.17, + 965 'ETH-3': 1.71, + 966 } # (Bernasconi et al., 2018) + 967 ''' + 968 Nominal δ13C_VPDB values assigned to carbonate standards, used by + 969 `D4xdata.standardize_d13C()`. + 970 + 971 By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after + 972 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). + 973 ''' + 974 + 975 Nominal_d18O_VPDB = { + 976 'ETH-1': -2.19, + 977 'ETH-2': -18.69, + 978 'ETH-3': -1.78, + 979 } # (Bernasconi et al., 2018) + 980 ''' + 981 Nominal δ18O_VPDB values assigned to carbonate standards, used by + 982 `D4xdata.standardize_d18O()`. + 983 + 984 By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after + 985 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). + 986 ''' + 987 + 988 d13C_STANDARDIZATION_METHOD = '2pt' + 989 ''' + 990 Method by which to standardize δ13C values: + 991 + 992 + `none`: do not apply any δ13C standardization. + 993 + `'1pt'`: within each session, offset all initial δ13C values so as to + 994 minimize the difference between final δ13C_VPDB values and + 995 `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined). + 996 + `'2pt'`: within each session, apply an affine transformation to all δ13C + 997 values so as to minimize the difference between final δ13C_VPDB + 998 values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` + 999 is defined). +1000 ''' +1001 +1002 d18O_STANDARDIZATION_METHOD = '2pt' +1003 ''' +1004 Method by which to standardize δ18O values: +1005 +1006 + `none`: do not apply any δ18O standardization. +1007 + `'1pt'`: within each session, offset all initial δ18O values so as to +1008 minimize the difference between final δ18O_VPDB values and +1009 `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined). +1010 + `'2pt'`: within each session, apply an affine transformation to all δ18O +1011 values so as to minimize the difference between final δ18O_VPDB +1012 values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` +1013 is defined). 
+1014 ''' +1015 +1016 def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False): +1017 ''' +1018 **Parameters** +1019 +1020 + `l`: a list of dictionaries, with each dictionary including at least the keys +1021 `Sample`, `d45`, `d46`, and `d47` or `d48`. +1022 + `mass`: `'47'` or `'48'` +1023 + `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods. +1024 + `session`: define session name for analyses without a `Session` key +1025 + `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods. +1026 +1027 Returns a `D4xdata` object derived from `list`. +1028 ''' +1029 self._4x = mass +1030 self.verbose = verbose +1031 self.prefix = 'D4xdata' +1032 self.logfile = logfile +1033 list.__init__(self, l) +1034 self.Nf = None +1035 self.repeatability = {} +1036 self.refresh(session = session) +1037 +1038 +1039 def make_verbal(oldfun): +1040 ''' +1041 Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`. +1042 ''' +1043 @wraps(oldfun) +1044 def newfun(*args, verbose = '', **kwargs): +1045 myself = args[0] +1046 oldprefix = myself.prefix +1047 myself.prefix = oldfun.__name__ +1048 if verbose != '': +1049 oldverbose = myself.verbose +1050 myself.verbose = verbose +1051 out = oldfun(*args, **kwargs) +1052 myself.prefix = oldprefix 1053 if verbose != '': -1054 oldverbose = myself.verbose -1055 myself.verbose = verbose -1056 out = oldfun(*args, **kwargs) -1057 myself.prefix = oldprefix -1058 if verbose != '': -1059 myself.verbose = oldverbose -1060 return out -1061 return newfun -1062 -1063 -1064 def msg(self, txt): -1065 ''' -1066 Log a message to `self.logfile`, and print it out if `verbose = True` -1067 ''' -1068 self.log(txt) -1069 if self.verbose: -1070 print(f'{f"[{self.prefix}]":<16} {txt}') -1071 -1072 -1073 def vmsg(self, txt): -1074 ''' -1075 Log a message to `self.logfile` and print it out -1076 ''' -1077 self.log(txt) -1078 print(txt) -1079 -1080 -1081 def log(self, *txts): -1082 ''' -1083 Log a message to `self.logfile` -1084 ''' -1085 if self.logfile: -1086 with open(self.logfile, 'a') as fid: -1087 for txt in txts: -1088 fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}') -1089 -1090 -1091 def refresh(self, session = 'mySession'): -1092 ''' -1093 Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`. -1094 ''' -1095 self.fill_in_missing_info(session = session) -1096 self.refresh_sessions() -1097 self.refresh_samples() -1098 -1099 -1100 def refresh_sessions(self): -1101 ''' -1102 Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift` -1103 to `False` for all sessions. -1104 ''' -1105 self.sessions = { -1106 s: {'data': [r for r in self if r['Session'] == s]} -1107 for s in sorted({r['Session'] for r in self}) -1108 } -1109 for s in self.sessions: -1110 self.sessions[s]['scrambling_drift'] = False -1111 self.sessions[s]['slope_drift'] = False -1112 self.sessions[s]['wg_drift'] = False -1113 self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD -1114 self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD -1115 -1116 -1117 def refresh_samples(self): -1118 ''' -1119 Define `self.samples`, `self.anchors`, and `self.unknowns`. 
-1120 ''' -1121 self.samples = { -1122 s: {'data': [r for r in self if r['Sample'] == s]} -1123 for s in sorted({r['Sample'] for r in self}) -1124 } -1125 self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x} -1126 self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x} +1054 myself.verbose = oldverbose +1055 return out +1056 return newfun +1057 +1058 +1059 def msg(self, txt): +1060 ''' +1061 Log a message to `self.logfile`, and print it out if `verbose = True` +1062 ''' +1063 self.log(txt) +1064 if self.verbose: +1065 print(f'{f"[{self.prefix}]":<16} {txt}') +1066 +1067 +1068 def vmsg(self, txt): +1069 ''' +1070 Log a message to `self.logfile` and print it out +1071 ''' +1072 self.log(txt) +1073 print(txt) +1074 +1075 +1076 def log(self, *txts): +1077 ''' +1078 Log a message to `self.logfile` +1079 ''' +1080 if self.logfile: +1081 with open(self.logfile, 'a') as fid: +1082 for txt in txts: +1083 fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}') +1084 +1085 +1086 def refresh(self, session = 'mySession'): +1087 ''' +1088 Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`. +1089 ''' +1090 self.fill_in_missing_info(session = session) +1091 self.refresh_sessions() +1092 self.refresh_samples() +1093 +1094 +1095 def refresh_sessions(self): +1096 ''' +1097 Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift` +1098 to `False` for all sessions. +1099 ''' +1100 self.sessions = { +1101 s: {'data': [r for r in self if r['Session'] == s]} +1102 for s in sorted({r['Session'] for r in self}) +1103 } +1104 for s in self.sessions: +1105 self.sessions[s]['scrambling_drift'] = False +1106 self.sessions[s]['slope_drift'] = False +1107 self.sessions[s]['wg_drift'] = False +1108 self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD +1109 self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD +1110 +1111 +1112 def refresh_samples(self): +1113 ''' +1114 Define `self.samples`, `self.anchors`, and `self.unknowns`. +1115 ''' +1116 self.samples = { +1117 s: {'data': [r for r in self if r['Sample'] == s]} +1118 for s in sorted({r['Sample'] for r in self}) +1119 } +1120 self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x} +1121 self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x} +1122 +1123 +1124 def read(self, filename, sep = '', session = ''): +1125 ''' +1126 Read file in csv format to load data into a `D47data` object. 1127 -1128 -1129 def read(self, filename, sep = '', session = ''): -1130 ''' -1131 Read file in csv format to load data into a `D47data` object. +1128 In the csv file, spaces before and after field separators (`','` by default) +1129 are optional. Each line corresponds to a single analysis. +1130 +1131 The required fields are: 1132 -1133 In the csv file, spaces before and after field separators (`','` by default) -1134 are optional. Each line corresponds to a single analysis. 
-1135 -1136 The required fields are: 1137 -1138 + `UID`: a unique identifier -1139 + `Session`: an identifier for the analytical session -1140 + `Sample`: a sample identifier -1141 + `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values -1142 -1143 Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to -1144 VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48` -1145 and `d49` are optional, and set to NaN by default. -1146 -1147 **Parameters** -1148 -1149 + `filename`: the path of the file to read -1150 + `sep`: csv separator delimiting the fields -1151 + `session`: set `Session` field to this string for all analyses -1152 ''' -1153 with open(filename) as fid: -1154 self.input(fid.read(), sep = sep, session = session) +1138 Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to +1139 VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48` +1140 and `d49` are optional, and set to NaN by default. +1141 +1142 **Parameters** +1143 +1144 + `filename`: the path of the file to read +1145 + `sep`: csv separator delimiting the fields +1146 + `session`: set `Session` field to this string for all analyses +1147 ''' +1148 with open(filename) as fid: +1149 self.input(fid.read(), sep = sep, session = session) +1150 +1151 +1152 def input(self, txt, sep = '', session = ''): +1153 ''' +1154 Read `txt` string in csv format to load analysis data into a `D47data` object. 1155 -1156 -1157 def input(self, txt, sep = '', session = ''): -1158 ''' -1159 Read `txt` string in csv format to load analysis data into a `D47data` object. 1160 -1161 In the csv string, spaces before and after field separators (`','` by default) -1162 are optional. Each line corresponds to a single analysis. -1163 -1164 The required fields are: 1165 -1166 + `UID`: a unique identifier -1167 + `Session`: an identifier for the analytical session -1168 + `Sample`: a sample identifier -1169 + `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values -1170 -1171 Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to -1172 VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48` -1173 and `d49` are optional, and set to NaN by default. -1174 -1175 **Parameters** -1176 -1177 + `txt`: the csv string to read -1178 + `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`, -1179 whichever appears most often in `txt`. 
-1180 + `session`: set `Session` field to this string for all analyses -1181 ''' -1182 if sep == '': -1183 sep = sorted(',;\t', key = lambda x: - txt.count(x))[0] -1184 txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()] -1185 data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]] -1186 -1187 if session != '': -1188 for r in data: -1189 r['Session'] = session -1190 -1191 self += data -1192 self.refresh() -1193 -1194 -1195 @make_verbal -1196 def wg(self, samples = None, a18_acid = None): -1197 ''' -1198 Compute bulk composition of the working gas for each session based on -1199 the carbonate standards defined in both `self.Nominal_d13C_VPDB` and -1200 `self.Nominal_d18O_VPDB`. -1201 ''' -1202 -1203 self.msg('Computing WG composition:') 1204 -1205 if a18_acid is None: -1206 a18_acid = self.ALPHA_18O_ACID_REACTION -1207 if samples is None: -1208 samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB] -1209 -1210 assert a18_acid, f'Acid fractionation factor should not be zero.' +1166 Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to +1167 VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48` +1168 and `d49` are optional, and set to NaN by default. +1169 +1170 **Parameters** +1171 +1172 + `txt`: the csv string to read +1173 + `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`, +1174 whichever appears most often in `txt`. +1175 + `session`: set `Session` field to this string for all analyses +1176 ''' +1177 if sep == '': +1178 sep = sorted(',;\t', key = lambda x: - txt.count(x))[0] +1179 txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()] +1180 data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]] +1181 +1182 if session != '': +1183 for r in data: +1184 r['Session'] = session +1185 +1186 self += data +1187 self.refresh() +1188 +1189 +1190 @make_verbal +1191 def wg(self, samples = None, a18_acid = None): +1192 ''' +1193 Compute bulk composition of the working gas for each session based on +1194 the carbonate standards defined in both `self.Nominal_d13C_VPDB` and +1195 `self.Nominal_d18O_VPDB`. +1196 ''' +1197 +1198 self.msg('Computing WG composition:') +1199 +1200 if a18_acid is None: +1201 a18_acid = self.ALPHA_18O_ACID_REACTION +1202 if samples is None: +1203 samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB] 1204 
-1211 -1212 samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB] -1213 R45R46_standards = {} -1214 for sample in samples: -1215 d13C_vpdb = self.Nominal_d13C_VPDB[sample] -1216 d18O_vpdb = self.Nominal_d18O_VPDB[sample] -1217 R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000) -1218 R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17 -1219 R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid -1220 -1221 C12_s = 1 / (1 + R13_s) -1222 C13_s = R13_s / (1 + R13_s) -1223 C16_s = 1 / (1 + R17_s + R18_s) -1224 C17_s = R17_s / (1 + R17_s + R18_s) -1225 C18_s = R18_s / (1 + R17_s + R18_s) -1226 -1227 C626_s = C12_s * C16_s ** 2 -1228 C627_s = 2 * C12_s * C16_s * C17_s -1229 C628_s = 2 * C12_s * C16_s * C18_s -1230 C636_s = C13_s * C16_s ** 2 -1231 C637_s = 2 * C13_s * C16_s * C17_s -1232 C727_s = C12_s * C17_s ** 2 -1233 -1234 R45_s = (C627_s + C636_s) / C626_s -1235 R46_s = (C628_s + C637_s + C727_s) / C626_s -1236 R45R46_standards[sample] = (R45_s, R46_s) -1237 -1238 for s in self.sessions: -1239 db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples] -1240 assert db, f'No sample from {samples} found in session "{s}".' -1241# dbsamples = sorted({r['Sample'] for r in db}) -1242 -1243 X = [r['d45'] for r in db] -1244 Y = [R45R46_standards[r['Sample']][0] for r in db] -1245 x1, x2 = np.min(X), np.max(X) +1205 assert a18_acid, f'Acid fractionation factor should not be zero.' +1206 +1207 samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB] +1208 R45R46_standards = {} +1209 for sample in samples: +1210 d13C_vpdb = self.Nominal_d13C_VPDB[sample] +1211 d18O_vpdb = self.Nominal_d18O_VPDB[sample] +1212 R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000) +1213 R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17 +1214 R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid +1215 +1216 C12_s = 1 / (1 + R13_s) +1217 C13_s = R13_s / (1 + R13_s) +1218 C16_s = 1 / (1 + R17_s + R18_s) +1219 C17_s = R17_s / (1 + R17_s + R18_s) +1220 C18_s = R18_s / (1 + R17_s + R18_s) +1221 +1222 C626_s = C12_s * C16_s ** 2 +1223 C627_s = 2 * C12_s * C16_s * C17_s +1224 C628_s = 2 * C12_s * C16_s * C18_s +1225 C636_s = C13_s * C16_s ** 2 +1226 C637_s = 2 * C13_s * C16_s * C17_s +1227 C727_s = C12_s * C17_s ** 2 +1228 +1229 R45_s = (C627_s + C636_s) / C626_s +1230 R46_s = (C628_s + C637_s + C727_s) / C626_s +1231 R45R46_standards[sample] = (R45_s, R46_s) +1232 +1233 for s in self.sessions: +1234 db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples] +1235 assert db, f'No sample from {samples} found in session "{s}".' 
+1236# dbsamples = sorted({r['Sample'] for r in db}) +1237 +1238 X = [r['d45'] for r in db] +1239 Y = [R45R46_standards[r['Sample']][0] for r in db] +1240 x1, x2 = np.min(X), np.max(X) +1241 +1242 if x1 < x2: +1243 wgcoord = x1/(x1-x2) +1244 else: +1245 wgcoord = 999 1246 -1247 if x1 < x2: -1248 wgcoord = x1/(x1-x2) -1249 else: -1250 wgcoord = 999 -1251 -1252 if wgcoord < -.5 or wgcoord > 1.5: -1253 # unreasonable to extrapolate to d45 = 0 -1254 R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) -1255 else : -1256 # d45 = 0 is reasonably well bracketed -1257 R45_wg = np.polyfit(X, Y, 1)[1] -1258 -1259 X = [r['d46'] for r in db] -1260 Y = [R45R46_standards[r['Sample']][1] for r in db] -1261 x1, x2 = np.min(X), np.max(X) +1247 if wgcoord < -.5 or wgcoord > 1.5: +1248 # unreasonable to extrapolate to d45 = 0 +1249 R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) +1250 else : +1251 # d45 = 0 is reasonably well bracketed +1252 R45_wg = np.polyfit(X, Y, 1)[1] +1253 +1254 X = [r['d46'] for r in db] +1255 Y = [R45R46_standards[r['Sample']][1] for r in db] +1256 x1, x2 = np.min(X), np.max(X) +1257 +1258 if x1 < x2: +1259 wgcoord = x1/(x1-x2) +1260 else: +1261 wgcoord = 999 1262 -1263 if x1 < x2: -1264 wgcoord = x1/(x1-x2) -1265 else: -1266 wgcoord = 999 -1267 -1268 if wgcoord < -.5 or wgcoord > 1.5: -1269 # unreasonable to extrapolate to d46 = 0 -1270 R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) -1271 else : -1272 # d46 = 0 is reasonably well bracketed -1273 R46_wg = np.polyfit(X, Y, 1)[1] -1274 -1275 d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg) -1276 -1277 self.msg(f'Session {s} WG: δ13C_VPDB = {d13Cwg_VPDB:.3f} δ18O_VSMOW = {d18Owg_VSMOW:.3f}') -1278 -1279 self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB -1280 self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW -1281 for r in self.sessions[s]['data']: -1282 r['d13Cwg_VPDB'] = d13Cwg_VPDB -1283 r['d18Owg_VSMOW'] = d18Owg_VSMOW -1284 -1285 -1286 def compute_bulk_delta(self, R45, R46, D17O = 0): -1287 ''' -1288 Compute δ13C_VPDB and δ18O_VSMOW, -1289 by solving the generalized form of equation (17) from -1290 [Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05), -1291 assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and -1292 solving the corresponding second-order Taylor polynomial. -1293 (Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)) -1294 ''' -1295 -1296 K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17 +1263 if wgcoord < -.5 or wgcoord > 1.5: +1264 # unreasonable to extrapolate to d46 = 0 +1265 R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) +1266 else : +1267 # d46 = 0 is reasonably well bracketed +1268 R46_wg = np.polyfit(X, Y, 1)[1] +1269 +1270 d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg) +1271 +1272 self.msg(f'Session {s} WG: δ13C_VPDB = {d13Cwg_VPDB:.3f} δ18O_VSMOW = {d18Owg_VSMOW:.3f}') +1273 +1274 self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB +1275 self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW +1276 for r in self.sessions[s]['data']: +1277 r['d13Cwg_VPDB'] = d13Cwg_VPDB +1278 r['d18Owg_VSMOW'] = d18Owg_VSMOW +1279 +1280 +1281 def compute_bulk_delta(self, R45, R46, D17O = 0): +1282 ''' +1283 Compute δ13C_VPDB and δ18O_VSMOW, +1284 by solving the generalized form of equation (17) from +1285 [Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05), +1286 assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and +1287 solving the corresponding second-order Taylor polynomial. 
+1288 (Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)) +1289 ''' +1290 +1291 K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17 +1292 +1293 A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17) +1294 B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17 +1295 C = 2 * self.R18_VSMOW +1296 D = -R46 1297 -1298 A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17) -1299 B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17 -1300 C = 2 * self.R18_VSMOW -1301 D = -R46 -1302 -1303 aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2 -1304 bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C -1305 cc = A + B + C + D -1306 -1307 d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa) -1308 -1309 R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW -1310 R17 = K * R18 ** self.LAMBDA_17 -1311 R13 = R45 - 2 * R17 +1298 aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2 +1299 bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C +1300 cc = A + B + C + D +1301 +1302 d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa) +1303 +1304 R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW +1305 R17 = K * R18 ** self.LAMBDA_17 +1306 R13 = R45 - 2 * R17 +1307 +1308 d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1) +1309 +1310 return d13C_VPDB, d18O_VSMOW +1311 1312 -1313 d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1) -1314 -1315 return d13C_VPDB, d18O_VSMOW -1316 -1317 -1318 @make_verbal -1319 def crunch(self, verbose = ''): -1320 ''' -1321 Compute bulk composition and raw clumped isotope anomalies for all analyses. -1322 ''' -1323 for r in self: -1324 self.compute_bulk_and_clumping_deltas(r) -1325 self.standardize_d13C() -1326 self.standardize_d18O() -1327 self.msg(f"Crunched {len(self)} analyses.") -1328 -1329 -1330 def fill_in_missing_info(self, session = 'mySession'): -1331 ''' -1332 Fill in optional fields with default values -1333 ''' -1334 for i,r in enumerate(self): -1335 if 'D17O' not in r: -1336 r['D17O'] = 0. -1337 if 'UID' not in r: -1338 r['UID'] = f'{i+1}' -1339 if 'Session' not in r: -1340 r['Session'] = session -1341 for k in ['d47', 'd48', 'd49']: -1342 if k not in r: -1343 r[k] = np.nan -1344 -1345 -1346 def standardize_d13C(self): -1347 ''' -1348 Perform δ13C standardization within each session `s` according to -1349 `self.sessions[s]['d13C_standardization_method']`, which is defined by default -1350 by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but -1351 may be redefined arbitrarily at a later stage. 
-1352 ''' -1353 for s in self.sessions: -1354 if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']: -1355 XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB] -1356 X,Y = zip(*XY) -1357 if self.sessions[s]['d13C_standardization_method'] == '1pt': -1358 offset = np.mean(Y) - np.mean(X) -1359 for r in self.sessions[s]['data']: -1360 r['d13C_VPDB'] += offset -1361 elif self.sessions[s]['d13C_standardization_method'] == '2pt': -1362 a,b = np.polyfit(X,Y,1) -1363 for r in self.sessions[s]['data']: -1364 r['d13C_VPDB'] = a * r['d13C_VPDB'] + b -1365 -1366 def standardize_d18O(self): -1367 ''' -1368 Perform δ18O standardization within each session `s` according to -1369 `self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`, -1370 which is defined by default by `D47data.refresh_sessions()` as equal to -1371 `self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage. -1372 ''' -1373 for s in self.sessions: -1374 if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']: -1375 XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB] -1376 X,Y = zip(*XY) -1377 Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y] -1378 if self.sessions[s]['d18O_standardization_method'] == '1pt': -1379 offset = np.mean(Y) - np.mean(X) -1380 for r in self.sessions[s]['data']: -1381 r['d18O_VSMOW'] += offset -1382 elif self.sessions[s]['d18O_standardization_method'] == '2pt': -1383 a,b = np.polyfit(X,Y,1) -1384 for r in self.sessions[s]['data']: -1385 r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b -1386 +1313 @make_verbal +1314 def crunch(self, verbose = ''): +1315 ''' +1316 Compute bulk composition and raw clumped isotope anomalies for all analyses. +1317 ''' +1318 for r in self: +1319 self.compute_bulk_and_clumping_deltas(r) +1320 self.standardize_d13C() +1321 self.standardize_d18O() +1322 self.msg(f"Crunched {len(self)} analyses.") +1323 +1324 +1325 def fill_in_missing_info(self, session = 'mySession'): +1326 ''' +1327 Fill in optional fields with default values +1328 ''' +1329 for i,r in enumerate(self): +1330 if 'D17O' not in r: +1331 r['D17O'] = 0. +1332 if 'UID' not in r: +1333 r['UID'] = f'{i+1}' +1334 if 'Session' not in r: +1335 r['Session'] = session +1336 for k in ['d47', 'd48', 'd49']: +1337 if k not in r: +1338 r[k] = np.nan +1339 +1340 +1341 def standardize_d13C(self): +1342 ''' +1343 Perform δ13C standardization within each session `s` according to +1344 `self.sessions[s]['d13C_standardization_method']`, which is defined by default +1345 by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but +1346 may be redefined arbitrarily at a later stage. 
+1347 ''' +1348 for s in self.sessions: +1349 if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']: +1350 XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB] +1351 X,Y = zip(*XY) +1352 if self.sessions[s]['d13C_standardization_method'] == '1pt': +1353 offset = np.mean(Y) - np.mean(X) +1354 for r in self.sessions[s]['data']: +1355 r['d13C_VPDB'] += offset +1356 elif self.sessions[s]['d13C_standardization_method'] == '2pt': +1357 a,b = np.polyfit(X,Y,1) +1358 for r in self.sessions[s]['data']: +1359 r['d13C_VPDB'] = a * r['d13C_VPDB'] + b +1360 +1361 def standardize_d18O(self): +1362 ''' +1363 Perform δ18O standardization within each session `s` according to +1364 `self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`, +1365 which is defined by default by `D47data.refresh_sessions()` as equal to +1366 `self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage. +1367 ''' +1368 for s in self.sessions: +1369 if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']: +1370 XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB] +1371 X,Y = zip(*XY) +1372 Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y] +1373 if self.sessions[s]['d18O_standardization_method'] == '1pt': +1374 offset = np.mean(Y) - np.mean(X) +1375 for r in self.sessions[s]['data']: +1376 r['d18O_VSMOW'] += offset +1377 elif self.sessions[s]['d18O_standardization_method'] == '2pt': +1378 a,b = np.polyfit(X,Y,1) +1379 for r in self.sessions[s]['data']: +1380 r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b +1381 +1382 +1383 def compute_bulk_and_clumping_deltas(self, r): +1384 ''' +1385 Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`. +1386 ''' 1387 
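As a reading aid for the `standardize_d13C()` and `standardize_d18O()` listings above: `'1pt'` applies a constant offset to the session's values, while `'2pt'` fits and applies an affine transformation mapping measured anchor values onto their nominal ones. A self-contained illustration with invented numbers:

```py
# Editorial sketch of the '1pt' and '2pt' standardization methods shown
# above, using invented session data (not part of the patch).
import numpy as np

X = np.array([2.11, -10.02, 1.80])  # measured d13C_VPDB of the session's anchors
Y = np.array([2.02, -10.17, 1.71])  # their Nominal_d13C_VPDB values

# '1pt': shift all values by the mean anchor misfit
offset = np.mean(Y) - np.mean(X)

# '2pt': affine transformation minimizing the anchor misfit
a, b = np.polyfit(X, Y, 1)
corrected = a * X + b
```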
-1391 ''' +1388 # Compute working gas R13, R18, and isobar ratios +1389 R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000) +1390 R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000) +1391 R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg) 1392 -1393 # Compute working gas R13, R18, and isobar ratios -1394 R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000) -1395 R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000) -1396 R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg) -1397 -1398 # Compute analyte isobar ratios -1399 R45 = (1 + r['d45'] / 1000) * R45_wg -1400 R46 = (1 + r['d46'] / 1000) * R46_wg -1401 R47 = (1 + r['d47'] / 1000) * R47_wg -1402 R48 = (1 + r['d48'] / 1000) * R48_wg -1403 R49 = (1 + r['d49'] / 1000) * R49_wg -1404 -1405 r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O']) -1406 R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB -1407 R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW +1393 # Compute analyte isobar ratios +1394 R45 = (1 + r['d45'] / 1000) * R45_wg +1395 R46 = (1 + r['d46'] / 1000) * R46_wg +1396 R47 = (1 + r['d47'] / 1000) * R47_wg +1397 R48 = (1 + r['d48'] / 1000) * R48_wg +1398 R49 = (1 + r['d49'] / 1000) * R49_wg +1399 +1400 r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O']) +1401 R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB +1402 R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW +1403 +1404 # Compute stochastic isobar ratios of the analyte +1405 R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios( +1406 R13, R18, D17O = r['D17O'] +1407 ) 1408 -1409 # Compute stochastic isobar ratios of the analyte -1410 R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios( -1411 R13, R18, D17O = r['D17O'] -1412 ) -1413 -1414 # Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1, -1415 # and raise a warning if the corresponding anomalies exceed 0.02 ppm. -1416 if (R45 / R45stoch - 1) > 5e-8: -1417 self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm') -1418 if (R46 / R46stoch - 1) > 5e-8: -1419 self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm') +1409 # Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1, +1410 # and raise a warning if the corresponding anomalies exceed 0.02 ppm. +1411 if (R45 / R45stoch - 1) > 5e-8: +1412 self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm') +1413 if (R46 / R46stoch - 1) > 5e-8: +1414 self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm') +1415 +1416 # Compute raw clumped isotope anomalies +1417 r['D47raw'] = 1000 * (R47 / R47stoch - 1) +1418 r['D48raw'] = 1000 * (R48 / R48stoch - 1) +1419 r['D49raw'] = 1000 * (R49 / R49stoch - 1) 1420 -1421 # Compute raw clumped isotope anomalies -1422 r['D47raw'] = 1000 * (R47 / R47stoch - 1) -1423 r['D48raw'] = 1000 * (R48 / R48stoch - 1) -1424 r['D49raw'] = 1000 * (R49 / R49stoch - 1) -1425 -1426 -1427 def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0): -1428 ''' -1429 Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`, -1430 optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope -1431 anomalies (`D47`, `D48`, `D49`), all expressed in permil. 
-1432 ''' -1433 -1434 # Compute R17 -1435 R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17 -1436 -1437 # Compute isotope concentrations -1438 C12 = (1 + R13) ** -1 -1439 C13 = C12 * R13 -1440 C16 = (1 + R17 + R18) ** -1 -1441 C17 = C16 * R17 -1442 C18 = C16 * R18 -1443 -1444 # Compute stochastic isotopologue concentrations -1445 C626 = C16 * C12 * C16 -1446 C627 = C16 * C12 * C17 * 2 -1447 C628 = C16 * C12 * C18 * 2 -1448 C636 = C16 * C13 * C16 -1449 C637 = C16 * C13 * C17 * 2 -1450 C638 = C16 * C13 * C18 * 2 -1451 C727 = C17 * C12 * C17 -1452 C728 = C17 * C12 * C18 * 2 -1453 C737 = C17 * C13 * C17 -1454 C738 = C17 * C13 * C18 * 2 -1455 C828 = C18 * C12 * C18 -1456 C838 = C18 * C13 * C18 -1457 -1458 # Compute stochastic isobar ratios -1459 R45 = (C636 + C627) / C626 -1460 R46 = (C628 + C637 + C727) / C626 -1461 R47 = (C638 + C728 + C737) / C626 -1462 R48 = (C738 + C828) / C626 -1463 R49 = C838 / C626 +1421 +1422 def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0): +1423 ''' +1424 Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`, +1425 optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope +1426 anomalies (`D47`, `D48`, `D49`), all expressed in permil. +1427 ''' +1428 +1429 # Compute R17 +1430 R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17 +1431 +1432 # Compute isotope concentrations +1433 C12 = (1 + R13) ** -1 +1434 C13 = C12 * R13 +1435 C16 = (1 + R17 + R18) ** -1 +1436 C17 = C16 * R17 +1437 C18 = C16 * R18 +1438 +1439 # Compute stochastic isotopologue concentrations +1440 C626 = C16 * C12 * C16 +1441 C627 = C16 * C12 * C17 * 2 +1442 C628 = C16 * C12 * C18 * 2 +1443 C636 = C16 * C13 * C16 +1444 C637 = C16 * C13 * C17 * 2 +1445 C638 = C16 * C13 * C18 * 2 +1446 C727 = C17 * C12 * C17 +1447 C728 = C17 * C12 * C18 * 2 +1448 C737 = C17 * C13 * C17 +1449 C738 = C17 * C13 * C18 * 2 +1450 C828 = C18 * C12 * C18 +1451 C838 = C18 * C13 * C18 +1452 +1453 # Compute stochastic isobar ratios +1454 R45 = (C636 + C627) / C626 +1455 R46 = (C628 + C637 + C727) / C626 +1456 R47 = (C638 + C728 + C737) / C626 +1457 R48 = (C738 + C828) / C626 +1458 R49 = C838 / C626 +1459 +1460 # Account for stochastic anomalies +1461 R47 *= 1 + D47 / 1000 +1462 R48 *= 1 + D48 / 1000 +1463 R49 *= 1 + D49 / 1000 1464 -1465 # Account for stochastic anomalies -1466 R47 *= 1 + D47 / 1000 -1467 R48 *= 1 + D48 / 1000 -1468 R49 *= 1 + D49 / 1000 -1469 -1470 # Return isobar ratios -1471 return R45, R46, R47, R48, R49 -1472 -1473 -1474 def split_samples(self, samples_to_split = 'all', grouping = 'by_session'): -1475 ''' -1476 Split unknown samples by UID (treat all analyses as different samples) -1477 or by session (treat analyses of a given sample in different sessions as -1478 different samples). 
-1479 -1480 **Parameters** -1481 -1482 + `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']` -1483 + `grouping`: `by_uid` | `by_session` -1484 ''' -1485 if samples_to_split == 'all': -1486 samples_to_split = [s for s in self.unknowns] -1487 gkeys = {'by_uid':'UID', 'by_session':'Session'} -1488 self.grouping = grouping.lower() -1489 if self.grouping in gkeys: -1490 gkey = gkeys[self.grouping] -1491 for r in self: -1492 if r['Sample'] in samples_to_split: -1493 r['Sample_original'] = r['Sample'] -1494 r['Sample'] = f"{r['Sample']}__{r[gkey]}" -1495 elif r['Sample'] in self.unknowns: -1496 r['Sample_original'] = r['Sample'] -1497 self.refresh_samples() -1498 -1499 -1500 def unsplit_samples(self, tables = False): -1501 ''' -1502 Reverse the effects of `D47data.split_samples()`. -1503 -1504 This should only be used after `D4xdata.standardize()` with `method='pooled'`. -1505 -1506 After `D4xdata.standardize()` with `method='indep_sessions'`, one should -1507 probably use `D4xdata.combine_samples()` instead to reverse the effects of -1508 `D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the -1509 effects of `D47data.split_samples()` with `grouping='by_session'` (because in -1510 that case session-averaged Δ4x values are statistically independent). -1511 ''' -1512 unknowns_old = sorted({s for s in self.unknowns}) -1513 CM_old = self.standardization.covar[:,:] -1514 VD_old = self.standardization.params.valuesdict().copy() -1515 vars_old = self.standardization.var_names -1516 -1517 unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r}) -1518 -1519 Ns = len(vars_old) - len(unknowns_old) -1520 vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new] -1521 VD_new = {k: VD_old[k] for k in vars_old[:Ns]} -1522 -1523 W = np.zeros((len(vars_new), len(vars_old))) -1524 W[:Ns,:Ns] = np.eye(Ns) -1525 for u in unknowns_new: -1526 splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u}) -1527 if self.grouping == 'by_session': -1528 weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits] -1529 elif self.grouping == 'by_uid': -1530 weights = [1 for s in splits] -1531 sw = sum(weights) -1532 weights = [w/sw for w in weights] -1533 W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:] -1534 -1535 CM_new = W @ CM_old @ W.T -1536 V = W @ np.array([[VD_old[k]] for k in vars_old]) -1537 VD_new = {k:v[0] for k,v in zip(vars_new, V)} -1538 -1539 self.standardization.covar = CM_new -1540 self.standardization.params.valuesdict = lambda : VD_new -1541 self.standardization.var_names = vars_new 
+1474 +1475 **Parameters** +1476 +1477 + `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']` +1478 + `grouping`: `by_uid` | `by_session` +1479 ''' +1480 if samples_to_split == 'all': +1481 samples_to_split = [s for s in self.unknowns] +1482 gkeys = {'by_uid':'UID', 'by_session':'Session'} +1483 self.grouping = grouping.lower() +1484 if self.grouping in gkeys: +1485 gkey = gkeys[self.grouping] +1486 for r in self: +1487 if r['Sample'] in samples_to_split: +1488 r['Sample_original'] = r['Sample'] +1489 r['Sample'] = f"{r['Sample']}__{r[gkey]}" +1490 elif r['Sample'] in self.unknowns: +1491 r['Sample_original'] = r['Sample'] +1492 self.refresh_samples() +1493 +1494 +1495 def unsplit_samples(self, tables = False): +1496 ''' +1497 Reverse the effects of `D47data.split_samples()`. +1498 +1499 This should only be used after `D4xdata.standardize()` with `method='pooled'`. +1500 +1501 After `D4xdata.standardize()` with `method='indep_sessions'`, one should +1502 probably use `D4xdata.combine_samples()` instead to reverse the effects of +1503 `D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the +1504 effects of `D47data.split_samples()` with `grouping='by_session'` (because in +1505 that case session-averaged Δ4x values are statistically independent). +1506 ''' +1507 unknowns_old = sorted({s for s in self.unknowns}) +1508 CM_old = self.standardization.covar[:,:] +1509 VD_old = self.standardization.params.valuesdict().copy() +1510 vars_old = self.standardization.var_names +1511 +1512 unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r}) +1513 +1514 Ns = len(vars_old) - len(unknowns_old) +1515 vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new] +1516 VD_new = {k: VD_old[k] for k in vars_old[:Ns]} +1517 +1518 W = np.zeros((len(vars_new), len(vars_old))) +1519 W[:Ns,:Ns] = np.eye(Ns) +1520 for u in unknowns_new: +1521 splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u}) +1522 if self.grouping == 'by_session': +1523 weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits] +1524 elif self.grouping == 'by_uid': +1525 weights = [1 for s in splits] +1526 sw = sum(weights) +1527 weights = [w/sw for w in weights] +1528 W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:] +1529 +1530 CM_new = W @ CM_old @ W.T +1531 V = W @ np.array([[VD_old[k]] for k in vars_old]) +1532 VD_new = {k:v[0] for k,v in zip(vars_new, V)} +1533 +1534 self.standardization.covar = CM_new +1535 self.standardization.params.valuesdict = lambda : VD_new +1536 self.standardization.var_names = vars_new +1537 +1538 for r in self: +1539 if r['Sample'] in self.unknowns: +1540 r['Sample_split'] = r['Sample'] +1541 r['Sample'] = r['Sample_original'] 1542 
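`unsplit_samples()` above and `combine_samples()` below share one linear-algebra step: a weight matrix `W` maps old estimates onto weighted averages, and the covariance matrix transforms as `W @ CM @ W.T`. A toy version with invented numbers:

```py
# Editorial sketch (invented numbers): combining correlated estimates with
# a weight matrix, as in unsplit_samples() and combine_samples().
import numpy as np

D_old = np.array([0.612, 0.598, 0.641])     # three estimates of the same sample
CM_old = np.diag([0.008, 0.010, 0.012])**2  # their covariance (uncorrelated here)

w = np.diag(CM_old)**-1                     # inverse-variance weights
W = np.atleast_2d(w / w.sum())              # a single group averaging all three

D_new = W @ D_old                           # weighted average
CM_new = W @ CM_old @ W.T                   # its (co)variance
print(D_new[0], CM_new[0,0]**.5)
```

Because any linear combination of Gaussian estimates remains Gaussian, this one matrix product propagates both the values and their full error correlations at once.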
-1562 Otherwise, `TimeTag` is by default equal to the index of each analysis -1563 in the dataset and `t` is defined as above. -1564 ''' -1565 for session in self.sessions: -1566 sdata = self.sessions[session]['data'] -1567 try: -1568 t0 = np.mean([r['TimeTag'] for r in sdata]) -1569 for r in sdata: -1570 r['t'] = r['TimeTag'] - t0 -1571 except KeyError: -1572 t0 = (len(sdata)-1)/2 -1573 for t,r in enumerate(sdata): -1574 r['t'] = t - t0 -1575 -1576 -1577 def report(self): -1578 ''' -1579 Prints a report on the standardization fit. -1580 Only applicable after `D4xdata.standardize(method='pooled')`. -1581 ''' -1582 report_fit(self.standardization) -1583 -1584 -1585 def combine_samples(self, sample_groups): -1586 ''' -1587 Combine analyses of different samples to compute weighted average Δ4x -1588 and new error (co)variances corresponding to the groups defined by the `sample_groups` -1589 dictionary. -1590 -1591 Caution: samples are weighted by number of replicate analyses, which is a -1592 reasonable default behavior but is not always optimal (e.g., in the case of strongly -1593 correlated analytical errors for one or more samples). -1594 -1595 Returns a tuple of: -1596 -1597 + the list of group names -1598 + an array of the corresponding Δ4x values -1599 + the corresponding (co)variance matrix -1600 -1601 **Parameters** -1602 -1603 + `sample_groups`: a dictionary of the form: -1604 ```py -1605 {'group1': ['sample_1', 'sample_2'], -1606 'group2': ['sample_3', 'sample_4', 'sample_5']} -1607 ``` -1608 ''' -1609 -1610 samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])] -1611 groups = sorted(sample_groups.keys()) -1612 group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups} -1613 D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples]) -1614 CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples]) -1615 W = np.array([ -1616 [self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples] -1617 for j in groups]) -1618 D4x_new = W @ D4x_old -1619 CM_new = W @ CM_old @ W.T -1620 -1621 return groups, D4x_new[:,0], CM_new -1622 -1623 -1624 @make_verbal -1625 def standardize(self, -1626 method = 'pooled', -1627 weighted_sessions = [], -1628 consolidate = True, -1629 consolidate_tables = False, -1630 consolidate_plots = False, -1631 constraints = {}, -1632 ): -1633 ''' -1634 Compute absolute Δ4x values for all replicate analyses and for sample averages. -1635 If `method` argument is set to `'pooled'`, the standardization processes all sessions -1636 in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous, -1637 i.e. that their true Δ4x value does not change between sessions, -1638 ([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If `method` argument is set to -1639 `'indep_sessions'`, the standardization processes each session independently, based only -1640 on anchor analyses. 
-1641 ''' -1642 -1643 self.standardization_method = method -1644 self.assign_timestamps() -1645 -1646 if method == 'pooled': -1647 if weighted_sessions: -1648 for session_group in weighted_sessions: -1649 if self._4x == '47': -1650 X = D47data([r for r in self if r['Session'] in session_group]) -1651 elif self._4x == '48': -1652 X = D48data([r for r in self if r['Session'] in session_group]) -1653 X.Nominal_D4x = self.Nominal_D4x.copy() -1654 X.refresh() -1655 result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False) -1656 w = np.sqrt(result.redchi) -1657 self.msg(f'Session group {session_group} MRSWD = {w:.4f}') -1658 for r in X: -1659 r[f'wD{self._4x}raw'] *= w -1660 else: -1661 self.msg(f'All D{self._4x}raw weights set to 1 ‰') -1662 for r in self: -1663 r[f'wD{self._4x}raw'] = 1. -1664 -1665 params = Parameters() -1666 for k,session in enumerate(self.sessions): -1667 self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.") -1668 self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.") -1669 self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.") -1670 s = pf(session) -1671 params.add(f'a_{s}', value = 0.9) -1672 params.add(f'b_{s}', value = 0.) -1673 params.add(f'c_{s}', value = -0.9) -1674 params.add(f'a2_{s}', value = 0., -1675# vary = self.sessions[session]['scrambling_drift'], -1676 ) -1677 params.add(f'b2_{s}', value = 0., -1678# vary = self.sessions[session]['slope_drift'], -1679 ) -1680 params.add(f'c2_{s}', value = 0., -1681# vary = self.sessions[session]['wg_drift'], -1682 ) -1683 if not self.sessions[session]['scrambling_drift']: -1684 params[f'a2_{s}'].expr = '0' -1685 if not self.sessions[session]['slope_drift']: -1686 params[f'b2_{s}'].expr = '0' -1687 if not self.sessions[session]['wg_drift']: -1688 params[f'c2_{s}'].expr = '0' -1689 -1690 for sample in self.unknowns: -1691 params.add(f'D{self._4x}_{pf(sample)}', value = 0.5) -1692 -1693 for k in constraints: -1694 params[k].expr = constraints[k] -1695 -1696 def residuals(p): -1697 R = [] -1698 for r in self: -1699 session = pf(r['Session']) -1700 sample = pf(r['Sample']) -1701 if r['Sample'] in self.Nominal_D4x: -1702 R += [ ( -1703 r[f'D{self._4x}raw'] - ( -1704 p[f'a_{session}'] * self.Nominal_D4x[r['Sample']] -1705 + p[f'b_{session}'] * r[f'd{self._4x}'] -1706 + p[f'c_{session}'] -1707 + r['t'] * ( -1708 p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']] -1709 + p[f'b2_{session}'] * r[f'd{self._4x}'] -1710 + p[f'c2_{session}'] -1711 ) -1712 ) -1713 ) / r[f'wD{self._4x}raw'] ] -1714 else: -1715 R += [ ( -1716 r[f'D{self._4x}raw'] - ( -1717 p[f'a_{session}'] * p[f'D{self._4x}_{sample}'] -1718 + p[f'b_{session}'] * r[f'd{self._4x}'] -1719 + p[f'c_{session}'] -1720 + r['t'] * ( -1721 p[f'a2_{session}'] * p[f'D{self._4x}_{sample}'] -1722 + p[f'b2_{session}'] * r[f'd{self._4x}'] -1723 + p[f'c2_{session}'] -1724 ) -1725 ) -1726 ) / r[f'wD{self._4x}raw'] ] -1727 return R -1728 -1729 M = Minimizer(residuals, params) -1730 result = M.least_squares() -1731 self.Nf = result.nfree -1732 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) -1733 new_names, new_covar, new_se = _fullcovar(result)[:3] -1734 result.var_names = new_names -1735 result.covar = new_covar -1736 -1737 for r in self: -1738 s = pf(r["Session"]) -1739 a = result.params.valuesdict()[f'a_{s}'] -1740 b = result.params.valuesdict()[f'b_{s}'] -1741 c = result.params.valuesdict()[f'c_{s}'] -1742 a2 = result.params.valuesdict()[f'a2_{s}'] -1743 b2 = 
result.params.valuesdict()[f'b2_{s}']
-1744 c2 = result.params.valuesdict()[f'c2_{s}']
-1745 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
-1746
-1747 self.standardization = result
-1748
-1749 for session in self.sessions:
-1750 self.sessions[session]['Np'] = 3
-1751 for k in ['scrambling', 'slope', 'wg']:
-1752 if self.sessions[session][f'{k}_drift']:
-1753 self.sessions[session]['Np'] += 1
+1543 self.refresh_samples()
+1544 self.consolidate_samples()
+1545 self.repeatabilities()
+1546
+1547 if tables:
+1548 self.table_of_analyses()
+1549 self.table_of_samples()
+1550
+1551 def assign_timestamps(self):
+1552 '''
+1553 Assign a time field `t` of type `float` to each analysis.
+1554
+1555 If `TimeTag` is one of the data fields, `t` is equal within a given session
+1556 to `TimeTag` minus the mean value of `TimeTag` for that session.
+1557 Otherwise, `TimeTag` is by default equal to the index of each analysis
+1558 in the dataset and `t` is defined as above.
+1559 '''
+1560 for session in self.sessions:
+1561 sdata = self.sessions[session]['data']
+1562 try:
+1563 t0 = np.mean([r['TimeTag'] for r in sdata])
+1564 for r in sdata:
+1565 r['t'] = r['TimeTag'] - t0
+1566 except KeyError:
+1567 t0 = (len(sdata)-1)/2
+1568 for t,r in enumerate(sdata):
+1569 r['t'] = t - t0
+1570
+1571
+1572 def report(self):
+1573 '''
+1574 Prints a report on the standardization fit.
+1575 Only applicable after `D4xdata.standardize(method='pooled')`.
+1576 '''
+1577 report_fit(self.standardization)
+1578
+1579
+1580 def combine_samples(self, sample_groups):
+1581 '''
+1582 Combine analyses of different samples to compute weighted average Δ4x
+1583 and new error (co)variances corresponding to the groups defined by the `sample_groups`
+1584 dictionary.
+1585
+1586 Caution: samples are weighted by number of replicate analyses, which is a
+1587 reasonable default behavior but is not always optimal (e.g., in the case of strongly
+1588 correlated analytical errors for one or more samples).
+1589
+1590 Returns a tuple of:
+1591
+1592 + the list of group names
+1593 + an array of the corresponding Δ4x values
+1594 + the corresponding (co)variance matrix
+1595
+1596 **Parameters**
+1597
+1598 + `sample_groups`: a dictionary of the form:
+1599 ```py
+1600 {'group1': ['sample_1', 'sample_2'],
+1601 'group2': ['sample_3', 'sample_4', 'sample_5']}
+1602 ```
+1603 '''
+1604
+1605 samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
+1606 groups = sorted(sample_groups.keys())
+1607 group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
+1608 D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
+1609 CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
+1610 W = np.array([
+1611 [self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
+1612 for j in groups])
+1613 D4x_new = W @ D4x_old
+1614 CM_new = W @ CM_old @ W.T
+1615
+1616 return groups, D4x_new[:,0], CM_new
+1617
+1618
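Since `combine_samples()` returns the group names, averaged Δ4x values and their covariance matrix rather than modifying the dataset, a minimal hypothetical usage sketch might look like this (sample and group names are placeholders; `mydata` is assumed to be standardized already):

```py
# hypothetical sketch, assuming `mydata` was standardized beforehand
groups, D47_avg, CM = mydata.combine_samples({
	'early': ['SAMPLE_1', 'SAMPLE_2'],
	'late':  ['SAMPLE_3', 'SAMPLE_4'],
	})
for g, D, var in zip(groups, D47_avg, CM.diagonal()):
	print(f'{g}: D47 = {D:.4f} ± {var**.5:.4f} (1SE)')
```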
+1619 @make_verbal
+1620 def standardize(self,
+1621 method = 'pooled',
+1622 weighted_sessions = [],
+1623 consolidate = True,
+1624 consolidate_tables = False,
+1625 consolidate_plots = False,
+1626 constraints = {},
+1627 ):
+1628 '''
+1629 Compute absolute Δ4x values for all replicate analyses and for sample averages.
+1630 If `method` argument is set to `'pooled'`, the standardization processes all sessions
+1631 in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
+1632 i.e. that their true Δ4x value does not change between sessions,
+1633 ([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If `method` argument is set to
+1634 `'indep_sessions'`, the standardization processes each session independently, based only
+1635 on anchor analyses.
+1636 '''
+1637
+1638 self.standardization_method = method
+1639 self.assign_timestamps()
+1640
+1641 if method == 'pooled':
+1642 if weighted_sessions:
+1643 for session_group in weighted_sessions:
+1644 if self._4x == '47':
+1645 X = D47data([r for r in self if r['Session'] in session_group])
+1646 elif self._4x == '48':
+1647 X = D48data([r for r in self if r['Session'] in session_group])
+1648 X.Nominal_D4x = self.Nominal_D4x.copy()
+1649 X.refresh()
+1650 result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
+1651 w = np.sqrt(result.redchi)
+1652 self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
+1653 for r in X:
+1654 r[f'wD{self._4x}raw'] *= w
+1655 else:
+1656 self.msg(f'All D{self._4x}raw weights set to 1 ‰')
+1657 for r in self:
+1658 r[f'wD{self._4x}raw'] = 1.
+1659
+1660 params = Parameters()
+1661 for k,session in enumerate(self.sessions):
+1662 self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
+1663 self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
+1664 self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
+1665 s = pf(session)
+1666 params.add(f'a_{s}', value = 0.9)
+1667 params.add(f'b_{s}', value = 0.)
+1668 params.add(f'c_{s}', value = -0.9)
+1669 params.add(f'a2_{s}', value = 0.,
+1670# vary = self.sessions[session]['scrambling_drift'],
+1671 )
+1672 params.add(f'b2_{s}', value = 0.,
+1673# vary = self.sessions[session]['slope_drift'],
+1674 )
+1675 params.add(f'c2_{s}', value = 0.,
+1676# vary = self.sessions[session]['wg_drift'],
+1677 )
+1678 if not self.sessions[session]['scrambling_drift']:
+1679 params[f'a2_{s}'].expr = '0'
+1680 if not self.sessions[session]['slope_drift']:
+1681 params[f'b2_{s}'].expr = '0'
+1682 if not self.sessions[session]['wg_drift']:
+1683 params[f'c2_{s}'].expr = '0'
+1684
+1685 for sample in self.unknowns:
+1686 params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
+1687
+1688 for k in constraints:
+1689 params[k].expr = constraints[k]
+1690
+1691 def residuals(p):
+1692 R = []
+1693 for r in self:
+1694 session = pf(r['Session'])
+1695 sample = pf(r['Sample'])
+1696 if r['Sample'] in self.Nominal_D4x:
+1697 R += [ (
+1698 r[f'D{self._4x}raw'] - (
+1699 p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
+1700 + p[f'b_{session}'] * r[f'd{self._4x}']
+1701 + p[f'c_{session}']
+1702 + r['t'] * (
+1703 p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
+1704 + p[f'b2_{session}'] * r[f'd{self._4x}']
+1705 + p[f'c2_{session}']
+1706 )
+1707 )
+1708 ) / r[f'wD{self._4x}raw'] ]
+1709 else:
+1710 R += [ (
+1711 r[f'D{self._4x}raw'] - (
+1712 p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
+1713 + p[f'b_{session}'] * r[f'd{self._4x}']
+1714 + p[f'c_{session}']
+1715 + r['t'] * (
+1716 p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
+1717 + p[f'b2_{session}'] * r[f'd{self._4x}']
+1718 + p[f'c2_{session}']
+1719 )
+1720 )
+1721 ) / r[f'wD{self._4x}raw'] ]
+1722 return R
+1723
+1724 M = Minimizer(residuals, params)
+1725 result = 
M.least_squares() +1726 self.Nf = result.nfree +1727 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) +1728 new_names, new_covar, new_se = _fullcovar(result)[:3] +1729 result.var_names = new_names +1730 result.covar = new_covar +1731 +1732 for r in self: +1733 s = pf(r["Session"]) +1734 a = result.params.valuesdict()[f'a_{s}'] +1735 b = result.params.valuesdict()[f'b_{s}'] +1736 c = result.params.valuesdict()[f'c_{s}'] +1737 a2 = result.params.valuesdict()[f'a2_{s}'] +1738 b2 = result.params.valuesdict()[f'b2_{s}'] +1739 c2 = result.params.valuesdict()[f'c2_{s}'] +1740 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) +1741 +1742 self.standardization = result +1743 +1744 for session in self.sessions: +1745 self.sessions[session]['Np'] = 3 +1746 for k in ['scrambling', 'slope', 'wg']: +1747 if self.sessions[session][f'{k}_drift']: +1748 self.sessions[session]['Np'] += 1 +1749 +1750 if consolidate: +1751 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) +1752 return result +1753 1754 -1755 if consolidate: -1756 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) -1757 return result -1758 -1759 -1760 elif method == 'indep_sessions': -1761 -1762 if weighted_sessions: -1763 for session_group in weighted_sessions: -1764 X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x) -1765 X.Nominal_D4x = self.Nominal_D4x.copy() -1766 X.refresh() -1767 # This is only done to assign r['wD47raw'] for r in X: -1768 X.standardize(method = method, weighted_sessions = [], consolidate = False) -1769 self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}') -1770 else: -1771 self.msg('All weights set to 1 ‰') -1772 for r in self: -1773 r[f'wD{self._4x}raw'] = 1 -1774 -1775 for session in self.sessions: -1776 s = self.sessions[session] -1777 p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2'] -1778 p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']] -1779 s['Np'] = sum(p_active) -1780 sdata = s['data'] -1781 -1782 A = np.array([ -1783 [ -1784 self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'], -1785 r[f'd{self._4x}'] / r[f'wD{self._4x}raw'], -1786 1 / r[f'wD{self._4x}raw'], -1787 self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'], -1788 r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'], -1789 r['t'] / r[f'wD{self._4x}raw'] -1790 ] -1791 for r in sdata if r['Sample'] in self.anchors -1792 ])[:,p_active] # only keep columns for the active parameters -1793 Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors]) -1794 s['Na'] = Y.size -1795 CM = linalg.inv(A.T @ A) -1796 bf = (CM @ A.T @ Y).T[0,:] -1797 k = 0 -1798 for n,a in zip(p_names, p_active): -1799 if a: -1800 s[n] = bf[k] -1801# self.msg(f'{n} = {bf[k]}') -1802 k += 1 -1803 else: -1804 s[n] = 0. 
-1805# self.msg(f'{n} = 0.0') +1755 elif method == 'indep_sessions': +1756 +1757 if weighted_sessions: +1758 for session_group in weighted_sessions: +1759 X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x) +1760 X.Nominal_D4x = self.Nominal_D4x.copy() +1761 X.refresh() +1762 # This is only done to assign r['wD47raw'] for r in X: +1763 X.standardize(method = method, weighted_sessions = [], consolidate = False) +1764 self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}') +1765 else: +1766 self.msg('All weights set to 1 ‰') +1767 for r in self: +1768 r[f'wD{self._4x}raw'] = 1 +1769 +1770 for session in self.sessions: +1771 s = self.sessions[session] +1772 p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2'] +1773 p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']] +1774 s['Np'] = sum(p_active) +1775 sdata = s['data'] +1776 +1777 A = np.array([ +1778 [ +1779 self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'], +1780 r[f'd{self._4x}'] / r[f'wD{self._4x}raw'], +1781 1 / r[f'wD{self._4x}raw'], +1782 self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'], +1783 r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'], +1784 r['t'] / r[f'wD{self._4x}raw'] +1785 ] +1786 for r in sdata if r['Sample'] in self.anchors +1787 ])[:,p_active] # only keep columns for the active parameters +1788 Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors]) +1789 s['Na'] = Y.size +1790 CM = linalg.inv(A.T @ A) +1791 bf = (CM @ A.T @ Y).T[0,:] +1792 k = 0 +1793 for n,a in zip(p_names, p_active): +1794 if a: +1795 s[n] = bf[k] +1796# self.msg(f'{n} = {bf[k]}') +1797 k += 1 +1798 else: +1799 s[n] = 0. +1800# self.msg(f'{n} = 0.0') +1801 +1802 for r in sdata : +1803 a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2'] +1804 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) +1805 r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t']) 1806 -1807 for r in sdata : -1808 a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2'] -1809 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) -1810 r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t']) -1811 -1812 s['CM'] = np.zeros((6,6)) -1813 i = 0 -1814 k_active = [j for j,a in enumerate(p_active) if a] -1815 for j,a in enumerate(p_active): -1816 if a: -1817 s['CM'][j,k_active] = CM[i,:] -1818 i += 1 -1819 -1820 if not weighted_sessions: -1821 w = self.rmswd()['rmswd'] -1822 for r in self: -1823 r[f'wD{self._4x}'] *= w -1824 r[f'wD{self._4x}raw'] *= w -1825 for session in self.sessions: -1826 self.sessions[session]['CM'] *= w**2 -1827 -1828 for session in self.sessions: -1829 s = self.sessions[session] -1830 s['SE_a'] = s['CM'][0,0]**.5 -1831 s['SE_b'] = s['CM'][1,1]**.5 -1832 s['SE_c'] = s['CM'][2,2]**.5 -1833 s['SE_a2'] = s['CM'][3,3]**.5 -1834 s['SE_b2'] = s['CM'][4,4]**.5 -1835 s['SE_c2'] = s['CM'][5,5]**.5 -1836 -1837 if not weighted_sessions: -1838 self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions]) -1839 else: -1840 self.Nf = 0 -1841 for sg in weighted_sessions: -1842 self.Nf += self.rmswd(sessions = sg)['Nf'] -1843 -1844 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) -1845 -1846 avgD4x = { -1847 sample: np.mean([r[f'D{self._4x}'] for r 
in self if r['Sample'] == sample]) -1848 for sample in self.samples -1849 } -1850 chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self]) -1851 rD4x = (chi2/self.Nf)**.5 -1852 self.repeatability[f'sigma_{self._4x}'] = rD4x -1853 -1854 if consolidate: -1855 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) -1856 -1857 -1858 def standardization_error(self, session, d4x, D4x, t = 0): -1859 ''' -1860 Compute standardization error for a given session and -1861 (δ47, Δ47) composition. -1862 ''' -1863 a = self.sessions[session]['a'] -1864 b = self.sessions[session]['b'] -1865 c = self.sessions[session]['c'] -1866 a2 = self.sessions[session]['a2'] -1867 b2 = self.sessions[session]['b2'] -1868 c2 = self.sessions[session]['c2'] -1869 CM = self.sessions[session]['CM'] -1870 -1871 x, y = D4x, d4x -1872 z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t -1873# x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t) -1874 dxdy = -(b+b2*t) / (a+a2*t) -1875 dxdz = 1. / (a+a2*t) -1876 dxda = -x / (a+a2*t) -1877 dxdb = -y / (a+a2*t) -1878 dxdc = -1. / (a+a2*t) -1879 dxda2 = -x * a2 / (a+a2*t) -1880 dxdb2 = -y * t / (a+a2*t) -1881 dxdc2 = -t / (a+a2*t) -1882 V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2]) -1883 sx = (V @ CM @ V.T) ** .5 -1884 return sx -1885 -1886 -1887 @make_verbal -1888 def summary(self, -1889 dir = 'output', -1890 filename = None, -1891 save_to_file = True, -1892 print_out = True, -1893 ): -1894 ''' -1895 Print out an/or save to disk a summary of the standardization results. -1896 -1897 **Parameters** -1898 -1899 + `dir`: the directory in which to save the table -1900 + `filename`: the name to the csv file to write to -1901 + `save_to_file`: whether to save the table to disk -1902 + `print_out`: whether to print out the table -1903 ''' -1904 -1905 out = [] -1906 out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]] -1907 out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]] -1908 out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]] -1909 out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]] -1910 out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]] -1911 out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]] -1912 out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]] -1913 out += [['Model degrees of freedom', f"{self.Nf}"]] -1914 out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]] -1915 out += [['Standardization method', self.standardization_method]] -1916 -1917 if save_to_file: -1918 if not os.path.exists(dir): -1919 os.makedirs(dir) -1920 if filename is None: -1921 filename = f'D{self._4x}_summary.csv' -1922 with open(f'{dir}/{filename}', 'w') as fid: -1923 fid.write(make_csv(out)) -1924 if print_out: -1925 self.msg('\n' + pretty_table(out, header = 0)) -1926 -1927 -1928 @make_verbal -1929 def table_of_sessions(self, -1930 dir = 'output', -1931 filename = None, -1932 save_to_file = True, -1933 print_out = True, -1934 output = None, -1935 ): -1936 ''' -1937 Print out an/or save to disk a table of sessions. 
-1938 -1939 **Parameters** -1940 -1941 + `dir`: the directory in which to save the table -1942 + `filename`: the name to the csv file to write to -1943 + `save_to_file`: whether to save the table to disk -1944 + `print_out`: whether to print out the table -1945 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); -1946 if set to `'raw'`: return a list of list of strings -1947 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -1948 ''' -1949 include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions]) -1950 include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions]) -1951 include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions]) -1952 -1953 out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']] -1954 if include_a2: -1955 out[-1] += ['a2 ± SE'] -1956 if include_b2: -1957 out[-1] += ['b2 ± SE'] -1958 if include_c2: -1959 out[-1] += ['c2 ± SE'] -1960 for session in self.sessions: -1961 out += [[ -1962 session, -1963 f"{self.sessions[session]['Na']}", -1964 f"{self.sessions[session]['Nu']}", -1965 f"{self.sessions[session]['d13Cwg_VPDB']:.3f}", -1966 f"{self.sessions[session]['d18Owg_VSMOW']:.3f}", -1967 f"{self.sessions[session]['r_d13C_VPDB']:.4f}", -1968 f"{self.sessions[session]['r_d18O_VSMOW']:.4f}", -1969 f"{self.sessions[session][f'r_D{self._4x}']:.4f}", -1970 f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}", -1971 f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}", -1972 f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}", -1973 ]] -1974 if include_a2: -1975 if self.sessions[session]['scrambling_drift']: -1976 out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"] +1807 s['CM'] = np.zeros((6,6)) +1808 i = 0 +1809 k_active = [j for j,a in enumerate(p_active) if a] +1810 for j,a in enumerate(p_active): +1811 if a: +1812 s['CM'][j,k_active] = CM[i,:] +1813 i += 1 +1814 +1815 if not weighted_sessions: +1816 w = self.rmswd()['rmswd'] +1817 for r in self: +1818 r[f'wD{self._4x}'] *= w +1819 r[f'wD{self._4x}raw'] *= w +1820 for session in self.sessions: +1821 self.sessions[session]['CM'] *= w**2 +1822 +1823 for session in self.sessions: +1824 s = self.sessions[session] +1825 s['SE_a'] = s['CM'][0,0]**.5 +1826 s['SE_b'] = s['CM'][1,1]**.5 +1827 s['SE_c'] = s['CM'][2,2]**.5 +1828 s['SE_a2'] = s['CM'][3,3]**.5 +1829 s['SE_b2'] = s['CM'][4,4]**.5 +1830 s['SE_c2'] = s['CM'][5,5]**.5 +1831 +1832 if not weighted_sessions: +1833 self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions]) +1834 else: +1835 self.Nf = 0 +1836 for sg in weighted_sessions: +1837 self.Nf += self.rmswd(sessions = sg)['Nf'] +1838 +1839 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) +1840 +1841 avgD4x = { +1842 sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample]) +1843 for sample in self.samples +1844 } +1845 chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self]) +1846 rD4x = (chi2/self.Nf)**.5 +1847 self.repeatability[f'sigma_{self._4x}'] = rD4x +1848 +1849 if consolidate: +1850 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) +1851 +1852 +1853 def standardization_error(self, session, d4x, D4x, t = 0): +1854 ''' +1855 Compute standardization error for a given session and +1856 (δ47, Δ47) composition. 
+1857 '''
+1858 a = self.sessions[session]['a']
+1859 b = self.sessions[session]['b']
+1860 c = self.sessions[session]['c']
+1861 a2 = self.sessions[session]['a2']
+1862 b2 = self.sessions[session]['b2']
+1863 c2 = self.sessions[session]['c2']
+1864 CM = self.sessions[session]['CM']
+1865
+1866 x, y = D4x, d4x
+1867 z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
+1868# x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
+1869 dxdy = -(b+b2*t) / (a+a2*t)
+1870 dxdz = 1. / (a+a2*t)
+1871 dxda = -x / (a+a2*t)
+1872 dxdb = -y / (a+a2*t)
+1873 dxdc = -1. / (a+a2*t)
+1874 dxda2 = -x * a2 / (a+a2*t)
+1875 dxdb2 = -y * t / (a+a2*t)
+1876 dxdc2 = -t / (a+a2*t)
+1877 V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
+1878 sx = (V @ CM @ V.T) ** .5
+1879 return sx
+1880
+1881
+1882 @make_verbal
+1883 def summary(self,
+1884 dir = 'output',
+1885 filename = None,
+1886 save_to_file = True,
+1887 print_out = True,
+1888 ):
+1889 '''
+1890 Print out and/or save to disk a summary of the standardization results.
+1891
+1892 **Parameters**
+1893
+1894 + `dir`: the directory in which to save the table
+1895 + `filename`: the name of the csv file to write to
+1896 + `save_to_file`: whether to save the table to disk
+1897 + `print_out`: whether to print out the table
+1898 '''
+1899
+1900 out = []
+1901 out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
+1902 out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
+1903 out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
+1904 out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
+1905 out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
+1906 out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
+1907 out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
+1908 out += [['Model degrees of freedom', f"{self.Nf}"]]
+1909 out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
+1910 out += [['Standardization method', self.standardization_method]]
+1911
+1912 if save_to_file:
+1913 if not os.path.exists(dir):
+1914 os.makedirs(dir)
+1915 if filename is None:
+1916 filename = f'D{self._4x}_summary.csv'
+1917 with open(f'{dir}/{filename}', 'w') as fid:
+1918 fid.write(make_csv(out))
+1919 if print_out:
+1920 self.msg('\n' + pretty_table(out, header = 0))
+1921
+1922
+1923 @make_verbal
+1924 def table_of_sessions(self,
+1925 dir = 'output',
+1926 filename = None,
+1927 save_to_file = True,
+1928 print_out = True,
+1929 output = None,
+1930 ):
+1931 '''
+1932 Print out and/or save to disk a table of sessions.
+1933 +1934 **Parameters** +1935 +1936 + `dir`: the directory in which to save the table +1937 + `filename`: the name to the csv file to write to +1938 + `save_to_file`: whether to save the table to disk +1939 + `print_out`: whether to print out the table +1940 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); +1941 if set to `'raw'`: return a list of list of strings +1942 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) +1943 ''' +1944 include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions]) +1945 include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions]) +1946 include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions]) +1947 +1948 out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']] +1949 if include_a2: +1950 out[-1] += ['a2 ± SE'] +1951 if include_b2: +1952 out[-1] += ['b2 ± SE'] +1953 if include_c2: +1954 out[-1] += ['c2 ± SE'] +1955 for session in self.sessions: +1956 out += [[ +1957 session, +1958 f"{self.sessions[session]['Na']}", +1959 f"{self.sessions[session]['Nu']}", +1960 f"{self.sessions[session]['d13Cwg_VPDB']:.3f}", +1961 f"{self.sessions[session]['d18Owg_VSMOW']:.3f}", +1962 f"{self.sessions[session]['r_d13C_VPDB']:.4f}", +1963 f"{self.sessions[session]['r_d18O_VSMOW']:.4f}", +1964 f"{self.sessions[session][f'r_D{self._4x}']:.4f}", +1965 f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}", +1966 f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}", +1967 f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}", +1968 ]] +1969 if include_a2: +1970 if self.sessions[session]['scrambling_drift']: +1971 out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"] +1972 else: +1973 out[-1] += [''] +1974 if include_b2: +1975 if self.sessions[session]['slope_drift']: +1976 out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"] 1977 else: 1978 out[-1] += [''] -1979 if include_b2: -1980 if self.sessions[session]['slope_drift']: -1981 out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"] +1979 if include_c2: +1980 if self.sessions[session]['wg_drift']: +1981 out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"] 1982 else: 1983 out[-1] += [''] -1984 if include_c2: -1985 if self.sessions[session]['wg_drift']: -1986 out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"] -1987 else: -1988 out[-1] += [''] -1989 -1990 if save_to_file: -1991 if not os.path.exists(dir): -1992 os.makedirs(dir) -1993 if filename is None: -1994 filename = f'D{self._4x}_sessions.csv' -1995 with open(f'{dir}/{filename}', 'w') as fid: -1996 fid.write(make_csv(out)) -1997 if print_out: -1998 self.msg('\n' + pretty_table(out)) -1999 if output == 'raw': -2000 return out -2001 elif output == 'pretty': -2002 return pretty_table(out) -2003 -2004 -2005 @make_verbal -2006 def table_of_analyses( -2007 self, -2008 dir = 'output', -2009 filename = None, -2010 save_to_file = True, -2011 print_out = True, -2012 output = None, -2013 ): -2014 ''' -2015 Print out an/or save to disk a table of analyses. 
-2016 -2017 **Parameters** -2018 -2019 + `dir`: the directory in which to save the table -2020 + `filename`: the name to the csv file to write to -2021 + `save_to_file`: whether to save the table to disk -2022 + `print_out`: whether to print out the table -2023 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); -2024 if set to `'raw'`: return a list of list of strings -2025 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -2026 ''' -2027 -2028 out = [['UID','Session','Sample']] -2029 extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}] -2030 for f in extra_fields: -2031 out[-1] += [f[0]] -2032 out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}'] -2033 for r in self: -2034 out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]] -2035 for f in extra_fields: -2036 out[-1] += [f"{r[f[0]]:{f[1]}}"] -2037 out[-1] += [ -2038 f"{r['d13Cwg_VPDB']:.3f}", -2039 f"{r['d18Owg_VSMOW']:.3f}", -2040 f"{r['d45']:.6f}", -2041 f"{r['d46']:.6f}", -2042 f"{r['d47']:.6f}", -2043 f"{r['d48']:.6f}", -2044 f"{r['d49']:.6f}", -2045 f"{r['d13C_VPDB']:.6f}", -2046 f"{r['d18O_VSMOW']:.6f}", -2047 f"{r['D47raw']:.6f}", -2048 f"{r['D48raw']:.6f}", -2049 f"{r['D49raw']:.6f}", -2050 f"{r[f'D{self._4x}']:.6f}" -2051 ] -2052 if save_to_file: -2053 if not os.path.exists(dir): -2054 os.makedirs(dir) -2055 if filename is None: -2056 filename = f'D{self._4x}_analyses.csv' -2057 with open(f'{dir}/{filename}', 'w') as fid: -2058 fid.write(make_csv(out)) -2059 if print_out: -2060 self.msg('\n' + pretty_table(out)) -2061 return out -2062 -2063 @make_verbal -2064 def covar_table( -2065 self, -2066 correl = False, -2067 dir = 'output', -2068 filename = None, -2069 save_to_file = True, -2070 print_out = True, -2071 output = None, -2072 ): -2073 ''' -2074 Print out, save to disk and/or return the variance-covariance matrix of D4x -2075 for all unknown samples. 
-2076
-2077 **Parameters**
-2078
-2079 + `dir`: the directory in which to save the csv
-2080 + `filename`: the name of the csv file to write to
-2081 + `save_to_file`: whether to save the csv
-2082 + `print_out`: whether to print out the matrix
-2083 + `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
-2084 if set to `'raw'`: return a list of list of strings
-2085 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2086 '''
-2087 samples = sorted([u for u in self.unknowns])
-2088 out = [[''] + samples]
-2089 for s1 in samples:
-2090 out.append([s1])
-2091 for s2 in samples:
-2092 if correl:
-2093 out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
-2094 else:
-2095 out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
-2096
-2097 if save_to_file:
-2098 if not os.path.exists(dir):
-2099 os.makedirs(dir)
-2100 if filename is None:
-2101 if correl:
-2102 filename = f'D{self._4x}_correl.csv'
-2103 else:
-2104 filename = f'D{self._4x}_covar.csv'
-2105 with open(f'{dir}/{filename}', 'w') as fid:
-2106 fid.write(make_csv(out))
-2107 if print_out:
-2108 self.msg('\n'+pretty_table(out))
-2109 if output == 'raw':
-2110 return out
-2111 elif output == 'pretty':
-2112 return pretty_table(out)
-2113
-2114 @make_verbal
-2115 def table_of_samples(
-2116 self,
-2117 dir = 'output',
-2118 filename = None,
-2119 save_to_file = True,
-2120 print_out = True,
-2121 output = None,
-2122 ):
-2123 '''
-2124 Print out, save to disk and/or return a table of samples.
-2125
-2126 **Parameters**
-2127
-2128 + `dir`: the directory in which to save the csv
-2129 + `filename`: the name of the csv file to write to
-2130 + `save_to_file`: whether to save the csv
-2131 + `print_out`: whether to print out the table
-2132 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-2133 if set to `'raw'`: return a list of list of strings
-2134 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2135 '''
-2136
-2137 out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
-2138 for sample in self.anchors:
-2139 out += [[
-2140 f"{sample}",
-2141 f"{self.samples[sample]['N']}",
-2142 f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2143 f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2144 f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
-2145 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
-2146 ]]
-2147 for sample in self.unknowns:
-2148 out += [[
-2149 f"{sample}",
-2150 f"{self.samples[sample]['N']}",
-2151 f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2152 f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2153 f"{self.samples[sample][f'D{self._4x}']:.4f}",
-2154 f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
-2155 f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
-2156 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
-2157 f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
-2158 ]]
-2159 if save_to_file:
-2160 if not os.path.exists(dir):
-2161 os.makedirs(dir)
-2162 if filename is None:
-2163 filename = f'D{self._4x}_samples.csv'
-2164 with open(f'{dir}/{filename}', 'w') as fid:
-2165 fid.write(make_csv(out))
-2166 if print_out:
-2167 self.msg('\n'+pretty_table(out))
-2168 if output == 'raw':
-2169 return out
-2170 elif output == 'pretty':
-2171 return pretty_table(out)
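The `95% CL` column in the table above is simply the standard error scaled by the Student's t-factor stored in `self.t95`. The same value can be recomputed by hand, as in this hypothetical sketch (`mydata` and `'MYSAMPLE'` are placeholders):

```py
# hypothetical sketch: recompute the 95 % confidence limit for one unknown
from scipy.stats import t as tstudent

SE = mydata.samples['MYSAMPLE']['SE_D47']   # standard error from the model fit
t95 = tstudent.ppf(1 - 0.05/2, mydata.Nf)   # same value as mydata.t95
print(f'± {SE * t95:.4f} (95 % CL)')
```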
+1984
+1985 if save_to_file:
+1986 if not os.path.exists(dir):
+1987 os.makedirs(dir)
+1988 if filename is None:
+1989 filename = f'D{self._4x}_sessions.csv'
+1990 with open(f'{dir}/{filename}', 'w') as fid:
+1991 fid.write(make_csv(out))
+1992 if print_out:
+1993 self.msg('\n' + pretty_table(out))
+1994 if output == 'raw':
+1995 return out
+1996 elif output == 'pretty':
+1997 return pretty_table(out)
+1998
+1999
+2000 @make_verbal
+2001 def table_of_analyses(
+2002 self,
+2003 dir = 'output',
+2004 filename = None,
+2005 save_to_file = True,
+2006 print_out = True,
+2007 output = None,
+2008 ):
+2009 '''
+2010 Print out and/or save to disk a table of analyses.
+2011
+2012 **Parameters**
+2013
+2014 + `dir`: the directory in which to save the table
+2015 + `filename`: the name of the csv file to write to
+2016 + `save_to_file`: whether to save the table to disk
+2017 + `print_out`: whether to print out the table
+2018 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2019 if set to `'raw'`: return a list of list of strings
+2020 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2021 '''
+2022
+2023 out = [['UID','Session','Sample']]
+2024 extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
+2025 for f in extra_fields:
+2026 out[-1] += [f[0]]
+2027 out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
+2028 for r in self:
+2029 out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
+2030 for f in extra_fields:
+2031 out[-1] += [f"{r[f[0]]:{f[1]}}"]
+2032 out[-1] += [
+2033 f"{r['d13Cwg_VPDB']:.3f}",
+2034 f"{r['d18Owg_VSMOW']:.3f}",
+2035 f"{r['d45']:.6f}",
+2036 f"{r['d46']:.6f}",
+2037 f"{r['d47']:.6f}",
+2038 f"{r['d48']:.6f}",
+2039 f"{r['d49']:.6f}",
+2040 f"{r['d13C_VPDB']:.6f}",
+2041 f"{r['d18O_VSMOW']:.6f}",
+2042 f"{r['D47raw']:.6f}",
+2043 f"{r['D48raw']:.6f}",
+2044 f"{r['D49raw']:.6f}",
+2045 f"{r[f'D{self._4x}']:.6f}"
+2046 ]
+2047 if save_to_file:
+2048 if not os.path.exists(dir):
+2049 os.makedirs(dir)
+2050 if filename is None:
+2051 filename = f'D{self._4x}_analyses.csv'
+2052 with open(f'{dir}/{filename}', 'w') as fid:
+2053 fid.write(make_csv(out))
+2054 if print_out:
+2055 self.msg('\n' + pretty_table(out))
+2056 return out
+2057
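Note that, unlike `table_of_sessions()`, `table_of_analyses()` always returns the `out` list. A hypothetical sketch of using that return value directly, without writing anything to disk:

```py
# hypothetical sketch: work with the analyses table in memory
out = mydata.table_of_analyses(save_to_file = False, print_out = False)
headers, rows = out[0], out[1:]
j = headers.index('D47raw')  # column lookup by header name
print([row[j] for row in rows[:3]])
```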
+2058 @make_verbal
+2059 def covar_table(
+2060 self,
+2061 correl = False,
+2062 dir = 'output',
+2063 filename = None,
+2064 save_to_file = True,
+2065 print_out = True,
+2066 output = None,
+2067 ):
+2068 '''
+2069 Print out, save to disk and/or return the variance-covariance matrix of D4x
+2070 for all unknown samples.
+2071
+2072 **Parameters**
+2073
+2074 + `dir`: the directory in which to save the csv
+2075 + `filename`: the name of the csv file to write to
+2076 + `save_to_file`: whether to save the csv
+2077 + `print_out`: whether to print out the matrix
+2078 + `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
+2079 if set to `'raw'`: return a list of list of strings
+2080 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2081 '''
+2082 samples = sorted([u for u in self.unknowns])
+2083 out = [[''] + samples]
+2084 for s1 in samples:
+2085 out.append([s1])
+2086 for s2 in samples:
+2087 if correl:
+2088 out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
+2089 else:
+2090 out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
+2091
+2092 if save_to_file:
+2093 if not os.path.exists(dir):
+2094 os.makedirs(dir)
+2095 if filename is None:
+2096 if correl:
+2097 filename = f'D{self._4x}_correl.csv'
+2098 else:
+2099 filename = f'D{self._4x}_covar.csv'
+2100 with open(f'{dir}/{filename}', 'w') as fid:
+2101 fid.write(make_csv(out))
+2102 if print_out:
+2103 self.msg('\n'+pretty_table(out))
+2104 if output == 'raw':
+2105 return out
+2106 elif output == 'pretty':
+2107 return pretty_table(out)
+2108
+2109 @make_verbal
+2110 def table_of_samples(
+2111 self,
+2112 dir = 'output',
+2113 filename = None,
+2114 save_to_file = True,
+2115 print_out = True,
+2116 output = None,
+2117 ):
+2118 '''
+2119 Print out, save to disk and/or return a table of samples.
+2120
+2121 **Parameters**
+2122
+2123 + `dir`: the directory in which to save the csv
+2124 + `filename`: the name of the csv file to write to
+2125 + `save_to_file`: whether to save the csv
+2126 + `print_out`: whether to print out the table
+2127 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2128 if set to `'raw'`: return a list of list of strings
+2129 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2130 '''
+2131
+2132 out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
+2133 for sample in self.anchors:
+2134 out += [[
+2135 f"{sample}",
+2136 f"{self.samples[sample]['N']}",
+2137 f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2138 f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2139 f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
+2140 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
+2141 ]]
+2142 for sample in self.unknowns:
+2143 out += [[
+2144 f"{sample}",
+2145 f"{self.samples[sample]['N']}",
+2146 f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2147 f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2148 f"{self.samples[sample][f'D{self._4x}']:.4f}",
+2149 f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
+2150 f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
+2151 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
+2152 f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
+2153 ]]
+2154 if save_to_file:
+2155 if not os.path.exists(dir):
+2156 os.makedirs(dir)
+2157 if filename is None:
+2158 filename = f'D{self._4x}_samples.csv'
+2159 with open(f'{dir}/{filename}', 'w') as fid:
+2160 fid.write(make_csv(out))
+2161 if print_out:
+2162 self.msg('\n'+pretty_table(out))
+2163 if output == 'raw':
+2164 return out
+2165 elif output == 'pretty':
+2166 return pretty_table(out)
+2167
+2168
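As a hypothetical sanity check on the relationship between the two `covar_table()` variants (assuming, as is usual for a correlation matrix, that each correlation equals the covariance divided by the product of the two standard errors; sample names are placeholders):

```py
# hypothetical sketch: covariance vs. correlation for two unknowns
cov = mydata.sample_D4x_covar('SAMPLE_A', 'SAMPLE_B')
se_a = mydata.sample_D4x_covar('SAMPLE_A')**.5
se_b = mydata.sample_D4x_covar('SAMPLE_B')**.5
print(cov / (se_a * se_b))                               # expected to match:
print(mydata.sample_D4x_correl('SAMPLE_A', 'SAMPLE_B'))
```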
+2169 def plot_sessions(self, dir = 'output', figsize = (8,8)):
+2170 '''
+2171 Generate session plots and save them to disk.
2172
-2173
-2174 def plot_sessions(self, dir = 'output', figsize = (8,8)):
-2175 '''
-2176 Generate session plots and save them to disk.
-2177
-2178 **Parameters**
-2179
-2180 + `dir`: the directory in which to save the plots
-2181 + `figsize`: the width and height (in inches) of each plot
-2182 '''
-2183 if not os.path.exists(dir):
-2184 os.makedirs(dir)
+2173 **Parameters**
+2174
+2175 + `dir`: the directory in which to save the plots
+2176 + `figsize`: the width and height (in inches) of each plot
+2177 '''
+2178 if not os.path.exists(dir):
+2179 os.makedirs(dir)
+2180
+2181 for session in self.sessions:
+2182 sp = self.plot_single_session(session, xylimits = 'constant')
+2183 ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
+2184 ppl.close(sp.fig)
2185
-2186 for session in self.sessions:
-2187 sp = self.plot_single_session(session, xylimits = 'constant')
-2188 ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
-2189 ppl.close(sp.fig)
-2190
+2186
+2187 @make_verbal
+2188 def consolidate_samples(self):
+2189 '''
+2190 Compile various statistics for each sample.
2191
-2192 @make_verbal
-2193 def consolidate_samples(self):
-2194 '''
-2195 Compile various statistics for each sample.
+2192 For each anchor sample:
+2193
+2194 + `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
+2195 + `SE_D47` or `SE_D48`: set to zero by definition
2196
-2197 For each anchor sample:
+2197 For each unknown sample:
2198
-2199 + `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
-2200 + `SE_D47` or `SE_D48`: set to zero by definition
+2199 + `D47` or `D48`: the standardized Δ4x value for this unknown
+2200 + `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
2201
-2202 For each unknown sample:
+2202 For each anchor and unknown:
2203
-2204 + `D47` or `D48`: the standardized Δ4x value for this unknown
-2205 + `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
-2206
-2207 For each anchor and unknown:
-2208
-2209 + `N`: the total number of analyses of this sample
-2210 + `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
-2211 + `d13C_VPDB`: the average δ13C_VPDB value for this sample
-2212 + `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
-2213 + `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
-2214 variance, indicating whether the Δ4x repeatability of this sample differs significantly from
-2215 that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
-2216 '''
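For context, the Levene test used below compares the scatter of one sample's replicates with that of the reference sample; a small p-value flags significantly different repeatabilities. A standalone sketch with made-up numbers, mirroring the call in the code that follows:

```py
# standalone sketch with made-up numbers
from scipy.stats import levene

ref_pop = [0.601, 0.598, 0.603, 0.600, 0.597]  # replicate D47 of reference sample
pop     = [0.712, 0.695, 0.731, 0.688]         # replicate D47 of tested sample
p = levene(ref_pop, pop, center = 'median')[1]
print(f'p_Levene = {p:.3f}')  # small p => unequal repeatabilities
```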
-2217 D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
-2218 for sample in self.samples:
-2219 self.samples[sample]['N'] = len(self.samples[sample]['data'])
-2220 if self.samples[sample]['N'] > 1:
-2221 self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
-2222
-2223 self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
-2224 self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
-2225
-2226 D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
-2227 if len(D4x_pop) > 2:
-2228 self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
-2229
-2230 if self.standardization_method == 'pooled':
-2231 for sample in self.anchors:
-2232 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2233 self.samples[sample][f'SE_D{self._4x}'] = 0.
-2234 for sample in self.unknowns:
-2235 self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
-2236 try:
-2237 self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
-2238 except ValueError:
-2239 # when `sample` is constrained by self.standardize(constraints = {...}),
-2240 # it is no longer listed in self.standardization.var_names.
-2241 # Temporary fix: define SE as zero for now
-2242 self.samples[sample][f'SE_D{self._4x}'] = 0.
-2243
-2244 elif self.standardization_method == 'indep_sessions':
-2245 for sample in self.anchors:
-2246 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2247 self.samples[sample][f'SE_D{self._4x}'] = 0.
-2248 for sample in self.unknowns:
-2249 self.msg(f'Consolidating sample {sample}')
-2250 self.unknowns[sample][f'session_D{self._4x}'] = {}
-2251 session_avg = []
-2252 for session in self.sessions:
-2253 sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
-2254 if sdata:
-2255 self.msg(f'{sample} found in session {session}')
-2256 avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
-2257 avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
-2258 # !! TODO: sigma_s below does not account for temporal changes in standardization error
-2259 sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
-2260 sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
-2261 session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
-2262 self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
-2263 self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
-2264 weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
-2265 wsum = sum([weights[s] for s in weights])
-2266 for s in weights:
-2267 self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
+2204 + `N`: the total number of analyses of this sample
+2205 + `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
+2206 + `d13C_VPDB`: the average δ13C_VPDB value for this sample
+2207 + `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
+2208 + `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
+2209 variance, indicating whether the Δ4x repeatability of this sample differs significantly from
+2210 that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
+2211 '''
+2212 D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
+2213 for sample in self.samples:
+2214 self.samples[sample]['N'] = len(self.samples[sample]['data'])
+2215 if self.samples[sample]['N'] > 1:
+2216 self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
+2217
+2218 self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
+2219 self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
+2220
+2221 D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
+2222 if len(D4x_pop) > 2:
+2223 self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
+2224
+2225 if self.standardization_method == 'pooled':
+2226 for sample in self.anchors:
+2227 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2228 self.samples[sample][f'SE_D{self._4x}'] = 0.
+2229 for sample in self.unknowns:
+2230 self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
+2231 try:
+2232 self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
+2233 except ValueError:
+2234 # when `sample` is constrained by self.standardize(constraints = {...}),
+2235 # it is no longer listed in self.standardization.var_names.
+2236 # Temporary fix: define SE as zero for now
+2237 self.samples[sample][f'SE_D{self._4x}'] = 0.
+2238
+2239 elif self.standardization_method == 'indep_sessions':
+2240 for sample in self.anchors:
+2241 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2242 self.samples[sample][f'SE_D{self._4x}'] = 0. 
+2243 for sample in self.unknowns: +2244 self.msg(f'Consolidating sample {sample}') +2245 self.unknowns[sample][f'session_D{self._4x}'] = {} +2246 session_avg = [] +2247 for session in self.sessions: +2248 sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample] +2249 if sdata: +2250 self.msg(f'{sample} found in session {session}') +2251 avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata]) +2252 avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata]) +2253 # !! TODO: sigma_s below does not account for temporal changes in standardization error +2254 sigma_s = self.standardization_error(session, avg_d4x, avg_D4x) +2255 sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5 +2256 session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5]) +2257 self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1] +2258 self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg)) +2259 weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']} +2260 wsum = sum([weights[s] for s in weights]) +2261 for s in weights: +2262 self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum] +2263 +2264 +2265 def consolidate_sessions(self): +2266 ''' +2267 Compute various statistics for each session. 2268 -2269 -2270 def consolidate_sessions(self): -2271 ''' -2272 Compute various statistics for each session. -2273 -2274 + `Na`: Number of anchor analyses in the session -2275 + `Nu`: Number of unknown analyses in the session -2276 + `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session -2277 + `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session -2278 + `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session -2279 + `a`: scrambling factor -2280 + `b`: compositional slope -2281 + `c`: WG offset -2282 + `SE_a`: Model stadard erorr of `a` -2283 + `SE_b`: Model stadard erorr of `b` -2284 + `SE_c`: Model stadard erorr of `c` -2285 + `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`) -2286 + `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`) -2287 + `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`) -2288 + `a2`: scrambling factor drift -2289 + `b2`: compositional slope drift -2290 + `c2`: WG offset drift -2291 + `Np`: Number of standardization parameters to fit -2292 + `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`) -2293 + `d13Cwg_VPDB`: δ13C_VPDB of WG -2294 + `d18Owg_VSMOW`: δ18O_VSMOW of WG -2295 ''' -2296 for session in self.sessions: -2297 if 'd13Cwg_VPDB' not in self.sessions[session]: -2298 self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB'] -2299 if 'd18Owg_VSMOW' not in self.sessions[session]: -2300 self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW'] -2301 self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors]) -2302 self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns]) +2269 + `Na`: Number of anchor analyses in the session +2270 + `Nu`: Number of unknown analyses in the session +2271 + `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session +2272 + `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within 
the session
+2273 + `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
+2274 + `a`: scrambling factor
+2275 + `b`: compositional slope
+2276 + `c`: WG offset
+2277 + `SE_a`: Model standard error of `a`
+2278 + `SE_b`: Model standard error of `b`
+2279 + `SE_c`: Model standard error of `c`
+2280 + `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
+2281 + `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
+2282 + `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
+2283 + `a2`: scrambling factor drift
+2284 + `b2`: compositional slope drift
+2285 + `c2`: WG offset drift
+2286 + `Np`: Number of standardization parameters to fit
+2287 + `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
+2288 + `d13Cwg_VPDB`: δ13C_VPDB of WG
+2289 + `d18Owg_VSMOW`: δ18O_VSMOW of WG
+2290 '''
+2291 for session in self.sessions:
+2292 if 'd13Cwg_VPDB' not in self.sessions[session]:
+2293 self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
+2294 if 'd18Owg_VSMOW' not in self.sessions[session]:
+2295 self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
+2296 self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
+2297 self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
+2298
+2299 self.msg(f'Computing repeatabilities for session {session}')
+2300 self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
+2301 self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
+2302 self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
2303
-2304 if self.standardization_method == 'pooled':
-2305 for session in self.sessions:
-2306
-2307 self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
-2308 i = self.standardization.var_names.index(f'a_{pf(session)}')
-2309 self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
-2310
-2311 self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
-2312 i = self.standardization.var_names.index(f'b_{pf(session)}')
-2313 self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
-2314
-2315 self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
-2316 i = self.standardization.var_names.index(f'c_{pf(session)}')
-2317 self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
-2318
-2319 self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
-2320 if self.sessions[session]['scrambling_drift']:
-2321 i = self.standardization.var_names.index(f'a2_{pf(session)}')
-2322 self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
-2323 else:
-2324 
-2330 -2331 self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}'] -2332 if self.sessions[session]['slope_drift']: -2333 i = self.standardization.var_names.index(f'b2_{pf(session)}') -2334 self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5 -2335 else: -2336 self.sessions[session]['SE_b2'] = 0. -2337 -2338 self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}'] -2339 if self.sessions[session]['wg_drift']: -2340 i = self.standardization.var_names.index(f'c2_{pf(session)}') -2341 self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5 -2342 else: -2343 self.sessions[session]['SE_c2'] = 0. -2344 -2345 i = self.standardization.var_names.index(f'a_{pf(session)}') -2346 j = self.standardization.var_names.index(f'b_{pf(session)}') -2347 k = self.standardization.var_names.index(f'c_{pf(session)}') -2348 CM = np.zeros((6,6)) -2349 CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]] -2350 try: -2351 i2 = self.standardization.var_names.index(f'a2_{pf(session)}') -2352 CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]] -2353 CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2] -2354 try: -2355 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') -2356 CM[3,4] = self.standardization.covar[i2,j2] -2357 CM[4,3] = self.standardization.covar[j2,i2] -2358 except ValueError: -2359 pass -2360 try: -2361 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2362 CM[3,5] = self.standardization.covar[i2,k2] -2363 CM[5,3] = self.standardization.covar[k2,i2] -2364 except ValueError: -2365 pass -2366 except ValueError: -2367 pass -2368 try: -2369 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') -2370 CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]] -2371 CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2] -2372 try: -2373 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2374 CM[4,5] = self.standardization.covar[j2,k2] -2375 CM[5,4] = self.standardization.covar[k2,j2] -2376 except ValueError: -2377 pass -2378 except ValueError: -2379 pass -2380 try: -2381 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2382 CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]] -2383 CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2] -2384 except ValueError: -2385 pass +2304 if self.standardization_method == 'pooled': +2305 for session in self.sessions: +2306 +2307 self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}'] +2308 i = self.standardization.var_names.index(f'a_{pf(session)}') +2309 self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5 +2310 +2311 self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}'] +2312 i = self.standardization.var_names.index(f'b_{pf(session)}') +2313 self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5 +2314 +2315 self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}'] +2316 i = self.standardization.var_names.index(f'c_{pf(session)}') +2317 self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5 +2318 +2319 self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}'] +2320 if self.sessions[session]['scrambling_drift']: +2321 i = self.standardization.var_names.index(f'a2_{pf(session)}') +2322 self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5 +2323 else: +2324 
self.sessions[session]['SE_a2'] = 0. +2325 +2326 self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}'] +2327 if self.sessions[session]['slope_drift']: +2328 i = self.standardization.var_names.index(f'b2_{pf(session)}') +2329 self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5 +2330 else: +2331 self.sessions[session]['SE_b2'] = 0. +2332 +2333 self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}'] +2334 if self.sessions[session]['wg_drift']: +2335 i = self.standardization.var_names.index(f'c2_{pf(session)}') +2336 self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5 +2337 else: +2338 self.sessions[session]['SE_c2'] = 0. +2339 +2340 i = self.standardization.var_names.index(f'a_{pf(session)}') +2341 j = self.standardization.var_names.index(f'b_{pf(session)}') +2342 k = self.standardization.var_names.index(f'c_{pf(session)}') +2343 CM = np.zeros((6,6)) +2344 CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]] +2345 try: +2346 i2 = self.standardization.var_names.index(f'a2_{pf(session)}') +2347 CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]] +2348 CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2] +2349 try: +2350 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') +2351 CM[3,4] = self.standardization.covar[i2,j2] +2352 CM[4,3] = self.standardization.covar[j2,i2] +2353 except ValueError: +2354 pass +2355 try: +2356 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2357 CM[3,5] = self.standardization.covar[i2,k2] +2358 CM[5,3] = self.standardization.covar[k2,i2] +2359 except ValueError: +2360 pass +2361 except ValueError: +2362 pass +2363 try: +2364 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') +2365 CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]] +2366 CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2] +2367 try: +2368 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2369 CM[4,5] = self.standardization.covar[j2,k2] +2370 CM[5,4] = self.standardization.covar[k2,j2] +2371 except ValueError: +2372 pass +2373 except ValueError: +2374 pass +2375 try: +2376 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2377 CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]] +2378 CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2] +2379 except ValueError: +2380 pass +2381 +2382 self.sessions[session]['CM'] = CM +2383 +2384 elif self.standardization_method == 'indep_sessions': +2385 pass # Not implemented yet 2386 -2387 self.sessions[session]['CM'] = CM -2388 -2389 elif self.standardization_method == 'indep_sessions': -2390 pass # Not implemented yet -2391 -2392 -2393 @make_verbal -2394 def repeatabilities(self): -2395 ''' -2396 Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x -2397 (for all samples, for anchors, and for unknowns). 
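Since `CM` is ordered as (`a`, `b`, `c`, `a2`, `b2`, `c2`), the uncertainty of a drifting parameter at a given time `t` follows from standard linear error propagation. A hypothetical helper (not part of the API) illustrating this for the effective scrambling factor `a + a2 * t`:

```py
import numpy as np

def se_a_at_time(CM, t):
	# SE of a + a2 * t, propagated from the 6x6 covariance matrix CM
	# ordered as (a, b, c, a2, b2, c2); illustration only
	J = np.array([1., 0., 0., t, 0., 0.])  # d(a + a2*t)/d(params)
	return float(J @ CM @ J) ** .5
```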
-2398 ''' -2399 self.msg('Computing reproducibilities for all sessions') -2400 -2401 self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors') -2402 self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors') -2403 self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors') -2404 self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns') -2405 self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples') -2406 -2407 -2408 @make_verbal -2409 def consolidate(self, tables = True, plots = True): -2410 ''' -2411 Collect information about samples, sessions and repeatabilities. -2412 ''' -2413 self.consolidate_samples() -2414 self.consolidate_sessions() -2415 self.repeatabilities() -2416 -2417 if tables: -2418 self.summary() -2419 self.table_of_sessions() -2420 self.table_of_analyses() -2421 self.table_of_samples() -2422 -2423 if plots: -2424 self.plot_sessions() -2425 -2426 -2427 @make_verbal -2428 def rmswd(self, -2429 samples = 'all samples', -2430 sessions = 'all sessions', -2431 ): -2432 ''' -2433 Compute the χ2, root mean squared weighted deviation -2434 (i.e. reduced χ2), and corresponding degrees of freedom of the -2435 Δ4x values for samples in `samples` and sessions in `sessions`. -2436 -2437 Only used in `D4xdata.standardize()` with `method='indep_sessions'`. -2438 ''' -2439 if samples == 'all samples': -2440 mysamples = [k for k in self.samples] -2441 elif samples == 'anchors': -2442 mysamples = [k for k in self.anchors] -2443 elif samples == 'unknowns': -2444 mysamples = [k for k in self.unknowns] -2445 else: -2446 mysamples = samples -2447 -2448 if sessions == 'all sessions': -2449 sessions = [k for k in self.sessions] -2450 -2451 chisq, Nf = 0, 0 -2452 for sample in mysamples : -2453 G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2454 if len(G) > 1 : -2455 X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G]) -2456 Nf += (len(G) - 1) -2457 chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G]) -2458 r = (chisq / Nf)**.5 if Nf > 0 else 0 -2459 self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.') -2460 return {'rmswd': r, 'chisq': chisq, 'Nf': Nf} -2461 -2462 -2463 @make_verbal -2464 def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'): -2465 ''' -2466 Compute the repeatability of `[r[key] for r in self]` -2467 ''' -2468 # NB: it's debatable whether rD47 should be computed -2469 # with Nf = len(self)-len(self.samples) instead of -2470 # Nf = len(self) - len(self.unknwons) - 3*len(self.sessions) -2471 -2472 if samples == 'all samples': -2473 mysamples = [k for k in self.samples] -2474 elif samples == 'anchors': -2475 mysamples = [k for k in self.anchors] -2476 elif samples == 'unknowns': -2477 mysamples = [k for k in self.unknowns] -2478 else: -2479 mysamples = samples -2480 -2481 if sessions == 'all sessions': -2482 sessions = [k for k in self.sessions] -2483 -2484 if key in ['D47', 'D48']: -2485 chisq, Nf = 0, 0 -2486 for sample in mysamples : -2487 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2488 if len(X) > 1 : -2489 chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ]) -2490 if sample in self.unknowns: -2491 Nf += len(X) - 1 -2492 else: -2493 Nf += len(X) -2494 if samples in ['anchors', 'all samples']: -2495 Nf -= sum([self.sessions[s]['Np'] for s in sessions]) -2496 
r = (chisq / Nf)**.5 if Nf > 0 else 0 -2497 -2498 else: # if key not in ['D47', 'D48'] -2499 chisq, Nf = 0, 0 -2500 for sample in mysamples : -2501 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2502 if len(X) > 1 : -2503 Nf += len(X) - 1 -2504 chisq += np.sum([ (x-np.mean(X))**2 for x in X ]) -2505 r = (chisq / Nf)**.5 if Nf > 0 else 0 -2506 -2507 self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.') -2508 return r -2509 -2510 def sample_average(self, samples, weights = 'equal', normalize = True): -2511 ''' -2512 Weighted average Δ4x value of a group of samples, accounting for covariance. -2513 -2514 Returns the weighed average Δ4x value and associated SE -2515 of a group of samples. Weights are equal by default. If `normalize` is -2516 true, `weights` will be rescaled so that their sum equals 1. -2517 -2518 **Examples** -2519 -2520 ```python -2521 self.sample_average(['X','Y'], [1, 2]) -2522 ``` -2523 -2524 returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3, -2525 where Δ4x(X) and Δ4x(Y) are the average Δ4x -2526 values of samples X and Y, respectively. -2527 -2528 ```python -2529 self.sample_average(['X','Y'], [1, -1], normalize = False) -2530 ``` +2387 +2388 @make_verbal +2389 def repeatabilities(self): +2390 ''' +2391 Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x +2392 (for all samples, for anchors, and for unknowns). +2393 ''' +2394 self.msg('Computing reproducibilities for all sessions') +2395 +2396 self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors') +2397 self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors') +2398 self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors') +2399 self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns') +2400 self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples') +2401 +2402 +2403 @make_verbal +2404 def consolidate(self, tables = True, plots = True): +2405 ''' +2406 Collect information about samples, sessions and repeatabilities. +2407 ''' +2408 self.consolidate_samples() +2409 self.consolidate_sessions() +2410 self.repeatabilities() +2411 +2412 if tables: +2413 self.summary() +2414 self.table_of_sessions() +2415 self.table_of_analyses() +2416 self.table_of_samples() +2417 +2418 if plots: +2419 self.plot_sessions() +2420 +2421 +2422 @make_verbal +2423 def rmswd(self, +2424 samples = 'all samples', +2425 sessions = 'all sessions', +2426 ): +2427 ''' +2428 Compute the χ2, root mean squared weighted deviation +2429 (i.e. reduced χ2), and corresponding degrees of freedom of the +2430 Δ4x values for samples in `samples` and sessions in `sessions`. +2431 +2432 Only used in `D4xdata.standardize()` with `method='indep_sessions'`. 
+2433 '''
+2434 if samples == 'all samples':
+2435 mysamples = [k for k in self.samples]
+2436 elif samples == 'anchors':
+2437 mysamples = [k for k in self.anchors]
+2438 elif samples == 'unknowns':
+2439 mysamples = [k for k in self.unknowns]
+2440 else:
+2441 mysamples = samples
+2442
+2443 if sessions == 'all sessions':
+2444 sessions = [k for k in self.sessions]
+2445
+2446 chisq, Nf = 0, 0
+2447 for sample in mysamples :
+2448 G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2449 if len(G) > 1 :
+2450 X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
+2451 Nf += (len(G) - 1)
+2452 chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
+2453 r = (chisq / Nf)**.5 if Nf > 0 else 0
+2454 self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
+2455 return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
+2456
+2457
+2458 @make_verbal
+2459 def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
+2460 '''
+2461 Compute the repeatability of `[r[key] for r in self]`
+2462 '''
+2463 # NB: it's debatable whether rD47 should be computed
+2464 # with Nf = len(self)-len(self.samples) instead of
+2465 # Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
+2466
+2467 if samples == 'all samples':
+2468 mysamples = [k for k in self.samples]
+2469 elif samples == 'anchors':
+2470 mysamples = [k for k in self.anchors]
+2471 elif samples == 'unknowns':
+2472 mysamples = [k for k in self.unknowns]
+2473 else:
+2474 mysamples = samples
+2475
+2476 if sessions == 'all sessions':
+2477 sessions = [k for k in self.sessions]
+2478
+2479 if key in ['D47', 'D48']:
+2480 chisq, Nf = 0, 0
+2481 for sample in mysamples :
+2482 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2483 if len(X) > 1 :
+2484 chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
+2485 if sample in self.unknowns:
+2486 Nf += len(X) - 1
+2487 else:
+2488 Nf += len(X)
+2489 if samples in ['anchors', 'all samples']:
+2490 Nf -= sum([self.sessions[s]['Np'] for s in sessions])
+2491 r = (chisq / Nf)**.5 if Nf > 0 else 0
+2492
+2493 else: # if key not in ['D47', 'D48']
+2494 chisq, Nf = 0, 0
+2495 for sample in mysamples :
+2496 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2497 if len(X) > 1 :
+2498 Nf += len(X) - 1
+2499 chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
+2500 r = (chisq / Nf)**.5 if Nf > 0 else 0
+2501
+2502 self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
+2503 return r
+2504
+2505 def sample_average(self, samples, weights = 'equal', normalize = True):
+2506 '''
+2507 Weighted average Δ4x value of a group of samples, accounting for covariance.
+2508
+2509 Returns the weighted average Δ4x value and associated SE
+2510 of a group of samples. Weights are equal by default. If `normalize` is
+2511 true, `weights` will be rescaled so that their sum equals 1.
+2512
+2513 **Examples**
+2514
+2515 ```python
+2516 self.sample_average(['X','Y'], [1, 2])
+2517 ```
+2518
+2519 returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
+2520 where Δ4x(X) and Δ4x(Y) are the average Δ4x
+2521 values of samples X and Y, respectively.
+2522
+2523 ```python
+2524 self.sample_average(['X','Y'], [1, -1], normalize = False)
+2525 ```
+2526
+2527 returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
+2528 ''' +2529 if weights == 'equal': +2530 weights = [1/len(samples)] * len(samples) 2531 -2532 returns the value and SE of the difference Δ4x(X) - Δ4x(Y). -2533 ''' -2534 if weights == 'equal': -2535 weights = [1/len(samples)] * len(samples) +2532 if normalize: +2533 s = sum(weights) +2534 if s: +2535 weights = [w/s for w in weights] 2536 -2537 if normalize: -2538 s = sum(weights) -2539 if s: -2540 weights = [w/s for w in weights] -2541 -2542 try: -2543# indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples] -2544# C = self.standardization.covar[indices,:][:,indices] -2545 C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples]) -2546 X = [self.samples[sample][f'D{self._4x}'] for sample in samples] -2547 return correlated_sum(X, C, weights) -2548 except ValueError: -2549 return (0., 0.) +2537 try: +2538# indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples] +2539# C = self.standardization.covar[indices,:][:,indices] +2540 C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples]) +2541 X = [self.samples[sample][f'D{self._4x}'] for sample in samples] +2542 return correlated_sum(X, C, weights) +2543 except ValueError: +2544 return (0., 0.) +2545 +2546 +2547 def sample_D4x_covar(self, sample1, sample2 = None): +2548 ''' +2549 Covariance between Δ4x values of samples 2550 -2551 -2552 def sample_D4x_covar(self, sample1, sample2 = None): -2553 ''' -2554 Covariance between Δ4x values of samples -2555 -2556 Returns the error covariance between the average Δ4x values of two -2557 samples. If if only `sample_1` is specified, or if `sample_1 == sample_2`), -2558 returns the Δ4x variance for that sample. -2559 ''' -2560 if sample2 is None: -2561 sample2 = sample1 -2562 if self.standardization_method == 'pooled': -2563 i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}') -2564 j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}') -2565 return self.standardization.covar[i, j] -2566 elif self.standardization_method == 'indep_sessions': -2567 if sample1 == sample2: -2568 return self.samples[sample1][f'SE_D{self._4x}']**2 -2569 else: -2570 c = 0 -2571 for session in self.sessions: -2572 sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1] -2573 sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2] -2574 if sdata1 and sdata2: -2575 a = self.sessions[session]['a'] -2576 # !! TODO: CM below does not account for temporal changes in standardization parameters -2577 CM = self.sessions[session]['CM'][:3,:3] -2578 avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1]) -2579 avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1]) -2580 avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2]) -2581 avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2]) -2582 c += ( -2583 self.unknowns[sample1][f'session_D{self._4x}'][session][2] -2584 * self.unknowns[sample2][f'session_D{self._4x}'][session][2] -2585 * np.array([[avg_D4x_1, avg_d4x_1, 1]]) -2586 @ CM -2587 @ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T -2588 ) / a**2 -2589 return float(c) -2590 -2591 def sample_D4x_correl(self, sample1, sample2 = None): -2592 ''' -2593 Correlation between Δ4x errors of samples -2594 -2595 Returns the error correlation between the average Δ4x values of two samples. -2596 ''' -2597 if sample2 is None or sample2 == sample1: -2598 return 1. 
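`sample_average()` above delegates the error propagation to `correlated_sum()`, defined earlier in the module. The underlying algebra is the standard weighted sum of correlated quantities; a self-contained sketch (not the module's own implementation):

```py
import numpy as np

def correlated_sum_sketch(X, C, w):
	# value = w . X ; SE = sqrt(w.T @ C @ w), with C the error covariance of X
	X, C, w = np.asarray(X, float), np.asarray(C, float), np.asarray(w, float)
	return float(w @ X), float(w @ C @ w) ** .5

# SE of the difference between two correlated samples (weights [1, -1]):
val, se = correlated_sum_sketch([0.61, 0.60], [[1e-4, 4e-5], [4e-5, 1e-4]], [1, -1])
# se = sqrt(1e-4 + 1e-4 - 2 * 4e-5) ≈ 0.011
```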
-2599 return ( -2600 self.sample_D4x_covar(sample1, sample2) -2601 / self.unknowns[sample1][f'SE_D{self._4x}'] -2602 / self.unknowns[sample2][f'SE_D{self._4x}'] -2603 ) -2604 -2605 def plot_single_session(self, -2606 session, -2607 kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4), -2608 kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4), -2609 kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75), -2610 kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75), -2611 kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75), -2612 xylimits = 'free', # | 'constant' -2613 x_label = None, -2614 y_label = None, -2615 error_contour_interval = 'auto', -2616 fig = 'new', -2617 ): -2618 ''' -2619 Generate plot for a single session -2620 ''' -2621 if x_label is None: -2622 x_label = f'δ$_{{{self._4x}}}$ (‰)' -2623 if y_label is None: -2624 y_label = f'Δ$_{{{self._4x}}}$ (‰)' -2625 -2626 out = _SessionPlot() -2627 anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]] -2628 unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]] -2629 -2630 if fig == 'new': -2631 out.fig = ppl.figure(figsize = (6,6)) -2632 ppl.subplots_adjust(.1,.1,.9,.9) -2633 -2634 out.anchor_analyses, = ppl.plot( -2635 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], -2636 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], -2637 **kw_plot_anchors) -2638 out.unknown_analyses, = ppl.plot( -2639 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], -2640 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], -2641 **kw_plot_unknowns) -2642 out.anchor_avg = ppl.plot( -2643 np.array([ np.array([ -2644 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, -2645 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 -2646 ]) for sample in anchors]).T, -2647 np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T, -2648 **kw_plot_anchor_avg) -2649 out.unknown_avg = ppl.plot( -2650 np.array([ np.array([ -2651 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, -2652 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 -2653 ]) for sample in unknowns]).T, -2654 np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T, -2655 **kw_plot_unknown_avg) -2656 if xylimits == 'constant': -2657 x = [r[f'd{self._4x}'] for r in self] -2658 y = [r[f'D{self._4x}'] for r in self] -2659 x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y) -2660 w, h = x2-x1, y2-y1 -2661 x1 -= w/20 -2662 x2 += w/20 -2663 y1 -= h/20 -2664 y2 += h/20 -2665 ppl.axis([x1, x2, y1, y2]) -2666 elif xylimits == 'free': -2667 x1, x2, y1, y2 = ppl.axis() -2668 else: -2669 x1, x2, y1, y2 = ppl.axis(xylimits) -2670 -2671 if error_contour_interval != 'none': -2672 xi, yi = np.linspace(x1, x2), np.linspace(y1, y2) -2673 XI,YI = np.meshgrid(xi, yi) -2674 SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi]) -2675 if error_contour_interval == 'auto': -2676 rng = np.max(SI) - np.min(SI) -2677 if rng <= 0.01: -2678 cinterval = 0.001 -2679 
elif rng <= 0.03: -2680 cinterval = 0.004 -2681 elif rng <= 0.1: -2682 cinterval = 0.01 -2683 elif rng <= 0.3: -2684 cinterval = 0.03 -2685 elif rng <= 1.: -2686 cinterval = 0.1 -2687 else: -2688 cinterval = 0.5 -2689 else: -2690 cinterval = error_contour_interval -2691 -2692 cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval) -2693 out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error) -2694 out.clabel = ppl.clabel(out.contour) -2695 -2696 ppl.xlabel(x_label) -2697 ppl.ylabel(y_label) -2698 ppl.title(session, weight = 'bold') -2699 ppl.grid(alpha = .2) -2700 out.ax = ppl.gca() -2701 -2702 return out -2703 -2704 def plot_residuals( -2705 self, -2706 hist = False, -2707 binwidth = 2/3, -2708 dir = 'output', -2709 filename = None, -2710 highlight = [], -2711 colors = None, -2712 figsize = None, -2713 ): -2714 ''' -2715 Plot residuals of each analysis as a function of time (actually, as a function of -2716 the order of analyses in the `D4xdata` object) -2717 -2718 + `hist`: whether to add a histogram of residuals -2719 + `histbins`: specify bin edges for the histogram -2720 + `dir`: the directory in which to save the plot -2721 + `highlight`: a list of samples to highlight -2722 + `colors`: a dict of `{<sample>: <color>}` for all samples -2723 + `figsize`: (width, height) of figure -2724 ''' -2725 # Layout -2726 fig = ppl.figure(figsize = (8,4) if figsize is None else figsize) -2727 if hist: -2728 ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72) -2729 ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15) -2730 else: -2731 ppl.subplots_adjust(.08,.05,.78,.8) -2732 ax1 = ppl.subplot(111) -2733 -2734 # Colors -2735 N = len(self.anchors) -2736 if colors is None: -2737 if len(highlight) > 0: -2738 Nh = len(highlight) -2739 if Nh == 1: -2740 colors = {highlight[0]: (0,0,0)} -2741 elif Nh == 3: -2742 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])} -2743 elif Nh == 4: -2744 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} -2745 else: -2746 colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)} -2747 else: -2748 if N == 3: -2749 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])} -2750 elif N == 4: -2751 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} -2752 else: -2753 colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)} -2754 -2755 ppl.sca(ax1) -2756 -2757 ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75) -2758 -2759 session = self[0]['Session'] -2760 x1 = 0 -2761# ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self]) -2762 x_sessions = {} -2763 one_or_more_singlets = False -2764 one_or_more_multiplets = False -2765 multiplets = set() -2766 for k,r in enumerate(self): -2767 if r['Session'] != session: -2768 x2 = k-1 -2769 x_sessions[session] = (x1+x2)/2 -2770 ppl.axvline(k - 0.5, color = 'k', lw = .5) -2771 session = r['Session'] -2772 x1 = k -2773 singlet = len(self.samples[r['Sample']]['data']) == 1 -2774 if not singlet: -2775 multiplets.add(r['Sample']) -2776 if r['Sample'] in self.unknowns: -2777 if singlet: -2778 one_or_more_singlets = True -2779 else: -2780 one_or_more_multiplets = True -2781 kw = dict( -2782 marker = 'x' if singlet else '+', -2783 ms = 4 if singlet else 5, -2784 ls = 'None', -2785 mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0), -2786 mew = 1, -2787 alpha = 0.2 if singlet else 1, -2788 ) 
-2789 if highlight and r['Sample'] not in highlight: -2790 kw['alpha'] = 0.2 -2791 ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw) -2792 x2 = k -2793 x_sessions[session] = (x1+x2)/2 -2794 -2795 ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1) -2796 ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1) -2797 if not hist: -2798 ppl.text(len(self), self.repeatability['r_D47']*1000, f" SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center') -2799 ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f" 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center') -2800 -2801 xmin, xmax, ymin, ymax = ppl.axis() -2802 for s in x_sessions: -2803 ppl.text( -2804 x_sessions[s], -2805 ymax +1, -2806 s, -2807 va = 'bottom', -2808 **( -2809 dict(ha = 'center') -2810 if len(self.sessions[s]['data']) > (0.15 * len(self)) -2811 else dict(ha = 'left', rotation = 45) -2812 ) -2813 ) -2814 -2815 if hist: -2816 ppl.sca(ax2) -2817 -2818 for s in colors: -2819 kw['marker'] = '+' -2820 kw['ms'] = 5 -2821 kw['mec'] = colors[s] -2822 kw['label'] = s -2823 kw['alpha'] = 1 -2824 ppl.plot([], [], **kw) -2825 -2826 kw['mec'] = (0,0,0) -2827 -2828 if one_or_more_singlets: -2829 kw['marker'] = 'x' -2830 kw['ms'] = 4 -2831 kw['alpha'] = .2 -2832 kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other' -2833 ppl.plot([], [], **kw) -2834 -2835 if one_or_more_multiplets: -2836 kw['marker'] = '+' -2837 kw['ms'] = 4 -2838 kw['alpha'] = 1 -2839 kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other' -2840 ppl.plot([], [], **kw) -2841 -2842 if hist: -2843 leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9) -2844 else: -2845 leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5) -2846 leg.set_zorder(-1000) -2847 -2848 ppl.sca(ax1) -2849 -2850 ppl.ylabel('Δ$_{47}$ residuals (ppm)') -2851 ppl.xticks([]) -2852 ppl.axis([-1, len(self), None, None]) -2853 -2854 if hist: -2855 ppl.sca(ax2) -2856 X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets] -2857 ppl.hist( -2858 X, -2859 orientation = 'horizontal', -2860 histtype = 'stepfilled', -2861 ec = [.4]*3, -2862 fc = [.25]*3, -2863 alpha = .25, -2864 bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)), -2865 ) -2866 ppl.axis([None, None, ymin, ymax]) -2867 ppl.text(0, 0, -2868 f" SD = {self.repeatability['r_D47']*1000:.1f} ppm\n 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", -2869 size = 8, -2870 alpha = 1, -2871 va = 'center', -2872 ha = 'left', -2873 ) -2874 -2875 ppl.xticks([]) -2876 ppl.yticks([]) -2877# ax2.spines['left'].set_visible(False) -2878 ax2.spines['right'].set_visible(False) -2879 ax2.spines['top'].set_visible(False) -2880 ax2.spines['bottom'].set_visible(False) -2881 -2882 -2883 if not os.path.exists(dir): -2884 os.makedirs(dir) -2885 if filename is None: -2886 return fig -2887 elif filename == '': -2888 filename = f'D{self._4x}_residuals.pdf' -2889 ppl.savefig(f'{dir}/{filename}') -2890 ppl.close(fig) -2891 -2892 -2893 def simulate(self, *args, **kwargs): -2894 ''' -2895 Legacy function with warning message pointing to `virtual_data()` 
-2896 ''' -2897 raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()') -2898 -2899 def plot_distribution_of_analyses( -2900 self, -2901 dir = 'output', -2902 filename = None, -2903 vs_time = False, -2904 figsize = (6,4), -2905 subplots_adjust = (0.02, 0.13, 0.85, 0.8), -2906 output = None, -2907 ): -2908 ''' -2909 Plot temporal distribution of all analyses in the data set. -2910 -2911 **Parameters** -2912 -2913 + `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially. -2914 ''' -2915 -2916 asamples = [s for s in self.anchors] -2917 usamples = [s for s in self.unknowns] -2918 if output is None or output == 'fig': -2919 fig = ppl.figure(figsize = figsize) -2920 ppl.subplots_adjust(*subplots_adjust) -2921 Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) -2922 Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) -2923 Xmax += (Xmax-Xmin)/40 -2924 Xmin -= (Xmax-Xmin)/41 -2925 for k, s in enumerate(asamples + usamples): -2926 if vs_time: -2927 X = [r['TimeTag'] for r in self if r['Sample'] == s] -2928 else: -2929 X = [x for x,r in enumerate(self) if r['Sample'] == s] -2930 Y = [-k for x in X] -2931 ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75) -2932 ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25) -2933 ppl.text(Xmax, -k, f' {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r') -2934 ppl.axis([Xmin, Xmax, -k-1, 1]) -2935 ppl.xlabel('\ntime') -2936 ppl.gca().annotate('', -2937 xy = (0.6, -0.02), -2938 xycoords = 'axes fraction', -2939 xytext = (.4, -0.02), -2940 arrowprops = dict(arrowstyle = "->", color = 'k'), -2941 ) -2942 -2943 -2944 x2 = -1 -2945 for session in self.sessions: -2946 x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) -2947 if vs_time: -2948 ppl.axvline(x1, color = 'k', lw = .75) -2949 if x2 > -1: -2950 if not vs_time: -2951 ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5) -2952 x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) -2953# from xlrd import xldate_as_datetime -2954# print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0)) -2955 if vs_time: -2956 ppl.axvline(x2, color = 'k', lw = .75) -2957 ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15) -2958 ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8) -2959 -2960 ppl.xticks([]) -2961 ppl.yticks([]) -2962 -2963 if output is None: -2964 if not os.path.exists(dir): -2965 os.makedirs(dir) -2966 if filename == None: -2967 filename = f'D{self._4x}_distribution_of_analyses.pdf' -2968 ppl.savefig(f'{dir}/{filename}') -2969 ppl.close(fig) -2970 elif output == 'ax': -2971 return ppl.gca() -2972 elif output == 'fig': -2973 return fig -2974 -2975 -2976class D47data(D4xdata): -2977 ''' -2978 Store and process data for a large set of Δ47 analyses, -2979 usually comprising more than one analytical session. -2980 ''' -2981 -2982 Nominal_D4x = { -2983 'ETH-1': 0.2052, -2984 'ETH-2': 0.2085, -2985 'ETH-3': 0.6132, -2986 'ETH-4': 0.4511, -2987 'IAEA-C1': 0.3018, -2988 'IAEA-C2': 0.6409, -2989 'MERCK': 0.5135, -2990 } # I-CDES (Bernasconi et al., 2021) -2991 ''' -2992 Nominal Δ47 values assigned to the Δ47 anchor samples, used by -2993 `D47data.standardize()` to normalize unknown samples to an absolute Δ47 -2994 reference frame. 
-2995 -2996 By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)): -2997 ```py -2998 { -2999 'ETH-1' : 0.2052, -3000 'ETH-2' : 0.2085, -3001 'ETH-3' : 0.6132, -3002 'ETH-4' : 0.4511, -3003 'IAEA-C1' : 0.3018, -3004 'IAEA-C2' : 0.6409, -3005 'MERCK' : 0.5135, -3006 } -3007 ``` -3008 ''' -3009 +2551 Returns the error covariance between the average Δ4x values of two +2552 samples. If if only `sample_1` is specified, or if `sample_1 == sample_2`), +2553 returns the Δ4x variance for that sample. +2554 ''' +2555 if sample2 is None: +2556 sample2 = sample1 +2557 if self.standardization_method == 'pooled': +2558 i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}') +2559 j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}') +2560 return self.standardization.covar[i, j] +2561 elif self.standardization_method == 'indep_sessions': +2562 if sample1 == sample2: +2563 return self.samples[sample1][f'SE_D{self._4x}']**2 +2564 else: +2565 c = 0 +2566 for session in self.sessions: +2567 sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1] +2568 sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2] +2569 if sdata1 and sdata2: +2570 a = self.sessions[session]['a'] +2571 # !! TODO: CM below does not account for temporal changes in standardization parameters +2572 CM = self.sessions[session]['CM'][:3,:3] +2573 avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1]) +2574 avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1]) +2575 avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2]) +2576 avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2]) +2577 c += ( +2578 self.unknowns[sample1][f'session_D{self._4x}'][session][2] +2579 * self.unknowns[sample2][f'session_D{self._4x}'][session][2] +2580 * np.array([[avg_D4x_1, avg_d4x_1, 1]]) +2581 @ CM +2582 @ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T +2583 ) / a**2 +2584 return float(c) +2585 +2586 def sample_D4x_correl(self, sample1, sample2 = None): +2587 ''' +2588 Correlation between Δ4x errors of samples +2589 +2590 Returns the error correlation between the average Δ4x values of two samples. +2591 ''' +2592 if sample2 is None or sample2 == sample1: +2593 return 1. 
+2594 return ( +2595 self.sample_D4x_covar(sample1, sample2) +2596 / self.unknowns[sample1][f'SE_D{self._4x}'] +2597 / self.unknowns[sample2][f'SE_D{self._4x}'] +2598 ) +2599 +2600 def plot_single_session(self, +2601 session, +2602 kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4), +2603 kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4), +2604 kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75), +2605 kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75), +2606 kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75), +2607 xylimits = 'free', # | 'constant' +2608 x_label = None, +2609 y_label = None, +2610 error_contour_interval = 'auto', +2611 fig = 'new', +2612 ): +2613 ''' +2614 Generate plot for a single session +2615 ''' +2616 if x_label is None: +2617 x_label = f'δ$_{{{self._4x}}}$ (‰)' +2618 if y_label is None: +2619 y_label = f'Δ$_{{{self._4x}}}$ (‰)' +2620 +2621 out = _SessionPlot() +2622 anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]] +2623 unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]] +2624 +2625 if fig == 'new': +2626 out.fig = ppl.figure(figsize = (6,6)) +2627 ppl.subplots_adjust(.1,.1,.9,.9) +2628 +2629 out.anchor_analyses, = ppl.plot( +2630 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], +2631 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], +2632 **kw_plot_anchors) +2633 out.unknown_analyses, = ppl.plot( +2634 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], +2635 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], +2636 **kw_plot_unknowns) +2637 out.anchor_avg = ppl.plot( +2638 np.array([ np.array([ +2639 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, +2640 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 +2641 ]) for sample in anchors]).T, +2642 np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T, +2643 **kw_plot_anchor_avg) +2644 out.unknown_avg = ppl.plot( +2645 np.array([ np.array([ +2646 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, +2647 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 +2648 ]) for sample in unknowns]).T, +2649 np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T, +2650 **kw_plot_unknown_avg) +2651 if xylimits == 'constant': +2652 x = [r[f'd{self._4x}'] for r in self] +2653 y = [r[f'D{self._4x}'] for r in self] +2654 x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y) +2655 w, h = x2-x1, y2-y1 +2656 x1 -= w/20 +2657 x2 += w/20 +2658 y1 -= h/20 +2659 y2 += h/20 +2660 ppl.axis([x1, x2, y1, y2]) +2661 elif xylimits == 'free': +2662 x1, x2, y1, y2 = ppl.axis() +2663 else: +2664 x1, x2, y1, y2 = ppl.axis(xylimits) +2665 +2666 if error_contour_interval != 'none': +2667 xi, yi = np.linspace(x1, x2), np.linspace(y1, y2) +2668 XI,YI = np.meshgrid(xi, yi) +2669 SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi]) +2670 if error_contour_interval == 'auto': +2671 rng = np.max(SI) - np.min(SI) +2672 if rng <= 0.01: +2673 cinterval = 0.001 +2674 
elif rng <= 0.03: +2675 cinterval = 0.004 +2676 elif rng <= 0.1: +2677 cinterval = 0.01 +2678 elif rng <= 0.3: +2679 cinterval = 0.03 +2680 elif rng <= 1.: +2681 cinterval = 0.1 +2682 else: +2683 cinterval = 0.5 +2684 else: +2685 cinterval = error_contour_interval +2686 +2687 cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval) +2688 out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error) +2689 out.clabel = ppl.clabel(out.contour) +2690 +2691 ppl.xlabel(x_label) +2692 ppl.ylabel(y_label) +2693 ppl.title(session, weight = 'bold') +2694 ppl.grid(alpha = .2) +2695 out.ax = ppl.gca() +2696 +2697 return out +2698 +2699 def plot_residuals( +2700 self, +2701 hist = False, +2702 binwidth = 2/3, +2703 dir = 'output', +2704 filename = None, +2705 highlight = [], +2706 colors = None, +2707 figsize = None, +2708 ): +2709 ''' +2710 Plot residuals of each analysis as a function of time (actually, as a function of +2711 the order of analyses in the `D4xdata` object) +2712 +2713 + `hist`: whether to add a histogram of residuals +2714 + `histbins`: specify bin edges for the histogram +2715 + `dir`: the directory in which to save the plot +2716 + `highlight`: a list of samples to highlight +2717 + `colors`: a dict of `{<sample>: <color>}` for all samples +2718 + `figsize`: (width, height) of figure +2719 ''' +2720 # Layout +2721 fig = ppl.figure(figsize = (8,4) if figsize is None else figsize) +2722 if hist: +2723 ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72) +2724 ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15) +2725 else: +2726 ppl.subplots_adjust(.08,.05,.78,.8) +2727 ax1 = ppl.subplot(111) +2728 +2729 # Colors +2730 N = len(self.anchors) +2731 if colors is None: +2732 if len(highlight) > 0: +2733 Nh = len(highlight) +2734 if Nh == 1: +2735 colors = {highlight[0]: (0,0,0)} +2736 elif Nh == 3: +2737 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])} +2738 elif Nh == 4: +2739 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} +2740 else: +2741 colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)} +2742 else: +2743 if N == 3: +2744 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])} +2745 elif N == 4: +2746 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} +2747 else: +2748 colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)} +2749 +2750 ppl.sca(ax1) +2751 +2752 ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75) +2753 +2754 session = self[0]['Session'] +2755 x1 = 0 +2756# ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self]) +2757 x_sessions = {} +2758 one_or_more_singlets = False +2759 one_or_more_multiplets = False +2760 multiplets = set() +2761 for k,r in enumerate(self): +2762 if r['Session'] != session: +2763 x2 = k-1 +2764 x_sessions[session] = (x1+x2)/2 +2765 ppl.axvline(k - 0.5, color = 'k', lw = .5) +2766 session = r['Session'] +2767 x1 = k +2768 singlet = len(self.samples[r['Sample']]['data']) == 1 +2769 if not singlet: +2770 multiplets.add(r['Sample']) +2771 if r['Sample'] in self.unknowns: +2772 if singlet: +2773 one_or_more_singlets = True +2774 else: +2775 one_or_more_multiplets = True +2776 kw = dict( +2777 marker = 'x' if singlet else '+', +2778 ms = 4 if singlet else 5, +2779 ls = 'None', +2780 mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0), +2781 mew = 1, +2782 alpha = 0.2 if singlet else 1, +2783 ) 
+2784 if highlight and r['Sample'] not in highlight: +2785 kw['alpha'] = 0.2 +2786 ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw) +2787 x2 = k +2788 x_sessions[session] = (x1+x2)/2 +2789 +2790 ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1) +2791 ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1) +2792 if not hist: +2793 ppl.text(len(self), self.repeatability['r_D47']*1000, f" SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center') +2794 ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f" 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center') +2795 +2796 xmin, xmax, ymin, ymax = ppl.axis() +2797 for s in x_sessions: +2798 ppl.text( +2799 x_sessions[s], +2800 ymax +1, +2801 s, +2802 va = 'bottom', +2803 **( +2804 dict(ha = 'center') +2805 if len(self.sessions[s]['data']) > (0.15 * len(self)) +2806 else dict(ha = 'left', rotation = 45) +2807 ) +2808 ) +2809 +2810 if hist: +2811 ppl.sca(ax2) +2812 +2813 for s in colors: +2814 kw['marker'] = '+' +2815 kw['ms'] = 5 +2816 kw['mec'] = colors[s] +2817 kw['label'] = s +2818 kw['alpha'] = 1 +2819 ppl.plot([], [], **kw) +2820 +2821 kw['mec'] = (0,0,0) +2822 +2823 if one_or_more_singlets: +2824 kw['marker'] = 'x' +2825 kw['ms'] = 4 +2826 kw['alpha'] = .2 +2827 kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other' +2828 ppl.plot([], [], **kw) +2829 +2830 if one_or_more_multiplets: +2831 kw['marker'] = '+' +2832 kw['ms'] = 4 +2833 kw['alpha'] = 1 +2834 kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other' +2835 ppl.plot([], [], **kw) +2836 +2837 if hist: +2838 leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9) +2839 else: +2840 leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5) +2841 leg.set_zorder(-1000) +2842 +2843 ppl.sca(ax1) +2844 +2845 ppl.ylabel('Δ$_{47}$ residuals (ppm)') +2846 ppl.xticks([]) +2847 ppl.axis([-1, len(self), None, None]) +2848 +2849 if hist: +2850 ppl.sca(ax2) +2851 X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets] +2852 ppl.hist( +2853 X, +2854 orientation = 'horizontal', +2855 histtype = 'stepfilled', +2856 ec = [.4]*3, +2857 fc = [.25]*3, +2858 alpha = .25, +2859 bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)), +2860 ) +2861 ppl.axis([None, None, ymin, ymax]) +2862 ppl.text(0, 0, +2863 f" SD = {self.repeatability['r_D47']*1000:.1f} ppm\n 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", +2864 size = 8, +2865 alpha = 1, +2866 va = 'center', +2867 ha = 'left', +2868 ) +2869 +2870 ppl.xticks([]) +2871 ppl.yticks([]) +2872# ax2.spines['left'].set_visible(False) +2873 ax2.spines['right'].set_visible(False) +2874 ax2.spines['top'].set_visible(False) +2875 ax2.spines['bottom'].set_visible(False) +2876 +2877 +2878 if not os.path.exists(dir): +2879 os.makedirs(dir) +2880 if filename is None: +2881 return fig +2882 elif filename == '': +2883 filename = f'D{self._4x}_residuals.pdf' +2884 ppl.savefig(f'{dir}/{filename}') +2885 ppl.close(fig) +2886 +2887 +2888 def simulate(self, *args, **kwargs): +2889 ''' +2890 Legacy function with warning message pointing to `virtual_data()` 
+2891 ''' +2892 raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()') +2893 +2894 def plot_distribution_of_analyses( +2895 self, +2896 dir = 'output', +2897 filename = None, +2898 vs_time = False, +2899 figsize = (6,4), +2900 subplots_adjust = (0.02, 0.13, 0.85, 0.8), +2901 output = None, +2902 ): +2903 ''' +2904 Plot temporal distribution of all analyses in the data set. +2905 +2906 **Parameters** +2907 +2908 + `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially. +2909 ''' +2910 +2911 asamples = [s for s in self.anchors] +2912 usamples = [s for s in self.unknowns] +2913 if output is None or output == 'fig': +2914 fig = ppl.figure(figsize = figsize) +2915 ppl.subplots_adjust(*subplots_adjust) +2916 Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) +2917 Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) +2918 Xmax += (Xmax-Xmin)/40 +2919 Xmin -= (Xmax-Xmin)/41 +2920 for k, s in enumerate(asamples + usamples): +2921 if vs_time: +2922 X = [r['TimeTag'] for r in self if r['Sample'] == s] +2923 else: +2924 X = [x for x,r in enumerate(self) if r['Sample'] == s] +2925 Y = [-k for x in X] +2926 ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75) +2927 ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25) +2928 ppl.text(Xmax, -k, f' {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r') +2929 ppl.axis([Xmin, Xmax, -k-1, 1]) +2930 ppl.xlabel('\ntime') +2931 ppl.gca().annotate('', +2932 xy = (0.6, -0.02), +2933 xycoords = 'axes fraction', +2934 xytext = (.4, -0.02), +2935 arrowprops = dict(arrowstyle = "->", color = 'k'), +2936 ) +2937 +2938 +2939 x2 = -1 +2940 for session in self.sessions: +2941 x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) +2942 if vs_time: +2943 ppl.axvline(x1, color = 'k', lw = .75) +2944 if x2 > -1: +2945 if not vs_time: +2946 ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5) +2947 x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) +2948# from xlrd import xldate_as_datetime +2949# print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0)) +2950 if vs_time: +2951 ppl.axvline(x2, color = 'k', lw = .75) +2952 ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15) +2953 ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8) +2954 +2955 ppl.xticks([]) +2956 ppl.yticks([]) +2957 +2958 if output is None: +2959 if not os.path.exists(dir): +2960 os.makedirs(dir) +2961 if filename == None: +2962 filename = f'D{self._4x}_distribution_of_analyses.pdf' +2963 ppl.savefig(f'{dir}/{filename}') +2964 ppl.close(fig) +2965 elif output == 'ax': +2966 return ppl.gca() +2967 elif output == 'fig': +2968 return fig +2969 +2970 +2971class D47data(D4xdata): +2972 ''' +2973 Store and process data for a large set of Δ47 analyses, +2974 usually comprising more than one analytical session. +2975 ''' +2976 +2977 Nominal_D4x = { +2978 'ETH-1': 0.2052, +2979 'ETH-2': 0.2085, +2980 'ETH-3': 0.6132, +2981 'ETH-4': 0.4511, +2982 'IAEA-C1': 0.3018, +2983 'IAEA-C2': 0.6409, +2984 'MERCK': 0.5135, +2985 } # I-CDES (Bernasconi et al., 2021) +2986 ''' +2987 Nominal Δ47 values assigned to the Δ47 anchor samples, used by +2988 `D47data.standardize()` to normalize unknown samples to an absolute Δ47 +2989 reference frame. 
+2990 +2991 By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)): +2992 ```py +2993 { +2994 'ETH-1' : 0.2052, +2995 'ETH-2' : 0.2085, +2996 'ETH-3' : 0.6132, +2997 'ETH-4' : 0.4511, +2998 'IAEA-C1' : 0.3018, +2999 'IAEA-C2' : 0.6409, +3000 'MERCK' : 0.5135, +3001 } +3002 ``` +3003 ''' +3004 +3005 +3006 @property +3007 def Nominal_D47(self): +3008 return self.Nominal_D4x +3009 3010 -3011 @property -3012 def Nominal_D47(self): -3013 return self.Nominal_D4x -3014 +3011 @Nominal_D47.setter +3012 def Nominal_D47(self, new): +3013 self.Nominal_D4x = dict(**new) +3014 self.refresh() 3015 -3016 @Nominal_D47.setter -3017 def Nominal_D47(self, new): -3018 self.Nominal_D4x = dict(**new) -3019 self.refresh() -3020 -3021 -3022 def __init__(self, l = [], **kwargs): -3023 ''' -3024 **Parameters:** same as `D4xdata.__init__()` -3025 ''' -3026 D4xdata.__init__(self, l = l, mass = '47', **kwargs) -3027 +3016 +3017 def __init__(self, l = [], **kwargs): +3018 ''' +3019 **Parameters:** same as `D4xdata.__init__()` +3020 ''' +3021 D4xdata.__init__(self, l = l, mass = '47', **kwargs) +3022 +3023 +3024 def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'): +3025 ''' +3026 Find all samples for which `Teq` is specified, compute equilibrium Δ47 +3027 value for that temperature, and add treat these samples as additional anchors. 3028 -3029 def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'): -3030 ''' -3031 Find all samples for which `Teq` is specified, compute equilibrium Δ47 -3032 value for that temperature, and add treat these samples as additional anchors. -3033 -3034 **Parameters** -3035 -3036 + `fCo2eqD47`: Which CO2 equilibrium law to use -3037 (`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127); -3038 `wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)). -3039 + `priority`: if `replace`: forget old anchors and only use the new ones; -3040 if `new`: keep pre-existing anchors but update them in case of conflict -3041 between old and new Δ47 values; -3042 if `old`: keep pre-existing anchors but preserve their original Δ47 -3043 values in case of conflict. -3044 ''' -3045 f = { -3046 'petersen': fCO2eqD47_Petersen, -3047 'wang': fCO2eqD47_Wang, -3048 }[fCo2eqD47] -3049 foo = {} -3050 for r in self: -3051 if 'Teq' in r: -3052 if r['Sample'] in foo: -3053 assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.' -3054 else: -3055 foo[r['Sample']] = f(r['Teq']) -3056 else: -3057 assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.' -3058 -3059 if priority == 'replace': -3060 self.Nominal_D47 = {} -3061 for s in foo: -3062 if priority != 'old' or s not in self.Nominal_D47: -3063 self.Nominal_D47[s] = foo[s] -3064 -3065 -3066 -3067 -3068class D48data(D4xdata): -3069 ''' -3070 Store and process data for a large set of Δ48 analyses, -3071 usually comprising more than one analytical session. -3072 ''' -3073 -3074 Nominal_D4x = { -3075 'ETH-1': 0.138, -3076 'ETH-2': 0.138, -3077 'ETH-3': 0.270, -3078 'ETH-4': 0.223, -3079 'GU-1': -0.419, -3080 } # (Fiebig et al., 2019, 2021) -3081 ''' -3082 Nominal Δ48 values assigned to the Δ48 anchor samples, used by -3083 `D48data.standardize()` to normalize unknown samples to an absolute Δ48 -3084 reference frame. -3085 -3086 By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019), -3087 Fiebig et al. 
(in press)): -3088 -3089 ```py -3090 { -3091 'ETH-1' : 0.138, -3092 'ETH-2' : 0.138, -3093 'ETH-3' : 0.270, -3094 'ETH-4' : 0.223, -3095 'GU-1' : -0.419, -3096 } -3097 ``` -3098 ''' +3029 **Parameters** +3030 +3031 + `fCo2eqD47`: Which CO2 equilibrium law to use +3032 (`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127); +3033 `wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)). +3034 + `priority`: if `replace`: forget old anchors and only use the new ones; +3035 if `new`: keep pre-existing anchors but update them in case of conflict +3036 between old and new Δ47 values; +3037 if `old`: keep pre-existing anchors but preserve their original Δ47 +3038 values in case of conflict. +3039 ''' +3040 f = { +3041 'petersen': fCO2eqD47_Petersen, +3042 'wang': fCO2eqD47_Wang, +3043 }[fCo2eqD47] +3044 foo = {} +3045 for r in self: +3046 if 'Teq' in r: +3047 if r['Sample'] in foo: +3048 assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.' +3049 else: +3050 foo[r['Sample']] = f(r['Teq']) +3051 else: +3052 assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.' +3053 +3054 if priority == 'replace': +3055 self.Nominal_D47 = {} +3056 for s in foo: +3057 if priority != 'old' or s not in self.Nominal_D47: +3058 self.Nominal_D47[s] = foo[s] +3059 +3060 +3061 +3062 +3063class D48data(D4xdata): +3064 ''' +3065 Store and process data for a large set of Δ48 analyses, +3066 usually comprising more than one analytical session. +3067 ''' +3068 +3069 Nominal_D4x = { +3070 'ETH-1': 0.138, +3071 'ETH-2': 0.138, +3072 'ETH-3': 0.270, +3073 'ETH-4': 0.223, +3074 'GU-1': -0.419, +3075 } # (Fiebig et al., 2019, 2021) +3076 ''' +3077 Nominal Δ48 values assigned to the Δ48 anchor samples, used by +3078 `D48data.standardize()` to normalize unknown samples to an absolute Δ48 +3079 reference frame. +3080 +3081 By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019), +3082 Fiebig et al. (in press)): +3083 +3084 ```py +3085 { +3086 'ETH-1' : 0.138, +3087 'ETH-2' : 0.138, +3088 'ETH-3' : 0.270, +3089 'ETH-4' : 0.223, +3090 'GU-1' : -0.419, +3091 } +3092 ``` +3093 ''' +3094 +3095 +3096 @property +3097 def Nominal_D48(self): +3098 return self.Nominal_D4x 3099 -3100 -3101 @property -3102 def Nominal_D48(self): -3103 return self.Nominal_D4x -3104 -3105 -3106 @Nominal_D48.setter -3107 def Nominal_D48(self, new): -3108 self.Nominal_D4x = dict(**new) -3109 self.refresh() -3110 -3111 -3112 def __init__(self, l = [], **kwargs): -3113 ''' -3114 **Parameters:** same as `D4xdata.__init__()` -3115 ''' -3116 D4xdata.__init__(self, l = l, mass = '48', **kwargs) -3117 -3118 -3119class _SessionPlot(): -3120 ''' -3121 Simple placeholder class -3122 ''' -3123 def __init__(self): -3124 pass +3100 +3101 @Nominal_D48.setter +3102 def Nominal_D48(self, new): +3103 self.Nominal_D4x = dict(**new) +3104 self.refresh() +3105 +3106 +3107 def __init__(self, l = [], **kwargs): +3108 ''' +3109 **Parameters:** same as `D4xdata.__init__()` +3110 ''' +3111 D4xdata.__init__(self, l = l, mass = '48', **kwargs) +3112 +3113 +3114class _SessionPlot(): +3115 ''' +3116 Simple placeholder class +3117 ''' +3118 def __init__(self): +3119 pass @@ -5323,2081 +5318,2081 @@
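For context, the anchor dictionaries defined by `D47data` and `D48data` can be overridden through the `Nominal_D47`/`Nominal_D48` setters shown above, and `D47fromTeq()` can promote samples with a known equilibration temperature to anchors before standardization. A hypothetical end-to-end sketch (the file name is a placeholder, and `read()`, `wg()`, `crunch()` and `standardize()` are assumed from the rest of the module):

```py
from D47crunch import D47data

data = D47data()
data.read('rawdata.csv')      # placeholder file name
data.Nominal_D47 = {          # override anchors; the setter calls refresh()
	'ETH-1': 0.2052,
	'ETH-2': 0.2085,
	'ETH-3': 0.6132,
	'ETH-4': 0.4511,
}
data.D47fromTeq(fCo2eqD47 = 'petersen', priority = 'new')  # samples with a 'Teq' field become anchors
data.wg()
data.crunch()
data.standardize()
```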

API Documentation

- 900class D4xdata(list):
- 901	'''
- 902	Store and process data for a large set of Δ47 and/or Δ48
- 903	analyses, usually comprising more than one analytical session.
- 904	'''
- 905
- 906	### 17O CORRECTION PARAMETERS
- 907	R13_VPDB = 0.01118  # (Chang & Li, 1990)
- 908	'''
- 909	Absolute (13C/12C) ratio of VPDB.
- 910	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
- 911	'''
- 912
- 913	R18_VSMOW = 0.0020052  # (Baertschi, 1976)
- 914	'''
- 915	Absolute (18O/16O) ratio of VSMOW.
- 916	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
- 917	'''
- 918
- 919	LAMBDA_17 = 0.528  # (Barkan & Luz, 2005)
- 920	'''
- 921	Mass-dependent exponent for triple oxygen isotopes.
- 922	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
- 923	'''
- 924
- 925	R17_VSMOW = 0.00038475  # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
- 926	'''
- 927	Absolute (17O/16O) ratio of VSMOW.
- 928	By default equal to 0.00038475
- 929	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
- 930	rescaled to `R13_VPDB`)
- 931	'''
- 932
- 933	R18_VPDB = R18_VSMOW * 1.03092
- 934	'''
- 935	Absolute (18O/16O) ratio of VPDB.
- 936	By definition equal to `R18_VSMOW * 1.03092`.
- 937	'''
- 938
- 939	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
- 940	'''
- 941	Absolute (17O/16O) ratio of VPDB.
- 942	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
- 943	'''
- 944
- 945	LEVENE_REF_SAMPLE = 'ETH-3'
- 946	'''
- 947	After the Δ4x standardization step, each sample is tested to
- 948	assess whether the Δ4x variance within all analyses for that
- 949	sample differs significantly from that observed for a given reference
- 950	sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test),
- 951	which yields a p-value corresponding to the null hypothesis that the
- 952	underlying variances are equal).
- 953
- 954	`LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which
- 955	sample should be used as a reference for this test.
- 956	'''
- 957
- 958	ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6)  # (Kim et al., 2007, calcite)
- 959	'''
- 960	Specifies the 18O/16O fractionation factor generally applicable
- 961	to acid reactions in the dataset. Currently used by `D4xdata.wg()`,
- 962	`D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`.
- 963
- 964	By default equal to 1.008129 (calcite reacted at 90 °C,
- 965	[Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)).
- 966	'''
- 967
- 968	Nominal_d13C_VPDB = {
- 969		'ETH-1': 2.02,
- 970		'ETH-2': -10.17,
- 971		'ETH-3': 1.71,
- 972		}	# (Bernasconi et al., 2018)
- 973	'''
- 974	Nominal δ13C_VPDB values assigned to carbonate standards, used by
- 975	`D4xdata.standardize_d13C()`.
- 976
- 977	By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after
- 978	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
- 979	'''
- 980
- 981	Nominal_d18O_VPDB = {
- 982		'ETH-1': -2.19,
- 983		'ETH-2': -18.69,
- 984		'ETH-3': -1.78,
- 985		}	# (Bernasconi et al., 2018)
- 986	'''
- 987	Nominal δ18O_VPDB values assigned to carbonate standards, used by
- 988	`D4xdata.standardize_d18O()`.
- 989
- 990	By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after
- 991	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
- 992	'''
- 993
- 994	d13C_STANDARDIZATION_METHOD = '2pt'
- 995	'''
- 996	Method by which to standardize δ13C values:
- 997	
- 998	+ `'none'`: do not apply any δ13C standardization.
- 999	+ `'1pt'`: within each session, offset all initial δ13C values so as to
-1000	minimize the difference between final δ13C_VPDB values and
-1001	`Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined).
-1002	+ `'2pt'`: within each session, apply an affine transformation to all δ13C
-1003	values so as to minimize the difference between final δ13C_VPDB
-1004	values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB`
-1005	is defined).
-1006	'''
-1007
-1008	d18O_STANDARDIZATION_METHOD = '2pt'
-1009	'''
-1010	Method by which to standardize δ18O values:
-1011	
-1012	+ `'none'`: do not apply any δ18O standardization.
-1013	+ `'1pt'`: within each session, offset all initial δ18O values so as to
-1014	minimize the difference between final δ18O_VPDB values and
-1015	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
-1016	+ `'2pt'`: within each session, apply an affine transformation to all δ18O
-1017	values so as to minimize the difference between final δ18O_VPDB
-1018	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
-1019	is defined).
-1020	'''
-1021
-1022	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
-1023		'''
-1024		**Parameters**
-1025
-1026		+ `l`: a list of dictionaries, with each dictionary including at least the keys
-1027		`Sample`, `d45`, `d46`, and `d47` or `d48`.
-1028		+ `mass`: `'47'` or `'48'`
-1029		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
-1030		+ `session`: define session name for analyses without a `Session` key
-1031		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
-1032
-1033		Returns a `D4xdata` object derived from `list`.
-1034		'''
-1035		self._4x = mass
-1036		self.verbose = verbose
-1037		self.prefix = 'D4xdata'
-1038		self.logfile = logfile
-1039		list.__init__(self, l)
-1040		self.Nf = None
-1041		self.repeatability = {}
-1042		self.refresh(session = session)
-1043
-1044
-1045	def make_verbal(oldfun):
-1046		'''
-1047		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
-1048		'''
-1049		@wraps(oldfun)
-1050		def newfun(*args, verbose = '', **kwargs):
-1051			myself = args[0]
-1052			oldprefix = myself.prefix
-1053			myself.prefix = oldfun.__name__
+ 895class D4xdata(list):
+ 896	'''
+ 897	Store and process data for a large set of Δ47 and/or Δ48
+ 898	analyses, usually comprising more than one analytical session.
+ 899	'''
+ 900
+ 901	### 17O CORRECTION PARAMETERS
+ 902	R13_VPDB = 0.01118  # (Chang & Li, 1990)
+ 903	'''
+ 904	Absolute (13C/12C) ratio of VPDB.
+ 905	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
+ 906	'''
+ 907
+ 908	R18_VSMOW = 0.0020052  # (Baertschi, 1976)
+ 909	'''
+ 910	Absolute (18O/16O) ratio of VSMOW.
+ 911	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
+ 912	'''
+ 913
+ 914	LAMBDA_17 = 0.528  # (Barkan & Luz, 2005)
+ 915	'''
+ 916	Mass-dependent exponent for triple oxygen isotopes.
+ 917	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
+ 918	'''
+ 919
+ 920	R17_VSMOW = 0.00038475  # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
+ 921	'''
+ 922	Absolute (17O/16O) ratio of VSMOW.
+ 923	By default equal to 0.00038475
+ 924	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
+ 925	rescaled to `R13_VPDB`)
+ 926	'''
+ 927
+ 928	R18_VPDB = R18_VSMOW * 1.03092
+ 929	'''
+ 930	Absolute (18O/16O) ratio of VPDB.
+ 931	By definition equal to `R18_VSMOW * 1.03092`.
+ 932	'''
+ 933
+ 934	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
+ 935	'''
+ 936	Absolute (17O/16O) ratio of VPDB.
+ 937	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
+ 938	'''
+ 939
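The two derived VPDB ratios chain directly off the VSMOW constants defined above; a quick standalone numeric check (values copied from those definitions):

```py
# Reproduce the derived VPDB ratios from the VSMOW ones:
R18_VSMOW = 0.0020052
R17_VSMOW = 0.00038475
LAMBDA_17 = 0.528

R18_VPDB = R18_VSMOW * 1.03092               # ~0.0020672
R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17  # ~0.0003910
print(R18_VPDB, R17_VPDB)
```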
+ 940	LEVENE_REF_SAMPLE = 'ETH-3'
+ 941	'''
+ 942	After the Δ4x standardization step, each sample is tested to
+ 943	assess whether the Δ4x variance within all analyses for that
+ 944	sample differs significantly from that observed for a given reference
+ 945	sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test),
+ 946	which yields a p-value corresponding to the null hypothesis that the
+ 947	underlying variances are equal).
+ 948
+ 949	`LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which
+ 950	sample should be used as a reference for this test.
+ 951	'''
+ 952
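A standalone sketch of the underlying variance test, calling `scipy.stats.levene` directly rather than going through D47crunch (the residual values are invented for illustration):

```py
import numpy as np
from scipy.stats import levene

# D4x residuals (analysis minus sample average) for the reference
# sample and for one unknown -- invented numbers:
ref     = np.array([-0.010, 0.004, 0.008, -0.002])
unknown = np.array([-0.030, 0.025, 0.018, -0.013])

# A small p-value rejects the null hypothesis of equal variances.
stat, p = levene(ref, unknown)
print(f'p = {p:.3f}')
```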
+ 953	ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6)  # (Kim et al., 2007, calcite)
+ 954	'''
+ 955	Specifies the 18O/16O fractionation factor generally applicable
+ 956	to acid reactions in the dataset. Currently used by `D4xdata.wg()`,
+ 957	`D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`.
+ 958
+ 959	By default equal to 1.008129 (calcite reacted at 90 °C,
+ 960	[Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)).
+ 961	'''
+ 962
+ 963	Nominal_d13C_VPDB = {
+ 964		'ETH-1': 2.02,
+ 965		'ETH-2': -10.17,
+ 966		'ETH-3': 1.71,
+ 967		}	# (Bernasconi et al., 2018)
+ 968	'''
+ 969	Nominal δ13C_VPDB values assigned to carbonate standards, used by
+ 970	`D4xdata.standardize_d13C()`.
+ 971
+ 972	By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after
+ 973	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
+ 974	'''
+ 975
+ 976	Nominal_d18O_VPDB = {
+ 977		'ETH-1': -2.19,
+ 978		'ETH-2': -18.69,
+ 979		'ETH-3': -1.78,
+ 980		}	# (Bernasconi et al., 2018)
+ 981	'''
+ 982	Nominal δ18O_VPDB values assigned to carbonate standards, used by
+ 983	`D4xdata.standardize_d18O()`.
+ 984
+ 985	By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after
+ 986	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
+ 987	'''
+ 988
+ 989	d13C_STANDARDIZATION_METHOD = '2pt'
+ 990	'''
+ 991	Method by which to standardize δ13C values:
+ 992	
+ 993	+ `'none'`: do not apply any δ13C standardization.
+ 994	+ `'1pt'`: within each session, offset all initial δ13C values so as to
+ 995	minimize the difference between final δ13C_VPDB values and
+ 996	`Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined).
+ 997	+ `'2pt'`: within each session, apply an affine transformation to all δ13C
+ 998	values so as to minimize the difference between final δ13C_VPDB
+ 999	values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB`
+1000	is defined).
+1001	'''
+1002
+1003	d18O_STANDARDIZATION_METHOD = '2pt'
+1004	'''
+1005	Method by which to standardize δ18O values:
+1006	
+1007	+ `'none'`: do not apply any δ18O standardization.
+1008	+ `'1pt'`: within each session, offset all initial δ18O values so as to
+1009	minimize the difference between final δ18O_VPDB values and
+1010	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
+1011	+ `'2pt'`: within each session, apply an affine transformation to all δ18O
+1012	values so as to minimize the difference between final δ18O_VPDB
+1013	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
+1014	is defined).
+1015	'''
+1016
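The `'1pt'`/`'2pt'` distinction reduces to fitting an offset versus an affine transformation; a self-contained sketch with invented δ13C values:

```py
import numpy as np

X = np.array([2.30, -9.85, 1.95])   # observed anchor d13C_VPDB (invented)
Y = np.array([2.02, -10.17, 1.71])  # nominal anchor d13C_VPDB

# '1pt': a single offset matching the means
offset = np.mean(Y) - np.mean(X)
d13C_1pt = X + offset

# '2pt': a least-squares affine transformation a*x + b
a, b = np.polyfit(X, Y, 1)
d13C_2pt = a * X + b
```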
+1017	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
+1018		'''
+1019		**Parameters**
+1020
+1021		+ `l`: a list of dictionaries, with each dictionary including at least the keys
+1022		`Sample`, `d45`, `d46`, and `d47` or `d48`.
+1023		+ `mass`: `'47'` or `'48'`
+1024		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
+1025		+ `session`: define session name for analyses without a `Session` key
+1026		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
+1027
+1028		Returns a `D4xdata` object derived from `list`.
+1029		'''
+1030		self._4x = mass
+1031		self.verbose = verbose
+1032		self.prefix = 'D4xdata'
+1033		self.logfile = logfile
+1034		list.__init__(self, l)
+1035		self.Nf = None
+1036		self.repeatability = {}
+1037		self.refresh(session = session)
+1038
+1039
+1040	def make_verbal(oldfun):
+1041		'''
+1042		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
+1043		'''
+1044		@wraps(oldfun)
+1045		def newfun(*args, verbose = '', **kwargs):
+1046			myself = args[0]
+1047			oldprefix = myself.prefix
+1048			myself.prefix = oldfun.__name__
+1049			if verbose != '':
+1050				oldverbose = myself.verbose
+1051				myself.verbose = verbose
+1052			out = oldfun(*args, **kwargs)
+1053			myself.prefix = oldprefix
 1054			if verbose != '':
-1055				oldverbose = myself.verbose
-1056				myself.verbose = verbose
-1057			out = oldfun(*args, **kwargs)
-1058			myself.prefix = oldprefix
-1059			if verbose != '':
-1060				myself.verbose = oldverbose
-1061			return out
-1062		return newfun
-1063
-1064
-1065	def msg(self, txt):
-1066		'''
-1067		Log a message to `self.logfile`, and print it out if `verbose = True`
-1068		'''
-1069		self.log(txt)
-1070		if self.verbose:
-1071			print(f'{f"[{self.prefix}]":<16} {txt}')
-1072
-1073
-1074	def vmsg(self, txt):
-1075		'''
-1076		Log a message to `self.logfile` and print it out
-1077		'''
-1078		self.log(txt)
-1079		print(txt)
-1080
-1081
-1082	def log(self, *txts):
-1083		'''
-1084		Log a message to `self.logfile`
-1085		'''
-1086		if self.logfile:
-1087			with open(self.logfile, 'a') as fid:
-1088				for txt in txts:
-1089					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
-1090
-1091
-1092	def refresh(self, session = 'mySession'):
-1093		'''
-1094		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
-1095		'''
-1096		self.fill_in_missing_info(session = session)
-1097		self.refresh_sessions()
-1098		self.refresh_samples()
-1099
-1100
-1101	def refresh_sessions(self):
-1102		'''
-1103		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
-1104		to `False` for all sessions.
-1105		'''
-1106		self.sessions = {
-1107			s: {'data': [r for r in self if r['Session'] == s]}
-1108			for s in sorted({r['Session'] for r in self})
-1109			}
-1110		for s in self.sessions:
-1111			self.sessions[s]['scrambling_drift'] = False
-1112			self.sessions[s]['slope_drift'] = False
-1113			self.sessions[s]['wg_drift'] = False
-1114			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
-1115			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
-1116
-1117
-1118	def refresh_samples(self):
-1119		'''
-1120		Define `self.samples`, `self.anchors`, and `self.unknowns`.
-1121		'''
-1122		self.samples = {
-1123			s: {'data': [r for r in self if r['Sample'] == s]}
-1124			for s in sorted({r['Sample'] for r in self})
-1125			}
-1126		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
-1127		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
+1055				myself.verbose = oldverbose
+1056			return out
+1057		return newfun
+1058
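The decorator idiom above (temporarily overriding instance attributes around the wrapped call) can be reproduced standalone; this sketch is not D47crunch code, just the same pattern on a toy class:

```py
from functools import wraps

def verbal(oldfun):
	# Allow a transient `verbose` override for one call, as in
	# D4xdata.make_verbal() above.
	@wraps(oldfun)
	def newfun(self, *args, verbose = None, **kwargs):
		saved = self.verbose
		if verbose is not None:
			self.verbose = verbose
		try:
			return oldfun(self, *args, **kwargs)
		finally:
			self.verbose = saved
	return newfun

class Demo:
	verbose = False

	@verbal
	def work(self):
		if self.verbose:
			print('working...')

Demo().work(verbose = True)  # prints 'working...'
Demo().work()                # silent
```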
+1059
+1060	def msg(self, txt):
+1061		'''
+1062		Log a message to `self.logfile`, and print it out if `verbose = True`
+1063		'''
+1064		self.log(txt)
+1065		if self.verbose:
+1066			print(f'{f"[{self.prefix}]":<16} {txt}')
+1067
+1068
+1069	def vmsg(self, txt):
+1070		'''
+1071		Log a message to `self.logfile` and print it out
+1072		'''
+1073		self.log(txt)
+1074		print(txt)
+1075
+1076
+1077	def log(self, *txts):
+1078		'''
+1079		Log a message to `self.logfile`
+1080		'''
+1081		if self.logfile:
+1082			with open(self.logfile, 'a') as fid:
+1083				for txt in txts:
+1084					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
+1085
+1086
+1087	def refresh(self, session = 'mySession'):
+1088		'''
+1089		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
+1090		'''
+1091		self.fill_in_missing_info(session = session)
+1092		self.refresh_sessions()
+1093		self.refresh_samples()
+1094
+1095
+1096	def refresh_sessions(self):
+1097		'''
+1098		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
+1099		to `False` for all sessions.
+1100		'''
+1101		self.sessions = {
+1102			s: {'data': [r for r in self if r['Session'] == s]}
+1103			for s in sorted({r['Session'] for r in self})
+1104			}
+1105		for s in self.sessions:
+1106			self.sessions[s]['scrambling_drift'] = False
+1107			self.sessions[s]['slope_drift'] = False
+1108			self.sessions[s]['wg_drift'] = False
+1109			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
+1110			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
+1111
+1112
+1113	def refresh_samples(self):
+1114		'''
+1115		Define `self.samples`, `self.anchors`, and `self.unknowns`.
+1116		'''
+1117		self.samples = {
+1118			s: {'data': [r for r in self if r['Sample'] == s]}
+1119			for s in sorted({r['Sample'] for r in self})
+1120			}
+1121		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
+1122		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
+1123
+1124
+1125	def read(self, filename, sep = '', session = ''):
+1126		'''
+1127		Read file in csv format to load data into a `D47data` object.
 1128
-1129
-1130	def read(self, filename, sep = '', session = ''):
-1131		'''
-1132		Read file in csv format to load data into a `D47data` object.
+1129		In the csv file, spaces before and after field separators (`','` by default)
+1130		are optional. Each line corresponds to a single analysis.
+1131
+1132		The required fields are:
 1133
-1134		In the csv file, spaces before and after field separators (`','` by default)
-1135		are optional. Each line corresponds to a single analysis.
-1136
-1137		The required fields are:
+1134		+ `UID`: a unique identifier
+1135		+ `Session`: an identifier for the analytical session
+1136		+ `Sample`: a sample identifier
+1137		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
 1138
-1139		+ `UID`: a unique identifier
-1140		+ `Session`: an identifier for the analytical session
-1141		+ `Sample`: a sample identifier
-1142		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
-1143
-1144		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
-1145		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
-1146		and `d49` are optional, and set to NaN by default.
-1147
-1148		**Parameters**
-1149
-1150	+ `filename`: the path of the file to read
-1151		+ `sep`: csv separator delimiting the fields
-1152		+ `session`: set `Session` field to this string for all analyses
-1153		'''
-1154		with open(filename) as fid:
-1155			self.input(fid.read(), sep = sep, session = session)
+1139		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1140		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1141		and `d49` are optional, and set to NaN by default.
+1142
+1143		**Parameters**
+1144
+1145	+ `filename`: the path of the file to read
+1146		+ `sep`: csv separator delimiting the fields
+1147		+ `session`: set `Session` field to this string for all analyses
+1148		'''
+1149		with open(filename) as fid:
+1150			self.input(fid.read(), sep = sep, session = session)
+1151
+1152
+1153	def input(self, txt, sep = '', session = ''):
+1154		'''
+1155		Read `txt` string in csv format to load analysis data into a `D47data` object.
 1156
-1157
-1158	def input(self, txt, sep = '', session = ''):
-1159		'''
-1160		Read `txt` string in csv format to load analysis data into a `D47data` object.
+1157		In the csv string, spaces before and after field separators (`','` by default)
+1158		are optional. Each line corresponds to a single analysis.
+1159
+1160		The required fields are:
 1161
-1162		In the csv string, spaces before and after field separators (`','` by default)
-1163		are optional. Each line corresponds to a single analysis.
-1164
-1165		The required fields are:
+1162		+ `UID`: a unique identifier
+1163		+ `Session`: an identifier for the analytical session
+1164		+ `Sample`: a sample identifier
+1165		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
 1166
-1167		+ `UID`: a unique identifier
-1168		+ `Session`: an identifier for the analytical session
-1169		+ `Sample`: a sample identifier
-1170		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
-1171
-1172		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
-1173		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
-1174		and `d49` are optional, and set to NaN by default.
-1175
-1176		**Parameters**
-1177
-1178		+ `txt`: the csv string to read
-1179		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
-1180	whichever appears most often in `txt`.
-1181		+ `session`: set `Session` field to this string for all analyses
-1182		'''
-1183		if sep == '':
-1184			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
-1185		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
-1186		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
-1187
-1188		if session != '':
-1189			for r in data:
-1190				r['Session'] = session
-1191
-1192		self += data
-1193		self.refresh()
-1194
-1195
-1196	@make_verbal
-1197	def wg(self, samples = None, a18_acid = None):
-1198		'''
-1199		Compute bulk composition of the working gas for each session based on
-1200		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
-1201		`self.Nominal_d18O_VPDB`.
-1202		'''
-1203
-1204		self.msg('Computing WG composition:')
+1167		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1168		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1169		and `d49` are optional, and set to NaN by default.
+1170
+1171		**Parameters**
+1172
+1173		+ `txt`: the csv string to read
+1174		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
+1175	whichever appears most often in `txt`.
+1176		+ `session`: set `Session` field to this string for all analyses
+1177		'''
+1178		if sep == '':
+1179			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
+1180		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
+1181		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
+1182
+1183		if session != '':
+1184			for r in data:
+1185				r['Session'] = session
+1186
+1187		self += data
+1188		self.refresh()
+1189
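A minimal sketch of loading analyses through `input()`; the delta values are invented, and the separator is auto-detected as `,` here:

```py
from D47crunch import D47data

mydata = D47data()
mydata.input('''UID, Session, Sample, d45, d46, d47
A01, 2023-01, ETH-1,  5.79, 11.84,  16.22
A02, 2023-01, ETH-2, -6.06, -4.82, -11.40
A03, 2023-01, FOO-1,  5.46, 11.27,  15.29''')

print(len(mydata), 'analyses in', len(mydata.sessions), 'session(s)')
```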
+1190
+1191	@make_verbal
+1192	def wg(self, samples = None, a18_acid = None):
+1193		'''
+1194		Compute bulk composition of the working gas for each session based on
+1195		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
+1196		`self.Nominal_d18O_VPDB`.
+1197		'''
+1198
+1199		self.msg('Computing WG composition:')
+1200
+1201		if a18_acid is None:
+1202			a18_acid = self.ALPHA_18O_ACID_REACTION
+1203		if samples is None:
+1204			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
 1205
-1206		if a18_acid is None:
-1207			a18_acid = self.ALPHA_18O_ACID_REACTION
-1208		if samples is None:
-1209			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
-1210
-1211		assert a18_acid, f'Acid fractionation factor should not be zero.'
-1212
-1213		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
-1214		R45R46_standards = {}
-1215		for sample in samples:
-1216			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
-1217			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
-1218			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
-1219			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
-1220			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
-1221
-1222			C12_s = 1 / (1 + R13_s)
-1223			C13_s = R13_s / (1 + R13_s)
-1224			C16_s = 1 / (1 + R17_s + R18_s)
-1225			C17_s = R17_s / (1 + R17_s + R18_s)
-1226			C18_s = R18_s / (1 + R17_s + R18_s)
-1227
-1228			C626_s = C12_s * C16_s ** 2
-1229			C627_s = 2 * C12_s * C16_s * C17_s
-1230			C628_s = 2 * C12_s * C16_s * C18_s
-1231			C636_s = C13_s * C16_s ** 2
-1232			C637_s = 2 * C13_s * C16_s * C17_s
-1233			C727_s = C12_s * C17_s ** 2
-1234
-1235			R45_s = (C627_s + C636_s) / C626_s
-1236			R46_s = (C628_s + C637_s + C727_s) / C626_s
-1237			R45R46_standards[sample] = (R45_s, R46_s)
-1238		
-1239		for s in self.sessions:
-1240			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
-1241			assert db, f'No sample from {samples} found in session "{s}".'
-1242# 			dbsamples = sorted({r['Sample'] for r in db})
-1243
-1244			X = [r['d45'] for r in db]
-1245			Y = [R45R46_standards[r['Sample']][0] for r in db]
-1246			x1, x2 = np.min(X), np.max(X)
+1206		assert a18_acid, f'Acid fractionation factor should not be zero.'
+1207
+1208		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
+1209		R45R46_standards = {}
+1210		for sample in samples:
+1211			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
+1212			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
+1213			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
+1214			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
+1215			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
+1216
+1217			C12_s = 1 / (1 + R13_s)
+1218			C13_s = R13_s / (1 + R13_s)
+1219			C16_s = 1 / (1 + R17_s + R18_s)
+1220			C17_s = R17_s / (1 + R17_s + R18_s)
+1221			C18_s = R18_s / (1 + R17_s + R18_s)
+1222
+1223			C626_s = C12_s * C16_s ** 2
+1224			C627_s = 2 * C12_s * C16_s * C17_s
+1225			C628_s = 2 * C12_s * C16_s * C18_s
+1226			C636_s = C13_s * C16_s ** 2
+1227			C637_s = 2 * C13_s * C16_s * C17_s
+1228			C727_s = C12_s * C17_s ** 2
+1229
+1230			R45_s = (C627_s + C636_s) / C626_s
+1231			R46_s = (C628_s + C637_s + C727_s) / C626_s
+1232			R45R46_standards[sample] = (R45_s, R46_s)
+1233		
+1234		for s in self.sessions:
+1235			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
+1236			assert db, f'No sample from {samples} found in session "{s}".'
+1237# 			dbsamples = sorted({r['Sample'] for r in db})
+1238
+1239			X = [r['d45'] for r in db]
+1240			Y = [R45R46_standards[r['Sample']][0] for r in db]
+1241			x1, x2 = np.min(X), np.max(X)
+1242
+1243			if x1 < x2:
+1244				wgcoord = x1/(x1-x2)
+1245			else:
+1246				wgcoord = 999
 1247
-1248			if x1 < x2:
-1249				wgcoord = x1/(x1-x2)
-1250			else:
-1251				wgcoord = 999
-1252
-1253			if wgcoord < -.5 or wgcoord > 1.5:
-1254				# unreasonable to extrapolate to d45 = 0
-1255				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
-1256			else :
-1257				# d45 = 0 is reasonably well bracketed
-1258				R45_wg = np.polyfit(X, Y, 1)[1]
-1259
-1260			X = [r['d46'] for r in db]
-1261			Y = [R45R46_standards[r['Sample']][1] for r in db]
-1262			x1, x2 = np.min(X), np.max(X)
+1248			if wgcoord < -.5 or wgcoord > 1.5:
+1249				# unreasonable to extrapolate to d45 = 0
+1250				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
+1251			else :
+1252				# d45 = 0 is reasonably well bracketed
+1253				R45_wg = np.polyfit(X, Y, 1)[1]
+1254
+1255			X = [r['d46'] for r in db]
+1256			Y = [R45R46_standards[r['Sample']][1] for r in db]
+1257			x1, x2 = np.min(X), np.max(X)
+1258
+1259			if x1 < x2:
+1260				wgcoord = x1/(x1-x2)
+1261			else:
+1262				wgcoord = 999
 1263
-1264			if x1 < x2:
-1265				wgcoord = x1/(x1-x2)
-1266			else:
-1267				wgcoord = 999
-1268
-1269			if wgcoord < -.5 or wgcoord > 1.5:
-1270				# unreasonable to extrapolate to d46 = 0
-1271				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
-1272			else :
-1273				# d46 = 0 is reasonably well bracketed
-1274				R46_wg = np.polyfit(X, Y, 1)[1]
-1275
-1276			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
-1277
-1278			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
-1279
-1280			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
-1281			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
-1282			for r in self.sessions[s]['data']:
-1283				r['d13Cwg_VPDB'] = d13Cwg_VPDB
-1284				r['d18Owg_VSMOW'] = d18Owg_VSMOW
-1285
-1286
-1287	def compute_bulk_delta(self, R45, R46, D17O = 0):
-1288		'''
-1289		Compute δ13C_VPDB and δ18O_VSMOW,
-1290		by solving the generalized form of equation (17) from
-1291		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
-1292		assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and
-1293		solving the corresponding second-order Taylor polynomial.
-1294		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014))
-1295		'''
-1296
-1297		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
+1264			if wgcoord < -.5 or wgcoord > 1.5:
+1265				# unreasonable to extrapolate to d46 = 0
+1266				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
+1267			else :
+1268				# d46 = 0 is reasonably well bracketed
+1269				R46_wg = np.polyfit(X, Y, 1)[1]
+1270
+1271			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
+1272
+1273			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
+1274
+1275			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
+1276			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
+1277			for r in self.sessions[s]['data']:
+1278				r['d13Cwg_VPDB'] = d13Cwg_VPDB
+1279				r['d18Owg_VSMOW'] = d18Owg_VSMOW
+1280
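The bracketing test in `wg()` decides between taking a regression intercept at d45 = 0 and averaging scaled ratios; a standalone sketch of that logic, with invented numbers:

```py
import numpy as np

X = np.array([5.2, 5.9, 6.4])              # anchor d45 values (invented)
Y = np.array([0.01204, 0.01205, 0.01206])  # corresponding absolute R45 (invented)

x1, x2 = X.min(), X.max()
wgcoord = x1 / (x1 - x2) if x1 < x2 else 999

if wgcoord < -0.5 or wgcoord > 1.5:
	# d45 = 0 lies far outside the data: average R45 / (1 + d45/1000)
	R45_wg = np.mean(Y / (1 + X / 1000))
else:
	# d45 = 0 is reasonably well bracketed: take the fit intercept
	R45_wg = np.polyfit(X, Y, 1)[1]
print(R45_wg)
```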
+1281
+1282	def compute_bulk_delta(self, R45, R46, D17O = 0):
+1283		'''
+1284		Compute δ13C_VPDB and δ18O_VSMOW,
+1285		by solving the generalized form of equation (17) from
+1286		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
+1287		assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and
+1288		solving the corresponding second-order Taylor polynomial.
+1289		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014))
+1290		'''
+1291
+1292		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
+1293
+1294		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
+1295		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
+1296		C = 2 * self.R18_VSMOW
+1297		D = -R46
 1298
-1299		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
-1300		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
-1301		C = 2 * self.R18_VSMOW
-1302		D = -R46
-1303
-1304		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
-1305		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
-1306		cc = A + B + C + D
-1307
-1308		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
-1309
-1310		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
-1311		R17 = K * R18 ** self.LAMBDA_17
-1312		R13 = R45 - 2 * R17
+1299		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
+1300		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
+1301		cc = A + B + C + D
+1302
+1303		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
+1304
+1305		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
+1306		R17 = K * R18 ** self.LAMBDA_17
+1307		R13 = R45 - 2 * R17
+1308
+1309		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
+1310
+1311		return d13C_VPDB, d18O_VSMOW
+1312
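The second-order solution can be exercised on its own; this repackages the algebra shown above as a standalone function (constants copied from the class attributes; the test ratios are invented):

```py
import numpy as np

LAMBDA_17 = 0.528
R13_VPDB  = 0.01118
R17_VSMOW = 0.00038475
R18_VSMOW = 0.0020052

def bulk_delta(R45, R46, D17O = 0.):
	# Same Taylor-polynomial algebra as compute_bulk_delta() above:
	K  = np.exp(D17O / 1000) * R17_VSMOW * R18_VSMOW ** -LAMBDA_17
	A  = -3 * K ** 2 * R18_VSMOW ** (2 * LAMBDA_17)
	B  = 2 * K * R45 * R18_VSMOW ** LAMBDA_17
	C  = 2 * R18_VSMOW
	D  = -R46
	aa = A * LAMBDA_17 * (2 * LAMBDA_17 - 1) + B * LAMBDA_17 * (LAMBDA_17 - 1) / 2
	bb = 2 * A * LAMBDA_17 + B * LAMBDA_17 + C
	cc = A + B + C + D
	d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
	R18 = (1 + d18O_VSMOW / 1000) * R18_VSMOW
	R13 = R45 - 2 * K * R18 ** LAMBDA_17
	return 1000 * (R13 / R13_VPDB - 1), d18O_VSMOW

print(bulk_delta(0.0120, 0.00420))  # invented R45, R46
```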
 1313
-1314		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
-1315
-1316		return d13C_VPDB, d18O_VSMOW
-1317
-1318
-1319	@make_verbal
-1320	def crunch(self, verbose = ''):
-1321		'''
-1322		Compute bulk composition and raw clumped isotope anomalies for all analyses.
-1323		'''
-1324		for r in self:
-1325			self.compute_bulk_and_clumping_deltas(r)
-1326		self.standardize_d13C()
-1327		self.standardize_d18O()
-1328		self.msg(f"Crunched {len(self)} analyses.")
-1329
-1330
-1331	def fill_in_missing_info(self, session = 'mySession'):
-1332		'''
-1333		Fill in optional fields with default values
-1334		'''
-1335		for i,r in enumerate(self):
-1336			if 'D17O' not in r:
-1337				r['D17O'] = 0.
-1338			if 'UID' not in r:
-1339				r['UID'] = f'{i+1}'
-1340			if 'Session' not in r:
-1341				r['Session'] = session
-1342			for k in ['d47', 'd48', 'd49']:
-1343				if k not in r:
-1344					r[k] = np.nan
-1345
-1346
-1347	def standardize_d13C(self):
-1348		'''
-1349		Perform δ13C standardization within each session `s` according to
-1350		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
-1351		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
-1352		may be redefined arbitrarily at a later stage.
-1353		'''
-1354		for s in self.sessions:
-1355			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
-1356				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
-1357				X,Y = zip(*XY)
-1358				if self.sessions[s]['d13C_standardization_method'] == '1pt':
-1359					offset = np.mean(Y) - np.mean(X)
-1360					for r in self.sessions[s]['data']:
-1361						r['d13C_VPDB'] += offset				
-1362				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
-1363					a,b = np.polyfit(X,Y,1)
-1364					for r in self.sessions[s]['data']:
-1365						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
-1366
-1367	def standardize_d18O(self):
-1368		'''
-1369		Perform δ18O standardization within each session `s` according to
-1370		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
-1371		which is defined by default by `D47data.refresh_sessions()` as equal to
-1372		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
-1373		'''
-1374		for s in self.sessions:
-1375			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
-1376				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
-1377				X,Y = zip(*XY)
-1378				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
-1379				if self.sessions[s]['d18O_standardization_method'] == '1pt':
-1380					offset = np.mean(Y) - np.mean(X)
-1381					for r in self.sessions[s]['data']:
-1382						r['d18O_VSMOW'] += offset				
-1383				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
-1384					a,b = np.polyfit(X,Y,1)
-1385					for r in self.sessions[s]['data']:
-1386						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
-1387	
+1314	@make_verbal
+1315	def crunch(self, verbose = ''):
+1316		'''
+1317		Compute bulk composition and raw clumped isotope anomalies for all analyses.
+1318		'''
+1319		for r in self:
+1320			self.compute_bulk_and_clumping_deltas(r)
+1321		self.standardize_d13C()
+1322		self.standardize_d18O()
+1323		self.msg(f"Crunched {len(self)} analyses.")
+1324
+1325
+1326	def fill_in_missing_info(self, session = 'mySession'):
+1327		'''
+1328		Fill in optional fields with default values
+1329		'''
+1330		for i,r in enumerate(self):
+1331			if 'D17O' not in r:
+1332				r['D17O'] = 0.
+1333			if 'UID' not in r:
+1334				r['UID'] = f'{i+1}'
+1335			if 'Session' not in r:
+1336				r['Session'] = session
+1337			for k in ['d47', 'd48', 'd49']:
+1338				if k not in r:
+1339					r[k] = np.nan
+1340
+1341
+1342	def standardize_d13C(self):
+1343		'''
+1344		Perform δ13C standardization within each session `s` according to
+1345		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
+1346		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
+1347		may be redefined arbitrarily at a later stage.
+1348		'''
+1349		for s in self.sessions:
+1350			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
+1351				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
+1352				X,Y = zip(*XY)
+1353				if self.sessions[s]['d13C_standardization_method'] == '1pt':
+1354					offset = np.mean(Y) - np.mean(X)
+1355					for r in self.sessions[s]['data']:
+1356						r['d13C_VPDB'] += offset				
+1357				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
+1358					a,b = np.polyfit(X,Y,1)
+1359					for r in self.sessions[s]['data']:
+1360						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
+1361
+1362	def standardize_d18O(self):
+1363		'''
+1364		Perform δ18O standardization within each session `s` according to
+1365		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
+1366		which is defined by default by `D47data.refresh_sessions()` as equal to
+1367		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
+1368		'''
+1369		for s in self.sessions:
+1370			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
+1371				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
+1372				X,Y = zip(*XY)
+1373				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
+1374				if self.sessions[s]['d18O_standardization_method'] == '1pt':
+1375					offset = np.mean(Y) - np.mean(X)
+1376					for r in self.sessions[s]['data']:
+1377						r['d18O_VSMOW'] += offset				
+1378				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
+1379					a,b = np.polyfit(X,Y,1)
+1380					for r in self.sessions[s]['data']:
+1381						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
+1382	
+1383
+1384	def compute_bulk_and_clumping_deltas(self, r):
+1385		'''
+1386		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
+1387		'''
 1388
-1389	def compute_bulk_and_clumping_deltas(self, r):
-1390		'''
-1391		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
-1392		'''
+1389		# Compute working gas R13, R18, and isobar ratios
+1390		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
+1391		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
+1392		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
 1393
-1394		# Compute working gas R13, R18, and isobar ratios
-1395		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
-1396		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
-1397		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
-1398
-1399		# Compute analyte isobar ratios
-1400		R45 = (1 + r['d45'] / 1000) * R45_wg
-1401		R46 = (1 + r['d46'] / 1000) * R46_wg
-1402		R47 = (1 + r['d47'] / 1000) * R47_wg
-1403		R48 = (1 + r['d48'] / 1000) * R48_wg
-1404		R49 = (1 + r['d49'] / 1000) * R49_wg
-1405
-1406		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
-1407		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
-1408		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
+1394		# Compute analyte isobar ratios
+1395		R45 = (1 + r['d45'] / 1000) * R45_wg
+1396		R46 = (1 + r['d46'] / 1000) * R46_wg
+1397		R47 = (1 + r['d47'] / 1000) * R47_wg
+1398		R48 = (1 + r['d48'] / 1000) * R48_wg
+1399		R49 = (1 + r['d49'] / 1000) * R49_wg
+1400
+1401		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
+1402		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
+1403		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
+1404
+1405		# Compute stochastic isobar ratios of the analyte
+1406		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
+1407			R13, R18, D17O = r['D17O']
+1408		)
 1409
-1410		# Compute stochastic isobar ratios of the analyte
-1411		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
-1412			R13, R18, D17O = r['D17O']
-1413		)
-1414
-1415		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
-1416		# and raise a warning if the corresponding anomalies exceed 0.02 ppm.
-1417		if (R45 / R45stoch - 1) > 5e-8:
-1418			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
-1419		if (R46 / R46stoch - 1) > 5e-8:
-1420			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
+1410		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
+1411		# and raise a warning if the corresponding anomalies exceed 0.02 ppm.
+1412		if (R45 / R45stoch - 1) > 5e-8:
+1413			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
+1414		if (R46 / R46stoch - 1) > 5e-8:
+1415			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
+1416
+1417		# Compute raw clumped isotope anomalies
+1418		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
+1419		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
+1420		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
 1421
-1422		# Compute raw clumped isotope anomalies
-1423		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
-1424		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
-1425		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
-1426
-1427
-1428	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
-1429		'''
-1430		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
-1431		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
-1432		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
-1433		'''
-1434
-1435		# Compute R17
-1436		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
-1437
-1438		# Compute isotope concentrations
-1439		C12 = (1 + R13) ** -1
-1440		C13 = C12 * R13
-1441		C16 = (1 + R17 + R18) ** -1
-1442		C17 = C16 * R17
-1443		C18 = C16 * R18
-1444
-1445		# Compute stochastic isotopologue concentrations
-1446		C626 = C16 * C12 * C16
-1447		C627 = C16 * C12 * C17 * 2
-1448		C628 = C16 * C12 * C18 * 2
-1449		C636 = C16 * C13 * C16
-1450		C637 = C16 * C13 * C17 * 2
-1451		C638 = C16 * C13 * C18 * 2
-1452		C727 = C17 * C12 * C17
-1453		C728 = C17 * C12 * C18 * 2
-1454		C737 = C17 * C13 * C17
-1455		C738 = C17 * C13 * C18 * 2
-1456		C828 = C18 * C12 * C18
-1457		C838 = C18 * C13 * C18
-1458
-1459		# Compute stochastic isobar ratios
-1460		R45 = (C636 + C627) / C626
-1461		R46 = (C628 + C637 + C727) / C626
-1462		R47 = (C638 + C728 + C737) / C626
-1463		R48 = (C738 + C828) / C626
-1464		R49 = C838 / C626
+1422
+1423	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
+1424		'''
+1425		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
+1426		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
+1427		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
+1428		'''
+1429
+1430		# Compute R17
+1431		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
+1432
+1433		# Compute isotope concentrations
+1434		C12 = (1 + R13) ** -1
+1435		C13 = C12 * R13
+1436		C16 = (1 + R17 + R18) ** -1
+1437		C17 = C16 * R17
+1438		C18 = C16 * R18
+1439
+1440		# Compute stochastic isotopologue concentrations
+1441		C626 = C16 * C12 * C16
+1442		C627 = C16 * C12 * C17 * 2
+1443		C628 = C16 * C12 * C18 * 2
+1444		C636 = C16 * C13 * C16
+1445		C637 = C16 * C13 * C17 * 2
+1446		C638 = C16 * C13 * C18 * 2
+1447		C727 = C17 * C12 * C17
+1448		C728 = C17 * C12 * C18 * 2
+1449		C737 = C17 * C13 * C17
+1450		C738 = C17 * C13 * C18 * 2
+1451		C828 = C18 * C12 * C18
+1452		C838 = C18 * C13 * C18
+1453
+1454		# Compute stochastic isobar ratios
+1455		R45 = (C636 + C627) / C626
+1456		R46 = (C628 + C637 + C727) / C626
+1457		R47 = (C638 + C728 + C737) / C626
+1458		R48 = (C738 + C828) / C626
+1459		R49 = C838 / C626
+1460
+1461		# Account for stochastic anomalies
+1462		R47 *= 1 + D47 / 1000
+1463		R48 *= 1 + D48 / 1000
+1464		R49 *= 1 + D49 / 1000
 1465
-1466		# Account for stochastic anomalies
-1467		R47 *= 1 + D47 / 1000
-1468		R48 *= 1 + D48 / 1000
-1469		R49 *= 1 + D49 / 1000
-1470
-1471		# Return isobar ratios
-1472		return R45, R46, R47, R48, R49
-1473
-1474
-1475	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
-1476		'''
-1477		Split unknown samples by UID (treat all analyses as different samples)
-1478		or by session (treat analyses of a given sample in different sessions as
-1479		different samples).
-1480
-1481		**Parameters**
-1482
-1483		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
-1484		+ `grouping`: `by_uid` | `by_session`
-1485		'''
-1486		if samples_to_split == 'all':
-1487			samples_to_split = [s for s in self.unknowns]
-1488		gkeys = {'by_uid':'UID', 'by_session':'Session'}
-1489		self.grouping = grouping.lower()
-1490		if self.grouping in gkeys:
-1491			gkey = gkeys[self.grouping]
-1492		for r in self:
-1493			if r['Sample'] in samples_to_split:
-1494				r['Sample_original'] = r['Sample']
-1495				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
-1496			elif r['Sample'] in self.unknowns:
-1497				r['Sample_original'] = r['Sample']
-1498		self.refresh_samples()
-1499
-1500
-1501	def unsplit_samples(self, tables = False):
-1502		'''
-1503		Reverse the effects of `D47data.split_samples()`.
-1504		
-1505		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
-1506		
-1507		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
-1508		probably use `D4xdata.combine_samples()` instead to reverse the effects of
-1509		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
-1510		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
-1511		that case session-averaged Δ4x values are statistically independent).
-1512		'''
-1513		unknowns_old = sorted({s for s in self.unknowns})
-1514		CM_old = self.standardization.covar[:,:]
-1515		VD_old = self.standardization.params.valuesdict().copy()
-1516		vars_old = self.standardization.var_names
-1517
-1518		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
-1519
-1520		Ns = len(vars_old) - len(unknowns_old)
-1521		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
-1522		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
-1523
-1524		W = np.zeros((len(vars_new), len(vars_old)))
-1525		W[:Ns,:Ns] = np.eye(Ns)
-1526		for u in unknowns_new:
-1527			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
-1528			if self.grouping == 'by_session':
-1529				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
-1530			elif self.grouping == 'by_uid':
-1531				weights = [1 for s in splits]
-1532			sw = sum(weights)
-1533			weights = [w/sw for w in weights]
-1534			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
-1535
-1536		CM_new = W @ CM_old @ W.T
-1537		V = W @ np.array([[VD_old[k]] for k in vars_old])
-1538		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
-1539
-1540		self.standardization.covar = CM_new
-1541		self.standardization.params.valuesdict = lambda : VD_new
-1542		self.standardization.var_names = vars_new
+1466		# Return isobar ratios
+1467		return R45, R46, R47, R48, R49
+1468
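The link between these stochastic ratios and the raw anomalies computed earlier (`D47raw = 1000 * (R47 / R47stoch - 1)`) can be verified directly; the gas composition below is invented:

```py
from D47crunch import D47data

d = D47data()

R13 = d.R13_VPDB * (1 + 2.0 / 1000)   # d13C_VPDB = +2 permil (invented)
R18 = d.R18_VSMOW * (1 + 30. / 1000)  # d18O_VSMOW = +30 permil (invented)

# Stochastic ratios, then the same gas with D47 = +0.5 permil:
_, _, R47stoch, _, _ = d.compute_isobar_ratios(R13, R18)
_, _, R47,      _, _ = d.compute_isobar_ratios(R13, R18, D47 = 0.5)

print(1000 * (R47 / R47stoch - 1))  # recovers 0.5
```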
+1469
+1470	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
+1471		'''
+1472		Split unknown samples by UID (treat all analyses as different samples)
+1473		or by session (treat analyses of a given sample in different sessions as
+1474		different samples).
+1475
+1476		**Parameters**
+1477
+1478		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
+1479		+ `grouping`: `by_uid` | `by_session`
+1480		'''
+1481		if samples_to_split == 'all':
+1482			samples_to_split = [s for s in self.unknowns]
+1483		gkeys = {'by_uid':'UID', 'by_session':'Session'}
+1484		self.grouping = grouping.lower()
+1485		if self.grouping in gkeys:
+1486			gkey = gkeys[self.grouping]
+1487		for r in self:
+1488			if r['Sample'] in samples_to_split:
+1489				r['Sample_original'] = r['Sample']
+1490				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
+1491			elif r['Sample'] in self.unknowns:
+1492				r['Sample_original'] = r['Sample']
+1493		self.refresh_samples()
+1494
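A usage sketch for `split_samples()` on a toy dataset (all values invented):

```py
from D47crunch import D47data

mydata = D47data([
	{'UID': '1', 'Session': 'S1', 'Sample': 'FOO', 'd45': 1., 'd46': 2., 'd47': 3.},
	{'UID': '2', 'Session': 'S2', 'Sample': 'FOO', 'd45': 1., 'd46': 2., 'd47': 3.},
])

# Treat FOO's analyses from different sessions as distinct samples:
mydata.split_samples(['FOO'], grouping = 'by_session')
print(sorted(mydata.unknowns))  # ['FOO__S1', 'FOO__S2']
```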
+1495
+1496	def unsplit_samples(self, tables = False):
+1497		'''
+1498		Reverse the effects of `D47data.split_samples()`.
+1499		
+1500		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
+1501		
+1502		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
+1503		probably use `D4xdata.combine_samples()` instead to reverse the effects of
+1504		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
+1505		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
+1506		that case session-averaged Δ4x values are statistically independent).
+1507		'''
+1508		unknowns_old = sorted({s for s in self.unknowns})
+1509		CM_old = self.standardization.covar[:,:]
+1510		VD_old = self.standardization.params.valuesdict().copy()
+1511		vars_old = self.standardization.var_names
+1512
+1513		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
+1514
+1515		Ns = len(vars_old) - len(unknowns_old)
+1516		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
+1517		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
+1518
+1519		W = np.zeros((len(vars_new), len(vars_old)))
+1520		W[:Ns,:Ns] = np.eye(Ns)
+1521		for u in unknowns_new:
+1522			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
+1523			if self.grouping == 'by_session':
+1524				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
+1525			elif self.grouping == 'by_uid':
+1526				weights = [1 for s in splits]
+1527			sw = sum(weights)
+1528			weights = [w/sw for w in weights]
+1529			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
+1530
+1531		CM_new = W @ CM_old @ W.T
+1532		V = W @ np.array([[VD_old[k]] for k in vars_old])
+1533		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
+1534
+1535		self.standardization.covar = CM_new
+1536		self.standardization.params.valuesdict = lambda : VD_new
+1537		self.standardization.var_names = vars_new
+1538
+1539		for r in self:
+1540			if r['Sample'] in self.unknowns:
+1541				r['Sample_split'] = r['Sample']
+1542				r['Sample'] = r['Sample_original']
 1543
-1544		for r in self:
-1545			if r['Sample'] in self.unknowns:
-1546				r['Sample_split'] = r['Sample']
-1547				r['Sample'] = r['Sample_original']
-1548
-1549		self.refresh_samples()
-1550		self.consolidate_samples()
-1551		self.repeatabilities()
-1552
-1553		if tables:
-1554			self.table_of_analyses()
-1555			self.table_of_samples()
-1556
-1557	def assign_timestamps(self):
-1558		'''
-1559		Assign a time field `t` of type `float` to each analysis.
-1560
-1561		If `TimeTag` is one of the data fields, `t` is equal within a given session
-1562		to `TimeTag` minus the mean value of `TimeTag` for that session.
-1563		Otherwise, `TimeTag` is by default equal to the index of each analysis
-1564		in the dataset and `t` is defined as above.
-1565		'''
-1566		for session in self.sessions:
-1567			sdata = self.sessions[session]['data']
-1568			try:
-1569				t0 = np.mean([r['TimeTag'] for r in sdata])
-1570				for r in sdata:
-1571					r['t'] = r['TimeTag'] - t0
-1572			except KeyError:
-1573				t0 = (len(sdata)-1)/2
-1574				for t,r in enumerate(sdata):
-1575					r['t'] = t - t0
-1576
-1577
-1578	def report(self):
-1579		'''
-1580		Prints a report on the standardization fit.
-1581		Only applicable after `D4xdata.standardize(method='pooled')`.
-1582		'''
-1583		report_fit(self.standardization)
-1584
-1585
-1586	def combine_samples(self, sample_groups):
-1587		'''
-1588		Combine analyses of different samples to compute weighted average Δ4x
-1589		and new error (co)variances corresponding to the groups defined by the `sample_groups`
-1590		dictionary.
-1591		
-1592		Caution: samples are weighted by number of replicate analyses, which is a
-1593		reasonable default behavior but is not always optimal (e.g., in the case of strongly
-1594		correlated analytical errors for one or more samples).
-1595		
-1596		Returns a tuple of:
-1597		
-1598		+ the list of group names
-1599		+ an array of the corresponding Δ4x values
-1600		+ the corresponding (co)variance matrix
-1601		
-1602		**Parameters**
-1603
-1604		+ `sample_groups`: a dictionary of the form:
-1605		```py
-1606		{'group1': ['sample_1', 'sample_2'],
-1607		 'group2': ['sample_3', 'sample_4', 'sample_5']}
-1608		```
-1609		'''
-1610		
-1611		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
-1612		groups = sorted(sample_groups.keys())
-1613		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
-1614		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
-1615		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
-1616		W = np.array([
-1617			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
-1618			for j in groups])
-1619		D4x_new = W @ D4x_old
-1620		CM_new = W @ CM_old @ W.T
-1621
-1622		return groups, D4x_new[:,0], CM_new
-1623		
-1624
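The recombination in `combine_samples()` is a single matrix sandwich; a standalone numeric sketch (Δ47 values, replicate counts, and covariances all invented):

```py
import numpy as np

# Three samples grouped as A = {S1, S2}, B = {S3}:
D47 = np.array([[0.61], [0.63], [0.70]])  # per-sample D47
CM  = np.diag([1e-4, 2e-4, 1e-4])         # per-sample (co)variance
N   = [4, 2, 6]                           # replicate counts

# Rows of W hold replicate-count weights, normalized per group:
W = np.array([
	[4/6, 2/6, 0.],
	[0. , 0. , 1.],
])

D47_new = W @ D47        # weighted group averages
CM_new  = W @ CM @ W.T   # propagated (co)variance matrix
print(D47_new[:, 0], np.diag(CM_new) ** .5)
```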
-1625	@make_verbal
-1626	def standardize(self,
-1627		method = 'pooled',
-1628		weighted_sessions = [],
-1629		consolidate = True,
-1630		consolidate_tables = False,
-1631		consolidate_plots = False,
-1632		constraints = {},
-1633		):
-1634		'''
-1635		Compute absolute Δ4x values for all replicate analyses and for sample averages.
-1636		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
-1637		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
-1638		i.e. that their true Δ4x value does not change between sessions
-1639		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
-1640		`'indep_sessions'`, the standardization processes each session independently, based only
-1641		on anchor analyses.
-1642		'''
-1643
-1644		self.standardization_method = method
-1645		self.assign_timestamps()
-1646
-1647		if method == 'pooled':
-1648			if weighted_sessions:
-1649				for session_group in weighted_sessions:
-1650					if self._4x == '47':
-1651						X = D47data([r for r in self if r['Session'] in session_group])
-1652					elif self._4x == '48':
-1653						X = D48data([r for r in self if r['Session'] in session_group])
-1654					X.Nominal_D4x = self.Nominal_D4x.copy()
-1655					X.refresh()
-1656					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
-1657					w = np.sqrt(result.redchi)
-1658					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
-1659					for r in X:
-1660						r[f'wD{self._4x}raw'] *= w
-1661			else:
-1662				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
-1663				for r in self:
-1664					r[f'wD{self._4x}raw'] = 1.
-1665
-1666			params = Parameters()
-1667			for k,session in enumerate(self.sessions):
-1668				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
-1669				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
-1670				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
-1671				s = pf(session)
-1672				params.add(f'a_{s}', value = 0.9)
-1673				params.add(f'b_{s}', value = 0.)
-1674				params.add(f'c_{s}', value = -0.9)
-1675				params.add(f'a2_{s}', value = 0.,
-1676# 					vary = self.sessions[session]['scrambling_drift'],
-1677					)
-1678				params.add(f'b2_{s}', value = 0.,
-1679# 					vary = self.sessions[session]['slope_drift'],
-1680					)
-1681				params.add(f'c2_{s}', value = 0.,
-1682# 					vary = self.sessions[session]['wg_drift'],
-1683					)
-1684				if not self.sessions[session]['scrambling_drift']:
-1685					params[f'a2_{s}'].expr = '0'
-1686				if not self.sessions[session]['slope_drift']:
-1687					params[f'b2_{s}'].expr = '0'
-1688				if not self.sessions[session]['wg_drift']:
-1689					params[f'c2_{s}'].expr = '0'
-1690
-1691			for sample in self.unknowns:
-1692				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
-1693
-1694			for k in constraints:
-1695				params[k].expr = constraints[k]
-1696
-1697			def residuals(p):
-1698				R = []
-1699				for r in self:
-1700					session = pf(r['Session'])
-1701					sample = pf(r['Sample'])
-1702					if r['Sample'] in self.Nominal_D4x:
-1703						R += [ (
-1704							r[f'D{self._4x}raw'] - (
-1705								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
-1706								+ p[f'b_{session}'] * r[f'd{self._4x}']
-1707								+	p[f'c_{session}']
-1708								+ r['t'] * (
-1709									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
-1710									+ p[f'b2_{session}'] * r[f'd{self._4x}']
-1711									+	p[f'c2_{session}']
-1712									)
-1713								)
-1714							) / r[f'wD{self._4x}raw'] ]
-1715					else:
-1716						R += [ (
-1717							r[f'D{self._4x}raw'] - (
-1718								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
-1719								+ p[f'b_{session}'] * r[f'd{self._4x}']
-1720								+	p[f'c_{session}']
-1721								+ r['t'] * (
-1722									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
-1723									+ p[f'b2_{session}'] * r[f'd{self._4x}']
-1724									+	p[f'c2_{session}']
-1725									)
-1726								)
-1727							) / r[f'wD{self._4x}raw'] ]
-1728				return R
-1729
-1730			M = Minimizer(residuals, params)
-1731			result = M.least_squares()
-1732			self.Nf = result.nfree
-1733			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
-1734			new_names, new_covar, new_se = _fullcovar(result)[:3]
-1735			result.var_names = new_names
-1736			result.covar = new_covar
-1737
-1738			for r in self:
-1739				s = pf(r["Session"])
-1740				a = result.params.valuesdict()[f'a_{s}']
-1741				b = result.params.valuesdict()[f'b_{s}']
-1742				c = result.params.valuesdict()[f'c_{s}']
-1743				a2 = result.params.valuesdict()[f'a2_{s}']
-1744				b2 = result.params.valuesdict()[f'b2_{s}']
-1745				c2 = result.params.valuesdict()[f'c2_{s}']
-1746				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
-1747
-1748			self.standardization = result
-1749
-1750			for session in self.sessions:
-1751				self.sessions[session]['Np'] = 3
-1752				for k in ['scrambling', 'slope', 'wg']:
-1753					if self.sessions[session][f'{k}_drift']:
-1754						self.sessions[session]['Np'] += 1
+1544		self.refresh_samples()
+1545		self.consolidate_samples()
+1546		self.repeatabilities()
+1547
+1548		if tables:
+1549			self.table_of_analyses()
+1550			self.table_of_samples()
+1551
+1552	def assign_timestamps(self):
+1553		'''
+1554		Assign a time field `t` of type `float` to each analysis.
+1555
+1556		If `TimeTag` is one of the data fields, `t` is equal within a given session
+1557		to `TimeTag` minus the mean value of `TimeTag` for that session.
+1559		Otherwise, `TimeTag` defaults to the index of each analysis
+1560		within its session and `t` is defined as above.
+1560		'''
+1561		for session in self.sessions:
+1562			sdata = self.sessions[session]['data']
+1563			try:
+1564				t0 = np.mean([r['TimeTag'] for r in sdata])
+1565				for r in sdata:
+1566					r['t'] = r['TimeTag'] - t0
+1567			except KeyError:
+1568				t0 = (len(sdata)-1)/2
+1569				for t,r in enumerate(sdata):
+1570					r['t'] = t - t0
+1571
+1572
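For intuition, a minimal sketch of the centering that `assign_timestamps()` performs within each session (the `TimeTag` values below are hypothetical):

```py
import numpy as np

# Hypothetical 'TimeTag' values for one session:
timetags = [10.0, 12.0, 14.0]
t0 = np.mean(timetags)               # session mean of TimeTag
t = [tt - t0 for tt in timetags]     # centered times: [-2.0, 0.0, 2.0]
```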
+1573	def report(self):
+1574		'''
+1575		Prints a report on the standardization fit.
+1576		Only applicable after `D4xdata.standardize(method='pooled')`.
+1577		'''
+1578		report_fit(self.standardization)
+1579
+1580
+1581	def combine_samples(self, sample_groups):
+1582		'''
+1583		Combine analyses of different samples to compute weighted average Δ4x
+1584		and new error (co)variances corresponding to the groups defined by the `sample_groups`
+1585		dictionary.
+1586		
+1587		Caution: samples are weighted by number of replicate analyses, which is a
+1588		reasonable default behavior but is not always optimal (e.g., in the case of strongly
+1589		correlated analytical errors for one or more samples).
+1590		
+1591		Returns a tuple of:
+1592		
+1593		+ the list of group names
+1594		+ an array of the corresponding Δ4x values
+1595		+ the corresponding (co)variance matrix
+1596		
+1597		**Parameters**
+1598
+1599		+ `sample_groups`: a dictionary of the form:
+1600		```py
+1601		{'group1': ['sample_1', 'sample_2'],
+1602		 'group2': ['sample_3', 'sample_4', 'sample_5']}
+1603		```
+1604		'''
+1605		
+1606		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
+1607		groups = sorted(sample_groups.keys())
+1608		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
+1609		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
+1610		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
+1611		W = np.array([
+1612			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
+1613			for j in groups])
+1614		D4x_new = W @ D4x_old
+1615		CM_new = W @ CM_old @ W.T
+1616
+1617		return groups, D4x_new[:,0], CM_new
+1618		
+1619
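A hedged usage sketch for `combine_samples()` (the `mydata` instance and sample names are hypothetical, and standardization is assumed to have run already so that `mydata.samples` and the sample covariances are populated). Because the group averages are the linear map `W @ D4x_old`, the returned covariance is simply `W @ CM_old @ W.T`:

```py
import numpy as np

# Hypothetical: mydata is a standardized D47data instance
groups, D47_new, CM_new = mydata.combine_samples({
	'group1': ['sample_1', 'sample_2'],
	'group2': ['sample_3', 'sample_4', 'sample_5'],
	})
for g, D, var in zip(groups, D47_new, np.diag(CM_new)):
	print(f'{g}: D47 = {D:.4f} (SE = {var**0.5:.4f})')
```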
+1620	@make_verbal
+1621	def standardize(self,
+1622		method = 'pooled',
+1623		weighted_sessions = [],
+1624		consolidate = True,
+1625		consolidate_tables = False,
+1626		consolidate_plots = False,
+1627		constraints = {},
+1628		):
+1629		'''
+1630		Compute absolute Δ4x values for all replicate analyses and for sample averages.
+1631		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
+1632		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
+1633		i.e. that their true Δ4x value does not change between sessions
+1634		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
+1635		`'indep_sessions'`, the standardization processes each session independently, based only
+1636		on anchor analyses.
+1637		'''
+1638
+1639		self.standardization_method = method
+1640		self.assign_timestamps()
+1641
+1642		if method == 'pooled':
+1643			if weighted_sessions:
+1644				for session_group in weighted_sessions:
+1645					if self._4x == '47':
+1646						X = D47data([r for r in self if r['Session'] in session_group])
+1647					elif self._4x == '48':
+1648						X = D48data([r for r in self if r['Session'] in session_group])
+1649					X.Nominal_D4x = self.Nominal_D4x.copy()
+1650					X.refresh()
+1651					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
+1652					w = np.sqrt(result.redchi)
+1653					self.msg(f'Session group {session_group} RMSWD = {w:.4f}')
+1654					for r in X:
+1655						r[f'wD{self._4x}raw'] *= w
+1656			else:
+1657				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
+1658				for r in self:
+1659					r[f'wD{self._4x}raw'] = 1.
+1660
+1661			params = Parameters()
+1662			for k,session in enumerate(self.sessions):
+1663				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
+1664				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
+1665				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
+1666				s = pf(session)
+1667				params.add(f'a_{s}', value = 0.9)
+1668				params.add(f'b_{s}', value = 0.)
+1669				params.add(f'c_{s}', value = -0.9)
+1670				params.add(f'a2_{s}', value = 0.,
+1671# 					vary = self.sessions[session]['scrambling_drift'],
+1672					)
+1673				params.add(f'b2_{s}', value = 0.,
+1674# 					vary = self.sessions[session]['slope_drift'],
+1675					)
+1676				params.add(f'c2_{s}', value = 0.,
+1677# 					vary = self.sessions[session]['wg_drift'],
+1678					)
+1679				if not self.sessions[session]['scrambling_drift']:
+1680					params[f'a2_{s}'].expr = '0'
+1681				if not self.sessions[session]['slope_drift']:
+1682					params[f'b2_{s}'].expr = '0'
+1683				if not self.sessions[session]['wg_drift']:
+1684					params[f'c2_{s}'].expr = '0'
+1685
+1686			for sample in self.unknowns:
+1687				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
+1688
+1689			for k in constraints:
+1690				params[k].expr = constraints[k]
+1691
+1692			def residuals(p):
+1693				R = []
+1694				for r in self:
+1695					session = pf(r['Session'])
+1696					sample = pf(r['Sample'])
+1697					if r['Sample'] in self.Nominal_D4x:
+1698						R += [ (
+1699							r[f'D{self._4x}raw'] - (
+1700								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
+1701								+ p[f'b_{session}'] * r[f'd{self._4x}']
+1702								+	p[f'c_{session}']
+1703								+ r['t'] * (
+1704									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
+1705									+ p[f'b2_{session}'] * r[f'd{self._4x}']
+1706									+	p[f'c2_{session}']
+1707									)
+1708								)
+1709							) / r[f'wD{self._4x}raw'] ]
+1710					else:
+1711						R += [ (
+1712							r[f'D{self._4x}raw'] - (
+1713								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
+1714								+ p[f'b_{session}'] * r[f'd{self._4x}']
+1715								+	p[f'c_{session}']
+1716								+ r['t'] * (
+1717									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
+1718									+ p[f'b2_{session}'] * r[f'd{self._4x}']
+1719									+	p[f'c2_{session}']
+1720									)
+1721								)
+1722							) / r[f'wD{self._4x}raw'] ]
+1723				return R
+1724
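In the notation of the code, each analysis contributes to `residuals()` the weighted misfit of the per-session affine model, with Δ4x taken from `Nominal_D4x` for anchors and from the fitted `D4x_<sample>` parameter for unknowns:

$$r = \frac{\Delta_{4x}^{\mathrm{raw}} - \left[a\,\Delta_{4x} + b\,\delta_{4x} + c + t\left(a_2\,\Delta_{4x} + b_2\,\delta_{4x} + c_2\right)\right]}{w}$$

where $w$ is the analysis weight `wD4xraw` assigned above.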
+1725			M = Minimizer(residuals, params)
+1726			result = M.least_squares()
+1727			self.Nf = result.nfree
+1728			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
+1729			new_names, new_covar, new_se = _fullcovar(result)[:3]
+1730			result.var_names = new_names
+1731			result.covar = new_covar
+1732
+1733			for r in self:
+1734				s = pf(r["Session"])
+1735				a = result.params.valuesdict()[f'a_{s}']
+1736				b = result.params.valuesdict()[f'b_{s}']
+1737				c = result.params.valuesdict()[f'c_{s}']
+1738				a2 = result.params.valuesdict()[f'a2_{s}']
+1739				b2 = result.params.valuesdict()[f'b2_{s}']
+1740				c2 = result.params.valuesdict()[f'c2_{s}']
+1741				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
+1742
+1743			self.standardization = result
+1744
+1745			for session in self.sessions:
+1746				self.sessions[session]['Np'] = 3
+1747				for k in ['scrambling', 'slope', 'wg']:
+1748					if self.sessions[session][f'{k}_drift']:
+1749						self.sessions[session]['Np'] += 1
+1750
+1751			if consolidate:
+1752				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
+1753			return result
+1754
 1755
-1756			if consolidate:
-1757				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
-1758			return result
-1759
-1760
-1761		elif method == 'indep_sessions':
-1762
-1763			if weighted_sessions:
-1764				for session_group in weighted_sessions:
-1765					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
-1766					X.Nominal_D4x = self.Nominal_D4x.copy()
-1767					X.refresh()
-1768					# This is only done to assign r['wD47raw'] for r in X:
-1769					X.standardize(method = method, weighted_sessions = [], consolidate = False)
-1770					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
-1771			else:
-1772				self.msg('All weights set to 1 ‰')
-1773				for r in self:
-1774					r[f'wD{self._4x}raw'] = 1
-1775
-1776			for session in self.sessions:
-1777				s = self.sessions[session]
-1778				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
-1779				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
-1780				s['Np'] = sum(p_active)
-1781				sdata = s['data']
-1782
-1783				A = np.array([
-1784					[
-1785						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
-1786						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
-1787						1 / r[f'wD{self._4x}raw'],
-1788						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
-1789						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
-1790						r['t'] / r[f'wD{self._4x}raw']
-1791						]
-1792					for r in sdata if r['Sample'] in self.anchors
-1793					])[:,p_active] # only keep columns for the active parameters
-1794				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
-1795				s['Na'] = Y.size
-1796				CM = linalg.inv(A.T @ A)
-1797				bf = (CM @ A.T @ Y).T[0,:]
-1798				k = 0
-1799				for n,a in zip(p_names, p_active):
-1800					if a:
-1801						s[n] = bf[k]
-1802# 						self.msg(f'{n} = {bf[k]}')
-1803						k += 1
-1804					else:
-1805						s[n] = 0.
-1806# 						self.msg(f'{n} = 0.0')
+1756		elif method == 'indep_sessions':
+1757
+1758			if weighted_sessions:
+1759				for session_group in weighted_sessions:
+1760					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
+1761					X.Nominal_D4x = self.Nominal_D4x.copy()
+1762					X.refresh()
+1763					# This is only done to assign r['wD47raw'] for r in X:
+1764					X.standardize(method = method, weighted_sessions = [], consolidate = False)
+1765					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
+1766			else:
+1767				self.msg('All weights set to 1 ‰')
+1768				for r in self:
+1769					r[f'wD{self._4x}raw'] = 1
+1770
+1771			for session in self.sessions:
+1772				s = self.sessions[session]
+1773				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
+1774				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
+1775				s['Np'] = sum(p_active)
+1776				sdata = s['data']
+1777
+1778				A = np.array([
+1779					[
+1780						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
+1781						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
+1782						1 / r[f'wD{self._4x}raw'],
+1783						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
+1784						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
+1785						r['t'] / r[f'wD{self._4x}raw']
+1786						]
+1787					for r in sdata if r['Sample'] in self.anchors
+1788					])[:,p_active] # only keep columns for the active parameters
+1789				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
+1790				s['Na'] = Y.size
+1791				CM = linalg.inv(A.T @ A)
+1792				bf = (CM @ A.T @ Y).T[0,:]
+1793				k = 0
+1794				for n,a in zip(p_names, p_active):
+1795					if a:
+1796						s[n] = bf[k]
+1797# 						self.msg(f'{n} = {bf[k]}')
+1798						k += 1
+1799					else:
+1800						s[n] = 0.
+1801# 						self.msg(f'{n} = 0.0')
+1802
+1803				for r in sdata:
+1804					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
+1805					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
+1806					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
 1807
-1808				for r in sdata:
-1809					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
-1810					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
-1811					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
-1812
-1813				s['CM'] = np.zeros((6,6))
-1814				i = 0
-1815				k_active = [j for j,a in enumerate(p_active) if a]
-1816				for j,a in enumerate(p_active):
-1817					if a:
-1818						s['CM'][j,k_active] = CM[i,:]
-1819						i += 1
-1820
-1821			if not weighted_sessions:
-1822				w = self.rmswd()['rmswd']
-1823				for r in self:
-1824					r[f'wD{self._4x}'] *= w
-1825					r[f'wD{self._4x}raw'] *= w
-1826				for session in self.sessions:
-1827					self.sessions[session]['CM'] *= w**2
-1828
-1829			for session in self.sessions:
-1830				s = self.sessions[session]
-1831				s['SE_a'] = s['CM'][0,0]**.5
-1832				s['SE_b'] = s['CM'][1,1]**.5
-1833				s['SE_c'] = s['CM'][2,2]**.5
-1834				s['SE_a2'] = s['CM'][3,3]**.5
-1835				s['SE_b2'] = s['CM'][4,4]**.5
-1836				s['SE_c2'] = s['CM'][5,5]**.5
-1837
-1838			if not weighted_sessions:
-1839				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
-1840			else:
-1841				self.Nf = 0
-1842				for sg in weighted_sessions:
-1843					self.Nf += self.rmswd(sessions = sg)['Nf']
-1844
-1845			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
-1846
-1847			avgD4x = {
-1848				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
-1849				for sample in self.samples
-1850				}
-1851			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
-1852			rD4x = (chi2/self.Nf)**.5
-1853			self.repeatability[f'sigma_{self._4x}'] = rD4x
-1854
-1855			if consolidate:
-1856				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
-1857
-1858
-1859	def standardization_error(self, session, d4x, D4x, t = 0):
-1860		'''
-1861		Compute standardization error for a given session and
-1862		(δ47, Δ47) composition.
-1863		'''
-1864		a = self.sessions[session]['a']
-1865		b = self.sessions[session]['b']
-1866		c = self.sessions[session]['c']
-1867		a2 = self.sessions[session]['a2']
-1868		b2 = self.sessions[session]['b2']
-1869		c2 = self.sessions[session]['c2']
-1870		CM = self.sessions[session]['CM']
-1871
-1872		x, y = D4x, d4x
-1873		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
-1874# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
-1875		dxdy = -(b+b2*t) / (a+a2*t)
-1876		dxdz = 1. / (a+a2*t)
-1877		dxda = -x / (a+a2*t)
-1878		dxdb = -y / (a+a2*t)
-1879		dxdc = -1. / (a+a2*t)
-1880		dxda2 = -x * t / (a+a2*t)
-1881		dxdb2 = -y * t / (a+a2*t)
-1882		dxdc2 = -t / (a+a2*t)
-1883		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
-1884		sx = (V @ CM @ V.T) ** .5
-1885		return sx
-1886
-1887
-1888	@make_verbal
-1889	def summary(self,
-1890		dir = 'output',
-1891		filename = None,
-1892		save_to_file = True,
-1893		print_out = True,
-1894		):
-1895		'''
-1896		Print out and/or save to disk a summary of the standardization results.
-1897
-1898		**Parameters**
-1899
-1900		+ `dir`: the directory in which to save the table
-1901		+ `filename`: the name of the csv file to write to
-1902		+ `save_to_file`: whether to save the table to disk
-1903		+ `print_out`: whether to print out the table
-1904		'''
-1905
-1906		out = []
-1907		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
-1908		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
-1909		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
-1910		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
-1911		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
-1912		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
-1913		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
-1914		out += [['Model degrees of freedom', f"{self.Nf}"]]
-1915		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
-1916		out += [['Standardization method', self.standardization_method]]
-1917
-1918		if save_to_file:
-1919			if not os.path.exists(dir):
-1920				os.makedirs(dir)
-1921			if filename is None:
-1922				filename = f'D{self._4x}_summary.csv'
-1923			with open(f'{dir}/{filename}', 'w') as fid:
-1924				fid.write(make_csv(out))
-1925		if print_out:
-1926			self.msg('\n' + pretty_table(out, header = 0))
-1927
-1928
-1929	@make_verbal
-1930	def table_of_sessions(self,
-1931		dir = 'output',
-1932		filename = None,
-1933		save_to_file = True,
-1934		print_out = True,
-1935		output = None,
-1936		):
-1937		'''
-1938		Print out and/or save to disk a table of sessions.
-1939
-1940		**Parameters**
-1941
-1942		+ `dir`: the directory in which to save the table
-1943		+ `filename`: the name of the csv file to write to
-1944		+ `save_to_file`: whether to save the table to disk
-1945		+ `print_out`: whether to print out the table
-1946		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-1947		    if set to `'raw'`: return a list of list of strings
-1948		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-1949		'''
-1950		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
-1951		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
-1952		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
-1953
-1954		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
-1955		if include_a2:
-1956			out[-1] += ['a2 ± SE']
-1957		if include_b2:
-1958			out[-1] += ['b2 ± SE']
-1959		if include_c2:
-1960			out[-1] += ['c2 ± SE']
-1961		for session in self.sessions:
-1962			out += [[
-1963				session,
-1964				f"{self.sessions[session]['Na']}",
-1965				f"{self.sessions[session]['Nu']}",
-1966				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
-1967				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
-1968				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
-1969				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
-1970				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
-1971				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
-1972				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
-1973				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
-1974				]]
-1975			if include_a2:
-1976				if self.sessions[session]['scrambling_drift']:
-1977					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
+1808				s['CM'] = np.zeros((6,6))
+1809				i = 0
+1810				k_active = [j for j,a in enumerate(p_active) if a]
+1811				for j,a in enumerate(p_active):
+1812					if a:
+1813						s['CM'][j,k_active] = CM[i,:]
+1814						i += 1
+1815
+1816			if not weighted_sessions:
+1817				w = self.rmswd()['rmswd']
+1818				for r in self:
+1819					r[f'wD{self._4x}'] *= w
+1820					r[f'wD{self._4x}raw'] *= w
+1821				for session in self.sessions:
+1822					self.sessions[session]['CM'] *= w**2
+1823
+1824			for session in self.sessions:
+1825				s = self.sessions[session]
+1826				s['SE_a'] = s['CM'][0,0]**.5
+1827				s['SE_b'] = s['CM'][1,1]**.5
+1828				s['SE_c'] = s['CM'][2,2]**.5
+1829				s['SE_a2'] = s['CM'][3,3]**.5
+1830				s['SE_b2'] = s['CM'][4,4]**.5
+1831				s['SE_c2'] = s['CM'][5,5]**.5
+1832
+1833			if not weighted_sessions:
+1834				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
+1835			else:
+1836				self.Nf = 0
+1837				for sg in weighted_sessions:
+1838					self.Nf += self.rmswd(sessions = sg)['Nf']
+1839
+1840			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
+1841
+1842			avgD4x = {
+1843				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
+1844				for sample in self.samples
+1845				}
+1846			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
+1847			rD4x = (chi2/self.Nf)**.5
+1848			self.repeatability[f'sigma_{self._4x}'] = rD4x
+1849
+1850			if consolidate:
+1851				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
+1852
+1853
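A hedged end-to-end sketch of how `standardize()` is typically called (the input file name is hypothetical, and the `read()`/`wg()`/`crunch()` steps are assumptions based on the usual D47crunch workflow):

```py
from D47crunch import D47data

data = D47data()
data.read('rawdata.csv')             # hypothetical CSV of raw analyses
data.wg()                            # compute working-gas composition
data.crunch()                        # compute δ13C, δ18O and raw Δ47 values
data.standardize(method = 'pooled')  # pooled standardization, as defined above
data.summary()                       # print a summary of the fit
```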
+1854	def standardization_error(self, session, d4x, D4x, t = 0):
+1855		'''
+1856		Compute standardization error for a given session and
+1857		(δ47, Δ47) composition.
+1858		'''
+1859		a = self.sessions[session]['a']
+1860		b = self.sessions[session]['b']
+1861		c = self.sessions[session]['c']
+1862		a2 = self.sessions[session]['a2']
+1863		b2 = self.sessions[session]['b2']
+1864		c2 = self.sessions[session]['c2']
+1865		CM = self.sessions[session]['CM']
+1866
+1867		x, y = D4x, d4x
+1868		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
+1869# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
+1870		dxdy = -(b+b2*t) / (a+a2*t)
+1871		dxdz = 1. / (a+a2*t)
+1872		dxda = -x / (a+a2*t)
+1873		dxdb = -y / (a+a2*t)
+1874		dxdc = -1. / (a+a2*t)
+1875		dxda2 = -x * t / (a+a2*t)
+1876		dxdb2 = -y * t / (a+a2*t)
+1877		dxdc2 = -t / (a+a2*t)
+1878		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
+1879		sx = (V @ CM @ V.T) ** .5
+1880		return sx
+1881
+1882
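The value returned above is first-order (delta-method) error propagation of the session parameter covariance matrix `CM` through the inverse of the standardization model, matching the commented inversion in the code:

$$\sigma_x = \sqrt{V\,\mathrm{CM}\,V^{\mathsf{T}}}, \qquad V = \left(\frac{\partial x}{\partial a}, \frac{\partial x}{\partial b}, \frac{\partial x}{\partial c}, \frac{\partial x}{\partial a_2}, \frac{\partial x}{\partial b_2}, \frac{\partial x}{\partial c_2}\right)$$

with $x = (z - b\,y - b_2\,y\,t - c - c_2\,t)/(a + a_2\,t)$.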
+1883	@make_verbal
+1884	def summary(self,
+1885		dir = 'output',
+1886		filename = None,
+1887		save_to_file = True,
+1888		print_out = True,
+1889		):
+1890		'''
+1891		Print out and/or save to disk a summary of the standardization results.
+1892
+1893		**Parameters**
+1894
+1895		+ `dir`: the directory in which to save the table
+1896		+ `filename`: the name of the csv file to write to
+1897		+ `save_to_file`: whether to save the table to disk
+1898		+ `print_out`: whether to print out the table
+1899		'''
+1900
+1901		out = []
+1902		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
+1903		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
+1904		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
+1905		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
+1906		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
+1907		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
+1908		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
+1909		out += [['Model degrees of freedom', f"{self.Nf}"]]
+1910		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
+1911		out += [['Standardization method', self.standardization_method]]
+1912
+1913		if save_to_file:
+1914			if not os.path.exists(dir):
+1915				os.makedirs(dir)
+1916			if filename is None:
+1917				filename = f'D{self._4x}_summary.csv'
+1918			with open(f'{dir}/{filename}', 'w') as fid:
+1919				fid.write(make_csv(out))
+1920		if print_out:
+1921			self.msg('\n' + pretty_table(out, header = 0))
+1922
+1923
+1924	@make_verbal
+1925	def table_of_sessions(self,
+1926		dir = 'output',
+1927		filename = None,
+1928		save_to_file = True,
+1929		print_out = True,
+1930		output = None,
+1931		):
+1932		'''
+1933		Print out and/or save to disk a table of sessions.
+1934
+1935		**Parameters**
+1936
+1937		+ `dir`: the directory in which to save the table
+1938		+ `filename`: the name of the csv file to write to
+1939		+ `save_to_file`: whether to save the table to disk
+1940		+ `print_out`: whether to print out the table
+1941		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+1942		    if set to `'raw'`: return a list of list of strings
+1943		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+1944		'''
+1945		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
+1946		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
+1947		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
+1948
+1949		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
+1950		if include_a2:
+1951			out[-1] += ['a2 ± SE']
+1952		if include_b2:
+1953			out[-1] += ['b2 ± SE']
+1954		if include_c2:
+1955			out[-1] += ['c2 ± SE']
+1956		for session in self.sessions:
+1957			out += [[
+1958				session,
+1959				f"{self.sessions[session]['Na']}",
+1960				f"{self.sessions[session]['Nu']}",
+1961				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
+1962				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
+1963				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
+1964				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
+1965				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
+1966				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
+1967				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
+1968				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
+1969				]]
+1970			if include_a2:
+1971				if self.sessions[session]['scrambling_drift']:
+1972					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
+1973				else:
+1974					out[-1] += ['']
+1975			if include_b2:
+1976				if self.sessions[session]['slope_drift']:
+1977					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
 1978				else:
 1979					out[-1] += ['']
-1980			if include_b2:
-1981				if self.sessions[session]['slope_drift']:
-1982					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
+1980			if include_c2:
+1981				if self.sessions[session]['wg_drift']:
+1982					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
 1983				else:
 1984					out[-1] += ['']
-1985			if include_c2:
-1986				if self.sessions[session]['wg_drift']:
-1987					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
-1988				else:
-1989					out[-1] += ['']
-1990
-1991		if save_to_file:
-1992			if not os.path.exists(dir):
-1993				os.makedirs(dir)
-1994			if filename is None:
-1995				filename = f'D{self._4x}_sessions.csv'
-1996			with open(f'{dir}/{filename}', 'w') as fid:
-1997				fid.write(make_csv(out))
-1998		if print_out:
-1999			self.msg('\n' + pretty_table(out))
-2000		if output == 'raw':
-2001			return out
-2002		elif output == 'pretty':
-2003			return pretty_table(out)
-2004
-2005
-2006	@make_verbal
-2007	def table_of_analyses(
-2008		self,
-2009		dir = 'output',
-2010		filename = None,
-2011		save_to_file = True,
-2012		print_out = True,
-2013		output = None,
-2014		):
-2015		'''
-2016		Print out and/or save to disk a table of analyses.
-2017
-2018		**Parameters**
-2019
-2020		+ `dir`: the directory in which to save the table
-2021		+ `filename`: the name of the csv file to write to
-2022		+ `save_to_file`: whether to save the table to disk
-2023		+ `print_out`: whether to print out the table
-2024		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-2025		    if set to `'raw'`: return a list of list of strings
-2026		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2027		'''
-2028
-2029		out = [['UID','Session','Sample']]
-2030		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
-2031		for f in extra_fields:
-2032			out[-1] += [f[0]]
-2033		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
-2034		for r in self:
-2035			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
-2036			for f in extra_fields:
-2037				out[-1] += [f"{r[f[0]]:{f[1]}}"]
-2038			out[-1] += [
-2039				f"{r['d13Cwg_VPDB']:.3f}",
-2040				f"{r['d18Owg_VSMOW']:.3f}",
-2041				f"{r['d45']:.6f}",
-2042				f"{r['d46']:.6f}",
-2043				f"{r['d47']:.6f}",
-2044				f"{r['d48']:.6f}",
-2045				f"{r['d49']:.6f}",
-2046				f"{r['d13C_VPDB']:.6f}",
-2047				f"{r['d18O_VSMOW']:.6f}",
-2048				f"{r['D47raw']:.6f}",
-2049				f"{r['D48raw']:.6f}",
-2050				f"{r['D49raw']:.6f}",
-2051				f"{r[f'D{self._4x}']:.6f}"
-2052				]
-2053		if save_to_file:
-2054			if not os.path.exists(dir):
-2055				os.makedirs(dir)
-2056			if filename is None:
-2057				filename = f'D{self._4x}_analyses.csv'
-2058			with open(f'{dir}/{filename}', 'w') as fid:
-2059				fid.write(make_csv(out))
-2060		if print_out:
-2061			self.msg('\n' + pretty_table(out))
-2062		return out
-2063
-2064	@make_verbal
-2065	def covar_table(
-2066		self,
-2067		correl = False,
-2068		dir = 'output',
-2069		filename = None,
-2070		save_to_file = True,
-2071		print_out = True,
-2072		output = None,
-2073		):
-2074		'''
-2075		Print out, save to disk and/or return the variance-covariance matrix of D4x
-2076		for all unknown samples.
-2077
-2078		**Parameters**
-2079
-2080		+ `dir`: the directory in which to save the csv
-2081		+ `filename`: the name of the csv file to write to
-2082		+ `save_to_file`: whether to save the csv
-2083		+ `print_out`: whether to print out the matrix
-2084		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
-2085		    if set to `'raw'`: return a list of list of strings
-2086		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2087		'''
-2088		samples = sorted([u for u in self.unknowns])
-2089		out = [[''] + samples]
-2090		for s1 in samples:
-2091			out.append([s1])
-2092			for s2 in samples:
-2093				if correl:
-2094					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
-2095				else:
-2096					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
-2097
-2098		if save_to_file:
-2099			if not os.path.exists(dir):
-2100				os.makedirs(dir)
-2101			if filename is None:
-2102				if correl:
-2103					filename = f'D{self._4x}_correl.csv'
-2104				else:
-2105					filename = f'D{self._4x}_covar.csv'
-2106			with open(f'{dir}/{filename}', 'w') as fid:
-2107				fid.write(make_csv(out))
-2108		if print_out:
-2109			self.msg('\n'+pretty_table(out))
-2110		if output == 'raw':
-2111			return out
-2112		elif output == 'pretty':
-2113			return pretty_table(out)
-2114
-2115	@make_verbal
-2116	def table_of_samples(
-2117		self,
-2118		dir = 'output',
-2119		filename = None,
-2120		save_to_file = True,
-2121		print_out = True,
-2122		output = None,
-2123		):
-2124		'''
-2125		Print out, save to disk and/or return a table of samples.
-2126
-2127		**Parameters**
-2128
-2129		+ `dir`: the directory in which to save the csv
-2130		+ `filename`: the name of the csv file to write to
-2131		+ `save_to_file`: whether to save the csv
-2132		+ `print_out`: whether to print out the table
-2133		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-2134		    if set to `'raw'`: return a list of list of strings
-2135		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2136		'''
-2137
-2138		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
-2139		for sample in self.anchors:
-2140			out += [[
-2141				f"{sample}",
-2142				f"{self.samples[sample]['N']}",
-2143				f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2144				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2145				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
-2146				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
-2147				]]
-2148		for sample in self.unknowns:
-2149			out += [[
-2150				f"{sample}",
-2151				f"{self.samples[sample]['N']}",
-2152				f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2153				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2154				f"{self.samples[sample][f'D{self._4x}']:.4f}",
-2155				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
-2156				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
-2157				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
-2158				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
-2159				]]
-2160		if save_to_file:
-2161			if not os.path.exists(dir):
-2162				os.makedirs(dir)
-2163			if filename is None:
-2164				filename = f'D{self._4x}_samples.csv'
-2165			with open(f'{dir}/{filename}', 'w') as fid:
-2166				fid.write(make_csv(out))
-2167		if print_out:
-2168			self.msg('\n'+pretty_table(out))
-2169		if output == 'raw':
-2170			return out
-2171		elif output == 'pretty':
-2172			return pretty_table(out)
+1985
+1986		if save_to_file:
+1987			if not os.path.exists(dir):
+1988				os.makedirs(dir)
+1989			if filename is None:
+1990				filename = f'D{self._4x}_sessions.csv'
+1991			with open(f'{dir}/{filename}', 'w') as fid:
+1992				fid.write(make_csv(out))
+1993		if print_out:
+1994			self.msg('\n' + pretty_table(out))
+1995		if output == 'raw':
+1996			return out
+1997		elif output == 'pretty':
+1998			return pretty_table(out)
+1999
+2000
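A hedged sketch of consuming the `output = 'raw'` form of `table_of_sessions()` (the `mydata` instance is hypothetical):

```py
# Hypothetical: fetch the sessions table as a list of lists of strings
rows = mydata.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
header, body = rows[0], rows[1:]
a_col = header.index('a ± SE')       # locate a column by its header
print([row[a_col] for row in body])  # per-session 'a ± SE' strings
```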
+2001	@make_verbal
+2002	def table_of_analyses(
+2003		self,
+2004		dir = 'output',
+2005		filename = None,
+2006		save_to_file = True,
+2007		print_out = True,
+2008		output = None,
+2009		):
+2010		'''
+2011		Print out and/or save to disk a table of analyses.
+2012
+2013		**Parameters**
+2014
+2015		+ `dir`: the directory in which to save the table
+2016		+ `filename`: the name of the csv file to write to
+2017		+ `save_to_file`: whether to save the table to disk
+2018		+ `print_out`: whether to print out the table
+2019		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2020		    if set to `'raw'`: return a list of list of strings
+2021		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2022		'''
+2023
+2024		out = [['UID','Session','Sample']]
+2025		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
+2026		for f in extra_fields:
+2027			out[-1] += [f[0]]
+2028		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
+2029		for r in self:
+2030			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
+2031			for f in extra_fields:
+2032				out[-1] += [f"{r[f[0]]:{f[1]}}"]
+2033			out[-1] += [
+2034				f"{r['d13Cwg_VPDB']:.3f}",
+2035				f"{r['d18Owg_VSMOW']:.3f}",
+2036				f"{r['d45']:.6f}",
+2037				f"{r['d46']:.6f}",
+2038				f"{r['d47']:.6f}",
+2039				f"{r['d48']:.6f}",
+2040				f"{r['d49']:.6f}",
+2041				f"{r['d13C_VPDB']:.6f}",
+2042				f"{r['d18O_VSMOW']:.6f}",
+2043				f"{r['D47raw']:.6f}",
+2044				f"{r['D48raw']:.6f}",
+2045				f"{r['D49raw']:.6f}",
+2046				f"{r[f'D{self._4x}']:.6f}"
+2047				]
+2048		if save_to_file:
+2049			if not os.path.exists(dir):
+2050				os.makedirs(dir)
+2051			if filename is None:
+2052				filename = f'D{self._4x}_analyses.csv'
+2053			with open(f'{dir}/{filename}', 'w') as fid:
+2054				fid.write(make_csv(out))
+2055		if print_out:
+2056			self.msg('\n' + pretty_table(out))
+2057		return out
+2058
+2059	@make_verbal
+2060	def covar_table(
+2061		self,
+2062		correl = False,
+2063		dir = 'output',
+2064		filename = None,
+2065		save_to_file = True,
+2066		print_out = True,
+2067		output = None,
+2068		):
+2069		'''
+2070		Print out, save to disk and/or return the variance-covariance matrix of D4x
+2071		for all unknown samples.
+2072
+2073		**Parameters**
+2074
+2075		+ `dir`: the directory in which to save the csv
+2076		+ `filename`: the name of the csv file to write to
+2077		+ `save_to_file`: whether to save the csv
+2078		+ `print_out`: whether to print out the matrix
+2079		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
+2080		    if set to `'raw'`: return a list of list of strings
+2081		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2082		'''
+2083		samples = sorted([u for u in self.unknowns])
+2084		out = [[''] + samples]
+2085		for s1 in samples:
+2086			out.append([s1])
+2087			for s2 in samples:
+2088				if correl:
+2089					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
+2090				else:
+2091					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
+2092
+2093		if save_to_file:
+2094			if not os.path.exists(dir):
+2095				os.makedirs(dir)
+2096			if filename is None:
+2097				if correl:
+2098					filename = f'D{self._4x}_correl.csv'
+2099				else:
+2100					filename = f'D{self._4x}_covar.csv'
+2101			with open(f'{dir}/{filename}', 'w') as fid:
+2102				fid.write(make_csv(out))
+2103		if print_out:
+2104			self.msg('\n'+pretty_table(out))
+2105		if output == 'raw':
+2106			return out
+2107		elif output == 'pretty':
+2108			return pretty_table(out)
+2109
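Similarly, the raw form of `covar_table()` can be turned back into a numeric matrix (hypothetical `mydata`, with `numpy` imported as `np` as in the module):

```py
rows = mydata.covar_table(save_to_file = False, print_out = False, output = 'raw')
names = rows[0][1:]                  # unknown sample names
C = np.array([[float(x) for x in row[1:]] for row in rows[1:]])
```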
+2110	@make_verbal
+2111	def table_of_samples(
+2112		self,
+2113		dir = 'output',
+2114		filename = None,
+2115		save_to_file = True,
+2116		print_out = True,
+2117		output = None,
+2118		):
+2119		'''
+2120		Print out, save to disk and/or return a table of samples.
+2121
+2122		**Parameters**
+2123
+2124		+ `dir`: the directory in which to save the csv
+2125		+ `filename`: the name of the csv file to write to
+2126		+ `save_to_file`: whether to save the csv
+2127		+ `print_out`: whether to print out the table
+2128		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2129		    if set to `'raw'`: return a list of list of strings
+2130		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2131		'''
+2132
+2133		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
+2134		for sample in self.anchors:
+2135			out += [[
+2136				f"{sample}",
+2137				f"{self.samples[sample]['N']}",
+2138				f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2139				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2140				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
+2141				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
+2142				]]
+2143		for sample in self.unknowns:
+2144			out += [[
+2145				f"{sample}",
+2146				f"{self.samples[sample]['N']}",
+2147				f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2148				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",
+2150				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
+2151				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
+2152				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
+2153				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
+2154				]]
+2155		if save_to_file:
+2156			if not os.path.exists(dir):
+2157				os.makedirs(dir)
+2158			if filename is None:
+2159				filename = f'D{self._4x}_samples.csv'
+2160			with open(f'{dir}/{filename}', 'w') as fid:
+2161				fid.write(make_csv(out))
+2162		if print_out:
+2163			self.msg('\n'+pretty_table(out))
+2164		if output == 'raw':
+2165			return out
+2166		elif output == 'pretty':
+2167			return pretty_table(out)
+2168
+2169
+2170	def plot_sessions(self, dir = 'output', figsize = (8,8)):
+2171		'''
+2172		Generate session plots and save them to disk.
 2173
-2174
-2175	def plot_sessions(self, dir = 'output', figsize = (8,8)):
-2176		'''
-2177		Generate session plots and save them to disk.
-2178
-2179		**Parameters**
-2180
-2181		+ `dir`: the directory in which to save the plots
-2182		+ `figsize`: the width and height (in inches) of each plot
-2183		'''
-2184		if not os.path.exists(dir):
-2185			os.makedirs(dir)
+2174		**Parameters**
+2175
+2176		+ `dir`: the directory in which to save the plots
+2177		+ `figsize`: the width and height (in inches) of each plot
+2178		'''
+2179		if not os.path.exists(dir):
+2180			os.makedirs(dir)
+2181
+2182		for session in self.sessions:
+2183			sp = self.plot_single_session(session, xylimits = 'constant')
+2184			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
+2185			ppl.close(sp.fig)
 2186
-2187		for session in self.sessions:
-2188			sp = self.plot_single_session(session, xylimits = 'constant')
-2189			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
-2190			ppl.close(sp.fig)
-2191
+2187
+2188	@make_verbal
+2189	def consolidate_samples(self):
+2190		'''
+2191		Compile various statistics for each sample.
 2192
-2193	@make_verbal
-2194	def consolidate_samples(self):
-2195		'''
-2196		Compile various statistics for each sample.
+2193		For each anchor sample:
+2194
+2195		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
+2196		+ `SE_D47` or `SE_D48`: set to zero by definition
 2197
-2198		For each anchor sample:
+2198		For each unknown sample:
 2199
-2200		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
-2201		+ `SE_D47` or `SE_D48`: set to zero by definition
+2200		+ `D47` or `D48`: the standardized Δ4x value for this unknown
+2201		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
 2202
-2203		For each unknown sample:
+2203		For each anchor and unknown:
 2204
-2205		+ `D47` or `D48`: the standardized Δ4x value for this unknown
-2206		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
-2207
-2208		For each anchor and unknown:
-2209
-2210		+ `N`: the total number of analyses of this sample
-2211		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
-2212		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
-2213		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
-2214		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
-2215		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
-2216		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
-2217		'''
-2218		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
-2219		for sample in self.samples:
-2220			self.samples[sample]['N'] = len(self.samples[sample]['data'])
-2221			if self.samples[sample]['N'] > 1:
-2222				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
-2223
-2224			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
-2225			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
-2226
-2227			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
-2228			if len(D4x_pop) > 2:
-2229				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
-2230
-2231		if self.standardization_method == 'pooled':
-2232			for sample in self.anchors:
-2233				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2234				self.samples[sample][f'SE_D{self._4x}'] = 0.
-2235			for sample in self.unknowns:
-2236				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
-2237				try:
-2238					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
-2239				except ValueError:
-2240					# when `sample` is constrained by self.standardize(constraints = {...}),
-2241					# it is no longer listed in self.standardization.var_names.
-2242					# Temporary fix: define SE as zero for now
-2243					self.samples[sample][f'SE_D{self._4x}'] = 0.
-2244
-2245		elif self.standardization_method == 'indep_sessions':
-2246			for sample in self.anchors:
-2247				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2248				self.samples[sample][f'SE_D{self._4x}'] = 0.
-2249			for sample in self.unknowns:
-2250				self.msg(f'Consolidating sample {sample}')
-2251				self.unknowns[sample][f'session_D{self._4x}'] = {}
-2252				session_avg = []
-2253				for session in self.sessions:
-2254					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
-2255					if sdata:
-2256						self.msg(f'{sample} found in session {session}')
-2257						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
-2258						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
-2259						# !! TODO: sigma_s below does not account for temporal changes in standardization error
-2260						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
-2261						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
-2262						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
-2263						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
-2264				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
-2265				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
-2266				wsum = sum([weights[s] for s in weights])
-2267				for s in weights:
-2268					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
+2205		+ `N`: the total number of analyses of this sample
+2206		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
+2207		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
+2208		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
+2209		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
+2210		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
+2211		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
+2212		'''
+2213		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
+2214		for sample in self.samples:
+2215			self.samples[sample]['N'] = len(self.samples[sample]['data'])
+2216			if self.samples[sample]['N'] > 1:
+2217				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
+2218
+2219			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
+2220			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
+2221
+2222			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
+2223			if len(D4x_pop) > 2:
+2224				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
+2225
+2226		if self.standardization_method == 'pooled':
+2227			for sample in self.anchors:
+2228				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2229				self.samples[sample][f'SE_D{self._4x}'] = 0.
+2230			for sample in self.unknowns:
+2231				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
+2232				try:
+2233					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
+2234				except ValueError:
+2235					# when `sample` is constrained by self.standardize(constraints = {...}),
+2236					# it is no longer listed in self.standardization.var_names.
+2237					# Temporary fix: define SE as zero for now
+2238					self.samples[sample][f'SE_D{self._4x}'] = 0.
+2239
+2240		elif self.standardization_method == 'indep_sessions':
+2241			for sample in self.anchors:
+2242				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2243				self.samples[sample][f'SE_D{self._4x}'] = 0.
+2244			for sample in self.unknowns:
+2245				self.msg(f'Consolidating sample {sample}')
+2246				self.unknowns[sample][f'session_D{self._4x}'] = {}
+2247				session_avg = []
+2248				for session in self.sessions:
+2249					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
+2250					if sdata:
+2251						self.msg(f'{sample} found in session {session}')
+2252						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
+2253						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
+2254						# !! TODO: sigma_s below does not account for temporal changes in standardization error
+2255						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
+2256						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
+2257						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
+2258						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
+2259				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
+2260				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
+2261				wsum = sum([weights[s] for s in weights])
+2262				for s in weights:
+2263					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
+2264
+2265
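In the `'indep_sessions'` branch above, the per-session averages are combined by `w_avg()`; below is a minimal sketch of the inverse-variance weighting this is assumed to implement (numbers are hypothetical), consistent with the normalized weights (`SE**-2 / wsum`) computed just above:

```py
import numpy as np

def inv_variance_avg(values, SEs):
	# weighted mean with weights 1/SE**2
	w = np.asarray(SEs, dtype = float)**-2
	avg = (w * np.asarray(values)).sum() / w.sum()
	return avg, w.sum()**-0.5        # weighted mean and its standard error

print(inv_variance_avg([0.601, 0.615], [0.008, 0.012]))
```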
+2266	def consolidate_sessions(self):
+2267		'''
+2268		Compute various statistics for each session.
 2269
-2270
-2271	def consolidate_sessions(self):
-2272		'''
-2273		Compute various statistics for each session.
-2274
-2275		+ `Na`: Number of anchor analyses in the session
-2276		+ `Nu`: Number of unknown analyses in the session
-2277		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
-2278		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
-2279		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
-2280		+ `a`: scrambling factor
-2281		+ `b`: compositional slope
-2282		+ `c`: WG offset
-2283		+ `SE_a`: Model standard error of `a`
-2284		+ `SE_b`: Model standard error of `b`
-2285		+ `SE_c`: Model standard error of `c`
-2286		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
-2287		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
-2288		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
-2289		+ `a2`: scrambling factor drift
-2290		+ `b2`: compositional slope drift
-2291		+ `c2`: WG offset drift
-2292		+ `Np`: Number of standardization parameters to fit
-2293		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
-2294		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
-2295		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
-2296		'''
-2297		for session in self.sessions:
-2298			if 'd13Cwg_VPDB' not in self.sessions[session]:
-2299				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
-2300			if 'd18Owg_VSMOW' not in self.sessions[session]:
-2301				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
-2302			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
-2303			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
+2270		+ `Na`: Number of anchor analyses in the session
+2271		+ `Nu`: Number of unknown analyses in the session
+2272		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
+2273		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
+2274		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
+2275		+ `a`: scrambling factor
+2276		+ `b`: compositional slope
+2277		+ `c`: WG offset
+2278		+ `SE_a`: Model standard error of `a`
+2279		+ `SE_b`: Model standard error of `b`
+2280		+ `SE_c`: Model standard error of `c`
+2281		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
+2282		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
+2283		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
+2284		+ `a2`: scrambling factor drift
+2285		+ `b2`: compositional slope drift
+2286		+ `c2`: WG offset drift
+2287		+ `Np`: Number of standardization parameters to fit
+2288		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
+2289		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
+2290		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
+2291		'''
+2292		for session in self.sessions:
+2293			if 'd13Cwg_VPDB' not in self.sessions[session]:
+2294				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
+2295			if 'd18Owg_VSMOW' not in self.sessions[session]:
+2296				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
+2297			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
+2298			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
+2299
+2300			self.msg(f'Computing repeatabilities for session {session}')
+2301			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
+2302			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
+2303			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
 2304
-2305			self.msg(f'Computing repeatabilities for session {session}')
-2306			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
-2307			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
-2308			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
-2309
-2310		if self.standardization_method == 'pooled':
-2311			for session in self.sessions:
-2312
-2313				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
-2314				i = self.standardization.var_names.index(f'a_{pf(session)}')
-2315				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
-2316
-2317				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
-2318				i = self.standardization.var_names.index(f'b_{pf(session)}')
-2319				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
-2320
-2321				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
-2322				i = self.standardization.var_names.index(f'c_{pf(session)}')
-2323				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
-2324
-2325				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
-2326				if self.sessions[session]['scrambling_drift']:
-2327					i = self.standardization.var_names.index(f'a2_{pf(session)}')
-2328					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
-2329				else:
-2330					self.sessions[session]['SE_a2'] = 0.
-2331
-2332				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
-2333				if self.sessions[session]['slope_drift']:
-2334					i = self.standardization.var_names.index(f'b2_{pf(session)}')
-2335					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
-2336				else:
-2337					self.sessions[session]['SE_b2'] = 0.
-2338
-2339				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
-2340				if self.sessions[session]['wg_drift']:
-2341					i = self.standardization.var_names.index(f'c2_{pf(session)}')
-2342					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
-2343				else:
-2344					self.sessions[session]['SE_c2'] = 0.
-2345
-2346				i = self.standardization.var_names.index(f'a_{pf(session)}')
-2347				j = self.standardization.var_names.index(f'b_{pf(session)}')
-2348				k = self.standardization.var_names.index(f'c_{pf(session)}')
-2349				CM = np.zeros((6,6))
-2350				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
-2351				try:
-2352					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
-2353					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
-2354					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
-2355					try:
-2356						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
-2357						CM[3,4] = self.standardization.covar[i2,j2]
-2358						CM[4,3] = self.standardization.covar[j2,i2]
-2359					except ValueError:
-2360						pass
-2361					try:
-2362						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2363						CM[3,5] = self.standardization.covar[i2,k2]
-2364						CM[5,3] = self.standardization.covar[k2,i2]
-2365					except ValueError:
-2366						pass
-2367				except ValueError:
-2368					pass
-2369				try:
-2370					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
-2371					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
-2372					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
-2373					try:
-2374						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2375						CM[4,5] = self.standardization.covar[j2,k2]
-2376						CM[5,4] = self.standardization.covar[k2,j2]
-2377					except ValueError:
-2378						pass
-2379				except ValueError:
-2380					pass
-2381				try:
-2382					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2383					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
-2384					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
-2385				except ValueError:
-2386					pass
+2305		if self.standardization_method == 'pooled':
+2306			for session in self.sessions:
+2307
+2308				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
+2309				i = self.standardization.var_names.index(f'a_{pf(session)}')
+2310				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
+2311
+2312				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
+2313				i = self.standardization.var_names.index(f'b_{pf(session)}')
+2314				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
+2315
+2316				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
+2317				i = self.standardization.var_names.index(f'c_{pf(session)}')
+2318				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
+2319
+2320				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
+2321				if self.sessions[session]['scrambling_drift']:
+2322					i = self.standardization.var_names.index(f'a2_{pf(session)}')
+2323					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
+2324				else:
+2325					self.sessions[session]['SE_a2'] = 0.
+2326
+2327				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
+2328				if self.sessions[session]['slope_drift']:
+2329					i = self.standardization.var_names.index(f'b2_{pf(session)}')
+2330					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
+2331				else:
+2332					self.sessions[session]['SE_b2'] = 0.
+2333
+2334				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
+2335				if self.sessions[session]['wg_drift']:
+2336					i = self.standardization.var_names.index(f'c2_{pf(session)}')
+2337					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
+2338				else:
+2339					self.sessions[session]['SE_c2'] = 0.
+2340
+2341				i = self.standardization.var_names.index(f'a_{pf(session)}')
+2342				j = self.standardization.var_names.index(f'b_{pf(session)}')
+2343				k = self.standardization.var_names.index(f'c_{pf(session)}')
+2344				CM = np.zeros((6,6))
+2345				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
+2346				try:
+2347					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
+2348					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
+2349					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
+2350					try:
+2351						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
+2352						CM[3,4] = self.standardization.covar[i2,j2]
+2353						CM[4,3] = self.standardization.covar[j2,i2]
+2354					except ValueError:
+2355						pass
+2356					try:
+2357						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2358						CM[3,5] = self.standardization.covar[i2,k2]
+2359						CM[5,3] = self.standardization.covar[k2,i2]
+2360					except ValueError:
+2361						pass
+2362				except ValueError:
+2363					pass
+2364				try:
+2365					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
+2366					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
+2367					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
+2368					try:
+2369						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2370						CM[4,5] = self.standardization.covar[j2,k2]
+2371						CM[5,4] = self.standardization.covar[k2,j2]
+2372					except ValueError:
+2373						pass
+2374				except ValueError:
+2375					pass
+2376				try:
+2377					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2378					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
+2379					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
+2380				except ValueError:
+2381					pass
+2382
+2383				self.sessions[session]['CM'] = CM
+2384
+2385		elif self.standardization_method == 'indep_sessions':
+2386			pass # Not implemented yet
 2387
-2388				self.sessions[session]['CM'] = CM
-2389
-2390		elif self.standardization_method == 'indep_sessions':
-2391			pass # Not implemented yet
-2392
-2393
-2394	@make_verbal
-2395	def repeatabilities(self):
-2396		'''
-2397		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
-2398		(for all samples, for anchors, and for unknowns).
-2399		'''
-2400		self.msg('Computing repeatabilities for all sessions')
-2401
-2402		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
-2403		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
-2404		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
-2405		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
-2406		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
-2407
-2408
-2409	@make_verbal
-2410	def consolidate(self, tables = True, plots = True):
-2411		'''
-2412		Collect information about samples, sessions and repeatabilities.
-2413		'''
-2414		self.consolidate_samples()
-2415		self.consolidate_sessions()
-2416		self.repeatabilities()
-2417
-2418		if tables:
-2419			self.summary()
-2420			self.table_of_sessions()
-2421			self.table_of_analyses()
-2422			self.table_of_samples()
-2423
-2424		if plots:
-2425			self.plot_sessions()
-2426
-2427
-2428	@make_verbal
-2429	def rmswd(self,
-2430		samples = 'all samples',
-2431		sessions = 'all sessions',
-2432		):
-2433		'''
-2434		Compute the χ2, root mean squared weighted deviation
-2435		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
-2436		Δ4x values for samples in `samples` and sessions in `sessions`.
-2437		
-2438		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
-2439		'''
-2440		if samples == 'all samples':
-2441			mysamples = [k for k in self.samples]
-2442		elif samples == 'anchors':
-2443			mysamples = [k for k in self.anchors]
-2444		elif samples == 'unknowns':
-2445			mysamples = [k for k in self.unknowns]
-2446		else:
-2447			mysamples = samples
-2448
-2449		if sessions == 'all sessions':
-2450			sessions = [k for k in self.sessions]
-2451
-2452		chisq, Nf = 0, 0
-2453		for sample in mysamples :
-2454			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2455			if len(G) > 1 :
-2456				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
-2457				Nf += (len(G) - 1)
-2458				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
-2459		r = (chisq / Nf)**.5 if Nf > 0 else 0
-2460		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
-2461		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
-2462
-2463	
-2464	@make_verbal
-2465	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
-2466		'''
-2467		Compute the repeatability of `[r[key] for r in self]`
-2468		'''
-2469		# NB: it's debatable whether rD47 should be computed
-2470		# with Nf = len(self)-len(self.samples) instead of
-2471		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
-2472
-2473		if samples == 'all samples':
-2474			mysamples = [k for k in self.samples]
-2475		elif samples == 'anchors':
-2476			mysamples = [k for k in self.anchors]
-2477		elif samples == 'unknowns':
-2478			mysamples = [k for k in self.unknowns]
-2479		else:
-2480			mysamples = samples
-2481
-2482		if sessions == 'all sessions':
-2483			sessions = [k for k in self.sessions]
-2484
-2485		if key in ['D47', 'D48']:
-2486			chisq, Nf = 0, 0
-2487			for sample in mysamples :
-2488				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2489				if len(X) > 1 :
-2490					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
-2491					if sample in self.unknowns:
-2492						Nf += len(X) - 1
-2493					else:
-2494						Nf += len(X)
-2495			if samples in ['anchors', 'all samples']:
-2496				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
-2497			r = (chisq / Nf)**.5 if Nf > 0 else 0
-2498
-2499		else: # if key not in ['D47', 'D48']
-2500			chisq, Nf = 0, 0
-2501			for sample in mysamples :
-2502				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2503				if len(X) > 1 :
-2504					Nf += len(X) - 1
-2505					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
-2506			r = (chisq / Nf)**.5 if Nf > 0 else 0
-2507
-2508		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
-2509		return r
-2510
-2511	def sample_average(self, samples, weights = 'equal', normalize = True):
-2512		'''
-2513		Weighted average Δ4x value of a group of samples, accounting for covariance.
-2514
-2515		Returns the weighted average Δ4x value and associated SE
-2516		of a group of samples. Weights are equal by default. If `normalize` is
-2517		true, `weights` will be rescaled so that their sum equals 1.
-2518
-2519		**Examples**
-2520
-2521		```python
-2522		self.sample_average(['X','Y'], [1, 2])
-2523		```
-2524
-2525		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
-2526		where Δ4x(X) and Δ4x(Y) are the average Δ4x
-2527		values of samples X and Y, respectively.
-2528
-2529		```python
-2530		self.sample_average(['X','Y'], [1, -1], normalize = False)
-2531		```
+2388
+2389	@make_verbal
+2390	def repeatabilities(self):
+2391		'''
+2392		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
+2393		(for all samples, for anchors, and for unknowns).
+2394		'''
+2395		self.msg('Computing repeatabilities for all sessions')
+2396
+2397		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
+2398		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
+2399		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
+2400		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
+2401		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
+2402
+2403
+2404	@make_verbal
+2405	def consolidate(self, tables = True, plots = True):
+2406		'''
+2407		Collect information about samples, sessions and repeatabilities.
+2408		'''
+2409		self.consolidate_samples()
+2410		self.consolidate_sessions()
+2411		self.repeatabilities()
+2412
+2413		if tables:
+2414			self.summary()
+2415			self.table_of_sessions()
+2416			self.table_of_analyses()
+2417			self.table_of_samples()
+2418
+2419		if plots:
+2420			self.plot_sessions()
+2421
+2422
+2423	@make_verbal
+2424	def rmswd(self,
+2425		samples = 'all samples',
+2426		sessions = 'all sessions',
+2427		):
+2428		'''
+2429		Compute the χ2, root mean squared weighted deviation
+2430		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
+2431		Δ4x values for samples in `samples` and sessions in `sessions`.
+2432		
+2433		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
+2434		'''
+2435		if samples == 'all samples':
+2436			mysamples = [k for k in self.samples]
+2437		elif samples == 'anchors':
+2438			mysamples = [k for k in self.anchors]
+2439		elif samples == 'unknowns':
+2440			mysamples = [k for k in self.unknowns]
+2441		else:
+2442			mysamples = samples
+2443
+2444		if sessions == 'all sessions':
+2445			sessions = [k for k in self.sessions]
+2446
+2447		chisq, Nf = 0, 0
+2448		for sample in mysamples :
+2449			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2450			if len(G) > 1 :
+2451				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
+2452				Nf += (len(G) - 1)
+2453				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
+2454		r = (chisq / Nf)**.5 if Nf > 0 else 0
+2455		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
+2456		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
+2457
+2458	
+2459	@make_verbal
+2460	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
+2461		'''
+2462		Compute the repeatability of `[r[key] for r in self]`
+2463		'''
+2464		# NB: it's debatable whether rD47 should be computed
+2465		# with Nf = len(self)-len(self.samples) instead of
+2466		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
+2467
+2468		if samples == 'all samples':
+2469			mysamples = [k for k in self.samples]
+2470		elif samples == 'anchors':
+2471			mysamples = [k for k in self.anchors]
+2472		elif samples == 'unknowns':
+2473			mysamples = [k for k in self.unknowns]
+2474		else:
+2475			mysamples = samples
+2476
+2477		if sessions == 'all sessions':
+2478			sessions = [k for k in self.sessions]
+2479
+2480		if key in ['D47', 'D48']:
+2481			chisq, Nf = 0, 0
+2482			for sample in mysamples :
+2483				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2484				if len(X) > 1 :
+2485					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
+2486					if sample in self.unknowns:
+2487						Nf += len(X) - 1
+2488					else:
+2489						Nf += len(X)
+2490			if samples in ['anchors', 'all samples']:
+2491				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
+2492			r = (chisq / Nf)**.5 if Nf > 0 else 0
+2493
+2494		else: # if key not in ['D47', 'D48']
+2495			chisq, Nf = 0, 0
+2496			for sample in mysamples :
+2497				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2498				if len(X) > 1 :
+2499					Nf += len(X) - 1
+2500					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
+2501			r = (chisq / Nf)**.5 if Nf > 0 else 0
+2502
+2503		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
+2504		return r
+2505
+2506	def sample_average(self, samples, weights = 'equal', normalize = True):
+2507		'''
+2508		Weighted average Δ4x value of a group of samples, accounting for covariance.
+2509
+2510		Returns the weighted average Δ4x value and associated SE
+2511		of a group of samples. Weights are equal by default. If `normalize` is
+2512		true, `weights` will be rescaled so that their sum equals 1.
+2513
+2514		**Examples**
+2515
+2516		```python
+2517		self.sample_average(['X','Y'], [1, 2])
+2518		```
+2519
+2520		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
+2521		where Δ4x(X) and Δ4x(Y) are the average Δ4x
+2522		values of samples X and Y, respectively.
+2523
+2524		```python
+2525		self.sample_average(['X','Y'], [1, -1], normalize = False)
+2526		```
+2527
+2528		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
+2529		'''
+2530		if weights == 'equal':
+2531			weights = [1/len(samples)] * len(samples)
 2532
-2533		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
-2534		'''
-2535		if weights == 'equal':
-2536			weights = [1/len(samples)] * len(samples)
+2533		if normalize:
+2534			s = sum(weights)
+2535			if s:
+2536				weights = [w/s for w in weights]
 2537
-2538		if normalize:
-2539			s = sum(weights)
-2540			if s:
-2541				weights = [w/s for w in weights]
-2542
-2543		try:
-2544# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
-2545# 			C = self.standardization.covar[indices,:][:,indices]
-2546			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
-2547			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
-2548			return correlated_sum(X, C, weights)
-2549		except ValueError:
-2550			return (0., 0.)
+2538		try:
+2539# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
+2540# 			C = self.standardization.covar[indices,:][:,indices]
+2541			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
+2542			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
+2543			return correlated_sum(X, C, weights)
+2544		except ValueError:
+2545			return (0., 0.)
+2546
+2547
+2548	def sample_D4x_covar(self, sample1, sample2 = None):
+2549		'''
+2550		Covariance between Δ4x values of samples
 2551
-2552
-2553	def sample_D4x_covar(self, sample1, sample2 = None):
-2554		'''
-2555		Covariance between Δ4x values of samples
-2556
-2557		Returns the error covariance between the average Δ4x values of two
-2558		samples. If only `sample1` is specified, or if `sample1 == sample2`,
-2559		returns the Δ4x variance for that sample.
-2560		'''
-2561		if sample2 is None:
-2562			sample2 = sample1
-2563		if self.standardization_method == 'pooled':
-2564			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
-2565			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
-2566			return self.standardization.covar[i, j]
-2567		elif self.standardization_method == 'indep_sessions':
-2568			if sample1 == sample2:
-2569				return self.samples[sample1][f'SE_D{self._4x}']**2
-2570			else:
-2571				c = 0
-2572				for session in self.sessions:
-2573					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
-2574					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
-2575					if sdata1 and sdata2:
-2576						a = self.sessions[session]['a']
-2577						# !! TODO: CM below does not account for temporal changes in standardization parameters
-2578						CM = self.sessions[session]['CM'][:3,:3]
-2579						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
-2580						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
-2581						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
-2582						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
-2583						c += (
-2584							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
-2585							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
-2586							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
-2587							@ CM
-2588							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
-2589							) / a**2
-2590				return float(c)
-2591
-2592	def sample_D4x_correl(self, sample1, sample2 = None):
-2593		'''
-2594		Correlation between Δ4x errors of samples
-2595
-2596		Returns the error correlation between the average Δ4x values of two samples.
-2597		'''
-2598		if sample2 is None or sample2 == sample1:
-2599			return 1.
-2600		return (
-2601			self.sample_D4x_covar(sample1, sample2)
-2602			/ self.unknowns[sample1][f'SE_D{self._4x}']
-2603			/ self.unknowns[sample2][f'SE_D{self._4x}']
-2604			)
-2605
-2606	def plot_single_session(self,
-2607		session,
-2608		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
-2609		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
-2610		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
-2611		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
-2612		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
-2613		xylimits = 'free', # | 'constant'
-2614		x_label = None,
-2615		y_label = None,
-2616		error_contour_interval = 'auto',
-2617		fig = 'new',
-2618		):
-2619		'''
-2620		Generate plot for a single session
-2621		'''
-2622		if x_label is None:
-2623			x_label = f'δ$_{{{self._4x}}}$ (‰)'
-2624		if y_label is None:
-2625			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
-2626
-2627		out = _SessionPlot()
-2628		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
-2629		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
-2630		
-2631		if fig == 'new':
-2632			out.fig = ppl.figure(figsize = (6,6))
-2633			ppl.subplots_adjust(.1,.1,.9,.9)
-2634
-2635		out.anchor_analyses, = ppl.plot(
-2636			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
-2637			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
-2638			**kw_plot_anchors)
-2639		out.unknown_analyses, = ppl.plot(
-2640			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
-2641			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
-2642			**kw_plot_unknowns)
-2643		out.anchor_avg = ppl.plot(
-2644			np.array([ np.array([
-2645				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
-2646				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
-2647				]) for sample in anchors]).T,
-2648			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
-2649			**kw_plot_anchor_avg)
-2650		out.unknown_avg = ppl.plot(
-2651			np.array([ np.array([
-2652				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
-2653				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
-2654				]) for sample in unknowns]).T,
-2655			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
-2656			**kw_plot_unknown_avg)
-2657		if xylimits == 'constant':
-2658			x = [r[f'd{self._4x}'] for r in self]
-2659			y = [r[f'D{self._4x}'] for r in self]
-2660			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
-2661			w, h = x2-x1, y2-y1
-2662			x1 -= w/20
-2663			x2 += w/20
-2664			y1 -= h/20
-2665			y2 += h/20
-2666			ppl.axis([x1, x2, y1, y2])
-2667		elif xylimits == 'free':
-2668			x1, x2, y1, y2 = ppl.axis()
-2669		else:
-2670			x1, x2, y1, y2 = ppl.axis(xylimits)
-2671				
-2672		if error_contour_interval != 'none':
-2673			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
-2674			XI,YI = np.meshgrid(xi, yi)
-2675			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
-2676			if error_contour_interval == 'auto':
-2677				rng = np.max(SI) - np.min(SI)
-2678				if rng <= 0.01:
-2679					cinterval = 0.001
-2680				elif rng <= 0.03:
-2681					cinterval = 0.004
-2682				elif rng <= 0.1:
-2683					cinterval = 0.01
-2684				elif rng <= 0.3:
-2685					cinterval = 0.03
-2686				elif rng <= 1.:
-2687					cinterval = 0.1
-2688				else:
-2689					cinterval = 0.5
-2690			else:
-2691				cinterval = error_contour_interval
-2692
-2693			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
-2694			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
-2695			out.clabel = ppl.clabel(out.contour)
-2696
-2697		ppl.xlabel(x_label)
-2698		ppl.ylabel(y_label)
-2699		ppl.title(session, weight = 'bold')
-2700		ppl.grid(alpha = .2)
-2701		out.ax = ppl.gca()		
-2702
-2703		return out
-2704
-2705	def plot_residuals(
-2706		self,
-2707		hist = False,
-2708		binwidth = 2/3,
-2709		dir = 'output',
-2710		filename = None,
-2711		highlight = [],
-2712		colors = None,
-2713		figsize = None,
-2714		):
-2715		'''
-2716		Plot residuals of each analysis as a function of time (actually, as a function of
-2717		the order of analyses in the `D4xdata` object)
-2718
-2719		+ `hist`: whether to add a histogram of residuals
-2720		+ `binwidth`: histogram bin width, in units of the Δ4x repeatability (1 SD)
-2721		+ `dir`: the directory in which to save the plot
-2722		+ `highlight`: a list of samples to highlight
-2723		+ `colors`: a dict of `{<sample>: <color>}` for all samples
-2724		+ `figsize`: (width, height) of figure
-2725		'''
-2726		# Layout
-2727		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
-2728		if hist:
-2729			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
-2730			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
-2731		else:
-2732			ppl.subplots_adjust(.08,.05,.78,.8)
-2733			ax1 = ppl.subplot(111)
-2734		
-2735		# Colors
-2736		N = len(self.anchors)
-2737		if colors is None:
-2738			if len(highlight) > 0:
-2739				Nh = len(highlight)
-2740				if Nh == 1:
-2741					colors = {highlight[0]: (0,0,0)}
-2742				elif Nh == 3:
-2743					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
-2744				elif Nh == 4:
-2745					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
-2746				else:
-2747					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
-2748			else:
-2749				if N == 3:
-2750					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
-2751				elif N == 4:
-2752					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
-2753				else:
-2754					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
-2755
-2756		ppl.sca(ax1)
-2757		
-2758		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
-2759
-2760		session = self[0]['Session']
-2761		x1 = 0
-2762# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
-2763		x_sessions = {}
-2764		one_or_more_singlets = False
-2765		one_or_more_multiplets = False
-2766		multiplets = set()
-2767		for k,r in enumerate(self):
-2768			if r['Session'] != session:
-2769				x2 = k-1
-2770				x_sessions[session] = (x1+x2)/2
-2771				ppl.axvline(k - 0.5, color = 'k', lw = .5)
-2772				session = r['Session']
-2773				x1 = k
-2774			singlet = len(self.samples[r['Sample']]['data']) == 1
-2775			if not singlet:
-2776				multiplets.add(r['Sample'])
-2777			if r['Sample'] in self.unknowns:
-2778				if singlet:
-2779					one_or_more_singlets = True
-2780				else:
-2781					one_or_more_multiplets = True
-2782			kw = dict(
-2783				marker = 'x' if singlet else '+',
-2784				ms = 4 if singlet else 5,
-2785				ls = 'None',
-2786				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
-2787				mew = 1,
-2788				alpha = 0.2 if singlet else 1,
-2789				)
-2790			if highlight and r['Sample'] not in highlight:
-2791				kw['alpha'] = 0.2
-2792			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
-2793		x2 = k
-2794		x_sessions[session] = (x1+x2)/2
-2795
-2796		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
-2797		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
-2798		if not hist:
-2799			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
-2800			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
-2801
-2802		xmin, xmax, ymin, ymax = ppl.axis()
-2803		for s in x_sessions:
-2804			ppl.text(
-2805				x_sessions[s],
-2806				ymax +1,
-2807				s,
-2808				va = 'bottom',
-2809				**(
-2810					dict(ha = 'center')
-2811					if len(self.sessions[s]['data']) > (0.15 * len(self))
-2812					else dict(ha = 'left', rotation = 45)
-2813					)
-2814				)
-2815
-2816		if hist:
-2817			ppl.sca(ax2)
-2818
-2819		for s in colors:
-2820			kw['marker'] = '+'
-2821			kw['ms'] = 5
-2822			kw['mec'] = colors[s]
-2823			kw['label'] = s
-2824			kw['alpha'] = 1
-2825			ppl.plot([], [], **kw)
-2826
-2827		kw['mec'] = (0,0,0)
-2828
-2829		if one_or_more_singlets:
-2830			kw['marker'] = 'x'
-2831			kw['ms'] = 4
-2832			kw['alpha'] = .2
-2833			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
-2834			ppl.plot([], [], **kw)
-2835
-2836		if one_or_more_multiplets:
-2837			kw['marker'] = '+'
-2838			kw['ms'] = 4
-2839			kw['alpha'] = 1
-2840			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
-2841			ppl.plot([], [], **kw)
-2842
-2843		if hist:
-2844			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
-2845		else:
-2846			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
-2847		leg.set_zorder(-1000)
-2848
-2849		ppl.sca(ax1)
-2850
-2851		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
-2852		ppl.xticks([])
-2853		ppl.axis([-1, len(self), None, None])
-2854
-2855		if hist:
-2856			ppl.sca(ax2)
-2857			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
-2858			ppl.hist(
-2859				X,
-2860				orientation = 'horizontal',
-2861				histtype = 'stepfilled',
-2862				ec = [.4]*3,
-2863				fc = [.25]*3,
-2864				alpha = .25,
-2865				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
-2866				)
-2867			ppl.axis([None, None, ymin, ymax])
-2868			ppl.text(0, 0,
-2869				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
-2870				size = 8,
-2871				alpha = 1,
-2872				va = 'center',
-2873				ha = 'left',
-2874				)
-2875
-2876			ppl.xticks([])
-2877			ppl.yticks([])
-2878# 			ax2.spines['left'].set_visible(False)
-2879			ax2.spines['right'].set_visible(False)
-2880			ax2.spines['top'].set_visible(False)
-2881			ax2.spines['bottom'].set_visible(False)
-2882
-2883
-2884		if not os.path.exists(dir):
-2885			os.makedirs(dir)
-2886		if filename is None:
-2887			return fig
-2888		elif filename == '':
-2889			filename = f'D{self._4x}_residuals.pdf'
-2890		ppl.savefig(f'{dir}/{filename}')
-2891		ppl.close(fig)
-2892				
-2893
-2894	def simulate(self, *args, **kwargs):
-2895		'''
-2896		Legacy function: raises a `DeprecationWarning` pointing to `virtual_data()`
-2897		'''
-2898		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
-2899
-2900	def plot_distribution_of_analyses(
-2901		self,
-2902		dir = 'output',
-2903		filename = None,
-2904		vs_time = False,
-2905		figsize = (6,4),
-2906		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
-2907		output = None,
-2908		):
-2909		'''
-2910		Plot temporal distribution of all analyses in the data set.
-2911		
-2912		**Parameters**
-2913
-2914		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
-2915		'''
-2916
-2917		asamples = [s for s in self.anchors]
-2918		usamples = [s for s in self.unknowns]
-2919		if output is None or output == 'fig':
-2920			fig = ppl.figure(figsize = figsize)
-2921			ppl.subplots_adjust(*subplots_adjust)
-2922		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
-2923		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
-2924		Xmax += (Xmax-Xmin)/40
-2925		Xmin -= (Xmax-Xmin)/41
-2926		for k, s in enumerate(asamples + usamples):
-2927			if vs_time:
-2928				X = [r['TimeTag'] for r in self if r['Sample'] == s]
-2929			else:
-2930				X = [x for x,r in enumerate(self) if r['Sample'] == s]
-2931			Y = [-k for x in X]
-2932			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
-2933			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
-2934			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
-2935		ppl.axis([Xmin, Xmax, -k-1, 1])
-2936		ppl.xlabel('\ntime')
-2937		ppl.gca().annotate('',
-2938			xy = (0.6, -0.02),
-2939			xycoords = 'axes fraction',
-2940			xytext = (.4, -0.02), 
-2941            arrowprops = dict(arrowstyle = "->", color = 'k'),
-2942            )
-2943			
-2944
-2945		x2 = -1
-2946		for session in self.sessions:
-2947			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
-2948			if vs_time:
-2949				ppl.axvline(x1, color = 'k', lw = .75)
-2950			if x2 > -1:
-2951				if not vs_time:
-2952					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
-2953			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
-2954# 			from xlrd import xldate_as_datetime
-2955# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
-2956			if vs_time:
-2957				ppl.axvline(x2, color = 'k', lw = .75)
-2958				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
-2959			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
-2960
-2961		ppl.xticks([])
-2962		ppl.yticks([])
-2963
-2964		if output is None:
-2965			if not os.path.exists(dir):
-2966				os.makedirs(dir)
-2967			if filename is None:
-2968				filename = f'D{self._4x}_distribution_of_analyses.pdf'
-2969			ppl.savefig(f'{dir}/{filename}')
-2970			ppl.close(fig)
-2971		elif output == 'ax':
-2972			return ppl.gca()
-2973		elif output == 'fig':
-2974			return fig
+2552		Returns the error covariance between the average Δ4x values of two
+2553		samples. If only `sample1` is specified, or if `sample1 == sample2`,
+2554		returns the Δ4x variance for that sample.
+2555		'''
+2556		if sample2 is None:
+2557			sample2 = sample1
+2558		if self.standardization_method == 'pooled':
+2559			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
+2560			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
+2561			return self.standardization.covar[i, j]
+2562		elif self.standardization_method == 'indep_sessions':
+2563			if sample1 == sample2:
+2564				return self.samples[sample1][f'SE_D{self._4x}']**2
+2565			else:
+2566				c = 0
+2567				for session in self.sessions:
+2568					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
+2569					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
+2570					if sdata1 and sdata2:
+2571						a = self.sessions[session]['a']
+2572						# !! TODO: CM below does not account for temporal changes in standardization parameters
+2573						CM = self.sessions[session]['CM'][:3,:3]
+2574						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
+2575						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
+2576						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
+2577						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
+2578						c += (
+2579							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
+2580							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
+2581							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
+2582							@ CM
+2583							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
+2584							) / a**2
+2585				return float(c)
+2586
+2587	def sample_D4x_correl(self, sample1, sample2 = None):
+2588		'''
+2589		Correlation between Δ4x errors of samples
+2590
+2591		Returns the error correlation between the average Δ4x values of two samples.
+2592		'''
+2593		if sample2 is None or sample2 == sample1:
+2594			return 1.
+2595		return (
+2596			self.sample_D4x_covar(sample1, sample2)
+2597			/ self.unknowns[sample1][f'SE_D{self._4x}']
+2598			/ self.unknowns[sample2][f'SE_D{self._4x}']
+2599			)
+2600
+2601	def plot_single_session(self,
+2602		session,
+2603		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
+2604		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
+2605		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
+2606		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
+2607		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
+2608		xylimits = 'free', # | 'constant'
+2609		x_label = None,
+2610		y_label = None,
+2611		error_contour_interval = 'auto',
+2612		fig = 'new',
+2613		):
+2614		'''
+2615		Generate plot for a single session
+2616		'''
+2617		if x_label is None:
+2618			x_label = f'δ$_{{{self._4x}}}$ (‰)'
+2619		if y_label is None:
+2620			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
+2621
+2622		out = _SessionPlot()
+2623		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
+2624		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
+2625		
+2626		if fig == 'new':
+2627			out.fig = ppl.figure(figsize = (6,6))
+2628			ppl.subplots_adjust(.1,.1,.9,.9)
+2629
+2630		out.anchor_analyses, = ppl.plot(
+2631			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
+2632			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
+2633			**kw_plot_anchors)
+2634		out.unknown_analyses, = ppl.plot(
+2635			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
+2636			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
+2637			**kw_plot_unknowns)
+2638		out.anchor_avg = ppl.plot(
+2639			np.array([ np.array([
+2640				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
+2641				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
+2642				]) for sample in anchors]).T,
+2643			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
+2644			**kw_plot_anchor_avg)
+2645		out.unknown_avg = ppl.plot(
+2646			np.array([ np.array([
+2647				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
+2648				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
+2649				]) for sample in unknowns]).T,
+2650			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
+2651			**kw_plot_unknown_avg)
+2652		if xylimits == 'constant':
+2653			x = [r[f'd{self._4x}'] for r in self]
+2654			y = [r[f'D{self._4x}'] for r in self]
+2655			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
+2656			w, h = x2-x1, y2-y1
+2657			x1 -= w/20
+2658			x2 += w/20
+2659			y1 -= h/20
+2660			y2 += h/20
+2661			ppl.axis([x1, x2, y1, y2])
+2662		elif xylimits == 'free':
+2663			x1, x2, y1, y2 = ppl.axis()
+2664		else:
+2665			x1, x2, y1, y2 = ppl.axis(xylimits)
+2666				
+2667		if error_contour_interval != 'none':
+2668			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
+2669			XI,YI = np.meshgrid(xi, yi)
+2670			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
+2671			if error_contour_interval == 'auto':
+2672				rng = np.max(SI) - np.min(SI)
+2673				if rng <= 0.01:
+2674					cinterval = 0.001
+2675				elif rng <= 0.03:
+2676					cinterval = 0.004
+2677				elif rng <= 0.1:
+2678					cinterval = 0.01
+2679				elif rng <= 0.3:
+2680					cinterval = 0.03
+2681				elif rng <= 1.:
+2682					cinterval = 0.1
+2683				else:
+2684					cinterval = 0.5
+2685			else:
+2686				cinterval = error_contour_interval
+2687
+2688			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
+2689			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
+2690			out.clabel = ppl.clabel(out.contour)
+2691
+2692		ppl.xlabel(x_label)
+2693		ppl.ylabel(y_label)
+2694		ppl.title(session, weight = 'bold')
+2695		ppl.grid(alpha = .2)
+2696		out.ax = ppl.gca()		
+2697
+2698		return out
+2699
+2700	def plot_residuals(
+2701		self,
+2702		hist = False,
+2703		binwidth = 2/3,
+2704		dir = 'output',
+2705		filename = None,
+2706		highlight = [],
+2707		colors = None,
+2708		figsize = None,
+2709		):
+2710		'''
+2711		Plot residuals of each analysis as a function of time (actually, as a function of
+2712		the order of analyses in the `D4xdata` object)
+2713
+2714		+ `hist`: whether to add a histogram of residuals
+2715		+ `binwidth`: histogram bin width, in units of the Δ4x repeatability (1 SD)
+2716		+ `dir`: the directory in which to save the plot
+2717		+ `highlight`: a list of samples to highlight
+2718		+ `colors`: a dict of `{<sample>: <color>}` for all samples
+2719		+ `figsize`: (width, height) of figure
+2720		'''
+2721		# Layout
+2722		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
+2723		if hist:
+2724			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
+2725			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
+2726		else:
+2727			ppl.subplots_adjust(.08,.05,.78,.8)
+2728			ax1 = ppl.subplot(111)
+2729		
+2730		# Colors
+2731		N = len(self.anchors)
+2732		if colors is None:
+2733			if len(highlight) > 0:
+2734				Nh = len(highlight)
+2735				if Nh == 1:
+2736					colors = {highlight[0]: (0,0,0)}
+2737				elif Nh == 3:
+2738					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
+2739				elif Nh == 4:
+2740					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
+2741				else:
+2742					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
+2743			else:
+2744				if N == 3:
+2745					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
+2746				elif N == 4:
+2747					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
+2748				else:
+2749					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
+2750
+2751		ppl.sca(ax1)
+2752		
+2753		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
+2754
+2755		session = self[0]['Session']
+2756		x1 = 0
+2757# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
+2758		x_sessions = {}
+2759		one_or_more_singlets = False
+2760		one_or_more_multiplets = False
+2761		multiplets = set()
+2762		for k,r in enumerate(self):
+2763			if r['Session'] != session:
+2764				x2 = k-1
+2765				x_sessions[session] = (x1+x2)/2
+2766				ppl.axvline(k - 0.5, color = 'k', lw = .5)
+2767				session = r['Session']
+2768				x1 = k
+2769			singlet = len(self.samples[r['Sample']]['data']) == 1
+2770			if not singlet:
+2771				multiplets.add(r['Sample'])
+2772			if r['Sample'] in self.unknowns:
+2773				if singlet:
+2774					one_or_more_singlets = True
+2775				else:
+2776					one_or_more_multiplets = True
+2777			kw = dict(
+2778				marker = 'x' if singlet else '+',
+2779				ms = 4 if singlet else 5,
+2780				ls = 'None',
+2781				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
+2782				mew = 1,
+2783				alpha = 0.2 if singlet else 1,
+2784				)
+2785			if highlight and r['Sample'] not in highlight:
+2786				kw['alpha'] = 0.2
+2787			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
+2788		x2 = k
+2789		x_sessions[session] = (x1+x2)/2
+2790
+2791		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
+2792		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
+2793		if not hist:
+2794			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
+2795			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
+2796
+2797		xmin, xmax, ymin, ymax = ppl.axis()
+2798		for s in x_sessions:
+2799			ppl.text(
+2800				x_sessions[s],
+2801				ymax +1,
+2802				s,
+2803				va = 'bottom',
+2804				**(
+2805					dict(ha = 'center')
+2806					if len(self.sessions[s]['data']) > (0.15 * len(self))
+2807					else dict(ha = 'left', rotation = 45)
+2808					)
+2809				)
+2810
+2811		if hist:
+2812			ppl.sca(ax2)
+2813
+2814		for s in colors:
+2815			kw['marker'] = '+'
+2816			kw['ms'] = 5
+2817			kw['mec'] = colors[s]
+2818			kw['label'] = s
+2819			kw['alpha'] = 1
+2820			ppl.plot([], [], **kw)
+2821
+2822		kw['mec'] = (0,0,0)
+2823
+2824		if one_or_more_singlets:
+2825			kw['marker'] = 'x'
+2826			kw['ms'] = 4
+2827			kw['alpha'] = .2
+2828			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
+2829			ppl.plot([], [], **kw)
+2830
+2831		if one_or_more_multiplets:
+2832			kw['marker'] = '+'
+2833			kw['ms'] = 4
+2834			kw['alpha'] = 1
+2835			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
+2836			ppl.plot([], [], **kw)
+2837
+2838		if hist:
+2839			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
+2840		else:
+2841			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
+2842		leg.set_zorder(-1000)
+2843
+2844		ppl.sca(ax1)
+2845
+2846		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
+2847		ppl.xticks([])
+2848		ppl.axis([-1, len(self), None, None])
+2849
+2850		if hist:
+2851			ppl.sca(ax2)
+2852			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
+2853			ppl.hist(
+2854				X,
+2855				orientation = 'horizontal',
+2856				histtype = 'stepfilled',
+2857				ec = [.4]*3,
+2858				fc = [.25]*3,
+2859				alpha = .25,
+2860				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
+2861				)
+2862			ppl.axis([None, None, ymin, ymax])
+2863			ppl.text(0, 0,
+2864				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
+2865				size = 8,
+2866				alpha = 1,
+2867				va = 'center',
+2868				ha = 'left',
+2869				)
+2870
+2871			ppl.xticks([])
+2872			ppl.yticks([])
+2873# 			ax2.spines['left'].set_visible(False)
+2874			ax2.spines['right'].set_visible(False)
+2875			ax2.spines['top'].set_visible(False)
+2876			ax2.spines['bottom'].set_visible(False)
+2877
+2878
+2879		if not os.path.exists(dir):
+2880			os.makedirs(dir)
+2881		if filename is None:
+2882			return fig
+2883		elif filename == '':
+2884			filename = f'D{self._4x}_residuals.pdf'
+2885		ppl.savefig(f'{dir}/{filename}')
+2886		ppl.close(fig)
+2887				
+2888
+2889	def simulate(self, *args, **kwargs):
+2890		'''
+2891		Legacy function: raises a `DeprecationWarning` pointing to `virtual_data()`
+2892		'''
+2893		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
+2894
+2895	def plot_distribution_of_analyses(
+2896		self,
+2897		dir = 'output',
+2898		filename = None,
+2899		vs_time = False,
+2900		figsize = (6,4),
+2901		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
+2902		output = None,
+2903		):
+2904		'''
+2905		Plot temporal distribution of all analyses in the data set.
+2906		
+2907		**Parameters**
+2908
+2909		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
+2910		'''
+2911
+2912		asamples = [s for s in self.anchors]
+2913		usamples = [s for s in self.unknowns]
+2914		if output is None or output == 'fig':
+2915			fig = ppl.figure(figsize = figsize)
+2916			ppl.subplots_adjust(*subplots_adjust)
+2917		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
+2918		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
+2919		Xmax += (Xmax-Xmin)/40
+2920		Xmin -= (Xmax-Xmin)/41
+2921		for k, s in enumerate(asamples + usamples):
+2922			if vs_time:
+2923				X = [r['TimeTag'] for r in self if r['Sample'] == s]
+2924			else:
+2925				X = [x for x,r in enumerate(self) if r['Sample'] == s]
+2926			Y = [-k for x in X]
+2927			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
+2928			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
+2929			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
+2930		ppl.axis([Xmin, Xmax, -k-1, 1])
+2931		ppl.xlabel('\ntime')
+2932		ppl.gca().annotate('',
+2933			xy = (0.6, -0.02),
+2934			xycoords = 'axes fraction',
+2935			xytext = (.4, -0.02), 
+2936            arrowprops = dict(arrowstyle = "->", color = 'k'),
+2937            )
+2938			
+2939
+2940		x2 = -1
+2941		for session in self.sessions:
+2942			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
+2943			if vs_time:
+2944				ppl.axvline(x1, color = 'k', lw = .75)
+2945			if x2 > -1:
+2946				if not vs_time:
+2947					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
+2948			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
+2949# 			from xlrd import xldate_as_datetime
+2950# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
+2951			if vs_time:
+2952				ppl.axvline(x2, color = 'k', lw = .75)
+2953				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
+2954			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
+2955
+2956		ppl.xticks([])
+2957		ppl.yticks([])
+2958
+2959		if output is None:
+2960			if not os.path.exists(dir):
+2961				os.makedirs(dir)
+2962			if filename is None:
+2963				filename = f'D{self._4x}_distribution_of_analyses.pdf'
+2964			ppl.savefig(f'{dir}/{filename}')
+2965			ppl.close(fig)
+2966		elif output == 'ax':
+2967			return ppl.gca()
+2968		elif output == 'fig':
+2969			return fig
 
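The `sample_average()` and `sample_D4x_covar()` methods in the hunk above propagate the full standardization covariance when combining or comparing samples. A minimal usage sketch follows; it assumes a `D47data` instance `mydata` that has already been fully processed (e.g. through `wg()`, `crunch()` and `standardize()`), and the sample names `'FOO'` and `'BAR'` are hypothetical:

```python
# Weighted mean of two samples; weights are rescaled to sum to 1 by default:
avg, se_avg = mydata.sample_average(['FOO', 'BAR'], weights = [1, 2])

# Difference FOO - BAR, with covariance-aware SE (no weight rescaling):
diff, se_diff = mydata.sample_average(['FOO', 'BAR'], weights = [1, -1], normalize = False)

# The underlying error covariance and correlation are also directly accessible:
cov = mydata.sample_D4x_covar('FOO', 'BAR')
rho = mydata.sample_D4x_correl('FOO', 'BAR')

print(f'Δ47(FOO) - Δ47(BAR) = {diff:.4f} ± {se_diff:.4f} ‰ (1SE)')
```

Because the two sample averages are generally correlated through the shared standardization parameters, `se_diff` usually differs from the quadratic sum of the individual SEs.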
@@ -7416,27 +7411,27 @@

API Documentation

-
1022	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
-1023		'''
-1024		**Parameters**
-1025
-1026		+ `l`: a list of dictionaries, with each dictionary including at least the keys
-1027		`Sample`, `d45`, `d46`, and `d47` or `d48`.
-1028		+ `mass`: `'47'` or `'48'`
-1029		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
-1030		+ `session`: define session name for analyses without a `Session` key
-1031		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
-1032
-1033		Returns a `D4xdata` object derived from `list`.
-1034		'''
-1035		self._4x = mass
-1036		self.verbose = verbose
-1037		self.prefix = 'D4xdata'
-1038		self.logfile = logfile
-1039		list.__init__(self, l)
-1040		self.Nf = None
-1041		self.repeatability = {}
-1042		self.refresh(session = session)
+            
1017	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
+1018		'''
+1019		**Parameters**
+1020
+1021		+ `l`: a list of dictionaries, with each dictionary including at least the keys
+1022		`Sample`, `d45`, `d46`, and `d47` or `d48`.
+1023		+ `mass`: `'47'` or `'48'`
+1024		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
+1025		+ `session`: define session name for analyses without a `Session` key
+1026		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
+1027
+1028		Returns a `D4xdata` object derived from `list`.
+1029		'''
+1030		self._4x = mass
+1031		self.verbose = verbose
+1032		self.prefix = 'D4xdata'
+1033		self.logfile = logfile
+1034		list.__init__(self, l)
+1035		self.Nf = None
+1036		self.repeatability = {}
+1037		self.refresh(session = session)
 
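A minimal construction sketch for the `__init__` signature above (the numerical values are placeholders, not real measurements):

```python
from D47crunch import D47data

# Each analysis needs at least 'Sample', 'd45', 'd46', and 'd47';
# records without a 'Session' key inherit the `session` argument below.
rawdata = [
	dict(Sample = 'ETH-1', d45 = 5.79, d46 = 11.32, d47 = 16.89),
	dict(Sample = 'ETH-2', d45 = -6.22, d46 = -4.87, d47 = -11.12),
	dict(Sample = 'MYSAMPLE', d45 = 4.51, d46 = 9.74, d47 = 14.31),
]

mydata = D47data(rawdata, session = 'Session_01', verbose = True)
print(sorted(mydata.samples), sorted(mydata.sessions))
```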
@@ -7686,24 +7681,24 @@

API Documentation

-
1045	def make_verbal(oldfun):
-1046		'''
-1047		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
-1048		'''
-1049		@wraps(oldfun)
-1050		def newfun(*args, verbose = '', **kwargs):
-1051			myself = args[0]
-1052			oldprefix = myself.prefix
-1053			myself.prefix = oldfun.__name__
+            
1040	def make_verbal(oldfun):
+1041		'''
+1042		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
+1043		'''
+1044		@wraps(oldfun)
+1045		def newfun(*args, verbose = '', **kwargs):
+1046			myself = args[0]
+1047			oldprefix = myself.prefix
+1048			myself.prefix = oldfun.__name__
+1049			if verbose != '':
+1050				oldverbose = myself.verbose
+1051				myself.verbose = verbose
+1052			out = oldfun(*args, **kwargs)
+1053			myself.prefix = oldprefix
 1054			if verbose != '':
-1055				oldverbose = myself.verbose
-1056				myself.verbose = verbose
-1057			out = oldfun(*args, **kwargs)
-1058			myself.prefix = oldprefix
-1059			if verbose != '':
-1060				myself.verbose = oldverbose
-1061			return out
-1062		return newfun
+1055				myself.verbose = oldverbose
+1056			return out
+1057		return newfun
 
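In practice, the decorator above means every `@make_verbal` method transparently accepts an extra `verbose` keyword that overrides `self.verbose` for the duration of that single call, while log messages are prefixed with the method's name. An illustrative sketch (assuming `mydata` has been fully processed, so that `repeatabilities()` can run):

```python
mydata.verbose = False                  # keep the object quiet by default
mydata.repeatabilities(verbose = True)  # but print detailed logs for this one call
```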
@@ -7723,13 +7718,13 @@

API Documentation

-
1065	def msg(self, txt):
-1066		'''
-1067		Log a message to `self.logfile`, and print it out if `verbose = True`
-1068		'''
-1069		self.log(txt)
-1070		if self.verbose:
-1071			print(f'{f"[{self.prefix}]":<16} {txt}')
+            
1060	def msg(self, txt):
+1061		'''
+1062		Log a message to `self.logfile`, and print it out if `verbose = True`
+1063		'''
+1064		self.log(txt)
+1065		if self.verbose:
+1066			print(f'{f"[{self.prefix}]":<16} {txt}')
 
@@ -7749,12 +7744,12 @@

API Documentation

-
1074	def vmsg(self, txt):
-1075		'''
-1076		Log a message to `self.logfile` and print it out
-1077		'''
-1078		self.log(txt)
-1079		print(txt)
+            
1069	def vmsg(self, txt):
+1070		'''
+1071		Log a message to `self.logfile` and print it out
+1072		'''
+1073		self.log(txt)
+1074		print(txt)
 
@@ -7774,14 +7769,14 @@

API Documentation

-
1082	def log(self, *txts):
-1083		'''
-1084		Log a message to `self.logfile`
-1085		'''
-1086		if self.logfile:
-1087			with open(self.logfile, 'a') as fid:
-1088				for txt in txts:
-1089					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
+            
1077	def log(self, *txts):
+1078		'''
+1079		Log a message to `self.logfile`
+1080		'''
+1081		if self.logfile:
+1082			with open(self.logfile, 'a') as fid:
+1083				for txt in txts:
+1084					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
 
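Taken together, the three helpers above form a small logging layer: `log()` appends timestamped, prefixed lines to `self.logfile` (when one is set), `msg()` logs and prints only when `self.verbose` is true, and `vmsg()` logs and always prints. A short sketch, reusing the hypothetical `rawdata` list from the earlier example:

```python
mydata = D47data(rawdata, logfile = 'D47crunch.log', verbose = False)

mydata.log('written to D47crunch.log only')
mydata.msg('logged; printed only if verbose is True')
mydata.vmsg('logged and always printed')
```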
@@ -7801,13 +7796,13 @@

API Documentation

-
1092	def refresh(self, session = 'mySession'):
-1093		'''
-1094		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
-1095		'''
-1096		self.fill_in_missing_info(session = session)
-1097		self.refresh_sessions()
-1098		self.refresh_samples()
+            
1087	def refresh(self, session = 'mySession'):
+1088		'''
+1089		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
+1090		'''
+1091		self.fill_in_missing_info(session = session)
+1092		self.refresh_sessions()
+1093		self.refresh_samples()
 
@@ -7827,21 +7822,21 @@

API Documentation

-
1101	def refresh_sessions(self):
-1102		'''
-1103		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
-1104		to `False` for all sessions.
-1105		'''
-1106		self.sessions = {
-1107			s: {'data': [r for r in self if r['Session'] == s]}
-1108			for s in sorted({r['Session'] for r in self})
-1109			}
-1110		for s in self.sessions:
-1111			self.sessions[s]['scrambling_drift'] = False
-1112			self.sessions[s]['slope_drift'] = False
-1113			self.sessions[s]['wg_drift'] = False
-1114			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
-1115			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
+            
1096	def refresh_sessions(self):
+1097		'''
+1098		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
+1099		to `False` for all sessions.
+1100		'''
+1101		self.sessions = {
+1102			s: {'data': [r for r in self if r['Session'] == s]}
+1103			for s in sorted({r['Session'] for r in self})
+1104			}
+1105		for s in self.sessions:
+1106			self.sessions[s]['scrambling_drift'] = False
+1107			self.sessions[s]['slope_drift'] = False
+1108			self.sessions[s]['wg_drift'] = False
+1109			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
+1110			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
 
@@ -7862,16 +7857,16 @@

API Documentation

-
1118	def refresh_samples(self):
-1119		'''
-1120		Define `self.samples`, `self.anchors`, and `self.unknowns`.
-1121		'''
-1122		self.samples = {
-1123			s: {'data': [r for r in self if r['Sample'] == s]}
-1124			for s in sorted({r['Sample'] for r in self})
-1125			}
-1126		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
-1127		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
+            
1113	def refresh_samples(self):
+1114		'''
+1115		Define `self.samples`, `self.anchors`, and `self.unknowns`.
+1116		'''
+1117		self.samples = {
+1118			s: {'data': [r for r in self if r['Sample'] == s]}
+1119			for s in sorted({r['Sample'] for r in self})
+1120			}
+1121		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
+1122		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
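To illustrate the distinction established here: a sample counts as an anchor if and only if its name is a key of `self.Nominal_D4x`. A sketch (sample names are illustrative):

```py
mydata.refresh_samples()
print(sorted(mydata.anchors))    # e.g. ['ETH-1', 'ETH-2', 'ETH-3']
print(sorted(mydata.unknowns))   # every other sample in the dataset
```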
 
@@ -7891,32 +7886,32 @@

API Documentation

-
1130	def read(self, filename, sep = '', session = ''):
-1131		'''
-1132		Read file in csv format to load data into a `D47data` object.
+            
1125	def read(self, filename, sep = '', session = ''):
+1126		'''
+1127		Read file in csv format to load data into a `D47data` object.
+1128
+1129		In the csv file, spaces before and after field separators (`','` by default)
+1130		are optional. Each line corresponds to a single analysis.
+1131
+1132		The required fields are:
 1133
-1134		In the csv file, spaces before and after field separators (`','` by default)
-1135		are optional. Each line corresponds to a single analysis.
-1136
-1137		The required fields are:
+1134		+ `UID`: a unique identifier
+1135		+ `Session`: an identifier for the analytical session
+1136		+ `Sample`: a sample identifier
+1137		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
 1138
-1139		+ `UID`: a unique identifier
-1140		+ `Session`: an identifier for the analytical session
-1141		+ `Sample`: a sample identifier
-1142		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
-1143
-1144		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
-1145		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
-1146		and `d49` are optional, and set to NaN by default.
-1147
-1148		**Parameters**
-1149
-1150		+ `filename`: the path of the file to read
-1151		+ `sep`: csv separator delimiting the fields
-1152		+ `session`: set `Session` field to this string for all analyses
-1153		'''
-1154		with open(filename) as fid:
-1155			self.input(fid.read(), sep = sep, session = session)
+1139		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1140		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1141		and `d49` are optional, and set to NaN by default.
+1142
+1143		**Parameters**
+1144
+1145		+ `filename`: the path of the file to read
+1146		+ `sep`: csv separator delimiting the fields
+1147		+ `session`: set `Session` field to this string for all analyses
+1148		'''
+1149		with open(filename) as fid:
+1150			self.input(fid.read(), sep = sep, session = session)
 
@@ -7960,42 +7955,42 @@

API Documentation

-
1158	def input(self, txt, sep = '', session = ''):
-1159		'''
-1160		Read `txt` string in csv format to load analysis data into a `D47data` object.
+            
1153	def input(self, txt, sep = '', session = ''):
+1154		'''
+1155		Read `txt` string in csv format to load analysis data into a `D47data` object.
+1156
+1157		In the csv string, spaces before and after field separators (`','` by default)
+1158		are optional. Each line corresponds to a single analysis.
+1159
+1160		The required fields are:
 1161
-1162		In the csv string, spaces before and after field separators (`','` by default)
-1163		are optional. Each line corresponds to a single analysis.
-1164
-1165		The required fields are:
+1162		+ `UID`: a unique identifier
+1163		+ `Session`: an identifier for the analytical session
+1164		+ `Sample`: a sample identifier
+1165		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
 1166
-1167		+ `UID`: a unique identifier
-1168		+ `Session`: an identifier for the analytical session
-1169		+ `Sample`: a sample identifier
-1170		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
-1171
-1172		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
-1173		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
-1174		and `d49` are optional, and set to NaN by default.
-1175
-1176		**Parameters**
-1177
-1178		+ `txt`: the csv string to read
-1179		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
-1180		whichever appears most often in `txt`.
-1181		+ `session`: set `Session` field to this string for all analyses
-1182		'''
-1183		if sep == '':
-1184			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
-1185		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
-1186		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
-1187
-1188		if session != '':
-1189			for r in data:
-1190				r['Session'] = session
-1191
-1192		self += data
-1193		self.refresh()
+1167		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1168		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1169		and `d49` are optional, and set to NaN by default.
+1170
+1171		**Parameters**
+1172
+1173		+ `txt`: the csv string to read
+1174		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
+1175		whichever appears most often in `txt`.
+1176		+ `session`: set `Session` field to this string for all analyses
+1177		'''
+1178		if sep == '':
+1179			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
+1180		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
+1181		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
+1182
+1183		if session != '':
+1184			for r in data:
+1185				r['Session'] = session
+1186
+1187		self += data
+1188		self.refresh()
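A minimal sketch of a csv payload accepted by `input()` (all values are made up), exercising the required fields and the automatic separator detection described above:

```py
txt = '''UID, Session, Sample, d45, d46, d47
A01, Session01, ETH-1, 5.795, 11.627, 16.893
A02, Session01, FOO-1, -4.330, 26.160, 22.710'''

mydata = D47data()
mydata.input(txt)   # ',' wins the separator vote; surrounding spaces are stripped
```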
 
@@ -8041,95 +8036,95 @@

API Documentation

-
1196	@make_verbal
-1197	def wg(self, samples = None, a18_acid = None):
-1198		'''
-1199		Compute bulk composition of the working gas for each session based on
-1200		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
-1201		`self.Nominal_d18O_VPDB`.
-1202		'''
-1203
-1204		self.msg('Computing WG composition:')
+            
1191	@make_verbal
+1192	def wg(self, samples = None, a18_acid = None):
+1193		'''
+1194		Compute bulk composition of the working gas for each session based on
+1195		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
+1196		`self.Nominal_d18O_VPDB`.
+1197		'''
+1198
+1199		self.msg('Computing WG composition:')
+1200
+1201		if a18_acid is None:
+1202			a18_acid = self.ALPHA_18O_ACID_REACTION
+1203		if samples is None:
+1204			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
 1205
-1206		if a18_acid is None:
-1207			a18_acid = self.ALPHA_18O_ACID_REACTION
-1208		if samples is None:
-1209			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
-1210
-1211		assert a18_acid, f'Acid fractionation factor should not be zero.'
-1212
-1213		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
-1214		R45R46_standards = {}
-1215		for sample in samples:
-1216			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
-1217			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
-1218			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
-1219			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
-1220			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
-1221
-1222			C12_s = 1 / (1 + R13_s)
-1223			C13_s = R13_s / (1 + R13_s)
-1224			C16_s = 1 / (1 + R17_s + R18_s)
-1225			C17_s = R17_s / (1 + R17_s + R18_s)
-1226			C18_s = R18_s / (1 + R17_s + R18_s)
-1227
-1228			C626_s = C12_s * C16_s ** 2
-1229			C627_s = 2 * C12_s * C16_s * C17_s
-1230			C628_s = 2 * C12_s * C16_s * C18_s
-1231			C636_s = C13_s * C16_s ** 2
-1232			C637_s = 2 * C13_s * C16_s * C17_s
-1233			C727_s = C12_s * C17_s ** 2
-1234
-1235			R45_s = (C627_s + C636_s) / C626_s
-1236			R46_s = (C628_s + C637_s + C727_s) / C626_s
-1237			R45R46_standards[sample] = (R45_s, R46_s)
-1238		
-1239		for s in self.sessions:
-1240			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
-1241			assert db, f'No sample from {samples} found in session "{s}".'
-1242# 			dbsamples = sorted({r['Sample'] for r in db})
-1243
-1244			X = [r['d45'] for r in db]
-1245			Y = [R45R46_standards[r['Sample']][0] for r in db]
-1246			x1, x2 = np.min(X), np.max(X)
+1206		assert a18_acid, f'Acid fractionation factor should not be zero.'
+1207
+1208		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
+1209		R45R46_standards = {}
+1210		for sample in samples:
+1211			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
+1212			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
+1213			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
+1214			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
+1215			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
+1216
+1217			C12_s = 1 / (1 + R13_s)
+1218			C13_s = R13_s / (1 + R13_s)
+1219			C16_s = 1 / (1 + R17_s + R18_s)
+1220			C17_s = R17_s / (1 + R17_s + R18_s)
+1221			C18_s = R18_s / (1 + R17_s + R18_s)
+1222
+1223			C626_s = C12_s * C16_s ** 2
+1224			C627_s = 2 * C12_s * C16_s * C17_s
+1225			C628_s = 2 * C12_s * C16_s * C18_s
+1226			C636_s = C13_s * C16_s ** 2
+1227			C637_s = 2 * C13_s * C16_s * C17_s
+1228			C727_s = C12_s * C17_s ** 2
+1229
+1230			R45_s = (C627_s + C636_s) / C626_s
+1231			R46_s = (C628_s + C637_s + C727_s) / C626_s
+1232			R45R46_standards[sample] = (R45_s, R46_s)
+1233		
+1234		for s in self.sessions:
+1235			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
+1236			assert db, f'No sample from {samples} found in session "{s}".'
+1237# 			dbsamples = sorted({r['Sample'] for r in db})
+1238
+1239			X = [r['d45'] for r in db]
+1240			Y = [R45R46_standards[r['Sample']][0] for r in db]
+1241			x1, x2 = np.min(X), np.max(X)
+1242
+1243			if x1 < x2:
+1244				wgcoord = x1/(x1-x2)
+1245			else:
+1246				wgcoord = 999
 1247
-1248			if x1 < x2:
-1249				wgcoord = x1/(x1-x2)
-1250			else:
-1251				wgcoord = 999
-1252
-1253			if wgcoord < -.5 or wgcoord > 1.5:
-1254				# unreasonable to extrapolate to d45 = 0
-1255				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
-1256			else :
-1257				# d45 = 0 is reasonably well bracketed
-1258				R45_wg = np.polyfit(X, Y, 1)[1]
-1259
-1260			X = [r['d46'] for r in db]
-1261			Y = [R45R46_standards[r['Sample']][1] for r in db]
-1262			x1, x2 = np.min(X), np.max(X)
+1248			if wgcoord < -.5 or wgcoord > 1.5:
+1249				# unreasonable to extrapolate to d45 = 0
+1250				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
+1251			else :
+1252				# d45 = 0 is reasonably well bracketed
+1253				R45_wg = np.polyfit(X, Y, 1)[1]
+1254
+1255			X = [r['d46'] for r in db]
+1256			Y = [R45R46_standards[r['Sample']][1] for r in db]
+1257			x1, x2 = np.min(X), np.max(X)
+1258
+1259			if x1 < x2:
+1260				wgcoord = x1/(x1-x2)
+1261			else:
+1262				wgcoord = 999
 1263
-1264			if x1 < x2:
-1265				wgcoord = x1/(x1-x2)
-1266			else:
-1267				wgcoord = 999
-1268
-1269			if wgcoord < -.5 or wgcoord > 1.5:
-1270				# unreasonable to extrapolate to d46 = 0
-1271				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
-1272			else :
-1273				# d46 = 0 is reasonably well bracketed
-1274				R46_wg = np.polyfit(X, Y, 1)[1]
-1275
-1276			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
-1277
-1278			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
-1279
-1280			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
-1281			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
-1282			for r in self.sessions[s]['data']:
-1283				r['d13Cwg_VPDB'] = d13Cwg_VPDB
-1284				r['d18Owg_VSMOW'] = d18Owg_VSMOW
+1264			if wgcoord < -.5 or wgcoord > 1.5:
+1265				# unreasonable to extrapolate to d46 = 0
+1266				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
+1267			else :
+1268				# d46 = 0 is reasonably well bracketed
+1269				R46_wg = np.polyfit(X, Y, 1)[1]
+1270
+1271			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
+1272
+1273			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
+1274
+1275			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
+1276			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
+1277			for r in self.sessions[s]['data']:
+1278				r['d13Cwg_VPDB'] = d13Cwg_VPDB
+1279				r['d18Owg_VSMOW'] = d18Owg_VSMOW
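On the `wgcoord` test above: with `x1 = min(X)` and `x2 = max(X)`, the quantity `x1/(x1-x2)` locates d45 = 0 (or d46 = 0) within the `[x1, x2]` interval, taking the value 0 at `x1` and 1 at `x2`. A standalone sketch of the same logic:

```py
def bracketing_coord(X):
	# position of zero within [min(X), max(X)]: 0 at the minimum, 1 at the maximum
	x1, x2 = min(X), max(X)
	return x1 / (x1 - x2) if x1 < x2 else 999

bracketing_coord([-3.0, 5.0])    # 0.375: zero is well bracketed, so regress
bracketing_coord([12.0, 18.0])   # -2.0: extrapolating, so use the ratio-based mean
```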
 
@@ -8151,36 +8146,36 @@

API Documentation

-
1287	def compute_bulk_delta(self, R45, R46, D17O = 0):
-1288		'''
-1289		Compute δ13C_VPDB and δ18O_VSMOW
-1290		by solving the generalized form of equation (17) from
-1291		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
-1292		assuming that δ18O_VSMOW is not too large (0 ± 50 ‰) and
-1293		solving the corresponding second-order Taylor polynomial
-1294		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)).
-1295		'''
-1296
-1297		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
+            
1282	def compute_bulk_delta(self, R45, R46, D17O = 0):
+1283		'''
+1284		Compute δ13C_VPDB and δ18O_VSMOW
+1285		by solving the generalized form of equation (17) from
+1286		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
+1287		assuming that δ18O_VSMOW is not too large (0 ± 50 ‰) and
+1288		solving the corresponding second-order Taylor polynomial
+1289		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)).
+1290		'''
+1291
+1292		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
+1293
+1294		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
+1295		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
+1296		C = 2 * self.R18_VSMOW
+1297		D = -R46
 1298
-1299		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
-1300		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
-1301		C = 2 * self.R18_VSMOW
-1302		D = -R46
-1303
-1304		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
-1305		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
-1306		cc = A + B + C + D
-1307
-1308		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
-1309
-1310		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
-1311		R17 = K * R18 ** self.LAMBDA_17
-1312		R13 = R45 - 2 * R17
-1313
-1314		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
-1315
-1316		return d13C_VPDB, d18O_VSMOW
+1299		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
+1300		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
+1301		cc = A + B + C + D
+1302
+1303		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
+1304
+1305		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
+1306		R17 = K * R18 ** self.LAMBDA_17
+1307		R13 = R45 - 2 * R17
+1308
+1309		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
+1310
+1311		return d13C_VPDB, d18O_VSMOW
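A quick self-consistency sketch (the delta values are arbitrary): `compute_isobar_ratios()`, defined further below, and `compute_bulk_delta()` should round-trip to within the error of the Taylor expansion:

```py
d = D47data()
R13 = d.R13_VPDB * (1 + 2.0 / 1000)     # δ13C_VPDB = +2 ‰
R18 = d.R18_VSMOW * (1 + 30.0 / 1000)   # δ18O_VSMOW = +30 ‰
R45, R46, *_ = d.compute_isobar_ratios(R13, R18)
print(d.compute_bulk_delta(R45, R46))   # ≈ (2.0, 30.0)
```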
 
@@ -8206,16 +8201,16 @@

API Documentation

-
1319	@make_verbal
-1320	def crunch(self, verbose = ''):
-1321		'''
-1322		Compute bulk composition and raw clumped isotope anomalies for all analyses.
-1323		'''
-1324		for r in self:
-1325			self.compute_bulk_and_clumping_deltas(r)
-1326		self.standardize_d13C()
-1327		self.standardize_d18O()
-1328		self.msg(f"Crunched {len(self)} analyses.")
+            
1314	@make_verbal
+1315	def crunch(self, verbose = ''):
+1316		'''
+1317		Compute bulk composition and raw clumped isotope anomalies for all analyses.
+1318		'''
+1319		for r in self:
+1320			self.compute_bulk_and_clumping_deltas(r)
+1321		self.standardize_d13C()
+1322		self.standardize_d18O()
+1323		self.msg(f"Crunched {len(self)} analyses.")
 
@@ -8235,20 +8230,20 @@

API Documentation

-
1331	def fill_in_missing_info(self, session = 'mySession'):
-1332		'''
-1333		Fill in optional fields with default values
-1334		'''
-1335		for i,r in enumerate(self):
-1336			if 'D17O' not in r:
-1337				r['D17O'] = 0.
-1338			if 'UID' not in r:
-1339				r['UID'] = f'{i+1}'
-1340			if 'Session' not in r:
-1341				r['Session'] = session
-1342			for k in ['d47', 'd48', 'd49']:
-1343				if k not in r:
-1344					r[k] = np.nan
+            
1326	def fill_in_missing_info(self, session = 'mySession'):
+1327		'''
+1328		Fill in optional fields with default values
+1329		'''
+1330		for i,r in enumerate(self):
+1331			if 'D17O' not in r:
+1332				r['D17O'] = 0.
+1333			if 'UID' not in r:
+1334				r['UID'] = f'{i+1}'
+1335			if 'Session' not in r:
+1336				r['Session'] = session
+1337			for k in ['d47', 'd48', 'd49']:
+1338				if k not in r:
+1339					r[k] = np.nan
 
@@ -8268,25 +8263,25 @@

API Documentation

-
1347	def standardize_d13C(self):
-1348		'''
-1349		Perform δ13C standardization within each session `s` according to
-1350		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
-1351		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
-1352		may be redefined arbitrarily at a later stage.
-1353		'''
-1354		for s in self.sessions:
-1355			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
-1356				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
-1357				X,Y = zip(*XY)
-1358				if self.sessions[s]['d13C_standardization_method'] == '1pt':
-1359					offset = np.mean(Y) - np.mean(X)
-1360					for r in self.sessions[s]['data']:
-1361						r['d13C_VPDB'] += offset				
-1362				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
-1363					a,b = np.polyfit(X,Y,1)
-1364					for r in self.sessions[s]['data']:
-1365						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
+            
1342	def standardize_d13C(self):
+1343		'''
+1344		Perform δ13C standardization within each session `s` according to
+1345		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
+1346		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
+1347		may be redefined arbitrarily at a later stage.
+1348		'''
+1349		for s in self.sessions:
+1350			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
+1351				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
+1352				X,Y = zip(*XY)
+1353				if self.sessions[s]['d13C_standardization_method'] == '1pt':
+1354					offset = np.mean(Y) - np.mean(X)
+1355					for r in self.sessions[s]['data']:
+1356						r['d13C_VPDB'] += offset				
+1357				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
+1358					a,b = np.polyfit(X,Y,1)
+1359					for r in self.sessions[s]['data']:
+1360						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
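A sketch of overriding the per-session method before crunching (the session name is hypothetical):

```py
mydata.refresh()
mydata.sessions['Session01']['d13C_standardization_method'] = '1pt'
mydata.crunch()   # standardize_d13C() now applies a constant offset in that session
```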
 
@@ -8309,26 +8304,26 @@

API Documentation

-
1367	def standardize_d18O(self):
-1368		'''
-1369		Perform δ18O standardization within each session `s` according to
-1370		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
-1371		which is defined by default by `D47data.refresh_sessions()` as equal to
-1372		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
-1373		'''
-1374		for s in self.sessions:
-1375			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
-1376				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
-1377				X,Y = zip(*XY)
-1378				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
-1379				if self.sessions[s]['d18O_standardization_method'] == '1pt':
-1380					offset = np.mean(Y) - np.mean(X)
-1381					for r in self.sessions[s]['data']:
-1382						r['d18O_VSMOW'] += offset				
-1383				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
-1384					a,b = np.polyfit(X,Y,1)
-1385					for r in self.sessions[s]['data']:
-1386						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
+            
1362	def standardize_d18O(self):
+1363		'''
+1364		Perform δ18O standardization within each session `s` according to
+1365		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
+1366		which is defined by default by `D47data.refresh_sessions()` as equal to
+1367		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
+1368		'''
+1369		for s in self.sessions:
+1370			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
+1371				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
+1372				X,Y = zip(*XY)
+1373				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
+1374				if self.sessions[s]['d18O_standardization_method'] == '1pt':
+1375					offset = np.mean(Y) - np.mean(X)
+1376					for r in self.sessions[s]['data']:
+1377						r['d18O_VSMOW'] += offset				
+1378				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
+1379					a,b = np.polyfit(X,Y,1)
+1380					for r in self.sessions[s]['data']:
+1381						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
 
@@ -8351,43 +8346,43 @@

API Documentation

-
1389	def compute_bulk_and_clumping_deltas(self, r):
-1390		'''
-1391		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
-1392		'''
+            
1384	def compute_bulk_and_clumping_deltas(self, r):
+1385		'''
+1386		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
+1387		'''
+1388
+1389		# Compute working gas R13, R18, and isobar ratios
+1390		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
+1391		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
+1392		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
 1393
-1394		# Compute working gas R13, R18, and isobar ratios
-1395		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
-1396		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
-1397		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
-1398
-1399		# Compute analyte isobar ratios
-1400		R45 = (1 + r['d45'] / 1000) * R45_wg
-1401		R46 = (1 + r['d46'] / 1000) * R46_wg
-1402		R47 = (1 + r['d47'] / 1000) * R47_wg
-1403		R48 = (1 + r['d48'] / 1000) * R48_wg
-1404		R49 = (1 + r['d49'] / 1000) * R49_wg
-1405
-1406		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
-1407		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
-1408		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
+1394		# Compute analyte isobar ratios
+1395		R45 = (1 + r['d45'] / 1000) * R45_wg
+1396		R46 = (1 + r['d46'] / 1000) * R46_wg
+1397		R47 = (1 + r['d47'] / 1000) * R47_wg
+1398		R48 = (1 + r['d48'] / 1000) * R48_wg
+1399		R49 = (1 + r['d49'] / 1000) * R49_wg
+1400
+1401		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
+1402		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
+1403		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
+1404
+1405		# Compute stochastic isobar ratios of the analyte
+1406		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
+1407			R13, R18, D17O = r['D17O']
+1408		)
 1409
-1410		# Compute stochastic isobar ratios of the analyte
-1411		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
-1412			R13, R18, D17O = r['D17O']
-1413		)
-1414
-1415		# Check that R45/R45stoch and R46/R46stoch are undistinguishable from 1,
-1416		# and raise a warning if the corresponding anomalies exceed 0.02 ppm.
-1417		if (R45 / R45stoch - 1) > 5e-8:
-1418			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
-1419		if (R46 / R46stoch - 1) > 5e-8:
-1420			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
-1421
-1422		# Compute raw clumped isotope anomalies
-1423		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
-1424		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
-1425		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
+1410		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
+1411		# and raise a warning if the corresponding anomalies exceed 0.02 ppm.
+1412		if (R45 / R45stoch - 1) > 5e-8:
+1413			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
+1414		if (R46 / R46stoch - 1) > 5e-8:
+1415			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
+1416
+1417		# Compute raw clumped isotope anomalies
+1418		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
+1419		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
+1420		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
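After `crunch()`, each analysis record carries the fields computed above. A sketch:

```py
mydata.crunch()
r = mydata[0]
print(r['d13C_VPDB'], r['d18O_VSMOW'])         # bulk composition
print(r['D47raw'], r['D48raw'], r['D49raw'])   # raw clumped isotope anomalies
```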
 
@@ -8407,51 +8402,51 @@

API Documentation

-
1428	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
-1429		'''
-1430		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
-1431		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
-1432		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
-1433		'''
-1434
-1435		# Compute R17
-1436		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
-1437
-1438		# Compute isotope concentrations
-1439		C12 = (1 + R13) ** -1
-1440		C13 = C12 * R13
-1441		C16 = (1 + R17 + R18) ** -1
-1442		C17 = C16 * R17
-1443		C18 = C16 * R18
-1444
-1445		# Compute stochastic isotopologue concentrations
-1446		C626 = C16 * C12 * C16
-1447		C627 = C16 * C12 * C17 * 2
-1448		C628 = C16 * C12 * C18 * 2
-1449		C636 = C16 * C13 * C16
-1450		C637 = C16 * C13 * C17 * 2
-1451		C638 = C16 * C13 * C18 * 2
-1452		C727 = C17 * C12 * C17
-1453		C728 = C17 * C12 * C18 * 2
-1454		C737 = C17 * C13 * C17
-1455		C738 = C17 * C13 * C18 * 2
-1456		C828 = C18 * C12 * C18
-1457		C838 = C18 * C13 * C18
-1458
-1459		# Compute stochastic isobar ratios
-1460		R45 = (C636 + C627) / C626
-1461		R46 = (C628 + C637 + C727) / C626
-1462		R47 = (C638 + C728 + C737) / C626
-1463		R48 = (C738 + C828) / C626
-1464		R49 = C838 / C626
+            
1423	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
+1424		'''
+1425		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
+1426		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
+1427		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
+1428		'''
+1429
+1430		# Compute R17
+1431		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
+1432
+1433		# Compute isotope concentrations
+1434		C12 = (1 + R13) ** -1
+1435		C13 = C12 * R13
+1436		C16 = (1 + R17 + R18) ** -1
+1437		C17 = C16 * R17
+1438		C18 = C16 * R18
+1439
+1440		# Compute stochastic isotopologue concentrations
+1441		C626 = C16 * C12 * C16
+1442		C627 = C16 * C12 * C17 * 2
+1443		C628 = C16 * C12 * C18 * 2
+1444		C636 = C16 * C13 * C16
+1445		C637 = C16 * C13 * C17 * 2
+1446		C638 = C16 * C13 * C18 * 2
+1447		C727 = C17 * C12 * C17
+1448		C728 = C17 * C12 * C18 * 2
+1449		C737 = C17 * C13 * C17
+1450		C738 = C17 * C13 * C18 * 2
+1451		C828 = C18 * C12 * C18
+1452		C838 = C18 * C13 * C18
+1453
+1454		# Compute stochastic isobar ratios
+1455		R45 = (C636 + C627) / C626
+1456		R46 = (C628 + C637 + C727) / C626
+1457		R47 = (C638 + C728 + C737) / C626
+1458		R48 = (C738 + C828) / C626
+1459		R49 = C838 / C626
+1460
+1461		# Account for stochastic anomalies
+1462		R47 *= 1 + D47 / 1000
+1463		R48 *= 1 + D48 / 1000
+1464		R49 *= 1 + D49 / 1000
 1465
-1466		# Account for stochastic anomalies
-1467		R47 *= 1 + D47 / 1000
-1468		R48 *= 1 + D48 / 1000
-1469		R49 *= 1 + D49 / 1000
-1470
-1471		# Return isobar ratios
-1472		return R45, R46, R47, R48, R49
+1466		# Return isobar ratios
+1467		return R45, R46, R47, R48, R49
 
@@ -8473,30 +8468,30 @@

API Documentation

-
1475	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
-1476		'''
-1477		Split unknown samples by UID (treat all analyses as different samples)
-1478		or by session (treat analyses of a given sample in different sessions as
-1479		different samples).
-1480
-1481		**Parameters**
-1482
-1483		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
-1484		+ `grouping`: `by_uid` | `by_session`
-1485		'''
-1486		if samples_to_split == 'all':
-1487			samples_to_split = [s for s in self.unknowns]
-1488		gkeys = {'by_uid':'UID', 'by_session':'Session'}
-1489		self.grouping = grouping.lower()
-1490		if self.grouping in gkeys:
-1491			gkey = gkeys[self.grouping]
-1492		for r in self:
-1493			if r['Sample'] in samples_to_split:
-1494				r['Sample_original'] = r['Sample']
-1495				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
-1496			elif r['Sample'] in self.unknowns:
-1497				r['Sample_original'] = r['Sample']
-1498		self.refresh_samples()
+            
1470	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
+1471		'''
+1472		Split unknown samples by UID (treat all analyses as different samples)
+1473		or by session (treat analyses of a given sample in different sessions as
+1474		different samples).
+1475
+1476		**Parameters**
+1477
+1478		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
+1479		+ `grouping`: `by_uid` | `by_session`
+1480		'''
+1481		if samples_to_split == 'all':
+1482			samples_to_split = [s for s in self.unknowns]
+1483		gkeys = {'by_uid':'UID', 'by_session':'Session'}
+1484		self.grouping = grouping.lower()
+1485		if self.grouping in gkeys:
+1486			gkey = gkeys[self.grouping]
+1487		for r in self:
+1488			if r['Sample'] in samples_to_split:
+1489				r['Sample_original'] = r['Sample']
+1490				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
+1491			elif r['Sample'] in self.unknowns:
+1492				r['Sample_original'] = r['Sample']
+1493		self.refresh_samples()
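A sketch of the intended round trip under the pooled standardization model (the sample name is hypothetical); see `unsplit_samples()` below:

```py
mydata.split_samples(['FOO-1'], grouping = 'by_session')
mydata.standardize(method = 'pooled')
mydata.unsplit_samples()   # merges FOO-1__Session01, FOO-1__Session02, ... back together
```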
 
@@ -8525,61 +8520,61 @@

API Documentation

-
1501	def unsplit_samples(self, tables = False):
-1502		'''
-1503		Reverse the effects of `D47data.split_samples()`.
-1504		
-1505		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
-1506		
-1507		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
-1508		probably use `D4xdata.combine_samples()` instead to reverse the effects of
-1509		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
-1510		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
-1511		that case session-averaged Δ4x values are statistically independent).
-1512		'''
-1513		unknowns_old = sorted({s for s in self.unknowns})
-1514		CM_old = self.standardization.covar[:,:]
-1515		VD_old = self.standardization.params.valuesdict().copy()
-1516		vars_old = self.standardization.var_names
-1517
-1518		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
-1519
-1520		Ns = len(vars_old) - len(unknowns_old)
-1521		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
-1522		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
-1523
-1524		W = np.zeros((len(vars_new), len(vars_old)))
-1525		W[:Ns,:Ns] = np.eye(Ns)
-1526		for u in unknowns_new:
-1527			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
-1528			if self.grouping == 'by_session':
-1529				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
-1530			elif self.grouping == 'by_uid':
-1531				weights = [1 for s in splits]
-1532			sw = sum(weights)
-1533			weights = [w/sw for w in weights]
-1534			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
-1535
-1536		CM_new = W @ CM_old @ W.T
-1537		V = W @ np.array([[VD_old[k]] for k in vars_old])
-1538		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
-1539
-1540		self.standardization.covar = CM_new
-1541		self.standardization.params.valuesdict = lambda : VD_new
-1542		self.standardization.var_names = vars_new
+            
1496	def unsplit_samples(self, tables = False):
+1497		'''
+1498		Reverse the effects of `D47data.split_samples()`.
+1499		
+1500		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
+1501		
+1502		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
+1503		probably use `D4xdata.combine_samples()` instead to reverse the effects of
+1504		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
+1505		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
+1506		that case session-averaged Δ4x values are statistically independent).
+1507		'''
+1508		unknowns_old = sorted({s for s in self.unknowns})
+1509		CM_old = self.standardization.covar[:,:]
+1510		VD_old = self.standardization.params.valuesdict().copy()
+1511		vars_old = self.standardization.var_names
+1512
+1513		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
+1514
+1515		Ns = len(vars_old) - len(unknowns_old)
+1516		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
+1517		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
+1518
+1519		W = np.zeros((len(vars_new), len(vars_old)))
+1520		W[:Ns,:Ns] = np.eye(Ns)
+1521		for u in unknowns_new:
+1522			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
+1523			if self.grouping == 'by_session':
+1524				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
+1525			elif self.grouping == 'by_uid':
+1526				weights = [1 for s in splits]
+1527			sw = sum(weights)
+1528			weights = [w/sw for w in weights]
+1529			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
+1530
+1531		CM_new = W @ CM_old @ W.T
+1532		V = W @ np.array([[VD_old[k]] for k in vars_old])
+1533		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
+1534
+1535		self.standardization.covar = CM_new
+1536		self.standardization.params.valuesdict = lambda : VD_new
+1537		self.standardization.var_names = vars_new
+1538
+1539		for r in self:
+1540			if r['Sample'] in self.unknowns:
+1541				r['Sample_split'] = r['Sample']
+1542				r['Sample'] = r['Sample_original']
 1543
-1544		for r in self:
-1545			if r['Sample'] in self.unknowns:
-1546				r['Sample_split'] = r['Sample']
-1547				r['Sample'] = r['Sample_original']
-1548
-1549		self.refresh_samples()
-1550		self.consolidate_samples()
-1551		self.repeatabilities()
-1552
-1553		if tables:
-1554			self.table_of_analyses()
-1555			self.table_of_samples()
+1544		self.refresh_samples()
+1545		self.consolidate_samples()
+1546		self.repeatabilities()
+1547
+1548		if tables:
+1549			self.table_of_analyses()
+1550			self.table_of_samples()
 
@@ -8607,25 +8602,25 @@

API Documentation

-
1557	def assign_timestamps(self):
-1558		'''
-1559		Assign a time field `t` of type `float` to each analysis.
-1560
-1561		If `TimeTag` is one of the data fields, `t` is equal within a given session
-1562		to `TimeTag` minus the mean value of `TimeTag` for that session.
-1563		Otherwise, the index of each analysis within its session is used as
-1564		`TimeTag` and `t` is defined as above.
-1565		'''
-1566		for session in self.sessions:
-1567			sdata = self.sessions[session]['data']
-1568			try:
-1569				t0 = np.mean([r['TimeTag'] for r in sdata])
-1570				for r in sdata:
-1571					r['t'] = r['TimeTag'] - t0
-1572			except KeyError:
-1573				t0 = (len(sdata)-1)/2
-1574				for t,r in enumerate(sdata):
-1575					r['t'] = t - t0
+            
1552	def assign_timestamps(self):
+1553		'''
+1554		Assign a time field `t` of type `float` to each analysis.
+1555
+1556		If `TimeTag` is one of the data fields, `t` is equal within a given session
+1557		to `TimeTag` minus the mean value of `TimeTag` for that session.
+1558		Otherwise, the index of each analysis within its session is used as
+1559		`TimeTag` and `t` is defined as above.
+1560		'''
+1561		for session in self.sessions:
+1562			sdata = self.sessions[session]['data']
+1563			try:
+1564				t0 = np.mean([r['TimeTag'] for r in sdata])
+1565				for r in sdata:
+1566					r['t'] = r['TimeTag'] - t0
+1567			except KeyError:
+1568				t0 = (len(sdata)-1)/2
+1569				for t,r in enumerate(sdata):
+1570					r['t'] = t - t0
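A sketch (field values are made up) of the two cases handled above:

```py
# with explicit TimeTag fields, e.g. minutes since the start of the run:
for k, r in enumerate(mydata):
	r['TimeTag'] = 20.0 * k    # made-up acquisition times
mydata.assign_timestamps()     # r['t'] = r['TimeTag'] - session mean

# without TimeTag fields, the index within each session is used instead
```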
 
@@ -8650,12 +8645,12 @@

API Documentation

-
1578	def report(self):
-1579		'''
-1580		Print a report on the standardization fit.
-1581		Only applicable after `D4xdata.standardize(method='pooled')`.
-1582		'''
-1583		report_fit(self.standardization)
+            
1573	def report(self):
+1574		'''
+1575		Prints a report on the standardization fit.
+1576		Only applicable after `D4xdata.standardize(method='pooled')`.
+1577		'''
+1578		report_fit(self.standardization)
 
@@ -8676,43 +8671,43 @@

API Documentation

-
1586	def combine_samples(self, sample_groups):
-1587		'''
-1588		Combine analyses of different samples to compute weighted average Δ4x
-1589		and new error (co)variances corresponding to the groups defined by the `sample_groups`
-1590		dictionary.
-1591		
-1592		Caution: samples are weighted by number of replicate analyses, which is a
-1593		reasonable default behavior but is not always optimal (e.g., in the case of strongly
-1594		correlated analytical errors for one or more samples).
-1595		
-1596		Returns a tuple of:
-1597		
-1598		+ the list of group names
-1599		+ an array of the corresponding Δ4x values
-1600		+ the corresponding (co)variance matrix
-1601		
-1602		**Parameters**
-1603
-1604		+ `sample_groups`: a dictionary of the form:
-1605		```py
-1606		{'group1': ['sample_1', 'sample_2'],
-1607		 'group2': ['sample_3', 'sample_4', 'sample_5']}
-1608		```
-1609		'''
-1610		
-1611		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
-1612		groups = sorted(sample_groups.keys())
-1613		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
-1614		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
-1615		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
-1616		W = np.array([
-1617			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
-1618			for j in groups])
-1619		D4x_new = W @ D4x_old
-1620		CM_new = W @ CM_old @ W.T
-1621
-1622		return groups, D4x_new[:,0], CM_new
+            
1581	def combine_samples(self, sample_groups):
+1582		'''
+1583		Combine analyses of different samples to compute weighted average Δ4x
+1584		and new error (co)variances corresponding to the groups defined by the `sample_groups`
+1585		dictionary.
+1586		
+1587		Caution: samples are weighted by number of replicate analyses, which is a
+1588		reasonable default behavior but is not always optimal (e.g., in the case of strongly
+1589		correlated analytical errors for one or more samples).
+1590		
+1591		Returns a tuplet of:
+1592		
+1593		+ the list of group names
+1594		+ an array of the corresponding Δ4x values
+1595		+ the corresponding (co)variance matrix
+1596		
+1597		**Parameters**
+1598
+1599		+ `sample_groups`: a dictionary of the form:
+1600		```py
+1601		{'group1': ['sample_1', 'sample_2'],
+1602		 'group2': ['sample_3', 'sample_4', 'sample_5']}
+1603		```
+1604		'''
+1605		
+1606		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
+1607		groups = sorted(sample_groups.keys())
+1608		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
+1609		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
+1610		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
+1611		W = np.array([
+1612			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
+1613			for j in groups])
+1614		D4x_new = W @ D4x_old
+1615		CM_new = W @ CM_old @ W.T
+1616
+1617		return groups, D4x_new[:,0], CM_new
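A usage sketch (group and sample names are hypothetical):

```py
groups, D47_combined, CM_combined = mydata.combine_samples(
	{'early': ['FOO-1', 'FOO-2'], 'late': ['FOO-3']}
	)
SE_combined = np.diag(CM_combined) ** 0.5   # standard error of each group average
```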
 
@@ -8759,238 +8754,238 @@

API Documentation

-
1625	@make_verbal
-1626	def standardize(self,
-1627		method = 'pooled',
-1628		weighted_sessions = [],
-1629		consolidate = True,
-1630		consolidate_tables = False,
-1631		consolidate_plots = False,
-1632		constraints = {},
-1633		):
-1634		'''
-1635		Compute absolute Δ4x values for all replicate analyses and for sample averages.
-1636		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
-1637		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
-1638		i.e. that their true Δ4x value does not change between sessions
-1639		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
-1640		`'indep_sessions'`, the standardization processes each session independently, based only
-1641		on anchor analyses.
-1642		'''
-1643
-1644		self.standardization_method = method
-1645		self.assign_timestamps()
-1646
-1647		if method == 'pooled':
-1648			if weighted_sessions:
-1649				for session_group in weighted_sessions:
-1650					if self._4x == '47':
-1651						X = D47data([r for r in self if r['Session'] in session_group])
-1652					elif self._4x == '48':
-1653						X = D48data([r for r in self if r['Session'] in session_group])
-1654					X.Nominal_D4x = self.Nominal_D4x.copy()
-1655					X.refresh()
-1656					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
-1657					w = np.sqrt(result.redchi)
-1658					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
-1659					for r in X:
-1660						r[f'wD{self._4x}raw'] *= w
-1661			else:
-1662				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
-1663				for r in self:
-1664					r[f'wD{self._4x}raw'] = 1.
-1665
-1666			params = Parameters()
-1667			for k,session in enumerate(self.sessions):
-1668				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
-1669				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
-1670				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
-1671				s = pf(session)
-1672				params.add(f'a_{s}', value = 0.9)
-1673				params.add(f'b_{s}', value = 0.)
-1674				params.add(f'c_{s}', value = -0.9)
-1675				params.add(f'a2_{s}', value = 0.,
-1676# 					vary = self.sessions[session]['scrambling_drift'],
-1677					)
-1678				params.add(f'b2_{s}', value = 0.,
-1679# 					vary = self.sessions[session]['slope_drift'],
-1680					)
-1681				params.add(f'c2_{s}', value = 0.,
-1682# 					vary = self.sessions[session]['wg_drift'],
-1683					)
-1684				if not self.sessions[session]['scrambling_drift']:
-1685					params[f'a2_{s}'].expr = '0'
-1686				if not self.sessions[session]['slope_drift']:
-1687					params[f'b2_{s}'].expr = '0'
-1688				if not self.sessions[session]['wg_drift']:
-1689					params[f'c2_{s}'].expr = '0'
-1690
-1691			for sample in self.unknowns:
-1692				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
-1693
-1694			for k in constraints:
-1695				params[k].expr = constraints[k]
-1696
-1697			def residuals(p):
-1698				R = []
-1699				for r in self:
-1700					session = pf(r['Session'])
-1701					sample = pf(r['Sample'])
-1702					if r['Sample'] in self.Nominal_D4x:
-1703						R += [ (
-1704							r[f'D{self._4x}raw'] - (
-1705								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
-1706								+ p[f'b_{session}'] * r[f'd{self._4x}']
-1707								+	p[f'c_{session}']
-1708								+ r['t'] * (
-1709									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
-1710									+ p[f'b2_{session}'] * r[f'd{self._4x}']
-1711									+	p[f'c2_{session}']
-1712									)
-1713								)
-1714							) / r[f'wD{self._4x}raw'] ]
-1715					else:
-1716						R += [ (
-1717							r[f'D{self._4x}raw'] - (
-1718								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
-1719								+ p[f'b_{session}'] * r[f'd{self._4x}']
-1720								+	p[f'c_{session}']
-1721								+ r['t'] * (
-1722									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
-1723									+ p[f'b2_{session}'] * r[f'd{self._4x}']
-1724									+	p[f'c2_{session}']
-1725									)
-1726								)
-1727							) / r[f'wD{self._4x}raw'] ]
-1728				return R
-1729
-1730			M = Minimizer(residuals, params)
-1731			result = M.least_squares()
-1732			self.Nf = result.nfree
-1733			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
-1734			new_names, new_covar, new_se = _fullcovar(result)[:3]
-1735			result.var_names = new_names
-1736			result.covar = new_covar
-1737
-1738			for r in self:
-1739				s = pf(r["Session"])
-1740				a = result.params.valuesdict()[f'a_{s}']
-1741				b = result.params.valuesdict()[f'b_{s}']
-1742				c = result.params.valuesdict()[f'c_{s}']
-1743				a2 = result.params.valuesdict()[f'a2_{s}']
-1744				b2 = result.params.valuesdict()[f'b2_{s}']
-1745				c2 = result.params.valuesdict()[f'c2_{s}']
-1746				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
-1747
-1748			self.standardization = result
-1749
-1750			for session in self.sessions:
-1751				self.sessions[session]['Np'] = 3
-1752				for k in ['scrambling', 'slope', 'wg']:
-1753					if self.sessions[session][f'{k}_drift']:
-1754						self.sessions[session]['Np'] += 1
+            
1620	@make_verbal
+1621	def standardize(self,
+1622		method = 'pooled',
+1623		weighted_sessions = [],
+1624		consolidate = True,
+1625		consolidate_tables = False,
+1626		consolidate_plots = False,
+1627		constraints = {},
+1628		):
+1629		'''
+1630		Compute absolute Δ4x values for all replicate analyses and for sample averages.
+1631		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
+1632		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
+1633		i.e. that their true Δ4x value does not change between sessions
+1634		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
+1635		`'indep_sessions'`, the standardization processes each session independently, based only
+1636		on anchor analyses.
+1637		'''
+1638
+1639		self.standardization_method = method
+1640		self.assign_timestamps()
+1641
+1642		if method == 'pooled':
+1643			if weighted_sessions:
+1644				for session_group in weighted_sessions:
+1645					if self._4x == '47':
+1646						X = D47data([r for r in self if r['Session'] in session_group])
+1647					elif self._4x == '48':
+1648						X = D48data([r for r in self if r['Session'] in session_group])
+1649					X.Nominal_D4x = self.Nominal_D4x.copy()
+1650					X.refresh()
+1651					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
+1652					w = np.sqrt(result.redchi)
+1653					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
+1654					for r in X:
+1655						r[f'wD{self._4x}raw'] *= w
+1656			else:
+1657				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
+1658				for r in self:
+1659					r[f'wD{self._4x}raw'] = 1.
+1660
+1661			params = Parameters()
+1662			for k,session in enumerate(self.sessions):
+1663				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
+1664				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
+1665				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
+1666				s = pf(session)
+1667				params.add(f'a_{s}', value = 0.9)
+1668				params.add(f'b_{s}', value = 0.)
+1669				params.add(f'c_{s}', value = -0.9)
+1670				params.add(f'a2_{s}', value = 0.,
+1671# 					vary = self.sessions[session]['scrambling_drift'],
+1672					)
+1673				params.add(f'b2_{s}', value = 0.,
+1674# 					vary = self.sessions[session]['slope_drift'],
+1675					)
+1676				params.add(f'c2_{s}', value = 0.,
+1677# 					vary = self.sessions[session]['wg_drift'],
+1678					)
+1679				if not self.sessions[session]['scrambling_drift']:
+1680					params[f'a2_{s}'].expr = '0'
+1681				if not self.sessions[session]['slope_drift']:
+1682					params[f'b2_{s}'].expr = '0'
+1683				if not self.sessions[session]['wg_drift']:
+1684					params[f'c2_{s}'].expr = '0'
+1685
+1686			for sample in self.unknowns:
+1687				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
+1688
+1689			for k in constraints:
+1690				params[k].expr = constraints[k]
+1691
+1692			def residuals(p):
+1693				R = []
+1694				for r in self:
+1695					session = pf(r['Session'])
+1696					sample = pf(r['Sample'])
+1697					if r['Sample'] in self.Nominal_D4x:
+1698						R += [ (
+1699							r[f'D{self._4x}raw'] - (
+1700								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
+1701								+ p[f'b_{session}'] * r[f'd{self._4x}']
+1702								+	p[f'c_{session}']
+1703								+ r['t'] * (
+1704									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
+1705									+ p[f'b2_{session}'] * r[f'd{self._4x}']
+1706									+	p[f'c2_{session}']
+1707									)
+1708								)
+1709							) / r[f'wD{self._4x}raw'] ]
+1710					else:
+1711						R += [ (
+1712							r[f'D{self._4x}raw'] - (
+1713								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
+1714								+ p[f'b_{session}'] * r[f'd{self._4x}']
+1715								+	p[f'c_{session}']
+1716								+ r['t'] * (
+1717									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
+1718									+ p[f'b2_{session}'] * r[f'd{self._4x}']
+1719									+	p[f'c2_{session}']
+1720									)
+1721								)
+1722							) / r[f'wD{self._4x}raw'] ]
+1723				return R
+1724
+1725			M = Minimizer(residuals, params)
+1726			result = M.least_squares()
+1727			self.Nf = result.nfree
+1728			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
+1729			new_names, new_covar, new_se = _fullcovar(result)[:3]
+1730			result.var_names = new_names
+1731			result.covar = new_covar
+1732
+1733			for r in self:
+1734				s = pf(r["Session"])
+1735				a = result.params.valuesdict()[f'a_{s}']
+1736				b = result.params.valuesdict()[f'b_{s}']
+1737				c = result.params.valuesdict()[f'c_{s}']
+1738				a2 = result.params.valuesdict()[f'a2_{s}']
+1739				b2 = result.params.valuesdict()[f'b2_{s}']
+1740				c2 = result.params.valuesdict()[f'c2_{s}']
+1741				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
+1742
+1743			self.standardization = result
+1744
+1745			for session in self.sessions:
+1746				self.sessions[session]['Np'] = 3
+1747				for k in ['scrambling', 'slope', 'wg']:
+1748					if self.sessions[session][f'{k}_drift']:
+1749						self.sessions[session]['Np'] += 1
+1750
+1751			if consolidate:
+1752				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
+1753			return result
+1754
 1755
-1756			if consolidate:
-1757				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
-1758			return result
-1759
-1760
-1761		elif method == 'indep_sessions':
-1762
-1763			if weighted_sessions:
-1764				for session_group in weighted_sessions:
-1765					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
-1766					X.Nominal_D4x = self.Nominal_D4x.copy()
-1767					X.refresh()
-1768					# This is only done to assign r['wD47raw'] for r in X:
-1769					X.standardize(method = method, weighted_sessions = [], consolidate = False)
-1770					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
-1771			else:
-1772				self.msg('All weights set to 1 ‰')
-1773				for r in self:
-1774					r[f'wD{self._4x}raw'] = 1
-1775
-1776			for session in self.sessions:
-1777				s = self.sessions[session]
-1778				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
-1779				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
-1780				s['Np'] = sum(p_active)
-1781				sdata = s['data']
-1782
-1783				A = np.array([
-1784					[
-1785						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
-1786						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
-1787						1 / r[f'wD{self._4x}raw'],
-1788						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
-1789						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
-1790						r['t'] / r[f'wD{self._4x}raw']
-1791						]
-1792					for r in sdata if r['Sample'] in self.anchors
-1793					])[:,p_active] # only keep columns for the active parameters
-1794				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
-1795				s['Na'] = Y.size
-1796				CM = linalg.inv(A.T @ A)
-1797				bf = (CM @ A.T @ Y).T[0,:]
-1798				k = 0
-1799				for n,a in zip(p_names, p_active):
-1800					if a:
-1801						s[n] = bf[k]
-1802# 						self.msg(f'{n} = {bf[k]}')
-1803						k += 1
-1804					else:
-1805						s[n] = 0.
-1806# 						self.msg(f'{n} = 0.0')
+1756		elif method == 'indep_sessions':
+1757
+1758			if weighted_sessions:
+1759				for session_group in weighted_sessions:
+1760					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
+1761					X.Nominal_D4x = self.Nominal_D4x.copy()
+1762					X.refresh()
+1763					# This is only done to assign r['wD47raw'] for r in X:
+1764					X.standardize(method = method, weighted_sessions = [], consolidate = False)
+1765					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
+1766			else:
+1767				self.msg('All weights set to 1 ‰')
+1768				for r in self:
+1769					r[f'wD{self._4x}raw'] = 1
+1770
+1771			for session in self.sessions:
+1772				s = self.sessions[session]
+1773				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
+1774				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
+1775				s['Np'] = sum(p_active)
+1776				sdata = s['data']
+1777
+1778				A = np.array([
+1779					[
+1780						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
+1781						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
+1782						1 / r[f'wD{self._4x}raw'],
+1783						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
+1784						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
+1785						r['t'] / r[f'wD{self._4x}raw']
+1786						]
+1787					for r in sdata if r['Sample'] in self.anchors
+1788					])[:,p_active] # only keep columns for the active parameters
+1789				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
+1790				s['Na'] = Y.size
+1791				CM = linalg.inv(A.T @ A)
+1792				bf = (CM @ A.T @ Y).T[0,:]
+1793				k = 0
+1794				for n,a in zip(p_names, p_active):
+1795					if a:
+1796						s[n] = bf[k]
+1797# 						self.msg(f'{n} = {bf[k]}')
+1798						k += 1
+1799					else:
+1800						s[n] = 0.
+1801# 						self.msg(f'{n} = 0.0')
+1802
+1803				for r in sdata :
+1804					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
+1805					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
+1806					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
 1807
-1808				for r in sdata :
-1809					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
-1810					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
-1811					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
-1812
-1813				s['CM'] = np.zeros((6,6))
-1814				i = 0
-1815				k_active = [j for j,a in enumerate(p_active) if a]
-1816				for j,a in enumerate(p_active):
-1817					if a:
-1818						s['CM'][j,k_active] = CM[i,:]
-1819						i += 1
-1820
-1821			if not weighted_sessions:
-1822				w = self.rmswd()['rmswd']
-1823				for r in self:
-1824						r[f'wD{self._4x}'] *= w
-1825						r[f'wD{self._4x}raw'] *= w
-1826				for session in self.sessions:
-1827					self.sessions[session]['CM'] *= w**2
-1828
-1829			for session in self.sessions:
-1830				s = self.sessions[session]
-1831				s['SE_a'] = s['CM'][0,0]**.5
-1832				s['SE_b'] = s['CM'][1,1]**.5
-1833				s['SE_c'] = s['CM'][2,2]**.5
-1834				s['SE_a2'] = s['CM'][3,3]**.5
-1835				s['SE_b2'] = s['CM'][4,4]**.5
-1836				s['SE_c2'] = s['CM'][5,5]**.5
-1837
-1838			if not weighted_sessions:
-1839				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
-1840			else:
-1841				self.Nf = 0
-1842				for sg in weighted_sessions:
-1843					self.Nf += self.rmswd(sessions = sg)['Nf']
-1844
-1845			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
-1846
-1847			avgD4x = {
-1848				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
-1849				for sample in self.samples
-1850				}
-1851			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
-1852			rD4x = (chi2/self.Nf)**.5
-1853			self.repeatability[f'sigma_{self._4x}'] = rD4x
-1854
-1855			if consolidate:
-1856				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
+1808				s['CM'] = np.zeros((6,6))
+1809				i = 0
+1810				k_active = [j for j,a in enumerate(p_active) if a]
+1811				for j,a in enumerate(p_active):
+1812					if a:
+1813						s['CM'][j,k_active] = CM[i,:]
+1814						i += 1
+1815
+1816			if not weighted_sessions:
+1817				w = self.rmswd()['rmswd']
+1818				for r in self:
+1819						r[f'wD{self._4x}'] *= w
+1820						r[f'wD{self._4x}raw'] *= w
+1821				for session in self.sessions:
+1822					self.sessions[session]['CM'] *= w**2
+1823
+1824			for session in self.sessions:
+1825				s = self.sessions[session]
+1826				s['SE_a'] = s['CM'][0,0]**.5
+1827				s['SE_b'] = s['CM'][1,1]**.5
+1828				s['SE_c'] = s['CM'][2,2]**.5
+1829				s['SE_a2'] = s['CM'][3,3]**.5
+1830				s['SE_b2'] = s['CM'][4,4]**.5
+1831				s['SE_c2'] = s['CM'][5,5]**.5
+1832
+1833			if not weighted_sessions:
+1834				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
+1835			else:
+1836				self.Nf = 0
+1837				for sg in weighted_sessions:
+1838					self.Nf += self.rmswd(sessions = sg)['Nf']
+1839
+1840			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
+1841
+1842			avgD4x = {
+1843				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
+1844				for sample in self.samples
+1845				}
+1846			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
+1847			rD4x = (chi2/self.Nf)**.5
+1848			self.repeatability[f'sigma_{self._4x}'] = rD4x
+1849
+1850			if consolidate:
+1851				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
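The `indep_sessions` block above solves, session by session, a small weighted least-squares problem: each anchor analysis contributes one row of the design matrix `A` (columns for `a`, `b`, `c` and the active drift terms), with every element divided by `wD4xraw`, and the normal equations yield both the best-fit parameters and their covariance `CM`. A minimal standalone sketch of that solve, using made-up toy values rather than real data:

```python
import numpy as np

# Toy anchor data (all values hypothetical): nominal D47, d47, D47raw, weights
nominal = np.array([0.2052, 0.2085, 0.6132])
d47 = np.array([-10.1, 5.3, 16.8])
D47raw = np.array([-0.55, -0.48, -0.10])
w = np.array([1., 1., 1.])                    # wD47raw, here all equal to 1

# Design matrix for the non-drifting parameters (a, b, c);
# each row is divided by its weight, as in the listing above
A = np.column_stack([nominal, d47, np.ones_like(d47)]) / w[:, None]
Y = (D47raw / w)[:, None]

CM = np.linalg.inv(A.T @ A)                   # parameter covariance (up to scale)
a, b, c = (CM @ A.T @ Y).T[0, :]              # best-fit a, b, c

# Standardization with drift terms (a2, b2, c2) set to zero:
# D47 = (D47raw - c - b * d47) / a
print((D47raw - c - b * d47) / a)             # recovers the nominal values exactly
```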
 
@@ -9016,33 +9011,33 @@

API Documentation

-
1859	def standardization_error(self, session, d4x, D4x, t = 0):
-1860		'''
-1861		Compute standardization error for a given session and
-1862		(δ47, Δ47) composition.
-1863		'''
-1864		a = self.sessions[session]['a']
-1865		b = self.sessions[session]['b']
-1866		c = self.sessions[session]['c']
-1867		a2 = self.sessions[session]['a2']
-1868		b2 = self.sessions[session]['b2']
-1869		c2 = self.sessions[session]['c2']
-1870		CM = self.sessions[session]['CM']
-1871
-1872		x, y = D4x, d4x
-1873		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
-1874# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
-1875		dxdy = -(b+b2*t) / (a+a2*t)
-1876		dxdz = 1. / (a+a2*t)
-1877		dxda = -x / (a+a2*t)
-1878		dxdb = -y / (a+a2*t)
-1879		dxdc = -1. / (a+a2*t)
-1880		dxda2 = -x * t / (a+a2*t)
-1881		dxdb2 = -y * t / (a+a2*t)
-1882		dxdc2 = -t / (a+a2*t)
-1883		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
-1884		sx = (V @ CM @ V.T) ** .5
-1885		return sx
+            
1854	def standardization_error(self, session, d4x, D4x, t = 0):
+1855		'''
+1856		Compute standardization error for a given session and
+1857		(δ47, Δ47) composition.
+1858		'''
+1859		a = self.sessions[session]['a']
+1860		b = self.sessions[session]['b']
+1861		c = self.sessions[session]['c']
+1862		a2 = self.sessions[session]['a2']
+1863		b2 = self.sessions[session]['b2']
+1864		c2 = self.sessions[session]['c2']
+1865		CM = self.sessions[session]['CM']
+1866
+1867		x, y = D4x, d4x
+1868		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
+1869# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
+1870		dxdy = -(b+b2*t) / (a+a2*t)
+1871		dxdz = 1. / (a+a2*t)
+1872		dxda = -x / (a+a2*t)
+1873		dxdb = -y / (a+a2*t)
+1874		dxdc = -1. / (a+a2*t)
+1875		dxda2 = -x * t / (a+a2*t)
+1876		dxdb2 = -y * t / (a+a2*t)
+1877		dxdc2 = -t / (a+a2*t)
+1878		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
+1879		sx = (V @ CM @ V.T) ** .5
+1880		return sx
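`standardization_error()` above propagates the session covariance matrix `CM` through the sensitivity vector `V = (∂x/∂a, ∂x/∂b, ∂x/∂c, ∂x/∂a2, ∂x/∂b2, ∂x/∂c2)` of the standardized value `x`, giving `sx = (V CM Vᵀ)**0.5`. A self-contained numerical sketch, with hypothetical parameter values and a made-up covariance matrix:

```python
import numpy as np

# Hypothetical session parameters and composition
a, b, c, a2, b2, c2 = 1.02, 1.8e-3, -0.85, 0., 0., 0.
t, x, y = 0., 0.25, 5.0                       # time, D47, d47

V = np.array([
    -x / (a + a2*t),                          # dx/da
    -y / (a + a2*t),                          # dx/db
    -1. / (a + a2*t),                         # dx/dc
    -x * t / (a + a2*t),                      # dx/da2
    -y * t / (a + a2*t),                      # dx/db2
    -t / (a + a2*t),                          # dx/dc2
    ])

CM = np.diag([1e-4, 1e-8, 1e-4, 0., 0., 0.])  # made-up covariance matrix
print((V @ CM @ V.T) ** .5)                   # standardization SE on x
```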
 
@@ -9064,45 +9059,45 @@

API Documentation

-
1888	@make_verbal
-1889	def summary(self,
-1890		dir = 'output',
-1891		filename = None,
-1892		save_to_file = True,
-1893		print_out = True,
-1894		):
-1895		'''
-1896		Print out and/or save to disk a summary of the standardization results.
-1897
-1898		**Parameters**
-1899
-1900		+ `dir`: the directory in which to save the table
-1901		+ `filename`: the name of the csv file to write to
-1902		+ `save_to_file`: whether to save the table to disk
-1903		+ `print_out`: whether to print out the table
-1904		'''
-1905
-1906		out = []
-1907		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
-1908		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
-1909		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
-1910		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
-1911		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
-1912		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
-1913		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
-1914		out += [['Model degrees of freedom', f"{self.Nf}"]]
-1915		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
-1916		out += [['Standardization method', self.standardization_method]]
-1917
-1918		if save_to_file:
-1919			if not os.path.exists(dir):
-1920				os.makedirs(dir)
-1921			if filename is None:
-1922				filename = f'D{self._4x}_summary.csv'
-1923			with open(f'{dir}/{filename}', 'w') as fid:
-1924				fid.write(make_csv(out))
-1925		if print_out:
-1926			self.msg('\n' + pretty_table(out, header = 0))
+            
1883	@make_verbal
+1884	def summary(self,
+1885		dir = 'output',
+1886		filename = None,
+1887		save_to_file = True,
+1888		print_out = True,
+1889		):
+1890		'''
+1891		Print out and/or save to disk a summary of the standardization results.
+1892
+1893		**Parameters**
+1894
+1895		+ `dir`: the directory in which to save the table
+1896		+ `filename`: the name of the csv file to write to
+1897		+ `save_to_file`: whether to save the table to disk
+1898		+ `print_out`: whether to print out the table
+1899		'''
+1900
+1901		out = []
+1902		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
+1903		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
+1904		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
+1905		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
+1906		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
+1907		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
+1908		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
+1909		out += [['Model degrees of freedom', f"{self.Nf}"]]
+1910		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
+1911		out += [['Standardization method', self.standardization_method]]
+1912
+1913		if save_to_file:
+1914			if not os.path.exists(dir):
+1915				os.makedirs(dir)
+1916			if filename is None:
+1917				filename = f'D{self._4x}_summary.csv'
+1918			with open(f'{dir}/{filename}', 'w') as fid:
+1919				fid.write(make_csv(out))
+1920		if print_out:
+1921			self.msg('\n' + pretty_table(out, header = 0))
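A hedged usage sketch for `summary()`; the input file name and the preceding `read`/`wg`/`crunch`/`standardize` steps reflect the usual D47crunch workflow and are assumptions, not part of this diff:

```python
from D47crunch import D47data

mydata = D47data()
mydata.read('rawdata.csv')       # hypothetical input file
mydata.wg()
mydata.crunch()
mydata.standardize()

# print the summary table without writing D47_summary.csv to disk
mydata.summary(save_to_file = False, print_out = True)
```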
 
@@ -9132,81 +9127,81 @@

API Documentation

-
1929	@make_verbal
-1930	def table_of_sessions(self,
-1931		dir = 'output',
-1932		filename = None,
-1933		save_to_file = True,
-1934		print_out = True,
-1935		output = None,
-1936		):
-1937		'''
-1938		Print out and/or save to disk a table of sessions.
-1939
-1940		**Parameters**
-1941
-1942		+ `dir`: the directory in which to save the table
-1943		+ `filename`: the name of the csv file to write to
-1944		+ `save_to_file`: whether to save the table to disk
-1945		+ `print_out`: whether to print out the table
-1946		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-1947		    if set to `'raw'`: return a list of list of strings
-1948		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-1949		'''
-1950		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
-1951		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
-1952		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
-1953
-1954		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
-1955		if include_a2:
-1956			out[-1] += ['a2 ± SE']
-1957		if include_b2:
-1958			out[-1] += ['b2 ± SE']
-1959		if include_c2:
-1960			out[-1] += ['c2 ± SE']
-1961		for session in self.sessions:
-1962			out += [[
-1963				session,
-1964				f"{self.sessions[session]['Na']}",
-1965				f"{self.sessions[session]['Nu']}",
-1966				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
-1967				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
-1968				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
-1969				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
-1970				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
-1971				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
-1972				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
-1973				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
-1974				]]
-1975			if include_a2:
-1976				if self.sessions[session]['scrambling_drift']:
-1977					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
+            
1924	@make_verbal
+1925	def table_of_sessions(self,
+1926		dir = 'output',
+1927		filename = None,
+1928		save_to_file = True,
+1929		print_out = True,
+1930		output = None,
+1931		):
+1932		'''
+1933		Print out an/or save to disk a table of sessions.
+1934
+1935		**Parameters**
+1936
+1937		+ `dir`: the directory in which to save the table
+1938		+ `filename`: the name to the csv file to write to
+1939		+ `save_to_file`: whether to save the table to disk
+1940		+ `print_out`: whether to print out the table
+1941		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+1942		    if set to `'raw'`: return a list of list of strings
+1943		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+1944		'''
+1945		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
+1946		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
+1947		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
+1948
+1949		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
+1950		if include_a2:
+1951			out[-1] += ['a2 ± SE']
+1952		if include_b2:
+1953			out[-1] += ['b2 ± SE']
+1954		if include_c2:
+1955			out[-1] += ['c2 ± SE']
+1956		for session in self.sessions:
+1957			out += [[
+1958				session,
+1959				f"{self.sessions[session]['Na']}",
+1960				f"{self.sessions[session]['Nu']}",
+1961				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
+1962				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
+1963				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
+1964				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
+1965				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
+1966				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
+1967				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
+1968				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
+1969				]]
+1970			if include_a2:
+1971				if self.sessions[session]['scrambling_drift']:
+1972					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
+1973				else:
+1974					out[-1] += ['']
+1975			if include_b2:
+1976				if self.sessions[session]['slope_drift']:
+1977					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
 1978				else:
 1979					out[-1] += ['']
-1980			if include_b2:
-1981				if self.sessions[session]['slope_drift']:
-1982					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
+1980			if include_c2:
+1981				if self.sessions[session]['wg_drift']:
+1982					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
 1983				else:
 1984					out[-1] += ['']
-1985			if include_c2:
-1986				if self.sessions[session]['wg_drift']:
-1987					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
-1988				else:
-1989					out[-1] += ['']
-1990
-1991		if save_to_file:
-1992			if not os.path.exists(dir):
-1993				os.makedirs(dir)
-1994			if filename is None:
-1995				filename = f'D{self._4x}_sessions.csv'
-1996			with open(f'{dir}/{filename}', 'w') as fid:
-1997				fid.write(make_csv(out))
-1998		if print_out:
-1999			self.msg('\n' + pretty_table(out))
-2000		if output == 'raw':
-2001			return out
-2002		elif output == 'pretty':
-2003			return pretty_table(out)
+1985
+1986		if save_to_file:
+1987			if not os.path.exists(dir):
+1988				os.makedirs(dir)
+1989			if filename is None:
+1990				filename = f'D{self._4x}_sessions.csv'
+1991			with open(f'{dir}/{filename}', 'w') as fid:
+1992				fid.write(make_csv(out))
+1993		if print_out:
+1994			self.msg('\n' + pretty_table(out))
+1995		if output == 'raw':
+1996			return out
+1997		elif output == 'pretty':
+1998			return pretty_table(out)
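`table_of_sessions()` can also return its contents instead of, or in addition to, printing and saving them; continuing the hypothetical `mydata` example from above:

```python
tbl = mydata.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
print(tbl[0][:3])                # header row starts with ['Session', 'Na', 'Nu']
```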
 
@@ -9239,63 +9234,63 @@

API Documentation

-
2006	@make_verbal
-2007	def table_of_analyses(
-2008		self,
-2009		dir = 'output',
-2010		filename = None,
-2011		save_to_file = True,
-2012		print_out = True,
-2013		output = None,
-2014		):
-2015		'''
-2016		Print out and/or save to disk a table of analyses.
-2017
-2018		**Parameters**
-2019
-2020		+ `dir`: the directory in which to save the table
-2021		+ `filename`: the name of the csv file to write to
-2022		+ `save_to_file`: whether to save the table to disk
-2023		+ `print_out`: whether to print out the table
-2024		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-2025		    if set to `'raw'`: return a list of list of strings
-2026		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2027		'''
-2028
-2029		out = [['UID','Session','Sample']]
-2030		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
-2031		for f in extra_fields:
-2032			out[-1] += [f[0]]
-2033		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
-2034		for r in self:
-2035			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
-2036			for f in extra_fields:
-2037				out[-1] += [f"{r[f[0]]:{f[1]}}"]
-2038			out[-1] += [
-2039				f"{r['d13Cwg_VPDB']:.3f}",
-2040				f"{r['d18Owg_VSMOW']:.3f}",
-2041				f"{r['d45']:.6f}",
-2042				f"{r['d46']:.6f}",
-2043				f"{r['d47']:.6f}",
-2044				f"{r['d48']:.6f}",
-2045				f"{r['d49']:.6f}",
-2046				f"{r['d13C_VPDB']:.6f}",
-2047				f"{r['d18O_VSMOW']:.6f}",
-2048				f"{r['D47raw']:.6f}",
-2049				f"{r['D48raw']:.6f}",
-2050				f"{r['D49raw']:.6f}",
-2051				f"{r[f'D{self._4x}']:.6f}"
-2052				]
-2053		if save_to_file:
-2054			if not os.path.exists(dir):
-2055				os.makedirs(dir)
-2056			if filename is None:
-2057				filename = f'D{self._4x}_analyses.csv'
-2058			with open(f'{dir}/{filename}', 'w') as fid:
-2059				fid.write(make_csv(out))
-2060		if print_out:
-2061			self.msg('\n' + pretty_table(out))
-2062		return out
+            
2001	@make_verbal
+2002	def table_of_analyses(
+2003		self,
+2004		dir = 'output',
+2005		filename = None,
+2006		save_to_file = True,
+2007		print_out = True,
+2008		output = None,
+2009		):
+2010		'''
+2011		Print out and/or save to disk a table of analyses.
+2012
+2013		**Parameters**
+2014
+2015		+ `dir`: the directory in which to save the table
+2016		+ `filename`: the name of the csv file to write to
+2017		+ `save_to_file`: whether to save the table to disk
+2018		+ `print_out`: whether to print out the table
+2019		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2020		    if set to `'raw'`: return a list of list of strings
+2021		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2022		'''
+2023
+2024		out = [['UID','Session','Sample']]
+2025		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
+2026		for f in extra_fields:
+2027			out[-1] += [f[0]]
+2028		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
+2029		for r in self:
+2030			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
+2031			for f in extra_fields:
+2032				out[-1] += [f"{r[f[0]]:{f[1]}}"]
+2033			out[-1] += [
+2034				f"{r['d13Cwg_VPDB']:.3f}",
+2035				f"{r['d18Owg_VSMOW']:.3f}",
+2036				f"{r['d45']:.6f}",
+2037				f"{r['d46']:.6f}",
+2038				f"{r['d47']:.6f}",
+2039				f"{r['d48']:.6f}",
+2040				f"{r['d49']:.6f}",
+2041				f"{r['d13C_VPDB']:.6f}",
+2042				f"{r['d18O_VSMOW']:.6f}",
+2043				f"{r['D47raw']:.6f}",
+2044				f"{r['D48raw']:.6f}",
+2045				f"{r['D49raw']:.6f}",
+2046				f"{r[f'D{self._4x}']:.6f}"
+2047				]
+2048		if save_to_file:
+2049			if not os.path.exists(dir):
+2050				os.makedirs(dir)
+2051			if filename is None:
+2052				filename = f'D{self._4x}_analyses.csv'
+2053			with open(f'{dir}/{filename}', 'w') as fid:
+2054				fid.write(make_csv(out))
+2055		if print_out:
+2056			self.msg('\n' + pretty_table(out))
+2057		return out
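Unlike the other table methods, `table_of_analyses()` always returns the table as a list of rows, so the same object works for both display and further processing; continuing the hypothetical example:

```python
analyses = mydata.table_of_analyses(save_to_file = False, print_out = False)
print(len(analyses) - 1)         # number of analyses (first row is the header)
```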
 
@@ -9328,56 +9323,56 @@

API Documentation

-
2064	@make_verbal
-2065	def covar_table(
-2066		self,
-2067		correl = False,
-2068		dir = 'output',
-2069		filename = None,
-2070		save_to_file = True,
-2071		print_out = True,
-2072		output = None,
-2073		):
-2074		'''
-2075		Print out, save to disk and/or return the variance-covariance matrix of D4x
-2076		for all unknown samples.
-2077
-2078		**Parameters**
-2079
-2080		+ `dir`: the directory in which to save the csv
-2081		+ `filename`: the name of the csv file to write to
-2082		+ `save_to_file`: whether to save the csv
-2083		+ `print_out`: whether to print out the matrix
-2084		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
-2085		    if set to `'raw'`: return a list of list of strings
-2086		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2087		'''
-2088		samples = sorted([u for u in self.unknowns])
-2089		out = [[''] + samples]
-2090		for s1 in samples:
-2091			out.append([s1])
-2092			for s2 in samples:
-2093				if correl:
-2094					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
-2095				else:
-2096					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
-2097
-2098		if save_to_file:
-2099			if not os.path.exists(dir):
-2100				os.makedirs(dir)
-2101			if filename is None:
-2102				if correl:
-2103					filename = f'D{self._4x}_correl.csv'
-2104				else:
-2105					filename = f'D{self._4x}_covar.csv'
-2106			with open(f'{dir}/{filename}', 'w') as fid:
-2107				fid.write(make_csv(out))
-2108		if print_out:
-2109			self.msg('\n'+pretty_table(out))
-2110		if output == 'raw':
-2111			return out
-2112		elif output == 'pretty':
-2113			return pretty_table(out)
+            
2059	@make_verbal
+2060	def covar_table(
+2061		self,
+2062		correl = False,
+2063		dir = 'output',
+2064		filename = None,
+2065		save_to_file = True,
+2066		print_out = True,
+2067		output = None,
+2068		):
+2069		'''
+2070		Print out, save to disk and/or return the variance-covariance matrix of D4x
+2071		for all unknown samples.
+2072
+2073		**Parameters**
+2074
+2075		+ `dir`: the directory in which to save the csv
+2076		+ `filename`: the name of the csv file to write to
+2077		+ `save_to_file`: whether to save the csv
+2078		+ `print_out`: whether to print out the matrix
+2079		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
+2080		    if set to `'raw'`: return a list of list of strings
+2081		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2082		'''
+2083		samples = sorted([u for u in self.unknowns])
+2084		out = [[''] + samples]
+2085		for s1 in samples:
+2086			out.append([s1])
+2087			for s2 in samples:
+2088				if correl:
+2089					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
+2090				else:
+2091					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
+2092
+2093		if save_to_file:
+2094			if not os.path.exists(dir):
+2095				os.makedirs(dir)
+2096			if filename is None:
+2097				if correl:
+2098					filename = f'D{self._4x}_correl.csv'
+2099				else:
+2100					filename = f'D{self._4x}_covar.csv'
+2101			with open(f'{dir}/{filename}', 'w') as fid:
+2102				fid.write(make_csv(out))
+2103		if print_out:
+2104			self.msg('\n'+pretty_table(out))
+2105		if output == 'raw':
+2106			return out
+2107		elif output == 'pretty':
+2108			return pretty_table(out)
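For `covar_table()`, setting `correl = True` switches from covariances (`sample_D4x_covar`) to correlations (`sample_D4x_correl`); continuing the hypothetical example:

```python
# correlation matrix of D47 values across all unknowns, as plain text
print(mydata.covar_table(correl = True, save_to_file = False, print_out = False, output = 'pretty'))
```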
 
@@ -9411,64 +9406,64 @@

API Documentation

-
2115	@make_verbal
-2116	def table_of_samples(
-2117		self,
-2118		dir = 'output',
-2119		filename = None,
-2120		save_to_file = True,
-2121		print_out = True,
-2122		output = None,
-2123		):
-2124		'''
-2125		Print out, save to disk and/or return a table of samples.
-2126
-2127		**Parameters**
-2128
-2129		+ `dir`: the directory in which to save the csv
-2130		+ `filename`: the name of the csv file to write to
-2131		+ `save_to_file`: whether to save the csv
-2132		+ `print_out`: whether to print out the table
-2133		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
-2134		    if set to `'raw'`: return a list of list of strings
-2135		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
-2136		'''
-2137
-2138		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
-2139		for sample in self.anchors:
-2140			out += [[
-2141				f"{sample}",
-2142				f"{self.samples[sample]['N']}",
-2143				f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2144				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2145				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
-2146				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
-2147				]]
-2148		for sample in self.unknowns:
-2149			out += [[
-2150				f"{sample}",
-2151				f"{self.samples[sample]['N']}",
-2152				f"{self.samples[sample]['d13C_VPDB']:.2f}",
-2153				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
-2154				f"{self.samples[sample][f'D{self._4x}']:.4f}",
-2155				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
-2156				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
-2157				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
-2158				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
-2159				]]
-2160		if save_to_file:
-2161			if not os.path.exists(dir):
-2162				os.makedirs(dir)
-2163			if filename is None:
-2164				filename = f'D{self._4x}_samples.csv'
-2165			with open(f'{dir}/{filename}', 'w') as fid:
-2166				fid.write(make_csv(out))
-2167		if print_out:
-2168			self.msg('\n'+pretty_table(out))
-2169		if output == 'raw':
-2170			return out
-2171		elif output == 'pretty':
-2172			return pretty_table(out)
+            
2110	@make_verbal
+2111	def table_of_samples(
+2112		self,
+2113		dir = 'output',
+2114		filename = None,
+2115		save_to_file = True,
+2116		print_out = True,
+2117		output = None,
+2118		):
+2119		'''
+2120		Print out, save to disk and/or return a table of samples.
+2121
+2122		**Parameters**
+2123
+2124		+ `dir`: the directory in which to save the csv
+2125		+ `filename`: the name of the csv file to write to
+2126		+ `save_to_file`: whether to save the csv
+2127		+ `print_out`: whether to print out the table
+2128		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
+2129		    if set to `'raw'`: return a list of list of strings
+2130		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
+2131		'''
+2132
+2133		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
+2134		for sample in self.anchors:
+2135			out += [[
+2136				f"{sample}",
+2137				f"{self.samples[sample]['N']}",
+2138				f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2139				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2140				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
+2141				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
+2142				]]
+2143		for sample in self.unknowns:
+2144			out += [[
+2145				f"{sample}",
+2146				f"{self.samples[sample]['N']}",
+2147				f"{self.samples[sample]['d13C_VPDB']:.2f}",
+2148				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
+2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",
+2150				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
+2151				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
+2152				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
+2153				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
+2154				]]
+2155		if save_to_file:
+2156			if not os.path.exists(dir):
+2157				os.makedirs(dir)
+2158			if filename is None:
+2159				filename = f'D{self._4x}_samples.csv'
+2160			with open(f'{dir}/{filename}', 'w') as fid:
+2161				fid.write(make_csv(out))
+2162		if print_out:
+2163			self.msg('\n'+pretty_table(out))
+2164		if output == 'raw':
+2165			return out
+2166		elif output == 'pretty':
+2167			return pretty_table(out)
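Usage sketch for `table_of_samples()`, continuing the hypothetical example:

```python
samples = mydata.table_of_samples(save_to_file = False, print_out = False, output = 'raw')
# each unknown row lists D47, its SE, the 95 % CL (SE x t95), SD, and p_Levene
```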
 
@@ -9500,22 +9495,22 @@

API Documentation

-
2175	def plot_sessions(self, dir = 'output', figsize = (8,8)):
-2176		'''
-2177		Generate session plots and save them to disk.
-2178
-2179		**Parameters**
-2180
-2181		+ `dir`: the directory in which to save the plots
-2182		+ `figsize`: the width and height (in inches) of each plot
-2183		'''
-2184		if not os.path.exists(dir):
-2185			os.makedirs(dir)
-2186
-2187		for session in self.sessions:
-2188			sp = self.plot_single_session(session, xylimits = 'constant')
-2189			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
-2190			ppl.close(sp.fig)
+            
2170	def plot_sessions(self, dir = 'output', figsize = (8,8)):
+2171		'''
+2172		Generate session plots and save them to disk.
+2173
+2174		**Parameters**
+2175
+2176		+ `dir`: the directory in which to save the plots
+2177		+ `figsize`: the width and height (in inches) of each plot
+2178		'''
+2179		if not os.path.exists(dir):
+2180			os.makedirs(dir)
+2181
+2182		for session in self.sessions:
+2183			sp = self.plot_single_session(session, xylimits = 'constant')
+2184			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
+2185			ppl.close(sp.fig)
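Usage sketch for `plot_sessions()` (the output directory name is arbitrary):

```python
mydata.plot_sessions(dir = 'session_plots')   # one D47_plot_<session>.pdf per session
```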
 
@@ -9543,82 +9538,82 @@

API Documentation

-
2193	@make_verbal
-2194	def consolidate_samples(self):
-2195		'''
-2196		Compile various statistics for each sample.
+            
2188	@make_verbal
+2189	def consolidate_samples(self):
+2190		'''
+2191		Compile various statistics for each sample.
+2192
+2193		For each anchor sample:
+2194
+2195		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
+2196		+ `SE_D47` or `SE_D48`: set to zero by definition
 2197
-2198		For each anchor sample:
+2198		For each unknown sample:
 2199
-2200		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
-2201		+ `SE_D47` or `SE_D48`: set to zero by definition
+2200		+ `D47` or `D48`: the standardized Δ4x value for this unknown
+2201		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
 2202
-2203		For each unknown sample:
+2203		For each anchor and unknown:
 2204
-2205		+ `D47` or `D48`: the standardized Δ4x value for this unknown
-2206		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
-2207
-2208		For each anchor and unknown:
-2209
-2210		+ `N`: the total number of analyses of this sample
-2211		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
-2212		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
-2213		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
-2214		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
-2215		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
-2216		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
-2217		'''
-2218		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
-2219		for sample in self.samples:
-2220			self.samples[sample]['N'] = len(self.samples[sample]['data'])
-2221			if self.samples[sample]['N'] > 1:
-2222				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
-2223
-2224			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
-2225			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
-2226
-2227			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
-2228			if len(D4x_pop) > 2:
-2229				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
-2230
-2231		if self.standardization_method == 'pooled':
-2232			for sample in self.anchors:
-2233				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2234				self.samples[sample][f'SE_D{self._4x}'] = 0.
-2235			for sample in self.unknowns:
-2236				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
-2237				try:
-2238					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
-2239				except ValueError:
-2240					# when `sample` is constrained by self.standardize(constraints = {...}),
-2241					# it is no longer listed in self.standardization.var_names.
-2242					# Temporary fix: define SE as zero for now
-2243					self.samples[sample][f'SE_D{self._4x}'] = 0.
-2244
-2245		elif self.standardization_method == 'indep_sessions':
-2246			for sample in self.anchors:
-2247				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
-2248				self.samples[sample][f'SE_D{self._4x}'] = 0.
-2249			for sample in self.unknowns:
-2250				self.msg(f'Consolidating sample {sample}')
-2251				self.unknowns[sample][f'session_D{self._4x}'] = {}
-2252				session_avg = []
-2253				for session in self.sessions:
-2254					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
-2255					if sdata:
-2256						self.msg(f'{sample} found in session {session}')
-2257						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
-2258						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
-2259						# !! TODO: sigma_s below does not account for temporal changes in standardization error
-2260						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
-2261						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
-2262						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
-2263						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
-2264				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
-2265				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
-2266				wsum = sum([weights[s] for s in weights])
-2267				for s in weights:
-2268					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
+2205		+ `N`: the total number of analyses of this sample
+2206		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
+2207		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
+2208		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
+2209		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
+2210		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
+2211		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
+2212		'''
+2213		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
+2214		for sample in self.samples:
+2215			self.samples[sample]['N'] = len(self.samples[sample]['data'])
+2216			if self.samples[sample]['N'] > 1:
+2217				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
+2218
+2219			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
+2220			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
+2221
+2222			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
+2223			if len(D4x_pop) > 2:
+2224				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
+2225
+2226		if self.standardization_method == 'pooled':
+2227			for sample in self.anchors:
+2228				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2229				self.samples[sample][f'SE_D{self._4x}'] = 0.
+2230			for sample in self.unknowns:
+2231				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
+2232				try:
+2233					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
+2234				except ValueError:
+2235					# when `sample` is constrained by self.standardize(constraints = {...}),
+2236					# it is no longer listed in self.standardization.var_names.
+2237					# Temporary fix: define SE as zero for now
+2238					self.samples[sample][f'SE_D{self._4x}'] = 0.
+2239
+2240		elif self.standardization_method == 'indep_sessions':
+2241			for sample in self.anchors:
+2242				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
+2243				self.samples[sample][f'SE_D{self._4x}'] = 0.
+2244			for sample in self.unknowns:
+2245				self.msg(f'Consolidating sample {sample}')
+2246				self.unknowns[sample][f'session_D{self._4x}'] = {}
+2247				session_avg = []
+2248				for session in self.sessions:
+2249					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
+2250					if sdata:
+2251						self.msg(f'{sample} found in session {session}')
+2252						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
+2253						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
+2254						# !! TODO: sigma_s below does not account for temporal changes in standardization error
+2255						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
+2256						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
+2257						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
+2258						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
+2259				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
+2260				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
+2261				wsum = sum([weights[s] for s in weights])
+2262				for s in weights:
+2263					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
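In the `indep_sessions` branch above, the per-session averages of an unknown are combined by inverse-variance weighting (`w_avg`), and each session's normalized weight is stored alongside its average and SE. A standalone sketch with toy numbers:

```python
import numpy as np

# per-session (D47 average, SE) pairs for one unknown (toy numbers)
session_avg = [(0.251, 0.008), (0.247, 0.012), (0.255, 0.010)]
X = np.array([v for v, se in session_avg])
sX = np.array([se for v, se in session_avg])

w = sX**-2                       # inverse-variance weights
D47 = (w * X).sum() / w.sum()    # weighted average
SE = w.sum()**-.5                # SE of the weighted average
print(f'{D47:.4f} ± {SE:.4f}')
print(w / w.sum())               # normalized weights, as stored per session above
```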
 
@@ -9664,127 +9659,127 @@

API Documentation

-
2271	def consolidate_sessions(self):
-2272		'''
-2273		Compute various statistics for each session.
-2274
-2275		+ `Na`: Number of anchor analyses in the session
-2276		+ `Nu`: Number of unknown analyses in the session
-2277		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
-2278		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
-2279		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
-2280		+ `a`: scrambling factor
-2281		+ `b`: compositional slope
-2282		+ `c`: WG offset
-2283		+ `SE_a`: Model standard error of `a`
-2284		+ `SE_b`: Model standard error of `b`
-2285		+ `SE_c`: Model standard error of `c`
-2286		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
-2287		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
-2288		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
-2289		+ `a2`: scrambling factor drift
-2290		+ `b2`: compositional slope drift
-2291		+ `c2`: WG offset drift
-2292		+ `Np`: Number of standardization parameters to fit
-2293		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
-2294		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
-2295		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
-2296		'''
-2297		for session in self.sessions:
-2298			if 'd13Cwg_VPDB' not in self.sessions[session]:
-2299				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
-2300			if 'd18Owg_VSMOW' not in self.sessions[session]:
-2301				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
-2302			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
-2303			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
+            
2266	def consolidate_sessions(self):
+2267		'''
+2268		Compute various statistics for each session.
+2269
+2270		+ `Na`: Number of anchor analyses in the session
+2271		+ `Nu`: Number of unknown analyses in the session
+2272		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
+2273		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
+2274		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
+2275		+ `a`: scrambling factor
+2276		+ `b`: compositional slope
+2277		+ `c`: WG offset
+2278		+ `SE_a`: Model standard error of `a`
+2279		+ `SE_b`: Model standard error of `b`
+2280		+ `SE_c`: Model standard error of `c`
+2281		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
+2282		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
+2283		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
+2284		+ `a2`: scrambling factor drift
+2285		+ `b2`: compositional slope drift
+2286		+ `c2`: WG offset drift
+2287		+ `Np`: Number of standardization parameters to fit
+2288		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
+2289		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
+2290		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
+2291		'''
+2292		for session in self.sessions:
+2293			if 'd13Cwg_VPDB' not in self.sessions[session]:
+2294				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
+2295			if 'd18Owg_VSMOW' not in self.sessions[session]:
+2296				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
+2297			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
+2298			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
+2299
+2300			self.msg(f'Computing repeatabilities for session {session}')
+2301			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
+2302			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
+2303			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
 2304
-2305			self.msg(f'Computing repeatabilities for session {session}')
-2306			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
-2307			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
-2308			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
-2309
-2310		if self.standardization_method == 'pooled':
-2311			for session in self.sessions:
-2312
-2313				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
-2314				i = self.standardization.var_names.index(f'a_{pf(session)}')
-2315				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
-2316
-2317				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
-2318				i = self.standardization.var_names.index(f'b_{pf(session)}')
-2319				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
-2320
-2321				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
-2322				i = self.standardization.var_names.index(f'c_{pf(session)}')
-2323				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
-2324
-2325				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
-2326				if self.sessions[session]['scrambling_drift']:
-2327					i = self.standardization.var_names.index(f'a2_{pf(session)}')
-2328					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
-2329				else:
-2330					self.sessions[session]['SE_a2'] = 0.
-2331
-2332				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
-2333				if self.sessions[session]['slope_drift']:
-2334					i = self.standardization.var_names.index(f'b2_{pf(session)}')
-2335					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
-2336				else:
-2337					self.sessions[session]['SE_b2'] = 0.
-2338
-2339				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
-2340				if self.sessions[session]['wg_drift']:
-2341					i = self.standardization.var_names.index(f'c2_{pf(session)}')
-2342					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
-2343				else:
-2344					self.sessions[session]['SE_c2'] = 0.
-2345
-2346				i = self.standardization.var_names.index(f'a_{pf(session)}')
-2347				j = self.standardization.var_names.index(f'b_{pf(session)}')
-2348				k = self.standardization.var_names.index(f'c_{pf(session)}')
-2349				CM = np.zeros((6,6))
-2350				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
-2351				try:
-2352					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
-2353					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
-2354					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
-2355					try:
-2356						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
-2357						CM[3,4] = self.standardization.covar[i2,j2]
-2358						CM[4,3] = self.standardization.covar[j2,i2]
-2359					except ValueError:
-2360						pass
-2361					try:
-2362						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2363						CM[3,5] = self.standardization.covar[i2,k2]
-2364						CM[5,3] = self.standardization.covar[k2,i2]
-2365					except ValueError:
-2366						pass
-2367				except ValueError:
-2368					pass
-2369				try:
-2370					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
-2371					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
-2372					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
-2373					try:
-2374						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2375						CM[4,5] = self.standardization.covar[j2,k2]
-2376						CM[5,4] = self.standardization.covar[k2,j2]
-2377					except ValueError:
-2378						pass
-2379				except ValueError:
-2380					pass
-2381				try:
-2382					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
-2383					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
-2384					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
-2385				except ValueError:
-2386					pass
-2387
-2388				self.sessions[session]['CM'] = CM
-2389
-2390		elif self.standardization_method == 'indep_sessions':
-2391			pass # Not implemented yet
+2305		if self.standardization_method == 'pooled':
+2306			for session in self.sessions:
+2307
+2308				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
+2309				i = self.standardization.var_names.index(f'a_{pf(session)}')
+2310				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
+2311
+2312				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
+2313				i = self.standardization.var_names.index(f'b_{pf(session)}')
+2314				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
+2315
+2316				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
+2317				i = self.standardization.var_names.index(f'c_{pf(session)}')
+2318				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
+2319
+2320				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
+2321				if self.sessions[session]['scrambling_drift']:
+2322					i = self.standardization.var_names.index(f'a2_{pf(session)}')
+2323					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
+2324				else:
+2325					self.sessions[session]['SE_a2'] = 0.
+2326
+2327				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
+2328				if self.sessions[session]['slope_drift']:
+2329					i = self.standardization.var_names.index(f'b2_{pf(session)}')
+2330					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
+2331				else:
+2332					self.sessions[session]['SE_b2'] = 0.
+2333
+2334				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
+2335				if self.sessions[session]['wg_drift']:
+2336					i = self.standardization.var_names.index(f'c2_{pf(session)}')
+2337					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
+2338				else:
+2339					self.sessions[session]['SE_c2'] = 0.
+2340
+2341				i = self.standardization.var_names.index(f'a_{pf(session)}')
+2342				j = self.standardization.var_names.index(f'b_{pf(session)}')
+2343				k = self.standardization.var_names.index(f'c_{pf(session)}')
+2344				CM = np.zeros((6,6))
+2345				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
+2346				try:
+2347					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
+2348					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
+2349					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
+2350					try:
+2351						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
+2352						CM[3,4] = self.standardization.covar[i2,j2]
+2353						CM[4,3] = self.standardization.covar[j2,i2]
+2354					except ValueError:
+2355						pass
+2356					try:
+2357						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2358						CM[3,5] = self.standardization.covar[i2,k2]
+2359						CM[5,3] = self.standardization.covar[k2,i2]
+2360					except ValueError:
+2361						pass
+2362				except ValueError:
+2363					pass
+2364				try:
+2365					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
+2366					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
+2367					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
+2368					try:
+2369						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2370						CM[4,5] = self.standardization.covar[j2,k2]
+2371						CM[5,4] = self.standardization.covar[k2,j2]
+2372					except ValueError:
+2373						pass
+2374				except ValueError:
+2375					pass
+2376				try:
+2377					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
+2378					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
+2379					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
+2380				except ValueError:
+2381					pass
+2382
+2383				self.sessions[session]['CM'] = CM
+2384
+2385		elif self.standardization_method == 'indep_sessions':
+2386			pass # Not implemented yet
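The `pooled` branch above reassembles a fixed-order 6 × 6 covariance matrix from whichever parameters were actually free in the fit, leaving the rows and columns of unfitted drift terms at zero. A minimal sketch of that embedding, with a made-up 3 × 3 fit covariance:

```python
import numpy as np

# Hypothetical fit where only a, b, c were free parameters
var_names = ['a_s1', 'b_s1', 'c_s1']
covar = np.diag([1e-4, 1e-8, 1e-4])            # made-up 3x3 fit covariance

CM = np.zeros((6, 6))                          # fixed order: a, b, c, a2, b2, c2
idx = [var_names.index(p) for p in ('a_s1', 'b_s1', 'c_s1')]
CM[:3, :3] = covar[np.ix_(idx, idx)]           # drift rows/columns remain zero
print(CM[3, 3])                                # 0.0: no scrambling drift fitted
```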
 
@@ -9829,19 +9824,19 @@

API Documentation

-
2394	@make_verbal
-2395	def repeatabilities(self):
-2396		'''
-2397		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
-2398		(for all samples, for anchors, and for unknowns).
-2399		'''
-2400		self.msg('Computing repeatabilities for all sessions')
-2401
-2402		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
-2403		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
-2404		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
-2405		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
-2406		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
+            
2389	@make_verbal
+2390	def repeatabilities(self):
+2391		'''
+2392		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
+2393		(for all samples, for anchors, and for unknowns).
+2394		'''
+2395		self.msg('Computing repeatabilities for all sessions')
+2396
+2397		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
+2398		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
+2399		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
+2400		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
+2401		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
 
@@ -9863,23 +9858,23 @@

API Documentation

-
2409	@make_verbal
-2410	def consolidate(self, tables = True, plots = True):
-2411		'''
-2412		Collect information about samples, sessions and repeatabilities.
-2413		'''
-2414		self.consolidate_samples()
-2415		self.consolidate_sessions()
-2416		self.repeatabilities()
-2417
-2418		if tables:
-2419			self.summary()
-2420			self.table_of_sessions()
-2421			self.table_of_analyses()
-2422			self.table_of_samples()
-2423
-2424		if plots:
-2425			self.plot_sessions()
+            
2404	@make_verbal
+2405	def consolidate(self, tables = True, plots = True):
+2406		'''
+2407		Collect information about samples, sessions and repeatabilities.
+2408		'''
+2409		self.consolidate_samples()
+2410		self.consolidate_sessions()
+2411		self.repeatabilities()
+2412
+2413		if tables:
+2414			self.summary()
+2415			self.table_of_sessions()
+2416			self.table_of_analyses()
+2417			self.table_of_samples()
+2418
+2419		if plots:
+2420			self.plot_sessions()
 
@@ -9900,40 +9895,40 @@

API Documentation

-
2428	@make_verbal
-2429	def rmswd(self,
-2430		samples = 'all samples',
-2431		sessions = 'all sessions',
-2432		):
-2433		'''
-2434		Compute the χ2, root mean squared weighted deviation
-2435		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
-2436		Δ4x values for samples in `samples` and sessions in `sessions`.
-2437		
-2438		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
-2439		'''
-2440		if samples == 'all samples':
-2441			mysamples = [k for k in self.samples]
-2442		elif samples == 'anchors':
-2443			mysamples = [k for k in self.anchors]
-2444		elif samples == 'unknowns':
-2445			mysamples = [k for k in self.unknowns]
-2446		else:
-2447			mysamples = samples
-2448
-2449		if sessions == 'all sessions':
-2450			sessions = [k for k in self.sessions]
-2451
-2452		chisq, Nf = 0, 0
-2453		for sample in mysamples :
-2454			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2455			if len(G) > 1 :
-2456				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
-2457				Nf += (len(G) - 1)
-2458				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
-2459		r = (chisq / Nf)**.5 if Nf > 0 else 0
-2460		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
-2461		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
+            
2423	@make_verbal
+2424	def rmswd(self,
+2425		samples = 'all samples',
+2426		sessions = 'all sessions',
+2427		):
+2428		'''
+2429		Compute the χ2, root mean squared weighted deviation
+2430		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
+2431		Δ4x values for samples in `samples` and sessions in `sessions`.
+2432		
+2433		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
+2434		'''
+2435		if samples == 'all samples':
+2436			mysamples = [k for k in self.samples]
+2437		elif samples == 'anchors':
+2438			mysamples = [k for k in self.anchors]
+2439		elif samples == 'unknowns':
+2440			mysamples = [k for k in self.unknowns]
+2441		else:
+2442			mysamples = samples
+2443
+2444		if sessions == 'all sessions':
+2445			sessions = [k for k in self.sessions]
+2446
+2447		chisq, Nf = 0, 0
+2448		for sample in mysamples :
+2449			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2450			if len(G) > 1 :
+2451				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
+2452				Nf += (len(G) - 1)
+2453				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
+2454		r = (chisq / Nf)**.5 if Nf > 0 else 0
+2455		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
+2456		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
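The RMSWD computed above is the square root of χ² per degree of freedom, with deviations taken from each sample's weighted average; a standalone sketch with toy numbers:

```python
import numpy as np

# replicate D47 values and weights for a single sample (toy numbers)
D47 = np.array([0.250, 0.256, 0.247])
wD47 = np.array([0.010, 0.010, 0.010])

X = (D47 * wD47**-2).sum() / (wD47**-2).sum()  # weighted sample average
chisq = (((D47 - X) / wD47)**2).sum()
Nf = len(D47) - 1
print((chisq / Nf)**.5)                        # RMSWD
```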
 
@@ -9958,52 +9953,52 @@

API Documentation

-
2464	@make_verbal
-2465	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
-2466		'''
-2467		Compute the repeatability of `[r[key] for r in self]`
-2468		'''
-2469		# NB: it's debatable whether rD47 should be computed
-2470		# with Nf = len(self)-len(self.samples) instead of
-2471		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
-2472
-2473		if samples == 'all samples':
-2474			mysamples = [k for k in self.samples]
-2475		elif samples == 'anchors':
-2476			mysamples = [k for k in self.anchors]
-2477		elif samples == 'unknowns':
-2478			mysamples = [k for k in self.unknowns]
-2479		else:
-2480			mysamples = samples
-2481
-2482		if sessions == 'all sessions':
-2483			sessions = [k for k in self.sessions]
-2484
-2485		if key in ['D47', 'D48']:
-2486			chisq, Nf = 0, 0
-2487			for sample in mysamples :
-2488				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2489				if len(X) > 1 :
-2490					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
-2491					if sample in self.unknowns:
-2492						Nf += len(X) - 1
-2493					else:
-2494						Nf += len(X)
-2495			if samples in ['anchors', 'all samples']:
-2496				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
-2497			r = (chisq / Nf)**.5 if Nf > 0 else 0
-2498
-2499		else: # if key not in ['D47', 'D48']
-2500			chisq, Nf = 0, 0
-2501			for sample in mysamples :
-2502				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
-2503				if len(X) > 1 :
-2504					Nf += len(X) - 1
-2505					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
-2506			r = (chisq / Nf)**.5 if Nf > 0 else 0
-2507
-2508		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
-2509		return r
+            
2459	@make_verbal
+2460	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
+2461		'''
+2462		Compute the repeatability of `[r[key] for r in self]`
+2463		'''
+2464		# NB: it's debatable whether rD47 should be computed
+2465		# with Nf = len(self)-len(self.samples) instead of
+2466		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
+2467
+2468		if samples == 'all samples':
+2469			mysamples = [k for k in self.samples]
+2470		elif samples == 'anchors':
+2471			mysamples = [k for k in self.anchors]
+2472		elif samples == 'unknowns':
+2473			mysamples = [k for k in self.unknowns]
+2474		else:
+2475			mysamples = samples
+2476
+2477		if sessions == 'all sessions':
+2478			sessions = [k for k in self.sessions]
+2479
+2480		if key in ['D47', 'D48']:
+2481			chisq, Nf = 0, 0
+2482			for sample in mysamples :
+2483				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2484				if len(X) > 1 :
+2485					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
+2486					if sample in self.unknowns:
+2487						Nf += len(X) - 1
+2488					else:
+2489						Nf += len(X)
+2490			if samples in ['anchors', 'all samples']:
+2491				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
+2492			r = (chisq / Nf)**.5 if Nf > 0 else 0
+2493
+2494		else: # if key not in ['D47', 'D48']
+2495			chisq, Nf = 0, 0
+2496			for sample in mysamples :
+2497				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
+2498				if len(X) > 1 :
+2499					Nf += len(X) - 1
+2500					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
+2501			r = (chisq / Nf)**.5 if Nf > 0 else 0
+2502
+2503		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
+2504		return r
 
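`compute_r()` treats `D47`/`D48` differently from other keys: for the former, anchor replicates contribute `len(X)` degrees of freedom (their nominal values are fixed, so no mean is estimated), less the `Np` fitted parameters per session; for everything else each sample loses one degree of freedom to its own mean. A hedged sketch:

```python
# External repeatability of D47 across all samples:
r_D47 = mydata.compute_r('D47')  # `mydata`: hypothetical standardized D47data

# Any other column falls back to deviations from per-sample means;
# the key 'd13C_VPDB' is assumed to be present in the analyses.
r_d13C = mydata.compute_r('d13C_VPDB', samples = 'unknowns')
```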
@@ -10023,46 +10018,46 @@

API Documentation

-
2511	def sample_average(self, samples, weights = 'equal', normalize = True):
-2512		'''
-2513		Weighted average Δ4x value of a group of samples, accounting for covariance.
-2514
-2515		Returns the weighed average Δ4x value and associated SE
-2516		of a group of samples. Weights are equal by default. If `normalize` is
-2517		true, `weights` will be rescaled so that their sum equals 1.
-2518
-2519		**Examples**
-2520
-2521		```python
-2522		self.sample_average(['X','Y'], [1, 2])
-2523		```
-2524
-2525		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
-2526		where Δ4x(X) and Δ4x(Y) are the average Δ4x
-2527		values of samples X and Y, respectively.
-2528
-2529		```python
-2530		self.sample_average(['X','Y'], [1, -1], normalize = False)
-2531		```
+            
2506	def sample_average(self, samples, weights = 'equal', normalize = True):
+2507		'''
+2508		Weighted average Δ4x value of a group of samples, accounting for covariance.
+2509
+2510		Returns the weighted average Δ4x value and associated SE
+2511		of a group of samples. Weights are equal by default. If `normalize` is
+2512		`True`, `weights` will be rescaled so that their sum equals 1.
+2513
+2514		**Examples**
+2515
+2516		```python
+2517		self.sample_average(['X','Y'], [1, 2])
+2518		```
+2519
+2520		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
+2521		where Δ4x(X) and Δ4x(Y) are the average Δ4x
+2522		values of samples X and Y, respectively.
+2523
+2524		```python
+2525		self.sample_average(['X','Y'], [1, -1], normalize = False)
+2526		```
+2527
+2528		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
+2529		'''
+2530		if weights == 'equal':
+2531			weights = [1/len(samples)] * len(samples)
 2532
-2533		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
-2534		'''
-2535		if weights == 'equal':
-2536			weights = [1/len(samples)] * len(samples)
+2533		if normalize:
+2534			s = sum(weights)
+2535			if s:
+2536				weights = [w/s for w in weights]
 2537
-2538		if normalize:
-2539			s = sum(weights)
-2540			if s:
-2541				weights = [w/s for w in weights]
-2542
-2543		try:
-2544# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
-2545# 			C = self.standardization.covar[indices,:][:,indices]
-2546			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
-2547			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
-2548			return correlated_sum(X, C, weights)
-2549		except ValueError:
-2550			return (0., 0.)
+2538		try:
+2539# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
+2540# 			C = self.standardization.covar[indices,:][:,indices]
+2541			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
+2542			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
+2543			return correlated_sum(X, C, weights)
+2544		except ValueError:
+2545			return (0., 0.)
 
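The second docstring example (weights `[1, -1]`, `normalize = False`) is the common way to test whether two samples differ. Spelled out with hypothetical sample names:

```python
# Value and SE of D47(FOO) - D47(BAR), with error covariance accounted for.
# Falls back to (0., 0.) if the lookup raises ValueError.
diff, se = mydata.sample_average(['FOO', 'BAR'], [1, -1], normalize = False)
print(f'{diff:.4f} ± {se:.4f} ‰')
```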
@@ -10104,44 +10099,44 @@

API Documentation

-
2553	def sample_D4x_covar(self, sample1, sample2 = None):
-2554		'''
-2555		Covariance between Δ4x values of samples
-2556
-2557		Returns the error covariance between the average Δ4x values of two
-2558		samples. If if only `sample_1` is specified, or if `sample_1 == sample_2`),
-2559		returns the Δ4x variance for that sample.
-2560		'''
-2561		if sample2 is None:
-2562			sample2 = sample1
-2563		if self.standardization_method == 'pooled':
-2564			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
-2565			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
-2566			return self.standardization.covar[i, j]
-2567		elif self.standardization_method == 'indep_sessions':
-2568			if sample1 == sample2:
-2569				return self.samples[sample1][f'SE_D{self._4x}']**2
-2570			else:
-2571				c = 0
-2572				for session in self.sessions:
-2573					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
-2574					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
-2575					if sdata1 and sdata2:
-2576						a = self.sessions[session]['a']
-2577						# !! TODO: CM below does not account for temporal changes in standardization parameters
-2578						CM = self.sessions[session]['CM'][:3,:3]
-2579						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
-2580						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
-2581						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
-2582						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
-2583						c += (
-2584							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
-2585							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
-2586							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
-2587							@ CM
-2588							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
-2589							) / a**2
-2590				return float(c)
+            
2548	def sample_D4x_covar(self, sample1, sample2 = None):
+2549		'''
+2550		Covariance between Δ4x values of samples
+2551
+2552		Returns the error covariance between the average Δ4x values of two
+2553		samples. If only `sample1` is specified, or if `sample1 == sample2`,
+2554		returns the Δ4x variance for that sample.
+2555		'''
+2556		if sample2 is None:
+2557			sample2 = sample1
+2558		if self.standardization_method == 'pooled':
+2559			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
+2560			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
+2561			return self.standardization.covar[i, j]
+2562		elif self.standardization_method == 'indep_sessions':
+2563			if sample1 == sample2:
+2564				return self.samples[sample1][f'SE_D{self._4x}']**2
+2565			else:
+2566				c = 0
+2567				for session in self.sessions:
+2568					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
+2569					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
+2570					if sdata1 and sdata2:
+2571						a = self.sessions[session]['a']
+2572						# !! TODO: CM below does not account for temporal changes in standardization parameters
+2573						CM = self.sessions[session]['CM'][:3,:3]
+2574						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
+2575						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
+2576						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
+2577						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
+2578						c += (
+2579							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
+2580							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
+2581							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
+2582							@ CM
+2583							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
+2584							) / a**2
+2585				return float(c)
 
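With the `'pooled'` method the covariance is read directly from the standardization fit; with `'indep_sessions'` it is reassembled session by session from each session's `CM` matrix. Call patterns, with hypothetical sample names:

```python
var_foo = mydata.sample_D4x_covar('FOO')         # variance: sample2 defaults to sample1
cov     = mydata.sample_D4x_covar('FOO', 'BAR')  # covariance between two samples
se_foo  = var_foo ** 0.5                         # SE is the square root of the variance
```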
@@ -10165,19 +10160,19 @@

API Documentation

-
2592	def sample_D4x_correl(self, sample1, sample2 = None):
-2593		'''
-2594		Correlation between Δ4x errors of samples
-2595
-2596		Returns the error correlation between the average Δ4x values of two samples.
-2597		'''
-2598		if sample2 is None or sample2 == sample1:
-2599			return 1.
-2600		return (
-2601			self.sample_D4x_covar(sample1, sample2)
-2602			/ self.unknowns[sample1][f'SE_D{self._4x}']
-2603			/ self.unknowns[sample2][f'SE_D{self._4x}']
-2604			)
+            
2587	def sample_D4x_correl(self, sample1, sample2 = None):
+2588		'''
+2589		Correlation between Δ4x errors of samples
+2590
+2591		Returns the error correlation between the average Δ4x values of two samples.
+2592		'''
+2593		if sample2 is None or sample2 == sample1:
+2594			return 1.
+2595		return (
+2596			self.sample_D4x_covar(sample1, sample2)
+2597			/ self.unknowns[sample1][f'SE_D{self._4x}']
+2598			/ self.unknowns[sample2][f'SE_D{self._4x}']
+2599			)
 
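`sample_D4x_correl()` simply divides that covariance by the two standard errors; note that it reads the SEs from `self.unknowns`, so both samples must be unknowns. Sketch:

```python
# Error correlation between two hypothetical unknowns (1.0 for a sample with itself):
rho = mydata.sample_D4x_correl('FOO', 'BAR')
```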
@@ -10199,104 +10194,104 @@

API Documentation

-
2606	def plot_single_session(self,
-2607		session,
-2608		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
-2609		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
-2610		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
-2611		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
-2612		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
-2613		xylimits = 'free', # | 'constant'
-2614		x_label = None,
-2615		y_label = None,
-2616		error_contour_interval = 'auto',
-2617		fig = 'new',
-2618		):
-2619		'''
-2620		Generate plot for a single session
-2621		'''
-2622		if x_label is None:
-2623			x_label = f'δ$_{{{self._4x}}}$ (‰)'
-2624		if y_label is None:
-2625			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
-2626
-2627		out = _SessionPlot()
-2628		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
-2629		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
-2630		
-2631		if fig == 'new':
-2632			out.fig = ppl.figure(figsize = (6,6))
-2633			ppl.subplots_adjust(.1,.1,.9,.9)
-2634
-2635		out.anchor_analyses, = ppl.plot(
-2636			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
-2637			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
-2638			**kw_plot_anchors)
-2639		out.unknown_analyses, = ppl.plot(
-2640			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
-2641			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
-2642			**kw_plot_unknowns)
-2643		out.anchor_avg = ppl.plot(
-2644			np.array([ np.array([
-2645				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
-2646				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
-2647				]) for sample in anchors]).T,
-2648			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
-2649			**kw_plot_anchor_avg)
-2650		out.unknown_avg = ppl.plot(
-2651			np.array([ np.array([
-2652				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
-2653				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
-2654				]) for sample in unknowns]).T,
-2655			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
-2656			**kw_plot_unknown_avg)
-2657		if xylimits == 'constant':
-2658			x = [r[f'd{self._4x}'] for r in self]
-2659			y = [r[f'D{self._4x}'] for r in self]
-2660			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
-2661			w, h = x2-x1, y2-y1
-2662			x1 -= w/20
-2663			x2 += w/20
-2664			y1 -= h/20
-2665			y2 += h/20
-2666			ppl.axis([x1, x2, y1, y2])
-2667		elif xylimits == 'free':
-2668			x1, x2, y1, y2 = ppl.axis()
-2669		else:
-2670			x1, x2, y1, y2 = ppl.axis(xylimits)
-2671				
-2672		if error_contour_interval != 'none':
-2673			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
-2674			XI,YI = np.meshgrid(xi, yi)
-2675			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
-2676			if error_contour_interval == 'auto':
-2677				rng = np.max(SI) - np.min(SI)
-2678				if rng <= 0.01:
-2679					cinterval = 0.001
-2680				elif rng <= 0.03:
-2681					cinterval = 0.004
-2682				elif rng <= 0.1:
-2683					cinterval = 0.01
-2684				elif rng <= 0.3:
-2685					cinterval = 0.03
-2686				elif rng <= 1.:
-2687					cinterval = 0.1
-2688				else:
-2689					cinterval = 0.5
-2690			else:
-2691				cinterval = error_contour_interval
-2692
-2693			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
-2694			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
-2695			out.clabel = ppl.clabel(out.contour)
-2696
-2697		ppl.xlabel(x_label)
-2698		ppl.ylabel(y_label)
-2699		ppl.title(session, weight = 'bold')
-2700		ppl.grid(alpha = .2)
-2701		out.ax = ppl.gca()		
-2702
-2703		return out
+            
2601	def plot_single_session(self,
+2602		session,
+2603		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
+2604		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
+2605		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
+2606		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
+2607		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
+2608		xylimits = 'free', # | 'constant'
+2609		x_label = None,
+2610		y_label = None,
+2611		error_contour_interval = 'auto',
+2612		fig = 'new',
+2613		):
+2614		'''
+2615		Generate plot for a single session
+2616		'''
+2617		if x_label is None:
+2618			x_label = f'δ$_{{{self._4x}}}$ (‰)'
+2619		if y_label is None:
+2620			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
+2621
+2622		out = _SessionPlot()
+2623		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
+2624		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
+2625		
+2626		if fig == 'new':
+2627			out.fig = ppl.figure(figsize = (6,6))
+2628			ppl.subplots_adjust(.1,.1,.9,.9)
+2629
+2630		out.anchor_analyses, = ppl.plot(
+2631			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
+2632			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
+2633			**kw_plot_anchors)
+2634		out.unknown_analyses, = ppl.plot(
+2635			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
+2636			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
+2637			**kw_plot_unknowns)
+2638		out.anchor_avg = ppl.plot(
+2639			np.array([ np.array([
+2640				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
+2641				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
+2642				]) for sample in anchors]).T,
+2643			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
+2644			**kw_plot_anchor_avg)
+2645		out.unknown_avg = ppl.plot(
+2646			np.array([ np.array([
+2647				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
+2648				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
+2649				]) for sample in unknowns]).T,
+2650			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
+2651			**kw_plot_unknown_avg)
+2652		if xylimits == 'constant':
+2653			x = [r[f'd{self._4x}'] for r in self]
+2654			y = [r[f'D{self._4x}'] for r in self]
+2655			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
+2656			w, h = x2-x1, y2-y1
+2657			x1 -= w/20
+2658			x2 += w/20
+2659			y1 -= h/20
+2660			y2 += h/20
+2661			ppl.axis([x1, x2, y1, y2])
+2662		elif xylimits == 'free':
+2663			x1, x2, y1, y2 = ppl.axis()
+2664		else:
+2665			x1, x2, y1, y2 = ppl.axis(xylimits)
+2666				
+2667		if error_contour_interval != 'none':
+2668			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
+2669			XI,YI = np.meshgrid(xi, yi)
+2670			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
+2671			if error_contour_interval == 'auto':
+2672				rng = np.max(SI) - np.min(SI)
+2673				if rng <= 0.01:
+2674					cinterval = 0.001
+2675				elif rng <= 0.03:
+2676					cinterval = 0.004
+2677				elif rng <= 0.1:
+2678					cinterval = 0.01
+2679				elif rng <= 0.3:
+2680					cinterval = 0.03
+2681				elif rng <= 1.:
+2682					cinterval = 0.1
+2683				else:
+2684					cinterval = 0.5
+2685			else:
+2686				cinterval = error_contour_interval
+2687
+2688			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
+2689			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
+2690			out.clabel = ppl.clabel(out.contour)
+2691
+2692		ppl.xlabel(x_label)
+2693		ppl.ylabel(y_label)
+2694		ppl.title(session, weight = 'bold')
+2695		ppl.grid(alpha = .2)
+2696		out.ax = ppl.gca()		
+2697
+2698		return out
 
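Two usage sketches for `plot_single_session()`, assuming a session keyed `'Session01'` exists in `mydata.sessions`:

```python
out = mydata.plot_single_session('Session01', xylimits = 'constant')
out.fig.savefig('Session01.pdf')  # out.fig is only set when fig == 'new' (the default)

# Skip the (potentially slow) standardization-error contours:
out2 = mydata.plot_single_session('Session01', error_contour_interval = 'none')
```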
@@ -10316,193 +10311,193 @@

API Documentation

-
2705	def plot_residuals(
-2706		self,
-2707		hist = False,
-2708		binwidth = 2/3,
-2709		dir = 'output',
-2710		filename = None,
-2711		highlight = [],
-2712		colors = None,
-2713		figsize = None,
-2714		):
-2715		'''
-2716		Plot residuals of each analysis as a function of time (actually, as a function of
-2717		the order of analyses in the `D4xdata` object)
-2718
-2719		+ `hist`: whether to add a histogram of residuals
-2720		+ `histbins`: specify bin edges for the histogram
-2721		+ `dir`: the directory in which to save the plot
-2722		+ `highlight`: a list of samples to highlight
-2723		+ `colors`: a dict of `{<sample>: <color>}` for all samples
-2724		+ `figsize`: (width, height) of figure
-2725		'''
-2726		# Layout
-2727		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
-2728		if hist:
-2729			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
-2730			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
-2731		else:
-2732			ppl.subplots_adjust(.08,.05,.78,.8)
-2733			ax1 = ppl.subplot(111)
-2734		
-2735		# Colors
-2736		N = len(self.anchors)
-2737		if colors is None:
-2738			if len(highlight) > 0:
-2739				Nh = len(highlight)
-2740				if Nh == 1:
-2741					colors = {highlight[0]: (0,0,0)}
-2742				elif Nh == 3:
-2743					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
-2744				elif Nh == 4:
-2745					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
-2746				else:
-2747					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
-2748			else:
-2749				if N == 3:
-2750					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
-2751				elif N == 4:
-2752					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
-2753				else:
-2754					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
-2755
-2756		ppl.sca(ax1)
-2757		
-2758		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
-2759
-2760		session = self[0]['Session']
-2761		x1 = 0
-2762# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
-2763		x_sessions = {}
-2764		one_or_more_singlets = False
-2765		one_or_more_multiplets = False
-2766		multiplets = set()
-2767		for k,r in enumerate(self):
-2768			if r['Session'] != session:
-2769				x2 = k-1
-2770				x_sessions[session] = (x1+x2)/2
-2771				ppl.axvline(k - 0.5, color = 'k', lw = .5)
-2772				session = r['Session']
-2773				x1 = k
-2774			singlet = len(self.samples[r['Sample']]['data']) == 1
-2775			if not singlet:
-2776				multiplets.add(r['Sample'])
-2777			if r['Sample'] in self.unknowns:
-2778				if singlet:
-2779					one_or_more_singlets = True
-2780				else:
-2781					one_or_more_multiplets = True
-2782			kw = dict(
-2783				marker = 'x' if singlet else '+',
-2784				ms = 4 if singlet else 5,
-2785				ls = 'None',
-2786				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
-2787				mew = 1,
-2788				alpha = 0.2 if singlet else 1,
-2789				)
-2790			if highlight and r['Sample'] not in highlight:
-2791				kw['alpha'] = 0.2
-2792			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
-2793		x2 = k
-2794		x_sessions[session] = (x1+x2)/2
-2795
-2796		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
-2797		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
-2798		if not hist:
-2799			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
-2800			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
-2801
-2802		xmin, xmax, ymin, ymax = ppl.axis()
-2803		for s in x_sessions:
-2804			ppl.text(
-2805				x_sessions[s],
-2806				ymax +1,
-2807				s,
-2808				va = 'bottom',
-2809				**(
-2810					dict(ha = 'center')
-2811					if len(self.sessions[s]['data']) > (0.15 * len(self))
-2812					else dict(ha = 'left', rotation = 45)
-2813					)
-2814				)
-2815
-2816		if hist:
-2817			ppl.sca(ax2)
-2818
-2819		for s in colors:
-2820			kw['marker'] = '+'
-2821			kw['ms'] = 5
-2822			kw['mec'] = colors[s]
-2823			kw['label'] = s
-2824			kw['alpha'] = 1
-2825			ppl.plot([], [], **kw)
-2826
-2827		kw['mec'] = (0,0,0)
-2828
-2829		if one_or_more_singlets:
-2830			kw['marker'] = 'x'
-2831			kw['ms'] = 4
-2832			kw['alpha'] = .2
-2833			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
-2834			ppl.plot([], [], **kw)
-2835
-2836		if one_or_more_multiplets:
-2837			kw['marker'] = '+'
-2838			kw['ms'] = 4
-2839			kw['alpha'] = 1
-2840			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
-2841			ppl.plot([], [], **kw)
-2842
-2843		if hist:
-2844			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
-2845		else:
-2846			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
-2847		leg.set_zorder(-1000)
-2848
-2849		ppl.sca(ax1)
-2850
-2851		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
-2852		ppl.xticks([])
-2853		ppl.axis([-1, len(self), None, None])
-2854
-2855		if hist:
-2856			ppl.sca(ax2)
-2857			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
-2858			ppl.hist(
-2859				X,
-2860				orientation = 'horizontal',
-2861				histtype = 'stepfilled',
-2862				ec = [.4]*3,
-2863				fc = [.25]*3,
-2864				alpha = .25,
-2865				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
-2866				)
-2867			ppl.axis([None, None, ymin, ymax])
-2868			ppl.text(0, 0,
-2869				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
-2870				size = 8,
-2871				alpha = 1,
-2872				va = 'center',
-2873				ha = 'left',
-2874				)
-2875
-2876			ppl.xticks([])
-2877			ppl.yticks([])
-2878# 			ax2.spines['left'].set_visible(False)
-2879			ax2.spines['right'].set_visible(False)
-2880			ax2.spines['top'].set_visible(False)
-2881			ax2.spines['bottom'].set_visible(False)
-2882
-2883
-2884		if not os.path.exists(dir):
-2885			os.makedirs(dir)
-2886		if filename is None:
-2887			return fig
-2888		elif filename == '':
-2889			filename = f'D{self._4x}_residuals.pdf'
-2890		ppl.savefig(f'{dir}/{filename}')
-2891		ppl.close(fig)
+            
2700	def plot_residuals(
+2701		self,
+2702		hist = False,
+2703		binwidth = 2/3,
+2704		dir = 'output',
+2705		filename = None,
+2706		highlight = [],
+2707		colors = None,
+2708		figsize = None,
+2709		):
+2710		'''
+2711		Plot residuals of each analysis as a function of time (actually, as a function of
+2712		the order of analyses in the `D4xdata` object)
+2713
+2714		+ `hist`: whether to add a histogram of residuals
+2715		+ `binwidth`: bin width of the histogram, in units of the Δ47 repeatability (SD)
+2716		+ `dir`: the directory in which to save the plot
+2717		+ `highlight`: a list of samples to highlight
+2718		+ `colors`: a dict of `{<sample>: <color>}` for all samples
+2719		+ `figsize`: (width, height) of figure
+2720		'''
+2721		# Layout
+2722		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
+2723		if hist:
+2724			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
+2725			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
+2726		else:
+2727			ppl.subplots_adjust(.08,.05,.78,.8)
+2728			ax1 = ppl.subplot(111)
+2729		
+2730		# Colors
+2731		N = len(self.anchors)
+2732		if colors is None:
+2733			if len(highlight) > 0:
+2734				Nh = len(highlight)
+2735				if Nh == 1:
+2736					colors = {highlight[0]: (0,0,0)}
+2737				elif Nh == 3:
+2738					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
+2739				elif Nh == 4:
+2740					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
+2741				else:
+2742					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
+2743			else:
+2744				if N == 3:
+2745					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
+2746				elif N == 4:
+2747					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
+2748				else:
+2749					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
+2750
+2751		ppl.sca(ax1)
+2752		
+2753		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
+2754
+2755		session = self[0]['Session']
+2756		x1 = 0
+2757# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
+2758		x_sessions = {}
+2759		one_or_more_singlets = False
+2760		one_or_more_multiplets = False
+2761		multiplets = set()
+2762		for k,r in enumerate(self):
+2763			if r['Session'] != session:
+2764				x2 = k-1
+2765				x_sessions[session] = (x1+x2)/2
+2766				ppl.axvline(k - 0.5, color = 'k', lw = .5)
+2767				session = r['Session']
+2768				x1 = k
+2769			singlet = len(self.samples[r['Sample']]['data']) == 1
+2770			if not singlet:
+2771				multiplets.add(r['Sample'])
+2772			if r['Sample'] in self.unknowns:
+2773				if singlet:
+2774					one_or_more_singlets = True
+2775				else:
+2776					one_or_more_multiplets = True
+2777			kw = dict(
+2778				marker = 'x' if singlet else '+',
+2779				ms = 4 if singlet else 5,
+2780				ls = 'None',
+2781				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
+2782				mew = 1,
+2783				alpha = 0.2 if singlet else 1,
+2784				)
+2785			if highlight and r['Sample'] not in highlight:
+2786				kw['alpha'] = 0.2
+2787			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
+2788		x2 = k
+2789		x_sessions[session] = (x1+x2)/2
+2790
+2791		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
+2792		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
+2793		if not hist:
+2794			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
+2795			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
+2796
+2797		xmin, xmax, ymin, ymax = ppl.axis()
+2798		for s in x_sessions:
+2799			ppl.text(
+2800				x_sessions[s],
+2801				ymax +1,
+2802				s,
+2803				va = 'bottom',
+2804				**(
+2805					dict(ha = 'center')
+2806					if len(self.sessions[s]['data']) > (0.15 * len(self))
+2807					else dict(ha = 'left', rotation = 45)
+2808					)
+2809				)
+2810
+2811		if hist:
+2812			ppl.sca(ax2)
+2813
+2814		for s in colors:
+2815			kw['marker'] = '+'
+2816			kw['ms'] = 5
+2817			kw['mec'] = colors[s]
+2818			kw['label'] = s
+2819			kw['alpha'] = 1
+2820			ppl.plot([], [], **kw)
+2821
+2822		kw['mec'] = (0,0,0)
+2823
+2824		if one_or_more_singlets:
+2825			kw['marker'] = 'x'
+2826			kw['ms'] = 4
+2827			kw['alpha'] = .2
+2828			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
+2829			ppl.plot([], [], **kw)
+2830
+2831		if one_or_more_multiplets:
+2832			kw['marker'] = '+'
+2833			kw['ms'] = 4
+2834			kw['alpha'] = 1
+2835			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
+2836			ppl.plot([], [], **kw)
+2837
+2838		if hist:
+2839			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
+2840		else:
+2841			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
+2842		leg.set_zorder(-1000)
+2843
+2844		ppl.sca(ax1)
+2845
+2846		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
+2847		ppl.xticks([])
+2848		ppl.axis([-1, len(self), None, None])
+2849
+2850		if hist:
+2851			ppl.sca(ax2)
+2852			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
+2853			ppl.hist(
+2854				X,
+2855				orientation = 'horizontal',
+2856				histtype = 'stepfilled',
+2857				ec = [.4]*3,
+2858				fc = [.25]*3,
+2859				alpha = .25,
+2860				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
+2861				)
+2862			ppl.axis([None, None, ymin, ymax])
+2863			ppl.text(0, 0,
+2864				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
+2865				size = 8,
+2866				alpha = 1,
+2867				va = 'center',
+2868				ha = 'left',
+2869				)
+2870
+2871			ppl.xticks([])
+2872			ppl.yticks([])
+2873# 			ax2.spines['left'].set_visible(False)
+2874			ax2.spines['right'].set_visible(False)
+2875			ax2.spines['top'].set_visible(False)
+2876			ax2.spines['bottom'].set_visible(False)
+2877
+2878
+2879		if not os.path.exists(dir):
+2880			os.makedirs(dir)
+2881		if filename is None:
+2882			return fig
+2883		elif filename == '':
+2884			filename = f'D{self._4x}_residuals.pdf'
+2885		ppl.savefig(f'{dir}/{filename}')
+2886		ppl.close(fig)
 
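The `filename` logic at the end of the hunk is worth spelling out: `None` (the default) returns the figure without saving, while an empty string saves under a default name. Sketch:

```python
fig = mydata.plot_residuals(hist = True)           # filename = None: returns the Figure
mydata.plot_residuals(hist = True, filename = '')  # saves to output/D47_residuals.pdf
```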
@@ -10532,11 +10527,11 @@

API Documentation

-
2894	def simulate(self, *args, **kwargs):
-2895		'''
-2896		Legacy function with warning message pointing to `virtual_data()`
-2897		'''
-2898		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
+            
2889	def simulate(self, *args, **kwargs):
+2890		'''
+2891		Legacy function with warning message pointing to `virtual_data()`
+2892		'''
+2893		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
 
@@ -10556,81 +10551,81 @@

API Documentation

-
2900	def plot_distribution_of_analyses(
-2901		self,
-2902		dir = 'output',
-2903		filename = None,
-2904		vs_time = False,
-2905		figsize = (6,4),
-2906		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
-2907		output = None,
-2908		):
-2909		'''
-2910		Plot temporal distribution of all analyses in the data set.
-2911		
-2912		**Parameters**
-2913
-2914		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
-2915		'''
-2916
-2917		asamples = [s for s in self.anchors]
-2918		usamples = [s for s in self.unknowns]
-2919		if output is None or output == 'fig':
-2920			fig = ppl.figure(figsize = figsize)
-2921			ppl.subplots_adjust(*subplots_adjust)
-2922		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
-2923		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
-2924		Xmax += (Xmax-Xmin)/40
-2925		Xmin -= (Xmax-Xmin)/41
-2926		for k, s in enumerate(asamples + usamples):
-2927			if vs_time:
-2928				X = [r['TimeTag'] for r in self if r['Sample'] == s]
-2929			else:
-2930				X = [x for x,r in enumerate(self) if r['Sample'] == s]
-2931			Y = [-k for x in X]
-2932			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
-2933			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
-2934			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
-2935		ppl.axis([Xmin, Xmax, -k-1, 1])
-2936		ppl.xlabel('\ntime')
-2937		ppl.gca().annotate('',
-2938			xy = (0.6, -0.02),
-2939			xycoords = 'axes fraction',
-2940			xytext = (.4, -0.02), 
-2941            arrowprops = dict(arrowstyle = "->", color = 'k'),
-2942            )
-2943			
-2944
-2945		x2 = -1
-2946		for session in self.sessions:
-2947			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
-2948			if vs_time:
-2949				ppl.axvline(x1, color = 'k', lw = .75)
-2950			if x2 > -1:
-2951				if not vs_time:
-2952					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
-2953			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
-2954# 			from xlrd import xldate_as_datetime
-2955# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
-2956			if vs_time:
-2957				ppl.axvline(x2, color = 'k', lw = .75)
-2958				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
-2959			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
-2960
-2961		ppl.xticks([])
-2962		ppl.yticks([])
-2963
-2964		if output is None:
-2965			if not os.path.exists(dir):
-2966				os.makedirs(dir)
-2967			if filename == None:
-2968				filename = f'D{self._4x}_distribution_of_analyses.pdf'
-2969			ppl.savefig(f'{dir}/{filename}')
-2970			ppl.close(fig)
-2971		elif output == 'ax':
-2972			return ppl.gca()
-2973		elif output == 'fig':
-2974			return fig
+            
2895	def plot_distribution_of_analyses(
+2896		self,
+2897		dir = 'output',
+2898		filename = None,
+2899		vs_time = False,
+2900		figsize = (6,4),
+2901		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
+2902		output = None,
+2903		):
+2904		'''
+2905		Plot temporal distribution of all analyses in the data set.
+2906		
+2907		**Parameters**
+2908
+2909		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
+2910		'''
+2911
+2912		asamples = [s for s in self.anchors]
+2913		usamples = [s for s in self.unknowns]
+2914		if output is None or output == 'fig':
+2915			fig = ppl.figure(figsize = figsize)
+2916			ppl.subplots_adjust(*subplots_adjust)
+2917		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
+2918		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
+2919		Xmax += (Xmax-Xmin)/40
+2920		Xmin -= (Xmax-Xmin)/41
+2921		for k, s in enumerate(asamples + usamples):
+2922			if vs_time:
+2923				X = [r['TimeTag'] for r in self if r['Sample'] == s]
+2924			else:
+2925				X = [x for x,r in enumerate(self) if r['Sample'] == s]
+2926			Y = [-k for x in X]
+2927			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
+2928			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
+2929			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
+2930		ppl.axis([Xmin, Xmax, -k-1, 1])
+2931		ppl.xlabel('\ntime')
+2932		ppl.gca().annotate('',
+2933			xy = (0.6, -0.02),
+2934			xycoords = 'axes fraction',
+2935			xytext = (.4, -0.02), 
+2936            arrowprops = dict(arrowstyle = "->", color = 'k'),
+2937            )
+2938			
+2939
+2940		x2 = -1
+2941		for session in self.sessions:
+2942			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
+2943			if vs_time:
+2944				ppl.axvline(x1, color = 'k', lw = .75)
+2945			if x2 > -1:
+2946				if not vs_time:
+2947					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
+2948			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
+2949# 			from xlrd import xldate_as_datetime
+2950# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
+2951			if vs_time:
+2952				ppl.axvline(x2, color = 'k', lw = .75)
+2953				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
+2954			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
+2955
+2956		ppl.xticks([])
+2957		ppl.yticks([])
+2958
+2959		if output is None:
+2960			if not os.path.exists(dir):
+2961				os.makedirs(dir)
+2962			if filename == None:
+2963				filename = f'D{self._4x}_distribution_of_analyses.pdf'
+2964			ppl.savefig(f'{dir}/{filename}')
+2965			ppl.close(fig)
+2966		elif output == 'ax':
+2967			return ppl.gca()
+2968		elif output == 'fig':
+2969			return fig
 
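Here `output` selects the return value: `None` (default) saves to disk and returns nothing, `'ax'` returns the current axes, `'fig'` returns the figure without saving. Sketch:

```python
ax = mydata.plot_distribution_of_analyses(vs_time = True, output = 'ax')
fig = mydata.plot_distribution_of_analyses(output = 'fig')  # caller handles saving
```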
@@ -10676,94 +10671,94 @@
Inherited Members
-
2977class D47data(D4xdata):
-2978	'''
-2979	Store and process data for a large set of Δ47 analyses,
-2980	usually comprising more than one analytical session.
-2981	'''
-2982
-2983	Nominal_D4x = {
-2984		'ETH-1':   0.2052,
-2985		'ETH-2':   0.2085,
-2986		'ETH-3':   0.6132,
-2987		'ETH-4':   0.4511,
-2988		'IAEA-C1': 0.3018,
-2989		'IAEA-C2': 0.6409,
-2990		'MERCK':   0.5135,
-2991		} # I-CDES (Bernasconi et al., 2021)
-2992	'''
-2993	Nominal Δ47 values assigned to the Δ47 anchor samples, used by
-2994	`D47data.standardize()` to normalize unknown samples to an absolute Δ47
-2995	reference frame.
-2996
-2997	By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)):
-2998	```py
-2999	{
-3000		'ETH-1'   : 0.2052,
-3001		'ETH-2'   : 0.2085,
-3002		'ETH-3'   : 0.6132,
-3003		'ETH-4'   : 0.4511,
-3004		'IAEA-C1' : 0.3018,
-3005		'IAEA-C2' : 0.6409,
-3006		'MERCK'   : 0.5135,
-3007	}
-3008	```
-3009	'''
-3010
+            
2972class D47data(D4xdata):
+2973	'''
+2974	Store and process data for a large set of Δ47 analyses,
+2975	usually comprising more than one analytical session.
+2976	'''
+2977
+2978	Nominal_D4x = {
+2979		'ETH-1':   0.2052,
+2980		'ETH-2':   0.2085,
+2981		'ETH-3':   0.6132,
+2982		'ETH-4':   0.4511,
+2983		'IAEA-C1': 0.3018,
+2984		'IAEA-C2': 0.6409,
+2985		'MERCK':   0.5135,
+2986		} # I-CDES (Bernasconi et al., 2021)
+2987	'''
+2988	Nominal Δ47 values assigned to the Δ47 anchor samples, used by
+2989	`D47data.standardize()` to normalize unknown samples to an absolute Δ47
+2990	reference frame.
+2991
+2992	By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)):
+2993	```py
+2994	{
+2995		'ETH-1'   : 0.2052,
+2996		'ETH-2'   : 0.2085,
+2997		'ETH-3'   : 0.6132,
+2998		'ETH-4'   : 0.4511,
+2999		'IAEA-C1' : 0.3018,
+3000		'IAEA-C2' : 0.6409,
+3001		'MERCK'   : 0.5135,
+3002	}
+3003	```
+3004	'''
+3005
+3006
+3007	@property
+3008	def Nominal_D47(self):
+3009		return self.Nominal_D4x
+3010	
 3011
-3012	@property
-3013	def Nominal_D47(self):
-3014		return self.Nominal_D4x
-3015	
+3012	@Nominal_D47.setter
+3013	def Nominal_D47(self, new):
+3014		self.Nominal_D4x = dict(**new)
+3015		self.refresh()
 3016
-3017	@Nominal_D47.setter
-3018	def Nominal_D47(self, new):
-3019		self.Nominal_D4x = dict(**new)
-3020		self.refresh()
-3021
-3022
-3023	def __init__(self, l = [], **kwargs):
-3024		'''
-3025		**Parameters:** same as `D4xdata.__init__()`
-3026		'''
-3027		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
-3028
+3017
+3018	def __init__(self, l = [], **kwargs):
+3019		'''
+3020		**Parameters:** same as `D4xdata.__init__()`
+3021		'''
+3022		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
+3023
+3024
+3025	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
+3026		'''
+3027		Find all samples for which `Teq` is specified, compute equilibrium Δ47
+3028		value for that temperature, and treat these samples as additional anchors.
 3029
-3030	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
-3031		'''
-3032		Find all samples for which `Teq` is specified, compute equilibrium Δ47
-3033		value for that temperature, and add treat these samples as additional anchors.
-3034
-3035		**Parameters**
-3036
-3037		+ `fCo2eqD47`: Which CO2 equilibrium law to use
-3038		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
-3039		`wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)).
-3040		+ `priority`: if `replace`: forget old anchors and only use the new ones;
-3041		if `new`: keep pre-existing anchors but update them in case of conflict
-3042		between old and new Δ47 values;
-3043		if `old`: keep pre-existing anchors but preserve their original Δ47
-3044		values in case of conflict.
-3045		'''
-3046		f = {
-3047			'petersen': fCO2eqD47_Petersen,
-3048			'wang': fCO2eqD47_Wang,
-3049			}[fCo2eqD47]
-3050		foo = {}
-3051		for r in self:
-3052			if 'Teq' in r:
-3053				if r['Sample'] in foo:
-3054					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
-3055				else:
-3056					foo[r['Sample']] = f(r['Teq'])
-3057			else:
-3058					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
-3059
-3060		if priority == 'replace':
-3061			self.Nominal_D47 = {}
-3062		for s in foo:
-3063			if priority != 'old' or s not in self.Nominal_D47:
-3064				self.Nominal_D47[s] = foo[s]
+3030		**Parameters**
+3031
+3032		+ `fCo2eqD47`: which CO2 equilibrium law to use
+3033		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
+3034		`wang`: [Wang et al. (2004)](https://doi.org/10.1016/j.gca.2004.05.039)).
+3035		+ `priority`: if `replace`: forget old anchors and only use the new ones;
+3036		if `new`: keep pre-existing anchors but update them in case of conflict
+3037		between old and new Δ47 values;
+3038		if `old`: keep pre-existing anchors but preserve their original Δ47
+3039		values in case of conflict.
+3040		'''
+3041		f = {
+3042			'petersen': fCO2eqD47_Petersen,
+3043			'wang': fCO2eqD47_Wang,
+3044			}[fCo2eqD47]
+3045		foo = {}
+3046		for r in self:
+3047			if 'Teq' in r:
+3048				if r['Sample'] in foo:
+3049					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
+3050				else:
+3051					foo[r['Sample']] = f(r['Teq'])
+3052			else:
+3053					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
+3054
+3055		if priority == 'replace':
+3056			self.Nominal_D47 = {}
+3057		for s in foo:
+3058			if priority != 'old' or s not in self.Nominal_D47:
+3059				self.Nominal_D47[s] = foo[s]
 
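A hedged sketch of `D47fromTeq()`; the record fields and sample name are illustrative. All analyses of a given sample must carry the same `Teq`, otherwise the assertion fires:

```python
from D47crunch import D47data

d = D47data(rawdata)  # `rawdata`: hypothetical list of analysis records
# Analyses of 'EQ-CO2' all specify r['Teq']; fCO2eqD47_Petersen(Teq)
# then becomes that sample's nominal D47 and it is used as an anchor.
d.D47fromTeq(fCo2eqD47 = 'petersen', priority = 'new')
```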
@@ -10782,11 +10777,11 @@
Inherited Members
-
3023	def __init__(self, l = [], **kwargs):
-3024		'''
-3025		**Parameters:** same as `D4xdata.__init__()`
-3026		'''
-3027		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
+            
3018	def __init__(self, l = [], **kwargs):
+3019		'''
+3020		**Parameters:** same as `D4xdata.__init__()`
+3021		'''
+3022		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
 
@@ -10838,41 +10833,41 @@
Inherited Members
-
3030	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
-3031		'''
-3032		Find all samples for which `Teq` is specified, compute equilibrium Δ47
-3033		value for that temperature, and add treat these samples as additional anchors.
-3034
-3035		**Parameters**
-3036
-3037		+ `fCo2eqD47`: Which CO2 equilibrium law to use
-3038		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
-3039		`wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)).
-3040		+ `priority`: if `replace`: forget old anchors and only use the new ones;
-3041		if `new`: keep pre-existing anchors but update them in case of conflict
-3042		between old and new Δ47 values;
-3043		if `old`: keep pre-existing anchors but preserve their original Δ47
-3044		values in case of conflict.
-3045		'''
-3046		f = {
-3047			'petersen': fCO2eqD47_Petersen,
-3048			'wang': fCO2eqD47_Wang,
-3049			}[fCo2eqD47]
-3050		foo = {}
-3051		for r in self:
-3052			if 'Teq' in r:
-3053				if r['Sample'] in foo:
-3054					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
-3055				else:
-3056					foo[r['Sample']] = f(r['Teq'])
-3057			else:
-3058					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
-3059
-3060		if priority == 'replace':
-3061			self.Nominal_D47 = {}
-3062		for s in foo:
-3063			if priority != 'old' or s not in self.Nominal_D47:
-3064				self.Nominal_D47[s] = foo[s]
+            
3025	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
+3026		'''
+3027		Find all samples for which `Teq` is specified, compute equilibrium Δ47
+3028		value for that temperature, and treat these samples as additional anchors.
+3029
+3030		**Parameters**
+3031
+3032		+ `fCo2eqD47`: Which CO2 equilibrium law to use
+3033		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
+3034		`wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)).
+3035		+ `priority`: if `replace`: forget old anchors and only use the new ones;
+3036		if `new`: keep pre-existing anchors but update them in case of conflict
+3037		between old and new Δ47 values;
+3038		if `old`: keep pre-existing anchors but preserve their original Δ47
+3039		values in case of conflict.
+3040		'''
+3041		f = {
+3042			'petersen': fCO2eqD47_Petersen,
+3043			'wang': fCO2eqD47_Wang,
+3044			}[fCo2eqD47]
+3045		foo = {}
+3046		for r in self:
+3047			if 'Teq' in r:
+3048				if r['Sample'] in foo:
+3049					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
+3050				else:
+3051					foo[r['Sample']] = f(r['Teq'])
+3052			else:
+3053					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
+3054
+3055		if priority == 'replace':
+3056			self.Nominal_D47 = {}
+3057		for s in foo:
+3058			if priority != 'old' or s not in self.Nominal_D47:
+3059				self.Nominal_D47[s] = foo[s]
 
@@ -10984,55 +10979,55 @@
Inherited Members
-
3069class D48data(D4xdata):
-3070	'''
-3071	Store and process data for a large set of Δ48 analyses,
-3072	usually comprising more than one analytical session.
-3073	'''
-3074
-3075	Nominal_D4x = {
-3076		'ETH-1':  0.138,
-3077		'ETH-2':  0.138,
-3078		'ETH-3':  0.270,
-3079		'ETH-4':  0.223,
-3080		'GU-1':  -0.419,
-3081		} # (Fiebig et al., 2019, 2021)
-3082	'''
-3083	Nominal Δ48 values assigned to the Δ48 anchor samples, used by
-3084	`D48data.standardize()` to normalize unknown samples to an absolute Δ48
-3085	reference frame.
-3086
-3087	By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019),
-3088	Fiebig et al. (in press)):
-3089
-3090	```py
-3091	{
-3092		'ETH-1' :  0.138,
-3093		'ETH-2' :  0.138,
-3094		'ETH-3' :  0.270,
-3095		'ETH-4' :  0.223,
-3096		'GU-1'  : -0.419,
-3097	}
-3098	```
-3099	'''
+            
3064class D48data(D4xdata):
+3065	'''
+3066	Store and process data for a large set of Δ48 analyses,
+3067	usually comprising more than one analytical session.
+3068	'''
+3069
+3070	Nominal_D4x = {
+3071		'ETH-1':  0.138,
+3072		'ETH-2':  0.138,
+3073		'ETH-3':  0.270,
+3074		'ETH-4':  0.223,
+3075		'GU-1':  -0.419,
+3076		} # (Fiebig et al., 2019, 2021)
+3077	'''
+3078	Nominal Δ48 values assigned to the Δ48 anchor samples, used by
+3079	`D48data.standardize()` to normalize unknown samples to an absolute Δ48
+3080	reference frame.
+3081
+3082	By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019),
+3083	Fiebig et al. (2021)):
+3084
+3085	```py
+3086	{
+3087		'ETH-1' :  0.138,
+3088		'ETH-2' :  0.138,
+3089		'ETH-3' :  0.270,
+3090		'ETH-4' :  0.223,
+3091		'GU-1'  : -0.419,
+3092	}
+3093	```
+3094	'''
+3095
+3096
+3097	@property
+3098	def Nominal_D48(self):
+3099		return self.Nominal_D4x
 3100
-3101
-3102	@property
-3103	def Nominal_D48(self):
-3104		return self.Nominal_D4x
-3105
-3106	
-3107	@Nominal_D48.setter
-3108	def Nominal_D48(self, new):
-3109		self.Nominal_D4x = dict(**new)
-3110		self.refresh()
-3111
-3112
-3113	def __init__(self, l = [], **kwargs):
-3114		'''
-3115		**Parameters:** same as `D4xdata.__init__()`
-3116		'''
-3117		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
+3101	
+3102	@Nominal_D48.setter
+3103	def Nominal_D48(self, new):
+3104		self.Nominal_D4x = dict(**new)
+3105		self.refresh()
+3106
+3107
+3108	def __init__(self, l = [], **kwargs):
+3109		'''
+3110		**Parameters:** same as `D4xdata.__init__()`
+3111		'''
+3112		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
 
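Because `Nominal_D48` is a property whose setter rebuilds the dict and calls `refresh()`, anchor values should be replaced by assignment rather than mutated in place. Sketch, with a hypothetical extra anchor:

```python
from D47crunch import D48data

d48 = D48data()
d48.Nominal_D48 = {**d48.Nominal_D48, 'MY-STD': 0.123}  # reassignment triggers refresh()
```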
@@ -11051,11 +11046,11 @@
Inherited Members
-
3113	def __init__(self, l = [], **kwargs):
-3114		'''
-3115		**Parameters:** same as `D4xdata.__init__()`
-3116		'''
-3117		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
+            
3108	def __init__(self, l = [], **kwargs):
+3109		'''
+3110		**Parameters:** same as `D4xdata.__init__()`
+3111		'''
+3112		D4xdata.__init__(self, l = l, mass = '48', **kwargs)