diff --git a/D47crunch/__init__.py b/D47crunch/__init__.py
index 03651fb..9483f98 100755
--- a/D47crunch/__init__.py
+++ b/D47crunch/__init__.py
@@ -412,6 +412,7 @@ def virtual_data(
 	samples = [],
 	a47 = 1., b47 = 0., c47 = -0.9,
 	a48 = 1., b48 = 0., c48 = -0.45,
+	rd45 = 0.020, rd46 = 0.060,
 	rD47 = 0.015, rD48 = 0.045,
 	d13Cwg_VPDB = None, d18Owg_VSMOW = None,
 	session = None,
@@ -441,6 +442,8 @@ def virtual_data(
 	+ `a48`: scrambling factor for Δ48
 	+ `b48`: compositional nonlinearity for Δ48
 	+ `c48`: working gas offset for Δ48
+	+ `rd45`: analytical repeatability of δ45
+	+ `rd46`: analytical repeatability of δ46
 	+ `rD47`: analytical repeatability of Δ47
 	+ `rD48`: analytical repeatability of Δ48
 	+ `d13Cwg_VPDB`, `d18Owg_VSMOW`: bulk composition of the working gas
@@ -595,6 +598,10 @@ def virtual_data(
 		rng = nprandom.default_rng()
 
 	N = sum([s['N'] for s in samples])
+	errors45 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors
+	errors45 *= rd45 / stdev(errors45) # scale errors to rd45
+	errors46 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors
+	errors46 *= rd46 / stdev(errors46) # scale errors to rd46
 	errors47 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors
 	errors47 *= rD47 / stdev(errors47) # scale errors to rD47
 	errors48 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors
 	errors48 *= rD48 / stdev(errors48) # scale errors to rD48
@@ -623,8 +630,10 @@ def virtual_data(
 		sN = s['N']
 		while sN:
 			out.append(simulate_single_analysis(**kw))
-			out[-1]['d47'] += errors47[k] * a47
-			out[-1]['d48'] += errors48[k] * a48
+			out[-1]['d45'] += errors45[k]
+			out[-1]['d46'] += errors46[k]
+			out[-1]['d47'] += (errors45[k] + errors46[k] + errors47[k]) * a47
+			out[-1]['d48'] += (2*errors46[k] + errors48[k]) * a48
 			sN -= 1
 			k += 1
@@ -2968,6 +2977,156 @@ def plot_distribution_of_analyses(
 
 		return fig
 
+	def plot_bulk_compositions(
+		self,
+		samples = None,
+		dir = 'output/bulk_compositions',
+		figsize = (6,6),
+		subplots_adjust = (0.15, 0.12, 0.95, 0.92),
+		show = False,
+		sample_color = (0,.5,1),
+		analysis_color = (.7,.7,.7),
+		labeldist = 0.3,
+		radius = 0.05,
+		):
+		'''
+		Plot δ13C_VPDB vs δ18O_VSMOW (of CO2) for all analyses.
+
+		By default, creates a directory `./output/bulk_compositions` where plots for
+		each sample are saved. Another plot named `__all__.pdf` shows all analyses together.
+
+		**Parameters**
+
+		+ `samples`: only these samples are processed (by default: all samples)
+		+ `dir`: where to save the plots
+		+ `figsize`: (width, height) of figure
+		+ `subplots_adjust`: passed to `subplots_adjust()`
+		+ `show`: whether to call `matplotlib.pyplot.show()` on the plot with all samples,
+		allowing for interactive visualization/exploration in (δ13C, δ18O) space
+		+ `sample_color`: color used for sample markers/labels
+		+ `analysis_color`: color used for replicate markers/labels
+		+ `labeldist`: distance (in inches) from replicate markers to replicate labels
+		+ `radius`: radius of the dashed circle providing scale. No circle if `radius = 0`.
+		'''
+
+		from matplotlib.patches import Ellipse
+
+		if samples is None:
+			samples = [_ for _ in self.samples]
+
+		saved = {}
+
+		for s in samples:
+
+			fig = ppl.figure(figsize = figsize)
+			fig.subplots_adjust(*subplots_adjust)
+			ax = ppl.subplot(111)
+			ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
+			ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
+			ppl.title(s)
+
+
+			XY = np.array([[_['d18O_VSMOW'], _['d13C_VPDB']] for _ in self.samples[s]['data']])
+			UID = [_['UID'] for _ in self.samples[s]['data']]
+			XY0 = XY.mean(0)
+
+			for xy in XY:
+				ppl.plot([xy[0], XY0[0]], [xy[1], XY0[1]], '-', lw = 1, color = analysis_color)
+
+			ppl.plot(*XY.T, 'wo', mew = 1, mec = analysis_color)
+			ppl.plot(*XY0, 'wo', mew = 2, mec = sample_color)
+			ppl.text(*XY0, f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
+			saved[s] = [XY, XY0]
+
+			x1, x2, y1, y2 = ppl.axis()
+			x0, dx = (x1+x2)/2, (x2-x1)/2
+			y0, dy = (y1+y2)/2, (y2-y1)/2
+			dx, dy = [max(max(dx, dy), radius)]*2
+
+			ppl.axis([
+				x0 - 1.2*dx,
+				x0 + 1.2*dx,
+				y0 - 1.2*dy,
+				y0 + 1.2*dy,
+				])
+
+			XY0_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(XY0))
+
+			for xy, uid in zip(XY, UID):
+
+				xy_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(xy))
+				vector_in_display_space = xy_in_display_space - XY0_in_display_space
+
+				if (vector_in_display_space**2).sum() > 0:
+
+					unit_vector_in_display_space = vector_in_display_space / ((vector_in_display_space**2).sum())**0.5
+					label_vector_in_display_space = vector_in_display_space + unit_vector_in_display_space * labeldist
+					label_xy_in_display_space = XY0_in_display_space + label_vector_in_display_space
+					label_xy_in_data_space = ax.transData.inverted().transform(fig.dpi_scale_trans.transform(label_xy_in_display_space))
+
+					ppl.text(*label_xy_in_data_space, uid, va = 'center', ha = 'center', color = analysis_color)
+
+				else:
+
+					ppl.text(*xy, f'{uid}  ', va = 'center', ha = 'right', color = analysis_color)
+
+			if radius:
+				ax.add_artist(Ellipse(
+					xy = XY0,
+					width = radius*2,
+					height = radius*2,
+					ls = (0, (2,2)),
+					lw = .7,
+					ec = analysis_color,
+					fc = 'None',
+					))
+				ppl.text(
+					XY0[0],
+					XY0[1]-radius,
+					f'\n± {radius*1e3:.0f} ppm',
+					color = analysis_color,
+					va = 'top',
+					ha = 'center',
+					linespacing = 0.4,
+					size = 8,
+					)
+
+			if not os.path.exists(dir):
+				os.makedirs(dir)
+			fig.savefig(f'{dir}/{s}.pdf')
+			ppl.close(fig)
+
+		fig = ppl.figure(figsize = figsize)
+		fig.subplots_adjust(*subplots_adjust)
+		ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
+		ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
+
+		for s in saved:
+			for xy in saved[s][0]:
+				ppl.plot([xy[0], saved[s][1][0]], [xy[1], saved[s][1][1]], '-', lw = 1, color = analysis_color)
+			ppl.plot(*saved[s][0].T, 'wo', mew = 1, mec = analysis_color)
+			ppl.plot(*saved[s][1], 'wo', mew = 1.5, mec = sample_color)
+			ppl.text(*saved[s][1], f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
+
+		x1, x2, y1, y2 = ppl.axis()
+		ppl.axis([
+			x1 - (x2-x1)/10,
+			x2 + (x2-x1)/10,
+			y1 - (y2-y1)/10,
+			y2 + (y2-y1)/10,
+			])
+
+
+		if not os.path.exists(dir):
+			os.makedirs(dir)
+		fig.savefig(f'{dir}/__all__.pdf')
+		if show:
+			ppl.show()
+		ppl.close(fig)
+
+
 class D47data(D4xdata):
 	'''
 	Store and process data for a large set of Δ47 analyses,
diff --git a/build_doc.py b/build_doc.py
index 9943534..f48b0df 100755
--- a/build_doc.py
+++ b/build_doc.py
@@ -63,4 +63,5 @@ def myfilter(docstr):
 # ```
 # '''
 #
-# print(myfilter(foo))
\ No newline at end of file
+# print(myfilter(foo))
+
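A note on the error model in the `virtual_data()` hunks above: the δ45 and δ46 errors are deliberately folded into δ47 and δ48 as well, so that the simulated bulk-composition scatter does not leak into the clumped signals. To first order, Δ47raw ≈ δ47 − δ46 − δ45 and Δ48raw ≈ δ48 − 2·δ46, so perturbing δ47 by (e45 + e46 + e47)·a47 and δ48 by (2·e46 + e48)·a48 shifts Δ47raw by a47·e47 and Δ48raw by a48·e48 (exactly so for the default a47 = a48 = 1), which standardization maps back to Δ47/Δ48 repeatabilities of rD47/rD48. A minimal standalone sketch of this first-order bookkeeping (it does not call D47crunch itself; the variable names and the first-order approximation are editorial assumptions):

```py
import numpy as np

# Draw and rescale errors analogously to virtual_data() (here using the sample SD):
rng = np.random.default_rng(123)
N, rd45, rd46, rD47 = 1000, 0.020, 0.060, 0.015
e45 = rng.normal(0, 1, N); e45 *= rd45 / e45.std(ddof = 1)  # scale to rd45
e46 = rng.normal(0, 1, N); e46 *= rd46 / e46.std(ddof = 1)  # scale to rd46
e47 = rng.normal(0, 1, N); e47 *= rD47 / e47.std(ddof = 1)  # scale to rD47

d45_shift = e45              # δ45 picks up its own scatter
d46_shift = e46              # δ46 picks up its own scatter
d47_shift = e45 + e46 + e47  # δ47 follows the bulk scatter plus e47 (a47 = 1)

# First-order Δ47raw perturbation: the bulk terms cancel, only e47 survives,
# so the Δ47 repeatability stays at rD47 while δ45/δ46 scatter at rd45/rd46.
assert np.allclose(d47_shift - d46_shift - d45_shift, e47)
```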
diff --git a/docs/bulk_compositions.png b/docs/bulk_compositions.png
new file mode 100644
index 0000000..a4a7623
Binary files /dev/null and b/docs/bulk_compositions.png differ
diff --git a/docs/howto.md b/docs/howto.md
index ee7bd27..d223a04 100755
--- a/docs/howto.md
+++ b/docs/howto.md
@@ -111,6 +111,29 @@ data47.plot_residuals(filename = 'residuals.pdf')
 
 Again, note that this plot only shows the succession of analyses as if they were all distributed at regular time intervals.
 
+### 2.2.4 Checking δ13C and δ18O dispersion
+
+```py
+mydata = D47data(virtual_data(
+	session = 'mysession',
+	samples = [
+		dict(Sample = 'ETH-1', N = 4),
+		dict(Sample = 'ETH-2', N = 4),
+		dict(Sample = 'ETH-3', N = 4),
+		dict(Sample = 'MYSAMPLE', N = 8, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
+	], seed = 123))
+
+mydata.refresh()
+mydata.wg()
+mydata.crunch()
+mydata.plot_bulk_compositions()
+```
+
+`D4xdata.plot_bulk_compositions()` produces a series of plots, one for each sample, and an additional plot with all samples together. For example, here is the plot for sample `MYSAMPLE`:
+
+![bulk_compositions.png](bulk_compositions.png)
+
+
 ## 2.3 Use a different set of anchors, change anchor nominal values, and/or change oxygen-17 correction parameters
 
 Nominal values for various carbonate standards are defined in four places:
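As an aside on the howto example above, the other documented parameters of the new method can be exercised in the same session. A sketch continuing from the crunched `mydata` object (the output directory and parameter values are illustrative; parameter names come from the `plot_bulk_compositions()` docstring in the first hunk):

```py
# Continuing from the howto snippet above (requires the crunched `mydata`):
mydata.plot_bulk_compositions(
	samples = ['MYSAMPLE'],  # process only this sample
	dir = 'output/qc',       # save PDFs here instead of output/bulk_compositions
	radius = 0.1,            # dashed scale circle of ±100 ppm (radius in ‰)
	show = True,             # interactive view of the combined __all__ plot
	)
```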

API Documentation

  • plot_distribution_of_analyses
  • +
  • + plot_bulk_compositions +
  • @@ -571,6 +574,29 @@

    2.2.3 Plotting Δ47Again, note that this plot only shows the succession of analyses as if they were all distributed at regular time intervals.

    +

    2.2.4 Checking δ13C and δ18O dispersion

    + +
    +
    mydata = D47data(virtual_data(
    +    session = 'mysession',
    +    samples = [
    +        dict(Sample = 'ETH-1', N = 4),
    +        dict(Sample = 'ETH-2', N = 4),
    +        dict(Sample = 'ETH-3', N = 4),
    +        dict(Sample = 'MYSAMPLE', N = 8, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
    +    ], seed = 123))
    +
    +mydata.refresh()
    +mydata.wg()
    +mydata.crunch()
    +mydata.plot_bulk_compositions()
    +
    +
    + +

    D4xdata.plot_bulk_compositions() produces a series of plots, one for each sample, and an additional plot with all samples together. For example, here is the plot for sample MYSAMPLE:

    + +

    bulk_compositions.png

    +

    2.3 Use a different set of anchors, change anchor nominal values, and/or change oxygen-17 correction parameters

    Nominal values for various carbonate standards are defined in four places:

    @@ -1210,2711 +1236,2870 @@

    API Documentation

    412 samples = [], 413 a47 = 1., b47 = 0., c47 = -0.9, 414 a48 = 1., b48 = 0., c48 = -0.45, - 415 rD47 = 0.015, rD48 = 0.045, - 416 d13Cwg_VPDB = None, d18Owg_VSMOW = None, - 417 session = None, - 418 Nominal_D47 = None, Nominal_D48 = None, - 419 Nominal_d13C_VPDB = None, Nominal_d18O_VPDB = None, - 420 ALPHA_18O_ACID_REACTION = None, - 421 R13_VPDB = None, - 422 R17_VSMOW = None, - 423 R18_VSMOW = None, - 424 LAMBDA_17 = None, - 425 R18_VPDB = None, - 426 seed = 0, - 427 ): - 428 ''' - 429 Return list with simulated analyses from a single session. - 430 - 431 **Parameters** - 432 - 433 + `samples`: a list of entries; each entry is a dictionary with the following fields: - 434 * `Sample`: the name of the sample - 435 * `d13C_VPDB`, `d18O_VPDB`: bulk composition of the carbonate sample - 436 * `D47`, `D48`, `D49`, `D17O` (all optional): clumped-isotope and oxygen-17 anomalies of the carbonate sample - 437 * `N`: how many analyses to generate for this sample - 438 + `a47`: scrambling factor for Δ47 - 439 + `b47`: compositional nonlinearity for Δ47 - 440 + `c47`: working gas offset for Δ47 - 441 + `a48`: scrambling factor for Δ48 - 442 + `b48`: compositional nonlinearity for Δ48 - 443 + `c48`: working gas offset for Δ48 - 444 + `rD47`: analytical repeatability of Δ47 - 445 + `rD48`: analytical repeatability of Δ48 - 446 + `d13Cwg_VPDB`, `d18Owg_VSMOW`: bulk composition of the working gas - 447 (by default equal to the `simulate_single_analysis` default values) - 448 + `session`: name of the session (no name by default) - 449 + `Nominal_D47`, `Nominal_D48`: where to lookup Δ47 and Δ48 values - 450 if `D47` or `D48` are not specified (by default equal to the `simulate_single_analysis` defaults) - 451 + `Nominal_d13C_VPDB`, `Nominal_d18O_VPDB`: where to lookup δ13C and - 452 δ18O values if `d13C_VPDB` or `d18O_VPDB` are not specified - 453 (by default equal to the `simulate_single_analysis` defaults) - 454 + `ALPHA_18O_ACID_REACTION`: 18O/16O acid fractionation factor - 455 (by default equal to the `simulate_single_analysis` defaults) - 456 + `R13_VPDB`, `R17_VSMOW`, `R18_VSMOW`, `LAMBDA_17`, `R18_VPDB`: oxygen-17 - 457 correction parameters (by default equal to the `simulate_single_analysis` default) - 458 + `seed`: explicitly set to a non-zero value to achieve random but repeatable simulations - 459 - 460 - 461 Here is an example of using this method to generate an arbitrary combination of - 462 anchors and unknowns for a bunch of sessions: - 463 - 464 ```py - 465 args = dict( - 466 samples = [ - 467 dict(Sample = 'ETH-1', N = 4), - 468 dict(Sample = 'ETH-2', N = 5), - 469 dict(Sample = 'ETH-3', N = 6), - 470 dict(Sample = 'FOO', N = 2, - 471 d13C_VPDB = -5., d18O_VPDB = -10., - 472 D47 = 0.3, D48 = 0.15), - 473 ], rD47 = 0.010, rD48 = 0.030) - 474 - 475 session1 = virtual_data(session = 'Session_01', **args, seed = 123) - 476 session2 = virtual_data(session = 'Session_02', **args, seed = 1234) - 477 session3 = virtual_data(session = 'Session_03', **args, seed = 12345) - 478 session4 = virtual_data(session = 'Session_04', **args, seed = 123456) - 479 - 480 D = D47data(session1 + session2 + session3 + session4) - 481 - 482 D.crunch() - 483 D.standardize() + 415 rd45 = 0.020, rd46 = 0.060, + 416 rD47 = 0.015, rD48 = 0.045, + 417 d13Cwg_VPDB = None, d18Owg_VSMOW = None, + 418 session = None, + 419 Nominal_D47 = None, Nominal_D48 = None, + 420 Nominal_d13C_VPDB = None, Nominal_d18O_VPDB = None, + 421 ALPHA_18O_ACID_REACTION = None, + 422 R13_VPDB = None, + 423 R17_VSMOW = None, + 424 R18_VSMOW 
= None, + 425 LAMBDA_17 = None, + 426 R18_VPDB = None, + 427 seed = 0, + 428 ): + 429 ''' + 430 Return list with simulated analyses from a single session. + 431 + 432 **Parameters** + 433 + 434 + `samples`: a list of entries; each entry is a dictionary with the following fields: + 435 * `Sample`: the name of the sample + 436 * `d13C_VPDB`, `d18O_VPDB`: bulk composition of the carbonate sample + 437 * `D47`, `D48`, `D49`, `D17O` (all optional): clumped-isotope and oxygen-17 anomalies of the carbonate sample + 438 * `N`: how many analyses to generate for this sample + 439 + `a47`: scrambling factor for Δ47 + 440 + `b47`: compositional nonlinearity for Δ47 + 441 + `c47`: working gas offset for Δ47 + 442 + `a48`: scrambling factor for Δ48 + 443 + `b48`: compositional nonlinearity for Δ48 + 444 + `c48`: working gas offset for Δ48 + 445 + `rd45`: analytical repeatability of δ45 + 446 + `rd46`: analytical repeatability of δ46 + 447 + `rD47`: analytical repeatability of Δ47 + 448 + `rD48`: analytical repeatability of Δ48 + 449 + `d13Cwg_VPDB`, `d18Owg_VSMOW`: bulk composition of the working gas + 450 (by default equal to the `simulate_single_analysis` default values) + 451 + `session`: name of the session (no name by default) + 452 + `Nominal_D47`, `Nominal_D48`: where to lookup Δ47 and Δ48 values + 453 if `D47` or `D48` are not specified (by default equal to the `simulate_single_analysis` defaults) + 454 + `Nominal_d13C_VPDB`, `Nominal_d18O_VPDB`: where to lookup δ13C and + 455 δ18O values if `d13C_VPDB` or `d18O_VPDB` are not specified + 456 (by default equal to the `simulate_single_analysis` defaults) + 457 + `ALPHA_18O_ACID_REACTION`: 18O/16O acid fractionation factor + 458 (by default equal to the `simulate_single_analysis` defaults) + 459 + `R13_VPDB`, `R17_VSMOW`, `R18_VSMOW`, `LAMBDA_17`, `R18_VPDB`: oxygen-17 + 460 correction parameters (by default equal to the `simulate_single_analysis` default) + 461 + `seed`: explicitly set to a non-zero value to achieve random but repeatable simulations + 462 + 463 + 464 Here is an example of using this method to generate an arbitrary combination of + 465 anchors and unknowns for a bunch of sessions: + 466 + 467 ```py + 468 args = dict( + 469 samples = [ + 470 dict(Sample = 'ETH-1', N = 4), + 471 dict(Sample = 'ETH-2', N = 5), + 472 dict(Sample = 'ETH-3', N = 6), + 473 dict(Sample = 'FOO', N = 2, + 474 d13C_VPDB = -5., d18O_VPDB = -10., + 475 D47 = 0.3, D48 = 0.15), + 476 ], rD47 = 0.010, rD48 = 0.030) + 477 + 478 session1 = virtual_data(session = 'Session_01', **args, seed = 123) + 479 session2 = virtual_data(session = 'Session_02', **args, seed = 1234) + 480 session3 = virtual_data(session = 'Session_03', **args, seed = 12345) + 481 session4 = virtual_data(session = 'Session_04', **args, seed = 123456) + 482 + 483 D = D47data(session1 + session2 + session3 + session4) 484 - 485 D.table_of_sessions(verbose = True, save_to_file = False) - 486 D.table_of_samples(verbose = True, save_to_file = False) - 487 D.table_of_analyses(verbose = True, save_to_file = False) - 488 ``` - 489 - 490 This should output something like: - 491 - 492 ``` - 493 [table_of_sessions] - 494 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– - 495 Session Na Nu d13Cwg_VPDB d18Owg_VSMOW r_d13C r_d18O r_D47 a ± SE 1e3 x b ± SE c ± SE - 496 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– - 497 Session_01 15 2 -4.000 26.000 0.0000 0.0000 0.0110 0.997 ± 0.017 -0.097 ± 
0.244 -0.896 ± 0.006 - 498 Session_02 15 2 -4.000 26.000 0.0000 0.0000 0.0109 1.002 ± 0.017 -0.110 ± 0.244 -0.901 ± 0.006 - 499 Session_03 15 2 -4.000 26.000 0.0000 0.0000 0.0107 1.010 ± 0.017 -0.037 ± 0.244 -0.904 ± 0.006 - 500 Session_04 15 2 -4.000 26.000 0.0000 0.0000 0.0106 1.001 ± 0.017 -0.181 ± 0.244 -0.894 ± 0.006 - 501 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– - 502 - 503 [table_of_samples] - 504 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– - 505 Sample N d13C_VPDB d18O_VSMOW D47 SE 95% CL SD p_Levene - 506 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– - 507 ETH-1 16 2.02 37.02 0.2052 0.0079 - 508 ETH-2 20 -10.17 19.88 0.2085 0.0100 - 509 ETH-3 24 1.71 37.45 0.6132 0.0105 - 510 FOO 8 -5.00 28.91 0.2989 0.0040 ± 0.0080 0.0101 0.638 - 511 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– - 512 - 513 [table_of_analyses] - 514 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– - 515 UID Session Sample d13Cwg_VPDB d18Owg_VSMOW d45 d46 d47 d48 d49 d13C_VPDB d18O_VSMOW D47raw D48raw D49raw D47 - 516 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– - 517 1 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.122986 21.273526 27.780042 2.020000 37.024281 -0.706013 -0.328878 -0.000013 0.192554 - 518 2 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.130144 21.282615 27.780042 2.020000 37.024281 -0.698974 -0.319981 -0.000013 0.199615 - 519 3 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.149219 21.299572 27.780042 2.020000 37.024281 -0.680215 -0.303383 -0.000013 0.218429 - 520 4 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.136616 21.233128 27.780042 2.020000 37.024281 -0.692609 -0.368421 -0.000013 0.205998 - 521 5 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.697171 -12.203054 -18.023381 -10.170000 19.875825 -0.680771 -0.290128 -0.000002 0.215054 - 522 6 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701124 -12.184422 -18.023381 -10.170000 19.875825 -0.684772 -0.271272 -0.000002 0.211041 - 523 7 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.715105 -12.195251 -18.023381 -10.170000 19.875825 -0.698923 -0.282232 -0.000002 0.196848 - 524 8 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701529 -12.204963 -18.023381 -10.170000 19.875825 -0.685182 -0.292061 -0.000002 0.210630 - 525 9 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.711420 -12.228478 -18.023381 -10.170000 19.875825 -0.695193 -0.315859 -0.000002 0.200589 - 526 10 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.666719 22.296486 28.306614 1.710000 37.450394 -0.290459 -0.147284 -0.000014 0.609363 - 527 11 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.671553 22.291060 28.306614 1.710000 37.450394 -0.285706 -0.152592 -0.000014 0.614130 - 528 12 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.652854 22.273271 28.306614 1.710000 37.450394 -0.304093 -0.169990 -0.000014 0.595689 - 529 13 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.684168 22.263156 28.306614 1.710000 37.450394 -0.273302 -0.179883 -0.000014 0.626572 - 530 14 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.662702 22.253578 28.306614 1.710000 37.450394 -0.294409 -0.189251 -0.000014 0.605401 - 531 15 Session_01 ETH-3 -4.000 26.000 5.742374 
11.161270 16.681957 22.230907 28.306614 1.710000 37.450394 -0.275476 -0.211424 -0.000014 0.624391 - 532 16 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.312044 5.395798 4.665655 -5.000000 28.907344 -0.598436 -0.268176 -0.000006 0.298996 - 533 17 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.328123 5.307086 4.665655 -5.000000 28.907344 -0.582387 -0.356389 -0.000006 0.315092 - 534 18 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.122201 21.340606 27.780042 2.020000 37.024281 -0.706785 -0.263217 -0.000013 0.195135 - 535 19 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.134868 21.305714 27.780042 2.020000 37.024281 -0.694328 -0.297370 -0.000013 0.207564 - 536 20 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.140008 21.261931 27.780042 2.020000 37.024281 -0.689273 -0.340227 -0.000013 0.212607 - 537 21 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.135540 21.298472 27.780042 2.020000 37.024281 -0.693667 -0.304459 -0.000013 0.208224 - 538 22 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701213 -12.202602 -18.023381 -10.170000 19.875825 -0.684862 -0.289671 -0.000002 0.213842 - 539 23 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.685649 -12.190405 -18.023381 -10.170000 19.875825 -0.669108 -0.277327 -0.000002 0.229559 - 540 24 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.719003 -12.257955 -18.023381 -10.170000 19.875825 -0.702869 -0.345692 -0.000002 0.195876 - 541 25 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.700592 -12.204641 -18.023381 -10.170000 19.875825 -0.684233 -0.291735 -0.000002 0.214469 - 542 26 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720426 -12.214561 -18.023381 -10.170000 19.875825 -0.704308 -0.301774 -0.000002 0.194439 - 543 27 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.673044 22.262090 28.306614 1.710000 37.450394 -0.284240 -0.180926 -0.000014 0.616730 - 544 28 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.666542 22.263401 28.306614 1.710000 37.450394 -0.290634 -0.179643 -0.000014 0.610350 - 545 29 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.680487 22.243486 28.306614 1.710000 37.450394 -0.276921 -0.199121 -0.000014 0.624031 - 546 30 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.663900 22.245175 28.306614 1.710000 37.450394 -0.293231 -0.197469 -0.000014 0.607759 - 547 31 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.674379 22.301309 28.306614 1.710000 37.450394 -0.282927 -0.142568 -0.000014 0.618039 - 548 32 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.660825 22.270466 28.306614 1.710000 37.450394 -0.296255 -0.172733 -0.000014 0.604742 - 549 33 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.294076 5.349940 4.665655 -5.000000 28.907344 -0.616369 -0.313776 -0.000006 0.283707 - 550 34 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.313775 5.292121 4.665655 -5.000000 28.907344 -0.596708 -0.371269 -0.000006 0.303323 - 551 35 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.121613 21.259909 27.780042 2.020000 37.024281 -0.707364 -0.342207 -0.000013 0.194934 - 552 36 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.145714 21.304889 27.780042 2.020000 37.024281 -0.683661 -0.298178 -0.000013 0.218401 - 553 37 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.126573 21.325093 27.780042 2.020000 37.024281 -0.702485 -0.278401 -0.000013 0.199764 - 554 38 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.132057 21.323211 27.780042 2.020000 37.024281 -0.697092 -0.280244 -0.000013 0.205104 - 555 39 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 
-12.708448 -12.232023 -18.023381 -10.170000 19.875825 -0.692185 -0.319447 -0.000002 0.208915 - 556 40 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.714417 -12.202504 -18.023381 -10.170000 19.875825 -0.698226 -0.289572 -0.000002 0.202934 - 557 41 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720039 -12.264469 -18.023381 -10.170000 19.875825 -0.703917 -0.352285 -0.000002 0.197300 - 558 42 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701953 -12.228550 -18.023381 -10.170000 19.875825 -0.685611 -0.315932 -0.000002 0.215423 - 559 43 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.704535 -12.213634 -18.023381 -10.170000 19.875825 -0.688224 -0.300836 -0.000002 0.212837 - 560 44 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.652920 22.230043 28.306614 1.710000 37.450394 -0.304028 -0.212269 -0.000014 0.594265 - 561 45 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.691485 22.261017 28.306614 1.710000 37.450394 -0.266106 -0.181975 -0.000014 0.631810 - 562 46 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.679119 22.305357 28.306614 1.710000 37.450394 -0.278266 -0.138609 -0.000014 0.619771 - 563 47 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.663623 22.327286 28.306614 1.710000 37.450394 -0.293503 -0.117161 -0.000014 0.604685 - 564 48 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.678524 22.282103 28.306614 1.710000 37.450394 -0.278851 -0.161352 -0.000014 0.619192 - 565 49 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.666246 22.283361 28.306614 1.710000 37.450394 -0.290925 -0.160121 -0.000014 0.607238 - 566 50 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.309929 5.340249 4.665655 -5.000000 28.907344 -0.600546 -0.323413 -0.000006 0.300148 - 567 51 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.317548 5.334102 4.665655 -5.000000 28.907344 -0.592942 -0.329524 -0.000006 0.307676 - 568 52 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.136865 21.300298 27.780042 2.020000 37.024281 -0.692364 -0.302672 -0.000013 0.204033 - 569 53 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.133538 21.291260 27.780042 2.020000 37.024281 -0.695637 -0.311519 -0.000013 0.200762 - 570 54 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.139991 21.319865 27.780042 2.020000 37.024281 -0.689290 -0.283519 -0.000013 0.207107 - 571 55 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.145748 21.330075 27.780042 2.020000 37.024281 -0.683629 -0.273524 -0.000013 0.212766 - 572 56 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702989 -12.202762 -18.023381 -10.170000 19.875825 -0.686660 -0.289833 -0.000002 0.204507 - 573 57 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.692830 -12.240287 -18.023381 -10.170000 19.875825 -0.676377 -0.327811 -0.000002 0.214786 - 574 58 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702899 -12.180291 -18.023381 -10.170000 19.875825 -0.686568 -0.267091 -0.000002 0.204598 - 575 59 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.709282 -12.282257 -18.023381 -10.170000 19.875825 -0.693029 -0.370287 -0.000002 0.198140 - 576 60 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.679330 -12.235994 -18.023381 -10.170000 19.875825 -0.662712 -0.323466 -0.000002 0.228446 - 577 61 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.695594 22.238663 28.306614 1.710000 37.450394 -0.262066 -0.203838 -0.000014 0.634200 - 578 62 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.663504 22.286354 28.306614 1.710000 37.450394 -0.293620 -0.157194 -0.000014 0.602656 - 579 63 Session_04 ETH-3 -4.000 
26.000 5.742374 11.161270 16.666457 22.254290 28.306614 1.710000 37.450394 -0.290717 -0.188555 -0.000014 0.605558 - 580 64 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666910 22.223232 28.306614 1.710000 37.450394 -0.290271 -0.218930 -0.000014 0.606004 - 581 65 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.679662 22.257256 28.306614 1.710000 37.450394 -0.277732 -0.185653 -0.000014 0.618539 - 582 66 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.676768 22.267680 28.306614 1.710000 37.450394 -0.280578 -0.175459 -0.000014 0.615693 - 583 67 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.307663 5.317330 4.665655 -5.000000 28.907344 -0.602808 -0.346202 -0.000006 0.290853 - 584 68 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.308562 5.331400 4.665655 -5.000000 28.907344 -0.601911 -0.332212 -0.000006 0.291749 - 585 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– - 586 ``` - 587 ''' - 588 - 589 kwargs = locals().copy() - 590 - 591 from numpy import random as nprandom - 592 if seed: - 593 rng = nprandom.default_rng(seed) - 594 else: - 595 rng = nprandom.default_rng() - 596 - 597 N = sum([s['N'] for s in samples]) - 598 errors47 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors - 599 errors47 *= rD47 / stdev(errors47) # scale errors to rD47 - 600 errors48 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors - 601 errors48 *= rD48 / stdev(errors48) # scale errors to rD48 - 602 - 603 k = 0 - 604 out = [] - 605 for s in samples: - 606 kw = {} - 607 kw['sample'] = s['Sample'] - 608 kw = { - 609 **kw, - 610 **{var: kwargs[var] - 611 for var in [ - 612 'd13Cwg_VPDB', 'd18Owg_VSMOW', 'ALPHA_18O_ACID_REACTION', - 613 'Nominal_D47', 'Nominal_D48', 'Nominal_d13C_VPDB', 'Nominal_d18O_VPDB', - 614 'R13_VPDB', 'R17_VSMOW', 'R18_VSMOW', 'LAMBDA_17', 'R18_VPDB', - 615 'a47', 'b47', 'c47', 'a48', 'b48', 'c48', - 616 ] - 617 if kwargs[var] is not None}, - 618 **{var: s[var] - 619 for var in ['d13C_VPDB', 'd18O_VPDB', 'D47', 'D48', 'D49', 'D17O'] - 620 if var in s}, - 621 } - 622 - 623 sN = s['N'] - 624 while sN: - 625 out.append(simulate_single_analysis(**kw)) - 626 out[-1]['d47'] += errors47[k] * a47 - 627 out[-1]['d48'] += errors48[k] * a48 - 628 sN -= 1 - 629 k += 1 - 630 - 631 if session is not None: - 632 for r in out: - 633 r['Session'] = session - 634 return out - 635 - 636def table_of_samples( - 637 data47 = None, - 638 data48 = None, - 639 dir = 'output', - 640 filename = None, - 641 save_to_file = True, - 642 print_out = True, - 643 output = None, - 644 ): - 645 ''' - 646 Print out, save to disk and/or return a combined table of samples - 647 for a pair of `D47data` and `D48data` objects. 
- 648 - 649 **Parameters** - 650 - 651 + `data47`: `D47data` instance - 652 + `data48`: `D48data` instance - 653 + `dir`: the directory in which to save the table - 654 + `filename`: the name to the csv file to write to - 655 + `save_to_file`: whether to save the table to disk - 656 + `print_out`: whether to print out the table - 657 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); - 658 if set to `'raw'`: return a list of list of strings - 659 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) - 660 ''' - 661 if data47 is None: - 662 if data48 is None: - 663 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") - 664 else: - 665 return data48.table_of_samples( - 666 dir = dir, - 667 filename = filename, - 668 save_to_file = save_to_file, - 669 print_out = print_out, - 670 output = output - 671 ) - 672 else: - 673 if data48 is None: - 674 return data47.table_of_samples( + 485 D.crunch() + 486 D.standardize() + 487 + 488 D.table_of_sessions(verbose = True, save_to_file = False) + 489 D.table_of_samples(verbose = True, save_to_file = False) + 490 D.table_of_analyses(verbose = True, save_to_file = False) + 491 ``` + 492 + 493 This should output something like: + 494 + 495 ``` + 496 [table_of_sessions] + 497 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– + 498 Session Na Nu d13Cwg_VPDB d18Owg_VSMOW r_d13C r_d18O r_D47 a ± SE 1e3 x b ± SE c ± SE + 499 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– + 500 Session_01 15 2 -4.000 26.000 0.0000 0.0000 0.0110 0.997 ± 0.017 -0.097 ± 0.244 -0.896 ± 0.006 + 501 Session_02 15 2 -4.000 26.000 0.0000 0.0000 0.0109 1.002 ± 0.017 -0.110 ± 0.244 -0.901 ± 0.006 + 502 Session_03 15 2 -4.000 26.000 0.0000 0.0000 0.0107 1.010 ± 0.017 -0.037 ± 0.244 -0.904 ± 0.006 + 503 Session_04 15 2 -4.000 26.000 0.0000 0.0000 0.0106 1.001 ± 0.017 -0.181 ± 0.244 -0.894 ± 0.006 + 504 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– + 505 + 506 [table_of_samples] + 507 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– + 508 Sample N d13C_VPDB d18O_VSMOW D47 SE 95% CL SD p_Levene + 509 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– + 510 ETH-1 16 2.02 37.02 0.2052 0.0079 + 511 ETH-2 20 -10.17 19.88 0.2085 0.0100 + 512 ETH-3 24 1.71 37.45 0.6132 0.0105 + 513 FOO 8 -5.00 28.91 0.2989 0.0040 ± 0.0080 0.0101 0.638 + 514 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– + 515 + 516 [table_of_analyses] + 517 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– + 518 UID Session Sample d13Cwg_VPDB d18Owg_VSMOW d45 d46 d47 d48 d49 d13C_VPDB d18O_VSMOW D47raw D48raw D49raw D47 + 519 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– + 520 1 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.122986 21.273526 27.780042 2.020000 37.024281 -0.706013 -0.328878 -0.000013 0.192554 + 521 2 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.130144 21.282615 27.780042 2.020000 37.024281 -0.698974 -0.319981 -0.000013 0.199615 + 522 3 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.149219 21.299572 27.780042 2.020000 37.024281 -0.680215 -0.303383 -0.000013 0.218429 + 523 4 
Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.136616 21.233128 27.780042 2.020000 37.024281 -0.692609 -0.368421 -0.000013 0.205998 + 524 5 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.697171 -12.203054 -18.023381 -10.170000 19.875825 -0.680771 -0.290128 -0.000002 0.215054 + 525 6 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701124 -12.184422 -18.023381 -10.170000 19.875825 -0.684772 -0.271272 -0.000002 0.211041 + 526 7 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.715105 -12.195251 -18.023381 -10.170000 19.875825 -0.698923 -0.282232 -0.000002 0.196848 + 527 8 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701529 -12.204963 -18.023381 -10.170000 19.875825 -0.685182 -0.292061 -0.000002 0.210630 + 528 9 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.711420 -12.228478 -18.023381 -10.170000 19.875825 -0.695193 -0.315859 -0.000002 0.200589 + 529 10 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.666719 22.296486 28.306614 1.710000 37.450394 -0.290459 -0.147284 -0.000014 0.609363 + 530 11 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.671553 22.291060 28.306614 1.710000 37.450394 -0.285706 -0.152592 -0.000014 0.614130 + 531 12 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.652854 22.273271 28.306614 1.710000 37.450394 -0.304093 -0.169990 -0.000014 0.595689 + 532 13 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.684168 22.263156 28.306614 1.710000 37.450394 -0.273302 -0.179883 -0.000014 0.626572 + 533 14 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.662702 22.253578 28.306614 1.710000 37.450394 -0.294409 -0.189251 -0.000014 0.605401 + 534 15 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.681957 22.230907 28.306614 1.710000 37.450394 -0.275476 -0.211424 -0.000014 0.624391 + 535 16 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.312044 5.395798 4.665655 -5.000000 28.907344 -0.598436 -0.268176 -0.000006 0.298996 + 536 17 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.328123 5.307086 4.665655 -5.000000 28.907344 -0.582387 -0.356389 -0.000006 0.315092 + 537 18 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.122201 21.340606 27.780042 2.020000 37.024281 -0.706785 -0.263217 -0.000013 0.195135 + 538 19 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.134868 21.305714 27.780042 2.020000 37.024281 -0.694328 -0.297370 -0.000013 0.207564 + 539 20 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.140008 21.261931 27.780042 2.020000 37.024281 -0.689273 -0.340227 -0.000013 0.212607 + 540 21 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.135540 21.298472 27.780042 2.020000 37.024281 -0.693667 -0.304459 -0.000013 0.208224 + 541 22 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701213 -12.202602 -18.023381 -10.170000 19.875825 -0.684862 -0.289671 -0.000002 0.213842 + 542 23 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.685649 -12.190405 -18.023381 -10.170000 19.875825 -0.669108 -0.277327 -0.000002 0.229559 + 543 24 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.719003 -12.257955 -18.023381 -10.170000 19.875825 -0.702869 -0.345692 -0.000002 0.195876 + 544 25 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.700592 -12.204641 -18.023381 -10.170000 19.875825 -0.684233 -0.291735 -0.000002 0.214469 + 545 26 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720426 -12.214561 -18.023381 -10.170000 19.875825 -0.704308 -0.301774 -0.000002 0.194439 + 546 27 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.673044 22.262090 28.306614 1.710000 37.450394 -0.284240 -0.180926 
-0.000014 0.616730 + 547 28 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.666542 22.263401 28.306614 1.710000 37.450394 -0.290634 -0.179643 -0.000014 0.610350 + 548 29 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.680487 22.243486 28.306614 1.710000 37.450394 -0.276921 -0.199121 -0.000014 0.624031 + 549 30 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.663900 22.245175 28.306614 1.710000 37.450394 -0.293231 -0.197469 -0.000014 0.607759 + 550 31 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.674379 22.301309 28.306614 1.710000 37.450394 -0.282927 -0.142568 -0.000014 0.618039 + 551 32 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.660825 22.270466 28.306614 1.710000 37.450394 -0.296255 -0.172733 -0.000014 0.604742 + 552 33 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.294076 5.349940 4.665655 -5.000000 28.907344 -0.616369 -0.313776 -0.000006 0.283707 + 553 34 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.313775 5.292121 4.665655 -5.000000 28.907344 -0.596708 -0.371269 -0.000006 0.303323 + 554 35 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.121613 21.259909 27.780042 2.020000 37.024281 -0.707364 -0.342207 -0.000013 0.194934 + 555 36 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.145714 21.304889 27.780042 2.020000 37.024281 -0.683661 -0.298178 -0.000013 0.218401 + 556 37 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.126573 21.325093 27.780042 2.020000 37.024281 -0.702485 -0.278401 -0.000013 0.199764 + 557 38 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.132057 21.323211 27.780042 2.020000 37.024281 -0.697092 -0.280244 -0.000013 0.205104 + 558 39 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.708448 -12.232023 -18.023381 -10.170000 19.875825 -0.692185 -0.319447 -0.000002 0.208915 + 559 40 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.714417 -12.202504 -18.023381 -10.170000 19.875825 -0.698226 -0.289572 -0.000002 0.202934 + 560 41 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720039 -12.264469 -18.023381 -10.170000 19.875825 -0.703917 -0.352285 -0.000002 0.197300 + 561 42 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701953 -12.228550 -18.023381 -10.170000 19.875825 -0.685611 -0.315932 -0.000002 0.215423 + 562 43 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.704535 -12.213634 -18.023381 -10.170000 19.875825 -0.688224 -0.300836 -0.000002 0.212837 + 563 44 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.652920 22.230043 28.306614 1.710000 37.450394 -0.304028 -0.212269 -0.000014 0.594265 + 564 45 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.691485 22.261017 28.306614 1.710000 37.450394 -0.266106 -0.181975 -0.000014 0.631810 + 565 46 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.679119 22.305357 28.306614 1.710000 37.450394 -0.278266 -0.138609 -0.000014 0.619771 + 566 47 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.663623 22.327286 28.306614 1.710000 37.450394 -0.293503 -0.117161 -0.000014 0.604685 + 567 48 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.678524 22.282103 28.306614 1.710000 37.450394 -0.278851 -0.161352 -0.000014 0.619192 + 568 49 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.666246 22.283361 28.306614 1.710000 37.450394 -0.290925 -0.160121 -0.000014 0.607238 + 569 50 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.309929 5.340249 4.665655 -5.000000 28.907344 -0.600546 -0.323413 -0.000006 0.300148 + 570 51 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.317548 5.334102 4.665655 -5.000000 28.907344 -0.592942 -0.329524 -0.000006 
0.307676 + 571 52 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.136865 21.300298 27.780042 2.020000 37.024281 -0.692364 -0.302672 -0.000013 0.204033 + 572 53 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.133538 21.291260 27.780042 2.020000 37.024281 -0.695637 -0.311519 -0.000013 0.200762 + 573 54 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.139991 21.319865 27.780042 2.020000 37.024281 -0.689290 -0.283519 -0.000013 0.207107 + 574 55 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.145748 21.330075 27.780042 2.020000 37.024281 -0.683629 -0.273524 -0.000013 0.212766 + 575 56 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702989 -12.202762 -18.023381 -10.170000 19.875825 -0.686660 -0.289833 -0.000002 0.204507 + 576 57 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.692830 -12.240287 -18.023381 -10.170000 19.875825 -0.676377 -0.327811 -0.000002 0.214786 + 577 58 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702899 -12.180291 -18.023381 -10.170000 19.875825 -0.686568 -0.267091 -0.000002 0.204598 + 578 59 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.709282 -12.282257 -18.023381 -10.170000 19.875825 -0.693029 -0.370287 -0.000002 0.198140 + 579 60 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.679330 -12.235994 -18.023381 -10.170000 19.875825 -0.662712 -0.323466 -0.000002 0.228446 + 580 61 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.695594 22.238663 28.306614 1.710000 37.450394 -0.262066 -0.203838 -0.000014 0.634200 + 581 62 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.663504 22.286354 28.306614 1.710000 37.450394 -0.293620 -0.157194 -0.000014 0.602656 + 582 63 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666457 22.254290 28.306614 1.710000 37.450394 -0.290717 -0.188555 -0.000014 0.605558 + 583 64 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666910 22.223232 28.306614 1.710000 37.450394 -0.290271 -0.218930 -0.000014 0.606004 + 584 65 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.679662 22.257256 28.306614 1.710000 37.450394 -0.277732 -0.185653 -0.000014 0.618539 + 585 66 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.676768 22.267680 28.306614 1.710000 37.450394 -0.280578 -0.175459 -0.000014 0.615693 + 586 67 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.307663 5.317330 4.665655 -5.000000 28.907344 -0.602808 -0.346202 -0.000006 0.290853 + 587 68 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.308562 5.331400 4.665655 -5.000000 28.907344 -0.601911 -0.332212 -0.000006 0.291749 + 588 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– + 589 ``` + 590 ''' + 591 + 592 kwargs = locals().copy() + 593 + 594 from numpy import random as nprandom + 595 if seed: + 596 rng = nprandom.default_rng(seed) + 597 else: + 598 rng = nprandom.default_rng() + 599 + 600 N = sum([s['N'] for s in samples]) + 601 errors45 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors + 602 errors45 *= rd45 / stdev(errors45) # scale errors to rd45 + 603 errors46 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors + 604 errors46 *= rd46 / stdev(errors46) # scale errors to rd46 + 605 errors47 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors + 606 errors47 *= rD47 / stdev(errors47) # scale errors to rD47 + 607 errors48 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors + 608 errors48 *= rD48 / 
stdev(errors48) # scale errors to rD48 + 609 + 610 k = 0 + 611 out = [] + 612 for s in samples: + 613 kw = {} + 614 kw['sample'] = s['Sample'] + 615 kw = { + 616 **kw, + 617 **{var: kwargs[var] + 618 for var in [ + 619 'd13Cwg_VPDB', 'd18Owg_VSMOW', 'ALPHA_18O_ACID_REACTION', + 620 'Nominal_D47', 'Nominal_D48', 'Nominal_d13C_VPDB', 'Nominal_d18O_VPDB', + 621 'R13_VPDB', 'R17_VSMOW', 'R18_VSMOW', 'LAMBDA_17', 'R18_VPDB', + 622 'a47', 'b47', 'c47', 'a48', 'b48', 'c48', + 623 ] + 624 if kwargs[var] is not None}, + 625 **{var: s[var] + 626 for var in ['d13C_VPDB', 'd18O_VPDB', 'D47', 'D48', 'D49', 'D17O'] + 627 if var in s}, + 628 } + 629 + 630 sN = s['N'] + 631 while sN: + 632 out.append(simulate_single_analysis(**kw)) + 633 out[-1]['d45'] += errors45[k] + 634 out[-1]['d46'] += errors46[k] + 635 out[-1]['d47'] += (errors45[k] + errors46[k] + errors47[k]) * a47 + 636 out[-1]['d48'] += (2*errors46[k] + errors48[k]) * a48 + 637 sN -= 1 + 638 k += 1 + 639 + 640 if session is not None: + 641 for r in out: + 642 r['Session'] = session + 643 return out + 644 + 645def table_of_samples( + 646 data47 = None, + 647 data48 = None, + 648 dir = 'output', + 649 filename = None, + 650 save_to_file = True, + 651 print_out = True, + 652 output = None, + 653 ): + 654 ''' + 655 Print out, save to disk and/or return a combined table of samples + 656 for a pair of `D47data` and `D48data` objects. + 657 + 658 **Parameters** + 659 + 660 + `data47`: `D47data` instance + 661 + `data48`: `D48data` instance + 662 + `dir`: the directory in which to save the table + 663 + `filename`: the name to the csv file to write to + 664 + `save_to_file`: whether to save the table to disk + 665 + `print_out`: whether to print out the table + 666 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); + 667 if set to `'raw'`: return a list of list of strings + 668 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) + 669 ''' + 670 if data47 is None: + 671 if data48 is None: + 672 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") + 673 else: + 674 return data48.table_of_samples( 675 dir = dir, 676 filename = filename, 677 save_to_file = save_to_file, 678 print_out = print_out, 679 output = output 680 ) - 681 else: - 682 out47 = data47.table_of_samples(save_to_file = False, print_out = False, output = 'raw') - 683 out48 = data48.table_of_samples(save_to_file = False, print_out = False, output = 'raw') - 684 out = transpose_table(transpose_table(out47) + transpose_table(out48)[4:]) - 685 - 686 if save_to_file: - 687 if not os.path.exists(dir): - 688 os.makedirs(dir) - 689 if filename is None: - 690 filename = f'D47D48_samples.csv' - 691 with open(f'{dir}/{filename}', 'w') as fid: - 692 fid.write(make_csv(out)) - 693 if print_out: - 694 print('\n'+pretty_table(out)) - 695 if output == 'raw': - 696 return out - 697 elif output == 'pretty': - 698 return pretty_table(out) - 699 - 700 - 701def table_of_sessions( - 702 data47 = None, - 703 data48 = None, - 704 dir = 'output', - 705 filename = None, - 706 save_to_file = True, - 707 print_out = True, - 708 output = None, - 709 ): - 710 ''' - 711 Print out, save to disk and/or return a combined table of sessions - 712 for a pair of `D47data` and `D48data` objects. 
- 713 ***Only applicable if the sessions in `data47` and those in `data48` - 714 consist of the exact same sets of analyses.*** - 715 - 716 **Parameters** - 717 - 718 + `data47`: `D47data` instance - 719 + `data48`: `D48data` instance - 720 + `dir`: the directory in which to save the table - 721 + `filename`: the name to the csv file to write to - 722 + `save_to_file`: whether to save the table to disk - 723 + `print_out`: whether to print out the table - 724 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); - 725 if set to `'raw'`: return a list of list of strings - 726 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) - 727 ''' - 728 if data47 is None: - 729 if data48 is None: - 730 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") - 731 else: - 732 return data48.table_of_sessions( - 733 dir = dir, - 734 filename = filename, - 735 save_to_file = save_to_file, - 736 print_out = print_out, - 737 output = output - 738 ) - 739 else: - 740 if data48 is None: - 741 return data47.table_of_sessions( + 681 else: + 682 if data48 is None: + 683 return data47.table_of_samples( + 684 dir = dir, + 685 filename = filename, + 686 save_to_file = save_to_file, + 687 print_out = print_out, + 688 output = output + 689 ) + 690 else: + 691 out47 = data47.table_of_samples(save_to_file = False, print_out = False, output = 'raw') + 692 out48 = data48.table_of_samples(save_to_file = False, print_out = False, output = 'raw') + 693 out = transpose_table(transpose_table(out47) + transpose_table(out48)[4:]) + 694 + 695 if save_to_file: + 696 if not os.path.exists(dir): + 697 os.makedirs(dir) + 698 if filename is None: + 699 filename = f'D47D48_samples.csv' + 700 with open(f'{dir}/{filename}', 'w') as fid: + 701 fid.write(make_csv(out)) + 702 if print_out: + 703 print('\n'+pretty_table(out)) + 704 if output == 'raw': + 705 return out + 706 elif output == 'pretty': + 707 return pretty_table(out) + 708 + 709 + 710def table_of_sessions( + 711 data47 = None, + 712 data48 = None, + 713 dir = 'output', + 714 filename = None, + 715 save_to_file = True, + 716 print_out = True, + 717 output = None, + 718 ): + 719 ''' + 720 Print out, save to disk and/or return a combined table of sessions + 721 for a pair of `D47data` and `D48data` objects. 
+ 722 ***Only applicable if the sessions in `data47` and those in `data48` + 723 consist of the exact same sets of analyses.*** + 724 + 725 **Parameters** + 726 + 727 + `data47`: `D47data` instance + 728 + `data48`: `D48data` instance + 729 + `dir`: the directory in which to save the table + 730 + `filename`: the name to the csv file to write to + 731 + `save_to_file`: whether to save the table to disk + 732 + `print_out`: whether to print out the table + 733 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); + 734 if set to `'raw'`: return a list of list of strings + 735 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) + 736 ''' + 737 if data47 is None: + 738 if data48 is None: + 739 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") + 740 else: + 741 return data48.table_of_sessions( 742 dir = dir, 743 filename = filename, 744 save_to_file = save_to_file, 745 print_out = print_out, 746 output = output 747 ) - 748 else: - 749 out47 = data47.table_of_sessions(save_to_file = False, print_out = False, output = 'raw') - 750 out48 = data48.table_of_sessions(save_to_file = False, print_out = False, output = 'raw') - 751 for k,x in enumerate(out47[0]): - 752 if k>7: - 753 out47[0][k] = out47[0][k].replace('a', 'a_47').replace('b', 'b_47').replace('c', 'c_47') - 754 out48[0][k] = out48[0][k].replace('a', 'a_48').replace('b', 'b_48').replace('c', 'c_48') - 755 out = transpose_table(transpose_table(out47) + transpose_table(out48)[7:]) - 756 - 757 if save_to_file: - 758 if not os.path.exists(dir): - 759 os.makedirs(dir) - 760 if filename is None: - 761 filename = f'D47D48_sessions.csv' - 762 with open(f'{dir}/{filename}', 'w') as fid: - 763 fid.write(make_csv(out)) - 764 if print_out: - 765 print('\n'+pretty_table(out)) - 766 if output == 'raw': - 767 return out - 768 elif output == 'pretty': - 769 return pretty_table(out) - 770 - 771 - 772def table_of_analyses( - 773 data47 = None, - 774 data48 = None, - 775 dir = 'output', - 776 filename = None, - 777 save_to_file = True, - 778 print_out = True, - 779 output = None, - 780 ): - 781 ''' - 782 Print out, save to disk and/or return a combined table of analyses - 783 for a pair of `D47data` and `D48data` objects. - 784 - 785 If the sessions in `data47` and those in `data48` do not consist of - 786 the exact same sets of analyses, the table will have two columns - 787 `Session_47` and `Session_48` instead of a single `Session` column. 
- 788 - 789 **Parameters** - 790 - 791 + `data47`: `D47data` instance - 792 + `data48`: `D48data` instance - 793 + `dir`: the directory in which to save the table - 794 + `filename`: the name to the csv file to write to - 795 + `save_to_file`: whether to save the table to disk - 796 + `print_out`: whether to print out the table - 797 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); - 798 if set to `'raw'`: return a list of list of strings - 799 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) - 800 ''' - 801 if data47 is None: - 802 if data48 is None: - 803 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") - 804 else: - 805 return data48.table_of_analyses( - 806 dir = dir, - 807 filename = filename, - 808 save_to_file = save_to_file, - 809 print_out = print_out, - 810 output = output - 811 ) - 812 else: - 813 if data48 is None: - 814 return data47.table_of_analyses( + 748 else: + 749 if data48 is None: + 750 return data47.table_of_sessions( + 751 dir = dir, + 752 filename = filename, + 753 save_to_file = save_to_file, + 754 print_out = print_out, + 755 output = output + 756 ) + 757 else: + 758 out47 = data47.table_of_sessions(save_to_file = False, print_out = False, output = 'raw') + 759 out48 = data48.table_of_sessions(save_to_file = False, print_out = False, output = 'raw') + 760 for k,x in enumerate(out47[0]): + 761 if k>7: + 762 out47[0][k] = out47[0][k].replace('a', 'a_47').replace('b', 'b_47').replace('c', 'c_47') + 763 out48[0][k] = out48[0][k].replace('a', 'a_48').replace('b', 'b_48').replace('c', 'c_48') + 764 out = transpose_table(transpose_table(out47) + transpose_table(out48)[7:]) + 765 + 766 if save_to_file: + 767 if not os.path.exists(dir): + 768 os.makedirs(dir) + 769 if filename is None: + 770 filename = f'D47D48_sessions.csv' + 771 with open(f'{dir}/{filename}', 'w') as fid: + 772 fid.write(make_csv(out)) + 773 if print_out: + 774 print('\n'+pretty_table(out)) + 775 if output == 'raw': + 776 return out + 777 elif output == 'pretty': + 778 return pretty_table(out) + 779 + 780 + 781def table_of_analyses( + 782 data47 = None, + 783 data48 = None, + 784 dir = 'output', + 785 filename = None, + 786 save_to_file = True, + 787 print_out = True, + 788 output = None, + 789 ): + 790 ''' + 791 Print out, save to disk and/or return a combined table of analyses + 792 for a pair of `D47data` and `D48data` objects. + 793 + 794 If the sessions in `data47` and those in `data48` do not consist of + 795 the exact same sets of analyses, the table will have two columns + 796 `Session_47` and `Session_48` instead of a single `Session` column. 
+ 797 + 798 **Parameters** + 799 + 800 + `data47`: `D47data` instance + 801 + `data48`: `D48data` instance + 802 + `dir`: the directory in which to save the table + 803 + `filename`: the name to the csv file to write to + 804 + `save_to_file`: whether to save the table to disk + 805 + `print_out`: whether to print out the table + 806 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); + 807 if set to `'raw'`: return a list of list of strings + 808 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) + 809 ''' + 810 if data47 is None: + 811 if data48 is None: + 812 raise TypeError("Arguments must include at least one D47data() or D48data() instance.") + 813 else: + 814 return data48.table_of_analyses( 815 dir = dir, 816 filename = filename, 817 save_to_file = save_to_file, 818 print_out = print_out, 819 output = output 820 ) - 821 else: - 822 out47 = data47.table_of_analyses(save_to_file = False, print_out = False, output = 'raw') - 823 out48 = data48.table_of_analyses(save_to_file = False, print_out = False, output = 'raw') - 824 - 825 if [l[1] for l in out47[1:]] == [l[1] for l in out48[1:]]: # if sessions are identical - 826 out = transpose_table(transpose_table(out47) + transpose_table(out48)[-1:]) - 827 else: - 828 out47[0][1] = 'Session_47' - 829 out48[0][1] = 'Session_48' - 830 out47 = transpose_table(out47) - 831 out48 = transpose_table(out48) - 832 out = transpose_table(out47[:2] + out48[1:2] + out47[2:] + out48[-1:]) - 833 - 834 if save_to_file: - 835 if not os.path.exists(dir): - 836 os.makedirs(dir) - 837 if filename is None: - 838 filename = f'D47D48_sessions.csv' - 839 with open(f'{dir}/{filename}', 'w') as fid: - 840 fid.write(make_csv(out)) - 841 if print_out: - 842 print('\n'+pretty_table(out)) - 843 if output == 'raw': - 844 return out - 845 elif output == 'pretty': - 846 return pretty_table(out) - 847 - 848 - 849def _fullcovar(minresult, epsilon = 0.01, named = False): - 850 ''' - 851 Construct full covariance matrix in the case of constrained parameters - 852 ''' - 853 - 854 import asteval - 855 - 856 def f(values): - 857 interp = asteval.Interpreter() - 858 for n,v in zip(minresult.var_names, values): - 859 interp(f'{n} = {v}') - 860 for q in minresult.params: - 861 if minresult.params[q].expr: - 862 interp(f'{q} = {minresult.params[q].expr}') - 863 return np.array([interp.symtable[q] for q in minresult.params]) - 864 - 865 # construct Jacobian - 866 J = np.zeros((minresult.nvarys, len(minresult.params))) - 867 X = np.array([minresult.params[p].value for p in minresult.var_names]) - 868 sX = np.array([minresult.params[p].stderr for p in minresult.var_names]) - 869 - 870 for j in range(minresult.nvarys): - 871 x1 = [_ for _ in X] - 872 x1[j] += epsilon * sX[j] - 873 x2 = [_ for _ in X] - 874 x2[j] -= epsilon * sX[j] - 875 J[j,:] = (f(x1) - f(x2)) / (2 * epsilon * sX[j]) - 876 - 877 _names = [q for q in minresult.params] - 878 _covar = J.T @ minresult.covar @ J - 879 _se = np.diag(_covar)**.5 - 880 _correl = _covar.copy() - 881 for k,s in enumerate(_se): - 882 if s: - 883 _correl[k,:] /= s - 884 _correl[:,k] /= s + 821 else: + 822 if data48 is None: + 823 return data47.table_of_analyses( + 824 dir = dir, + 825 filename = filename, + 826 save_to_file = save_to_file, + 827 print_out = print_out, + 828 output = output + 829 ) + 830 else: + 831 out47 = data47.table_of_analyses(save_to_file = False, print_out = False, output = 'raw') + 832 out48 = data48.table_of_analyses(save_to_file = False, print_out = False, output = 'raw') + 833 + 834 if 
+ 834			if [l[1] for l in out47[1:]] == [l[1] for l in out48[1:]]: # if sessions are identical
+ 835				out = transpose_table(transpose_table(out47) + transpose_table(out48)[-1:])
+ 836			else:
+ 837				out47[0][1] = 'Session_47'
+ 838				out48[0][1] = 'Session_48'
+ 839				out47 = transpose_table(out47)
+ 840				out48 = transpose_table(out48)
+ 841				out = transpose_table(out47[:2] + out48[1:2] + out47[2:] + out48[-1:])
+ 842	
+ 843			if save_to_file:
+ 844				if not os.path.exists(dir):
+ 845					os.makedirs(dir)
+ 846				if filename is None:
+ 847					filename = f'D47D48_analyses.csv'
+ 848				with open(f'{dir}/{filename}', 'w') as fid:
+ 849					fid.write(make_csv(out))
+ 850			if print_out:
+ 851				print('\n'+pretty_table(out))
+ 852			if output == 'raw':
+ 853				return out
+ 854			elif output == 'pretty':
+ 855				return pretty_table(out)
+ 856	
+ 857	
+ 858def _fullcovar(minresult, epsilon = 0.01, named = False):
+ 859	'''
+ 860	Construct full covariance matrix in the case of constrained parameters
+ 861	'''
+ 862	
+ 863	import asteval
+ 864	
+ 865	def f(values):
+ 866		interp = asteval.Interpreter()
+ 867		for n,v in zip(minresult.var_names, values):
+ 868			interp(f'{n} = {v}')
+ 869		for q in minresult.params:
+ 870			if minresult.params[q].expr:
+ 871				interp(f'{q} = {minresult.params[q].expr}')
+ 872		return np.array([interp.symtable[q] for q in minresult.params])
+ 873	
+ 874	# construct Jacobian
+ 875	J = np.zeros((minresult.nvarys, len(minresult.params)))
+ 876	X = np.array([minresult.params[p].value for p in minresult.var_names])
+ 877	sX = np.array([minresult.params[p].stderr for p in minresult.var_names])
+ 878	
+ 879	for j in range(minresult.nvarys):
+ 880		x1 = [_ for _ in X]
+ 881		x1[j] += epsilon * sX[j]
+ 882		x2 = [_ for _ in X]
+ 883		x2[j] -= epsilon * sX[j]
+ 884		J[j,:] = (f(x1) - f(x2)) / (2 * epsilon * sX[j])
 885	
- 886	if named:
- 887		_covar = {i: {j:_covar[i,j] for j in minresult.params} for i in minresult.params}
- 888		_se = {i: _se[i] for i in minresult.params}
- 889		_correl = {i: {j:_correl[i,j] for j in minresult.params} for i in minresult.params}
- 890	
- 891	return _names, _covar, _se, _correl
- 892	
- 893	
- 894class D4xdata(list):
- 895	'''
- 896	Store and process data for a large set of Δ47 and/or Δ48
- 897	analyses, usually comprising more than one analytical session.
- 898	'''
+ 886	_names = [q for q in minresult.params]
+ 887	_covar = J.T @ minresult.covar @ J
+ 888	_se = np.diag(_covar)**.5
+ 889	_correl = _covar.copy()
+ 890	for k,s in enumerate(_se):
+ 891		if s:
+ 892			_correl[k,:] /= s
+ 893			_correl[:,k] /= s
+ 894	
+ 895	if named:
+ 896		_covar = {i: {j:_covar[i,j] for j in minresult.params} for i in minresult.params}
+ 897		_se = {i: _se[i] for i in minresult.params}
+ 898		_correl = {i: {j:_correl[i,j] for j in minresult.params} for i in minresult.params}
 899	
- 900	### 17O CORRECTION PARAMETERS
- 901	R13_VPDB = 0.01118 # (Chang & Li, 1990)
- 902	'''
- 903	Absolute (13C/12C) ratio of VPDB.
- 904	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
- 905	'''
- 906	
- 907	R18_VSMOW = 0.0020052 # (Baertschi, 1976)
- 908	'''
- 909	Absolute (18O/16C) ratio of VSMOW.
- 910	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
- 911	'''
- 912	
- 913	LAMBDA_17 = 0.528 # (Barkan & Luz, 2005)
- 914	'''
- 915	Mass-dependent exponent for triple oxygen isotopes.
- 916	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
- 917	'''
- 918	
- 919	R17_VSMOW = 0.00038475 # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
- 920	'''
- 921	Absolute (17O/16C) ratio of VSMOW.
- 922	By default equal to 0.00038475
- 923	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
- 924	rescaled to `R13_VPDB`)
- 925	'''
- 926	
- 927	R18_VPDB = R18_VSMOW * 1.03092
- 928	'''
- 929	Absolute (18O/16C) ratio of VPDB.
- 930	By definition equal to `R18_VSMOW * 1.03092`.
- 931	'''
- 932	
- 933	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
- 934	'''
- 935	Absolute (17O/16C) ratio of VPDB.
- 936	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
- 937	'''
- 938	
- 939	LEVENE_REF_SAMPLE = 'ETH-3'
- 940	'''
- 941	After the Δ4x standardization step, each sample is tested to
- 942	assess whether the Δ4x variance within all analyses for that
- 943	sample differs significantly from that observed for a given reference
- 944	sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test),
- 945	which yields a p-value corresponding to the null hypothesis that the
- 946	underlying variances are equal).
+ 900	return _names, _covar, _se, _correl
+ 901	
+ 902	
+ 903class D4xdata(list):
+ 904	'''
+ 905	Store and process data for a large set of Δ47 and/or Δ48
+ 906	analyses, usually comprising more than one analytical session.
+ 907	'''
+ 908	
+ 909	### 17O CORRECTION PARAMETERS
+ 910	R13_VPDB = 0.01118 # (Chang & Li, 1990)
+ 911	'''
+ 912	Absolute (13C/12C) ratio of VPDB.
+ 913	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
+ 914	'''
+ 915	
+ 916	R18_VSMOW = 0.0020052 # (Baertschi, 1976)
+ 917	'''
+ 918	Absolute (18O/16O) ratio of VSMOW.
+ 919	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
+ 920	'''
+ 921	
+ 922	LAMBDA_17 = 0.528 # (Barkan & Luz, 2005)
+ 923	'''
+ 924	Mass-dependent exponent for triple oxygen isotopes.
+ 925	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
+ 926	'''
+ 927	
+ 928	R17_VSMOW = 0.00038475 # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
+ 929	'''
+ 930	Absolute (17O/16O) ratio of VSMOW.
+ 931	By default equal to 0.00038475
+ 932	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
+ 933	rescaled to `R13_VPDB`)
+ 934	'''
+ 935	
+ 936	R18_VPDB = R18_VSMOW * 1.03092
+ 937	'''
+ 938	Absolute (18O/16O) ratio of VPDB.
+ 939	By definition equal to `R18_VSMOW * 1.03092`.
+ 940	'''
+ 941	
+ 942	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
+ 943	'''
+ 944	Absolute (17O/16O) ratio of VPDB.
+ 945	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
+ 946	'''
 947	
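Since these are plain class attributes, alternative 17O correction parameters may be assigned before processing any data. A sketch (the values shown simply restate the defaults; note that the two VPDB ratios are evaluated once, at class definition, so they must be updated by hand whenever their inputs change):

```py
from D47crunch import D47data

D47data.R13_VPDB = 0.01118
D47data.R18_VSMOW = 0.0020052
D47data.LAMBDA_17 = 0.528
D47data.R17_VSMOW = 0.00038475

# R18_VPDB and R17_VPDB do not track the assignments above automatically:
D47data.R18_VPDB = D47data.R18_VSMOW * 1.03092
D47data.R17_VPDB = D47data.R17_VSMOW * 1.03092 ** D47data.LAMBDA_17
```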
- 960 ''' - 961 - 962 Nominal_d13C_VPDB = { - 963 'ETH-1': 2.02, - 964 'ETH-2': -10.17, - 965 'ETH-3': 1.71, - 966 } # (Bernasconi et al., 2018) - 967 ''' - 968 Nominal δ13C_VPDB values assigned to carbonate standards, used by - 969 `D4xdata.standardize_d13C()`. + 948 LEVENE_REF_SAMPLE = 'ETH-3' + 949 ''' + 950 After the Δ4x standardization step, each sample is tested to + 951 assess whether the Δ4x variance within all analyses for that + 952 sample differs significantly from that observed for a given reference + 953 sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test), + 954 which yields a p-value corresponding to the null hypothesis that the + 955 underlying variances are equal). + 956 + 957 `LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which + 958 sample should be used as a reference for this test. + 959 ''' + 960 + 961 ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6) # (Kim et al., 2007, calcite) + 962 ''' + 963 Specifies the 18O/16O fractionation factor generally applicable + 964 to acid reactions in the dataset. Currently used by `D4xdata.wg()`, + 965 `D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`. + 966 + 967 By default equal to 1.008129 (calcite reacted at 90 °C, + 968 [Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)). + 969 ''' 970 - 971 By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after - 972 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). - 973 ''' - 974 - 975 Nominal_d18O_VPDB = { - 976 'ETH-1': -2.19, - 977 'ETH-2': -18.69, - 978 'ETH-3': -1.78, - 979 } # (Bernasconi et al., 2018) - 980 ''' - 981 Nominal δ18O_VPDB values assigned to carbonate standards, used by - 982 `D4xdata.standardize_d18O()`. + 971 Nominal_d13C_VPDB = { + 972 'ETH-1': 2.02, + 973 'ETH-2': -10.17, + 974 'ETH-3': 1.71, + 975 } # (Bernasconi et al., 2018) + 976 ''' + 977 Nominal δ13C_VPDB values assigned to carbonate standards, used by + 978 `D4xdata.standardize_d13C()`. + 979 + 980 By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after + 981 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). + 982 ''' 983 - 984 By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after - 985 [Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385). - 986 ''' - 987 - 988 d13C_STANDARDIZATION_METHOD = '2pt' + 984 Nominal_d18O_VPDB = { + 985 'ETH-1': -2.19, + 986 'ETH-2': -18.69, + 987 'ETH-3': -1.78, + 988 } # (Bernasconi et al., 2018) 989 ''' - 990 Method by which to standardize δ13C values: - 991 - 992 + `none`: do not apply any δ13C standardization. - 993 + `'1pt'`: within each session, offset all initial δ13C values so as to - 994 minimize the difference between final δ13C_VPDB values and - 995 `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined). - 996 + `'2pt'`: within each session, apply a affine trasformation to all δ13C - 997 values so as to minimize the difference between final δ13C_VPDB - 998 values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` - 999 is defined). -1000 ''' -1001 -1002 d18O_STANDARDIZATION_METHOD = '2pt' -1003 ''' -1004 Method by which to standardize δ18O values: -1005 -1006 + `none`: do not apply any δ18O standardization. 
-1007	+ `'1pt'`: within each session, offset all initial δ18O values so as to
-1008	minimize the difference between final δ18O_VPDB values and
-1009	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
-1010	+ `'2pt'`: within each session, apply a affine trasformation to all δ18O
-1011	values so as to minimize the difference between final δ18O_VPDB
-1012	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
-1013	is defined).
-1014	'''
-1015	
-1016	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
-1017	'''
-1018	**Parameters**
-1019	
-1020	+ `l`: a list of dictionaries, with each dictionary including at least the keys
-1021	`Sample`, `d45`, `d46`, and `d47` or `d48`.
-1022	+ `mass`: `'47'` or `'48'`
-1023	+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
-1024	+ `session`: define session name for analyses without a `Session` key
-1025	+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
-1026	
-1027	Returns a `D4xdata` object derived from `list`.
-1028	'''
-1029	self._4x = mass
-1030	self.verbose = verbose
-1031	self.prefix = 'D4xdata'
-1032	self.logfile = logfile
-1033	list.__init__(self, l)
-1034	self.Nf = None
-1035	self.repeatability = {}
-1036	self.refresh(session = session)
-1037	
-1038	
-1039	def make_verbal(oldfun):
-1040	'''
-1041	Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
-1042	'''
-1043	@wraps(oldfun)
-1044	def newfun(*args, verbose = '', **kwargs):
-1045		myself = args[0]
-1046		oldprefix = myself.prefix
-1047		myself.prefix = oldfun.__name__
-1048		if verbose != '':
-1049			oldverbose = myself.verbose
-1050			myself.verbose = verbose
-1051		out = oldfun(*args, **kwargs)
-1052		myself.prefix = oldprefix
-1053		if verbose != '':
-1054			myself.verbose = oldverbose
-1055		return out
-1056	return newfun
-1057	
-1058	
-1059	def msg(self, txt):
-1060	'''
-1061	Log a message to `self.logfile`, and print it out if `verbose = True`
-1062	'''
-1063	self.log(txt)
-1064	if self.verbose:
-1065		print(f'{f"[{self.prefix}]":<16} {txt}')
+ 990	Nominal δ18O_VPDB values assigned to carbonate standards, used by
+ 991	`D4xdata.standardize_d18O()`.
+ 992	
+ 993	By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after
+ 994	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
+ 995	'''
+ 996	
+ 997	d13C_STANDARDIZATION_METHOD = '2pt'
+ 998	'''
+ 999	Method by which to standardize δ13C values:
+1000	
+1001	+ `none`: do not apply any δ13C standardization.
+1002	+ `'1pt'`: within each session, offset all initial δ13C values so as to
+1003	minimize the difference between final δ13C_VPDB values and
+1004	`Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined).
+1005	+ `'2pt'`: within each session, apply an affine transformation to all δ13C
+1006	values so as to minimize the difference between final δ13C_VPDB
+1007	values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB`
+1008	is defined).
+1009	'''
+1010	
+1011	d18O_STANDARDIZATION_METHOD = '2pt'
+1012	'''
+1013	Method by which to standardize δ18O values:
+1014	
+1015	+ `none`: do not apply any δ18O standardization.
+1016	+ `'1pt'`: within each session, offset all initial δ18O values so as to
+1017	minimize the difference between final δ18O_VPDB values and
+1018	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
+1019	+ `'2pt'`: within each session, apply an affine transformation to all δ18O
+1020	values so as to minimize the difference between final δ18O_VPDB
+1021	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
+1022	is defined).
+1023	'''
+1024	
+1025	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
+1026	'''
+1027	**Parameters**
+1028	
+1029	+ `l`: a list of dictionaries, with each dictionary including at least the keys
+1030	`Sample`, `d45`, `d46`, and `d47` or `d48`.
+1031	+ `mass`: `'47'` or `'48'`
+1032	+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
+1033	+ `session`: define session name for analyses without a `Session` key
+1034	+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
+1035	
+1036	Returns a `D4xdata` object derived from `list`.
+1037	'''
+1038	self._4x = mass
+1039	self.verbose = verbose
+1040	self.prefix = 'D4xdata'
+1041	self.logfile = logfile
+1042	list.__init__(self, l)
+1043	self.Nf = None
+1044	self.repeatability = {}
+1045	self.refresh(session = session)
+1046	
+1047	
+1048	def make_verbal(oldfun):
+1049	'''
+1050	Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
+1051	'''
+1052	@wraps(oldfun)
+1053	def newfun(*args, verbose = '', **kwargs):
+1054		myself = args[0]
+1055		oldprefix = myself.prefix
+1056		myself.prefix = oldfun.__name__
+1057		if verbose != '':
+1058			oldverbose = myself.verbose
+1059			myself.verbose = verbose
+1060		out = oldfun(*args, **kwargs)
+1061		myself.prefix = oldprefix
+1062		if verbose != '':
+1063			myself.verbose = oldverbose
+1064		return out
+1065	return newfun
 1066	
 1067	
-1068	def vmsg(self, txt):
+1068	def msg(self, txt):
 1069	'''
-1070	Log a message to `self.logfile` and print it out
+1070	Log a message to `self.logfile`, and print it out if `verbose = True`
 1071	'''
 1072	self.log(txt)
-1073	print(txt)
-1074	
+1073	if self.verbose:
+1074		print(f'{f"[{self.prefix}]":<16} {txt}')
 1075	
-1076	def log(self, *txts):
-1077	'''
-1078	Log a message to `self.logfile`
-1079	'''
-1080	if self.logfile:
-1081		with open(self.logfile, 'a') as fid:
-1082			for txt in txts:
-1083				fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
 1084	
-1085	
-1086	def refresh(self, session = 'mySession'):
-1087	'''
-1088	Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
-1089	'''
-1090	self.fill_in_missing_info(session = session)
-1091	self.refresh_sessions()
-1092	self.refresh_samples()
 1093	
 1094	
-1095	def refresh_sessions(self):
+1076	
+1077	def vmsg(self, txt):
+1078	'''
+1079	Log a message to `self.logfile` and print it out
+1080	'''
+1081	self.log(txt)
+1082	print(txt)
+1083	
 1084	
+1085	def log(self, *txts):
+1086	'''
+1087	Log a message to `self.logfile`
+1088	'''
+1089	if self.logfile:
+1090		with open(self.logfile, 'a') as fid:
+1091			for txt in txts:
+1092				fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
 1093	
 1094	
+1095	def refresh(self, session = 'mySession'):
 1096	'''
-1097	Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
-1098	to `False` for all sessions.
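Because `refresh_sessions()` re-initializes these per-session settings, drift corrections (and the per-session standardization methods set alongside them) are meant to be overridden after the data are loaded and before standardization. A sketch, where `'rawdata.csv'` and `'Session_01'` are hypothetical names:

```py
from D47crunch import D47data

mydata = D47data()
mydata.read('rawdata.csv')	# hypothetical input file

# allow the scrambling factor, compositional slope, and WG offset of this
# session to drift linearly with time during standardization:
mydata.sessions['Session_01']['scrambling_drift'] = True
mydata.sessions['Session_01']['slope_drift'] = True
mydata.sessions['Session_01']['wg_drift'] = True
```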
-1099 ''' -1100 self.sessions = { -1101 s: {'data': [r for r in self if r['Session'] == s]} -1102 for s in sorted({r['Session'] for r in self}) -1103 } -1104 for s in self.sessions: -1105 self.sessions[s]['scrambling_drift'] = False -1106 self.sessions[s]['slope_drift'] = False -1107 self.sessions[s]['wg_drift'] = False -1108 self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD -1109 self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD -1110 -1111 -1112 def refresh_samples(self): -1113 ''' -1114 Define `self.samples`, `self.anchors`, and `self.unknowns`. -1115 ''' -1116 self.samples = { -1117 s: {'data': [r for r in self if r['Sample'] == s]} -1118 for s in sorted({r['Sample'] for r in self}) -1119 } -1120 self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x} -1121 self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x} -1122 -1123 -1124 def read(self, filename, sep = '', session = ''): -1125 ''' -1126 Read file in csv format to load data into a `D47data` object. -1127 -1128 In the csv file, spaces before and after field separators (`','` by default) -1129 are optional. Each line corresponds to a single analysis. -1130 -1131 The required fields are: +1097 Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`. +1098 ''' +1099 self.fill_in_missing_info(session = session) +1100 self.refresh_sessions() +1101 self.refresh_samples() +1102 +1103 +1104 def refresh_sessions(self): +1105 ''' +1106 Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift` +1107 to `False` for all sessions. +1108 ''' +1109 self.sessions = { +1110 s: {'data': [r for r in self if r['Session'] == s]} +1111 for s in sorted({r['Session'] for r in self}) +1112 } +1113 for s in self.sessions: +1114 self.sessions[s]['scrambling_drift'] = False +1115 self.sessions[s]['slope_drift'] = False +1116 self.sessions[s]['wg_drift'] = False +1117 self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD +1118 self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD +1119 +1120 +1121 def refresh_samples(self): +1122 ''' +1123 Define `self.samples`, `self.anchors`, and `self.unknowns`. +1124 ''' +1125 self.samples = { +1126 s: {'data': [r for r in self if r['Sample'] == s]} +1127 for s in sorted({r['Sample'] for r in self}) +1128 } +1129 self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x} +1130 self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x} +1131 1132 -1133 + `UID`: a unique identifier -1134 + `Session`: an identifier for the analytical session -1135 + `Sample`: a sample identifier -1136 + `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values -1137 -1138 Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to -1139 VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48` -1140 and `d49` are optional, and set to NaN by default. +1133 def read(self, filename, sep = '', session = ''): +1134 ''' +1135 Read file in csv format to load data into a `D47data` object. +1136 +1137 In the csv file, spaces before and after field separators (`','` by default) +1138 are optional. Each line corresponds to a single analysis. 
+1139	
+1140	The required fields are:
 1141	
-1142	**Parameters**
-1143	
-1144	+ `fileneme`: the path of the file to read
-1145	+ `sep`: csv separator delimiting the fields
-1146	+ `session`: set `Session` field to this string for all analyses
-1147	'''
-1148	with open(filename) as fid:
-1149		self.input(fid.read(), sep = sep, session = session)
+1142	+ `UID`: a unique identifier
+1143	+ `Session`: an identifier for the analytical session
+1144	+ `Sample`: a sample identifier
+1145	+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
+1146	
+1147	Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1148	VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1149	and `d49` are optional, and set to NaN by default.
 1150	
-1151	
-1152	def input(self, txt, sep = '', session = ''):
-1153	'''
-1154	Read `txt` string in csv format to load analysis data into a `D47data` object.
-1155	
-1156	In the csv string, spaces before and after field separators (`','` by default)
-1157	are optional. Each line corresponds to a single analysis.
-1158	
-1159	The required fields are:
+1151	**Parameters**
+1152	
+1153	+ `filename`: the path of the file to read
+1154	+ `sep`: csv separator delimiting the fields
+1155	+ `session`: set `Session` field to this string for all analyses
+1156	'''
+1157	with open(filename) as fid:
+1158		self.input(fid.read(), sep = sep, session = session)
+1159	
 1160	
-1161	+ `UID`: a unique identifier
-1162	+ `Session`: an identifier for the analytical session
-1163	+ `Sample`: a sample identifier
-1164	+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
-1165	
-1166	Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
-1167	VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
-1168	and `d49` are optional, and set to NaN by default.
 1169	
-1170	**Parameters**
-1171	
-1172	+ `txt`: the csv string to read
-1173	+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
-1174	whichever appers most often in `txt`.
-1175	+ `session`: set `Session` field to this string for all analyses
-1176	'''
-1177	if sep == '':
-1178		sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
-1179	txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
-1180	data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
-1181	
-1182	if session != '':
-1183		for r in data:
-1184			r['Session'] = session
-1185	
-1186	self += data
-1187	self.refresh()
-1188	
-1189	
-1190	@make_verbal
-1191	def wg(self, samples = None, a18_acid = None):
-1192	'''
-1193	Compute bulk composition of the working gas for each session based on
-1194	the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
-1195	`self.Nominal_d18O_VPDB`.
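A sketch of the corresponding usage, feeding `input()` an inline csv string (the numeric values are illustrative); `read()` is the same operation applied to a file path:

```py
from D47crunch import D47data

mydata = D47data()
mydata.input('''UID, Session, Sample, d45, d46, d47
A01, Session_01, ETH-1, 5.795, 11.628, 16.894
A02, Session_01, MYSAMPLE, 6.219, 11.491, 17.277''')
print(len(mydata))	# 2 analyses loaded; d48 and d49 are filled in as NaN
```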
-1196	'''
+1170	+ `UID`: a unique identifier
+1171	+ `Session`: an identifier for the analytical session
+1172	+ `Sample`: a sample identifier
+1173	+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
+1174	
+1175	Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
+1176	VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
+1177	and `d49` are optional, and set to NaN by default.
+1178	
+1179	**Parameters**
+1180	
+1181	+ `txt`: the csv string to read
+1182	+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
+1183	whichever appears most often in `txt`.
+1184	+ `session`: set `Session` field to this string for all analyses
+1185	'''
+1186	if sep == '':
+1187		sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
+1188	txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
+1189	data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
+1190	
+1191	if session != '':
+1192		for r in data:
+1193			r['Session'] = session
+1194	
+1195	self += data
+1196	self.refresh()
 1197	
-1198	self.msg('Computing WG composition:')
-1199	
-1200	if a18_acid is None:
-1201		a18_acid = self.ALPHA_18O_ACID_REACTION
-1202	if samples is None:
-1203		samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
-1204	
-1205	assert a18_acid, f'Acid fractionation factor should not be zero.'
+1198	
+1199	@make_verbal
+1200	def wg(self, samples = None, a18_acid = None):
+1201	'''
+1202	Compute bulk composition of the working gas for each session based on
+1203	the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
+1204	`self.Nominal_d18O_VPDB`.
+1205	'''
 1206	
-1207	samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
-1208	R45R46_standards = {}
-1209	for sample in samples:
-1210		d13C_vpdb = self.Nominal_d13C_VPDB[sample]
-1211		d18O_vpdb = self.Nominal_d18O_VPDB[sample]
-1212		R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
-1213		R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
-1214		R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
+1207	self.msg('Computing WG composition:')
+1208	
+1209	if a18_acid is None:
+1210		a18_acid = self.ALPHA_18O_ACID_REACTION
+1211	if samples is None:
+1212		samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
+1213	
+1214	assert a18_acid, f'Acid fractionation factor should not be zero.'
 1215	
-1216	C12_s = 1 / (1 + R13_s)
-1217	C13_s = R13_s / (1 + R13_s)
-1218	C16_s = 1 / (1 + R17_s + R18_s)
-1219	C17_s = R17_s / (1 + R17_s + R18_s)
-1220	C18_s = R18_s / (1 + R17_s + R18_s)
-1221	
-1222	C626_s = C12_s * C16_s ** 2
-1223	C627_s = 2 * C12_s * C16_s * C17_s
-1224	C628_s = 2 * C12_s * C16_s * C18_s
-1225	C636_s = C13_s * C16_s ** 2
-1226	C637_s = 2 * C13_s * C16_s * C17_s
-1227	C727_s = C12_s * C17_s ** 2
-1228	
-1229	R45_s = (C627_s + C636_s) / C626_s
-1230	R46_s = (C628_s + C637_s + C727_s) / C626_s
-1231	R45R46_standards[sample] = (R45_s, R46_s)
-1232	
-1233	for s in self.sessions:
-1234	db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
-1235	assert db, f'No sample from {samples} found in session "{s}".'
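In a typical workflow `wg()` runs right after loading the data and before `crunch()`. A sketch, with hypothetical file and session names:

```py
from D47crunch import D47data

mydata = D47data()
mydata.read('rawdata.csv')	# must include ETH-1/2/3 analyses in each session
mydata.wg()			# back-calculates each session's working-gas composition
mydata.crunch()
print(mydata.sessions['Session_01']['d13Cwg_VPDB'],
	mydata.sessions['Session_01']['d18Owg_VSMOW'])
```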
-1236# dbsamples = sorted({r['Sample'] for r in db}) +1216 samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB] +1217 R45R46_standards = {} +1218 for sample in samples: +1219 d13C_vpdb = self.Nominal_d13C_VPDB[sample] +1220 d18O_vpdb = self.Nominal_d18O_VPDB[sample] +1221 R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000) +1222 R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17 +1223 R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid +1224 +1225 C12_s = 1 / (1 + R13_s) +1226 C13_s = R13_s / (1 + R13_s) +1227 C16_s = 1 / (1 + R17_s + R18_s) +1228 C17_s = R17_s / (1 + R17_s + R18_s) +1229 C18_s = R18_s / (1 + R17_s + R18_s) +1230 +1231 C626_s = C12_s * C16_s ** 2 +1232 C627_s = 2 * C12_s * C16_s * C17_s +1233 C628_s = 2 * C12_s * C16_s * C18_s +1234 C636_s = C13_s * C16_s ** 2 +1235 C637_s = 2 * C13_s * C16_s * C17_s +1236 C727_s = C12_s * C17_s ** 2 1237 -1238 X = [r['d45'] for r in db] -1239 Y = [R45R46_standards[r['Sample']][0] for r in db] -1240 x1, x2 = np.min(X), np.max(X) -1241 -1242 if x1 < x2: -1243 wgcoord = x1/(x1-x2) -1244 else: -1245 wgcoord = 999 +1238 R45_s = (C627_s + C636_s) / C626_s +1239 R46_s = (C628_s + C637_s + C727_s) / C626_s +1240 R45R46_standards[sample] = (R45_s, R46_s) +1241 +1242 for s in self.sessions: +1243 db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples] +1244 assert db, f'No sample from {samples} found in session "{s}".' +1245# dbsamples = sorted({r['Sample'] for r in db}) 1246 -1247 if wgcoord < -.5 or wgcoord > 1.5: -1248 # unreasonable to extrapolate to d45 = 0 -1249 R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) -1250 else : -1251 # d45 = 0 is reasonably well bracketed -1252 R45_wg = np.polyfit(X, Y, 1)[1] -1253 -1254 X = [r['d46'] for r in db] -1255 Y = [R45R46_standards[r['Sample']][1] for r in db] -1256 x1, x2 = np.min(X), np.max(X) -1257 -1258 if x1 < x2: -1259 wgcoord = x1/(x1-x2) -1260 else: -1261 wgcoord = 999 +1247 X = [r['d45'] for r in db] +1248 Y = [R45R46_standards[r['Sample']][0] for r in db] +1249 x1, x2 = np.min(X), np.max(X) +1250 +1251 if x1 < x2: +1252 wgcoord = x1/(x1-x2) +1253 else: +1254 wgcoord = 999 +1255 +1256 if wgcoord < -.5 or wgcoord > 1.5: +1257 # unreasonable to extrapolate to d45 = 0 +1258 R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) +1259 else : +1260 # d45 = 0 is reasonably well bracketed +1261 R45_wg = np.polyfit(X, Y, 1)[1] 1262 -1263 if wgcoord < -.5 or wgcoord > 1.5: -1264 # unreasonable to extrapolate to d46 = 0 -1265 R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) -1266 else : -1267 # d46 = 0 is reasonably well bracketed -1268 R46_wg = np.polyfit(X, Y, 1)[1] -1269 -1270 d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg) +1263 X = [r['d46'] for r in db] +1264 Y = [R45R46_standards[r['Sample']][1] for r in db] +1265 x1, x2 = np.min(X), np.max(X) +1266 +1267 if x1 < x2: +1268 wgcoord = x1/(x1-x2) +1269 else: +1270 wgcoord = 999 1271 -1272 self.msg(f'Session {s} WG: δ13C_VPDB = {d13Cwg_VPDB:.3f} δ18O_VSMOW = {d18Owg_VSMOW:.3f}') -1273 -1274 self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB -1275 self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW -1276 for r in self.sessions[s]['data']: -1277 r['d13Cwg_VPDB'] = d13Cwg_VPDB -1278 r['d18Owg_VSMOW'] = d18Owg_VSMOW -1279 +1272 if wgcoord < -.5 or wgcoord > 1.5: +1273 # unreasonable to extrapolate to d46 = 0 +1274 R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)]) +1275 else : +1276 # d46 = 0 is reasonably well bracketed +1277 R46_wg = np.polyfit(X, Y, 
1)[1] +1278 +1279 d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg) 1280 -1281 def compute_bulk_delta(self, R45, R46, D17O = 0): -1282 ''' -1283 Compute δ13C_VPDB and δ18O_VSMOW, -1284 by solving the generalized form of equation (17) from -1285 [Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05), -1286 assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and -1287 solving the corresponding second-order Taylor polynomial. -1288 (Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)) -1289 ''' -1290 -1291 K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17 -1292 -1293 A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17) -1294 B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17 -1295 C = 2 * self.R18_VSMOW -1296 D = -R46 -1297 -1298 aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2 -1299 bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C -1300 cc = A + B + C + D +1281 self.msg(f'Session {s} WG: δ13C_VPDB = {d13Cwg_VPDB:.3f} δ18O_VSMOW = {d18Owg_VSMOW:.3f}') +1282 +1283 self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB +1284 self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW +1285 for r in self.sessions[s]['data']: +1286 r['d13Cwg_VPDB'] = d13Cwg_VPDB +1287 r['d18Owg_VSMOW'] = d18Owg_VSMOW +1288 +1289 +1290 def compute_bulk_delta(self, R45, R46, D17O = 0): +1291 ''' +1292 Compute δ13C_VPDB and δ18O_VSMOW, +1293 by solving the generalized form of equation (17) from +1294 [Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05), +1295 assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and +1296 solving the corresponding second-order Taylor polynomial. +1297 (Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)) +1298 ''' +1299 +1300 K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17 1301 -1302 d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa) -1303 -1304 R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW -1305 R17 = K * R18 ** self.LAMBDA_17 -1306 R13 = R45 - 2 * R17 -1307 -1308 d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1) -1309 -1310 return d13C_VPDB, d18O_VSMOW -1311 +1302 A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17) +1303 B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17 +1304 C = 2 * self.R18_VSMOW +1305 D = -R46 +1306 +1307 aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2 +1308 bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C +1309 cc = A + B + C + D +1310 +1311 d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa) 1312 -1313 @make_verbal -1314 def crunch(self, verbose = ''): -1315 ''' -1316 Compute bulk composition and raw clumped isotope anomalies for all analyses. -1317 ''' -1318 for r in self: -1319 self.compute_bulk_and_clumping_deltas(r) -1320 self.standardize_d13C() -1321 self.standardize_d18O() -1322 self.msg(f"Crunched {len(self)} analyses.") -1323 -1324 -1325 def fill_in_missing_info(self, session = 'mySession'): -1326 ''' -1327 Fill in optional fields with default values -1328 ''' -1329 for i,r in enumerate(self): -1330 if 'D17O' not in r: -1331 r['D17O'] = 0. 
-1332 if 'UID' not in r: -1333 r['UID'] = f'{i+1}' -1334 if 'Session' not in r: -1335 r['Session'] = session -1336 for k in ['d47', 'd48', 'd49']: -1337 if k not in r: -1338 r[k] = np.nan -1339 -1340 -1341 def standardize_d13C(self): -1342 ''' -1343 Perform δ13C standadization within each session `s` according to -1344 `self.sessions[s]['d13C_standardization_method']`, which is defined by default -1345 by `D47data.refresh_sessions()`as equal to `self.d13C_STANDARDIZATION_METHOD`, but -1346 may be redefined abitrarily at a later stage. -1347 ''' -1348 for s in self.sessions: -1349 if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']: -1350 XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB] -1351 X,Y = zip(*XY) -1352 if self.sessions[s]['d13C_standardization_method'] == '1pt': -1353 offset = np.mean(Y) - np.mean(X) -1354 for r in self.sessions[s]['data']: -1355 r['d13C_VPDB'] += offset -1356 elif self.sessions[s]['d13C_standardization_method'] == '2pt': -1357 a,b = np.polyfit(X,Y,1) -1358 for r in self.sessions[s]['data']: -1359 r['d13C_VPDB'] = a * r['d13C_VPDB'] + b -1360 -1361 def standardize_d18O(self): -1362 ''' -1363 Perform δ18O standadization within each session `s` according to -1364 `self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`, -1365 which is defined by default by `D47data.refresh_sessions()`as equal to -1366 `self.d18O_STANDARDIZATION_METHOD`, but may be redefined abitrarily at a later stage. -1367 ''' -1368 for s in self.sessions: -1369 if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']: -1370 XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB] -1371 X,Y = zip(*XY) -1372 Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y] -1373 if self.sessions[s]['d18O_standardization_method'] == '1pt': -1374 offset = np.mean(Y) - np.mean(X) -1375 for r in self.sessions[s]['data']: -1376 r['d18O_VSMOW'] += offset -1377 elif self.sessions[s]['d18O_standardization_method'] == '2pt': -1378 a,b = np.polyfit(X,Y,1) -1379 for r in self.sessions[s]['data']: -1380 r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b -1381 -1382 -1383 def compute_bulk_and_clumping_deltas(self, r): -1384 ''' -1385 Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`. 
-1386	'''
-1387	
-1388	# Compute working gas R13, R18, and isobar ratios
-1389	R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
-1390	R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
-1391	R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
-1392	
-1393	# Compute analyte isobar ratios
-1394	R45 = (1 + r['d45'] / 1000) * R45_wg
-1395	R46 = (1 + r['d46'] / 1000) * R46_wg
-1396	R47 = (1 + r['d47'] / 1000) * R47_wg
-1397	R48 = (1 + r['d48'] / 1000) * R48_wg
-1398	R49 = (1 + r['d49'] / 1000) * R49_wg
-1399	
-1400	r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
-1401	R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
-1402	R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
-1403	
-1404	# Compute stochastic isobar ratios of the analyte
-1405	R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
-1406		R13, R18, D17O = r['D17O']
-1407	)
+1313	R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
+1314	R17 = K * R18 ** self.LAMBDA_17
+1315	R13 = R45 - 2 * R17
+1316	
+1317	d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
+1318	
+1319	return d13C_VPDB, d18O_VSMOW
+1320	
+1321	
+1322	@make_verbal
+1323	def crunch(self, verbose = ''):
+1324	'''
+1325	Compute bulk composition and raw clumped isotope anomalies for all analyses.
+1326	'''
+1327	for r in self:
+1328		self.compute_bulk_and_clumping_deltas(r)
+1329	self.standardize_d13C()
+1330	self.standardize_d18O()
+1331	self.msg(f"Crunched {len(self)} analyses.")
+1332	
+1333	
+1334	def fill_in_missing_info(self, session = 'mySession'):
+1335	'''
+1336	Fill in optional fields with default values
+1337	'''
+1338	for i,r in enumerate(self):
+1339		if 'D17O' not in r:
+1340			r['D17O'] = 0.
+1341		if 'UID' not in r:
+1342			r['UID'] = f'{i+1}'
+1343		if 'Session' not in r:
+1344			r['Session'] = session
+1345		for k in ['d47', 'd48', 'd49']:
+1346			if k not in r:
+1347				r[k] = np.nan
+1348	
+1349	
+1350	def standardize_d13C(self):
+1351	'''
+1352	Perform δ13C standardization within each session `s` according to
+1353	`self.sessions[s]['d13C_standardization_method']`, which is defined by default
+1354	by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
+1355	may be redefined arbitrarily at a later stage.
+1356	'''
+1357	for s in self.sessions:
+1358		if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
+1359			XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
+1360			X,Y = zip(*XY)
+1361			if self.sessions[s]['d13C_standardization_method'] == '1pt':
+1362				offset = np.mean(Y) - np.mean(X)
+1363				for r in self.sessions[s]['data']:
+1364					r['d13C_VPDB'] += offset
+1365			elif self.sessions[s]['d13C_standardization_method'] == '2pt':
+1366				a,b = np.polyfit(X,Y,1)
+1367				for r in self.sessions[s]['data']:
+1368					r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
+1369	
+1370	def standardize_d18O(self):
+1371	'''
+1372	Perform δ18O standardization within each session `s` according to
+1373	`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
+1374	which is defined by default by `D47data.refresh_sessions()` as equal to
+1375	`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
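The `'2pt'` method thus amounts to an ordinary least-squares affine fit of nominal versus measured values over the anchors, applied to every analysis of the session. A self-contained numerical sketch of that correction (the "measured" values are made up; the nominal ones are the ETH values listed above):

```py
import numpy as np

X = np.array([2.21, -9.98, 1.89])	# measured d13C_VPDB of ETH-1/2/3 (made up)
Y = np.array([2.02, -10.17, 1.71])	# nominal d13C_VPDB of ETH-1/2/3
a, b = np.polyfit(X, Y, 1)
print(a * X + b)			# anchor values after correction, close to Y
```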
+1376 ''' +1377 for s in self.sessions: +1378 if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']: +1379 XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB] +1380 X,Y = zip(*XY) +1381 Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y] +1382 if self.sessions[s]['d18O_standardization_method'] == '1pt': +1383 offset = np.mean(Y) - np.mean(X) +1384 for r in self.sessions[s]['data']: +1385 r['d18O_VSMOW'] += offset +1386 elif self.sessions[s]['d18O_standardization_method'] == '2pt': +1387 a,b = np.polyfit(X,Y,1) +1388 for r in self.sessions[s]['data']: +1389 r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b +1390 +1391 +1392 def compute_bulk_and_clumping_deltas(self, r): +1393 ''' +1394 Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`. +1395 ''' +1396 +1397 # Compute working gas R13, R18, and isobar ratios +1398 R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000) +1399 R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000) +1400 R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg) +1401 +1402 # Compute analyte isobar ratios +1403 R45 = (1 + r['d45'] / 1000) * R45_wg +1404 R46 = (1 + r['d46'] / 1000) * R46_wg +1405 R47 = (1 + r['d47'] / 1000) * R47_wg +1406 R48 = (1 + r['d48'] / 1000) * R48_wg +1407 R49 = (1 + r['d49'] / 1000) * R49_wg 1408 -1409 # Check that R45/R45stoch and R46/R46stoch are undistinguishable from 1, -1410 # and raise a warning if the corresponding anomalies exceed 0.02 ppm. -1411 if (R45 / R45stoch - 1) > 5e-8: -1412 self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm') -1413 if (R46 / R46stoch - 1) > 5e-8: -1414 self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm') -1415 -1416 # Compute raw clumped isotope anomalies -1417 r['D47raw'] = 1000 * (R47 / R47stoch - 1) -1418 r['D48raw'] = 1000 * (R48 / R48stoch - 1) -1419 r['D49raw'] = 1000 * (R49 / R49stoch - 1) -1420 -1421 -1422 def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0): -1423 ''' -1424 Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`, -1425 optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope -1426 anomalies (`D47`, `D48`, `D49`), all expressed in permil. 
-1427	'''
-1428	
-1429	# Compute R17
-1430	R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
-1431	
-1432	# Compute isotope concentrations
-1433	C12 = (1 + R13) ** -1
-1434	C13 = C12 * R13
-1435	C16 = (1 + R17 + R18) ** -1
-1436	C17 = C16 * R17
-1437	C18 = C16 * R18
-1438	
-1439	# Compute stochastic isotopologue concentrations
-1440	C626 = C16 * C12 * C16
-1441	C627 = C16 * C12 * C17 * 2
-1442	C628 = C16 * C12 * C18 * 2
-1443	C636 = C16 * C13 * C16
-1444	C637 = C16 * C13 * C17 * 2
-1445	C638 = C16 * C13 * C18 * 2
-1446	C727 = C17 * C12 * C17
-1447	C728 = C17 * C12 * C18 * 2
-1448	C737 = C17 * C13 * C17
-1449	C738 = C17 * C13 * C18 * 2
-1450	C828 = C18 * C12 * C18
-1451	C838 = C18 * C13 * C18
-1452	
-1453	# Compute stochastic isobar ratios
-1454	R45 = (C636 + C627) / C626
-1455	R46 = (C628 + C637 + C727) / C626
-1456	R47 = (C638 + C728 + C737) / C626
-1457	R48 = (C738 + C828) / C626
-1458	R49 = C838 / C626
-1459	
-1460	# Account for stochastic anomalies
-1461	R47 *= 1 + D47 / 1000
-1462	R48 *= 1 + D48 / 1000
-1463	R49 *= 1 + D49 / 1000
-1464	
-1465	# Return isobar ratios
-1466	return R45, R46, R47, R48, R49
-1467	
+1409	r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
+1410	R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
+1411	R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
+1412	
+1413	# Compute stochastic isobar ratios of the analyte
+1414	R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
+1415		R13, R18, D17O = r['D17O']
+1416	)
+1417	
+1418	# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
+1419	# and raise a warning if the corresponding anomalies exceed 0.05 ppm.
+1420	if (R45 / R45stoch - 1) > 5e-8:
+1421		self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
+1422	if (R46 / R46stoch - 1) > 5e-8:
+1423		self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
+1424	
+1425	# Compute raw clumped isotope anomalies
+1426	r['D47raw'] = 1000 * (R47 / R47stoch - 1)
+1427	r['D48raw'] = 1000 * (R48 / R48stoch - 1)
+1428	r['D49raw'] = 1000 * (R49 / R49stoch - 1)
+1429	
+1430	
+1431	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
+1432	'''
+1433	Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
+1434	optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
+1435	anomalies (`D47`, `D48`, `D49`), all expressed in permil.
+1436 ''' +1437 +1438 # Compute R17 +1439 R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17 +1440 +1441 # Compute isotope concentrations +1442 C12 = (1 + R13) ** -1 +1443 C13 = C12 * R13 +1444 C16 = (1 + R17 + R18) ** -1 +1445 C17 = C16 * R17 +1446 C18 = C16 * R18 +1447 +1448 # Compute stochastic isotopologue concentrations +1449 C626 = C16 * C12 * C16 +1450 C627 = C16 * C12 * C17 * 2 +1451 C628 = C16 * C12 * C18 * 2 +1452 C636 = C16 * C13 * C16 +1453 C637 = C16 * C13 * C17 * 2 +1454 C638 = C16 * C13 * C18 * 2 +1455 C727 = C17 * C12 * C17 +1456 C728 = C17 * C12 * C18 * 2 +1457 C737 = C17 * C13 * C17 +1458 C738 = C17 * C13 * C18 * 2 +1459 C828 = C18 * C12 * C18 +1460 C838 = C18 * C13 * C18 +1461 +1462 # Compute stochastic isobar ratios +1463 R45 = (C636 + C627) / C626 +1464 R46 = (C628 + C637 + C727) / C626 +1465 R47 = (C638 + C728 + C737) / C626 +1466 R48 = (C738 + C828) / C626 +1467 R49 = C838 / C626 1468 -1469 def split_samples(self, samples_to_split = 'all', grouping = 'by_session'): -1470 ''' -1471 Split unknown samples by UID (treat all analyses as different samples) -1472 or by session (treat analyses of a given sample in different sessions as -1473 different samples). -1474 -1475 **Parameters** +1469 # Account for stochastic anomalies +1470 R47 *= 1 + D47 / 1000 +1471 R48 *= 1 + D48 / 1000 +1472 R49 *= 1 + D49 / 1000 +1473 +1474 # Return isobar ratios +1475 return R45, R46, R47, R48, R49 1476 -1477 + `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']` -1478 + `grouping`: `by_uid` | `by_session` -1479 ''' -1480 if samples_to_split == 'all': -1481 samples_to_split = [s for s in self.unknowns] -1482 gkeys = {'by_uid':'UID', 'by_session':'Session'} -1483 self.grouping = grouping.lower() -1484 if self.grouping in gkeys: -1485 gkey = gkeys[self.grouping] -1486 for r in self: -1487 if r['Sample'] in samples_to_split: -1488 r['Sample_original'] = r['Sample'] -1489 r['Sample'] = f"{r['Sample']}__{r[gkey]}" -1490 elif r['Sample'] in self.unknowns: -1491 r['Sample_original'] = r['Sample'] -1492 self.refresh_samples() -1493 -1494 -1495 def unsplit_samples(self, tables = False): -1496 ''' -1497 Reverse the effects of `D47data.split_samples()`. -1498 -1499 This should only be used after `D4xdata.standardize()` with `method='pooled'`. -1500 -1501 After `D4xdata.standardize()` with `method='indep_sessions'`, one should -1502 probably use `D4xdata.combine_samples()` instead to reverse the effects of -1503 `D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the -1504 effects of `D47data.split_samples()` with `grouping='by_sessions'` (because in -1505 that case session-averaged Δ4x values are statistically independent). 
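`compute_isobar_ratios()` and `compute_bulk_delta()` are (approximate) inverses of each other, which offers a quick consistency check. A sketch, using an empty instance and made-up bulk compositions:

```py
from D47crunch import D47data

d = D47data()	# empty instance, used here only for its conversion methods
R13 = d.R13_VPDB * (1 + 2.0 / 1000)	# composition with δ13C_VPDB = +2.0 ‰
R18 = d.R18_VSMOW * (1 + 37.0 / 1000)	# and δ18O_VSMOW = +37.0 ‰
R45, R46, R47, R48, R49 = d.compute_isobar_ratios(R13, R18)
print(d.compute_bulk_delta(R45, R46))	# recovers approximately (2.0, 37.0)
```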
-1506	'''
-1507	unknowns_old = sorted({s for s in self.unknowns})
-1508	CM_old = self.standardization.covar[:,:]
-1509	VD_old = self.standardization.params.valuesdict().copy()
-1510	vars_old = self.standardization.var_names
-1511	
-1512	unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
-1513	
-1514	Ns = len(vars_old) - len(unknowns_old)
-1515	vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
-1516	VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
-1517	
-1518	W = np.zeros((len(vars_new), len(vars_old)))
-1519	W[:Ns,:Ns] = np.eye(Ns)
-1520	for u in unknowns_new:
-1521		splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
-1522		if self.grouping == 'by_session':
-1523			weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
-1524		elif self.grouping == 'by_uid':
-1525			weights = [1 for s in splits]
-1526		sw = sum(weights)
-1527		weights = [w/sw for w in weights]
-1528		W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
-1529	
-1530	CM_new = W @ CM_old @ W.T
-1531	V = W @ np.array([[VD_old[k]] for k in vars_old])
-1532	VD_new = {k:v[0] for k,v in zip(vars_new, V)}
-1533	
-1534	self.standardization.covar = CM_new
-1535	self.standardization.params.valuesdict = lambda : VD_new
-1536	self.standardization.var_names = vars_new
-1537	
-1538	for r in self:
-1539		if r['Sample'] in self.unknowns:
-1540			r['Sample_split'] = r['Sample']
-1541			r['Sample'] = r['Sample_original']
+1477	
+1478	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
+1479	'''
+1480	Split unknown samples by UID (treat all analyses as different samples)
+1481	or by session (treat analyses of a given sample in different sessions as
+1482	different samples).
+1483	
+1484	**Parameters**
+1485	
+1486	+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
+1487	+ `grouping`: `by_uid` | `by_session`
+1488	'''
+1489	if samples_to_split == 'all':
+1490		samples_to_split = [s for s in self.unknowns]
+1491	gkeys = {'by_uid':'UID', 'by_session':'Session'}
+1492	self.grouping = grouping.lower()
+1493	if self.grouping in gkeys:
+1494		gkey = gkeys[self.grouping]
+1495	for r in self:
+1496		if r['Sample'] in samples_to_split:
+1497			r['Sample_original'] = r['Sample']
+1498			r['Sample'] = f"{r['Sample']}__{r[gkey]}"
+1499		elif r['Sample'] in self.unknowns:
+1500			r['Sample_original'] = r['Sample']
+1501	self.refresh_samples()
+1502	
+1503	
+1504	def unsplit_samples(self, tables = False):
+1505	'''
+1506	Reverse the effects of `D47data.split_samples()`.
+1507	
+1508	This should only be used after `D4xdata.standardize()` with `method='pooled'`.
+1509	
+1510	After `D4xdata.standardize()` with `method='indep_sessions'`, one should
+1511	probably use `D4xdata.combine_samples()` instead to reverse the effects of
+1512	`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
+1513	effects of `D47data.split_samples()` with `grouping='by_session'` (because in
+1514	that case session-averaged Δ4x values are statistically independent).
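A sketch of the split/standardize/unsplit sequence (all sample names and Δ values are illustrative, and `virtual_data()` is used only to fabricate input):

```py
from D47crunch import D47data, virtual_data

samples = [
	dict(Sample = 'ETH-1', N = 4),
	dict(Sample = 'ETH-2', N = 4),
	dict(Sample = 'ETH-3', N = 4),
	dict(Sample = 'MYSAMPLE', N = 4, D47 = 0.6, D48 = 0.2, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
]
rawdata = (virtual_data(session = 'Session_01', samples = samples)
	+ virtual_data(session = 'Session_02', samples = samples))

mydata = D47data(rawdata)
mydata.crunch()
mydata.split_samples(samples_to_split = ['MYSAMPLE'], grouping = 'by_session')
mydata.standardize(method = 'pooled')	# one D47 estimate of MYSAMPLE per session
mydata.unsplit_samples()		# recombined into a single weighted average
```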
+1515 ''' +1516 unknowns_old = sorted({s for s in self.unknowns}) +1517 CM_old = self.standardization.covar[:,:] +1518 VD_old = self.standardization.params.valuesdict().copy() +1519 vars_old = self.standardization.var_names +1520 +1521 unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r}) +1522 +1523 Ns = len(vars_old) - len(unknowns_old) +1524 vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new] +1525 VD_new = {k: VD_old[k] for k in vars_old[:Ns]} +1526 +1527 W = np.zeros((len(vars_new), len(vars_old))) +1528 W[:Ns,:Ns] = np.eye(Ns) +1529 for u in unknowns_new: +1530 splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u}) +1531 if self.grouping == 'by_session': +1532 weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits] +1533 elif self.grouping == 'by_uid': +1534 weights = [1 for s in splits] +1535 sw = sum(weights) +1536 weights = [w/sw for w in weights] +1537 W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:] +1538 +1539 CM_new = W @ CM_old @ W.T +1540 V = W @ np.array([[VD_old[k]] for k in vars_old]) +1541 VD_new = {k:v[0] for k,v in zip(vars_new, V)} 1542 -1543 self.refresh_samples() -1544 self.consolidate_samples() -1545 self.repeatabilities() +1543 self.standardization.covar = CM_new +1544 self.standardization.params.valuesdict = lambda : VD_new +1545 self.standardization.var_names = vars_new 1546 -1547 if tables: -1548 self.table_of_analyses() -1549 self.table_of_samples() -1550 -1551 def assign_timestamps(self): -1552 ''' -1553 Assign a time field `t` of type `float` to each analysis. -1554 -1555 If `TimeTag` is one of the data fields, `t` is equal within a given session -1556 to `TimeTag` minus the mean value of `TimeTag` for that session. -1557 Otherwise, `TimeTag` is by default equal to the index of each analysis -1558 in the dataset and `t` is defined as above. -1559 ''' -1560 for session in self.sessions: -1561 sdata = self.sessions[session]['data'] -1562 try: -1563 t0 = np.mean([r['TimeTag'] for r in sdata]) -1564 for r in sdata: -1565 r['t'] = r['TimeTag'] - t0 -1566 except KeyError: -1567 t0 = (len(sdata)-1)/2 -1568 for t,r in enumerate(sdata): -1569 r['t'] = t - t0 -1570 -1571 -1572 def report(self): -1573 ''' -1574 Prints a report on the standardization fit. -1575 Only applicable after `D4xdata.standardize(method='pooled')`. -1576 ''' -1577 report_fit(self.standardization) -1578 +1547 for r in self: +1548 if r['Sample'] in self.unknowns: +1549 r['Sample_split'] = r['Sample'] +1550 r['Sample'] = r['Sample_original'] +1551 +1552 self.refresh_samples() +1553 self.consolidate_samples() +1554 self.repeatabilities() +1555 +1556 if tables: +1557 self.table_of_analyses() +1558 self.table_of_samples() +1559 +1560 def assign_timestamps(self): +1561 ''' +1562 Assign a time field `t` of type `float` to each analysis. +1563 +1564 If `TimeTag` is one of the data fields, `t` is equal within a given session +1565 to `TimeTag` minus the mean value of `TimeTag` for that session. +1566 Otherwise, `TimeTag` is by default equal to the index of each analysis +1567 in the dataset and `t` is defined as above. 
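A sketch of that convention (two analyses in one session, with made-up raw deltas and `TimeTag` values):

```py
from D47crunch import D47data

data = D47data([
	dict(Sample = 'ETH-1', d45 = 5.8, d46 = 11.6, d47 = 16.9, TimeTag = 10.),
	dict(Sample = 'ETH-2', d45 = -5.9, d46 = -4.9, d47 = -12.0, TimeTag = 30.),
])
data.assign_timestamps()
print([r['t'] for r in data])	# [-10.0, 10.0]: TimeTag minus the session mean of 20
```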
+1568	'''
+1569	for session in self.sessions:
+1570		sdata = self.sessions[session]['data']
+1571		try:
+1572			t0 = np.mean([r['TimeTag'] for r in sdata])
+1573			for r in sdata:
+1574				r['t'] = r['TimeTag'] - t0
+1575		except KeyError:
+1576			t0 = (len(sdata)-1)/2
+1577			for t,r in enumerate(sdata):
+1578				r['t'] = t - t0
 1579	
-1580	def combine_samples(self, sample_groups):
-1581	'''
-1582	Combine analyses of different samples to compute weighted average Δ4x
-1583	and new error (co)variances corresponding to the groups defined by the `sample_groups`
-1584	dictionary.
-1585	
-1586	Caution: samples are weighted by number of replicate analyses, which is a
-1587	reasonable default behavior but is not always optimal (e.g., in the case of strongly
-1588	correlated analytical errors for one or more samples).
-1589	
-1590	Returns a tuplet of:
-1591	
-1592	+ the list of group names
-1593	+ an array of the corresponding Δ4x values
-1594	+ the corresponding (co)variance matrix
-1595	
-1596	**Parameters**
-1597	
-1598	+ `sample_groups`: a dictionary of the form:
-1599	```py
-1600	{'group1': ['sample_1', 'sample_2'],
-1601	'group2': ['sample_3', 'sample_4', 'sample_5']}
-1602	```
-1603	'''
-1604	
-1605	samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
-1606	groups = sorted(sample_groups.keys())
-1607	group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
-1608	D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
-1609	CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
-1610	W = np.array([
-1611		[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
-1612		for j in groups])
-1613	D4x_new = W @ D4x_old
-1614	CM_new = W @ CM_old @ W.T
-1615	
-1616	return groups, D4x_new[:,0], CM_new
-1617	
-1618	
-1619	@make_verbal
-1620	def standardize(self,
-1621		method = 'pooled',
-1622		weighted_sessions = [],
-1623		consolidate = True,
-1624		consolidate_tables = False,
-1625		consolidate_plots = False,
-1626		constraints = {},
-1627		):
-1628	'''
-1629	Compute absolute Δ4x values for all replicate analyses and for sample averages.
-1630	If `method` argument is set to `'pooled'`, the standardization processes all sessions
-1631	in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
-1632	i.e. that their true Δ4x value does not change between sessions,
-1633	([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If `method` argument is set to
-1634	`'indep_sessions'`, the standardization processes each session independently, based only
-1635	on anchors analyses.
-1636	'''
-1637	
-1638	self.standardization_method = method
-1639	self.assign_timestamps()
-1640	
-1641	if method == 'pooled':
-1642		if weighted_sessions:
-1643			for session_group in weighted_sessions:
-1644				if self._4x == '47':
-1645					X = D47data([r for r in self if r['Session'] in session_group])
-1646				elif self._4x == '48':
-1647					X = D48data([r for r in self if r['Session'] in session_group])
-1648				X.Nominal_D4x = self.Nominal_D4x.copy()
-1649				X.refresh()
-1650				result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
-1651				w = np.sqrt(result.redchi)
-1652				self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
-1653				for r in X:
-1654					r[f'wD{self._4x}raw'] *= w
-1655		else:
-1656			self.msg(f'All D{self._4x}raw weights set to 1 ‰')
-1657			for r in self:
-1658				r[f'wD{self._4x}raw'] = 1.
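The weighting used by `combine_samples()` reduces to simple matrix algebra: group values are N-weighted means of the sample values, and the covariance propagates through the same weight matrix. A self-contained numpy sketch with made-up numbers:

```py
import numpy as np

N = np.array([4, 8])			# replicate counts of two samples
W = (N / N.sum())[None, :]		# a single group combining both samples
D47 = np.array([[0.612], [0.608]])	# sample D47 values (made up)
CM = np.diag([0.010, 0.008]) ** 2	# their covariance matrix (made up)
print(W @ D47)				# weighted mean of the group
print(W @ CM @ W.T)			# variance of that weighted mean
```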
-1659 -1660 params = Parameters() -1661 for k,session in enumerate(self.sessions): -1662 self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.") -1663 self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.") -1664 self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.") -1665 s = pf(session) -1666 params.add(f'a_{s}', value = 0.9) -1667 params.add(f'b_{s}', value = 0.) -1668 params.add(f'c_{s}', value = -0.9) -1669 params.add(f'a2_{s}', value = 0., -1670# vary = self.sessions[session]['scrambling_drift'], -1671 ) -1672 params.add(f'b2_{s}', value = 0., -1673# vary = self.sessions[session]['slope_drift'], -1674 ) -1675 params.add(f'c2_{s}', value = 0., -1676# vary = self.sessions[session]['wg_drift'], -1677 ) -1678 if not self.sessions[session]['scrambling_drift']: -1679 params[f'a2_{s}'].expr = '0' -1680 if not self.sessions[session]['slope_drift']: -1681 params[f'b2_{s}'].expr = '0' -1682 if not self.sessions[session]['wg_drift']: -1683 params[f'c2_{s}'].expr = '0' -1684 -1685 for sample in self.unknowns: -1686 params.add(f'D{self._4x}_{pf(sample)}', value = 0.5) -1687 -1688 for k in constraints: -1689 params[k].expr = constraints[k] -1690 -1691 def residuals(p): -1692 R = [] -1693 for r in self: -1694 session = pf(r['Session']) -1695 sample = pf(r['Sample']) -1696 if r['Sample'] in self.Nominal_D4x: -1697 R += [ ( -1698 r[f'D{self._4x}raw'] - ( -1699 p[f'a_{session}'] * self.Nominal_D4x[r['Sample']] -1700 + p[f'b_{session}'] * r[f'd{self._4x}'] -1701 + p[f'c_{session}'] -1702 + r['t'] * ( -1703 p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']] -1704 + p[f'b2_{session}'] * r[f'd{self._4x}'] -1705 + p[f'c2_{session}'] -1706 ) -1707 ) -1708 ) / r[f'wD{self._4x}raw'] ] -1709 else: -1710 R += [ ( -1711 r[f'D{self._4x}raw'] - ( -1712 p[f'a_{session}'] * p[f'D{self._4x}_{sample}'] -1713 + p[f'b_{session}'] * r[f'd{self._4x}'] -1714 + p[f'c_{session}'] -1715 + r['t'] * ( -1716 p[f'a2_{session}'] * p[f'D{self._4x}_{sample}'] -1717 + p[f'b2_{session}'] * r[f'd{self._4x}'] -1718 + p[f'c2_{session}'] -1719 ) -1720 ) -1721 ) / r[f'wD{self._4x}raw'] ] -1722 return R -1723 -1724 M = Minimizer(residuals, params) -1725 result = M.least_squares() -1726 self.Nf = result.nfree -1727 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) -1728 new_names, new_covar, new_se = _fullcovar(result)[:3] -1729 result.var_names = new_names -1730 result.covar = new_covar -1731 -1732 for r in self: -1733 s = pf(r["Session"]) -1734 a = result.params.valuesdict()[f'a_{s}'] -1735 b = result.params.valuesdict()[f'b_{s}'] -1736 c = result.params.valuesdict()[f'c_{s}'] -1737 a2 = result.params.valuesdict()[f'a2_{s}'] -1738 b2 = result.params.valuesdict()[f'b2_{s}'] -1739 c2 = result.params.valuesdict()[f'c2_{s}'] -1740 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) -1741 -1742 self.standardization = result -1743 -1744 for session in self.sessions: -1745 self.sessions[session]['Np'] = 3 -1746 for k in ['scrambling', 'slope', 'wg']: -1747 if self.sessions[session][f'{k}_drift']: -1748 self.sessions[session]['Np'] += 1 -1749 -1750 if consolidate: -1751 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) -1752 return result -1753 -1754 -1755 elif method == 'indep_sessions': -1756 -1757 if weighted_sessions: -1758 for session_group in weighted_sessions: -1759 X = D4xdata([r for r in self if r['Session'] in session_group], mass = 
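In equation form, the model fitted by the `residuals()` function above is, for each analysis of session $s$ performed at time $t$:

$$ \Delta_{4x}^\mathrm{raw} = a_s\,\Delta_{4x} + b_s\,\delta_{4x} + c_s + t\left(a_{2,s}\,\Delta_{4x} + b_{2,s}\,\delta_{4x} + c_{2,s}\right) $$

where $\Delta_{4x}$ is the nominal value (`Nominal_D4x`) for anchors and a free parameter for unknowns, and each residual is scaled by the analysis weight `wD4xraw`.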
self._4x) -1760 X.Nominal_D4x = self.Nominal_D4x.copy() -1761 X.refresh() -1762 # This is only done to assign r['wD47raw'] for r in X: -1763 X.standardize(method = method, weighted_sessions = [], consolidate = False) -1764 self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}') -1765 else: -1766 self.msg('All weights set to 1 ‰') -1767 for r in self: -1768 r[f'wD{self._4x}raw'] = 1 -1769 -1770 for session in self.sessions: -1771 s = self.sessions[session] -1772 p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2'] -1773 p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']] -1774 s['Np'] = sum(p_active) -1775 sdata = s['data'] -1776 -1777 A = np.array([ -1778 [ -1779 self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'], -1780 r[f'd{self._4x}'] / r[f'wD{self._4x}raw'], -1781 1 / r[f'wD{self._4x}raw'], -1782 self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'], -1783 r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'], -1784 r['t'] / r[f'wD{self._4x}raw'] -1785 ] -1786 for r in sdata if r['Sample'] in self.anchors -1787 ])[:,p_active] # only keep columns for the active parameters -1788 Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors]) -1789 s['Na'] = Y.size -1790 CM = linalg.inv(A.T @ A) -1791 bf = (CM @ A.T @ Y).T[0,:] -1792 k = 0 -1793 for n,a in zip(p_names, p_active): -1794 if a: -1795 s[n] = bf[k] -1796# self.msg(f'{n} = {bf[k]}') -1797 k += 1 -1798 else: -1799 s[n] = 0. -1800# self.msg(f'{n} = 0.0') -1801 -1802 for r in sdata : -1803 a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2'] -1804 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) -1805 r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t']) -1806 -1807 s['CM'] = np.zeros((6,6)) -1808 i = 0 -1809 k_active = [j for j,a in enumerate(p_active) if a] -1810 for j,a in enumerate(p_active): -1811 if a: -1812 s['CM'][j,k_active] = CM[i,:] -1813 i += 1 -1814 -1815 if not weighted_sessions: -1816 w = self.rmswd()['rmswd'] -1817 for r in self: -1818 r[f'wD{self._4x}'] *= w -1819 r[f'wD{self._4x}raw'] *= w -1820 for session in self.sessions: -1821 self.sessions[session]['CM'] *= w**2 -1822 -1823 for session in self.sessions: -1824 s = self.sessions[session] -1825 s['SE_a'] = s['CM'][0,0]**.5 -1826 s['SE_b'] = s['CM'][1,1]**.5 -1827 s['SE_c'] = s['CM'][2,2]**.5 -1828 s['SE_a2'] = s['CM'][3,3]**.5 -1829 s['SE_b2'] = s['CM'][4,4]**.5 -1830 s['SE_c2'] = s['CM'][5,5]**.5 +1580 +1581 def report(self): +1582 ''' +1583 Prints a report on the standardization fit. +1584 Only applicable after `D4xdata.standardize(method='pooled')`. +1585 ''' +1586 report_fit(self.standardization) +1587 +1588 +1589 def combine_samples(self, sample_groups): +1590 ''' +1591 Combine analyses of different samples to compute weighted average Δ4x +1592 and new error (co)variances corresponding to the groups defined by the `sample_groups` +1593 dictionary. +1594 +1595 Caution: samples are weighted by number of replicate analyses, which is a +1596 reasonable default behavior but is not always optimal (e.g., in the case of strongly +1597 correlated analytical errors for one or more samples). 
+1598 +1599 Returns a tuple of: +1600 +1601 + the list of group names +1602 + an array of the corresponding Δ4x values +1603 + the corresponding (co)variance matrix +1604 +1605 **Parameters** +1606 +1607 + `sample_groups`: a dictionary of the form: +1608 ```py +1609 {'group1': ['sample_1', 'sample_2'], +1610 'group2': ['sample_3', 'sample_4', 'sample_5']} +1611 ``` +1612 ''' +1613 +1614 samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])] +1615 groups = sorted(sample_groups.keys()) +1616 group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups} +1617 D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples]) +1618 CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples]) +1619 W = np.array([ +1620 [self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples] +1621 for j in groups]) +1622 D4x_new = W @ D4x_old +1623 CM_new = W @ CM_old @ W.T +1624 +1625 return groups, D4x_new[:,0], CM_new +1626 +1627 +1628 @make_verbal +1629 def standardize(self, +1630 method = 'pooled', +1631 weighted_sessions = [], +1632 consolidate = True, +1633 consolidate_tables = False, +1634 consolidate_plots = False, +1635 constraints = {}, +1636 ): +1637 ''' +1638 Compute absolute Δ4x values for all replicate analyses and for sample averages. +1639 If the `method` argument is set to `'pooled'`, the standardization processes all sessions +1640 in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous, +1641 i.e. that their true Δ4x value does not change between sessions +1642 ([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to +1643 `'indep_sessions'`, the standardization processes each session independently, based only +1644 on anchor analyses. +1645 ''' +1646 +1647 self.standardization_method = method +1648 self.assign_timestamps() +1649 +1650 if method == 'pooled': +1651 if weighted_sessions: +1652 for session_group in weighted_sessions: +1653 if self._4x == '47': +1654 X = D47data([r for r in self if r['Session'] in session_group]) +1655 elif self._4x == '48': +1656 X = D48data([r for r in self if r['Session'] in session_group]) +1657 X.Nominal_D4x = self.Nominal_D4x.copy() +1658 X.refresh() +1659 result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False) +1660 w = np.sqrt(result.redchi) +1661 self.msg(f'Session group {session_group} RMSWD = {w:.4f}') +1662 for r in X: +1663 r[f'wD{self._4x}raw'] *= w +1664 else: +1665 self.msg(f'All D{self._4x}raw weights set to 1 ‰') +1666 for r in self: +1667 r[f'wD{self._4x}raw'] = 1. +1668 +1669 params = Parameters() +1670 for k,session in enumerate(self.sessions): +1671 self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.") +1672 self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.") +1673 self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.") +1674 s = pf(session) +1675 params.add(f'a_{s}', value = 0.9) +1676 params.add(f'b_{s}', value = 0.)
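A minimal usage sketch for `combine_samples()`, with hypothetical sample names and assuming `data` is a `D47data` instance that has already been standardized:

```py
groups, D47_groups, CM_groups = data.combine_samples({
    'group1': ['sample_1', 'sample_2'],
    'group2': ['sample_3', 'sample_4', 'sample_5'],
    })
for g, D, var in zip(groups, D47_groups, CM_groups.diagonal()):
    print(f'{g}: D47 = {D:.4f} ± {var**.5:.4f} (1SE)')
```

Because the full (co)variance matrix is returned, differences between groups can be tested while accounting for correlated standardization errors.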
+1677 params.add(f'c_{s}', value = -0.9) +1678 params.add(f'a2_{s}', value = 0., +1679# vary = self.sessions[session]['scrambling_drift'], +1680 ) +1681 params.add(f'b2_{s}', value = 0., +1682# vary = self.sessions[session]['slope_drift'], +1683 ) +1684 params.add(f'c2_{s}', value = 0., +1685# vary = self.sessions[session]['wg_drift'], +1686 ) +1687 if not self.sessions[session]['scrambling_drift']: +1688 params[f'a2_{s}'].expr = '0' +1689 if not self.sessions[session]['slope_drift']: +1690 params[f'b2_{s}'].expr = '0' +1691 if not self.sessions[session]['wg_drift']: +1692 params[f'c2_{s}'].expr = '0' +1693 +1694 for sample in self.unknowns: +1695 params.add(f'D{self._4x}_{pf(sample)}', value = 0.5) +1696 +1697 for k in constraints: +1698 params[k].expr = constraints[k] +1699 +1700 def residuals(p): +1701 R = [] +1702 for r in self: +1703 session = pf(r['Session']) +1704 sample = pf(r['Sample']) +1705 if r['Sample'] in self.Nominal_D4x: +1706 R += [ ( +1707 r[f'D{self._4x}raw'] - ( +1708 p[f'a_{session}'] * self.Nominal_D4x[r['Sample']] +1709 + p[f'b_{session}'] * r[f'd{self._4x}'] +1710 + p[f'c_{session}'] +1711 + r['t'] * ( +1712 p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']] +1713 + p[f'b2_{session}'] * r[f'd{self._4x}'] +1714 + p[f'c2_{session}'] +1715 ) +1716 ) +1717 ) / r[f'wD{self._4x}raw'] ] +1718 else: +1719 R += [ ( +1720 r[f'D{self._4x}raw'] - ( +1721 p[f'a_{session}'] * p[f'D{self._4x}_{sample}'] +1722 + p[f'b_{session}'] * r[f'd{self._4x}'] +1723 + p[f'c_{session}'] +1724 + r['t'] * ( +1725 p[f'a2_{session}'] * p[f'D{self._4x}_{sample}'] +1726 + p[f'b2_{session}'] * r[f'd{self._4x}'] +1727 + p[f'c2_{session}'] +1728 ) +1729 ) +1730 ) / r[f'wD{self._4x}raw'] ] +1731 return R +1732 +1733 M = Minimizer(residuals, params) +1734 result = M.least_squares() +1735 self.Nf = result.nfree +1736 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) +1737 new_names, new_covar, new_se = _fullcovar(result)[:3] +1738 result.var_names = new_names +1739 result.covar = new_covar +1740 +1741 for r in self: +1742 s = pf(r["Session"]) +1743 a = result.params.valuesdict()[f'a_{s}'] +1744 b = result.params.valuesdict()[f'b_{s}'] +1745 c = result.params.valuesdict()[f'c_{s}'] +1746 a2 = result.params.valuesdict()[f'a2_{s}'] +1747 b2 = result.params.valuesdict()[f'b2_{s}'] +1748 c2 = result.params.valuesdict()[f'c2_{s}'] +1749 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) +1750 +1751 self.standardization = result +1752 +1753 for session in self.sessions: +1754 self.sessions[session]['Np'] = 3 +1755 for k in ['scrambling', 'slope', 'wg']: +1756 if self.sessions[session][f'{k}_drift']: +1757 self.sessions[session]['Np'] += 1 +1758 +1759 if consolidate: +1760 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) +1761 return result +1762 +1763 +1764 elif method == 'indep_sessions': +1765 +1766 if weighted_sessions: +1767 for session_group in weighted_sessions: +1768 X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x) +1769 X.Nominal_D4x = self.Nominal_D4x.copy() +1770 X.refresh() +1771 # This is only done to assign r['wD47raw'] for r in X: +1772 X.standardize(method = method, weighted_sessions = [], consolidate = False) +1773 self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}') +1774 else: +1775 self.msg('All weights set to 1 ‰') +1776 for r in self: +1777 r[f'wD{self._4x}raw'] = 1 +1778 +1779 for session in 
self.sessions: +1780 s = self.sessions[session] +1781 p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2'] +1782 p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']] +1783 s['Np'] = sum(p_active) +1784 sdata = s['data'] +1785 +1786 A = np.array([ +1787 [ +1788 self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'], +1789 r[f'd{self._4x}'] / r[f'wD{self._4x}raw'], +1790 1 / r[f'wD{self._4x}raw'], +1791 self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'], +1792 r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'], +1793 r['t'] / r[f'wD{self._4x}raw'] +1794 ] +1795 for r in sdata if r['Sample'] in self.anchors +1796 ])[:,p_active] # only keep columns for the active parameters +1797 Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors]) +1798 s['Na'] = Y.size +1799 CM = linalg.inv(A.T @ A) +1800 bf = (CM @ A.T @ Y).T[0,:] +1801 k = 0 +1802 for n,a in zip(p_names, p_active): +1803 if a: +1804 s[n] = bf[k] +1805# self.msg(f'{n} = {bf[k]}') +1806 k += 1 +1807 else: +1808 s[n] = 0. +1809# self.msg(f'{n} = 0.0') +1810 +1811 for r in sdata : +1812 a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2'] +1813 r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t']) +1814 r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t']) +1815 +1816 s['CM'] = np.zeros((6,6)) +1817 i = 0 +1818 k_active = [j for j,a in enumerate(p_active) if a] +1819 for j,a in enumerate(p_active): +1820 if a: +1821 s['CM'][j,k_active] = CM[i,:] +1822 i += 1 +1823 +1824 if not weighted_sessions: +1825 w = self.rmswd()['rmswd'] +1826 for r in self: +1827 r[f'wD{self._4x}'] *= w +1828 r[f'wD{self._4x}raw'] *= w +1829 for session in self.sessions: +1830 self.sessions[session]['CM'] *= w**2 1831 -1832 if not weighted_sessions: -1833 self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions]) -1834 else: -1835 self.Nf = 0 -1836 for sg in weighted_sessions: -1837 self.Nf += self.rmswd(sessions = sg)['Nf'] -1838 -1839 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) +1832 for session in self.sessions: +1833 s = self.sessions[session] +1834 s['SE_a'] = s['CM'][0,0]**.5 +1835 s['SE_b'] = s['CM'][1,1]**.5 +1836 s['SE_c'] = s['CM'][2,2]**.5 +1837 s['SE_a2'] = s['CM'][3,3]**.5 +1838 s['SE_b2'] = s['CM'][4,4]**.5 +1839 s['SE_c2'] = s['CM'][5,5]**.5 1840 -1841 avgD4x = { -1842 sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample]) -1843 for sample in self.samples -1844 } -1845 chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self]) -1846 rD4x = (chi2/self.Nf)**.5 -1847 self.repeatability[f'sigma_{self._4x}'] = rD4x -1848 -1849 if consolidate: -1850 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) -1851 -1852 -1853 def standardization_error(self, session, d4x, D4x, t = 0): -1854 ''' -1855 Compute standardization error for a given session and -1856 (δ47, Δ47) composition. -1857 ''' -1858 a = self.sessions[session]['a'] -1859 b = self.sessions[session]['b'] -1860 c = self.sessions[session]['c'] -1861 a2 = self.sessions[session]['a2'] -1862 b2 = self.sessions[session]['b2'] -1863 c2 = self.sessions[session]['c2'] -1864 CM = self.sessions[session]['CM'] -1865 -1866 x, y = D4x, d4x -1867 z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t -1868# x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t) -1869 dxdy = -(b+b2*t) / (a+a2*t) -1870 dxdz = 1. 
/ (a+a2*t) -1871 dxda = -x / (a+a2*t) -1872 dxdb = -y / (a+a2*t) -1873 dxdc = -1. / (a+a2*t) -1874 dxda2 = -x * t / (a+a2*t) -1875 dxdb2 = -y * t / (a+a2*t) -1876 dxdc2 = -t / (a+a2*t) -1877 V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2]) -1878 sx = (V @ CM @ V.T) ** .5 -1879 return sx -1880 -1881 -1882 @make_verbal -1883 def summary(self, -1884 dir = 'output', -1885 filename = None, -1886 save_to_file = True, -1887 print_out = True, -1888 ): -1889 ''' -1890 Print out and/or save to disk a summary of the standardization results. -1891 -1892 **Parameters** -1893 -1894 + `dir`: the directory in which to save the table -1895 + `filename`: the name of the csv file to write to -1896 + `save_to_file`: whether to save the table to disk -1897 + `print_out`: whether to print out the table -1898 ''' -1899 -1900 out = [] -1901 out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]] -1902 out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]] -1903 out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]] -1904 out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]] -1905 out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]] -1906 out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]] -1907 out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]] -1908 out += [['Model degrees of freedom', f"{self.Nf}"]] -1909 out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]] -1910 out += [['Standardization method', self.standardization_method]] -1911 -1912 if save_to_file: -1913 if not os.path.exists(dir): -1914 os.makedirs(dir) -1915 if filename is None: -1916 filename = f'D{self._4x}_summary.csv' -1917 with open(f'{dir}/{filename}', 'w') as fid: -1918 fid.write(make_csv(out)) -1919 if print_out: -1920 self.msg('\n' + pretty_table(out, header = 0)) -1921 -1922 -1923 @make_verbal -1924 def table_of_sessions(self, -1925 dir = 'output', -1926 filename = None, -1927 save_to_file = True, -1928 print_out = True, -1929 output = None, -1930 ): -1931 ''' -1932 Print out and/or save to disk a table of sessions.
-1933 -1934 **Parameters** -1935 -1936 + `dir`: the directory in which to save the table -1937 + `filename`: the name of the csv file to write to -1938 + `save_to_file`: whether to save the table to disk -1939 + `print_out`: whether to print out the table -1940 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); -1941 if set to `'raw'`: return a list of list of strings -1942 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -1943 ''' -1944 include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions]) -1945 include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions]) -1946 include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions]) -1947 -1948 out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']] -1949 if include_a2: -1950 out[-1] += ['a2 ± SE'] -1951 if include_b2: -1952 out[-1] += ['b2 ± SE'] -1953 if include_c2: -1954 out[-1] += ['c2 ± SE'] -1955 for session in self.sessions: -1956 out += [[ -1957 session, -1958 f"{self.sessions[session]['Na']}", -1959 f"{self.sessions[session]['Nu']}", -1960 f"{self.sessions[session]['d13Cwg_VPDB']:.3f}", -1961 f"{self.sessions[session]['d18Owg_VSMOW']:.3f}", -1962 f"{self.sessions[session]['r_d13C_VPDB']:.4f}", -1963 f"{self.sessions[session]['r_d18O_VSMOW']:.4f}", -1964 f"{self.sessions[session][f'r_D{self._4x}']:.4f}", -1965 f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}", -1966 f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}", -1967 f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}", -1968 ]] -1969 if include_a2: -1970 if self.sessions[session]['scrambling_drift']: -1971 out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"] -1972 else: -1973 out[-1] += [''] -1974 if include_b2: -1975 if self.sessions[session]['slope_drift']: -1976 out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"] -1977 else: -1978 out[-1] += [''] -1979 if include_c2: -1980 if self.sessions[session]['wg_drift']: -1981 out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"] -1982 else: -1983 out[-1] += [''] -1984 -1985 if save_to_file: -1986 if not os.path.exists(dir): -1987 os.makedirs(dir) -1988 if filename is None: -1989 filename = f'D{self._4x}_sessions.csv' -1990 with open(f'{dir}/{filename}', 'w') as fid: -1991 fid.write(make_csv(out)) -1992 if print_out: -1993 self.msg('\n' + pretty_table(out)) -1994 if output == 'raw': -1995 return out -1996 elif output == 'pretty': -1997 return pretty_table(out) -1998 -1999 -2000 @make_verbal -2001 def table_of_analyses( -2002 self, -2003 dir = 'output', -2004 filename = None, -2005 save_to_file = True, -2006 print_out = True, -2007 output = None, -2008 ): -2009 ''' -2010 Print out and/or save to disk a table of analyses.
-2011 -2012 **Parameters** -2013 -2014 + `dir`: the directory in which to save the table -2015 + `filename`: the name of the csv file to write to -2016 + `save_to_file`: whether to save the table to disk -2017 + `print_out`: whether to print out the table -2018 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); -2019 if set to `'raw'`: return a list of list of strings -2020 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -2021 ''' +1841 if not weighted_sessions: +1842 self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions]) +1843 else: +1844 self.Nf = 0 +1845 for sg in weighted_sessions: +1846 self.Nf += self.rmswd(sessions = sg)['Nf'] +1847 +1848 self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf) +1849 +1850 avgD4x = { +1851 sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample]) +1852 for sample in self.samples +1853 } +1854 chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self]) +1855 rD4x = (chi2/self.Nf)**.5 +1856 self.repeatability[f'sigma_{self._4x}'] = rD4x +1857 +1858 if consolidate: +1859 self.consolidate(tables = consolidate_tables, plots = consolidate_plots) +1860 +1861 +1862 def standardization_error(self, session, d4x, D4x, t = 0): +1863 ''' +1864 Compute standardization error for a given session and +1865 (δ47, Δ47) composition. +1866 ''' +1867 a = self.sessions[session]['a'] +1868 b = self.sessions[session]['b'] +1869 c = self.sessions[session]['c'] +1870 a2 = self.sessions[session]['a2'] +1871 b2 = self.sessions[session]['b2'] +1872 c2 = self.sessions[session]['c2'] +1873 CM = self.sessions[session]['CM'] +1874 +1875 x, y = D4x, d4x +1876 z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t +1877# x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t) +1878 dxdy = -(b+b2*t) / (a+a2*t) +1879 dxdz = 1. / (a+a2*t) +1880 dxda = -x / (a+a2*t) +1881 dxdb = -y / (a+a2*t) +1882 dxdc = -1. / (a+a2*t) +1883 dxda2 = -x * t / (a+a2*t) +1884 dxdb2 = -y * t / (a+a2*t) +1885 dxdc2 = -t / (a+a2*t) +1886 V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2]) +1887 sx = (V @ CM @ V.T) ** .5 +1888 return sx +1889 +1890 +1891 @make_verbal +1892 def summary(self, +1893 dir = 'output', +1894 filename = None, +1895 save_to_file = True, +1896 print_out = True, +1897 ): +1898 ''' +1899 Print out and/or save to disk a summary of the standardization results.
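Inverting the session model gives the standardized value, and `standardization_error()` above propagates the session covariance matrix $\Sigma$ through that inverse:

$$ \Delta_{4x} = \frac{\Delta_{4x}^\mathrm{raw} - b\,\delta_{4x} - c - t\,(b_2\,\delta_{4x} + c_2)}{a + a_2 t}, \qquad \sigma_{\Delta_{4x}} = \sqrt{V\,\Sigma\,V^\mathsf{T}} $$

where $V$ collects the partial derivatives with respect to $(a, b, c, a_2, b_2, c_2)$, e.g. $\partial\Delta_{4x}/\partial a = -\Delta_{4x}/(a + a_2 t)$ and $\partial\Delta_{4x}/\partial a_2 = -\Delta_{4x}\,t/(a + a_2 t)$, as computed in the `dxd*` variables.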
+1900 +1901 **Parameters** +1902 +1903 + `dir`: the directory in which to save the table +1904 + `filename`: the name of the csv file to write to +1905 + `save_to_file`: whether to save the table to disk +1906 + `print_out`: whether to print out the table +1907 ''' +1908 +1909 out = [] +1910 out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]] +1911 out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]] +1912 out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]] +1913 out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]] +1914 out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]] +1915 out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]] +1916 out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]] +1917 out += [['Model degrees of freedom', f"{self.Nf}"]] +1918 out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]] +1919 out += [['Standardization method', self.standardization_method]] +1920 +1921 if save_to_file: +1922 if not os.path.exists(dir): +1923 os.makedirs(dir) +1924 if filename is None: +1925 filename = f'D{self._4x}_summary.csv' +1926 with open(f'{dir}/{filename}', 'w') as fid: +1927 fid.write(make_csv(out)) +1928 if print_out: +1929 self.msg('\n' + pretty_table(out, header = 0)) +1930 +1931 +1932 @make_verbal +1933 def table_of_sessions(self, +1934 dir = 'output', +1935 filename = None, +1936 save_to_file = True, +1937 print_out = True, +1938 output = None, +1939 ): +1940 ''' +1941 Print out and/or save to disk a table of sessions.
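The reporting methods share the same `dir`/`filename`/`save_to_file`/`print_out` interface; a short usage sketch, assuming `data` is a standardized `D47data` instance:

```py
data.summary(save_to_file = False)      # print the summary without writing a csv
data.table_of_sessions(dir = 'output')  # print and save output/D47_sessions.csv
txt = data.table_of_sessions(output = 'pretty', print_out = False, save_to_file = False)
```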
+1942 +1943 **Parameters** +1944 +1945 + `dir`: the directory in which to save the table +1946 + `filename`: the name of the csv file to write to +1947 + `save_to_file`: whether to save the table to disk +1948 + `print_out`: whether to print out the table +1949 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); +1950 if set to `'raw'`: return a list of list of strings +1951 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) +1952 ''' +1953 include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions]) +1954 include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions]) +1955 include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions]) +1956 +1957 out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']] +1958 if include_a2: +1959 out[-1] += ['a2 ± SE'] +1960 if include_b2: +1961 out[-1] += ['b2 ± SE'] +1962 if include_c2: +1963 out[-1] += ['c2 ± SE'] +1964 for session in self.sessions: +1965 out += [[ +1966 session, +1967 f"{self.sessions[session]['Na']}", +1968 f"{self.sessions[session]['Nu']}", +1969 f"{self.sessions[session]['d13Cwg_VPDB']:.3f}", +1970 f"{self.sessions[session]['d18Owg_VSMOW']:.3f}", +1971 f"{self.sessions[session]['r_d13C_VPDB']:.4f}", +1972 f"{self.sessions[session]['r_d18O_VSMOW']:.4f}", +1973 f"{self.sessions[session][f'r_D{self._4x}']:.4f}", +1974 f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}", +1975 f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}", +1976 f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}", +1977 ]] +1978 if include_a2: +1979 if self.sessions[session]['scrambling_drift']: +1980 out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"] +1981 else: +1982 out[-1] += [''] +1983 if include_b2: +1984 if self.sessions[session]['slope_drift']: +1985 out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"] +1986 else: +1987 out[-1] += [''] +1988 if include_c2: +1989 if self.sessions[session]['wg_drift']: +1990 out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"] +1991 else: +1992 out[-1] += [''] +1993 +1994 if save_to_file: +1995 if not os.path.exists(dir): +1996 os.makedirs(dir) +1997 if filename is None: +1998 filename = f'D{self._4x}_sessions.csv' +1999 with open(f'{dir}/{filename}', 'w') as fid: +2000 fid.write(make_csv(out)) +2001 if print_out: +2002 self.msg('\n' + pretty_table(out)) +2003 if output == 'raw': +2004 return out +2005 elif output == 'pretty': +2006 return pretty_table(out) +2007 +2008 +2009 @make_verbal +2010 def table_of_analyses( +2011 self, +2012 dir = 'output', +2013 filename = None, +2014 save_to_file = True, +2015 print_out = True, +2016 output = None, +2017 ): +2018 ''' +2019 Print out and/or save to disk a table of analyses.
+2020 +2021 **Parameters** 2022 -2023 out = [['UID','Session','Sample']] -2024 extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}] -2025 for f in extra_fields: -2026 out[-1] += [f[0]] -2027 out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}'] -2028 for r in self: -2029 out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]] -2030 for f in extra_fields: -2031 out[-1] += [f"{r[f[0]]:{f[1]}}"] -2032 out[-1] += [ -2033 f"{r['d13Cwg_VPDB']:.3f}", -2034 f"{r['d18Owg_VSMOW']:.3f}", -2035 f"{r['d45']:.6f}", -2036 f"{r['d46']:.6f}", -2037 f"{r['d47']:.6f}", -2038 f"{r['d48']:.6f}", -2039 f"{r['d49']:.6f}", -2040 f"{r['d13C_VPDB']:.6f}", -2041 f"{r['d18O_VSMOW']:.6f}", -2042 f"{r['D47raw']:.6f}", -2043 f"{r['D48raw']:.6f}", -2044 f"{r['D49raw']:.6f}", -2045 f"{r[f'D{self._4x}']:.6f}" -2046 ] -2047 if save_to_file: -2048 if not os.path.exists(dir): -2049 os.makedirs(dir) -2050 if filename is None: -2051 filename = f'D{self._4x}_analyses.csv' -2052 with open(f'{dir}/{filename}', 'w') as fid: -2053 fid.write(make_csv(out)) -2054 if print_out: -2055 self.msg('\n' + pretty_table(out)) -2056 return out -2057 -2058 @make_verbal -2059 def covar_table( -2060 self, -2061 correl = False, -2062 dir = 'output', -2063 filename = None, -2064 save_to_file = True, -2065 print_out = True, -2066 output = None, -2067 ): -2068 ''' -2069 Print out, save to disk and/or return the variance-covariance matrix of D4x -2070 for all unknown samples. -2071 -2072 **Parameters** -2073 -2074 + `dir`: the directory in which to save the csv -2075 + `filename`: the name of the csv file to write to -2076 + `save_to_file`: whether to save the csv -2077 + `print_out`: whether to print out the matrix -2078 + `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`); -2079 if set to `'raw'`: return a list of list of strings -2080 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -2081 ''' -2082 samples = sorted([u for u in self.unknowns]) -2083 out = [[''] + samples] -2084 for s1 in samples: -2085 out.append([s1]) -2086 for s2 in samples: -2087 if correl: -2088 out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}') -2089 else: -2090 out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}') -2091 -2092 if save_to_file: -2093 if not os.path.exists(dir): -2094 os.makedirs(dir) -2095 if filename is None: +2023 + `dir`: the directory in which to save the table +2024 + `filename`: the name of the csv file to write to +2025 + `save_to_file`: whether to save the table to disk +2026 + `print_out`: whether to print out the table +2027 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); +2028 if set to `'raw'`: return a list of list of strings +2029 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) +2030 ''' +2031 +2032 out = [['UID','Session','Sample']] +2033 extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}] +2034 for f in extra_fields: +2035 out[-1] += [f[0]] +2036 out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}'] +2037 for r in self: +2038 out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]] +2039 for f in extra_fields: +2040 out[-1] += [f"{r[f[0]]:{f[1]}}"] +2041 out[-1] += [ +2042 f"{r['d13Cwg_VPDB']:.3f}", +2043
f"{r['d18Owg_VSMOW']:.3f}", +2044 f"{r['d45']:.6f}", +2045 f"{r['d46']:.6f}", +2046 f"{r['d47']:.6f}", +2047 f"{r['d48']:.6f}", +2048 f"{r['d49']:.6f}", +2049 f"{r['d13C_VPDB']:.6f}", +2050 f"{r['d18O_VSMOW']:.6f}", +2051 f"{r['D47raw']:.6f}", +2052 f"{r['D48raw']:.6f}", +2053 f"{r['D49raw']:.6f}", +2054 f"{r[f'D{self._4x}']:.6f}" +2055 ] +2056 if save_to_file: +2057 if not os.path.exists(dir): +2058 os.makedirs(dir) +2059 if filename is None: +2060 filename = f'D{self._4x}_analyses.csv' +2061 with open(f'{dir}/{filename}', 'w') as fid: +2062 fid.write(make_csv(out)) +2063 if print_out: +2064 self.msg('\n' + pretty_table(out)) +2065 return out +2066 +2067 @make_verbal +2068 def covar_table( +2069 self, +2070 correl = False, +2071 dir = 'output', +2072 filename = None, +2073 save_to_file = True, +2074 print_out = True, +2075 output = None, +2076 ): +2077 ''' +2078 Print out, save to disk and/or return the variance-covariance matrix of D4x +2079 for all unknown samples. +2080 +2081 **Parameters** +2082 +2083 + `dir`: the directory in which to save the csv +2084 + `filename`: the name of the csv file to write to +2085 + `save_to_file`: whether to save the csv +2086 + `print_out`: whether to print out the matrix +2087 + `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`); +2088 if set to `'raw'`: return a list of list of strings +2089 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) +2090 ''' +2091 samples = sorted([u for u in self.unknowns]) +2092 out = [[''] + samples] +2093 for s1 in samples: +2094 out.append([s1]) +2095 for s2 in samples: 2096 if correl: -2097 filename = f'D{self._4x}_correl.csv' +2097 out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}') 2098 else: -2099 filename = f'D{self._4x}_covar.csv' -2100 with open(f'{dir}/{filename}', 'w') as fid: -2101 fid.write(make_csv(out)) -2102 if print_out: -2103 self.msg('\n'+pretty_table(out)) -2104 if output == 'raw': -2105 return out -2106 elif output == 'pretty': -2107 return pretty_table(out) -2108 -2109 @make_verbal -2110 def table_of_samples( -2111 self, -2112 dir = 'output', -2113 filename = None, -2114 save_to_file = True, -2115 print_out = True, -2116 output = None, -2117 ): -2118 ''' -2119 Print out, save to disk and/or return a table of samples. -2120 -2121 **Parameters** -2122 -2123 + `dir`: the directory in which to save the csv -2124 + `filename`: the name of the csv file to write to -2125 + `save_to_file`: whether to save the csv -2126 + `print_out`: whether to print out the table -2127 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); -2128 if set to `'raw'`: return a list of list of strings -2129 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) -2130 ''' +2099 out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}') +2100 +2101 if save_to_file: +2102 if not os.path.exists(dir): +2103 os.makedirs(dir) +2104 if filename is None: +2105 if correl: +2106 filename = f'D{self._4x}_correl.csv' +2107 else: +2108 filename = f'D{self._4x}_covar.csv' +2109 with open(f'{dir}/{filename}', 'w') as fid: +2110 fid.write(make_csv(out)) +2111 if print_out: +2112 self.msg('\n'+pretty_table(out)) +2113 if output == 'raw': +2114 return out +2115 elif output == 'pretty': +2116 return pretty_table(out) +2117 +2118 @make_verbal +2119 def table_of_samples( +2120 self, +2121 dir = 'output', +2122 filename = None, +2123 save_to_file = True, +2124 print_out = True, +2125 output = None, +2126 ): +2127 ''' +2128 Print out, save to disk and/or return a table of samples. 
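A sketch of the two `covar_table()` output modes, again assuming a standardized `D47data` instance named `data`:

```py
data.covar_table()                     # save full covariance matrix to output/D47_covar.csv
rows = data.covar_table(correl = True, output = 'raw',
    save_to_file = False, print_out = False)  # correlation matrix as a list of lists of strings
```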
+2129 +2130 **Parameters** 2131 -2132 out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']] -2133 for sample in self.anchors: -2134 out += [[ -2135 f"{sample}", -2136 f"{self.samples[sample]['N']}", -2137 f"{self.samples[sample]['d13C_VPDB']:.2f}", -2138 f"{self.samples[sample]['d18O_VSMOW']:.2f}", -2139 f"{self.samples[sample][f'D{self._4x}']:.4f}",'','', -2140 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', '' -2141 ]] -2142 for sample in self.unknowns: +2132 + `dir`: the directory in which to save the csv +2133 + `filename`: the name of the csv file to write to +2134 + `save_to_file`: whether to save the csv +2135 + `print_out`: whether to print out the table +2136 + `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`); +2137 if set to `'raw'`: return a list of list of strings +2138 (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`) +2139 ''' +2140 +2141 out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']] +2142 for sample in self.anchors: 2143 out += [[ 2144 f"{sample}", 2145 f"{self.samples[sample]['N']}", 2146 f"{self.samples[sample]['d13C_VPDB']:.2f}", 2147 f"{self.samples[sample]['d18O_VSMOW']:.2f}", -2148 f"{self.samples[sample][f'D{self._4x}']:.4f}", -2149 f"{self.samples[sample][f'SE_D{self._4x}']:.4f}", -2150 f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}", -2151 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', -2152 f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else '' -2153 ]] -2154 if save_to_file: -2155 if not os.path.exists(dir): -2156 os.makedirs(dir) -2157 if filename is None: -2158 filename = f'D{self._4x}_samples.csv' -2159 with open(f'{dir}/{filename}', 'w') as fid: -2160 fid.write(make_csv(out)) -2161 if print_out: -2162 self.msg('\n'+pretty_table(out)) -2163 if output == 'raw': -2164 return out -2165 elif output == 'pretty': -2166 return pretty_table(out) -2167 -2168 -2169 def plot_sessions(self, dir = 'output', figsize = (8,8)): -2170 ''' -2171 Generate session plots and save them to disk. -2172 -2173 **Parameters** -2174 -2175 + `dir`: the directory in which to save the plots -2176 + `figsize`: the width and height (in inches) of each plot -2177 ''' -2178 if not os.path.exists(dir): -2179 os.makedirs(dir) -2180 -2181 for session in self.sessions: -2182 sp = self.plot_single_session(session, xylimits = 'constant') -2183 ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf') -2184 ppl.close(sp.fig) -2185 -2186 -2187 @make_verbal -2188 def consolidate_samples(self): -2189 ''' -2190 Compile various statistics for each sample.
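In the table of samples, the `95% CL` column for unknowns is simply the standard error scaled by the Student's t-factor stored during standardization:

$$ \mathrm{CL}_{95} = t_{0.975,\,N_f} \times \mathrm{SE}(\Delta_{4x}) $$

with $N_f$ the model degrees of freedom (`self.Nf`) and $t_{0.975,\,N_f}$ computed as `tstudent.ppf(1 - 0.05/2, self.Nf)` (`self.t95`).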
-2191 -2192 For each anchor sample: -2193 -2194 + `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x` -2195 + `SE_D47` or `SE_D48`: set to zero by definition -2196 -2197 For each unknown sample: -2198 -2199 + `D47` or `D48`: the standardized Δ4x value for this unknown -2200 + `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown -2201 -2202 For each anchor and unknown: -2203 -2204 + `N`: the total number of analyses of this sample -2205 + `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample -2206 + `d13C_VPDB`: the average δ13C_VPDB value for this sample -2207 + `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2) -2208 + `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal -2209 variance, indicating whether the Δ4x repeatability of this sample differs significantly from -2210 that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`. -2211 ''' -2212 D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']] -2213 for sample in self.samples: -2214 self.samples[sample]['N'] = len(self.samples[sample]['data']) -2215 if self.samples[sample]['N'] > 1: -2216 self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']]) -2217 -2218 self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']]) -2219 self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']]) -2220 -2221 D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']] -2222 if len(D4x_pop) > 2: -2223 self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1] -2224 -2225 if self.standardization_method == 'pooled': -2226 for sample in self.anchors: -2227 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample] -2228 self.samples[sample][f'SE_D{self._4x}'] = 0. -2229 for sample in self.unknowns: -2230 self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}'] -2231 try: -2232 self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5 -2233 except ValueError: -2234 # when `sample` is constrained by self.standardize(constraints = {...}), -2235 # it is no longer listed in self.standardization.var_names. -2236 # Temporary fix: define SE as zero for now -2237 self.samples[sample][f'SE_D{self._4x}'] = 0. -2238 -2239 elif self.standardization_method == 'indep_sessions': -2240 for sample in self.anchors: -2241 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample] -2242 self.samples[sample][f'SE_D{self._4x}'] = 0. -2243 for sample in self.unknowns: -2244 self.msg(f'Consolidating sample {sample}') -2245 self.unknowns[sample][f'session_D{self._4x}'] = {} -2246 session_avg = [] -2247 for session in self.sessions: -2248 sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample] -2249 if sdata: -2250 self.msg(f'{sample} found in session {session}') -2251 avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata]) -2252 avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata]) -2253 # !!
TODO: sigma_s below does not account for temporal changes in standardization error -2254 sigma_s = self.standardization_error(session, avg_d4x, avg_D4x) -2255 sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5 -2256 session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5]) -2257 self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1] -2258 self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg)) -2259 weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']} -2260 wsum = sum([weights[s] for s in weights]) -2261 for s in weights: -2262 self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum] -2263 -2264 -2265 def consolidate_sessions(self): -2266 ''' -2267 Compute various statistics for each session. -2268 -2269 + `Na`: Number of anchor analyses in the session -2270 + `Nu`: Number of unknown analyses in the session -2271 + `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session -2272 + `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session -2273 + `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session -2274 + `a`: scrambling factor -2275 + `b`: compositional slope -2276 + `c`: WG offset -2277 + `SE_a`: Model standard error of `a` -2278 + `SE_b`: Model standard error of `b` -2279 + `SE_c`: Model standard error of `c` -2280 + `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`) -2281 + `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`) -2282 + `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`) -2283 + `a2`: scrambling factor drift -2284 + `b2`: compositional slope drift -2285 + `c2`: WG offset drift -2286 + `Np`: Number of standardization parameters to fit -2287 + `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`) -2288 + `d13Cwg_VPDB`: δ13C_VPDB of WG -2289 + `d18Owg_VSMOW`: δ18O_VSMOW of WG -2290 ''' -2291 for session in self.sessions: -2292 if 'd13Cwg_VPDB' not in self.sessions[session]: -2293 self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB'] -2294 if 'd18Owg_VSMOW' not in self.sessions[session]: -2295 self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW'] -2296 self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors]) -2297 self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns]) -2298 -2299 self.msg(f'Computing repeatabilities for session {session}') -2300 self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session]) -2301 self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session]) -2302 self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session]) -2303 -2304 if self.standardization_method == 'pooled': -2305 for session in self.sessions: -2306 -2307 self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}'] -2308 i = self.standardization.var_names.index(f'a_{pf(session)}') -2309 self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5 -2310 -2311 self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
-2312 i = self.standardization.var_names.index(f'b_{pf(session)}') -2313 self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5 -2314 -2315 self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}'] -2316 i = self.standardization.var_names.index(f'c_{pf(session)}') -2317 self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5 -2318 -2319 self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}'] -2320 if self.sessions[session]['scrambling_drift']: -2321 i = self.standardization.var_names.index(f'a2_{pf(session)}') -2322 self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5 -2323 else: -2324 self.sessions[session]['SE_a2'] = 0. -2325 -2326 self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}'] -2327 if self.sessions[session]['slope_drift']: -2328 i = self.standardization.var_names.index(f'b2_{pf(session)}') -2329 self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5 -2330 else: -2331 self.sessions[session]['SE_b2'] = 0. -2332 -2333 self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}'] -2334 if self.sessions[session]['wg_drift']: -2335 i = self.standardization.var_names.index(f'c2_{pf(session)}') -2336 self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5 -2337 else: -2338 self.sessions[session]['SE_c2'] = 0. -2339 -2340 i = self.standardization.var_names.index(f'a_{pf(session)}') -2341 j = self.standardization.var_names.index(f'b_{pf(session)}') -2342 k = self.standardization.var_names.index(f'c_{pf(session)}') -2343 CM = np.zeros((6,6)) -2344 CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]] -2345 try: -2346 i2 = self.standardization.var_names.index(f'a2_{pf(session)}') -2347 CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]] -2348 CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2] -2349 try: -2350 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') -2351 CM[3,4] = self.standardization.covar[i2,j2] -2352 CM[4,3] = self.standardization.covar[j2,i2] -2353 except ValueError: -2354 pass -2355 try: -2356 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2357 CM[3,5] = self.standardization.covar[i2,k2] -2358 CM[5,3] = self.standardization.covar[k2,i2] -2359 except ValueError: -2360 pass -2361 except ValueError: -2362 pass -2363 try: -2364 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') -2365 CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]] -2366 CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2] -2367 try: -2368 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2369 CM[4,5] = self.standardization.covar[j2,k2] -2370 CM[5,4] = self.standardization.covar[k2,j2] -2371 except ValueError: -2372 pass -2373 except ValueError: -2374 pass -2375 try: -2376 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') -2377 CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]] -2378 CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2] -2379 except ValueError: -2380 pass -2381 -2382 self.sessions[session]['CM'] = CM -2383 -2384 elif self.standardization_method == 'indep_sessions': -2385 pass # Not implemented yet -2386 -2387 -2388 @make_verbal -2389 def repeatabilities(self): -2390 ''' -2391 Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x -2392 (for all samples, for anchors, and for unknowns). 
-2393 ''' -2394 self.msg('Computing repeatabilities for all sessions') +2148 f"{self.samples[sample][f'D{self._4x}']:.4f}",'','', +2149 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', '' +2150 ]] +2151 for sample in self.unknowns: +2152 out += [[ +2153 f"{sample}", +2154 f"{self.samples[sample]['N']}", +2155 f"{self.samples[sample]['d13C_VPDB']:.2f}", +2156 f"{self.samples[sample]['d18O_VSMOW']:.2f}", +2157 f"{self.samples[sample][f'D{self._4x}']:.4f}", +2158 f"{self.samples[sample][f'SE_D{self._4x}']:.4f}", +2159 f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}", +2160 f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', +2161 f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else '' +2162 ]] +2163 if save_to_file: +2164 if not os.path.exists(dir): +2165 os.makedirs(dir) +2166 if filename is None: +2167 filename = f'D{self._4x}_samples.csv' +2168 with open(f'{dir}/{filename}', 'w') as fid: +2169 fid.write(make_csv(out)) +2170 if print_out: +2171 self.msg('\n'+pretty_table(out)) +2172 if output == 'raw': +2173 return out +2174 elif output == 'pretty': +2175 return pretty_table(out) +2176 +2177 +2178 def plot_sessions(self, dir = 'output', figsize = (8,8)): +2179 ''' +2180 Generate session plots and save them to disk. +2181 +2182 **Parameters** +2183 +2184 + `dir`: the directory in which to save the plots +2185 + `figsize`: the width and height (in inches) of each plot +2186 ''' +2187 if not os.path.exists(dir): +2188 os.makedirs(dir) +2189 +2190 for session in self.sessions: +2191 sp = self.plot_single_session(session, xylimits = 'constant') +2192 ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf') +2193 ppl.close(sp.fig) +2194 +2195 +2196 @make_verbal +2197 def consolidate_samples(self): +2198 ''' +2199 Compile various statistics for each sample. +2200 +2201 For each anchor sample: +2202 +2203 + `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x` +2204 + `SE_D47` or `SE_D48`: set to zero by definition +2205 +2206 For each unknown sample: +2207 +2208 + `D47` or `D48`: the standardized Δ4x value for this unknown +2209 + `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown +2210 +2211 For each anchor and unknown: +2212 +2213 + `N`: the total number of analyses of this sample +2214 + `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample +2215 + `d13C_VPDB`: the average δ13C_VPDB value for this sample +2216 + `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2) +2217 + `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal +2218 variance, indicating whether the Δ4x repeatability of this sample differs significantly from +2219 that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
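The `p_Levene` field relies on SciPy's Levene test with median centering, comparing each sample's Δ4x replicates to those of `self.LEVENE_REF_SAMPLE` (as done in `consolidate_samples()`); a standalone sketch with made-up values:

```py
from scipy.stats import levene

ref_pop = [0.912, 0.908, 0.915, 0.910]  # made-up D47 replicates of the reference sample
pop = [0.655, 0.641, 0.662]             # made-up D47 replicates of another sample
p = levene(ref_pop, pop, center = 'median')[1]
print(f'p_Levene = {p:.3f}')  # small p suggests significantly different repeatability
```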
+2220 ''' +2221 D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']] +2222 for sample in self.samples: +2223 self.samples[sample]['N'] = len(self.samples[sample]['data']) +2224 if self.samples[sample]['N'] > 1: +2225 self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']]) +2226 +2227 self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']]) +2228 self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']]) +2229 +2230 D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']] +2231 if len(D4x_pop) > 2: +2232 self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1] +2233 +2234 if self.standardization_method == 'pooled': +2235 for sample in self.anchors: +2236 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample] +2237 self.samples[sample][f'SE_D{self._4x}'] = 0. +2238 for sample in self.unknowns: +2239 self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}'] +2240 try: +2241 self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5 +2242 except ValueError: +2243 # when `sample` is constrained by self.standardize(constraints = {...}), +2244 # it is no longer listed in self.standardization.var_names. +2245 # Temporary fix: define SE as zero for now +2246 self.samples[sample][f'SE_D{self._4x}'] = 0. +2247 +2248 elif self.standardization_method == 'indep_sessions': +2249 for sample in self.anchors: +2250 self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample] +2251 self.samples[sample][f'SE_D{self._4x}'] = 0. +2252 for sample in self.unknowns: +2253 self.msg(f'Consolidating sample {sample}') +2254 self.unknowns[sample][f'session_D{self._4x}'] = {} +2255 session_avg = [] +2256 for session in self.sessions: +2257 sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample] +2258 if sdata: +2259 self.msg(f'{sample} found in session {session}') +2260 avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata]) +2261 avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata]) +2262 # !! TODO: sigma_s below does not account for temporal changes in standardization error +2263 sigma_s = self.standardization_error(session, avg_d4x, avg_D4x) +2264 sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5 +2265 session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5]) +2266 self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1] +2267 self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg)) +2268 weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']} +2269 wsum = sum([weights[s] for s in weights]) +2270 for s in weights: +2271 self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum] +2272 +2273 +2274 def consolidate_sessions(self): +2275 ''' +2276 Compute various statistics for each session.
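For `method = 'indep_sessions'`, each unknown thus ends up as the inverse-variance weighted mean of its session averages, where each session's uncertainty combines the analytical and standardization errors (as implemented via `w_avg()` above):

$$ \sigma_s^2 = \sigma_u^2 + \sigma_\mathrm{std}^2, \qquad \Delta_{4x} = \frac{\sum_s \sigma_s^{-2}\,\Delta_{4x,s}}{\sum_s \sigma_s^{-2}}, \qquad \mathrm{SE}(\Delta_{4x}) = \left(\sum_s \sigma_s^{-2}\right)^{-1/2} $$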
+2277 +2278 + `Na`: Number of anchor analyses in the session +2279 + `Nu`: Number of unknown analyses in the session +2280 + `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session +2281 + `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session +2282 + `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session +2283 + `a`: scrambling factor +2284 + `b`: compositional slope +2285 + `c`: WG offset +2286 + `SE_a`: Model standard error of `a` +2287 + `SE_b`: Model standard error of `b` +2288 + `SE_c`: Model standard error of `c` +2289 + `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`) +2290 + `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`) +2291 + `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`) +2292 + `a2`: scrambling factor drift +2293 + `b2`: compositional slope drift +2294 + `c2`: WG offset drift +2295 + `Np`: Number of standardization parameters to fit +2296 + `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`) +2297 + `d13Cwg_VPDB`: δ13C_VPDB of WG +2298 + `d18Owg_VSMOW`: δ18O_VSMOW of WG +2299 ''' +2300 for session in self.sessions: +2301 if 'd13Cwg_VPDB' not in self.sessions[session]: +2302 self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB'] +2303 if 'd18Owg_VSMOW' not in self.sessions[session]: +2304 self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW'] +2305 self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors]) +2306 self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns]) +2307 +2308 self.msg(f'Computing repeatabilities for session {session}') +2309 self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session]) +2310 self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session]) +2311 self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session]) +2312 +2313 if self.standardization_method == 'pooled': +2314 for session in self.sessions: +2315 +2316 self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}'] +2317 i = self.standardization.var_names.index(f'a_{pf(session)}') +2318 self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5 +2319 +2320 self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}'] +2321 i = self.standardization.var_names.index(f'b_{pf(session)}') +2322 self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5 +2323 +2324 self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}'] +2325 i = self.standardization.var_names.index(f'c_{pf(session)}') +2326 self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5 +2327 +2328 self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}'] +2329 if self.sessions[session]['scrambling_drift']: +2330 i = self.standardization.var_names.index(f'a2_{pf(session)}') +2331 self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5 +2332 else: +2333 self.sessions[session]['SE_a2'] = 0.
+2334 +2335 self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}'] +2336 if self.sessions[session]['slope_drift']: +2337 i = self.standardization.var_names.index(f'b2_{pf(session)}') +2338 self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5 +2339 else: +2340 self.sessions[session]['SE_b2'] = 0. +2341 +2342 self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}'] +2343 if self.sessions[session]['wg_drift']: +2344 i = self.standardization.var_names.index(f'c2_{pf(session)}') +2345 self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5 +2346 else: +2347 self.sessions[session]['SE_c2'] = 0. +2348 +2349 i = self.standardization.var_names.index(f'a_{pf(session)}') +2350 j = self.standardization.var_names.index(f'b_{pf(session)}') +2351 k = self.standardization.var_names.index(f'c_{pf(session)}') +2352 CM = np.zeros((6,6)) +2353 CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]] +2354 try: +2355 i2 = self.standardization.var_names.index(f'a2_{pf(session)}') +2356 CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]] +2357 CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2] +2358 try: +2359 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') +2360 CM[3,4] = self.standardization.covar[i2,j2] +2361 CM[4,3] = self.standardization.covar[j2,i2] +2362 except ValueError: +2363 pass +2364 try: +2365 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2366 CM[3,5] = self.standardization.covar[i2,k2] +2367 CM[5,3] = self.standardization.covar[k2,i2] +2368 except ValueError: +2369 pass +2370 except ValueError: +2371 pass +2372 try: +2373 j2 = self.standardization.var_names.index(f'b2_{pf(session)}') +2374 CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]] +2375 CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2] +2376 try: +2377 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2378 CM[4,5] = self.standardization.covar[j2,k2] +2379 CM[5,4] = self.standardization.covar[k2,j2] +2380 except ValueError: +2381 pass +2382 except ValueError: +2383 pass +2384 try: +2385 k2 = self.standardization.var_names.index(f'c2_{pf(session)}') +2386 CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]] +2387 CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2] +2388 except ValueError: +2389 pass +2390 +2391 self.sessions[session]['CM'] = CM +2392 +2393 elif self.standardization_method == 'indep_sessions': +2394 pass # Not implemented yet 2395 -2396 self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors') -2397 self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors') -2398 self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors') -2399 self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns') -2400 self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples') -2401 -2402 -2403 @make_verbal -2404 def consolidate(self, tables = True, plots = True): -2405 ''' -2406 Collect information about samples, sessions and repeatabilities. -2407 ''' -2408 self.consolidate_samples() -2409 self.consolidate_sessions() -2410 self.repeatabilities() +2396 +2397 @make_verbal +2398 def repeatabilities(self): +2399 ''' +2400 Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x +2401 (for all samples, for anchors, and for unknowns). 
+2402 ''' +2403 self.msg('Computing repeatabilities for all sessions') +2404 +2405 self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors') +2406 self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors') +2407 self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors') +2408 self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns') +2409 self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples') +2410 2411 -2412 if tables: -2413 self.summary() -2414 self.table_of_sessions() -2415 self.table_of_analyses() -2416 self.table_of_samples() -2417 -2418 if plots: -2419 self.plot_sessions() +2412 @make_verbal +2413 def consolidate(self, tables = True, plots = True): +2414 ''' +2415 Collect information about samples, sessions and repeatabilities. +2416 ''' +2417 self.consolidate_samples() +2418 self.consolidate_sessions() +2419 self.repeatabilities() 2420 -2421 -2422 @make_verbal -2423 def rmswd(self, -2424 samples = 'all samples', -2425 sessions = 'all sessions', -2426 ): -2427 ''' -2428 Compute the χ2, root mean squared weighted deviation -2429 (i.e. reduced χ2), and corresponding degrees of freedom of the -2430 Δ4x values for samples in `samples` and sessions in `sessions`. -2431 -2432 Only used in `D4xdata.standardize()` with `method='indep_sessions'`. -2433 ''' -2434 if samples == 'all samples': -2435 mysamples = [k for k in self.samples] -2436 elif samples == 'anchors': -2437 mysamples = [k for k in self.anchors] -2438 elif samples == 'unknowns': -2439 mysamples = [k for k in self.unknowns] -2440 else: -2441 mysamples = samples -2442 -2443 if sessions == 'all sessions': -2444 sessions = [k for k in self.sessions] -2445 -2446 chisq, Nf = 0, 0 -2447 for sample in mysamples : -2448 G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2449 if len(G) > 1 : -2450 X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G]) -2451 Nf += (len(G) - 1) -2452 chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G]) -2453 r = (chisq / Nf)**.5 if Nf > 0 else 0 -2454 self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.') -2455 return {'rmswd': r, 'chisq': chisq, 'Nf': Nf} -2456 -2457 -2458 @make_verbal -2459 def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'): -2460 ''' -2461 Compute the repeatability of `[r[key] for r in self]` -2462 ''' -2463 # NB: it's debatable whether rD47 should be computed -2464 # with Nf = len(self)-len(self.samples) instead of -2465 # Nf = len(self) - len(self.unknwons) - 3*len(self.sessions) -2466 -2467 if samples == 'all samples': -2468 mysamples = [k for k in self.samples] -2469 elif samples == 'anchors': -2470 mysamples = [k for k in self.anchors] -2471 elif samples == 'unknowns': -2472 mysamples = [k for k in self.unknowns] -2473 else: -2474 mysamples = samples +2421 if tables: +2422 self.summary() +2423 self.table_of_sessions() +2424 self.table_of_analyses() +2425 self.table_of_samples() +2426 +2427 if plots: +2428 self.plot_sessions() +2429 +2430 +2431 @make_verbal +2432 def rmswd(self, +2433 samples = 'all samples', +2434 sessions = 'all sessions', +2435 ): +2436 ''' +2437 Compute the χ2, root mean squared weighted deviation +2438 (i.e. reduced χ2), and corresponding degrees of freedom of the +2439 Δ4x values for samples in `samples` and sessions in `sessions`.
+2440 +2441 Only used in `D4xdata.standardize()` with `method='indep_sessions'`. +2442 ''' +2443 if samples == 'all samples': +2444 mysamples = [k for k in self.samples] +2445 elif samples == 'anchors': +2446 mysamples = [k for k in self.anchors] +2447 elif samples == 'unknowns': +2448 mysamples = [k for k in self.unknowns] +2449 else: +2450 mysamples = samples +2451 +2452 if sessions == 'all sessions': +2453 sessions = [k for k in self.sessions] +2454 +2455 chisq, Nf = 0, 0 +2456 for sample in mysamples : +2457 G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ] +2458 if len(G) > 1 : +2459 X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G]) +2460 Nf += (len(G) - 1) +2461 chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G]) +2462 r = (chisq / Nf)**.5 if Nf > 0 else 0 +2463 self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.') +2464 return {'rmswd': r, 'chisq': chisq, 'Nf': Nf} +2465 +2466 +2467 @make_verbal +2468 def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'): +2469 ''' +2470 Compute the repeatability of `[r[key] for r in self]` +2471 ''' +2472 # NB: it's debatable whether rD47 should be computed +2473 # with Nf = len(self)-len(self.samples) instead of +2474 # Nf = len(self) - len(self.unknowns) - 3*len(self.sessions) 2475 -2476 if sessions == 'all sessions': -2477 sessions = [k for k in self.sessions] -2478 -2479 if key in ['D47', 'D48']: -2480 chisq, Nf = 0, 0 -2481 for sample in mysamples : -2482 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2483 if len(X) > 1 : -2484 chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ]) -2485 if sample in self.unknowns: -2486 Nf += len(X) - 1 -2487 else: -2488 Nf += len(X) -2489 if samples in ['anchors', 'all samples']: -2490 Nf -= sum([self.sessions[s]['Np'] for s in sessions]) -2491 r = (chisq / Nf)**.5 if Nf > 0 else 0 -2492 -2493 else: # if key not in ['D47', 'D48'] -2494 chisq, Nf = 0, 0 -2495 for sample in mysamples : -2496 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] -2497 if len(X) > 1 : -2498 Nf += len(X) - 1 -2499 chisq += np.sum([ (x-np.mean(X))**2 for x in X ]) +2476 if samples == 'all samples': +2477 mysamples = [k for k in self.samples] +2478 elif samples == 'anchors': +2479 mysamples = [k for k in self.anchors] +2480 elif samples == 'unknowns': +2481 mysamples = [k for k in self.unknowns] +2482 else: +2483 mysamples = samples +2484 +2485 if sessions == 'all sessions': +2486 sessions = [k for k in self.sessions] +2487 +2488 if key in ['D47', 'D48']: +2489 chisq, Nf = 0, 0 +2490 for sample in mysamples : +2491 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] +2492 if len(X) > 1 : +2493 chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ]) +2494 if sample in self.unknowns: +2495 Nf += len(X) - 1 +2496 else: +2497 Nf += len(X) +2498 if samples in ['anchors', 'all samples']: +2499 Nf -= sum([self.sessions[s]['Np'] for s in sessions]) 2500 r = (chisq / Nf)**.5 if Nf > 0 else 0 2501 -2502 self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.') -2503 return r -2504 -2505 def sample_average(self, samples, weights = 'equal', normalize = True): -2506 ''' -2507 Weighted average Δ4x value of a group of samples, accounting for covariance. -2508 -2509 Returns the weighed average Δ4x value and associated SE -2510 of a group of samples. Weights are equal by default.
If `normalize` is -2511 true, `weights` will be rescaled so that their sum equals 1. -2512 -2513 **Examples** -2514 -2515 ```python -2516 self.sample_average(['X','Y'], [1, 2]) -2517 ``` -2518 -2519 returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3, -2520 where Δ4x(X) and Δ4x(Y) are the average Δ4x -2521 values of samples X and Y, respectively. -2522 -2523 ```python -2524 self.sample_average(['X','Y'], [1, -1], normalize = False) -2525 ``` -2526 -2527 returns the value and SE of the difference Δ4x(X) - Δ4x(Y). -2528 ''' -2529 if weights == 'equal': -2530 weights = [1/len(samples)] * len(samples) +2502 else: # if key not in ['D47', 'D48'] +2503 chisq, Nf = 0, 0 +2504 for sample in mysamples : +2505 X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ] +2506 if len(X) > 1 : +2507 Nf += len(X) - 1 +2508 chisq += np.sum([ (x-np.mean(X))**2 for x in X ]) +2509 r = (chisq / Nf)**.5 if Nf > 0 else 0 +2510 +2511 self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.') +2512 return r +2513 +2514 def sample_average(self, samples, weights = 'equal', normalize = True): +2515 ''' +2516 Weighted average Δ4x value of a group of samples, accounting for covariance. +2517 +2518 Returns the weighted average Δ4x value and associated SE +2519 of a group of samples. Weights are equal by default. If `normalize` is +2520 true, `weights` will be rescaled so that their sum equals 1. +2521 +2522 **Examples** +2523 +2524 ```python +2525 self.sample_average(['X','Y'], [1, 2]) +2526 ``` +2527 +2528 returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3, +2529 where Δ4x(X) and Δ4x(Y) are the average Δ4x +2530 values of samples X and Y, respectively. 2531 -2532 if normalize: -2533 s = sum(weights) -2534 if s: -2535 weights = [w/s for w in weights] -2536 -2537 try: -2538# indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples] -2539# C = self.standardization.covar[indices,:][:,indices] -2540 C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples]) -2541 X = [self.samples[sample][f'D{self._4x}'] for sample in samples] -2542 return correlated_sum(X, C, weights) -2543 except ValueError: -2544 return (0., 0.) 2545 -2546 -2547 def sample_D4x_covar(self, sample1, sample2 = None): -2548 ''' -2549 Covariance between Δ4x values of samples -2550 -2551 Returns the error covariance between the average Δ4x values of two -2552 samples. If if only `sample_1` is specified, or if `sample_1 == sample_2`), -2553 returns the Δ4x variance for that sample.
-2554 ''' -2555 if sample2 is None: -2556 sample2 = sample1 -2557 if self.standardization_method == 'pooled': -2558 i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}') -2559 j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}') -2560 return self.standardization.covar[i, j] -2561 elif self.standardization_method == 'indep_sessions': -2562 if sample1 == sample2: -2563 return self.samples[sample1][f'SE_D{self._4x}']**2 -2564 else: -2565 c = 0 -2566 for session in self.sessions: -2567 sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1] -2568 sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2] -2569 if sdata1 and sdata2: -2570 a = self.sessions[session]['a'] -2571 # !! TODO: CM below does not account for temporal changes in standardization parameters -2572 CM = self.sessions[session]['CM'][:3,:3] -2573 avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1]) -2574 avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1]) -2575 avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2]) -2576 avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2]) -2577 c += ( -2578 self.unknowns[sample1][f'session_D{self._4x}'][session][2] -2579 * self.unknowns[sample2][f'session_D{self._4x}'][session][2] -2580 * np.array([[avg_D4x_1, avg_d4x_1, 1]]) -2581 @ CM -2582 @ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T -2583 ) / a**2 -2584 return float(c) -2585 -2586 def sample_D4x_correl(self, sample1, sample2 = None): -2587 ''' -2588 Correlation between Δ4x errors of samples -2589 -2590 Returns the error correlation between the average Δ4x values of two samples. -2591 ''' -2592 if sample2 is None or sample2 == sample1: -2593 return 1. -2594 return ( -2595 self.sample_D4x_covar(sample1, sample2) -2596 / self.unknowns[sample1][f'SE_D{self._4x}'] -2597 / self.unknowns[sample2][f'SE_D{self._4x}'] -2598 ) -2599 -2600 def plot_single_session(self, -2601 session, -2602 kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4), -2603 kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4), -2604 kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75), -2605 kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75), -2606 kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75), -2607 xylimits = 'free', # | 'constant' -2608 x_label = None, -2609 y_label = None, -2610 error_contour_interval = 'auto', -2611 fig = 'new', -2612 ): -2613 ''' -2614 Generate plot for a single session -2615 ''' -2616 if x_label is None: -2617 x_label = f'δ$_{{{self._4x}}}$ (‰)' -2618 if y_label is None: -2619 y_label = f'Δ$_{{{self._4x}}}$ (‰)' -2620 -2621 out = _SessionPlot() -2622 anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]] -2623 unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]] -2624 -2625 if fig == 'new': -2626 out.fig = ppl.figure(figsize = (6,6)) -2627 ppl.subplots_adjust(.1,.1,.9,.9) -2628 -2629 out.anchor_analyses, = ppl.plot( -2630 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], -2631 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], -2632 **kw_plot_anchors) -2633 out.unknown_analyses, = ppl.plot( -2634 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], -2635 [r[f'D{self._4x}'] for r in 
self.sessions[session]['data'] if r['Sample'] in self.unknowns], -2636 **kw_plot_unknowns) -2637 out.anchor_avg = ppl.plot( -2638 np.array([ np.array([ -2639 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, -2640 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 -2641 ]) for sample in anchors]).T, -2642 np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T, -2643 **kw_plot_anchor_avg) -2644 out.unknown_avg = ppl.plot( -2645 np.array([ np.array([ -2646 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, -2647 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 -2648 ]) for sample in unknowns]).T, -2649 np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T, -2650 **kw_plot_unknown_avg) -2651 if xylimits == 'constant': -2652 x = [r[f'd{self._4x}'] for r in self] -2653 y = [r[f'D{self._4x}'] for r in self] -2654 x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y) -2655 w, h = x2-x1, y2-y1 -2656 x1 -= w/20 -2657 x2 += w/20 -2658 y1 -= h/20 -2659 y2 += h/20 -2660 ppl.axis([x1, x2, y1, y2]) -2661 elif xylimits == 'free': -2662 x1, x2, y1, y2 = ppl.axis() -2663 else: -2664 x1, x2, y1, y2 = ppl.axis(xylimits) -2665 -2666 if error_contour_interval != 'none': -2667 xi, yi = np.linspace(x1, x2), np.linspace(y1, y2) -2668 XI,YI = np.meshgrid(xi, yi) -2669 SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi]) -2670 if error_contour_interval == 'auto': -2671 rng = np.max(SI) - np.min(SI) -2672 if rng <= 0.01: -2673 cinterval = 0.001 -2674 elif rng <= 0.03: -2675 cinterval = 0.004 -2676 elif rng <= 0.1: -2677 cinterval = 0.01 -2678 elif rng <= 0.3: -2679 cinterval = 0.03 -2680 elif rng <= 1.: -2681 cinterval = 0.1 -2682 else: -2683 cinterval = 0.5 -2684 else: -2685 cinterval = error_contour_interval -2686 -2687 cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval) -2688 out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error) -2689 out.clabel = ppl.clabel(out.contour) -2690 -2691 ppl.xlabel(x_label) -2692 ppl.ylabel(y_label) -2693 ppl.title(session, weight = 'bold') -2694 ppl.grid(alpha = .2) -2695 out.ax = ppl.gca() -2696 -2697 return out -2698 -2699 def plot_residuals( -2700 self, -2701 hist = False, -2702 binwidth = 2/3, -2703 dir = 'output', -2704 filename = None, -2705 highlight = [], -2706 colors = None, -2707 figsize = None, -2708 ): -2709 ''' -2710 Plot residuals of each analysis as a function of time (actually, as a function of -2711 the order of analyses in the `D4xdata` object) -2712 -2713 + `hist`: whether to add a histogram of residuals -2714 + `histbins`: specify bin edges for the histogram -2715 + `dir`: the directory in which to save the plot -2716 + `highlight`: a list of samples to highlight -2717 + `colors`: a dict of `{<sample>: <color>}` for all samples -2718 + `figsize`: (width, height) of figure -2719 ''' -2720 # Layout -2721 fig = ppl.figure(figsize = (8,4) if figsize is None else figsize) -2722 if hist: -2723 ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72) -2724 ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15) -2725 else: -2726 ppl.subplots_adjust(.08,.05,.78,.8) -2727 ax1 = ppl.subplot(111) -2728 -2729 # Colors -2730 N = len(self.anchors) -2731 if colors is None: -2732 if len(highlight) > 0: -2733 Nh = 
len(highlight) -2734 if Nh == 1: -2735 colors = {highlight[0]: (0,0,0)} -2736 elif Nh == 3: -2737 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])} -2738 elif Nh == 4: -2739 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} -2740 else: -2741 colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)} -2742 else: -2743 if N == 3: -2744 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])} -2745 elif N == 4: -2746 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} -2747 else: -2748 colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)} -2749 -2750 ppl.sca(ax1) -2751 -2752 ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75) -2753 -2754 session = self[0]['Session'] -2755 x1 = 0 -2756# ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self]) -2757 x_sessions = {} -2758 one_or_more_singlets = False -2759 one_or_more_multiplets = False -2760 multiplets = set() -2761 for k,r in enumerate(self): -2762 if r['Session'] != session: -2763 x2 = k-1 -2764 x_sessions[session] = (x1+x2)/2 -2765 ppl.axvline(k - 0.5, color = 'k', lw = .5) -2766 session = r['Session'] -2767 x1 = k -2768 singlet = len(self.samples[r['Sample']]['data']) == 1 -2769 if not singlet: -2770 multiplets.add(r['Sample']) -2771 if r['Sample'] in self.unknowns: -2772 if singlet: -2773 one_or_more_singlets = True -2774 else: -2775 one_or_more_multiplets = True -2776 kw = dict( -2777 marker = 'x' if singlet else '+', -2778 ms = 4 if singlet else 5, -2779 ls = 'None', -2780 mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0), -2781 mew = 1, -2782 alpha = 0.2 if singlet else 1, -2783 ) -2784 if highlight and r['Sample'] not in highlight: -2785 kw['alpha'] = 0.2 -2786 ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw) -2787 x2 = k -2788 x_sessions[session] = (x1+x2)/2 -2789 -2790 ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1) -2791 ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1) -2792 if not hist: -2793 ppl.text(len(self), self.repeatability['r_D47']*1000, f" SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center') -2794 ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f" 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center') -2795 -2796 xmin, xmax, ymin, ymax = ppl.axis() -2797 for s in x_sessions: -2798 ppl.text( -2799 x_sessions[s], -2800 ymax +1, -2801 s, -2802 va = 'bottom', -2803 **( -2804 dict(ha = 'center') -2805 if len(self.sessions[s]['data']) > (0.15 * len(self)) -2806 else dict(ha = 'left', rotation = 45) -2807 ) -2808 ) -2809 -2810 if hist: -2811 ppl.sca(ax2) -2812 -2813 for s in colors: -2814 kw['marker'] = '+' -2815 kw['ms'] = 5 -2816 kw['mec'] = colors[s] -2817 kw['label'] = s -2818 kw['alpha'] = 1 -2819 ppl.plot([], [], **kw) -2820 -2821 kw['mec'] = (0,0,0) -2822 -2823 if one_or_more_singlets: -2824 kw['marker'] = 'x' -2825 kw['ms'] = 4 -2826 kw['alpha'] = .2 -2827 kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other' +2546 try: +2547# indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples] +2548# C = self.standardization.covar[indices,:][:,indices] +2549 C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in 
samples]) +2550 X = [self.samples[sample][f'D{self._4x}'] for sample in samples] +2551 return correlated_sum(X, C, weights) +2552 except ValueError: +2553 return (0., 0.) +2554 +2555 +2556 def sample_D4x_covar(self, sample1, sample2 = None): +2557 ''' +2558 Covariance between Δ4x values of samples +2559 +2560 Returns the error covariance between the average Δ4x values of two +2561 samples. If only `sample1` is specified, or if `sample1 == sample2`, +2562 returns the Δ4x variance for that sample. +2563 ''' +2564 if sample2 is None: +2565 sample2 = sample1 +2566 if self.standardization_method == 'pooled': +2567 i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}') +2568 j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}') +2569 return self.standardization.covar[i, j] +2570 elif self.standardization_method == 'indep_sessions': +2571 if sample1 == sample2: +2572 return self.samples[sample1][f'SE_D{self._4x}']**2 +2573 else: +2574 c = 0 +2575 for session in self.sessions: +2576 sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1] +2577 sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2] +2578 if sdata1 and sdata2: +2579 a = self.sessions[session]['a'] +2580 # !! TODO: CM below does not account for temporal changes in standardization parameters +2581 CM = self.sessions[session]['CM'][:3,:3] +2582 avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1]) +2583 avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1]) +2584 avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2]) +2585 avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2]) +2586 c += ( +2587 self.unknowns[sample1][f'session_D{self._4x}'][session][2] +2588 * self.unknowns[sample2][f'session_D{self._4x}'][session][2] +2589 * np.array([[avg_D4x_1, avg_d4x_1, 1]]) +2590 @ CM +2591 @ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T +2592 ) / a**2 +2593 return float(c) +2594 +2595 def sample_D4x_correl(self, sample1, sample2 = None): +2596 ''' +2597 Correlation between Δ4x errors of samples +2598 +2599 Returns the error correlation between the average Δ4x values of two samples. +2600 ''' +2601 if sample2 is None or sample2 == sample1: +2602 return 1.
+2603 return ( +2604 self.sample_D4x_covar(sample1, sample2) +2605 / self.unknowns[sample1][f'SE_D{self._4x}'] +2606 / self.unknowns[sample2][f'SE_D{self._4x}'] +2607 ) +2608 +2609 def plot_single_session(self, +2610 session, +2611 kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4), +2612 kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4), +2613 kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75), +2614 kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75), +2615 kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75), +2616 xylimits = 'free', # | 'constant' +2617 x_label = None, +2618 y_label = None, +2619 error_contour_interval = 'auto', +2620 fig = 'new', +2621 ): +2622 ''' +2623 Generate plot for a single session +2624 ''' +2625 if x_label is None: +2626 x_label = f'δ$_{{{self._4x}}}$ (‰)' +2627 if y_label is None: +2628 y_label = f'Δ$_{{{self._4x}}}$ (‰)' +2629 +2630 out = _SessionPlot() +2631 anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]] +2632 unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]] +2633 +2634 if fig == 'new': +2635 out.fig = ppl.figure(figsize = (6,6)) +2636 ppl.subplots_adjust(.1,.1,.9,.9) +2637 +2638 out.anchor_analyses, = ppl.plot( +2639 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], +2640 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors], +2641 **kw_plot_anchors) +2642 out.unknown_analyses, = ppl.plot( +2643 [r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], +2644 [r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns], +2645 **kw_plot_unknowns) +2646 out.anchor_avg = ppl.plot( +2647 np.array([ np.array([ +2648 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, +2649 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 +2650 ]) for sample in anchors]).T, +2651 np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T, +2652 **kw_plot_anchor_avg) +2653 out.unknown_avg = ppl.plot( +2654 np.array([ np.array([ +2655 np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1, +2656 np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1 +2657 ]) for sample in unknowns]).T, +2658 np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T, +2659 **kw_plot_unknown_avg) +2660 if xylimits == 'constant': +2661 x = [r[f'd{self._4x}'] for r in self] +2662 y = [r[f'D{self._4x}'] for r in self] +2663 x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y) +2664 w, h = x2-x1, y2-y1 +2665 x1 -= w/20 +2666 x2 += w/20 +2667 y1 -= h/20 +2668 y2 += h/20 +2669 ppl.axis([x1, x2, y1, y2]) +2670 elif xylimits == 'free': +2671 x1, x2, y1, y2 = ppl.axis() +2672 else: +2673 x1, x2, y1, y2 = ppl.axis(xylimits) +2674 +2675 if error_contour_interval != 'none': +2676 xi, yi = np.linspace(x1, x2), np.linspace(y1, y2) +2677 XI,YI = np.meshgrid(xi, yi) +2678 SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi]) +2679 if error_contour_interval == 'auto': +2680 rng = np.max(SI) - np.min(SI) +2681 if rng <= 0.01: +2682 cinterval = 0.001 +2683 
elif rng <= 0.03: +2684 cinterval = 0.004 +2685 elif rng <= 0.1: +2686 cinterval = 0.01 +2687 elif rng <= 0.3: +2688 cinterval = 0.03 +2689 elif rng <= 1.: +2690 cinterval = 0.1 +2691 else: +2692 cinterval = 0.5 +2693 else: +2694 cinterval = error_contour_interval +2695 +2696 cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval) +2697 out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error) +2698 out.clabel = ppl.clabel(out.contour) +2699 +2700 ppl.xlabel(x_label) +2701 ppl.ylabel(y_label) +2702 ppl.title(session, weight = 'bold') +2703 ppl.grid(alpha = .2) +2704 out.ax = ppl.gca() +2705 +2706 return out +2707 +2708 def plot_residuals( +2709 self, +2710 hist = False, +2711 binwidth = 2/3, +2712 dir = 'output', +2713 filename = None, +2714 highlight = [], +2715 colors = None, +2716 figsize = None, +2717 ): +2718 ''' +2719 Plot residuals of each analysis as a function of time (actually, as a function of +2720 the order of analyses in the `D4xdata` object) +2721 +2722 + `hist`: whether to add a histogram of residuals +2723 + `binwidth`: histogram bin width, in units of the Δ4x repeatability +2724 + `dir`: the directory in which to save the plot +2725 + `highlight`: a list of samples to highlight +2726 + `colors`: a dict of `{<sample>: <color>}` for all samples +2727 + `figsize`: (width, height) of figure +2728 ''' +2729 # Layout +2730 fig = ppl.figure(figsize = (8,4) if figsize is None else figsize) +2731 if hist: +2732 ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72) +2733 ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15) +2734 else: +2735 ppl.subplots_adjust(.08,.05,.78,.8) +2736 ax1 = ppl.subplot(111) +2737 +2738 # Colors +2739 N = len(self.anchors) +2740 if colors is None: +2741 if len(highlight) > 0: +2742 Nh = len(highlight) +2743 if Nh == 1: +2744 colors = {highlight[0]: (0,0,0)} +2745 elif Nh == 3: +2746 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])} +2747 elif Nh == 4: +2748 colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} +2749 else: +2750 colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)} +2751 else: +2752 if N == 3: +2753 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])} +2754 elif N == 4: +2755 colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])} +2756 else: +2757 colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)} +2758 +2759 ppl.sca(ax1) +2760 +2761 ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75) +2762 +2763 session = self[0]['Session'] +2764 x1 = 0 +2765# ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self]) +2766 x_sessions = {} +2767 one_or_more_singlets = False +2768 one_or_more_multiplets = False +2769 multiplets = set() +2770 for k,r in enumerate(self): +2771 if r['Session'] != session: +2772 x2 = k-1 +2773 x_sessions[session] = (x1+x2)/2 +2774 ppl.axvline(k - 0.5, color = 'k', lw = .5) +2775 session = r['Session'] +2776 x1 = k +2777 singlet = len(self.samples[r['Sample']]['data']) == 1 +2778 if not singlet: +2779 multiplets.add(r['Sample']) +2780 if r['Sample'] in self.unknowns: +2781 if singlet: +2782 one_or_more_singlets = True +2783 else: +2784 one_or_more_multiplets = True +2785 kw = dict( +2786 marker = 'x' if singlet else '+', +2787 ms = 4 if singlet else 5, +2788 ls = 'None', +2789 mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0), +2790 mew = 1, +2791 alpha = 0.2 if singlet else 1, +2792 )
+2793 if highlight and r['Sample'] not in highlight: +2794 kw['alpha'] = 0.2 +2795 ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw) +2796 x2 = k +2797 x_sessions[session] = (x1+x2)/2 +2798 +2799 ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1) +2800 ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1) +2801 if not hist: +2802 ppl.text(len(self), self.repeatability['r_D47']*1000, f" SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center') +2803 ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f" 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center') +2804 +2805 xmin, xmax, ymin, ymax = ppl.axis() +2806 for s in x_sessions: +2807 ppl.text( +2808 x_sessions[s], +2809 ymax +1, +2810 s, +2811 va = 'bottom', +2812 **( +2813 dict(ha = 'center') +2814 if len(self.sessions[s]['data']) > (0.15 * len(self)) +2815 else dict(ha = 'left', rotation = 45) +2816 ) +2817 ) +2818 +2819 if hist: +2820 ppl.sca(ax2) +2821 +2822 for s in colors: +2823 kw['marker'] = '+' +2824 kw['ms'] = 5 +2825 kw['mec'] = colors[s] +2826 kw['label'] = s +2827 kw['alpha'] = 1 2828 ppl.plot([], [], **kw) 2829 -2830 if one_or_more_multiplets: -2831 kw['marker'] = '+' -2832 kw['ms'] = 4 -2833 kw['alpha'] = 1 -2834 kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other' -2835 ppl.plot([], [], **kw) -2836 -2837 if hist: -2838 leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9) -2839 else: -2840 leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5) -2841 leg.set_zorder(-1000) -2842 -2843 ppl.sca(ax1) -2844 -2845 ppl.ylabel('Δ$_{47}$ residuals (ppm)') -2846 ppl.xticks([]) -2847 ppl.axis([-1, len(self), None, None]) -2848 -2849 if hist: -2850 ppl.sca(ax2) -2851 X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets] -2852 ppl.hist( -2853 X, -2854 orientation = 'horizontal', -2855 histtype = 'stepfilled', -2856 ec = [.4]*3, -2857 fc = [.25]*3, -2858 alpha = .25, -2859 bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)), -2860 ) -2861 ppl.axis([None, None, ymin, ymax]) -2862 ppl.text(0, 0, -2863 f" SD = {self.repeatability['r_D47']*1000:.1f} ppm\n 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", -2864 size = 8, -2865 alpha = 1, -2866 va = 'center', -2867 ha = 'left', -2868 ) -2869 -2870 ppl.xticks([]) -2871 ppl.yticks([]) -2872# ax2.spines['left'].set_visible(False) -2873 ax2.spines['right'].set_visible(False) -2874 ax2.spines['top'].set_visible(False) -2875 ax2.spines['bottom'].set_visible(False) -2876 -2877 -2878 if not os.path.exists(dir): -2879 os.makedirs(dir) -2880 if filename is None: -2881 return fig -2882 elif filename == '': -2883 filename = f'D{self._4x}_residuals.pdf' -2884 ppl.savefig(f'{dir}/{filename}') -2885 ppl.close(fig) -2886 -2887 -2888 def simulate(self, *args, **kwargs): -2889 ''' -2890 Legacy function with warning message pointing to `virtual_data()` -2891 ''' -2892 raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()') -2893 -2894 def plot_distribution_of_analyses( -2895 self, -2896 dir = 'output', -2897 filename = None, -2898 vs_time = False, -2899 
figsize = (6,4), -2900 subplots_adjust = (0.02, 0.13, 0.85, 0.8), -2901 output = None, -2902 ): -2903 ''' -2904 Plot temporal distribution of all analyses in the data set. -2905 -2906 **Parameters** -2907 -2908 + `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially. -2909 ''' -2910 -2911 asamples = [s for s in self.anchors] -2912 usamples = [s for s in self.unknowns] -2913 if output is None or output == 'fig': -2914 fig = ppl.figure(figsize = figsize) -2915 ppl.subplots_adjust(*subplots_adjust) -2916 Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) -2917 Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) -2918 Xmax += (Xmax-Xmin)/40 -2919 Xmin -= (Xmax-Xmin)/41 -2920 for k, s in enumerate(asamples + usamples): -2921 if vs_time: -2922 X = [r['TimeTag'] for r in self if r['Sample'] == s] -2923 else: -2924 X = [x for x,r in enumerate(self) if r['Sample'] == s] -2925 Y = [-k for x in X] -2926 ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75) -2927 ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25) -2928 ppl.text(Xmax, -k, f' {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r') -2929 ppl.axis([Xmin, Xmax, -k-1, 1]) -2930 ppl.xlabel('\ntime') -2931 ppl.gca().annotate('', -2932 xy = (0.6, -0.02), -2933 xycoords = 'axes fraction', -2934 xytext = (.4, -0.02), -2935 arrowprops = dict(arrowstyle = "->", color = 'k'), -2936 ) -2937 -2938 -2939 x2 = -1 -2940 for session in self.sessions: -2941 x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) -2942 if vs_time: -2943 ppl.axvline(x1, color = 'k', lw = .75) -2944 if x2 > -1: -2945 if not vs_time: -2946 ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5) -2947 x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) -2948# from xlrd import xldate_as_datetime -2949# print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0)) -2950 if vs_time: -2951 ppl.axvline(x2, color = 'k', lw = .75) -2952 ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15) -2953 ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8) -2954 -2955 ppl.xticks([]) -2956 ppl.yticks([]) -2957 -2958 if output is None: -2959 if not os.path.exists(dir): -2960 os.makedirs(dir) -2961 if filename == None: -2962 filename = f'D{self._4x}_distribution_of_analyses.pdf' -2963 ppl.savefig(f'{dir}/{filename}') -2964 ppl.close(fig) -2965 elif output == 'ax': -2966 return ppl.gca() -2967 elif output == 'fig': -2968 return fig -2969 -2970 -2971class D47data(D4xdata): -2972 ''' -2973 Store and process data for a large set of Δ47 analyses, -2974 usually comprising more than one analytical session. -2975 ''' -2976 -2977 Nominal_D4x = { -2978 'ETH-1': 0.2052, -2979 'ETH-2': 0.2085, -2980 'ETH-3': 0.6132, -2981 'ETH-4': 0.4511, -2982 'IAEA-C1': 0.3018, -2983 'IAEA-C2': 0.6409, -2984 'MERCK': 0.5135, -2985 } # I-CDES (Bernasconi et al., 2021) -2986 ''' -2987 Nominal Δ47 values assigned to the Δ47 anchor samples, used by -2988 `D47data.standardize()` to normalize unknown samples to an absolute Δ47 -2989 reference frame. -2990 -2991 By default equal to (after [Bernasconi et al. 
(2021)](https://doi.org/10.1029/2020GC009588)): -2992 ```py -2993 { -2994 'ETH-1' : 0.2052, -2995 'ETH-2' : 0.2085, -2996 'ETH-3' : 0.6132, -2997 'ETH-4' : 0.4511, -2998 'IAEA-C1' : 0.3018, -2999 'IAEA-C2' : 0.6409, -3000 'MERCK' : 0.5135, -3001 } -3002 ``` -3003 ''' -3004 -3005 -3006 @property -3007 def Nominal_D47(self): -3008 return self.Nominal_D4x -3009 -3010 -3011 @Nominal_D47.setter -3012 def Nominal_D47(self, new): -3013 self.Nominal_D4x = dict(**new) -3014 self.refresh() -3015 -3016 -3017 def __init__(self, l = [], **kwargs): -3018 ''' -3019 **Parameters:** same as `D4xdata.__init__()` -3020 ''' -3021 D4xdata.__init__(self, l = l, mass = '47', **kwargs) -3022 -3023 -3024 def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'): -3025 ''' -3026 Find all samples for which `Teq` is specified, compute equilibrium Δ47 -3027 value for that temperature, and add treat these samples as additional anchors. +2830 kw['mec'] = (0,0,0) +2831 +2832 if one_or_more_singlets: +2833 kw['marker'] = 'x' +2834 kw['ms'] = 4 +2835 kw['alpha'] = .2 +2836 kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other' +2837 ppl.plot([], [], **kw) +2838 +2839 if one_or_more_multiplets: +2840 kw['marker'] = '+' +2841 kw['ms'] = 4 +2842 kw['alpha'] = 1 +2843 kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other' +2844 ppl.plot([], [], **kw) +2845 +2846 if hist: +2847 leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9) +2848 else: +2849 leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5) +2850 leg.set_zorder(-1000) +2851 +2852 ppl.sca(ax1) +2853 +2854 ppl.ylabel('Δ$_{47}$ residuals (ppm)') +2855 ppl.xticks([]) +2856 ppl.axis([-1, len(self), None, None]) +2857 +2858 if hist: +2859 ppl.sca(ax2) +2860 X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets] +2861 ppl.hist( +2862 X, +2863 orientation = 'horizontal', +2864 histtype = 'stepfilled', +2865 ec = [.4]*3, +2866 fc = [.25]*3, +2867 alpha = .25, +2868 bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)), +2869 ) +2870 ppl.axis([None, None, ymin, ymax]) +2871 ppl.text(0, 0, +2872 f" SD = {self.repeatability['r_D47']*1000:.1f} ppm\n 95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", +2873 size = 8, +2874 alpha = 1, +2875 va = 'center', +2876 ha = 'left', +2877 ) +2878 +2879 ppl.xticks([]) +2880 ppl.yticks([]) +2881# ax2.spines['left'].set_visible(False) +2882 ax2.spines['right'].set_visible(False) +2883 ax2.spines['top'].set_visible(False) +2884 ax2.spines['bottom'].set_visible(False) +2885 +2886 +2887 if not os.path.exists(dir): +2888 os.makedirs(dir) +2889 if filename is None: +2890 return fig +2891 elif filename == '': +2892 filename = f'D{self._4x}_residuals.pdf' +2893 ppl.savefig(f'{dir}/{filename}') +2894 ppl.close(fig) +2895 +2896 +2897 def simulate(self, *args, **kwargs): +2898 ''' +2899 Legacy function with warning message pointing to `virtual_data()` +2900 ''' +2901 raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()') +2902 +2903 def plot_distribution_of_analyses( +2904 self, +2905 dir = 'output', +2906 filename = None, +2907 vs_time = False, +2908 figsize = (6,4), +2909 subplots_adjust = (0.02, 0.13, 0.85, 0.8), +2910 output = None, +2911 ): +2912 ''' +2913 Plot temporal distribution of all analyses in the data set. 
+2914 +2915 **Parameters** +2916 +2917 + `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially. +2918 ''' +2919 +2920 asamples = [s for s in self.anchors] +2921 usamples = [s for s in self.unknowns] +2922 if output is None or output == 'fig': +2923 fig = ppl.figure(figsize = figsize) +2924 ppl.subplots_adjust(*subplots_adjust) +2925 Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) +2926 Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)]) +2927 Xmax += (Xmax-Xmin)/40 +2928 Xmin -= (Xmax-Xmin)/41 +2929 for k, s in enumerate(asamples + usamples): +2930 if vs_time: +2931 X = [r['TimeTag'] for r in self if r['Sample'] == s] +2932 else: +2933 X = [x for x,r in enumerate(self) if r['Sample'] == s] +2934 Y = [-k for x in X] +2935 ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75) +2936 ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25) +2937 ppl.text(Xmax, -k, f' {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r') +2938 ppl.axis([Xmin, Xmax, -k-1, 1]) +2939 ppl.xlabel('\ntime') +2940 ppl.gca().annotate('', +2941 xy = (0.6, -0.02), +2942 xycoords = 'axes fraction', +2943 xytext = (.4, -0.02), +2944 arrowprops = dict(arrowstyle = "->", color = 'k'), +2945 ) +2946 +2947 +2948 x2 = -1 +2949 for session in self.sessions: +2950 x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) +2951 if vs_time: +2952 ppl.axvline(x1, color = 'k', lw = .75) +2953 if x2 > -1: +2954 if not vs_time: +2955 ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5) +2956 x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session]) +2957# from xlrd import xldate_as_datetime +2958# print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0)) +2959 if vs_time: +2960 ppl.axvline(x2, color = 'k', lw = .75) +2961 ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15) +2962 ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8) +2963 +2964 ppl.xticks([]) +2965 ppl.yticks([]) +2966 +2967 if output is None: +2968 if not os.path.exists(dir): +2969 os.makedirs(dir) +2970 if filename == None: +2971 filename = f'D{self._4x}_distribution_of_analyses.pdf' +2972 ppl.savefig(f'{dir}/{filename}') +2973 ppl.close(fig) +2974 elif output == 'ax': +2975 return ppl.gca() +2976 elif output == 'fig': +2977 return fig +2978 +2979 +2980 def plot_bulk_compositions( +2981 self, +2982 samples = None, +2983 dir = 'output/bulk_compositions', +2984 figsize = (6,6), +2985 subplots_adjust = (0.15, 0.12, 0.95, 0.92), +2986 show = False, +2987 sample_color = (0,.5,1), +2988 analysis_color = (.7,.7,.7), +2989 labeldist = 0.3, +2990 radius = 0.05, +2991 ): +2992 ''' +2993 Plot δ13C_VPDB vs δ18O_VSMOW (of CO2) for all analyses. +2994 +2995 By default, creates a directory `./output/bulk_compositions` where plots for +2996 each sample are saved. Another plot named `__all__.pdf` shows all analyses together. +2997 +2998 +2999 **Parameters** +3000 +3001 + `samples`: Only these samples are processed (by default: all samples). +3002 + `dir`: where to save the plots +3003 + `figsize`: (width, height) of figure +3004 + `subplots_adjust`: passed to `subplots_adjust()` +3005 + `show`: whether to call `matplotlib.pyplot.show()` on the plot with all samples, +3006 allowing for interactive visualization/exploration in (δ13C, δ18O) space.
+3007 + `sample_color`: color used for sample markers/labels +3008 + `analysis_color`: color used for replicate (analysis) markers/labels +3009 + `labeldist`: distance (in inches) from replicate markers to replicate labels +3010 + `radius`: radius of the dashed circle providing scale. No circle if `radius = 0`. +3011 ''' +3012 +3013 from matplotlib.patches import Ellipse +3014 +3015 if samples is None: +3016 samples = [_ for _ in self.samples] +3017 +3018 saved = {} +3019 +3020 for s in samples: +3021 +3022 fig = ppl.figure(figsize = figsize) +3023 fig.subplots_adjust(*subplots_adjust) +3024 ax = ppl.subplot(111) +3025 ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)') +3026 ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)') +3027 ppl.title(s) 3028 -3029 **Parameters** -3030 -3031 + `fCo2eqD47`: Which CO2 equilibrium law to use -3032 (`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127); -3033 `wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)). -3034 + `priority`: if `replace`: forget old anchors and only use the new ones; -3035 if `new`: keep pre-existing anchors but update them in case of conflict -3036 between old and new Δ47 values; -3037 if `old`: keep pre-existing anchors but preserve their original Δ47 -3038 values in case of conflict. -3039 ''' -3040 f = { -3041 'petersen': fCO2eqD47_Petersen, -3042 'wang': fCO2eqD47_Wang, -3043 }[fCo2eqD47] -3044 foo = {} -3045 for r in self: -3046 if 'Teq' in r: -3047 if r['Sample'] in foo: -3048 assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.' -3049 else: -3050 foo[r['Sample']] = f(r['Teq']) -3051 else: -3052 assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.' +3029 +3030 XY = np.array([[_['d18O_VSMOW'], _['d13C_VPDB']] for _ in self.samples[s]['data']]) +3031 UID = [_['UID'] for _ in self.samples[s]['data']] +3032 XY0 = XY.mean(0) +3033 +3034 for xy in XY: +3035 ppl.plot([xy[0], XY0[0]], [xy[1], XY0[1]], '-', lw = 1, color = analysis_color) +3036 +3037 ppl.plot(*XY.T, 'wo', mew = 1, mec = analysis_color) +3038 ppl.plot(*XY0, 'wo', mew = 2, mec = sample_color) +3039 ppl.text(*XY0, f' {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold') +3040 saved[s] = [XY, XY0] +3041 +3042 x1, x2, y1, y2 = ppl.axis() +3043 x0, dx = (x1+x2)/2, (x2-x1)/2 +3044 y0, dy = (y1+y2)/2, (y2-y1)/2 +3045 dx, dy = [max(max(dx, dy), radius)]*2 +3046 +3047 ppl.axis([ +3048 x0 - 1.2*dx, +3049 x0 + 1.2*dx, +3050 y0 - 1.2*dy, +3051 y0 + 1.2*dy, +3052 ]) 3053 -3054 if priority == 'replace': -3055 self.Nominal_D47 = {} -3056 for s in foo: -3057 if priority != 'old' or s not in self.Nominal_D47: -3058 self.Nominal_D47[s] = foo[s] -3059 +3054 XY0_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(XY0)) +3055 +3056 for xy, uid in zip(XY, UID): +3057 +3058 xy_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(xy)) +3059 vector_in_display_space = xy_in_display_space - XY0_in_display_space 3060 -3061 +3061 if (vector_in_display_space**2).sum() > 0: 3062 -3063class D48data(D4xdata): -3064 ''' -3065 Store and process data for a large set of Δ48 analyses, -3066 usually comprising more than one analytical session.
-3067 ''' -3068 -3069 Nominal_D4x = { -3070 'ETH-1': 0.138, -3071 'ETH-2': 0.138, -3072 'ETH-3': 0.270, -3073 'ETH-4': 0.223, -3074 'GU-1': -0.419, -3075 } # (Fiebig et al., 2019, 2021) -3076 ''' -3077 Nominal Δ48 values assigned to the Δ48 anchor samples, used by -3078 `D48data.standardize()` to normalize unknown samples to an absolute Δ48 -3079 reference frame. -3080 -3081 By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019), -3082 Fiebig et al. (in press)): -3083 -3084 ```py -3085 { -3086 'ETH-1' : 0.138, -3087 'ETH-2' : 0.138, -3088 'ETH-3' : 0.270, -3089 'ETH-4' : 0.223, -3090 'GU-1' : -0.419, -3091 } -3092 ``` -3093 ''' +3063 unit_vector_in_display_space = vector_in_display_space / ((vector_in_display_space**2).sum())**0.5 +3064 label_vector_in_display_space = vector_in_display_space + unit_vector_in_display_space * labeldist +3065 label_xy_in_display_space = XY0_in_display_space + label_vector_in_display_space +3066 label_xy_in_data_space = ax.transData.inverted().transform(fig.dpi_scale_trans.transform(label_xy_in_display_space)) +3067 +3068 ppl.text(*label_xy_in_data_space, uid, va = 'center', ha = 'center', color = analysis_color) +3069 +3070 else: +3071 +3072 ppl.text(*xy, f'{uid} ', va = 'center', ha = 'right', color = analysis_color) +3073 +3074 if radius: +3075 ax.add_artist(Ellipse( +3076 xy = XY0, +3077 width = radius*2, +3078 height = radius*2, +3079 ls = (0, (2,2)), +3080 lw = .7, +3081 ec = analysis_color, +3082 fc = 'None', +3083 )) +3084 ppl.text( +3085 XY0[0], +3086 XY0[1]-radius, +3087 f'\n± {radius*1e3:.0f} ppm', +3088 color = analysis_color, +3089 va = 'top', +3090 ha = 'center', +3091 linespacing = 0.4, +3092 size = 8, +3093 ) 3094 -3095 -3096 @property -3097 def Nominal_D48(self): -3098 return self.Nominal_D4x +3095 if not os.path.exists(dir): +3096 os.makedirs(dir) +3097 fig.savefig(f'{dir}/{s}.pdf') +3098 ppl.close(fig) 3099 -3100 -3101 @Nominal_D48.setter -3102 def Nominal_D48(self, new): -3103 self.Nominal_D4x = dict(**new) -3104 self.refresh() -3105 -3106 -3107 def __init__(self, l = [], **kwargs): -3108 ''' -3109 **Parameters:** same as `D4xdata.__init__()` -3110 ''' -3111 D4xdata.__init__(self, l = l, mass = '48', **kwargs) -3112 -3113 -3114class _SessionPlot(): -3115 ''' -3116 Simple placeholder class -3117 ''' -3118 def __init__(self): -3119 pass +3100 fig = ppl.figure(figsize = figsize) +3101 fig.subplots_adjust(*subplots_adjust) +3102 ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)') +3103 ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)') +3104 +3105 for s in saved: +3106 for xy in saved[s][0]: +3107 ppl.plot([xy[0], saved[s][1][0]], [xy[1], saved[s][1][1]], '-', lw = 1, color = analysis_color) +3108 ppl.plot(*saved[s][0].T, 'wo', mew = 1, mec = analysis_color) +3109 ppl.plot(*saved[s][1], 'wo', mew = 1.5, mec = sample_color) +3110 ppl.text(*saved[s][1], f' {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold') +3111 +3112 x1, x2, y1, y2 = ppl.axis() +3113 ppl.axis([ +3114 x1 - (x2-x1)/10, +3115 x2 + (x2-x1)/10, +3116 y1 - (y2-y1)/10, +3117 y2 + (y2-y1)/10, +3118 ]) +3119 +3120 +3121 if not os.path.exists(dir): +3122 os.makedirs(dir) +3123 fig.savefig(f'{dir}/__all__.pdf') +3124 if show: +3125 ppl.show() +3126 ppl.close(fig) +3127 +3128 +3129 +3130class D47data(D4xdata): +3131 ''' +3132 Store and process data for a large set of Δ47 analyses, +3133 usually comprising more than one analytical session. 
+3134 ''' +3135 +3136 Nominal_D4x = { +3137 'ETH-1': 0.2052, +3138 'ETH-2': 0.2085, +3139 'ETH-3': 0.6132, +3140 'ETH-4': 0.4511, +3141 'IAEA-C1': 0.3018, +3142 'IAEA-C2': 0.6409, +3143 'MERCK': 0.5135, +3144 } # I-CDES (Bernasconi et al., 2021) +3145 ''' +3146 Nominal Δ47 values assigned to the Δ47 anchor samples, used by +3147 `D47data.standardize()` to normalize unknown samples to an absolute Δ47 +3148 reference frame. +3149 +3150 By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)): +3151 ```py +3152 { +3153 'ETH-1' : 0.2052, +3154 'ETH-2' : 0.2085, +3155 'ETH-3' : 0.6132, +3156 'ETH-4' : 0.4511, +3157 'IAEA-C1' : 0.3018, +3158 'IAEA-C2' : 0.6409, +3159 'MERCK' : 0.5135, +3160 } +3161 ``` +3162 ''' +3163 +3164 +3165 @property +3166 def Nominal_D47(self): +3167 return self.Nominal_D4x +3168 +3169 +3170 @Nominal_D47.setter +3171 def Nominal_D47(self, new): +3172 self.Nominal_D4x = dict(**new) +3173 self.refresh() +3174 +3175 +3176 def __init__(self, l = [], **kwargs): +3177 ''' +3178 **Parameters:** same as `D4xdata.__init__()` +3179 ''' +3180 D4xdata.__init__(self, l = l, mass = '47', **kwargs) +3181 +3182 +3183 def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'): +3184 ''' +3185 Find all samples for which `Teq` is specified, compute equilibrium Δ47 +3186 value for that temperature, and treat these samples as additional anchors. +3187 +3188 **Parameters** +3189 +3190 + `fCo2eqD47`: Which CO2 equilibrium law to use +3191 (`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127); +3192 `wang`: [Wang et al. (2004)](https://doi.org/10.1016/j.gca.2004.05.039)). +3193 + `priority`: if `replace`: forget old anchors and only use the new ones; +3194 if `new`: keep pre-existing anchors but update them in case of conflict +3195 between old and new Δ47 values; +3196 if `old`: keep pre-existing anchors but preserve their original Δ47 +3197 values in case of conflict. +3198 ''' +3199 f = { +3200 'petersen': fCO2eqD47_Petersen, +3201 'wang': fCO2eqD47_Wang, +3202 }[fCo2eqD47] +3203 foo = {} +3204 for r in self: +3205 if 'Teq' in r: +3206 if r['Sample'] in foo: +3207 assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.' +3208 else: +3209 foo[r['Sample']] = f(r['Teq']) +3210 else: +3211 assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.' +3212 +3213 if priority == 'replace': +3214 self.Nominal_D47 = {} +3215 for s in foo: +3216 if priority != 'old' or s not in self.Nominal_D47: +3217 self.Nominal_D47[s] = foo[s] +3218 +3219 +3220 +3221 +3222class D48data(D4xdata): +3223 ''' +3224 Store and process data for a large set of Δ48 analyses, +3225 usually comprising more than one analytical session. +3226 ''' +3227 +3228 Nominal_D4x = { +3229 'ETH-1': 0.138, +3230 'ETH-2': 0.138, +3231 'ETH-3': 0.270, +3232 'ETH-4': 0.223, +3233 'GU-1': -0.419, +3234 } # (Fiebig et al., 2019, 2021) +3235 ''' +3236 Nominal Δ48 values assigned to the Δ48 anchor samples, used by +3237 `D48data.standardize()` to normalize unknown samples to an absolute Δ48 +3238 reference frame. +3239 +3240 By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019), +3241 Fiebig et al.
(in press)): +3242 +3243 ```py +3244 { +3245 'ETH-1' : 0.138, +3246 'ETH-2' : 0.138, +3247 'ETH-3' : 0.270, +3248 'ETH-4' : 0.223, +3249 'GU-1' : -0.419, +3250 } +3251 ``` +3252 ''' +3253 +3254 +3255 @property +3256 def Nominal_D48(self): +3257 return self.Nominal_D4x +3258 +3259 +3260 @Nominal_D48.setter +3261 def Nominal_D48(self, new): +3262 self.Nominal_D4x = dict(**new) +3263 self.refresh() +3264 +3265 +3266 def __init__(self, l = [], **kwargs): +3267 ''' +3268 **Parameters:** same as `D4xdata.__init__()` +3269 ''' +3270 D4xdata.__init__(self, l = l, mass = '48', **kwargs) +3271 +3272 +3273class _SessionPlot(): +3274 ''' +3275 Simple placeholder class +3276 ''' +3277 def __init__(self): +3278 pass @@ -4594,7 +4779,7 @@
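
The `Nominal_D47` / `Nominal_D48` properties added above are thin wrappers around `Nominal_D4x`: assigning to them goes through the setter, which copies the new dict and calls `refresh()`. Switching to a custom anchor set is therefore a plain assignment. A minimal sketch, assuming `rawdata` is a previously loaded list of analyses and `MY-STD` is a hypothetical in-house anchor:

```py
mydata = D47data(rawdata)
mydata.Nominal_D47 = {      # goes through the setter, hence refresh()
	'ETH-1': 0.2052,
	'ETH-2': 0.2085,
	'ETH-3': 0.6132,
	'MY-STD': 0.5000,       # hypothetical nominal Δ47 value
}
```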


    def - virtual_data( samples=[], a47=1.0, b47=0.0, c47=-0.9, a48=1.0, b48=0.0, c48=-0.45, rD47=0.015, rD48=0.045, d13Cwg_VPDB=None, d18Owg_VSMOW=None, session=None, Nominal_D47=None, Nominal_D48=None, Nominal_d13C_VPDB=None, Nominal_d18O_VPDB=None, ALPHA_18O_ACID_REACTION=None, R13_VPDB=None, R17_VSMOW=None, R18_VSMOW=None, LAMBDA_17=None, R18_VPDB=None, seed=0): + virtual_data( samples=[], a47=1.0, b47=0.0, c47=-0.9, a48=1.0, b48=0.0, c48=-0.45, rd45=0.02, rd46=0.06, rD47=0.015, rD48=0.045, d13Cwg_VPDB=None, d18Owg_VSMOW=None, session=None, Nominal_D47=None, Nominal_D48=None, Nominal_d13C_VPDB=None, Nominal_d18O_VPDB=None, ALPHA_18O_ACID_REACTION=None, R13_VPDB=None, R17_VSMOW=None, R18_VSMOW=None, LAMBDA_17=None, R18_VPDB=None, seed=0): @@ -4604,226 +4789,235 @@


    413 samples = [], 414 a47 = 1., b47 = 0., c47 = -0.9, 415 a48 = 1., b48 = 0., c48 = -0.45, -416 rD47 = 0.015, rD48 = 0.045, -417 d13Cwg_VPDB = None, d18Owg_VSMOW = None, -418 session = None, -419 Nominal_D47 = None, Nominal_D48 = None, -420 Nominal_d13C_VPDB = None, Nominal_d18O_VPDB = None, -421 ALPHA_18O_ACID_REACTION = None, -422 R13_VPDB = None, -423 R17_VSMOW = None, -424 R18_VSMOW = None, -425 LAMBDA_17 = None, -426 R18_VPDB = None, -427 seed = 0, -428 ): -429 ''' -430 Return list with simulated analyses from a single session. -431 -432 **Parameters** -433 -434 + `samples`: a list of entries; each entry is a dictionary with the following fields: -435 * `Sample`: the name of the sample -436 * `d13C_VPDB`, `d18O_VPDB`: bulk composition of the carbonate sample -437 * `D47`, `D48`, `D49`, `D17O` (all optional): clumped-isotope and oxygen-17 anomalies of the carbonate sample -438 * `N`: how many analyses to generate for this sample -439 + `a47`: scrambling factor for Δ47 -440 + `b47`: compositional nonlinearity for Δ47 -441 + `c47`: working gas offset for Δ47 -442 + `a48`: scrambling factor for Δ48 -443 + `b48`: compositional nonlinearity for Δ48 -444 + `c48`: working gas offset for Δ48 -445 + `rD47`: analytical repeatability of Δ47 -446 + `rD48`: analytical repeatability of Δ48 -447 + `d13Cwg_VPDB`, `d18Owg_VSMOW`: bulk composition of the working gas -448 (by default equal to the `simulate_single_analysis` default values) -449 + `session`: name of the session (no name by default) -450 + `Nominal_D47`, `Nominal_D48`: where to lookup Δ47 and Δ48 values -451 if `D47` or `D48` are not specified (by default equal to the `simulate_single_analysis` defaults) -452 + `Nominal_d13C_VPDB`, `Nominal_d18O_VPDB`: where to lookup δ13C and -453 δ18O values if `d13C_VPDB` or `d18O_VPDB` are not specified -454 (by default equal to the `simulate_single_analysis` defaults) -455 + `ALPHA_18O_ACID_REACTION`: 18O/16O acid fractionation factor -456 (by default equal to the `simulate_single_analysis` defaults) -457 + `R13_VPDB`, `R17_VSMOW`, `R18_VSMOW`, `LAMBDA_17`, `R18_VPDB`: oxygen-17 -458 correction parameters (by default equal to the `simulate_single_analysis` default) -459 + `seed`: explicitly set to a non-zero value to achieve random but repeatable simulations -460 -461 -462 Here is an example of using this method to generate an arbitrary combination of -463 anchors and unknowns for a bunch of sessions: -464 -465 ```py -466 args = dict( -467 samples = [ -468 dict(Sample = 'ETH-1', N = 4), -469 dict(Sample = 'ETH-2', N = 5), -470 dict(Sample = 'ETH-3', N = 6), -471 dict(Sample = 'FOO', N = 2, -472 d13C_VPDB = -5., d18O_VPDB = -10., -473 D47 = 0.3, D48 = 0.15), -474 ], rD47 = 0.010, rD48 = 0.030) -475 -476 session1 = virtual_data(session = 'Session_01', **args, seed = 123) -477 session2 = virtual_data(session = 'Session_02', **args, seed = 1234) -478 session3 = virtual_data(session = 'Session_03', **args, seed = 12345) -479 session4 = virtual_data(session = 'Session_04', **args, seed = 123456) -480 -481 D = D47data(session1 + session2 + session3 + session4) -482 -483 D.crunch() -484 D.standardize() +416 rd45 = 0.020, rd46 = 0.060, +417 rD47 = 0.015, rD48 = 0.045, +418 d13Cwg_VPDB = None, d18Owg_VSMOW = None, +419 session = None, +420 Nominal_D47 = None, Nominal_D48 = None, +421 Nominal_d13C_VPDB = None, Nominal_d18O_VPDB = None, +422 ALPHA_18O_ACID_REACTION = None, +423 R13_VPDB = None, +424 R17_VSMOW = None, +425 R18_VSMOW = None, +426 LAMBDA_17 = None, +427 R18_VPDB = None, +428 seed = 0, +429 ): 
+430 ''' +431 Return list with simulated analyses from a single session. +432 +433 **Parameters** +434 +435 + `samples`: a list of entries; each entry is a dictionary with the following fields: +436 * `Sample`: the name of the sample +437 * `d13C_VPDB`, `d18O_VPDB`: bulk composition of the carbonate sample +438 * `D47`, `D48`, `D49`, `D17O` (all optional): clumped-isotope and oxygen-17 anomalies of the carbonate sample +439 * `N`: how many analyses to generate for this sample +440 + `a47`: scrambling factor for Δ47 +441 + `b47`: compositional nonlinearity for Δ47 +442 + `c47`: working gas offset for Δ47 +443 + `a48`: scrambling factor for Δ48 +444 + `b48`: compositional nonlinearity for Δ48 +445 + `c48`: working gas offset for Δ48 +446 + `rd45`: analytical repeatability of δ45 +447 + `rd46`: analytical repeatability of δ46 +448 + `rD47`: analytical repeatability of Δ47 +449 + `rD48`: analytical repeatability of Δ48 +450 + `d13Cwg_VPDB`, `d18Owg_VSMOW`: bulk composition of the working gas +451 (by default equal to the `simulate_single_analysis` default values) +452 + `session`: name of the session (no name by default) +453 + `Nominal_D47`, `Nominal_D48`: where to lookup Δ47 and Δ48 values +454 if `D47` or `D48` are not specified (by default equal to the `simulate_single_analysis` defaults) +455 + `Nominal_d13C_VPDB`, `Nominal_d18O_VPDB`: where to lookup δ13C and +456 δ18O values if `d13C_VPDB` or `d18O_VPDB` are not specified +457 (by default equal to the `simulate_single_analysis` defaults) +458 + `ALPHA_18O_ACID_REACTION`: 18O/16O acid fractionation factor +459 (by default equal to the `simulate_single_analysis` defaults) +460 + `R13_VPDB`, `R17_VSMOW`, `R18_VSMOW`, `LAMBDA_17`, `R18_VPDB`: oxygen-17 +461 correction parameters (by default equal to the `simulate_single_analysis` default) +462 + `seed`: explicitly set to a non-zero value to achieve random but repeatable simulations +463 +464 +465 Here is an example of using this method to generate an arbitrary combination of +466 anchors and unknowns for a bunch of sessions: +467 +468 ```py +469 args = dict( +470 samples = [ +471 dict(Sample = 'ETH-1', N = 4), +472 dict(Sample = 'ETH-2', N = 5), +473 dict(Sample = 'ETH-3', N = 6), +474 dict(Sample = 'FOO', N = 2, +475 d13C_VPDB = -5., d18O_VPDB = -10., +476 D47 = 0.3, D48 = 0.15), +477 ], rD47 = 0.010, rD48 = 0.030) +478 +479 session1 = virtual_data(session = 'Session_01', **args, seed = 123) +480 session2 = virtual_data(session = 'Session_02', **args, seed = 1234) +481 session3 = virtual_data(session = 'Session_03', **args, seed = 12345) +482 session4 = virtual_data(session = 'Session_04', **args, seed = 123456) +483 +484 D = D47data(session1 + session2 + session3 + session4) 485 -486 D.table_of_sessions(verbose = True, save_to_file = False) -487 D.table_of_samples(verbose = True, save_to_file = False) -488 D.table_of_analyses(verbose = True, save_to_file = False) -489 ``` -490 -491 This should output something like: -492 -493 ``` -494 [table_of_sessions] -495 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– -496 Session Na Nu d13Cwg_VPDB d18Owg_VSMOW r_d13C r_d18O r_D47 a ± SE 1e3 x b ± SE c ± SE -497 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– -498 Session_01 15 2 -4.000 26.000 0.0000 0.0000 0.0110 0.997 ± 0.017 -0.097 ± 0.244 -0.896 ± 0.006 -499 Session_02 15 2 -4.000 26.000 0.0000 0.0000 0.0109 1.002 ± 0.017 -0.110 ± 0.244 -0.901 ± 0.006 -500 Session_03 15 2 -4.000 
26.000 0.0000 0.0000 0.0107 1.010 ± 0.017 -0.037 ± 0.244 -0.904 ± 0.006 -501 Session_04 15 2 -4.000 26.000 0.0000 0.0000 0.0106 1.001 ± 0.017 -0.181 ± 0.244 -0.894 ± 0.006 -502 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– -503 -504 [table_of_samples] -505 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– -506 Sample N d13C_VPDB d18O_VSMOW D47 SE 95% CL SD p_Levene -507 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– -508 ETH-1 16 2.02 37.02 0.2052 0.0079 -509 ETH-2 20 -10.17 19.88 0.2085 0.0100 -510 ETH-3 24 1.71 37.45 0.6132 0.0105 -511 FOO 8 -5.00 28.91 0.2989 0.0040 ± 0.0080 0.0101 0.638 -512 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– -513 -514 [table_of_analyses] -515 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– -516 UID Session Sample d13Cwg_VPDB d18Owg_VSMOW d45 d46 d47 d48 d49 d13C_VPDB d18O_VSMOW D47raw D48raw D49raw D47 -517 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– -518 1 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.122986 21.273526 27.780042 2.020000 37.024281 -0.706013 -0.328878 -0.000013 0.192554 -519 2 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.130144 21.282615 27.780042 2.020000 37.024281 -0.698974 -0.319981 -0.000013 0.199615 -520 3 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.149219 21.299572 27.780042 2.020000 37.024281 -0.680215 -0.303383 -0.000013 0.218429 -521 4 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.136616 21.233128 27.780042 2.020000 37.024281 -0.692609 -0.368421 -0.000013 0.205998 -522 5 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.697171 -12.203054 -18.023381 -10.170000 19.875825 -0.680771 -0.290128 -0.000002 0.215054 -523 6 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701124 -12.184422 -18.023381 -10.170000 19.875825 -0.684772 -0.271272 -0.000002 0.211041 -524 7 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.715105 -12.195251 -18.023381 -10.170000 19.875825 -0.698923 -0.282232 -0.000002 0.196848 -525 8 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701529 -12.204963 -18.023381 -10.170000 19.875825 -0.685182 -0.292061 -0.000002 0.210630 -526 9 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.711420 -12.228478 -18.023381 -10.170000 19.875825 -0.695193 -0.315859 -0.000002 0.200589 -527 10 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.666719 22.296486 28.306614 1.710000 37.450394 -0.290459 -0.147284 -0.000014 0.609363 -528 11 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.671553 22.291060 28.306614 1.710000 37.450394 -0.285706 -0.152592 -0.000014 0.614130 -529 12 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.652854 22.273271 28.306614 1.710000 37.450394 -0.304093 -0.169990 -0.000014 0.595689 -530 13 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.684168 22.263156 28.306614 1.710000 37.450394 -0.273302 -0.179883 -0.000014 0.626572 -531 14 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.662702 22.253578 28.306614 1.710000 37.450394 -0.294409 -0.189251 -0.000014 0.605401 -532 15 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.681957 22.230907 28.306614 1.710000 37.450394 -0.275476 -0.211424 -0.000014 0.624391 -533 16 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.312044 5.395798 4.665655 
-5.000000 28.907344 -0.598436 -0.268176 -0.000006 0.298996 -534 17 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.328123 5.307086 4.665655 -5.000000 28.907344 -0.582387 -0.356389 -0.000006 0.315092 -535 18 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.122201 21.340606 27.780042 2.020000 37.024281 -0.706785 -0.263217 -0.000013 0.195135 -536 19 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.134868 21.305714 27.780042 2.020000 37.024281 -0.694328 -0.297370 -0.000013 0.207564 -537 20 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.140008 21.261931 27.780042 2.020000 37.024281 -0.689273 -0.340227 -0.000013 0.212607 -538 21 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.135540 21.298472 27.780042 2.020000 37.024281 -0.693667 -0.304459 -0.000013 0.208224 -539 22 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701213 -12.202602 -18.023381 -10.170000 19.875825 -0.684862 -0.289671 -0.000002 0.213842 -540 23 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.685649 -12.190405 -18.023381 -10.170000 19.875825 -0.669108 -0.277327 -0.000002 0.229559 -541 24 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.719003 -12.257955 -18.023381 -10.170000 19.875825 -0.702869 -0.345692 -0.000002 0.195876 -542 25 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.700592 -12.204641 -18.023381 -10.170000 19.875825 -0.684233 -0.291735 -0.000002 0.214469 -543 26 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720426 -12.214561 -18.023381 -10.170000 19.875825 -0.704308 -0.301774 -0.000002 0.194439 -544 27 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.673044 22.262090 28.306614 1.710000 37.450394 -0.284240 -0.180926 -0.000014 0.616730 -545 28 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.666542 22.263401 28.306614 1.710000 37.450394 -0.290634 -0.179643 -0.000014 0.610350 -546 29 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.680487 22.243486 28.306614 1.710000 37.450394 -0.276921 -0.199121 -0.000014 0.624031 -547 30 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.663900 22.245175 28.306614 1.710000 37.450394 -0.293231 -0.197469 -0.000014 0.607759 -548 31 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.674379 22.301309 28.306614 1.710000 37.450394 -0.282927 -0.142568 -0.000014 0.618039 -549 32 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.660825 22.270466 28.306614 1.710000 37.450394 -0.296255 -0.172733 -0.000014 0.604742 -550 33 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.294076 5.349940 4.665655 -5.000000 28.907344 -0.616369 -0.313776 -0.000006 0.283707 -551 34 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.313775 5.292121 4.665655 -5.000000 28.907344 -0.596708 -0.371269 -0.000006 0.303323 -552 35 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.121613 21.259909 27.780042 2.020000 37.024281 -0.707364 -0.342207 -0.000013 0.194934 -553 36 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.145714 21.304889 27.780042 2.020000 37.024281 -0.683661 -0.298178 -0.000013 0.218401 -554 37 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.126573 21.325093 27.780042 2.020000 37.024281 -0.702485 -0.278401 -0.000013 0.199764 -555 38 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.132057 21.323211 27.780042 2.020000 37.024281 -0.697092 -0.280244 -0.000013 0.205104 -556 39 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.708448 -12.232023 -18.023381 -10.170000 19.875825 -0.692185 -0.319447 -0.000002 0.208915 -557 40 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.714417 -12.202504 -18.023381 -10.170000 19.875825 
-0.698226 -0.289572 -0.000002 0.202934 -558 41 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720039 -12.264469 -18.023381 -10.170000 19.875825 -0.703917 -0.352285 -0.000002 0.197300 -559 42 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701953 -12.228550 -18.023381 -10.170000 19.875825 -0.685611 -0.315932 -0.000002 0.215423 -560 43 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.704535 -12.213634 -18.023381 -10.170000 19.875825 -0.688224 -0.300836 -0.000002 0.212837 -561 44 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.652920 22.230043 28.306614 1.710000 37.450394 -0.304028 -0.212269 -0.000014 0.594265 -562 45 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.691485 22.261017 28.306614 1.710000 37.450394 -0.266106 -0.181975 -0.000014 0.631810 -563 46 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.679119 22.305357 28.306614 1.710000 37.450394 -0.278266 -0.138609 -0.000014 0.619771 -564 47 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.663623 22.327286 28.306614 1.710000 37.450394 -0.293503 -0.117161 -0.000014 0.604685 -565 48 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.678524 22.282103 28.306614 1.710000 37.450394 -0.278851 -0.161352 -0.000014 0.619192 -566 49 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.666246 22.283361 28.306614 1.710000 37.450394 -0.290925 -0.160121 -0.000014 0.607238 -567 50 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.309929 5.340249 4.665655 -5.000000 28.907344 -0.600546 -0.323413 -0.000006 0.300148 -568 51 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.317548 5.334102 4.665655 -5.000000 28.907344 -0.592942 -0.329524 -0.000006 0.307676 -569 52 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.136865 21.300298 27.780042 2.020000 37.024281 -0.692364 -0.302672 -0.000013 0.204033 -570 53 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.133538 21.291260 27.780042 2.020000 37.024281 -0.695637 -0.311519 -0.000013 0.200762 -571 54 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.139991 21.319865 27.780042 2.020000 37.024281 -0.689290 -0.283519 -0.000013 0.207107 -572 55 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.145748 21.330075 27.780042 2.020000 37.024281 -0.683629 -0.273524 -0.000013 0.212766 -573 56 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702989 -12.202762 -18.023381 -10.170000 19.875825 -0.686660 -0.289833 -0.000002 0.204507 -574 57 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.692830 -12.240287 -18.023381 -10.170000 19.875825 -0.676377 -0.327811 -0.000002 0.214786 -575 58 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702899 -12.180291 -18.023381 -10.170000 19.875825 -0.686568 -0.267091 -0.000002 0.204598 -576 59 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.709282 -12.282257 -18.023381 -10.170000 19.875825 -0.693029 -0.370287 -0.000002 0.198140 -577 60 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.679330 -12.235994 -18.023381 -10.170000 19.875825 -0.662712 -0.323466 -0.000002 0.228446 -578 61 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.695594 22.238663 28.306614 1.710000 37.450394 -0.262066 -0.203838 -0.000014 0.634200 -579 62 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.663504 22.286354 28.306614 1.710000 37.450394 -0.293620 -0.157194 -0.000014 0.602656 -580 63 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666457 22.254290 28.306614 1.710000 37.450394 -0.290717 -0.188555 -0.000014 0.605558 -581 64 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666910 22.223232 28.306614 1.710000 37.450394 -0.290271 
-0.218930 -0.000014 0.606004 -582 65 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.679662 22.257256 28.306614 1.710000 37.450394 -0.277732 -0.185653 -0.000014 0.618539 -583 66 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.676768 22.267680 28.306614 1.710000 37.450394 -0.280578 -0.175459 -0.000014 0.615693 -584 67 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.307663 5.317330 4.665655 -5.000000 28.907344 -0.602808 -0.346202 -0.000006 0.290853 -585 68 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.308562 5.331400 4.665655 -5.000000 28.907344 -0.601911 -0.332212 -0.000006 0.291749 -586 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– -587 ``` -588 ''' -589 -590 kwargs = locals().copy() -591 -592 from numpy import random as nprandom -593 if seed: -594 rng = nprandom.default_rng(seed) -595 else: -596 rng = nprandom.default_rng() -597 -598 N = sum([s['N'] for s in samples]) -599 errors47 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors -600 errors47 *= rD47 / stdev(errors47) # scale errors to rD47 -601 errors48 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors -602 errors48 *= rD48 / stdev(errors48) # scale errors to rD48 -603 -604 k = 0 -605 out = [] -606 for s in samples: -607 kw = {} -608 kw['sample'] = s['Sample'] -609 kw = { -610 **kw, -611 **{var: kwargs[var] -612 for var in [ -613 'd13Cwg_VPDB', 'd18Owg_VSMOW', 'ALPHA_18O_ACID_REACTION', -614 'Nominal_D47', 'Nominal_D48', 'Nominal_d13C_VPDB', 'Nominal_d18O_VPDB', -615 'R13_VPDB', 'R17_VSMOW', 'R18_VSMOW', 'LAMBDA_17', 'R18_VPDB', -616 'a47', 'b47', 'c47', 'a48', 'b48', 'c48', -617 ] -618 if kwargs[var] is not None}, -619 **{var: s[var] -620 for var in ['d13C_VPDB', 'd18O_VPDB', 'D47', 'D48', 'D49', 'D17O'] -621 if var in s}, -622 } -623 -624 sN = s['N'] -625 while sN: -626 out.append(simulate_single_analysis(**kw)) -627 out[-1]['d47'] += errors47[k] * a47 -628 out[-1]['d48'] += errors48[k] * a48 -629 sN -= 1 -630 k += 1 -631 -632 if session is not None: -633 for r in out: -634 r['Session'] = session -635 return out +486 D.crunch() +487 D.standardize() +488 +489 D.table_of_sessions(verbose = True, save_to_file = False) +490 D.table_of_samples(verbose = True, save_to_file = False) +491 D.table_of_analyses(verbose = True, save_to_file = False) +492 ``` +493 +494 This should output something like: +495 +496 ``` +497 [table_of_sessions] +498 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– +499 Session Na Nu d13Cwg_VPDB d18Owg_VSMOW r_d13C r_d18O r_D47 a ± SE 1e3 x b ± SE c ± SE +500 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– +501 Session_01 15 2 -4.000 26.000 0.0000 0.0000 0.0110 0.997 ± 0.017 -0.097 ± 0.244 -0.896 ± 0.006 +502 Session_02 15 2 -4.000 26.000 0.0000 0.0000 0.0109 1.002 ± 0.017 -0.110 ± 0.244 -0.901 ± 0.006 +503 Session_03 15 2 -4.000 26.000 0.0000 0.0000 0.0107 1.010 ± 0.017 -0.037 ± 0.244 -0.904 ± 0.006 +504 Session_04 15 2 -4.000 26.000 0.0000 0.0000 0.0106 1.001 ± 0.017 -0.181 ± 0.244 -0.894 ± 0.006 +505 –––––––––– –– –– ––––––––––– –––––––––––– –––––– –––––– –––––– ––––––––––––– –––––––––––––– –––––––––––––– +506 +507 [table_of_samples] +508 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– +509 Sample N d13C_VPDB d18O_VSMOW D47 SE 95% CL SD p_Levene +510 –––––– –– ––––––––– –––––––––– –––––– 
–––––– –––––––– –––––– –––––––– +511 ETH-1 16 2.02 37.02 0.2052 0.0079 +512 ETH-2 20 -10.17 19.88 0.2085 0.0100 +513 ETH-3 24 1.71 37.45 0.6132 0.0105 +514 FOO 8 -5.00 28.91 0.2989 0.0040 ± 0.0080 0.0101 0.638 +515 –––––– –– ––––––––– –––––––––– –––––– –––––– –––––––– –––––– –––––––– +516 +517 [table_of_analyses] +518 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– +519 UID Session Sample d13Cwg_VPDB d18Owg_VSMOW d45 d46 d47 d48 d49 d13C_VPDB d18O_VSMOW D47raw D48raw D49raw D47 +520 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– +521 1 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.122986 21.273526 27.780042 2.020000 37.024281 -0.706013 -0.328878 -0.000013 0.192554 +522 2 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.130144 21.282615 27.780042 2.020000 37.024281 -0.698974 -0.319981 -0.000013 0.199615 +523 3 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.149219 21.299572 27.780042 2.020000 37.024281 -0.680215 -0.303383 -0.000013 0.218429 +524 4 Session_01 ETH-1 -4.000 26.000 6.018962 10.747026 16.136616 21.233128 27.780042 2.020000 37.024281 -0.692609 -0.368421 -0.000013 0.205998 +525 5 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.697171 -12.203054 -18.023381 -10.170000 19.875825 -0.680771 -0.290128 -0.000002 0.215054 +526 6 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701124 -12.184422 -18.023381 -10.170000 19.875825 -0.684772 -0.271272 -0.000002 0.211041 +527 7 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.715105 -12.195251 -18.023381 -10.170000 19.875825 -0.698923 -0.282232 -0.000002 0.196848 +528 8 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701529 -12.204963 -18.023381 -10.170000 19.875825 -0.685182 -0.292061 -0.000002 0.210630 +529 9 Session_01 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.711420 -12.228478 -18.023381 -10.170000 19.875825 -0.695193 -0.315859 -0.000002 0.200589 +530 10 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.666719 22.296486 28.306614 1.710000 37.450394 -0.290459 -0.147284 -0.000014 0.609363 +531 11 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.671553 22.291060 28.306614 1.710000 37.450394 -0.285706 -0.152592 -0.000014 0.614130 +532 12 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.652854 22.273271 28.306614 1.710000 37.450394 -0.304093 -0.169990 -0.000014 0.595689 +533 13 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.684168 22.263156 28.306614 1.710000 37.450394 -0.273302 -0.179883 -0.000014 0.626572 +534 14 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.662702 22.253578 28.306614 1.710000 37.450394 -0.294409 -0.189251 -0.000014 0.605401 +535 15 Session_01 ETH-3 -4.000 26.000 5.742374 11.161270 16.681957 22.230907 28.306614 1.710000 37.450394 -0.275476 -0.211424 -0.000014 0.624391 +536 16 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.312044 5.395798 4.665655 -5.000000 28.907344 -0.598436 -0.268176 -0.000006 0.298996 +537 17 Session_01 FOO -4.000 26.000 -0.840413 2.828738 1.328123 5.307086 4.665655 -5.000000 28.907344 -0.582387 -0.356389 -0.000006 0.315092 +538 18 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.122201 21.340606 27.780042 2.020000 37.024281 -0.706785 -0.263217 -0.000013 0.195135 +539 19 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.134868 21.305714 27.780042 2.020000 37.024281 -0.694328 -0.297370 -0.000013 0.207564 +540 
20 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.140008 21.261931 27.780042 2.020000 37.024281 -0.689273 -0.340227 -0.000013 0.212607 +541 21 Session_02 ETH-1 -4.000 26.000 6.018962 10.747026 16.135540 21.298472 27.780042 2.020000 37.024281 -0.693667 -0.304459 -0.000013 0.208224 +542 22 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701213 -12.202602 -18.023381 -10.170000 19.875825 -0.684862 -0.289671 -0.000002 0.213842 +543 23 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.685649 -12.190405 -18.023381 -10.170000 19.875825 -0.669108 -0.277327 -0.000002 0.229559 +544 24 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.719003 -12.257955 -18.023381 -10.170000 19.875825 -0.702869 -0.345692 -0.000002 0.195876 +545 25 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.700592 -12.204641 -18.023381 -10.170000 19.875825 -0.684233 -0.291735 -0.000002 0.214469 +546 26 Session_02 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720426 -12.214561 -18.023381 -10.170000 19.875825 -0.704308 -0.301774 -0.000002 0.194439 +547 27 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.673044 22.262090 28.306614 1.710000 37.450394 -0.284240 -0.180926 -0.000014 0.616730 +548 28 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.666542 22.263401 28.306614 1.710000 37.450394 -0.290634 -0.179643 -0.000014 0.610350 +549 29 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.680487 22.243486 28.306614 1.710000 37.450394 -0.276921 -0.199121 -0.000014 0.624031 +550 30 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.663900 22.245175 28.306614 1.710000 37.450394 -0.293231 -0.197469 -0.000014 0.607759 +551 31 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.674379 22.301309 28.306614 1.710000 37.450394 -0.282927 -0.142568 -0.000014 0.618039 +552 32 Session_02 ETH-3 -4.000 26.000 5.742374 11.161270 16.660825 22.270466 28.306614 1.710000 37.450394 -0.296255 -0.172733 -0.000014 0.604742 +553 33 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.294076 5.349940 4.665655 -5.000000 28.907344 -0.616369 -0.313776 -0.000006 0.283707 +554 34 Session_02 FOO -4.000 26.000 -0.840413 2.828738 1.313775 5.292121 4.665655 -5.000000 28.907344 -0.596708 -0.371269 -0.000006 0.303323 +555 35 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.121613 21.259909 27.780042 2.020000 37.024281 -0.707364 -0.342207 -0.000013 0.194934 +556 36 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.145714 21.304889 27.780042 2.020000 37.024281 -0.683661 -0.298178 -0.000013 0.218401 +557 37 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.126573 21.325093 27.780042 2.020000 37.024281 -0.702485 -0.278401 -0.000013 0.199764 +558 38 Session_03 ETH-1 -4.000 26.000 6.018962 10.747026 16.132057 21.323211 27.780042 2.020000 37.024281 -0.697092 -0.280244 -0.000013 0.205104 +559 39 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.708448 -12.232023 -18.023381 -10.170000 19.875825 -0.692185 -0.319447 -0.000002 0.208915 +560 40 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.714417 -12.202504 -18.023381 -10.170000 19.875825 -0.698226 -0.289572 -0.000002 0.202934 +561 41 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.720039 -12.264469 -18.023381 -10.170000 19.875825 -0.703917 -0.352285 -0.000002 0.197300 +562 42 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.701953 -12.228550 -18.023381 -10.170000 19.875825 -0.685611 -0.315932 -0.000002 0.215423 +563 43 Session_03 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.704535 -12.213634 -18.023381 -10.170000 19.875825 -0.688224 -0.300836 -0.000002 0.212837 
+564 44 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.652920 22.230043 28.306614 1.710000 37.450394 -0.304028 -0.212269 -0.000014 0.594265 +565 45 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.691485 22.261017 28.306614 1.710000 37.450394 -0.266106 -0.181975 -0.000014 0.631810 +566 46 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.679119 22.305357 28.306614 1.710000 37.450394 -0.278266 -0.138609 -0.000014 0.619771 +567 47 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.663623 22.327286 28.306614 1.710000 37.450394 -0.293503 -0.117161 -0.000014 0.604685 +568 48 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.678524 22.282103 28.306614 1.710000 37.450394 -0.278851 -0.161352 -0.000014 0.619192 +569 49 Session_03 ETH-3 -4.000 26.000 5.742374 11.161270 16.666246 22.283361 28.306614 1.710000 37.450394 -0.290925 -0.160121 -0.000014 0.607238 +570 50 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.309929 5.340249 4.665655 -5.000000 28.907344 -0.600546 -0.323413 -0.000006 0.300148 +571 51 Session_03 FOO -4.000 26.000 -0.840413 2.828738 1.317548 5.334102 4.665655 -5.000000 28.907344 -0.592942 -0.329524 -0.000006 0.307676 +572 52 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.136865 21.300298 27.780042 2.020000 37.024281 -0.692364 -0.302672 -0.000013 0.204033 +573 53 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.133538 21.291260 27.780042 2.020000 37.024281 -0.695637 -0.311519 -0.000013 0.200762 +574 54 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.139991 21.319865 27.780042 2.020000 37.024281 -0.689290 -0.283519 -0.000013 0.207107 +575 55 Session_04 ETH-1 -4.000 26.000 6.018962 10.747026 16.145748 21.330075 27.780042 2.020000 37.024281 -0.683629 -0.273524 -0.000013 0.212766 +576 56 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702989 -12.202762 -18.023381 -10.170000 19.875825 -0.686660 -0.289833 -0.000002 0.204507 +577 57 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.692830 -12.240287 -18.023381 -10.170000 19.875825 -0.676377 -0.327811 -0.000002 0.214786 +578 58 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.702899 -12.180291 -18.023381 -10.170000 19.875825 -0.686568 -0.267091 -0.000002 0.204598 +579 59 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.709282 -12.282257 -18.023381 -10.170000 19.875825 -0.693029 -0.370287 -0.000002 0.198140 +580 60 Session_04 ETH-2 -4.000 26.000 -5.995859 -5.976076 -12.679330 -12.235994 -18.023381 -10.170000 19.875825 -0.662712 -0.323466 -0.000002 0.228446 +581 61 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.695594 22.238663 28.306614 1.710000 37.450394 -0.262066 -0.203838 -0.000014 0.634200 +582 62 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.663504 22.286354 28.306614 1.710000 37.450394 -0.293620 -0.157194 -0.000014 0.602656 +583 63 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666457 22.254290 28.306614 1.710000 37.450394 -0.290717 -0.188555 -0.000014 0.605558 +584 64 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.666910 22.223232 28.306614 1.710000 37.450394 -0.290271 -0.218930 -0.000014 0.606004 +585 65 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.679662 22.257256 28.306614 1.710000 37.450394 -0.277732 -0.185653 -0.000014 0.618539 +586 66 Session_04 ETH-3 -4.000 26.000 5.742374 11.161270 16.676768 22.267680 28.306614 1.710000 37.450394 -0.280578 -0.175459 -0.000014 0.615693 +587 67 Session_04 FOO -4.000 26.000 -0.840413 2.828738 1.307663 5.317330 4.665655 -5.000000 28.907344 -0.602808 -0.346202 -0.000006 0.290853 +588 68 Session_04 FOO -4.000 
26.000 -0.840413 2.828738 1.308562 5.331400 4.665655 -5.000000 28.907344 -0.601911 -0.332212 -0.000006 0.291749 +589 ––– –––––––––– –––––– ––––––––––– –––––––––––– ––––––––– ––––––––– –––––––––– –––––––––– –––––––––– –––––––––– –––––––––– ––––––––– ––––––––– ––––––––– –––––––– +590 ``` +591 ''' +592 +593 kwargs = locals().copy() +594 +595 from numpy import random as nprandom +596 if seed: +597 rng = nprandom.default_rng(seed) +598 else: +599 rng = nprandom.default_rng() +600 +601 N = sum([s['N'] for s in samples]) +602 errors45 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors +603 errors45 *= rd45 / stdev(errors45) # scale errors to rd45 +604 errors46 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors +605 errors46 *= rd46 / stdev(errors46) # scale errors to rd46 +606 errors47 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors +607 errors47 *= rD47 / stdev(errors47) # scale errors to rD47 +608 errors48 = rng.normal(loc = 0, scale = 1, size = N) # generate random measurement errors +609 errors48 *= rD48 / stdev(errors48) # scale errors to rD48 +610 +611 k = 0 +612 out = [] +613 for s in samples: +614 kw = {} +615 kw['sample'] = s['Sample'] +616 kw = { +617 **kw, +618 **{var: kwargs[var] +619 for var in [ +620 'd13Cwg_VPDB', 'd18Owg_VSMOW', 'ALPHA_18O_ACID_REACTION', +621 'Nominal_D47', 'Nominal_D48', 'Nominal_d13C_VPDB', 'Nominal_d18O_VPDB', +622 'R13_VPDB', 'R17_VSMOW', 'R18_VSMOW', 'LAMBDA_17', 'R18_VPDB', +623 'a47', 'b47', 'c47', 'a48', 'b48', 'c48', +624 ] +625 if kwargs[var] is not None}, +626 **{var: s[var] +627 for var in ['d13C_VPDB', 'd18O_VPDB', 'D47', 'D48', 'D49', 'D17O'] +628 if var in s}, +629 } +630 +631 sN = s['N'] +632 while sN: +633 out.append(simulate_single_analysis(**kw)) +634 out[-1]['d45'] += errors45[k] +635 out[-1]['d46'] += errors46[k] +636 out[-1]['d47'] += (errors45[k] + errors46[k] + errors47[k]) * a47 +637 out[-1]['d48'] += (2*errors46[k] + errors48[k]) * a48 +638 sN -= 1 +639 k += 1 +640 +641 if session is not None: +642 for r in out: +643 r['Session'] = session +644 return out
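
The error model above deserves a note: because δ47 and δ48 are measured on the same gas as δ45 and δ46, their analytical noise is not independent of the bulk-composition noise. The new code therefore perturbs δ45 and δ46 first, then folds those same perturbations into δ47 (once each) and into δ48 (where the δ46 error counts twice, since to first order δ48 scales as twice δ46), each scaled by the corresponding scrambling factor. A standalone sketch of that construction, assuming numpy (the source's `stdev` is replaced here by `std(ddof = 1)`, which computes the same sample standard deviation):

```py
import numpy as np

rng = np.random.default_rng(123)
N = 60                                    # total number of simulated analyses
rd45, rd46, rD47, rD48 = 0.020, 0.060, 0.015, 0.045
a47, a48 = 1.0, 1.0                       # scrambling factors

def scaled_errors(r):
	'''Unit-variance noise rescaled so its sample SD equals the target repeatability.'''
	e = rng.normal(loc = 0, scale = 1, size = N)
	return e * r / e.std(ddof = 1)

e45, e46, e47, e48 = (scaled_errors(r) for r in (rd45, rd46, rD47, rD48))

d45_noise = e45
d46_noise = e46
d47_noise = (e45 + e46 + e47) * a47       # bulk-composition noise + Δ47 noise
d48_noise = (2 * e46 + e48) * a48         # δ46 noise counts twice for mass 48
```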
    @@ -4845,6 +5039,8 @@


  • a48: scrambling factor for Δ48
  • b48: compositional nonlinearity for Δ48
  • c48: working gas offset for Δ48
  • rd45: analytical repeatability of δ45
  • rd46: analytical repeatability of δ46 (see the usage sketch after this list)
  • rD47: analytical repeatability of Δ47
  • rD48: analytical repeatability of Δ48
  • d13Cwg_VPDB, d18Owg_VSMOW: bulk composition of the working gas @@ -5003,69 +5199,69 @@
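
For instance, the two new repeatability parameters can be set independently of `rD47`/`rD48`; a hypothetical call (parameter values chosen only for illustration):

```py
data = virtual_data(
	session = 'Session_01',
	samples = [
		dict(Sample = 'ETH-1', N = 4),
		dict(Sample = 'ETH-2', N = 4),
		dict(Sample = 'FOO', N = 2, d13C_VPDB = -5., d18O_VPDB = -10., D47 = 0.3),
	],
	rd45 = 0.010,   # tighter than the 0.020 default
	rd46 = 0.030,   # tighter than the 0.060 default
	seed = 1,       # non-zero seed => repeatable draws
)
```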


    -
    637def table_of_samples(
    -638	data47 = None,
    -639	data48 = None,
    -640	dir = 'output',
    -641	filename = None,
    -642	save_to_file = True,
    -643	print_out = True,
    -644	output = None,
    -645	):
    -646	'''
    -647	Print out, save to disk and/or return a combined table of samples
    -648	for a pair of `D47data` and `D48data` objects.
    -649
    -650	**Parameters**
    -651
    -652	+ `data47`: `D47data` instance
    -653	+ `data48`: `D48data` instance
    -654	+ `dir`: the directory in which to save the table
    -655	+ `filename`: the name of the csv file to write to
    -656	+ `save_to_file`: whether to save the table to disk
    -657	+ `print_out`: whether to print out the table
    -658	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -659		if set to `'raw'`: return a list of list of strings
    -660		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -661	'''
    -662	if data47 is None:
    -663		if data48 is None:
    -664			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    -665		else:
    -666			return data48.table_of_samples(
    -667				dir = dir,
    -668				filename = filename,
    -669				save_to_file = save_to_file,
    -670				print_out = print_out,
    -671				output = output
    -672				)
    -673	else:
    -674		if data48 is None:
    -675			return data47.table_of_samples(
    +            
    646def table_of_samples(
    +647	data47 = None,
    +648	data48 = None,
    +649	dir = 'output',
    +650	filename = None,
    +651	save_to_file = True,
    +652	print_out = True,
    +653	output = None,
    +654	):
    +655	'''
    +656	Print out, save to disk and/or return a combined table of samples
    +657	for a pair of `D47data` and `D48data` objects.
    +658
    +659	**Parameters**
    +660
    +661	+ `data47`: `D47data` instance
    +662	+ `data48`: `D48data` instance
    +663	+ `dir`: the directory in which to save the table
    +664	+ `filename`: the name of the csv file to write to
    +665	+ `save_to_file`: whether to save the table to disk
    +666	+ `print_out`: whether to print out the table
    +667	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +668		if set to `'raw'`: return a list of list of strings
    +669		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +670	'''
    +671	if data47 is None:
    +672		if data48 is None:
    +673			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    +674		else:
    +675			return data48.table_of_samples(
     676				dir = dir,
     677				filename = filename,
     678				save_to_file = save_to_file,
     679				print_out = print_out,
     680				output = output
     681				)
    -682		else:
    -683			out47 = data47.table_of_samples(save_to_file = False, print_out = False, output = 'raw')
    -684			out48 = data48.table_of_samples(save_to_file = False, print_out = False, output = 'raw')
    -685			out = transpose_table(transpose_table(out47) + transpose_table(out48)[4:])
    -686
    -687			if save_to_file:
    -688				if not os.path.exists(dir):
    -689					os.makedirs(dir)
    -690				if filename is None:
    -691					filename = f'D47D48_samples.csv'
    -692				with open(f'{dir}/{filename}', 'w') as fid:
    -693					fid.write(make_csv(out))
    -694			if print_out:
    -695				print('\n'+pretty_table(out))
    -696			if output == 'raw':
    -697				return out
    -698			elif output == 'pretty':
    -699				return pretty_table(out)
    +682	else:
    +683		if data48 is None:
    +684			return data47.table_of_samples(
    +685				dir = dir,
    +686				filename = filename,
    +687				save_to_file = save_to_file,
    +688				print_out = print_out,
    +689				output = output
    +690				)
    +691		else:
    +692			out47 = data47.table_of_samples(save_to_file = False, print_out = False, output = 'raw')
    +693			out48 = data48.table_of_samples(save_to_file = False, print_out = False, output = 'raw')
    +694			out = transpose_table(transpose_table(out47) + transpose_table(out48)[4:])
    +695
    +696			if save_to_file:
    +697				if not os.path.exists(dir):
    +698					os.makedirs(dir)
    +699				if filename is None:
    +700					filename = f'D47D48_samples.csv'
    +701				with open(f'{dir}/{filename}', 'w') as fid:
    +702					fid.write(make_csv(out))
    +703			if print_out:
    +704				print('\n'+pretty_table(out))
    +705			if output == 'raw':
    +706				return out
    +707			elif output == 'pretty':
    +708				return pretty_table(out)
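
A usage sketch, assuming `D47` and `D48` are already-crunched and standardized `D47data`/`D48data` instances built from the same analyses:

```py
tab = table_of_samples(
	data47 = D47,
	data48 = D48,
	save_to_file = False,
	print_out = False,
	output = 'pretty',
)
print(tab)   # one row per sample; Δ47 columns followed by the appended Δ48 columns
```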
     
    @@ -5100,75 +5296,75 @@


    -
    702def table_of_sessions(
    -703	data47 = None,
    -704	data48 = None,
    -705	dir = 'output',
    -706	filename = None,
    -707	save_to_file = True,
    -708	print_out = True,
    -709	output = None,
    -710	):
    -711	'''
    -712	Print out, save to disk and/or return a combined table of sessions
    -713	for a pair of `D47data` and `D48data` objects.
    -714	***Only applicable if the sessions in `data47` and those in `data48`
    -715	consist of the exact same sets of analyses.***
    -716
    -717	**Parameters**
    -718
    -719	+ `data47`: `D47data` instance
    -720	+ `data48`: `D48data` instance
    -721	+ `dir`: the directory in which to save the table
    -722	+ `filename`: the name of the csv file to write to
    -723	+ `save_to_file`: whether to save the table to disk
    -724	+ `print_out`: whether to print out the table
    -725	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -726		if set to `'raw'`: return a list of list of strings
    -727		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -728	'''
    -729	if data47 is None:
    -730		if data48 is None:
    -731			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    -732		else:
    -733			return data48.table_of_sessions(
    -734				dir = dir,
    -735				filename = filename,
    -736				save_to_file = save_to_file,
    -737				print_out = print_out,
    -738				output = output
    -739				)
    -740	else:
    -741		if data48 is None:
    -742			return data47.table_of_sessions(
    +            
    711def table_of_sessions(
    +712	data47 = None,
    +713	data48 = None,
    +714	dir = 'output',
    +715	filename = None,
    +716	save_to_file = True,
    +717	print_out = True,
    +718	output = None,
    +719	):
    +720	'''
    +721	Print out, save to disk and/or return a combined table of sessions
    +722	for a pair of `D47data` and `D48data` objects.
    +723	***Only applicable if the sessions in `data47` and those in `data48`
    +724	consist of the exact same sets of analyses.***
    +725
    +726	**Parameters**
    +727
    +728	+ `data47`: `D47data` instance
    +729	+ `data48`: `D48data` instance
    +730	+ `dir`: the directory in which to save the table
    +731	+ `filename`: the name of the csv file to write to
    +732	+ `save_to_file`: whether to save the table to disk
    +733	+ `print_out`: whether to print out the table
    +734	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +735		if set to `'raw'`: return a list of list of strings
    +736		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +737	'''
    +738	if data47 is None:
    +739		if data48 is None:
    +740			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    +741		else:
    +742			return data48.table_of_sessions(
     743				dir = dir,
     744				filename = filename,
     745				save_to_file = save_to_file,
     746				print_out = print_out,
     747				output = output
     748				)
    -749		else:
    -750			out47 = data47.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
    -751			out48 = data48.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
    -752			for k,x in enumerate(out47[0]):
    -753				if k>7:
    -754					out47[0][k] = out47[0][k].replace('a', 'a_47').replace('b', 'b_47').replace('c', 'c_47')
    -755					out48[0][k] = out48[0][k].replace('a', 'a_48').replace('b', 'b_48').replace('c', 'c_48')
    -756			out = transpose_table(transpose_table(out47) + transpose_table(out48)[7:])
    -757
    -758			if save_to_file:
    -759				if not os.path.exists(dir):
    -760					os.makedirs(dir)
    -761				if filename is None:
    -762					filename = f'D47D48_sessions.csv'
    -763				with open(f'{dir}/{filename}', 'w') as fid:
    -764					fid.write(make_csv(out))
    -765			if print_out:
    -766				print('\n'+pretty_table(out))
    -767			if output == 'raw':
    -768				return out
    -769			elif output == 'pretty':
    -770				return pretty_table(out)
    +749	else:
    +750		if data48 is None:
    +751			return data47.table_of_sessions(
    +752				dir = dir,
    +753				filename = filename,
    +754				save_to_file = save_to_file,
    +755				print_out = print_out,
    +756				output = output
    +757				)
    +758		else:
    +759			out47 = data47.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
    +760			out48 = data48.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')
    +761			for k,x in enumerate(out47[0]):
    +762				if k>7:
    +763					out47[0][k] = out47[0][k].replace('a', 'a_47').replace('b', 'b_47').replace('c', 'c_47')
    +764					out48[0][k] = out48[0][k].replace('a', 'a_48').replace('b', 'b_48').replace('c', 'c_48')
    +765			out = transpose_table(transpose_table(out47) + transpose_table(out48)[7:])
    +766
    +767			if save_to_file:
    +768				if not os.path.exists(dir):
    +769					os.makedirs(dir)
    +770				if filename is None:
    +771					filename = f'D47D48_sessions.csv'
    +772				with open(f'{dir}/{filename}', 'w') as fid:
    +773					fid.write(make_csv(out))
    +774			if print_out:
    +775				print('\n'+pretty_table(out))
    +776			if output == 'raw':
    +777				return out
    +778			elif output == 'pretty':
    +779				return pretty_table(out)
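
The header-renaming loop above is what keeps the merged table unambiguous: beyond the shared session columns, the per-session standardization parameters are relabelled `a_47`/`b_47`/`c_47` and `a_48`/`b_48`/`c_48`. A sketch of retrieving the combined table programmatically (same assumptions as before; both datasets must cover the exact same analyses):

```py
rows = table_of_sessions(
	data47 = D47,
	data48 = D48,
	save_to_file = False,
	print_out = False,
	output = 'raw',
)
print(rows[0])   # header row, e.g. [..., 'a_47 ± SE', ..., 'a_48 ± SE', ...]
```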
     
    @@ -5205,81 +5401,81 @@


    -
    773def table_of_analyses(
    -774	data47 = None,
    -775	data48 = None,
    -776	dir = 'output',
    -777	filename = None,
    -778	save_to_file = True,
    -779	print_out = True,
    -780	output = None,
    -781	):
    -782	'''
    -783	Print out, save to disk and/or return a combined table of analyses
    -784	for a pair of `D47data` and `D48data` objects.
    -785
    -786	If the sessions in `data47` and those in `data48` do not consist of
    -787	the exact same sets of analyses, the table will have two columns
    -788	`Session_47` and `Session_48` instead of a single `Session` column.
    -789
    -790	**Parameters**
    -791
    -792	+ `data47`: `D47data` instance
    -793	+ `data48`: `D48data` instance
    -794	+ `dir`: the directory in which to save the table
    -795	+ `filename`: the name of the csv file to write to
    -796	+ `save_to_file`: whether to save the table to disk
    -797	+ `print_out`: whether to print out the table
    -798	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -799		if set to `'raw'`: return a list of list of strings
    -800		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -801	'''
    -802	if data47 is None:
    -803		if data48 is None:
    -804			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    -805		else:
    -806			return data48.table_of_analyses(
    -807				dir = dir,
    -808				filename = filename,
    -809				save_to_file = save_to_file,
    -810				print_out = print_out,
    -811				output = output
    -812				)
    -813	else:
    -814		if data48 is None:
    -815			return data47.table_of_analyses(
    +            
    782def table_of_analyses(
    +783	data47 = None,
    +784	data48 = None,
    +785	dir = 'output',
    +786	filename = None,
    +787	save_to_file = True,
    +788	print_out = True,
    +789	output = None,
    +790	):
    +791	'''
    +792	Print out, save to disk and/or return a combined table of analyses
    +793	for a pair of `D47data` and `D48data` objects.
    +794
    +795	If the sessions in `data47` and those in `data48` do not consist of
    +796	the exact same sets of analyses, the table will have two columns
    +797	`Session_47` and `Session_48` instead of a single `Session` column.
    +798
    +799	**Parameters**
    +800
    +801	+ `data47`: `D47data` instance
    +802	+ `data48`: `D48data` instance
    +803	+ `dir`: the directory in which to save the table
    +804	+ `filename`: the name of the csv file to write to
    +805	+ `save_to_file`: whether to save the table to disk
    +806	+ `print_out`: whether to print out the table
    +807	+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +808		if set to `'raw'`: return a list of list of strings
    +809		(e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +810	'''
    +811	if data47 is None:
    +812		if data48 is None:
    +813			raise TypeError("Arguments must include at least one D47data() or D48data() instance.")
    +814		else:
    +815			return data48.table_of_analyses(
     816				dir = dir,
     817				filename = filename,
     818				save_to_file = save_to_file,
     819				print_out = print_out,
     820				output = output
     821				)
    -822		else:
    -823			out47 = data47.table_of_analyses(save_to_file = False, print_out = False, output = 'raw')
    -824			out48 = data48.table_of_analyses(save_to_file = False, print_out = False, output = 'raw')
    -825			
    -826			if [l[1] for l in out47[1:]] == [l[1] for l in out48[1:]]: # if sessions are identical
    -827				out = transpose_table(transpose_table(out47) + transpose_table(out48)[-1:])
    -828			else:
    -829				out47[0][1] = 'Session_47'
    -830				out48[0][1] = 'Session_48'
    -831				out47 = transpose_table(out47)
    -832				out48 = transpose_table(out48)
    -833				out = transpose_table(out47[:2] + out48[1:2] + out47[2:] + out48[-1:])
    -834
    -835			if save_to_file:
    -836				if not os.path.exists(dir):
    -837					os.makedirs(dir)
    -838				if filename is None:
    -839	filename = f'D47D48_analyses.csv'
    -840				with open(f'{dir}/{filename}', 'w') as fid:
    -841					fid.write(make_csv(out))
    -842			if print_out:
    -843				print('\n'+pretty_table(out))
    -844			if output == 'raw':
    -845				return out
    -846			elif output == 'pretty':
    -847				return pretty_table(out)
    +822	else:
    +823		if data48 is None:
    +824			return data47.table_of_analyses(
    +825				dir = dir,
    +826				filename = filename,
    +827				save_to_file = save_to_file,
    +828				print_out = print_out,
    +829				output = output
    +830				)
    +831		else:
    +832			out47 = data47.table_of_analyses(save_to_file = False, print_out = False, output = 'raw')
    +833			out48 = data48.table_of_analyses(save_to_file = False, print_out = False, output = 'raw')
    +834			
    +835			if [l[1] for l in out47[1:]] == [l[1] for l in out48[1:]]: # if sessions are identical
    +836				out = transpose_table(transpose_table(out47) + transpose_table(out48)[-1:])
    +837			else:
    +838				out47[0][1] = 'Session_47'
    +839				out48[0][1] = 'Session_48'
    +840				out47 = transpose_table(out47)
    +841				out48 = transpose_table(out48)
    +842				out = transpose_table(out47[:2] + out48[1:2] + out47[2:] + out48[-1:])
    +843
    +844			if save_to_file:
    +845				if not os.path.exists(dir):
    +846					os.makedirs(dir)
    +847				if filename is None:
    +848	filename = f'D47D48_analyses.csv'
    +849				with open(f'{dir}/{filename}', 'w') as fid:
    +850					fid.write(make_csv(out))
    +851			if print_out:
    +852				print('\n'+pretty_table(out))
    +853			if output == 'raw':
    +854				return out
    +855			elif output == 'pretty':
    +856				return pretty_table(out)
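
And the analysis-level counterpart: if `D47` and `D48` were standardized over different session groupings, the returned header row contains both `Session_47` and `Session_48` (a sketch under the same assumptions):

```py
out = table_of_analyses(
	data47 = D47,
	data48 = D48,
	save_to_file = False,
	print_out = False,
	output = 'raw',
)
print(out[0])   # header row; 'Session' or 'Session_47'/'Session_48' as appropriate
```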
     
    @@ -5318,2081 +5514,2230 @@


    -
     895class D4xdata(list):
    - 896	'''
    - 897	Store and process data for a large set of Δ47 and/or Δ48
    - 898	analyses, usually comprising more than one analytical session.
    - 899	'''
    - 900
    - 901	### 17O CORRECTION PARAMETERS
    - 902	R13_VPDB = 0.01118  # (Chang & Li, 1990)
    - 903	'''
    - 904	Absolute (13C/12C) ratio of VPDB.
    - 905	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
    - 906	'''
    - 907
    - 908	R18_VSMOW = 0.0020052  # (Baertschi, 1976)
    - 909	'''
    - 910	Absolute (18O/16O) ratio of VSMOW.
    - 911	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
    - 912	'''
    - 913
    - 914	LAMBDA_17 = 0.528  # (Barkan & Luz, 2005)
    - 915	'''
    - 916	Mass-dependent exponent for triple oxygen isotopes.
    - 917	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
    - 918	'''
    - 919
    - 920	R17_VSMOW = 0.00038475  # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
    - 921	'''
    - 922	Absolute (17O/16O) ratio of VSMOW.
    - 923	By default equal to 0.00038475
    - 924	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
    - 925	rescaled to `R13_VPDB`)
    - 926	'''
    - 927
    - 928	R18_VPDB = R18_VSMOW * 1.03092
    - 929	'''
    - 930	Absolute (18O/16O) ratio of VPDB.
    - 931	By definition equal to `R18_VSMOW * 1.03092`.
    - 932	'''
    - 933
    - 934	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
    - 935	'''
    - 936	Absolute (17O/16O) ratio of VPDB.
    - 937	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
    - 938	'''
    - 939
    - 940	LEVENE_REF_SAMPLE = 'ETH-3'
    - 941	'''
    - 942	After the Δ4x standardization step, each sample is tested to
    - 943	assess whether the Δ4x variance within all analyses for that
    - 944	sample differs significantly from that observed for a given reference
    - 945	sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test),
    - 946	which yields a p-value corresponding to the null hypothesis that the
    - 947	underlying variances are equal).
    +            
     904class D4xdata(list):
    + 905	'''
    + 906	Store and process data for a large set of Δ47 and/or Δ48
    + 907	analyses, usually comprising more than one analytical session.
    + 908	'''
    + 909
    + 910	### 17O CORRECTION PARAMETERS
    + 911	R13_VPDB = 0.01118  # (Chang & Li, 1990)
    + 912	'''
    + 913	Absolute (13C/12C) ratio of VPDB.
    + 914	By default equal to 0.01118 ([Chang & Li, 1990](http://www.cnki.com.cn/Article/CJFDTotal-JXTW199004006.htm))
    + 915	'''
    + 916
    + 917	R18_VSMOW = 0.0020052  # (Baertschi, 1976)
    + 918	'''
    + 919	Absolute (18O/16O) ratio of VSMOW.
    + 920	By default equal to 0.0020052 ([Baertschi, 1976](https://doi.org/10.1016/0012-821X(76)90115-1))
    + 921	'''
    + 922
    + 923	LAMBDA_17 = 0.528  # (Barkan & Luz, 2005)
    + 924	'''
    + 925	Mass-dependent exponent for triple oxygen isotopes.
    + 926	By default equal to 0.528 ([Barkan & Luz, 2005](https://doi.org/10.1002/rcm.2250))
    + 927	'''
    + 928
    + 929	R17_VSMOW = 0.00038475  # (Assonov & Brenninkmeijer, 2003, rescaled to R13_VPDB)
    + 930	'''
    + 931	Absolute (17O/16O) ratio of VSMOW.
    + 932	By default equal to 0.00038475
    + 933	([Assonov & Brenninkmeijer, 2003](https://dx.doi.org/10.1002/rcm.1011),
    + 934	rescaled to `R13_VPDB`)
    + 935	'''
    + 936
    + 937	R18_VPDB = R18_VSMOW * 1.03092
    + 938	'''
    + 939	Absolute (18O/16O) ratio of VPDB.
    + 940	By definition equal to `R18_VSMOW * 1.03092`.
    + 941	'''
    + 942
    + 943	R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17
    + 944	'''
    + 945	Absolute (17O/16O) ratio of VPDB.
    + 946	By definition equal to `R17_VSMOW * 1.03092 ** LAMBDA_17`.
    + 947	'''
      948
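
A quick numerical check of the two derived ratios defined above (plain Python; the commented values are rounded):

```py
R18_VSMOW = 0.0020052
R17_VSMOW = 0.00038475
LAMBDA_17 = 0.528

R18_VPDB = R18_VSMOW * 1.03092               # ≈ 0.0020672
R17_VPDB = R17_VSMOW * 1.03092 ** LAMBDA_17  # ≈ 0.00039099
```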
    - 949	`LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which
    - 950	sample should be used as a reference for this test.
    - 951	'''
    - 952
    - 953	ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6)  # (Kim et al., 2007, calcite)
    - 954	'''
    - 955	Specifies the 18O/16O fractionation factor generally applicable
    - 956	to acid reactions in the dataset. Currently used by `D4xdata.wg()`,
    - 957	`D4xdata.standardize_d13C`, and `D4xdata.standardize_d18O`.
    - 958
    - 959	By default equal to 1.008129 (calcite reacted at 90 °C,
    - 960	[Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)).
    - 961	'''
    - 962
    - 963	Nominal_d13C_VPDB = {
    - 964		'ETH-1': 2.02,
    - 965		'ETH-2': -10.17,
    - 966		'ETH-3': 1.71,
    - 967		}	# (Bernasconi et al., 2018)
    - 968	'''
    - 969	Nominal δ13C_VPDB values assigned to carbonate standards, used by
    - 970	`D4xdata.standardize_d13C()`.
    + 949	LEVENE_REF_SAMPLE = 'ETH-3'
    + 950	'''
    + 951	After the Δ4x standardization step, each sample is tested to
    + 952	assess whether the Δ4x variance within all analyses for that
    + 953	sample differs significantly from that observed for a given reference
    + 954	sample (using [Levene's test](https://en.wikipedia.org/wiki/Levene%27s_test),
    + 955	which yields a p-value corresponding to the null hypothesis that the
    + 956	underlying variances are equal).
    + 957
    + 958	`LEVENE_REF_SAMPLE` (by default equal to `'ETH-3'`) specifies which
    + 959	sample should be used as a reference for this test.
    + 960	'''
    + 961
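
A hedged illustration of the Levene test described above, using scipy's implementation rather than whatever the library calls internally; the residual values below are made up:

```py
from scipy.stats import levene

# hypothetical Δ47 residuals (analysis minus sample average), in permil
ref_residuals  = [0.012, -0.008, 0.003, -0.011, 0.004]
test_residuals = [0.031, -0.027, 0.019, -0.035, 0.012]

stat, p = levene(ref_residuals, test_residuals)
print(f'p = {p:.3f}')   # a small p-value suggests unequal variances
```
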
    + 962	ALPHA_18O_ACID_REACTION = round(np.exp(3.59 / (90 + 273.15) - 1.79e-3), 6)  # (Kim et al., 2007, calcite)
    + 963	'''
    + 964	Specifies the 18O/16O fractionation factor generally applicable
    + 965	to acid reactions in the dataset. Currently used by `D4xdata.wg()`,
    + 966	`D4xdata.standardize_d13C()`, and `D4xdata.standardize_d18O()`.
    + 967
    + 968	By default equal to 1.008129 (calcite reacted at 90 °C,
    + 969	[Kim et al., 2007](https://dx.doi.org/10.1016/j.chemgeo.2007.08.005)).
    + 970	'''
      971
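
The default value quoted in the docstring above can be reproduced from the same expression used in the assignment:

```py
import numpy as np

T = 90   # calcite reaction temperature, in degrees Celsius
alpha = round(np.exp(3.59 / (T + 273.15) - 1.79e-3), 6)
print(alpha)   # 1.008129
```
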
    - 972	By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after
    - 973	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
    - 974	'''
    - 975
    - 976	Nominal_d18O_VPDB = {
    - 977		'ETH-1': -2.19,
    - 978		'ETH-2': -18.69,
    - 979		'ETH-3': -1.78,
    - 980		}	# (Bernasconi et al., 2018)
    - 981	'''
    - 982	Nominal δ18O_VPDB values assigned to carbonate standards, used by
    - 983	`D4xdata.standardize_d18O()`.
    + 972	Nominal_d13C_VPDB = {
    + 973		'ETH-1': 2.02,
    + 974		'ETH-2': -10.17,
    + 975		'ETH-3': 1.71,
    + 976		}	# (Bernasconi et al., 2018)
    + 977	'''
    + 978	Nominal δ13C_VPDB values assigned to carbonate standards, used by
    + 979	`D4xdata.standardize_d13C()`.
    + 980
    + 981	By default equal to `{'ETH-1': 2.02, 'ETH-2': -10.17, 'ETH-3': 1.71}` after
    + 982	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
    + 983	'''
      984
    - 985	By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after
    - 986	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
    - 987	'''
    - 988
    - 989	d13C_STANDARDIZATION_METHOD = '2pt'
    + 985	Nominal_d18O_VPDB = {
    + 986		'ETH-1': -2.19,
    + 987		'ETH-2': -18.69,
    + 988		'ETH-3': -1.78,
    + 989		}	# (Bernasconi et al., 2018)
      990	'''
    - 991	Method by which to standardize δ13C values:
    - 992	
    - 993	+ `none`: do not apply any δ13C standardization.
    - 994	+ `'1pt'`: within each session, offset all initial δ13C values so as to
    - 995	minimize the difference between final δ13C_VPDB values and
    - 996	`Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined).
    - 997	+ `'2pt'`: within each session, apply a affine trasformation to all δ13C
    - 998	values so as to minimize the difference between final δ13C_VPDB
    - 999	values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB`
    -1000	is defined).
    -1001	'''
    -1002
    -1003	d18O_STANDARDIZATION_METHOD = '2pt'
    -1004	'''
    -1005	Method by which to standardize δ18O values:
    -1006	
    -1007	+ `none`: do not apply any δ18O standardization.
    -1008	+ `'1pt'`: within each session, offset all initial δ18O values so as to
    -1009	minimize the difference between final δ18O_VPDB values and
    -1010	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
    -1011	+ `'2pt'`: within each session, apply a affine trasformation to all δ18O
    -1012	values so as to minimize the difference between final δ18O_VPDB
    -1013	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
    -1014	is defined).
    -1015	'''
    -1016
    -1017	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
    -1018		'''
    -1019		**Parameters**
    -1020
    -1021		+ `l`: a list of dictionaries, with each dictionary including at least the keys
    -1022		`Sample`, `d45`, `d46`, and `d47` or `d48`.
    -1023		+ `mass`: `'47'` or `'48'`
    -1024		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
    -1025		+ `session`: define session name for analyses without a `Session` key
    -1026		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
    -1027
    -1028		Returns a `D4xdata` object derived from `list`.
    -1029		'''
    -1030		self._4x = mass
    -1031		self.verbose = verbose
    -1032		self.prefix = 'D4xdata'
    -1033		self.logfile = logfile
    -1034		list.__init__(self, l)
    -1035		self.Nf = None
    -1036		self.repeatability = {}
    -1037		self.refresh(session = session)
    -1038
    -1039
    -1040	def make_verbal(oldfun):
    -1041		'''
    -1042		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
    -1043		'''
    -1044		@wraps(oldfun)
    -1045		def newfun(*args, verbose = '', **kwargs):
    -1046			myself = args[0]
    -1047			oldprefix = myself.prefix
    -1048			myself.prefix = oldfun.__name__
    -1049			if verbose != '':
    -1050				oldverbose = myself.verbose
    -1051				myself.verbose = verbose
    -1052			out = oldfun(*args, **kwargs)
    -1053			myself.prefix = oldprefix
    -1054			if verbose != '':
    -1055				myself.verbose = oldverbose
    -1056			return out
    -1057		return newfun
    -1058
    -1059
    -1060	def msg(self, txt):
    -1061		'''
    -1062		Log a message to `self.logfile`, and print it out if `verbose = True`
    -1063		'''
    -1064		self.log(txt)
    -1065		if self.verbose:
    -1066			print(f'{f"[{self.prefix}]":<16} {txt}')
    + 991	Nominal δ18O_VPDB values assigned to carbonate standards, used by
    + 992	`D4xdata.standardize_d18O()`.
    + 993
    + 994	By default equal to `{'ETH-1': -2.19, 'ETH-2': -18.69, 'ETH-3': -1.78}` after
    + 995	[Bernasconi et al. (2018)](https://doi.org/10.1029/2017GC007385).
    + 996	'''
    + 997
    + 998	d13C_STANDARDIZATION_METHOD = '2pt'
    + 999	'''
    +1000	Method by which to standardize δ13C values:
    +1001	
    +1002	+ `none`: do not apply any δ13C standardization.
    +1003	+ `'1pt'`: within each session, offset all initial δ13C values so as to
    +1004	minimize the difference between final δ13C_VPDB values and
    +1005	`Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB` is defined).
    +1006	+ `'2pt'`: within each session, apply an affine transformation to all δ13C
    +1007	values so as to minimize the difference between final δ13C_VPDB
    +1008	values and `Nominal_d13C_VPDB` (averaged over all analyses for which `Nominal_d13C_VPDB`
    +1009	is defined).
    +1010	'''
    +1011
    +1012	d18O_STANDARDIZATION_METHOD = '2pt'
    +1013	'''
    +1014	Method by which to standardize δ18O values:
    +1015	
    +1016	+ `none`: do not apply any δ18O standardization.
    +1017	+ `'1pt'`: within each session, offset all initial δ18O values so as to
    +1018	minimize the difference between final δ18O_VPDB values and
    +1019	`Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB` is defined).
    +1020	+ `'2pt'`: within each session, apply an affine transformation to all δ18O
    +1021	values so as to minimize the difference between final δ18O_VPDB
    +1022	values and `Nominal_d18O_VPDB` (averaged over all analyses for which `Nominal_d18O_VPDB`
    +1023	is defined).
    +1024	'''
    +1025
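
A sketch of overriding these class-level defaults on an instance before standardizing; the reduced anchor set shown here is hypothetical:

```py
data = D47data()
data.Nominal_d13C_VPDB = {'ETH-1': 2.02, 'ETH-2': -10.17}   # drop ETH-3 as a δ13C anchor
data.d13C_STANDARDIZATION_METHOD = '1pt'   # offset-only correction
data.d18O_STANDARDIZATION_METHOD = '2pt'   # affine correction (the default)
```
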
    +1026	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
    +1027		'''
    +1028		**Parameters**
    +1029
    +1030		+ `l`: a list of dictionaries, with each dictionary including at least the keys
    +1031		`Sample`, `d45`, `d46`, and `d47` or `d48`.
    +1032		+ `mass`: `'47'` or `'48'`
    +1033		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
    +1034		+ `session`: define session name for analyses without a `Session` key
    +1035		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
    +1036
    +1037		Returns a `D4xdata` object derived from `list`.
    +1038		'''
    +1039		self._4x = mass
    +1040		self.verbose = verbose
    +1041		self.prefix = 'D4xdata'
    +1042		self.logfile = logfile
    +1043		list.__init__(self, l)
    +1044		self.Nf = None
    +1045		self.repeatability = {}
    +1046		self.refresh(session = session)
    +1047
    +1048
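
Constructor usage sketched from the docstring above; the sample names and delta values are hypothetical:

```py
rawdata = [
	{'Sample': 'ETH-1',    'd45': 5.79, 'd46': 11.40, 'd47': 16.89},
	{'Sample': 'MYSAMPLE', 'd45': 6.22, 'd46': 11.88, 'd47': 17.50},
	]
data = D47data(rawdata, session = 'Session_01', verbose = True)
```
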
    +1049	def make_verbal(oldfun):
    +1050		'''
    +1051		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
    +1052		'''
    +1053		@wraps(oldfun)
    +1054		def newfun(*args, verbose = '', **kwargs):
    +1055			myself = args[0]
    +1056			oldprefix = myself.prefix
    +1057			myself.prefix = oldfun.__name__
    +1058			if verbose != '':
    +1059				oldverbose = myself.verbose
    +1060				myself.verbose = verbose
    +1061			out = oldfun(*args, **kwargs)
    +1062			myself.prefix = oldprefix
    +1063			if verbose != '':
    +1064				myself.verbose = oldverbose
    +1065			return out
    +1066		return newfun
     1067
     1068
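
In practice the decorator means that any decorated method accepts an extra `verbose` keyword overriding `self.verbose` for that single call; with `data` a `D4xdata` instance as in the constructor example above:

```py
data.crunch(verbose = True)   # detailed logs for this call only
data.crunch()                 # back to whatever self.verbose says
```
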
    -1069	def vmsg(self, txt):
    +1069	def msg(self, txt):
     1070		'''
    -1071		Log a message to `self.logfile` and print it out
    +1071		Log a message to `self.logfile`, and print it out if `verbose = True`
     1072		'''
     1073		self.log(txt)
    -1074		print(txt)
    -1075
    +1074		if self.verbose:
    +1075			print(f'{f"[{self.prefix}]":<16} {txt}')
     1076
    -1077	def log(self, *txts):
    -1078		'''
    -1079		Log a message to `self.logfile`
    -1080		'''
    -1081		if self.logfile:
    -1082			with open(self.logfile, 'a') as fid:
    -1083				for txt in txts:
    -1084					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
    +1077
    +1078	def vmsg(self, txt):
    +1079		'''
    +1080		Log a message to `self.logfile` and print it out
    +1081		'''
    +1082		self.log(txt)
    +1083		print(txt)
    +1084
     1085
    -1086
    -1087	def refresh(self, session = 'mySession'):
    -1088		'''
    -1089		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
    -1090		'''
    -1091		self.fill_in_missing_info(session = session)
    -1092		self.refresh_sessions()
    -1093		self.refresh_samples()
    +1086	def log(self, *txts):
    +1087		'''
    +1088		Log a message to `self.logfile`
    +1089		'''
    +1090		if self.logfile:
    +1091			with open(self.logfile, 'a') as fid:
    +1092				for txt in txts:
    +1093					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
     1094
     1095
    -1096	def refresh_sessions(self):
    +1096	def refresh(self, session = 'mySession'):
     1097		'''
    -1098		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
    -1099		to `False` for all sessions.
    -1100		'''
    -1101		self.sessions = {
    -1102			s: {'data': [r for r in self if r['Session'] == s]}
    -1103			for s in sorted({r['Session'] for r in self})
    -1104			}
    -1105		for s in self.sessions:
    -1106			self.sessions[s]['scrambling_drift'] = False
    -1107			self.sessions[s]['slope_drift'] = False
    -1108			self.sessions[s]['wg_drift'] = False
    -1109			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
    -1110			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
    -1111
    -1112
    -1113	def refresh_samples(self):
    -1114		'''
    -1115		Define `self.samples`, `self.anchors`, and `self.unknowns`.
    -1116		'''
    -1117		self.samples = {
    -1118			s: {'data': [r for r in self if r['Sample'] == s]}
    -1119			for s in sorted({r['Sample'] for r in self})
    -1120			}
    -1121		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
    -1122		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
    -1123
    -1124
    -1125	def read(self, filename, sep = '', session = ''):
    -1126		'''
    -1127		Read file in csv format to load data into a `D47data` object.
    -1128
    -1129		In the csv file, spaces before and after field separators (`','` by default)
    -1130		are optional. Each line corresponds to a single analysis.
    -1131
    -1132		The required fields are:
    +1098		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
    +1099		'''
    +1100		self.fill_in_missing_info(session = session)
    +1101		self.refresh_sessions()
    +1102		self.refresh_samples()
    +1103
    +1104
    +1105	def refresh_sessions(self):
    +1106		'''
    +1107		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
    +1108		to `False` for all sessions.
    +1109		'''
    +1110		self.sessions = {
    +1111			s: {'data': [r for r in self if r['Session'] == s]}
    +1112			for s in sorted({r['Session'] for r in self})
    +1113			}
    +1114		for s in self.sessions:
    +1115			self.sessions[s]['scrambling_drift'] = False
    +1116			self.sessions[s]['slope_drift'] = False
    +1117			self.sessions[s]['wg_drift'] = False
    +1118			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
    +1119			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
    +1120
    +1121
    +1122	def refresh_samples(self):
    +1123		'''
    +1124		Define `self.samples`, `self.anchors`, and `self.unknowns`.
    +1125		'''
    +1126		self.samples = {
    +1127			s: {'data': [r for r in self if r['Sample'] == s]}
    +1128			for s in sorted({r['Sample'] for r in self})
    +1129			}
    +1130		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
    +1131		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
    +1132
     1133
    -1134		+ `UID`: a unique identifier
    -1135		+ `Session`: an identifier for the analytical session
    -1136		+ `Sample`: a sample identifier
    -1137		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    -1138
    -1139		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    -1140		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    -1141		and `d49` are optional, and set to NaN by default.
    +1134	def read(self, filename, sep = '', session = ''):
    +1135		'''
    +1136	Read a file in csv format to load data into a `D4xdata` object.
    +1137
    +1138		In the csv file, spaces before and after field separators (`','` by default)
    +1139		are optional. Each line corresponds to a single analysis.
    +1140
    +1141		The required fields are:
     1142
    -1143		**Parameters**
    -1144
    -1145		+ `fileneme`: the path of the file to read
    -1146		+ `sep`: csv separator delimiting the fields
    -1147		+ `session`: set `Session` field to this string for all analyses
    -1148		'''
    -1149		with open(filename) as fid:
    -1150			self.input(fid.read(), sep = sep, session = session)
    +1143		+ `UID`: a unique identifier
    +1144		+ `Session`: an identifier for the analytical session
    +1145		+ `Sample`: a sample identifier
    +1146		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    +1147
    +1148		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    +1149		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    +1150		and `d49` are optional, and set to NaN by default.
     1151
    -1152
    -1153	def input(self, txt, sep = '', session = ''):
    -1154		'''
    -1155		Read `txt` string in csv format to load analysis data into a `D47data` object.
    -1156
    -1157		In the csv string, spaces before and after field separators (`','` by default)
    -1158		are optional. Each line corresponds to a single analysis.
    -1159
    -1160		The required fields are:
    +1152		**Parameters**
    +1153
    +1154	+ `filename`: the path of the file to read
    +1155		+ `sep`: csv separator delimiting the fields
    +1156		+ `session`: set `Session` field to this string for all analyses
    +1157		'''
    +1158		with open(filename) as fid:
    +1159			self.input(fid.read(), sep = sep, session = session)
    +1160
     1161
    -1162		+ `UID`: a unique identifier
    -1163		+ `Session`: an identifier for the analytical session
    -1164		+ `Sample`: a sample identifier
    -1165		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    -1166
    -1167		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    -1168		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    -1169		and `d49` are optional, and set to NaN by default.
    +1162	def input(self, txt, sep = '', session = ''):
    +1163		'''
    +1164	Read the `txt` string in csv format to load analysis data into a `D4xdata` object.
    +1165
    +1166		In the csv string, spaces before and after field separators (`','` by default)
    +1167		are optional. Each line corresponds to a single analysis.
    +1168
    +1169		The required fields are:
     1170
    -1171		**Parameters**
    -1172
    -1173		+ `txt`: the csv string to read
    -1174		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
    -1175		whichever appers most often in `txt`.
    -1176		+ `session`: set `Session` field to this string for all analyses
    -1177		'''
    -1178		if sep == '':
    -1179			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
    -1180		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
    -1181		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
    -1182
    -1183		if session != '':
    -1184			for r in data:
    -1185				r['Session'] = session
    -1186
    -1187		self += data
    -1188		self.refresh()
    -1189
    -1190
    -1191	@make_verbal
    -1192	def wg(self, samples = None, a18_acid = None):
    -1193		'''
    -1194		Compute bulk composition of the working gas for each session based on
    -1195		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
    -1196		`self.Nominal_d18O_VPDB`.
    -1197		'''
    +1171		+ `UID`: a unique identifier
    +1172		+ `Session`: an identifier for the analytical session
    +1173		+ `Sample`: a sample identifier
    +1174		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    +1175
    +1176		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    +1177		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    +1178		and `d49` are optional, and set to NaN by default.
    +1179
    +1180		**Parameters**
    +1181
    +1182		+ `txt`: the csv string to read
    +1183		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
    +1184	whichever appears most often in `txt`.
    +1185		+ `session`: set `Session` field to this string for all analyses
    +1186		'''
    +1187		if sep == '':
    +1188			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
    +1189		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
    +1190		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
    +1191
    +1192		if session != '':
    +1193			for r in data:
    +1194				r['Session'] = session
    +1195
    +1196		self += data
    +1197		self.refresh()
     1198
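
An end-to-end sketch of the two loaders above; `'rawdata.csv'` is a hypothetical path and all numbers are made up:

```py
data = D47data()

# from a string:
data.input('''UID, Session, Sample, d45, d46, d47
A01, S1, ETH-1,    5.79, 11.40, 16.89
A02, S1, MYSAMPLE, 6.22, 11.88, 17.50''')

# or, equivalently, from a csv file with the same layout:
# data.read('rawdata.csv')
```
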
    -1199		self.msg('Computing WG composition:')
    -1200
    -1201		if a18_acid is None:
    -1202			a18_acid = self.ALPHA_18O_ACID_REACTION
    -1203		if samples is None:
    -1204			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
    -1205
    -1206		assert a18_acid, f'Acid fractionation factor should not be zero.'
    +1199
    +1200	@make_verbal
    +1201	def wg(self, samples = None, a18_acid = None):
    +1202		'''
    +1203		Compute bulk composition of the working gas for each session based on
    +1204		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
    +1205		`self.Nominal_d18O_VPDB`.
    +1206		'''
     1207
    -1208		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
    -1209		R45R46_standards = {}
    -1210		for sample in samples:
    -1211			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
    -1212			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
    -1213			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
    -1214			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
    -1215			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
    +1208		self.msg('Computing WG composition:')
    +1209
    +1210		if a18_acid is None:
    +1211			a18_acid = self.ALPHA_18O_ACID_REACTION
    +1212		if samples is None:
    +1213			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
    +1214
    +1215		assert a18_acid, 'Acid fractionation factor should not be zero.'
     1216
    -1217			C12_s = 1 / (1 + R13_s)
    -1218			C13_s = R13_s / (1 + R13_s)
    -1219			C16_s = 1 / (1 + R17_s + R18_s)
    -1220			C17_s = R17_s / (1 + R17_s + R18_s)
    -1221			C18_s = R18_s / (1 + R17_s + R18_s)
    -1222
    -1223			C626_s = C12_s * C16_s ** 2
    -1224			C627_s = 2 * C12_s * C16_s * C17_s
    -1225			C628_s = 2 * C12_s * C16_s * C18_s
    -1226			C636_s = C13_s * C16_s ** 2
    -1227			C637_s = 2 * C13_s * C16_s * C17_s
    -1228			C727_s = C12_s * C17_s ** 2
    -1229
    -1230			R45_s = (C627_s + C636_s) / C626_s
    -1231			R46_s = (C628_s + C637_s + C727_s) / C626_s
    -1232			R45R46_standards[sample] = (R45_s, R46_s)
    -1233		
    -1234		for s in self.sessions:
    -1235			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
    -1236			assert db, f'No sample from {samples} found in session "{s}".'
    -1237# 			dbsamples = sorted({r['Sample'] for r in db})
    +1217		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
    +1218		R45R46_standards = {}
    +1219		for sample in samples:
    +1220			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
    +1221			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
    +1222			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
    +1223			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
    +1224			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
    +1225
    +1226			C12_s = 1 / (1 + R13_s)
    +1227			C13_s = R13_s / (1 + R13_s)
    +1228			C16_s = 1 / (1 + R17_s + R18_s)
    +1229			C17_s = R17_s / (1 + R17_s + R18_s)
    +1230			C18_s = R18_s / (1 + R17_s + R18_s)
    +1231
    +1232			C626_s = C12_s * C16_s ** 2
    +1233			C627_s = 2 * C12_s * C16_s * C17_s
    +1234			C628_s = 2 * C12_s * C16_s * C18_s
    +1235			C636_s = C13_s * C16_s ** 2
    +1236			C637_s = 2 * C13_s * C16_s * C17_s
    +1237			C727_s = C12_s * C17_s ** 2
     1238
    -1239			X = [r['d45'] for r in db]
    -1240			Y = [R45R46_standards[r['Sample']][0] for r in db]
    -1241			x1, x2 = np.min(X), np.max(X)
    -1242
    -1243			if x1 < x2:
    -1244				wgcoord = x1/(x1-x2)
    -1245			else:
    -1246				wgcoord = 999
    +1239			R45_s = (C627_s + C636_s) / C626_s
    +1240			R46_s = (C628_s + C637_s + C727_s) / C626_s
    +1241			R45R46_standards[sample] = (R45_s, R46_s)
    +1242		
    +1243		for s in self.sessions:
    +1244			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
    +1245			assert db, f'No sample from {samples} found in session "{s}".'
    +1246# 			dbsamples = sorted({r['Sample'] for r in db})
     1247
    -1248			if wgcoord < -.5 or wgcoord > 1.5:
    -1249				# unreasonable to extrapolate to d45 = 0
    -1250				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    -1251			else :
    -1252				# d45 = 0 is reasonably well bracketed
    -1253				R45_wg = np.polyfit(X, Y, 1)[1]
    -1254
    -1255			X = [r['d46'] for r in db]
    -1256			Y = [R45R46_standards[r['Sample']][1] for r in db]
    -1257			x1, x2 = np.min(X), np.max(X)
    -1258
    -1259			if x1 < x2:
    -1260				wgcoord = x1/(x1-x2)
    -1261			else:
    -1262				wgcoord = 999
    +1248			X = [r['d45'] for r in db]
    +1249			Y = [R45R46_standards[r['Sample']][0] for r in db]
    +1250			x1, x2 = np.min(X), np.max(X)
    +1251
    +1252			if x1 < x2:
    +1253				wgcoord = x1/(x1-x2)
    +1254			else:
    +1255				wgcoord = 999
    +1256
    +1257			if wgcoord < -.5 or wgcoord > 1.5:
    +1258				# unreasonable to extrapolate to d45 = 0
    +1259				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    +1260			else:
    +1261				# d45 = 0 is reasonably well bracketed
    +1262				R45_wg = np.polyfit(X, Y, 1)[1]
     1263
    -1264			if wgcoord < -.5 or wgcoord > 1.5:
    -1265				# unreasonable to extrapolate to d46 = 0
    -1266				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    -1267			else :
    -1268				# d46 = 0 is reasonably well bracketed
    -1269				R46_wg = np.polyfit(X, Y, 1)[1]
    -1270
    -1271			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
    +1264			X = [r['d46'] for r in db]
    +1265			Y = [R45R46_standards[r['Sample']][1] for r in db]
    +1266			x1, x2 = np.min(X), np.max(X)
    +1267
    +1268			if x1 < x2:
    +1269				wgcoord = x1/(x1-x2)
    +1270			else:
    +1271				wgcoord = 999
     1272
    -1273			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
    -1274
    -1275			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
    -1276			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
    -1277			for r in self.sessions[s]['data']:
    -1278				r['d13Cwg_VPDB'] = d13Cwg_VPDB
    -1279				r['d18Owg_VSMOW'] = d18Owg_VSMOW
    -1280
    +1273			if wgcoord < -.5 or wgcoord > 1.5:
    +1274				# unreasonable to extrapolate to d46 = 0
    +1275				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    +1276			else:
    +1277				# d46 = 0 is reasonably well bracketed
    +1278				R46_wg = np.polyfit(X, Y, 1)[1]
    +1279
    +1280			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
     1281
    -1282	def compute_bulk_delta(self, R45, R46, D17O = 0):
    -1283		'''
    -1284		Compute δ13C_VPDB and δ18O_VSMOW,
    -1285		by solving the generalized form of equation (17) from
    -1286		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
    -1287		assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and
    -1288		solving the corresponding second-order Taylor polynomial.
    -1289		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014))
    -1290		'''
    -1291
    -1292		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
    -1293
    -1294		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
    -1295		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
    -1296		C = 2 * self.R18_VSMOW
    -1297		D = -R46
    -1298
    -1299		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
    -1300		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
    -1301		cc = A + B + C + D
    +1282			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
    +1283
    +1284			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
    +1285			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
    +1286			for r in self.sessions[s]['data']:
    +1287				r['d13Cwg_VPDB'] = d13Cwg_VPDB
    +1288				r['d18Owg_VSMOW'] = d18Owg_VSMOW
    +1289
    +1290
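
Typical use of `wg()`, assuming every session includes at least one anchor defined in both nominal dictionaries:

```py
data.wg()   # a18_acid defaults to self.ALPHA_18O_ACID_REACTION
for s in data.sessions:
	print(s, data.sessions[s]['d13Cwg_VPDB'], data.sessions[s]['d18Owg_VSMOW'])
```
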
    +1291	def compute_bulk_delta(self, R45, R46, D17O = 0):
    +1292		'''
    +1293	Compute δ13C_VPDB and δ18O_VSMOW
    +1294	by solving the generalized form of equation (17) from
    +1295	[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
    +1296	assuming that δ18O_VSMOW lies within a moderate range (0 ± 50 ‰)
    +1297	so that the equation reduces to a second-order Taylor polynomial
    +1298	(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014)).
    +1299		'''
    +1300
    +1301		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
     1302
    -1303		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
    -1304
    -1305		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
    -1306		R17 = K * R18 ** self.LAMBDA_17
    -1307		R13 = R45 - 2 * R17
    -1308
    -1309		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
    -1310
    -1311		return d13C_VPDB, d18O_VSMOW
    -1312
    +1303		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
    +1304		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
    +1305		C = 2 * self.R18_VSMOW
    +1306		D = -R46
    +1307
    +1308		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
    +1309		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
    +1310		cc = A + B + C + D
    +1311
    +1312		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
     1313
    -1314	@make_verbal
    -1315	def crunch(self, verbose = ''):
    -1316		'''
    -1317		Compute bulk composition and raw clumped isotope anomalies for all analyses.
    -1318		'''
    -1319		for r in self:
    -1320			self.compute_bulk_and_clumping_deltas(r)
    -1321		self.standardize_d13C()
    -1322		self.standardize_d18O()
    -1323		self.msg(f"Crunched {len(self)} analyses.")
    -1324
    -1325
    -1326	def fill_in_missing_info(self, session = 'mySession'):
    -1327		'''
    -1328		Fill in optional fields with default values
    -1329		'''
    -1330		for i,r in enumerate(self):
    -1331			if 'D17O' not in r:
    -1332				r['D17O'] = 0.
    -1333			if 'UID' not in r:
    -1334				r['UID'] = f'{i+1}'
    -1335			if 'Session' not in r:
    -1336				r['Session'] = session
    -1337			for k in ['d47', 'd48', 'd49']:
    -1338				if k not in r:
    -1339					r[k] = np.nan
    -1340
    -1341
    -1342	def standardize_d13C(self):
    -1343		'''
    -1344		Perform δ13C standadization within each session `s` according to
    -1345		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
    -1346		by `D47data.refresh_sessions()`as equal to `self.d13C_STANDARDIZATION_METHOD`, but
    -1347		may be redefined abitrarily at a later stage.
    -1348		'''
    -1349		for s in self.sessions:
    -1350			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
    -1351				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
    -1352				X,Y = zip(*XY)
    -1353				if self.sessions[s]['d13C_standardization_method'] == '1pt':
    -1354					offset = np.mean(Y) - np.mean(X)
    -1355					for r in self.sessions[s]['data']:
    -1356						r['d13C_VPDB'] += offset				
    -1357				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
    -1358					a,b = np.polyfit(X,Y,1)
    -1359					for r in self.sessions[s]['data']:
    -1360						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
    -1361
    -1362	def standardize_d18O(self):
    -1363		'''
    -1364		Perform δ18O standadization within each session `s` according to
    -1365		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
    -1366		which is defined by default by `D47data.refresh_sessions()`as equal to
    -1367		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined abitrarily at a later stage.
    -1368		'''
    -1369		for s in self.sessions:
    -1370			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
    -1371				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
    -1372				X,Y = zip(*XY)
    -1373				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
    -1374				if self.sessions[s]['d18O_standardization_method'] == '1pt':
    -1375					offset = np.mean(Y) - np.mean(X)
    -1376					for r in self.sessions[s]['data']:
    -1377						r['d18O_VSMOW'] += offset				
    -1378				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
    -1379					a,b = np.polyfit(X,Y,1)
    -1380					for r in self.sessions[s]['data']:
    -1381						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
    -1382	
    -1383
    -1384	def compute_bulk_and_clumping_deltas(self, r):
    -1385		'''
    -1386		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
    -1387		'''
    -1388
    -1389		# Compute working gas R13, R18, and isobar ratios
    -1390		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
    -1391		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
    -1392		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
    -1393
    -1394		# Compute analyte isobar ratios
    -1395		R45 = (1 + r['d45'] / 1000) * R45_wg
    -1396		R46 = (1 + r['d46'] / 1000) * R46_wg
    -1397		R47 = (1 + r['d47'] / 1000) * R47_wg
    -1398		R48 = (1 + r['d48'] / 1000) * R48_wg
    -1399		R49 = (1 + r['d49'] / 1000) * R49_wg
    -1400
    -1401		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
    -1402		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
    -1403		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
    -1404
    -1405		# Compute stochastic isobar ratios of the analyte
    -1406		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
    -1407			R13, R18, D17O = r['D17O']
    -1408		)
    +1314		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
    +1315		R17 = K * R18 ** self.LAMBDA_17
    +1316		R13 = R45 - 2 * R17
    +1317
    +1318		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
    +1319
    +1320		return d13C_VPDB, d18O_VSMOW
    +1321
    +1322
    +1323	@make_verbal
    +1324	def crunch(self, verbose = ''):
    +1325		'''
    +1326		Compute bulk composition and raw clumped isotope anomalies for all analyses.
    +1327		'''
    +1328		for r in self:
    +1329			self.compute_bulk_and_clumping_deltas(r)
    +1330		self.standardize_d13C()
    +1331		self.standardize_d18O()
    +1332		self.msg(f"Crunched {len(self)} analyses.")
    +1333
    +1334
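
The processing order implied so far, as a minimal sketch:

```py
data = D47data(rawdata)   # rawdata as in the constructor example above
data.wg()                 # working-gas composition for each session
data.crunch()             # δ13C, δ18O, and raw Δ47/Δ48/Δ49 for each analysis
```
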
    +1335	def fill_in_missing_info(self, session = 'mySession'):
    +1336		'''
    +1337		Fill in optional fields with default values
    +1338		'''
    +1339		for i,r in enumerate(self):
    +1340			if 'D17O' not in r:
    +1341				r['D17O'] = 0.
    +1342			if 'UID' not in r:
    +1343				r['UID'] = f'{i+1}'
    +1344			if 'Session' not in r:
    +1345				r['Session'] = session
    +1346			for k in ['d47', 'd48', 'd49']:
    +1347				if k not in r:
    +1348					r[k] = np.nan
    +1349
    +1350
    +1351	def standardize_d13C(self):
    +1352		'''
    +1353	Perform δ13C standardization within each session `s` according to
    +1354	`self.sessions[s]['d13C_standardization_method']`, which is defined by default
    +1355	by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
    +1356	may be redefined arbitrarily at a later stage.
    +1357		'''
    +1358		for s in self.sessions:
    +1359			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
    +1360				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
    +1361				X,Y = zip(*XY)
    +1362				if self.sessions[s]['d13C_standardization_method'] == '1pt':
    +1363					offset = np.mean(Y) - np.mean(X)
    +1364					for r in self.sessions[s]['data']:
    +1365						r['d13C_VPDB'] += offset				
    +1366				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
    +1367					a,b = np.polyfit(X,Y,1)
    +1368					for r in self.sessions[s]['data']:
    +1369						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
    +1370
    +1371	def standardize_d18O(self):
    +1372		'''
    +1373	Perform δ18O standardization within each session `s` according to
    +1374	`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
    +1375	which is defined by default by `D47data.refresh_sessions()` as equal to
    +1376	`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
    +1377		'''
    +1378		for s in self.sessions:
    +1379			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
    +1380				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
    +1381				X,Y = zip(*XY)
    +1382				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
    +1383				if self.sessions[s]['d18O_standardization_method'] == '1pt':
    +1384					offset = np.mean(Y) - np.mean(X)
    +1385					for r in self.sessions[s]['data']:
    +1386						r['d18O_VSMOW'] += offset				
    +1387				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
    +1388					a,b = np.polyfit(X,Y,1)
    +1389					for r in self.sessions[s]['data']:
    +1390						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
    +1391	
    +1392
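
The `'2pt'` correction used by both methods above is a plain affine fit through the anchors; a standalone sketch with hypothetical numbers:

```py
import numpy as np

X = [2.05, -10.10]   # measured δ13C of two anchors (hypothetical)
Y = [2.02, -10.17]   # corresponding nominal δ13C_VPDB values
a, b = np.polyfit(X, Y, 1)

d13C_corrected = a * 1.70 + b   # the affine map applied to every analysis
```
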
    +1393	def compute_bulk_and_clumping_deltas(self, r):
    +1394		'''
    +1395		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
    +1396		'''
    +1397
    +1398		# Compute working gas R13, R18, and isobar ratios
    +1399		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
    +1400		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
    +1401		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
    +1402
    +1403		# Compute analyte isobar ratios
    +1404		R45 = (1 + r['d45'] / 1000) * R45_wg
    +1405		R46 = (1 + r['d46'] / 1000) * R46_wg
    +1406		R47 = (1 + r['d47'] / 1000) * R47_wg
    +1407		R48 = (1 + r['d48'] / 1000) * R48_wg
    +1408		R49 = (1 + r['d49'] / 1000) * R49_wg
     1409
    -1410		# Check that R45/R45stoch and R46/R46stoch are undistinguishable from 1,
    -1411		# and raise a warning if the corresponding anomalies exceed 0.02 ppm.
    -1412		if (R45 / R45stoch - 1) > 5e-8:
    -1413			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
    -1414		if (R46 / R46stoch - 1) > 5e-8:
    -1415			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
    -1416
    -1417		# Compute raw clumped isotope anomalies
    -1418		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
    -1419		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
    -1420		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
    -1421
    -1422
    -1423	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
    -1424		'''
    -1425		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
    -1426		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
    -1427		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
    -1428		'''
    -1429
    -1430		# Compute R17
    -1431		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
    -1432
    -1433		# Compute isotope concentrations
    -1434		C12 = (1 + R13) ** -1
    -1435		C13 = C12 * R13
    -1436		C16 = (1 + R17 + R18) ** -1
    -1437		C17 = C16 * R17
    -1438		C18 = C16 * R18
    -1439
    -1440		# Compute stochastic isotopologue concentrations
    -1441		C626 = C16 * C12 * C16
    -1442		C627 = C16 * C12 * C17 * 2
    -1443		C628 = C16 * C12 * C18 * 2
    -1444		C636 = C16 * C13 * C16
    -1445		C637 = C16 * C13 * C17 * 2
    -1446		C638 = C16 * C13 * C18 * 2
    -1447		C727 = C17 * C12 * C17
    -1448		C728 = C17 * C12 * C18 * 2
    -1449		C737 = C17 * C13 * C17
    -1450		C738 = C17 * C13 * C18 * 2
    -1451		C828 = C18 * C12 * C18
    -1452		C838 = C18 * C13 * C18
    -1453
    -1454		# Compute stochastic isobar ratios
    -1455		R45 = (C636 + C627) / C626
    -1456		R46 = (C628 + C637 + C727) / C626
    -1457		R47 = (C638 + C728 + C737) / C626
    -1458		R48 = (C738 + C828) / C626
    -1459		R49 = C838 / C626
    -1460
    -1461		# Account for stochastic anomalies
    -1462		R47 *= 1 + D47 / 1000
    -1463		R48 *= 1 + D48 / 1000
    -1464		R49 *= 1 + D49 / 1000
    -1465
    -1466		# Return isobar ratios
    -1467		return R45, R46, R47, R48, R49
    -1468
    +1410		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
    +1411		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
    +1412		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
    +1413
    +1414		# Compute stochastic isobar ratios of the analyte
    +1415		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
    +1416			R13, R18, D17O = r['D17O']
    +1417		)
    +1418
    +1419		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
    +1420		# and raise a warning if the corresponding anomalies exceed 0.05 ppm.
    +1421		if (R45 / R45stoch - 1) > 5e-8:
    +1422			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
    +1423		if (R46 / R46stoch - 1) > 5e-8:
    +1424			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
    +1425
    +1426		# Compute raw clumped isotope anomalies
    +1427		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
    +1428		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
    +1429		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
    +1430
    +1431
    +1432	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
    +1433		'''
    +1434		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
    +1435		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
    +1436		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
    +1437		'''
    +1438
    +1439		# Compute R17
    +1440		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
    +1441
    +1442		# Compute isotope concentrations
    +1443		C12 = (1 + R13) ** -1
    +1444		C13 = C12 * R13
    +1445		C16 = (1 + R17 + R18) ** -1
    +1446		C17 = C16 * R17
    +1447		C18 = C16 * R18
    +1448
    +1449		# Compute stochastic isotopologue concentrations
    +1450		C626 = C16 * C12 * C16
    +1451		C627 = C16 * C12 * C17 * 2
    +1452		C628 = C16 * C12 * C18 * 2
    +1453		C636 = C16 * C13 * C16
    +1454		C637 = C16 * C13 * C17 * 2
    +1455		C638 = C16 * C13 * C18 * 2
    +1456		C727 = C17 * C12 * C17
    +1457		C728 = C17 * C12 * C18 * 2
    +1458		C737 = C17 * C13 * C17
    +1459		C738 = C17 * C13 * C18 * 2
    +1460		C828 = C18 * C12 * C18
    +1461		C838 = C18 * C13 * C18
    +1462
    +1463		# Compute stochastic isobar ratios
    +1464		R45 = (C636 + C627) / C626
    +1465		R46 = (C628 + C637 + C727) / C626
    +1466		R47 = (C638 + C728 + C737) / C626
    +1467		R48 = (C738 + C828) / C626
    +1468		R49 = C838 / C626
     1469
    -1470	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
    -1471		'''
    -1472		Split unknown samples by UID (treat all analyses as different samples)
    -1473		or by session (treat analyses of a given sample in different sessions as
    -1474		different samples).
    -1475
    -1476		**Parameters**
    +1470		# Account for clumped-isotope anomalies relative to the stochastic distribution
    +1471		R47 *= 1 + D47 / 1000
    +1472		R48 *= 1 + D48 / 1000
    +1473		R49 *= 1 + D49 / 1000
    +1474
    +1475		# Return isobar ratios
    +1476		return R45, R46, R47, R48, R49
     1477
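
A round-trip sketch tying `compute_isobar_ratios()` and `compute_bulk_delta()` together: build stochastic isobar ratios for a known bulk composition, then recover that composition. The delta values are hypothetical, and the recovery is approximate because of the second-order Taylor solution:

```py
data = D47data()
R13 = data.R13_VPDB * (1 + 2.02 / 1000)    # δ13C_VPDB = +2.02 ‰
R18 = data.R18_VSMOW * (1 + 37.0 / 1000)   # δ18O_VSMOW = +37.0 ‰

R45, R46, R47, R48, R49 = data.compute_isobar_ratios(R13, R18)
d13C, d18O = data.compute_bulk_delta(R45, R46)
print(round(d13C, 4), round(d18O, 4))      # ≈ 2.02, ≈ 37.0
```
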
    -1478		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
    -1479		+ `grouping`: `by_uid` | `by_session`
    -1480		'''
    -1481		if samples_to_split == 'all':
    -1482			samples_to_split = [s for s in self.unknowns]
    -1483		gkeys = {'by_uid':'UID', 'by_session':'Session'}
    -1484		self.grouping = grouping.lower()
    -1485		if self.grouping in gkeys:
    -1486			gkey = gkeys[self.grouping]
    -1487		for r in self:
    -1488			if r['Sample'] in samples_to_split:
    -1489				r['Sample_original'] = r['Sample']
    -1490				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
    -1491			elif r['Sample'] in self.unknowns:
    -1492				r['Sample_original'] = r['Sample']
    -1493		self.refresh_samples()
    -1494
    -1495
    -1496	def unsplit_samples(self, tables = False):
    -1497		'''
    -1498		Reverse the effects of `D47data.split_samples()`.
    -1499		
    -1500		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
    -1501		
    -1502		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
    -1503		probably use `D4xdata.combine_samples()` instead to reverse the effects of
    -1504		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
    -1505		effects of `D47data.split_samples()` with `grouping='by_sessions'` (because in
    -1506		that case session-averaged Δ4x values are statistically independent).
    -1507		'''
    -1508		unknowns_old = sorted({s for s in self.unknowns})
    -1509		CM_old = self.standardization.covar[:,:]
    -1510		VD_old = self.standardization.params.valuesdict().copy()
    -1511		vars_old = self.standardization.var_names
    -1512
    -1513		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
    -1514
    -1515		Ns = len(vars_old) - len(unknowns_old)
    -1516		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
    -1517		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
    -1518
    -1519		W = np.zeros((len(vars_new), len(vars_old)))
    -1520		W[:Ns,:Ns] = np.eye(Ns)
    -1521		for u in unknowns_new:
    -1522			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
    -1523			if self.grouping == 'by_session':
    -1524				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
    -1525			elif self.grouping == 'by_uid':
    -1526				weights = [1 for s in splits]
    -1527			sw = sum(weights)
    -1528			weights = [w/sw for w in weights]
    -1529			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
    -1530
    -1531		CM_new = W @ CM_old @ W.T
    -1532		V = W @ np.array([[VD_old[k]] for k in vars_old])
    -1533		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
    -1534
    -1535		self.standardization.covar = CM_new
    -1536		self.standardization.params.valuesdict = lambda : VD_new
    -1537		self.standardization.var_names = vars_new
    -1538
    -1539		for r in self:
    -1540			if r['Sample'] in self.unknowns:
    -1541				r['Sample_split'] = r['Sample']
    -1542				r['Sample'] = r['Sample_original']
    +1478
    +1479	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
    +1480		'''
    +1481		Split unknown samples by UID (treat all analyses as different samples)
    +1482		or by session (treat analyses of a given sample in different sessions as
    +1483		different samples).
    +1484
    +1485		**Parameters**
    +1486
    +1487		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
    +1488		+ `grouping`: `by_uid` | `by_session`
    +1489		'''
    +1490		if samples_to_split == 'all':
    +1491			samples_to_split = [s for s in self.unknowns]
    +1492		gkeys = {'by_uid':'UID', 'by_session':'Session'}
    +1493		self.grouping = grouping.lower()
    +1494		if self.grouping in gkeys:
    +1495			gkey = gkeys[self.grouping]
    +1496		for r in self:
    +1497			if r['Sample'] in samples_to_split:
    +1498				r['Sample_original'] = r['Sample']
    +1499				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
    +1500			elif r['Sample'] in self.unknowns:
    +1501				r['Sample_original'] = r['Sample']
    +1502		self.refresh_samples()
    +1503
    +1504
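
Intended pairing with `unsplit_samples()` below; `'MYSAMPLE'` is a placeholder and `data` is assumed to be a crunched `D47data` instance:

```py
data.split_samples(['MYSAMPLE'], grouping = 'by_session')
data.standardize(method = 'pooled')
data.unsplit_samples()   # merge the per-session estimates back together
```
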
    +1505	def unsplit_samples(self, tables = False):
    +1506		'''
    +1507		Reverse the effects of `D47data.split_samples()`.
    +1508		
    +1509		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
    +1510		
    +1511		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
    +1512		probably use `D4xdata.combine_samples()` instead to reverse the effects of
    +1513		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
    +1514	effects of `D47data.split_samples()` with `grouping='by_session'` (because in
    +1515		that case session-averaged Δ4x values are statistically independent).
    +1516		'''
    +1517		unknowns_old = sorted({s for s in self.unknowns})
    +1518		CM_old = self.standardization.covar[:,:]
    +1519		VD_old = self.standardization.params.valuesdict().copy()
    +1520		vars_old = self.standardization.var_names
    +1521
    +1522		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
    +1523
    +1524		Ns = len(vars_old) - len(unknowns_old)
    +1525		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
    +1526		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
    +1527
    +1528		W = np.zeros((len(vars_new), len(vars_old)))
    +1529		W[:Ns,:Ns] = np.eye(Ns)
    +1530		for u in unknowns_new:
    +1531			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
    +1532			if self.grouping == 'by_session':
    +1533				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
    +1534			elif self.grouping == 'by_uid':
    +1535				weights = [1 for s in splits]
    +1536			sw = sum(weights)
    +1537			weights = [w/sw for w in weights]
    +1538			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
    +1539
    +1540		CM_new = W @ CM_old @ W.T
    +1541		V = W @ np.array([[VD_old[k]] for k in vars_old])
    +1542		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
     1543
    -1544		self.refresh_samples()
    -1545		self.consolidate_samples()
    -1546		self.repeatabilities()
    +1544		self.standardization.covar = CM_new
    +1545		self.standardization.params.valuesdict = lambda: VD_new
    +1546		self.standardization.var_names = vars_new
     1547
    -1548		if tables:
    -1549			self.table_of_analyses()
    -1550			self.table_of_samples()
    -1551
    -1552	def assign_timestamps(self):
    -1553		'''
    -1554		Assign a time field `t` of type `float` to each analysis.
    -1555
    -1556		If `TimeTag` is one of the data fields, `t` is equal within a given session
    -1557		to `TimeTag` minus the mean value of `TimeTag` for that session.
    -1558		Otherwise, `TimeTag` is by default equal to the index of each analysis
    -1559		in the dataset and `t` is defined as above.
    -1560		'''
    -1561		for session in self.sessions:
    -1562			sdata = self.sessions[session]['data']
    -1563			try:
    -1564				t0 = np.mean([r['TimeTag'] for r in sdata])
    -1565				for r in sdata:
    -1566					r['t'] = r['TimeTag'] - t0
    -1567			except KeyError:
    -1568				t0 = (len(sdata)-1)/2
    -1569				for t,r in enumerate(sdata):
    -1570					r['t'] = t - t0
    -1571
    -1572
    -1573	def report(self):
    -1574		'''
    -1575		Prints a report on the standardization fit.
    -1576		Only applicable after `D4xdata.standardize(method='pooled')`.
    -1577		'''
    -1578		report_fit(self.standardization)
    -1579
    +1548		for r in self:
    +1549			if r['Sample'] in self.unknowns:
    +1550				r['Sample_split'] = r['Sample']
    +1551				r['Sample'] = r['Sample_original']
    +1552
    +1553		self.refresh_samples()
    +1554		self.consolidate_samples()
    +1555		self.repeatabilities()
    +1556
    +1557		if tables:
    +1558			self.table_of_analyses()
    +1559			self.table_of_samples()
    +1560
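
The merge above is linear, so uncertainties propagate through the weight matrix as `W @ CM @ W.T`; a standalone numerical sketch with hypothetical values:

```py
import numpy as np

CM = np.array([[1.00e-4, 0.20e-4],
               [0.20e-4, 2.25e-4]])   # covariance of two split estimates
W = np.array([[0.6, 0.4]])            # normalized weights

var_merged = (W @ CM @ W.T)[0, 0]
se_merged = var_merged ** 0.5
```
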
    +1561	def assign_timestamps(self):
    +1562		'''
    +1563		Assign a time field `t` of type `float` to each analysis.
    +1564
    +1565		If `TimeTag` is one of the data fields, `t` is equal within a given session
    +1566		to `TimeTag` minus the mean value of `TimeTag` for that session.
    +1567	Otherwise, `t` is set to the index of each analysis within its session,
    +1568	minus the mean index for that session (so that `t` remains centered on zero).
    +1569		'''
    +1570		for session in self.sessions:
    +1571			sdata = self.sessions[session]['data']
    +1572			try:
    +1573				t0 = np.mean([r['TimeTag'] for r in sdata])
    +1574				for r in sdata:
    +1575					r['t'] = r['TimeTag'] - t0
    +1576			except KeyError:
    +1577				t0 = (len(sdata)-1)/2
    +1578				for t,r in enumerate(sdata):
    +1579					r['t'] = t - t0
     1580
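
The fallback timestamps described above are just centered indices; for a five-analysis session:

```py
n = 5
t0 = (n - 1) / 2
print([i - t0 for i in range(n)])   # [-2.0, -1.0, 0.0, 1.0, 2.0]
```
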
    -1581	def combine_samples(self, sample_groups):
    -1582		'''
    -1583		Combine analyses of different samples to compute weighted average Δ4x
    -1584		and new error (co)variances corresponding to the groups defined by the `sample_groups`
    -1585		dictionary.
    -1586		
    -1587		Caution: samples are weighted by number of replicate analyses, which is a
    -1588		reasonable default behavior but is not always optimal (e.g., in the case of strongly
    -1589		correlated analytical errors for one or more samples).
    -1590		
    -1591	Returns a tuple of:
    -1592		
    -1593		+ the list of group names
    -1594		+ an array of the corresponding Δ4x values
    -1595		+ the corresponding (co)variance matrix
    -1596		
    -1597		**Parameters**
    -1598
    -1599		+ `sample_groups`: a dictionary of the form:
    -1600		```py
    -1601		{'group1': ['sample_1', 'sample_2'],
    -1602		 'group2': ['sample_3', 'sample_4', 'sample_5']}
    -1603		```
    -1604		'''
    -1605		
    -1606		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
    -1607		groups = sorted(sample_groups.keys())
    -1608		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
    -1609		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
    -1610		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
    -1611		W = np.array([
    -1612			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
    -1613			for j in groups])
    -1614		D4x_new = W @ D4x_old
    -1615		CM_new = W @ CM_old @ W.T
    -1616
    -1617		return groups, D4x_new[:,0], CM_new
    -1618		
    -1619
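
Calling convention sketched from the docstring above, with `data` a standardized dataset; group and sample names are placeholders:

```py
groups, D47_new, CM_new = data.combine_samples({
	'group1': ['sample_1', 'sample_2'],
	'group2': ['sample_3', 'sample_4', 'sample_5'],
	})
```
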
    -1620	@make_verbal
    -1621	def standardize(self,
    -1622		method = 'pooled',
    -1623		weighted_sessions = [],
    -1624		consolidate = True,
    -1625		consolidate_tables = False,
    -1626		consolidate_plots = False,
    -1627		constraints = {},
    -1628		):
    -1629		'''
    -1630		Compute absolute Δ4x values for all replicate analyses and for sample averages.
    -1631	If the `method` argument is set to `'pooled'`, the standardization processes all sessions
    -1632	in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
    -1633	i.e. that their true Δ4x value does not change between sessions
    -1634	([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
    -1635	`'indep_sessions'`, the standardization processes each session independently, based only
    -1636	on anchor analyses.
    -1637		'''
    -1638
    -1639		self.standardization_method = method
    -1640		self.assign_timestamps()
    -1641
    -1642		if method == 'pooled':
    -1643			if weighted_sessions:
    -1644				for session_group in weighted_sessions:
    -1645					if self._4x == '47':
    -1646						X = D47data([r for r in self if r['Session'] in session_group])
    -1647					elif self._4x == '48':
    -1648						X = D48data([r for r in self if r['Session'] in session_group])
    -1649					X.Nominal_D4x = self.Nominal_D4x.copy()
    -1650					X.refresh()
    -1651					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
    -1652					w = np.sqrt(result.redchi)
    -1653					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
    -1654					for r in X:
    -1655						r[f'wD{self._4x}raw'] *= w
    -1656			else:
    -1657				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
    -1658				for r in self:
    -1659					r[f'wD{self._4x}raw'] = 1.
    -1660
    -1661			params = Parameters()
    -1662			for k,session in enumerate(self.sessions):
    -1663				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
    -1664				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
    -1665				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
    -1666				s = pf(session)
    -1667				params.add(f'a_{s}', value = 0.9)
    -1668				params.add(f'b_{s}', value = 0.)
    -1669				params.add(f'c_{s}', value = -0.9)
    -1670				params.add(f'a2_{s}', value = 0.,
    -1671# 					vary = self.sessions[session]['scrambling_drift'],
    -1672					)
    -1673				params.add(f'b2_{s}', value = 0.,
    -1674# 					vary = self.sessions[session]['slope_drift'],
    -1675					)
    -1676				params.add(f'c2_{s}', value = 0.,
    -1677# 					vary = self.sessions[session]['wg_drift'],
    -1678					)
    -1679				if not self.sessions[session]['scrambling_drift']:
    -1680					params[f'a2_{s}'].expr = '0'
    -1681				if not self.sessions[session]['slope_drift']:
    -1682					params[f'b2_{s}'].expr = '0'
    -1683				if not self.sessions[session]['wg_drift']:
    -1684					params[f'c2_{s}'].expr = '0'
    -1685
    -1686			for sample in self.unknowns:
    -1687				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
    -1688
    -1689			for k in constraints:
    -1690				params[k].expr = constraints[k]
    -1691
    -1692			def residuals(p):
    -1693				R = []
    -1694				for r in self:
    -1695					session = pf(r['Session'])
    -1696					sample = pf(r['Sample'])
    -1697					if r['Sample'] in self.Nominal_D4x:
    -1698						R += [ (
    -1699							r[f'D{self._4x}raw'] - (
    -1700								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
    -1701								+ p[f'b_{session}'] * r[f'd{self._4x}']
    -1702								+	p[f'c_{session}']
    -1703								+ r['t'] * (
    -1704									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
    -1705									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    -1706									+	p[f'c2_{session}']
    -1707									)
    -1708								)
    -1709							) / r[f'wD{self._4x}raw'] ]
    -1710					else:
    -1711						R += [ (
    -1712							r[f'D{self._4x}raw'] - (
    -1713								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
    -1714								+ p[f'b_{session}'] * r[f'd{self._4x}']
    -1715								+	p[f'c_{session}']
    -1716								+ r['t'] * (
    -1717									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
    -1718									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    -1719									+	p[f'c2_{session}']
    -1720									)
    -1721								)
    -1722							) / r[f'wD{self._4x}raw'] ]
    -1723				return R
    -1724
    -1725			M = Minimizer(residuals, params)
    -1726			result = M.least_squares()
    -1727			self.Nf = result.nfree
    -1728			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    -1729			new_names, new_covar, new_se = _fullcovar(result)[:3]
    -1730			result.var_names = new_names
    -1731			result.covar = new_covar
    -1732
    -1733			for r in self:
    -1734				s = pf(r["Session"])
    -1735				a = result.params.valuesdict()[f'a_{s}']
    -1736				b = result.params.valuesdict()[f'b_{s}']
    -1737				c = result.params.valuesdict()[f'c_{s}']
    -1738				a2 = result.params.valuesdict()[f'a2_{s}']
    -1739				b2 = result.params.valuesdict()[f'b2_{s}']
    -1740				c2 = result.params.valuesdict()[f'c2_{s}']
    -1741				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    -1742
    -1743			self.standardization = result
    -1744
    -1745			for session in self.sessions:
    -1746				self.sessions[session]['Np'] = 3
    -1747				for k in ['scrambling', 'slope', 'wg']:
    -1748					if self.sessions[session][f'{k}_drift']:
    -1749						self.sessions[session]['Np'] += 1
    -1750
    -1751			if consolidate:
    -1752				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    -1753			return result
    -1754
    -1755
    -1756		elif method == 'indep_sessions':
    -1757
    -1758			if weighted_sessions:
    -1759				for session_group in weighted_sessions:
    -1760					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
    -1761					X.Nominal_D4x = self.Nominal_D4x.copy()
    -1762					X.refresh()
    -1763					# This is only done to assign r['wD47raw'] for r in X:
    -1764					X.standardize(method = method, weighted_sessions = [], consolidate = False)
    -1765					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
    -1766			else:
    -1767				self.msg('All weights set to 1 ‰')
    -1768				for r in self:
    -1769					r[f'wD{self._4x}raw'] = 1
    -1770
    -1771			for session in self.sessions:
    -1772				s = self.sessions[session]
    -1773				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
    -1774				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
    -1775				s['Np'] = sum(p_active)
    -1776				sdata = s['data']
    -1777
    -1778				A = np.array([
    -1779					[
    -1780						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
    -1781						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
    -1782						1 / r[f'wD{self._4x}raw'],
    -1783						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
    -1784						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
    -1785						r['t'] / r[f'wD{self._4x}raw']
    -1786						]
    -1787					for r in sdata if r['Sample'] in self.anchors
    -1788					])[:,p_active] # only keep columns for the active parameters
    -1789				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
    -1790				s['Na'] = Y.size
    -1791				CM = linalg.inv(A.T @ A)
    -1792				bf = (CM @ A.T @ Y).T[0,:]
    -1793				k = 0
    -1794				for n,a in zip(p_names, p_active):
    -1795					if a:
    -1796						s[n] = bf[k]
    -1797# 						self.msg(f'{n} = {bf[k]}')
    -1798						k += 1
    -1799					else:
    -1800						s[n] = 0.
    -1801# 						self.msg(f'{n} = 0.0')
    -1802
    -1803			for r in sdata:
    -1804					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
    -1805					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    -1806					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
    -1807
    -1808				s['CM'] = np.zeros((6,6))
    -1809				i = 0
    -1810				k_active = [j for j,a in enumerate(p_active) if a]
    -1811				for j,a in enumerate(p_active):
    -1812					if a:
    -1813						s['CM'][j,k_active] = CM[i,:]
    -1814						i += 1
    -1815
    -1816			if not weighted_sessions:
    -1817				w = self.rmswd()['rmswd']
    -1818				for r in self:
    -1819					r[f'wD{self._4x}'] *= w
    -1820					r[f'wD{self._4x}raw'] *= w
    -1821				for session in self.sessions:
    -1822					self.sessions[session]['CM'] *= w**2
    -1823
    -1824			for session in self.sessions:
    -1825				s = self.sessions[session]
    -1826				s['SE_a'] = s['CM'][0,0]**.5
    -1827				s['SE_b'] = s['CM'][1,1]**.5
    -1828				s['SE_c'] = s['CM'][2,2]**.5
    -1829				s['SE_a2'] = s['CM'][3,3]**.5
    -1830				s['SE_b2'] = s['CM'][4,4]**.5
    -1831				s['SE_c2'] = s['CM'][5,5]**.5
    +1581
    +1582	def report(self):
    +1583		'''
    +1584		Prints a report on the standardization fit.
    +1585		Only applicable after `D4xdata.standardize(method='pooled')`.
    +1586		'''
    +1587		report_fit(self.standardization)
    +1588
    +1589
    +1590	def combine_samples(self, sample_groups):
    +1591		'''
    +1592		Combine analyses of different samples to compute weighted average Δ4x
    +1593		and new error (co)variances corresponding to the groups defined by the `sample_groups`
    +1594		dictionary.
    +1595		
    +1596		Caution: samples are weighted by number of replicate analyses, which is a
    +1597		reasonable default behavior but is not always optimal (e.g., in the case of strongly
    +1598		correlated analytical errors for one or more samples).
    +1599		
    +1600		Returns a tuple of:
    +1601		
    +1602		+ the list of group names
    +1603		+ an array of the corresponding Δ4x values
    +1604		+ the corresponding (co)variance matrix
    +1605		
    +1606		**Parameters**
    +1607
    +1608		+ `sample_groups`: a dictionary of the form:
    +1609		```py
    +1610		{'group1': ['sample_1', 'sample_2'],
    +1611		 'group2': ['sample_3', 'sample_4', 'sample_5']}
    +1612		```
    +1613		'''
    +1614		
    +1615		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
    +1616		groups = sorted(sample_groups.keys())
    +1617		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
    +1618		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
    +1619		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
    +1620		W = np.array([
    +1621			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
    +1622			for j in groups])
    +1623		D4x_new = W @ D4x_old
    +1624		CM_new = W @ CM_old @ W.T
    +1625
    +1626		return groups, D4x_new[:,0], CM_new
    +1627		
    +1628
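
For reference, the matrix expressions above reduce to a replicate-weighted average with first-order covariance propagation. A minimal standalone sketch (made-up Δ4x values and a hypothetical 2×2 covariance matrix):

```py
import numpy as np

# hypothetical inputs: two samples combined into one group,
# with 3 and 1 replicate analyses respectively
D4x_old = np.array([[0.600], [0.620]])  # per-sample D4x values
CM_old = np.array([[1e-4, 2e-5],        # per-sample (co)variance matrix
                   [2e-5, 4e-4]])
W = np.array([[3/4, 1/4]])              # weights = N_i / N_total

D4x_new = W @ D4x_old                   # weighted group average
CM_new = W @ CM_old @ W.T               # propagated (co)variance

print(D4x_new[:,0], CM_new)             # -> [0.605] [[8.875e-05]]
```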
    +1629	@make_verbal
    +1630	def standardize(self,
    +1631		method = 'pooled',
    +1632		weighted_sessions = [],
    +1633		consolidate = True,
    +1634		consolidate_tables = False,
    +1635		consolidate_plots = False,
    +1636		constraints = {},
    +1637		):
    +1638		'''
    +1639		Compute absolute Δ4x values for all replicate analyses and for sample averages.
    +1640		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
    +1641		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
    +1642		i.e. that their true Δ4x value does not change between sessions
    +1643		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
    +1644		`'indep_sessions'`, the standardization processes each session independently, based only
    +1645		on anchor analyses.
    +1646		'''
    +1647
    +1648		self.standardization_method = method
    +1649		self.assign_timestamps()
    +1650
    +1651		if method == 'pooled':
    +1652			if weighted_sessions:
    +1653				for session_group in weighted_sessions:
    +1654					if self._4x == '47':
    +1655						X = D47data([r for r in self if r['Session'] in session_group])
    +1656					elif self._4x == '48':
    +1657						X = D48data([r for r in self if r['Session'] in session_group])
    +1658					X.Nominal_D4x = self.Nominal_D4x.copy()
    +1659					X.refresh()
    +1660					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
    +1661					w = np.sqrt(result.redchi)
    +1662					self.msg(f'Session group {session_group} RMSWD = {w:.4f}')
    +1663					for r in X:
    +1664						r[f'wD{self._4x}raw'] *= w
    +1665			else:
    +1666				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
    +1667				for r in self:
    +1668					r[f'wD{self._4x}raw'] = 1.
    +1669
    +1670			params = Parameters()
    +1671			for k,session in enumerate(self.sessions):
    +1672				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
    +1673				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
    +1674				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
    +1675				s = pf(session)
    +1676				params.add(f'a_{s}', value = 0.9)
    +1677				params.add(f'b_{s}', value = 0.)
    +1678				params.add(f'c_{s}', value = -0.9)
    +1679				params.add(f'a2_{s}', value = 0.,
    +1680# 					vary = self.sessions[session]['scrambling_drift'],
    +1681					)
    +1682				params.add(f'b2_{s}', value = 0.,
    +1683# 					vary = self.sessions[session]['slope_drift'],
    +1684					)
    +1685				params.add(f'c2_{s}', value = 0.,
    +1686# 					vary = self.sessions[session]['wg_drift'],
    +1687					)
    +1688				if not self.sessions[session]['scrambling_drift']:
    +1689					params[f'a2_{s}'].expr = '0'
    +1690				if not self.sessions[session]['slope_drift']:
    +1691					params[f'b2_{s}'].expr = '0'
    +1692				if not self.sessions[session]['wg_drift']:
    +1693					params[f'c2_{s}'].expr = '0'
    +1694
    +1695			for sample in self.unknowns:
    +1696				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
    +1697
    +1698			for k in constraints:
    +1699				params[k].expr = constraints[k]
    +1700
    +1701			def residuals(p):
    +1702				R = []
    +1703				for r in self:
    +1704					session = pf(r['Session'])
    +1705					sample = pf(r['Sample'])
    +1706					if r['Sample'] in self.Nominal_D4x:
    +1707						R += [ (
    +1708							r[f'D{self._4x}raw'] - (
    +1709								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
    +1710								+ p[f'b_{session}'] * r[f'd{self._4x}']
    +1711								+	p[f'c_{session}']
    +1712								+ r['t'] * (
    +1713									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
    +1714									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    +1715									+	p[f'c2_{session}']
    +1716									)
    +1717								)
    +1718							) / r[f'wD{self._4x}raw'] ]
    +1719					else:
    +1720						R += [ (
    +1721							r[f'D{self._4x}raw'] - (
    +1722								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
    +1723								+ p[f'b_{session}'] * r[f'd{self._4x}']
    +1724								+	p[f'c_{session}']
    +1725								+ r['t'] * (
    +1726									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
    +1727									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    +1728									+	p[f'c2_{session}']
    +1729									)
    +1730								)
    +1731							) / r[f'wD{self._4x}raw'] ]
    +1732				return R
    +1733
    +1734			M = Minimizer(residuals, params)
    +1735			result = M.least_squares()
    +1736			self.Nf = result.nfree
    +1737			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1738			new_names, new_covar, new_se = _fullcovar(result)[:3]
    +1739			result.var_names = new_names
    +1740			result.covar = new_covar
    +1741
    +1742			for r in self:
    +1743				s = pf(r["Session"])
    +1744				a = result.params.valuesdict()[f'a_{s}']
    +1745				b = result.params.valuesdict()[f'b_{s}']
    +1746				c = result.params.valuesdict()[f'c_{s}']
    +1747				a2 = result.params.valuesdict()[f'a2_{s}']
    +1748				b2 = result.params.valuesdict()[f'b2_{s}']
    +1749				c2 = result.params.valuesdict()[f'c2_{s}']
    +1750				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    +1751
    +1752			self.standardization = result
    +1753
    +1754			for session in self.sessions:
    +1755				self.sessions[session]['Np'] = 3
    +1756				for k in ['scrambling', 'slope', 'wg']:
    +1757					if self.sessions[session][f'{k}_drift']:
    +1758						self.sessions[session]['Np'] += 1
    +1759
    +1760			if consolidate:
    +1761				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    +1762			return result
    +1763
    +1764
    +1765		elif method == 'indep_sessions':
    +1766
    +1767			if weighted_sessions:
    +1768				for session_group in weighted_sessions:
    +1769					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
    +1770					X.Nominal_D4x = self.Nominal_D4x.copy()
    +1771					X.refresh()
    +1772					# This is only done to assign r['wD47raw'] for r in X:
    +1773					X.standardize(method = method, weighted_sessions = [], consolidate = False)
    +1774					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
    +1775			else:
    +1776				self.msg('All weights set to 1 ‰')
    +1777				for r in self:
    +1778					r[f'wD{self._4x}raw'] = 1
    +1779
    +1780			for session in self.sessions:
    +1781				s = self.sessions[session]
    +1782				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
    +1783				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
    +1784				s['Np'] = sum(p_active)
    +1785				sdata = s['data']
    +1786
    +1787				A = np.array([
    +1788					[
    +1789						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
    +1790						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
    +1791						1 / r[f'wD{self._4x}raw'],
    +1792						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
    +1793						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
    +1794						r['t'] / r[f'wD{self._4x}raw']
    +1795						]
    +1796					for r in sdata if r['Sample'] in self.anchors
    +1797					])[:,p_active] # only keep columns for the active parameters
    +1798				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
    +1799				s['Na'] = Y.size
    +1800				CM = linalg.inv(A.T @ A)
    +1801				bf = (CM @ A.T @ Y).T[0,:]
    +1802				k = 0
    +1803				for n,a in zip(p_names, p_active):
    +1804					if a:
    +1805						s[n] = bf[k]
    +1806# 						self.msg(f'{n} = {bf[k]}')
    +1807						k += 1
    +1808					else:
    +1809						s[n] = 0.
    +1810# 						self.msg(f'{n} = 0.0')
    +1811
    +1812			for r in sdata:
    +1813					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
    +1814					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    +1815					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
    +1816
    +1817				s['CM'] = np.zeros((6,6))
    +1818				i = 0
    +1819				k_active = [j for j,a in enumerate(p_active) if a]
    +1820				for j,a in enumerate(p_active):
    +1821					if a:
    +1822						s['CM'][j,k_active] = CM[i,:]
    +1823						i += 1
    +1824
    +1825			if not weighted_sessions:
    +1826				w = self.rmswd()['rmswd']
    +1827				for r in self:
    +1828					r[f'wD{self._4x}'] *= w
    +1829					r[f'wD{self._4x}raw'] *= w
    +1830				for session in self.sessions:
    +1831					self.sessions[session]['CM'] *= w**2
     1832
    -1833			if not weighted_sessions:
    -1834				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
    -1835			else:
    -1836				self.Nf = 0
    -1837				for sg in weighted_sessions:
    -1838					self.Nf += self.rmswd(sessions = sg)['Nf']
    -1839
    -1840			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1833			for session in self.sessions:
    +1834				s = self.sessions[session]
    +1835				s['SE_a'] = s['CM'][0,0]**.5
    +1836				s['SE_b'] = s['CM'][1,1]**.5
    +1837				s['SE_c'] = s['CM'][2,2]**.5
    +1838				s['SE_a2'] = s['CM'][3,3]**.5
    +1839				s['SE_b2'] = s['CM'][4,4]**.5
    +1840				s['SE_c2'] = s['CM'][5,5]**.5
     1841
    -1842			avgD4x = {
    -1843				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
    -1844				for sample in self.samples
    -1845				}
    -1846			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
    -1847			rD4x = (chi2/self.Nf)**.5
    -1848			self.repeatability[f'sigma_{self._4x}'] = rD4x
    -1849
    -1850			if consolidate:
    -1851				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    -1852
    -1853
    -1854	def standardization_error(self, session, d4x, D4x, t = 0):
    -1855		'''
    -1856		Compute standardization error for a given session and
    -1857		(δ4x, Δ4x) composition.
    -1858		'''
    -1859		a = self.sessions[session]['a']
    -1860		b = self.sessions[session]['b']
    -1861		c = self.sessions[session]['c']
    -1862		a2 = self.sessions[session]['a2']
    -1863		b2 = self.sessions[session]['b2']
    -1864		c2 = self.sessions[session]['c2']
    -1865		CM = self.sessions[session]['CM']
    -1866
    -1867		x, y = D4x, d4x
    -1868		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
    -1869# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
    -1870		dxdy = -(b+b2*t) / (a+a2*t)
    -1871		dxdz = 1. / (a+a2*t)
    -1872		dxda = -x / (a+a2*t)
    -1873		dxdb = -y / (a+a2*t)
    -1874		dxdc = -1. / (a+a2*t)
    -1875		dxda2 = -x * t / (a+a2*t)
    -1876		dxdb2 = -y * t / (a+a2*t)
    -1877		dxdc2 = -t / (a+a2*t)
    -1878		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
    -1879		sx = (V @ CM @ V.T) ** .5
    -1880		return sx
    -1881
    -1882
    -1883	@make_verbal
    -1884	def summary(self,
    -1885		dir = 'output',
    -1886		filename = None,
    -1887		save_to_file = True,
    -1888		print_out = True,
    -1889		):
    -1890		'''
    -1891		Print out and/or save to disk a summary of the standardization results.
    -1892
    -1893		**Parameters**
    -1894
    -1895		+ `dir`: the directory in which to save the table
    -1896		+ `filename`: the name of the csv file to write to
    -1897		+ `save_to_file`: whether to save the table to disk
    -1898		+ `print_out`: whether to print out the table
    -1899		'''
    -1900
    -1901		out = []
    -1902		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
    -1903		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
    -1904		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
    -1905		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
    -1906		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
    -1907		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
    -1908		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
    -1909		out += [['Model degrees of freedom', f"{self.Nf}"]]
    -1910		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
    -1911		out += [['Standardization method', self.standardization_method]]
    -1912
    -1913		if save_to_file:
    -1914			if not os.path.exists(dir):
    -1915				os.makedirs(dir)
    -1916			if filename is None:
    -1917				filename = f'D{self._4x}_summary.csv'
    -1918			with open(f'{dir}/{filename}', 'w') as fid:
    -1919				fid.write(make_csv(out))
    -1920		if print_out:
    -1921			self.msg('\n' + pretty_table(out, header = 0))
    -1922
    -1923
    -1924	@make_verbal
    -1925	def table_of_sessions(self,
    -1926		dir = 'output',
    -1927		filename = None,
    -1928		save_to_file = True,
    -1929		print_out = True,
    -1930		output = None,
    -1931		):
    -1932		'''
    -1933		Print out and/or save to disk a table of sessions.
    -1934
    -1935		**Parameters**
    -1936
    -1937		+ `dir`: the directory in which to save the table
    -1938		+ `filename`: the name of the csv file to write to
    -1939		+ `save_to_file`: whether to save the table to disk
    -1940		+ `print_out`: whether to print out the table
    -1941		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -1942		    if set to `'raw'`: return a list of list of strings
    -1943		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -1944		'''
    -1945		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
    -1946		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
    -1947		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
    -1948
    -1949		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
    -1950		if include_a2:
    -1951			out[-1] += ['a2 ± SE']
    -1952		if include_b2:
    -1953			out[-1] += ['b2 ± SE']
    -1954		if include_c2:
    -1955			out[-1] += ['c2 ± SE']
    -1956		for session in self.sessions:
    -1957			out += [[
    -1958				session,
    -1959				f"{self.sessions[session]['Na']}",
    -1960				f"{self.sessions[session]['Nu']}",
    -1961				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
    -1962				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
    -1963				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
    -1964				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
    -1965				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
    -1966				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
    -1967				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
    -1968				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
    -1969				]]
    -1970			if include_a2:
    -1971				if self.sessions[session]['scrambling_drift']:
    -1972					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
    -1973				else:
    -1974					out[-1] += ['']
    -1975			if include_b2:
    -1976				if self.sessions[session]['slope_drift']:
    -1977					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
    -1978				else:
    -1979					out[-1] += ['']
    -1980			if include_c2:
    -1981				if self.sessions[session]['wg_drift']:
    -1982					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
    -1983				else:
    -1984					out[-1] += ['']
    -1985
    -1986		if save_to_file:
    -1987			if not os.path.exists(dir):
    -1988				os.makedirs(dir)
    -1989			if filename is None:
    -1990				filename = f'D{self._4x}_sessions.csv'
    -1991			with open(f'{dir}/{filename}', 'w') as fid:
    -1992				fid.write(make_csv(out))
    -1993		if print_out:
    -1994			self.msg('\n' + pretty_table(out))
    -1995		if output == 'raw':
    -1996			return out
    -1997		elif output == 'pretty':
    -1998			return pretty_table(out)
    -1999
    -2000
    -2001	@make_verbal
    -2002	def table_of_analyses(
    -2003		self,
    -2004		dir = 'output',
    -2005		filename = None,
    -2006		save_to_file = True,
    -2007		print_out = True,
    -2008		output = None,
    -2009		):
    -2010		'''
    -2011		Print out and/or save to disk a table of analyses.
    -2012
    -2013		**Parameters**
    -2014
    -2015		+ `dir`: the directory in which to save the table
    -2016		+ `filename`: the name of the csv file to write to
    -2017		+ `save_to_file`: whether to save the table to disk
    -2018		+ `print_out`: whether to print out the table
    -2019		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -2020		    if set to `'raw'`: return a list of list of strings
    -2021		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2022		'''
    +1842			if not weighted_sessions:
    +1843				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
    +1844			else:
    +1845				self.Nf = 0
    +1846				for sg in weighted_sessions:
    +1847					self.Nf += self.rmswd(sessions = sg)['Nf']
    +1848
    +1849			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1850
    +1851			avgD4x = {
    +1852				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
    +1853				for sample in self.samples
    +1854				}
    +1855			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
    +1856			rD4x = (chi2/self.Nf)**.5
    +1857			self.repeatability[f'sigma_{self._4x}'] = rD4x
    +1858
    +1859			if consolidate:
    +1860				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    +1861
    +1862
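
As a usage note, a minimal end-to-end sketch follows, assuming the `virtual_data()` helper defined earlier in this module; sample names, replicate counts and compositions are made up:

```py
from D47crunch import D47data, virtual_data

rawdata = virtual_data(session = 'Session_01', samples = [
	dict(Sample = 'ETH-1', N = 4),
	dict(Sample = 'ETH-2', N = 4),
	dict(Sample = 'ETH-3', N = 4),
	dict(Sample = 'MYSAMPLE', N = 8, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4., d18O_VPDB = -8.),
	])

data = D47data(rawdata)
data.crunch()                                  # compute raw Δ values
data.standardize(method = 'pooled')            # single pooled fit across all sessions
# data.standardize(method = 'indep_sessions')  # or: fit each session on its anchors only
data.summary()
```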
    +1863	def standardization_error(self, session, d4x, D4x, t = 0):
    +1864		'''
    +1865		Compute standardization error for a given session and
    +1866		(δ4x, Δ4x) composition.
    +1867		'''
    +1868		a = self.sessions[session]['a']
    +1869		b = self.sessions[session]['b']
    +1870		c = self.sessions[session]['c']
    +1871		a2 = self.sessions[session]['a2']
    +1872		b2 = self.sessions[session]['b2']
    +1873		c2 = self.sessions[session]['c2']
    +1874		CM = self.sessions[session]['CM']
    +1875
    +1876		x, y = D4x, d4x
    +1877		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
    +1878# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
    +1879		dxdy = -(b+b2*t) / (a+a2*t)
    +1880		dxdz = 1. / (a+a2*t)
    +1881		dxda = -x / (a+a2*t)
    +1882		dxdb = -y / (a+a2*t)
    +1883		dxdc = -1. / (a+a2*t)
    +1884		dxda2 = -x * t / (a+a2*t)
    +1885		dxdb2 = -y * t / (a+a2*t)
    +1886		dxdc2 = -t / (a+a2*t)
    +1887		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
    +1888		sx = (V @ CM @ V.T) ** .5
    +1889		return sx
    +1890
    +1891
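
The final `V @ CM @ V.T` step above is standard first-order error propagation: `V` collects the partial derivatives of the standardized value with respect to the six session parameters, and `CM` is their covariance matrix. A self-contained sketch with made-up parameter values and (co)variances:

```py
import numpy as np

# hypothetical session parameters and covariance matrix
# (only diagonal terms populated, for simplicity)
a, b, c = 1.0, 1e-3, -0.9
a2 = b2 = c2 = 0.
t = 0.
CM = np.diag([1e-4, 1e-8, 1e-4, 0., 0., 0.])

x, y = 0.6, 10.  # (D4x, d4x) composition of interest
V = np.array([
	-x / (a + a2*t),      # dx/da
	-y / (a + a2*t),      # dx/db
	-1. / (a + a2*t),     # dx/dc
	-x * t / (a + a2*t),  # dx/da2
	-y * t / (a + a2*t),  # dx/db2
	-t / (a + a2*t),      # dx/dc2
	])
sx = (V @ CM @ V) ** 0.5  # 1SE standardization error
print(f'{sx:.4f}')        # -> 0.0117
```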
    +1892	@make_verbal
    +1893	def summary(self,
    +1894		dir = 'output',
    +1895		filename = None,
    +1896		save_to_file = True,
    +1897		print_out = True,
    +1898		):
    +1899		'''
    +1900		Print out and/or save to disk a summary of the standardization results.
    +1901
    +1902		**Parameters**
    +1903
    +1904		+ `dir`: the directory in which to save the table
    +1905		+ `filename`: the name of the csv file to write to
    +1906		+ `save_to_file`: whether to save the table to disk
    +1907		+ `print_out`: whether to print out the table
    +1908		'''
    +1909
    +1910		out = []
    +1911		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
    +1912		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
    +1913		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
    +1914		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
    +1915		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
    +1916		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
    +1917		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
    +1918		out += [['Model degrees of freedom', f"{self.Nf}"]]
    +1919		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
    +1920		out += [['Standardization method', self.standardization_method]]
    +1921
    +1922		if save_to_file:
    +1923			if not os.path.exists(dir):
    +1924				os.makedirs(dir)
    +1925			if filename is None:
    +1926				filename = f'D{self._4x}_summary.csv'
    +1927			with open(f'{dir}/{filename}', 'w') as fid:
    +1928				fid.write(make_csv(out))
    +1929		if print_out:
    +1930			self.msg('\n' + pretty_table(out, header = 0))
    +1931
    +1932
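
The “Student's 95% t-factor” above is simply the two-tailed Student's t quantile for `self.Nf` degrees of freedom, as computed in `standardize()`:

```py
from scipy.stats import t as tstudent

Nf = 25                             # hypothetical degrees of freedom
t95 = tstudent.ppf(1 - 0.05/2, Nf)  # two-tailed 95% quantile
print(f'{t95:.2f}')                 # -> 2.06
```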
    +1933	@make_verbal
    +1934	def table_of_sessions(self,
    +1935		dir = 'output',
    +1936		filename = None,
    +1937		save_to_file = True,
    +1938		print_out = True,
    +1939		output = None,
    +1940		):
    +1941		'''
    +1942		Print out and/or save to disk a table of sessions.
    +1943
    +1944		**Parameters**
    +1945
    +1946		+ `dir`: the directory in which to save the table
    +1947		+ `filename`: the name of the csv file to write to
    +1948		+ `save_to_file`: whether to save the table to disk
    +1949		+ `print_out`: whether to print out the table
    +1950		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +1951		    if set to `'raw'`: return a list of list of strings
    +1952		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +1953		'''
    +1954		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
    +1955		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
    +1956		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
    +1957
    +1958		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
    +1959		if include_a2:
    +1960			out[-1] += ['a2 ± SE']
    +1961		if include_b2:
    +1962			out[-1] += ['b2 ± SE']
    +1963		if include_c2:
    +1964			out[-1] += ['c2 ± SE']
    +1965		for session in self.sessions:
    +1966			out += [[
    +1967				session,
    +1968				f"{self.sessions[session]['Na']}",
    +1969				f"{self.sessions[session]['Nu']}",
    +1970				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
    +1971				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
    +1972				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
    +1973				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
    +1974				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
    +1975				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
    +1976				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
    +1977				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
    +1978				]]
    +1979			if include_a2:
    +1980				if self.sessions[session]['scrambling_drift']:
    +1981					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
    +1982				else:
    +1983					out[-1] += ['']
    +1984			if include_b2:
    +1985				if self.sessions[session]['slope_drift']:
    +1986					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
    +1987				else:
    +1988					out[-1] += ['']
    +1989			if include_c2:
    +1990				if self.sessions[session]['wg_drift']:
    +1991					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
    +1992				else:
    +1993					out[-1] += ['']
    +1994
    +1995		if save_to_file:
    +1996			if not os.path.exists(dir):
    +1997				os.makedirs(dir)
    +1998			if filename is None:
    +1999				filename = f'D{self._4x}_sessions.csv'
    +2000			with open(f'{dir}/{filename}', 'w') as fid:
    +2001				fid.write(make_csv(out))
    +2002		if print_out:
    +2003			self.msg('\n' + pretty_table(out))
    +2004		if output == 'raw':
    +2005			return out
    +2006		elif output == 'pretty':
    +2007			return pretty_table(out)
    +2008
    +2009
    +2010	@make_verbal
    +2011	def table_of_analyses(
    +2012		self,
    +2013		dir = 'output',
    +2014		filename = None,
    +2015		save_to_file = True,
    +2016		print_out = True,
    +2017		output = None,
    +2018		):
    +2019		'''
    +2020		Print out and/or save to disk a table of analyses.
    +2021
    +2022		**Parameters**
     2023
    -2024		out = [['UID','Session','Sample']]
    -2025		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
    -2026		for f in extra_fields:
    -2027			out[-1] += [f[0]]
    -2028		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
    -2029		for r in self:
    -2030			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
    -2031			for f in extra_fields:
    -2032				out[-1] += [f"{r[f[0]]:{f[1]}}"]
    -2033			out[-1] += [
    -2034				f"{r['d13Cwg_VPDB']:.3f}",
    -2035				f"{r['d18Owg_VSMOW']:.3f}",
    -2036				f"{r['d45']:.6f}",
    -2037				f"{r['d46']:.6f}",
    -2038				f"{r['d47']:.6f}",
    -2039				f"{r['d48']:.6f}",
    -2040				f"{r['d49']:.6f}",
    -2041				f"{r['d13C_VPDB']:.6f}",
    -2042				f"{r['d18O_VSMOW']:.6f}",
    -2043				f"{r['D47raw']:.6f}",
    -2044				f"{r['D48raw']:.6f}",
    -2045				f"{r['D49raw']:.6f}",
    -2046				f"{r[f'D{self._4x}']:.6f}"
    -2047				]
    -2048		if save_to_file:
    -2049			if not os.path.exists(dir):
    -2050				os.makedirs(dir)
    -2051			if filename is None:
    -2052				filename = f'D{self._4x}_analyses.csv'
    -2053			with open(f'{dir}/{filename}', 'w') as fid:
    -2054				fid.write(make_csv(out))
    -2055		if print_out:
    -2056			self.msg('\n' + pretty_table(out))
    -2057		return out
    -2058
    -2059	@make_verbal
    -2060	def covar_table(
    -2061		self,
    -2062		correl = False,
    -2063		dir = 'output',
    -2064		filename = None,
    -2065		save_to_file = True,
    -2066		print_out = True,
    -2067		output = None,
    -2068		):
    -2069		'''
    -2070		Print out, save to disk and/or return the variance-covariance matrix of D4x
    -2071		for all unknown samples.
    -2072
    -2073		**Parameters**
    -2074
    -2075		+ `dir`: the directory in which to save the csv
    -2076		+ `filename`: the name of the csv file to write to
    -2077		+ `save_to_file`: whether to save the csv
    -2078		+ `print_out`: whether to print out the matrix
    -2079		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
    -2080		    if set to `'raw'`: return a list of list of strings
    -2081		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2082		'''
    -2083		samples = sorted([u for u in self.unknowns])
    -2084		out = [[''] + samples]
    -2085		for s1 in samples:
    -2086			out.append([s1])
    -2087			for s2 in samples:
    -2088				if correl:
    -2089					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
    -2090				else:
    -2091					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
    -2092
    -2093		if save_to_file:
    -2094			if not os.path.exists(dir):
    -2095				os.makedirs(dir)
    -2096			if filename is None:
    +2024		+ `dir`: the directory in which to save the table
    +2025		+ `filename`: the name of the csv file to write to
    +2026		+ `save_to_file`: whether to save the table to disk
    +2027		+ `print_out`: whether to print out the table
    +2028		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +2029		    if set to `'raw'`: return a list of list of strings
    +2030		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2031		'''
    +2032
    +2033		out = [['UID','Session','Sample']]
    +2034		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
    +2035		for f in extra_fields:
    +2036			out[-1] += [f[0]]
    +2037		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
    +2038		for r in self:
    +2039			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
    +2040			for f in extra_fields:
    +2041				out[-1] += [f"{r[f[0]]:{f[1]}}"]
    +2042			out[-1] += [
    +2043				f"{r['d13Cwg_VPDB']:.3f}",
    +2044				f"{r['d18Owg_VSMOW']:.3f}",
    +2045				f"{r['d45']:.6f}",
    +2046				f"{r['d46']:.6f}",
    +2047				f"{r['d47']:.6f}",
    +2048				f"{r['d48']:.6f}",
    +2049				f"{r['d49']:.6f}",
    +2050				f"{r['d13C_VPDB']:.6f}",
    +2051				f"{r['d18O_VSMOW']:.6f}",
    +2052				f"{r['D47raw']:.6f}",
    +2053				f"{r['D48raw']:.6f}",
    +2054				f"{r['D49raw']:.6f}",
    +2055				f"{r[f'D{self._4x}']:.6f}"
    +2056				]
    +2057		if save_to_file:
    +2058			if not os.path.exists(dir):
    +2059				os.makedirs(dir)
    +2060			if filename is None:
    +2061				filename = f'D{self._4x}_analyses.csv'
    +2062			with open(f'{dir}/{filename}', 'w') as fid:
    +2063				fid.write(make_csv(out))
    +2064		if print_out:
    +2065			self.msg('\n' + pretty_table(out))
    +2066		return out
    +2067
    +2068	@make_verbal
    +2069	def covar_table(
    +2070		self,
    +2071		correl = False,
    +2072		dir = 'output',
    +2073		filename = None,
    +2074		save_to_file = True,
    +2075		print_out = True,
    +2076		output = None,
    +2077		):
    +2078		'''
    +2079		Print out, save to disk and/or return the variance-covariance matrix of D4x
    +2080		for all unknown samples.
    +2081
    +2082		**Parameters**
    +2083
    +2084		+ `dir`: the directory in which to save the csv
    +2085		+ `filename`: the name of the csv file to write to
    +2086		+ `save_to_file`: whether to save the csv
    +2087		+ `print_out`: whether to print out the matrix
    +2088		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
    +2089		    if set to `'raw'`: return a list of list of strings
    +2090		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2091		'''
    +2092		samples = sorted([u for u in self.unknowns])
    +2093		out = [[''] + samples]
    +2094		for s1 in samples:
    +2095			out.append([s1])
    +2096			for s2 in samples:
     2097				if correl:
    -2098					filename = f'D{self._4x}_correl.csv'
    +2098					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
     2099				else:
    -2100					filename = f'D{self._4x}_covar.csv'
    -2101			with open(f'{dir}/{filename}', 'w') as fid:
    -2102				fid.write(make_csv(out))
    -2103		if print_out:
    -2104			self.msg('\n'+pretty_table(out))
    -2105		if output == 'raw':
    -2106			return out
    -2107		elif output == 'pretty':
    -2108			return pretty_table(out)
    -2109
    -2110	@make_verbal
    -2111	def table_of_samples(
    -2112		self,
    -2113		dir = 'output',
    -2114		filename = None,
    -2115		save_to_file = True,
    -2116		print_out = True,
    -2117		output = None,
    -2118		):
    -2119		'''
    -2120		Print out, save to disk and/or return a table of samples.
    -2121
    -2122		**Parameters**
    -2123
    -2124		+ `dir`: the directory in which to save the csv
    -2125		+ `filename`: the name of the csv file to write to
    -2126		+ `save_to_file`: whether to save the csv
    -2127		+ `print_out`: whether to print out the table
    -2128		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -2129		    if set to `'raw'`: return a list of list of strings
    -2130		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2131		'''
    +2100					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
    +2101
    +2102		if save_to_file:
    +2103			if not os.path.exists(dir):
    +2104				os.makedirs(dir)
    +2105			if filename is None:
    +2106				if correl:
    +2107					filename = f'D{self._4x}_correl.csv'
    +2108				else:
    +2109					filename = f'D{self._4x}_covar.csv'
    +2110			with open(f'{dir}/{filename}', 'w') as fid:
    +2111				fid.write(make_csv(out))
    +2112		if print_out:
    +2113			self.msg('\n'+pretty_table(out))
    +2114		if output == 'raw':
    +2115			return out
    +2116		elif output == 'pretty':
    +2117			return pretty_table(out)
    +2118
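
When `correl = True`, each entry is the covariance normalized by the two corresponding standard errors. A minimal sketch of that conversion, using a hypothetical 2×2 covariance matrix:

```py
import numpy as np

# hypothetical covariance matrix for two unknown samples
CM = np.array([[2.5e-5, 1.0e-5],
               [1.0e-5, 4.0e-5]])

SE = np.diag(CM) ** 0.5                  # standard errors
correl = CM / SE[:, None] / SE[None, :]  # correlation matrix
print(correl)                            # diagonal terms equal 1 by construction
```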
    +2119	@make_verbal
    +2120	def table_of_samples(
    +2121		self,
    +2122		dir = 'output',
    +2123		filename = None,
    +2124		save_to_file = True,
    +2125		print_out = True,
    +2126		output = None,
    +2127		):
    +2128		'''
    +2129		Print out, save to disk and/or return a table of samples.
    +2130
    +2131		**Parameters**
     2132
    -2133		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
    -2134		for sample in self.anchors:
    -2135			out += [[
    -2136				f"{sample}",
    -2137				f"{self.samples[sample]['N']}",
    -2138				f"{self.samples[sample]['d13C_VPDB']:.2f}",
    -2139				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    -2140				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
    -2141				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
    -2142				]]
    -2143		for sample in self.unknowns:
    +2133		+ `dir`: the directory in which to save the csv
    +2134		+ `filename`: the name of the csv file to write to
    +2135		+ `save_to_file`: whether to save the csv
    +2136		+ `print_out`: whether to print out the table
    +2137		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +2138		    if set to `'raw'`: return a list of list of strings
    +2139		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2140		'''
    +2141
    +2142		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
    +2143		for sample in self.anchors:
     2144			out += [[
     2145				f"{sample}",
     2146				f"{self.samples[sample]['N']}",
     2147				f"{self.samples[sample]['d13C_VPDB']:.2f}",
     2148				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    -2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",
    -2150				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
    -2151				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
    -2152				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
    -2153				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
    -2154				]]
    -2155		if save_to_file:
    -2156			if not os.path.exists(dir):
    -2157				os.makedirs(dir)
    -2158			if filename is None:
    -2159				filename = f'D{self._4x}_samples.csv'
    -2160			with open(f'{dir}/{filename}', 'w') as fid:
    -2161				fid.write(make_csv(out))
    -2162		if print_out:
    -2163			self.msg('\n'+pretty_table(out))
    -2164		if output == 'raw':
    -2165			return out
    -2166		elif output == 'pretty':
    -2167			return pretty_table(out)
    -2168
    -2169
    -2170	def plot_sessions(self, dir = 'output', figsize = (8,8)):
    -2171		'''
    -2172		Generate session plots and save them to disk.
    -2173
    -2174		**Parameters**
    -2175
    -2176		+ `dir`: the directory in which to save the plots
    -2177		+ `figsize`: the width and height (in inches) of each plot
    -2178		'''
    -2179		if not os.path.exists(dir):
    -2180			os.makedirs(dir)
    -2181
    -2182		for session in self.sessions:
    -2183			sp = self.plot_single_session(session, xylimits = 'constant')
    -2184			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
    -2185			ppl.close(sp.fig)
    -2186
    -2187
    -2188	@make_verbal
    -2189	def consolidate_samples(self):
    -2190		'''
    -2191		Compile various statistics for each sample.
    -2192
    -2193		For each anchor sample:
    -2194
    -2195		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
    -2196		+ `SE_D47` or `SE_D48`: set to zero by definition
    -2197
    -2198		For each unknown sample:
    -2199
    -2200		+ `D47` or `D48`: the standardized Δ4x value for this unknown
    -2201		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
    -2202
    -2203		For each anchor and unknown:
    -2204
    -2205		+ `N`: the total number of analyses of this sample
    -2206		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
    -2207		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
    -2208		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
    -2209		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
    -2210		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
    -2211		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
    -2212		'''
    -2213		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
    -2214		for sample in self.samples:
    -2215			self.samples[sample]['N'] = len(self.samples[sample]['data'])
    -2216			if self.samples[sample]['N'] > 1:
    -2217				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
    -2218
    -2219			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
    -2220			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
    -2221
    -2222			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
    -2223			if len(D4x_pop) > 2:
    -2224				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
    -2225
    -2226		if self.standardization_method == 'pooled':
    -2227			for sample in self.anchors:
    -2228				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    -2229				self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2230			for sample in self.unknowns:
    -2231				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
    -2232				try:
    -2233					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
    -2234				except ValueError:
    -2235					# when `sample` is constrained by self.standardize(constraints = {...}),
    -2236					# it is no longer listed in self.standardization.var_names.
    -2237					# Temporary fix: define SE as zero for now
    -2238					self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2239
    -2240		elif self.standardization_method == 'indep_sessions':
    -2241			for sample in self.anchors:
    -2242				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    -2243				self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2244			for sample in self.unknowns:
    -2245				self.msg(f'Consolidating sample {sample}')
    -2246				self.unknowns[sample][f'session_D{self._4x}'] = {}
    -2247				session_avg = []
    -2248				for session in self.sessions:
    -2249					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
    -2250					if sdata:
    -2251						self.msg(f'{sample} found in session {session}')
    -2252						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
    -2253						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
    -2254						# !! TODO: sigma_s below does not account for temporal changes in standardization error
    -2255						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
    -2256						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
    -2257						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
    -2258						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
    -2259				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
    -2260				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
    -2261				wsum = sum([weights[s] for s in weights])
    -2262				for s in weights:
    -2263					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
    -2264
    -2265
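
The `p_Levene` values above rely on a Levene test with `center = 'median'` (cf. `scipy.stats.levene`). A minimal sketch with made-up Δ47 replicate populations:

```py
from scipy.stats import levene

# made-up D47 replicates for the reference sample and for a test sample
D47_ref = [0.601, 0.598, 0.605, 0.597, 0.603]
D47_smp = [0.644, 0.621, 0.662, 0.608]

W, p = levene(D47_ref, D47_smp, center = 'median')
print(f'p_Levene = {p:.3f}')  # small p: repeatabilities likely differ
```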
    -2266	def consolidate_sessions(self):
    -2267		'''
    -2268		Compute various statistics for each session.
    -2269
    -2270		+ `Na`: Number of anchor analyses in the session
    -2271		+ `Nu`: Number of unknown analyses in the session
    -2272		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
    -2273		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
    -2274		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
    -2275		+ `a`: scrambling factor
    -2276		+ `b`: compositional slope
    -2277		+ `c`: WG offset
    -2278		+ `SE_a`: Model standard error of `a`
    -2279		+ `SE_b`: Model standard error of `b`
    -2280		+ `SE_c`: Model standard error of `c`
    -2281		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
    -2282		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
    -2283		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
    -2284		+ `a2`: scrambling factor drift
    -2285		+ `b2`: compositional slope drift
    -2286		+ `c2`: WG offset drift
    -2287		+ `Np`: Number of standardization parameters to fit
    -2288		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
    -2289		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
    -2290		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
    -2291		'''
    -2292		for session in self.sessions:
    -2293			if 'd13Cwg_VPDB' not in self.sessions[session]:
    -2294				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
    -2295			if 'd18Owg_VSMOW' not in self.sessions[session]:
    -2296				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
    -2297			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
    -2298			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
    -2299
    -2300			self.msg(f'Computing repeatabilities for session {session}')
    -2301			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
    -2302			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
    -2303			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
    -2304
    -2305		if self.standardization_method == 'pooled':
    -2306			for session in self.sessions:
    -2307
    -2308				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
    -2309				i = self.standardization.var_names.index(f'a_{pf(session)}')
    -2310				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
    -2311
    -2312				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
    -2313				i = self.standardization.var_names.index(f'b_{pf(session)}')
    -2314				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
    -2315
    -2316				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
    -2317				i = self.standardization.var_names.index(f'c_{pf(session)}')
    -2318				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
    -2319
    -2320				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
    -2321				if self.sessions[session]['scrambling_drift']:
    -2322					i = self.standardization.var_names.index(f'a2_{pf(session)}')
    -2323					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
    -2324				else:
    -2325					self.sessions[session]['SE_a2'] = 0.
    -2326
    -2327				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
    -2328				if self.sessions[session]['slope_drift']:
    -2329					i = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2330					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
    -2331				else:
    -2332					self.sessions[session]['SE_b2'] = 0.
    -2333
    -2334				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
    -2335				if self.sessions[session]['wg_drift']:
    -2336					i = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2337					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
    -2338				else:
    -2339					self.sessions[session]['SE_c2'] = 0.
    -2340
    -2341				i = self.standardization.var_names.index(f'a_{pf(session)}')
    -2342				j = self.standardization.var_names.index(f'b_{pf(session)}')
    -2343				k = self.standardization.var_names.index(f'c_{pf(session)}')
    -2344				CM = np.zeros((6,6))
    -2345				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
    -2346				try:
    -2347					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
    -2348					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
    -2349					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
    -2350					try:
    -2351						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2352						CM[3,4] = self.standardization.covar[i2,j2]
    -2353						CM[4,3] = self.standardization.covar[j2,i2]
    -2354					except ValueError:
    -2355						pass
    -2356					try:
    -2357						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2358						CM[3,5] = self.standardization.covar[i2,k2]
    -2359						CM[5,3] = self.standardization.covar[k2,i2]
    -2360					except ValueError:
    -2361						pass
    -2362				except ValueError:
    -2363					pass
    -2364				try:
    -2365					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2366					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
    -2367					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
    -2368					try:
    -2369						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2370						CM[4,5] = self.standardization.covar[j2,k2]
    -2371						CM[5,4] = self.standardization.covar[k2,j2]
    -2372					except ValueError:
    -2373						pass
    -2374				except ValueError:
    -2375					pass
    -2376				try:
    -2377					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2378					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
    -2379					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
    -2380				except ValueError:
    -2381					pass
    -2382
    -2383				self.sessions[session]['CM'] = CM
    -2384
    -2385		elif self.standardization_method == 'indep_sessions':
    -2386			pass # Not implemented yet
    -2387
    -2388
    -2389	@make_verbal
    -2390	def repeatabilities(self):
    -2391		'''
    -2392		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
    -2393		(for all samples, for anchors, and for unknowns).
    -2394		'''
    -2395		self.msg('Computing reproducibilities for all sessions')
    +2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
    +2150				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
    +2151				]]
    +2152		for sample in self.unknowns:
    +2153			out += [[
    +2154				f"{sample}",
    +2155				f"{self.samples[sample]['N']}",
    +2156				f"{self.samples[sample]['d13C_VPDB']:.2f}",
    +2157				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    +2158				f"{self.samples[sample][f'D{self._4x}']:.4f}",
    +2159				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
    +2160				f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
    +2161				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
    +2162				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
    +2163				]]
    +2164		if save_to_file:
    +2165			if not os.path.exists(dir):
    +2166				os.makedirs(dir)
    +2167			if filename is None:
    +2168				filename = f'D{self._4x}_samples.csv'
    +2169			with open(f'{dir}/{filename}', 'w') as fid:
    +2170				fid.write(make_csv(out))
    +2171		if print_out:
    +2172			self.msg('\n'+pretty_table(out))
    +2173		if output == 'raw':
    +2174			return out
    +2175		elif output == 'pretty':
    +2176			return pretty_table(out)
    +2177
    +2178
    +2179	def plot_sessions(self, dir = 'output', figsize = (8,8)):
    +2180		'''
    +2181		Generate session plots and save them to disk.
    +2182
    +2183		**Parameters**
    +2184
    +2185		+ `dir`: the directory in which to save the plots
    +2186		+ `figsize`: the width and height (in inches) of each plot
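    +
    +		**Example** (a minimal sketch; the session and sample specifications below are illustrative):
    +
    +		```python
    +		from D47crunch import D47data, virtual_data
    +
    +		mydata = D47data(virtual_data(
    +			session = 'Session_01',
    +			samples = [
    +				dict(Sample = 'ETH-1', N = 4),
    +				dict(Sample = 'ETH-2', N = 4),
    +				dict(Sample = 'ETH-3', N = 4),
    +				dict(Sample = 'FOO', N = 8, D47 = 0.6, D48 = 0.2, d13C_VPDB = -5., d18O_VPDB = -5.),
    +				]))
    +		mydata.crunch()
    +		mydata.standardize()
    +		mydata.plot_sessions()    # saves output/D47_plot_Session_01.pdf
    +		```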
    +2187		'''
    +2188		if not os.path.exists(dir):
    +2189			os.makedirs(dir)
    +2190
    +2191		for session in self.sessions:
    +2192			sp = self.plot_single_session(session, xylimits = 'constant')
    +2193			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
    +2194			ppl.close(sp.fig)
    +2195
    +2196
    +2197	@make_verbal
    +2198	def consolidate_samples(self):
    +2199		'''
    +2200		Compile various statistics for each sample.
    +2201
    +2202		For each anchor sample:
    +2203
    +2204		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
    +2205		+ `SE_D47` or `SE_D48`: set to zero by definition
    +2206
    +2207		For each unknown sample:
    +2208
    +2209		+ `D47` or `D48`: the standardized Δ4x value for this unknown
    +2210		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
    +2211
    +2212		For each anchor and unknown:
    +2213
    +2214		+ `N`: the total number of analyses of this sample
    +2215		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
    +2216		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
    +2217		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
    +2218		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
    +2219		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
    +2220		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
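    +
    +		**Example** (a minimal sketch, assuming `mydata` is a `D47data` instance
    +		that has already been crunched and standardized, with an unknown sample
    +		named 'FOO'):
    +
    +		```python
    +		mydata.consolidate_samples()
    +		print(mydata.samples['FOO']['N'], mydata.samples['FOO']['SD_D47'])
    +		print(mydata.samples['FOO']['D47'], mydata.samples['FOO']['SE_D47'])
    +		```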
    +2221		'''
    +2222		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
    +2223		for sample in self.samples:
    +2224			self.samples[sample]['N'] = len(self.samples[sample]['data'])
    +2225			if self.samples[sample]['N'] > 1:
    +2226				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
    +2227
    +2228			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
    +2229			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
    +2230
    +2231			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
    +2232			if len(D4x_pop) > 2:
    +2233				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
    +2234
    +2235		if self.standardization_method == 'pooled':
    +2236			for sample in self.anchors:
    +2237				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    +2238				self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2239			for sample in self.unknowns:
    +2240				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
    +2241				try:
    +2242					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
    +2243				except ValueError:
    +2244					# when `sample` is constrained by self.standardize(constraints = {...}),
    +2245					# it is no longer listed in self.standardization.var_names.
    +2246					# Temporary fix: define SE as zero for now
    +2247				self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2248
    +2249		elif self.standardization_method == 'indep_sessions':
    +2250			for sample in self.anchors:
    +2251				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    +2252				self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2253			for sample in self.unknowns:
    +2254				self.msg(f'Consolidating sample {sample}')
    +2255				self.unknowns[sample][f'session_D{self._4x}'] = {}
    +2256				session_avg = []
    +2257				for session in self.sessions:
    +2258					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
    +2259					if sdata:
    +2260						self.msg(f'{sample} found in session {session}')
    +2261						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
    +2262						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
    +2263						# !! TODO: sigma_s below does not account for temporal changes in standardization error
    +2264						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
    +2265						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
    +2266						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
    +2267						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
    +2268				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
    +2269				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
    +2270				wsum = sum([weights[s] for s in weights])
    +2271				for s in weights:
    +2272					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
    +2273
    +2274
    +2275	def consolidate_sessions(self):
    +2276		'''
    +2277		Compute various statistics for each session.
    +2278
    +2279		+ `Na`: Number of anchor analyses in the session
    +2280		+ `Nu`: Number of unknown analyses in the session
    +2281		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
    +2282		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
    +2283		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
    +2284		+ `a`: scrambling factor
    +2285		+ `b`: compositional slope
    +2286		+ `c`: WG offset
    +2287		+ `SE_a`: Model standard error of `a`
    +2288		+ `SE_b`: Model standard error of `b`
    +2289		+ `SE_c`: Model standard error of `c`
    +2290		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
    +2291		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
    +2292		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
    +2293		+ `a2`: scrambling factor drift
    +2294		+ `b2`: compositional slope drift
    +2295		+ `c2`: WG offset drift
    +2296		+ `Np`: Number of standardization parameters to fit
    +2297		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
    +2298		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
    +2299		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
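    +
    +		**Example** (a minimal sketch, assuming `mydata` is a standardized
    +		`D47data` instance with a session named 'Session_01'):
    +
    +		```python
    +		s = mydata.sessions['Session_01']
    +		print(s['a'], s['SE_a'])    # scrambling factor and its model SE
    +		print(s['CM'].shape)        # 6x6 covariance matrix of (a, b, c, a2, b2, c2)
    +		```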
    +2300		'''
    +2301		for session in self.sessions:
    +2302			if 'd13Cwg_VPDB' not in self.sessions[session]:
    +2303				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
    +2304			if 'd18Owg_VSMOW' not in self.sessions[session]:
    +2305				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
    +2306			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
    +2307			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
    +2308
    +2309			self.msg(f'Computing repeatabilities for session {session}')
    +2310			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
    +2311			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
    +2312			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
    +2313
    +2314		if self.standardization_method == 'pooled':
    +2315			for session in self.sessions:
    +2316
    +2317				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
    +2318				i = self.standardization.var_names.index(f'a_{pf(session)}')
    +2319				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
    +2320
    +2321				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
    +2322				i = self.standardization.var_names.index(f'b_{pf(session)}')
    +2323				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
    +2324
    +2325				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
    +2326				i = self.standardization.var_names.index(f'c_{pf(session)}')
    +2327				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
    +2328
    +2329				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
    +2330				if self.sessions[session]['scrambling_drift']:
    +2331					i = self.standardization.var_names.index(f'a2_{pf(session)}')
    +2332					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
    +2333				else:
    +2334					self.sessions[session]['SE_a2'] = 0.
    +2335
    +2336				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
    +2337				if self.sessions[session]['slope_drift']:
    +2338					i = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2339					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
    +2340				else:
    +2341					self.sessions[session]['SE_b2'] = 0.
    +2342
    +2343				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
    +2344				if self.sessions[session]['wg_drift']:
    +2345					i = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2346					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
    +2347				else:
    +2348					self.sessions[session]['SE_c2'] = 0.
    +2349
    +2350				i = self.standardization.var_names.index(f'a_{pf(session)}')
    +2351				j = self.standardization.var_names.index(f'b_{pf(session)}')
    +2352				k = self.standardization.var_names.index(f'c_{pf(session)}')
    +2353				CM = np.zeros((6,6))
    +2354				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
    +2355				try:
    +2356					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
    +2357					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
    +2358					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
    +2359					try:
    +2360						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2361						CM[3,4] = self.standardization.covar[i2,j2]
    +2362						CM[4,3] = self.standardization.covar[j2,i2]
    +2363					except ValueError:
    +2364						pass
    +2365					try:
    +2366						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2367						CM[3,5] = self.standardization.covar[i2,k2]
    +2368						CM[5,3] = self.standardization.covar[k2,i2]
    +2369					except ValueError:
    +2370						pass
    +2371				except ValueError:
    +2372					pass
    +2373				try:
    +2374					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2375					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
    +2376					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
    +2377					try:
    +2378						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2379						CM[4,5] = self.standardization.covar[j2,k2]
    +2380						CM[5,4] = self.standardization.covar[k2,j2]
    +2381					except ValueError:
    +2382						pass
    +2383				except ValueError:
    +2384					pass
    +2385				try:
    +2386					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2387					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
    +2388					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
    +2389				except ValueError:
    +2390					pass
    +2391
    +2392				self.sessions[session]['CM'] = CM
    +2393
    +2394		elif self.standardization_method == 'indep_sessions':
    +2395			pass # Not implemented yet
     2396
    -2397		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
    -2398		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
    -2399		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
    -2400		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
    -2401		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
    -2402
    -2403
    -2404	@make_verbal
    -2405	def consolidate(self, tables = True, plots = True):
    -2406		'''
    -2407		Collect information about samples, sessions and repeatabilities.
    -2408		'''
    -2409		self.consolidate_samples()
    -2410		self.consolidate_sessions()
    -2411		self.repeatabilities()
    +2397
    +2398	@make_verbal
    +2399	def repeatabilities(self):
    +2400		'''
    +2401		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
    +2402		(for all samples, for anchors, and for unknowns).
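    +
    +		**Example** (a minimal sketch, assuming `mydata` is a standardized `D47data` instance):
    +
    +		```python
    +		mydata.repeatabilities()
    +		print(mydata.repeatability['r_D47'])     # all samples
    +		print(mydata.repeatability['r_D47a'])    # anchors only
    +		print(mydata.repeatability['r_D47u'])    # unknowns only
    +		```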
    +2403		'''
    +2404		self.msg('Computing repeatabilities for all samples')
    +2405
    +2406		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
    +2407		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
    +2408		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
    +2409		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
    +2410		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
    +2411
     2412
    -2413		if tables:
    -2414			self.summary()
    -2415			self.table_of_sessions()
    -2416			self.table_of_analyses()
    -2417			self.table_of_samples()
    -2418
    -2419		if plots:
    -2420			self.plot_sessions()
    +2413	@make_verbal
    +2414	def consolidate(self, tables = True, plots = True):
    +2415		'''
    +2416		Collect information about samples, sessions and repeatabilities.
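    +
    +		**Example** (a minimal sketch; by default, `consolidate()` also generates
    +		summary tables and session plots):
    +
    +		```python
    +		mydata.consolidate(tables = False, plots = False)
    +		```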
    +2417		'''
    +2418		self.consolidate_samples()
    +2419		self.consolidate_sessions()
    +2420		self.repeatabilities()
     2421
    -2422
    -2423	@make_verbal
    -2424	def rmswd(self,
    -2425		samples = 'all samples',
    -2426		sessions = 'all sessions',
    -2427		):
    -2428		'''
    -2429		Compute the χ2, root mean squared weighted deviation
    -2430		(i.e. reduced χ2), and corresponding degrees of freedom of the
    -2431		Δ4x values for samples in `samples` and sessions in `sessions`.
    -2432		
    -2433		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
    -2434		'''
    -2435		if samples == 'all samples':
    -2436			mysamples = [k for k in self.samples]
    -2437		elif samples == 'anchors':
    -2438			mysamples = [k for k in self.anchors]
    -2439		elif samples == 'unknowns':
    -2440			mysamples = [k for k in self.unknowns]
    -2441		else:
    -2442			mysamples = samples
    -2443
    -2444		if sessions == 'all sessions':
    -2445			sessions = [k for k in self.sessions]
    -2446
    -2447		chisq, Nf = 0, 0
    -2448		for sample in mysamples :
    -2449			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2450			if len(G) > 1 :
    -2451				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
    -2452				Nf += (len(G) - 1)
    -2453				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
    -2454		r = (chisq / Nf)**.5 if Nf > 0 else 0
    -2455		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
    -2456		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
    -2457
    -2458	
    -2459	@make_verbal
    -2460	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
    -2461		'''
    -2462		Compute the repeatability of `[r[key] for r in self]`
    -2463		'''
    -2464		# NB: it's debatable whether rD47 should be computed
    -2465		# with Nf = len(self)-len(self.samples) instead of
    -2466		# Nf = len(self) - len(self.unknwons) - 3*len(self.sessions)
    -2467
    -2468		if samples == 'all samples':
    -2469			mysamples = [k for k in self.samples]
    -2470		elif samples == 'anchors':
    -2471			mysamples = [k for k in self.anchors]
    -2472		elif samples == 'unknowns':
    -2473			mysamples = [k for k in self.unknowns]
    -2474		else:
    -2475			mysamples = samples
    +2422		if tables:
    +2423			self.summary()
    +2424			self.table_of_sessions()
    +2425			self.table_of_analyses()
    +2426			self.table_of_samples()
    +2427
    +2428		if plots:
    +2429			self.plot_sessions()
    +2430
    +2431
    +2432	@make_verbal
    +2433	def rmswd(self,
    +2434		samples = 'all samples',
    +2435		sessions = 'all sessions',
    +2436		):
    +2437		'''
    +2438		Compute the χ2, root mean squared weighted deviation
    +2439		(i.e. reduced χ2), and corresponding degrees of freedom of the
    +2440		Δ4x values for samples in `samples` and sessions in `sessions`.
    +2441		
    +2442		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
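    +
    +		**Example** (a minimal sketch; the returned `rmswd` equals `(chisq / Nf)**0.5`):
    +
    +		```python
    +		out = mydata.rmswd(samples = 'anchors')
    +		print(out['rmswd'], out['chisq'], out['Nf'])
    +		```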
    +2443		'''
    +2444		if samples == 'all samples':
    +2445			mysamples = [k for k in self.samples]
    +2446		elif samples == 'anchors':
    +2447			mysamples = [k for k in self.anchors]
    +2448		elif samples == 'unknowns':
    +2449			mysamples = [k for k in self.unknowns]
    +2450		else:
    +2451			mysamples = samples
    +2452
    +2453		if sessions == 'all sessions':
    +2454			sessions = [k for k in self.sessions]
    +2455
    +2456		chisq, Nf = 0, 0
    +2457		for sample in mysamples :
    +2458			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2459			if len(G) > 1 :
    +2460				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
    +2461				Nf += (len(G) - 1)
    +2462				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
    +2463		r = (chisq / Nf)**.5 if Nf > 0 else 0
    +2464		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
    +2465		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
    +2466
    +2467	
    +2468	@make_verbal
    +2469	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
    +2470		'''
    +2471		Compute the repeatability of `[r[key] for r in self]`
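    +
    +		**Example** (a minimal sketch, assuming `mydata` is a standardized `D47data` instance):
    +
    +		```python
    +		r = mydata.compute_r('D47', samples = 'unknowns')    # repeatability in permil
    +		```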
    +2472		'''
    +2473		# NB: it's debatable whether rD47 should be computed
    +2474		# with Nf = len(self)-len(self.samples) instead of
    +2475		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
     2476
    -2477		if sessions == 'all sessions':
    -2478			sessions = [k for k in self.sessions]
    -2479
    -2480		if key in ['D47', 'D48']:
    -2481			chisq, Nf = 0, 0
    -2482			for sample in mysamples :
    -2483				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2484				if len(X) > 1 :
    -2485					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
    -2486					if sample in self.unknowns:
    -2487						Nf += len(X) - 1
    -2488					else:
    -2489						Nf += len(X)
    -2490			if samples in ['anchors', 'all samples']:
    -2491				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
    -2492			r = (chisq / Nf)**.5 if Nf > 0 else 0
    -2493
    -2494		else: # if key not in ['D47', 'D48']
    -2495			chisq, Nf = 0, 0
    -2496			for sample in mysamples :
    -2497				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2498				if len(X) > 1 :
    -2499					Nf += len(X) - 1
    -2500					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
    +2477		if samples == 'all samples':
    +2478			mysamples = [k for k in self.samples]
    +2479		elif samples == 'anchors':
    +2480			mysamples = [k for k in self.anchors]
    +2481		elif samples == 'unknowns':
    +2482			mysamples = [k for k in self.unknowns]
    +2483		else:
    +2484			mysamples = samples
    +2485
    +2486		if sessions == 'all sessions':
    +2487			sessions = [k for k in self.sessions]
    +2488
    +2489		if key in ['D47', 'D48']:
    +2490			chisq, Nf = 0, 0
    +2491			for sample in mysamples :
    +2492				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2493				if len(X) > 1 :
    +2494					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
    +2495					if sample in self.unknowns:
    +2496						Nf += len(X) - 1
    +2497					else:
    +2498						Nf += len(X)
    +2499			if samples in ['anchors', 'all samples']:
    +2500				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
     2501			r = (chisq / Nf)**.5 if Nf > 0 else 0
     2502
    -2503		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
    -2504		return r
    -2505
    -2506	def sample_average(self, samples, weights = 'equal', normalize = True):
    -2507		'''
    -2508		Weighted average Δ4x value of a group of samples, accounting for covariance.
    -2509
    -2510		Returns the weighed average Δ4x value and associated SE
    -2511		of a group of samples. Weights are equal by default. If `normalize` is
    -2512		true, `weights` will be rescaled so that their sum equals 1.
    -2513
    -2514		**Examples**
    -2515
    -2516		```python
    -2517		self.sample_average(['X','Y'], [1, 2])
    -2518		```
    -2519
    -2520		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
    -2521		where Δ4x(X) and Δ4x(Y) are the average Δ4x
    -2522		values of samples X and Y, respectively.
    -2523
    -2524		```python
    -2525		self.sample_average(['X','Y'], [1, -1], normalize = False)
    -2526		```
    -2527
    -2528		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
    -2529		'''
    -2530		if weights == 'equal':
    -2531			weights = [1/len(samples)] * len(samples)
    +2503		else: # if key not in ['D47', 'D48']
    +2504			chisq, Nf = 0, 0
    +2505			for sample in mysamples :
    +2506				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2507				if len(X) > 1 :
    +2508					Nf += len(X) - 1
    +2509					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
    +2510			r = (chisq / Nf)**.5 if Nf > 0 else 0
    +2511
    +2512		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
    +2513		return r
    +2514
    +2515	def sample_average(self, samples, weights = 'equal', normalize = True):
    +2516		'''
    +2517		Weighted average Δ4x value of a group of samples, accounting for covariance.
    +2518
    +2519		Returns the weighted average Δ4x value and associated SE
    +2520		of a group of samples. Weights are equal by default. If `normalize` is
    +2521		`True`, `weights` will be rescaled so that their sum equals 1.
    +2522
    +2523		**Examples**
    +2524
    +2525		```python
    +2526		self.sample_average(['X','Y'], [1, 2])
    +2527		```
    +2528
    +2529		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
    +2530		where Δ4x(X) and Δ4x(Y) are the average Δ4x
    +2531		values of samples X and Y, respectively.
     2532
    -2533		if normalize:
    -2534			s = sum(weights)
    -2535			if s:
    -2536				weights = [w/s for w in weights]
    -2537
    -2538		try:
    -2539# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
    -2540# 			C = self.standardization.covar[indices,:][:,indices]
    -2541			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
    -2542			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
    -2543			return correlated_sum(X, C, weights)
    -2544		except ValueError:
    -2545			return (0., 0.)
    +2533		```python
    +2534		self.sample_average(['X','Y'], [1, -1], normalize = False)
    +2535		```
    +2536
    +2537		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
    +2538		'''
    +2539		if weights == 'equal':
    +2540			weights = [1/len(samples)] * len(samples)
    +2541
    +2542		if normalize:
    +2543			s = sum(weights)
    +2544			if s:
    +2545				weights = [w/s for w in weights]
     2546
    -2547
    -2548	def sample_D4x_covar(self, sample1, sample2 = None):
    -2549		'''
    -2550		Covariance between Δ4x values of samples
    -2551
    -2552		Returns the error covariance between the average Δ4x values of two
    -2553		samples. If if only `sample_1` is specified, or if `sample_1 == sample_2`),
    -2554		returns the Δ4x variance for that sample.
    -2555		'''
    -2556		if sample2 is None:
    -2557			sample2 = sample1
    -2558		if self.standardization_method == 'pooled':
    -2559			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
    -2560			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
    -2561			return self.standardization.covar[i, j]
    -2562		elif self.standardization_method == 'indep_sessions':
    -2563			if sample1 == sample2:
    -2564				return self.samples[sample1][f'SE_D{self._4x}']**2
    -2565			else:
    -2566				c = 0
    -2567				for session in self.sessions:
    -2568					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
    -2569					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
    -2570					if sdata1 and sdata2:
    -2571						a = self.sessions[session]['a']
    -2572						# !! TODO: CM below does not account for temporal changes in standardization parameters
    -2573						CM = self.sessions[session]['CM'][:3,:3]
    -2574						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
    -2575						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
    -2576						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
    -2577						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
    -2578						c += (
    -2579							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
    -2580							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
    -2581							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
    -2582							@ CM
    -2583							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
    -2584							) / a**2
    -2585				return float(c)
    -2586
    -2587	def sample_D4x_correl(self, sample1, sample2 = None):
    -2588		'''
    -2589		Correlation between Δ4x errors of samples
    -2590
    -2591		Returns the error correlation between the average Δ4x values of two samples.
    -2592		'''
    -2593		if sample2 is None or sample2 == sample1:
    -2594			return 1.
    -2595		return (
    -2596			self.sample_D4x_covar(sample1, sample2)
    -2597			/ self.unknowns[sample1][f'SE_D{self._4x}']
    -2598			/ self.unknowns[sample2][f'SE_D{self._4x}']
    -2599			)
    -2600
    -2601	def plot_single_session(self,
    -2602		session,
    -2603		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
    -2604		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
    -2605		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
    -2606		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
    -2607		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
    -2608		xylimits = 'free', # | 'constant'
    -2609		x_label = None,
    -2610		y_label = None,
    -2611		error_contour_interval = 'auto',
    -2612		fig = 'new',
    -2613		):
    -2614		'''
    -2615		Generate plot for a single session
    -2616		'''
    -2617		if x_label is None:
    -2618			x_label = f'δ$_{{{self._4x}}}$ (‰)'
    -2619		if y_label is None:
    -2620			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
    -2621
    -2622		out = _SessionPlot()
    -2623		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
    -2624		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
    -2625		
    -2626		if fig == 'new':
    -2627			out.fig = ppl.figure(figsize = (6,6))
    -2628			ppl.subplots_adjust(.1,.1,.9,.9)
    -2629
    -2630		out.anchor_analyses, = ppl.plot(
    -2631			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    -2632			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    -2633			**kw_plot_anchors)
    -2634		out.unknown_analyses, = ppl.plot(
    -2635			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    -2636			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    -2637			**kw_plot_unknowns)
    -2638		out.anchor_avg = ppl.plot(
    -2639			np.array([ np.array([
    -2640				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    -2641				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    -2642				]) for sample in anchors]).T,
    -2643			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
    -2644			**kw_plot_anchor_avg)
    -2645		out.unknown_avg = ppl.plot(
    -2646			np.array([ np.array([
    -2647				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    -2648				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    -2649				]) for sample in unknowns]).T,
    -2650			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
    -2651			**kw_plot_unknown_avg)
    -2652		if xylimits == 'constant':
    -2653			x = [r[f'd{self._4x}'] for r in self]
    -2654			y = [r[f'D{self._4x}'] for r in self]
    -2655			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
    -2656			w, h = x2-x1, y2-y1
    -2657			x1 -= w/20
    -2658			x2 += w/20
    -2659			y1 -= h/20
    -2660			y2 += h/20
    -2661			ppl.axis([x1, x2, y1, y2])
    -2662		elif xylimits == 'free':
    -2663			x1, x2, y1, y2 = ppl.axis()
    -2664		else:
    -2665			x1, x2, y1, y2 = ppl.axis(xylimits)
    -2666				
    -2667		if error_contour_interval != 'none':
    -2668			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
    -2669			XI,YI = np.meshgrid(xi, yi)
    -2670			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
    -2671			if error_contour_interval == 'auto':
    -2672				rng = np.max(SI) - np.min(SI)
    -2673				if rng <= 0.01:
    -2674					cinterval = 0.001
    -2675				elif rng <= 0.03:
    -2676					cinterval = 0.004
    -2677				elif rng <= 0.1:
    -2678					cinterval = 0.01
    -2679				elif rng <= 0.3:
    -2680					cinterval = 0.03
    -2681				elif rng <= 1.:
    -2682					cinterval = 0.1
    -2683				else:
    -2684					cinterval = 0.5
    -2685			else:
    -2686				cinterval = error_contour_interval
    -2687
    -2688			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
    -2689			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
    -2690			out.clabel = ppl.clabel(out.contour)
    -2691
    -2692		ppl.xlabel(x_label)
    -2693		ppl.ylabel(y_label)
    -2694		ppl.title(session, weight = 'bold')
    -2695		ppl.grid(alpha = .2)
    -2696		out.ax = ppl.gca()		
    -2697
    -2698		return out
    -2699
    -2700	def plot_residuals(
    -2701		self,
    -2702		hist = False,
    -2703		binwidth = 2/3,
    -2704		dir = 'output',
    -2705		filename = None,
    -2706		highlight = [],
    -2707		colors = None,
    -2708		figsize = None,
    -2709		):
    -2710		'''
    -2711		Plot residuals of each analysis as a function of time (actually, as a function of
    -2712		the order of analyses in the `D4xdata` object)
    -2713
    -2714		+ `hist`: whether to add a histogram of residuals
    -2715		+ `histbins`: specify bin edges for the histogram
    -2716		+ `dir`: the directory in which to save the plot
    -2717		+ `highlight`: a list of samples to highlight
    -2718		+ `colors`: a dict of `{<sample>: <color>}` for all samples
    -2719		+ `figsize`: (width, height) of figure
    -2720		'''
    -2721		# Layout
    -2722		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
    -2723		if hist:
    -2724			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
    -2725			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
    -2726		else:
    -2727			ppl.subplots_adjust(.08,.05,.78,.8)
    -2728			ax1 = ppl.subplot(111)
    -2729		
    -2730		# Colors
    -2731		N = len(self.anchors)
    -2732		if colors is None:
    -2733			if len(highlight) > 0:
    -2734				Nh = len(highlight)
    -2735				if Nh == 1:
    -2736					colors = {highlight[0]: (0,0,0)}
    -2737				elif Nh == 3:
    -2738					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
    -2739				elif Nh == 4:
    -2740					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    -2741				else:
    -2742					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
    -2743			else:
    -2744				if N == 3:
    -2745					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
    -2746				elif N == 4:
    -2747					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    -2748				else:
    -2749					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
    -2750
    -2751		ppl.sca(ax1)
    -2752		
    -2753		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
    -2754
    -2755		session = self[0]['Session']
    -2756		x1 = 0
    -2757# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
    -2758		x_sessions = {}
    -2759		one_or_more_singlets = False
    -2760		one_or_more_multiplets = False
    -2761		multiplets = set()
    -2762		for k,r in enumerate(self):
    -2763			if r['Session'] != session:
    -2764				x2 = k-1
    -2765				x_sessions[session] = (x1+x2)/2
    -2766				ppl.axvline(k - 0.5, color = 'k', lw = .5)
    -2767				session = r['Session']
    -2768				x1 = k
    -2769			singlet = len(self.samples[r['Sample']]['data']) == 1
    -2770			if not singlet:
    -2771				multiplets.add(r['Sample'])
    -2772			if r['Sample'] in self.unknowns:
    -2773				if singlet:
    -2774					one_or_more_singlets = True
    -2775				else:
    -2776					one_or_more_multiplets = True
    -2777			kw = dict(
    -2778				marker = 'x' if singlet else '+',
    -2779				ms = 4 if singlet else 5,
    -2780				ls = 'None',
    -2781				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
    -2782				mew = 1,
    -2783				alpha = 0.2 if singlet else 1,
    -2784				)
    -2785			if highlight and r['Sample'] not in highlight:
    -2786				kw['alpha'] = 0.2
    -2787			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
    -2788		x2 = k
    -2789		x_sessions[session] = (x1+x2)/2
    -2790
    -2791		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
    -2792		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
    -2793		if not hist:
    -2794			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
    -2795			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
    -2796
    -2797		xmin, xmax, ymin, ymax = ppl.axis()
    -2798		for s in x_sessions:
    -2799			ppl.text(
    -2800				x_sessions[s],
    -2801				ymax +1,
    -2802				s,
    -2803				va = 'bottom',
    -2804				**(
    -2805					dict(ha = 'center')
    -2806					if len(self.sessions[s]['data']) > (0.15 * len(self))
    -2807					else dict(ha = 'left', rotation = 45)
    -2808					)
    -2809				)
    -2810
    -2811		if hist:
    -2812			ppl.sca(ax2)
    -2813
    -2814		for s in colors:
    -2815			kw['marker'] = '+'
    -2816			kw['ms'] = 5
    -2817			kw['mec'] = colors[s]
    -2818			kw['label'] = s
    -2819			kw['alpha'] = 1
    -2820			ppl.plot([], [], **kw)
    -2821
    -2822		kw['mec'] = (0,0,0)
    -2823
    -2824		if one_or_more_singlets:
    -2825			kw['marker'] = 'x'
    -2826			kw['ms'] = 4
    -2827			kw['alpha'] = .2
    -2828			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
    +2547		try:
    +2548# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
    +2549# 			C = self.standardization.covar[indices,:][:,indices]
    +2550			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
    +2551			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
    +2552			return correlated_sum(X, C, weights)
    +2553		except ValueError:
    +2554			return (0., 0.)
    +2555
    +2556
    +2557	def sample_D4x_covar(self, sample1, sample2 = None):
    +2558		'''
    +2559		Covariance between Δ4x values of samples
    +2560
    +2561		Returns the error covariance between the average Δ4x values of two
    +2562		samples. If only `sample1` is specified, or if `sample1 == sample2`,
    +2563		returns the Δ4x variance for that sample.
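    +
    +		**Example** (a minimal sketch, assuming `mydata` includes two unknowns
    +		named 'FOO' and 'BAR'):
    +
    +		```python
    +		covar = mydata.sample_D4x_covar('FOO', 'BAR')
    +		var = mydata.sample_D4x_covar('FOO')    # Δ4x variance of sample FOO
    +		```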
    +2564		'''
    +2565		if sample2 is None:
    +2566			sample2 = sample1
    +2567		if self.standardization_method == 'pooled':
    +2568			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
    +2569			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
    +2570			return self.standardization.covar[i, j]
    +2571		elif self.standardization_method == 'indep_sessions':
    +2572			if sample1 == sample2:
    +2573				return self.samples[sample1][f'SE_D{self._4x}']**2
    +2574			else:
    +2575				c = 0
    +2576				for session in self.sessions:
    +2577					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
    +2578					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
    +2579					if sdata1 and sdata2:
    +2580						a = self.sessions[session]['a']
    +2581						# !! TODO: CM below does not account for temporal changes in standardization parameters
    +2582						CM = self.sessions[session]['CM'][:3,:3]
    +2583						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
    +2584						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
    +2585						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
    +2586						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
    +2587						c += (
    +2588							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
    +2589							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
    +2590							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
    +2591							@ CM
    +2592							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
    +2593							) / a**2
    +2594				return float(c)
    +2595
    +2596	def sample_D4x_correl(self, sample1, sample2 = None):
    +2597		'''
    +2598		Correlation between Δ4x errors of samples
    +2599
    +2600		Returns the error correlation between the average Δ4x values of two samples.
    +2601		'''
    +2602		if sample2 is None or sample2 == sample1:
    +2603			return 1.
    +2604		return (
    +2605			self.sample_D4x_covar(sample1, sample2)
    +2606			/ self.unknowns[sample1][f'SE_D{self._4x}']
    +2607			/ self.unknowns[sample2][f'SE_D{self._4x}']
    +2608			)
    +2609
    +2610	def plot_single_session(self,
    +2611		session,
    +2612		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
    +2613		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
    +2614		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
    +2615		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
    +2616		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
    +2617		xylimits = 'free', # | 'constant'
    +2618		x_label = None,
    +2619		y_label = None,
    +2620		error_contour_interval = 'auto',
    +2621		fig = 'new',
    +2622		):
    +2623		'''
    +2624		Generate a plot of Δ4x vs δ4x for a single session.
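    +
    +		**Example** (a minimal sketch, assuming `mydata` includes a session named 'Session_01'):
    +
    +		```python
    +		sp = mydata.plot_single_session('Session_01', xylimits = 'constant')
    +		sp.fig.savefig('Session_01.pdf')
    +		```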
    +2625		'''
    +2626		if x_label is None:
    +2627			x_label = f'δ$_{{{self._4x}}}$ (‰)'
    +2628		if y_label is None:
    +2629			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
    +2630
    +2631		out = _SessionPlot()
    +2632		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
    +2633		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
    +2634		
    +2635		if fig == 'new':
    +2636			out.fig = ppl.figure(figsize = (6,6))
    +2637			ppl.subplots_adjust(.1,.1,.9,.9)
    +2638
    +2639		out.anchor_analyses, = ppl.plot(
    +2640			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    +2641			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    +2642			**kw_plot_anchors)
    +2643		out.unknown_analyses, = ppl.plot(
    +2644			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    +2645			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    +2646			**kw_plot_unknowns)
    +2647		out.anchor_avg = ppl.plot(
    +2648			np.array([ np.array([
    +2649				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    +2650				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    +2651				]) for sample in anchors]).T,
    +2652			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
    +2653			**kw_plot_anchor_avg)
    +2654		out.unknown_avg = ppl.plot(
    +2655			np.array([ np.array([
    +2656				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    +2657				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    +2658				]) for sample in unknowns]).T,
    +2659			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
    +2660			**kw_plot_unknown_avg)
    +2661		if xylimits == 'constant':
    +2662			x = [r[f'd{self._4x}'] for r in self]
    +2663			y = [r[f'D{self._4x}'] for r in self]
    +2664			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
    +2665			w, h = x2-x1, y2-y1
    +2666			x1 -= w/20
    +2667			x2 += w/20
    +2668			y1 -= h/20
    +2669			y2 += h/20
    +2670			ppl.axis([x1, x2, y1, y2])
    +2671		elif xylimits == 'free':
    +2672			x1, x2, y1, y2 = ppl.axis()
    +2673		else:
    +2674			x1, x2, y1, y2 = ppl.axis(xylimits)
    +2675				
    +2676		if error_contour_interval != 'none':
    +2677			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
    +2678			XI,YI = np.meshgrid(xi, yi)
    +2679			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
    +2680			if error_contour_interval == 'auto':
    +2681				rng = np.max(SI) - np.min(SI)
    +2682				if rng <= 0.01:
    +2683					cinterval = 0.001
    +2684				elif rng <= 0.03:
    +2685					cinterval = 0.004
    +2686				elif rng <= 0.1:
    +2687					cinterval = 0.01
    +2688				elif rng <= 0.3:
    +2689					cinterval = 0.03
    +2690				elif rng <= 1.:
    +2691					cinterval = 0.1
    +2692				else:
    +2693					cinterval = 0.5
    +2694			else:
    +2695				cinterval = error_contour_interval
    +2696
    +2697			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
    +2698			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
    +2699			out.clabel = ppl.clabel(out.contour)
    +2700
    +2701		ppl.xlabel(x_label)
    +2702		ppl.ylabel(y_label)
    +2703		ppl.title(session, weight = 'bold')
    +2704		ppl.grid(alpha = .2)
    +2705		out.ax = ppl.gca()		
    +2706
    +2707		return out
    +2708
    +2709	def plot_residuals(
    +2710		self,
    +2711		hist = False,
    +2712		binwidth = 2/3,
    +2713		dir = 'output',
    +2714		filename = None,
    +2715		highlight = [],
    +2716		colors = None,
    +2717		figsize = None,
    +2718		):
    +2719		'''
    +2720		Plot residuals of each analysis as a function of time (actually, as a function of
    +2721		the order of analyses in the `D4xdata` object)
    +2722
    +2723		+ `hist`: whether to add a histogram of residuals
    +2724		+ `binwidth`: width of histogram bins, specified as a multiple of the Δ4x repeatability
    +2725		+ `dir`: the directory in which to save the plot
    +		+ `filename`: name of the plot file (`None` to return the figure without saving it, `''` for the default name)
    +2726		+ `highlight`: a list of samples to highlight
    +2727		+ `colors`: a dict of `{<sample>: <color>}` for all samples
    +2728		+ `figsize`: (width, height) of figure
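    +
    +		**Example** (a minimal sketch; with `filename = ''` the plot is saved under
    +		the default name, e.g. `D47_residuals.pdf` for a `D47data` instance):
    +
    +		```python
    +		mydata.plot_residuals(hist = True, filename = '')
    +		```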
    +2729		'''
    +2730		# Layout
    +2731		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
    +2732		if hist:
    +2733			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
    +2734			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
    +2735		else:
    +2736			ppl.subplots_adjust(.08,.05,.78,.8)
    +2737			ax1 = ppl.subplot(111)
    +2738		
    +2739		# Colors
    +2740		N = len(self.anchors)
    +2741		if colors is None:
    +2742			if len(highlight) > 0:
    +2743				Nh = len(highlight)
    +2744				if Nh == 1:
    +2745					colors = {highlight[0]: (0,0,0)}
    +2746				elif Nh == 3:
    +2747					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
    +2748				elif Nh == 4:
    +2749					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    +2750				else:
    +2751					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
    +2752			else:
    +2753				if N == 3:
    +2754					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
    +2755				elif N == 4:
    +2756					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    +2757				else:
    +2758					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
    +2759
    +2760		ppl.sca(ax1)
    +2761		
    +2762		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
    +2763
    +2764		session = self[0]['Session']
    +2765		x1 = 0
    +2766# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
    +2767		x_sessions = {}
    +2768		one_or_more_singlets = False
    +2769		one_or_more_multiplets = False
    +2770		multiplets = set()
    +2771		for k,r in enumerate(self):
    +2772			if r['Session'] != session:
    +2773				x2 = k-1
    +2774				x_sessions[session] = (x1+x2)/2
    +2775				ppl.axvline(k - 0.5, color = 'k', lw = .5)
    +2776				session = r['Session']
    +2777				x1 = k
    +2778			singlet = len(self.samples[r['Sample']]['data']) == 1
    +2779			if not singlet:
    +2780				multiplets.add(r['Sample'])
    +2781			if r['Sample'] in self.unknowns:
    +2782				if singlet:
    +2783					one_or_more_singlets = True
    +2784				else:
    +2785					one_or_more_multiplets = True
    +2786			kw = dict(
    +2787				marker = 'x' if singlet else '+',
    +2788				ms = 4 if singlet else 5,
    +2789				ls = 'None',
    +2790				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
    +2791				mew = 1,
    +2792				alpha = 0.2 if singlet else 1,
    +2793				)
    +2794			if highlight and r['Sample'] not in highlight:
    +2795				kw['alpha'] = 0.2
    +2796			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
    +2797		x2 = k
    +2798		x_sessions[session] = (x1+x2)/2
    +2799
    +2800		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
    +2801		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
    +2802		if not hist:
    +2803			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
    +2804			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
    +2805
    +2806		xmin, xmax, ymin, ymax = ppl.axis()
    +2807		for s in x_sessions:
    +2808			ppl.text(
    +2809				x_sessions[s],
    +2810				ymax +1,
    +2811				s,
    +2812				va = 'bottom',
    +2813				**(
    +2814					dict(ha = 'center')
    +2815					if len(self.sessions[s]['data']) > (0.15 * len(self))
    +2816					else dict(ha = 'left', rotation = 45)
    +2817					)
    +2818				)
    +2819
    +2820		if hist:
    +2821			ppl.sca(ax2)
    +2822
    +2823		for s in colors:
    +2824			kw['marker'] = '+'
    +2825			kw['ms'] = 5
    +2826			kw['mec'] = colors[s]
    +2827			kw['label'] = s
    +2828			kw['alpha'] = 1
     2829			ppl.plot([], [], **kw)
     2830
    -2831		if one_or_more_multiplets:
    -2832			kw['marker'] = '+'
    -2833			kw['ms'] = 4
    -2834			kw['alpha'] = 1
    -2835			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
    -2836			ppl.plot([], [], **kw)
    -2837
    -2838		if hist:
    -2839			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
    -2840		else:
    -2841			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
    -2842		leg.set_zorder(-1000)
    -2843
    -2844		ppl.sca(ax1)
    -2845
    -2846		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
    -2847		ppl.xticks([])
    -2848		ppl.axis([-1, len(self), None, None])
    -2849
    -2850		if hist:
    -2851			ppl.sca(ax2)
    -2852			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
    -2853			ppl.hist(
    -2854				X,
    -2855				orientation = 'horizontal',
    -2856				histtype = 'stepfilled',
    -2857				ec = [.4]*3,
    -2858				fc = [.25]*3,
    -2859				alpha = .25,
    -2860				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
    -2861				)
    -2862			ppl.axis([None, None, ymin, ymax])
    -2863			ppl.text(0, 0,
    -2864				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
    -2865				size = 8,
    -2866				alpha = 1,
    -2867				va = 'center',
    -2868				ha = 'left',
    -2869				)
    -2870
    -2871			ppl.xticks([])
    -2872			ppl.yticks([])
    -2873# 			ax2.spines['left'].set_visible(False)
    -2874			ax2.spines['right'].set_visible(False)
    -2875			ax2.spines['top'].set_visible(False)
    -2876			ax2.spines['bottom'].set_visible(False)
    -2877
    -2878
    -2879		if not os.path.exists(dir):
    -2880			os.makedirs(dir)
    -2881		if filename is None:
    -2882			return fig
    -2883		elif filename == '':
    -2884			filename = f'D{self._4x}_residuals.pdf'
    -2885		ppl.savefig(f'{dir}/{filename}')
    -2886		ppl.close(fig)
    -2887				
    -2888
    -2889	def simulate(self, *args, **kwargs):
    -2890		'''
    -2891		Legacy function with warning message pointing to `virtual_data()`
    -2892		'''
    -2893		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
    -2894
    -2895	def plot_distribution_of_analyses(
    -2896		self,
    -2897		dir = 'output',
    -2898		filename = None,
    -2899		vs_time = False,
    -2900		figsize = (6,4),
    -2901		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
    -2902		output = None,
    -2903		):
    -2904		'''
    -2905		Plot temporal distribution of all analyses in the data set.
    -2906		
    -2907		**Parameters**
    -2908
    -2909		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
    -2910		'''
    -2911
    -2912		asamples = [s for s in self.anchors]
    -2913		usamples = [s for s in self.unknowns]
    -2914		if output is None or output == 'fig':
    -2915			fig = ppl.figure(figsize = figsize)
    -2916			ppl.subplots_adjust(*subplots_adjust)
    -2917		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    -2918		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    -2919		Xmax += (Xmax-Xmin)/40
    -2920		Xmin -= (Xmax-Xmin)/41
    -2921		for k, s in enumerate(asamples + usamples):
    -2922			if vs_time:
    -2923				X = [r['TimeTag'] for r in self if r['Sample'] == s]
    -2924			else:
    -2925				X = [x for x,r in enumerate(self) if r['Sample'] == s]
    -2926			Y = [-k for x in X]
    -2927			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
    -2928			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
    -2929			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
    -2930		ppl.axis([Xmin, Xmax, -k-1, 1])
    -2931		ppl.xlabel('\ntime')
    -2932		ppl.gca().annotate('',
    -2933			xy = (0.6, -0.02),
    -2934			xycoords = 'axes fraction',
    -2935			xytext = (.4, -0.02), 
    -2936            arrowprops = dict(arrowstyle = "->", color = 'k'),
    -2937            )
    -2938			
    -2939
    -2940		x2 = -1
    -2941		for session in self.sessions:
    -2942			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    -2943			if vs_time:
    -2944				ppl.axvline(x1, color = 'k', lw = .75)
    -2945			if x2 > -1:
    -2946				if not vs_time:
    -2947					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
    -2948			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    -2949# 			from xlrd import xldate_as_datetime
    -2950# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
    -2951			if vs_time:
    -2952				ppl.axvline(x2, color = 'k', lw = .75)
    -2953				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
    -2954			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
    -2955
    -2956		ppl.xticks([])
    -2957		ppl.yticks([])
    -2958
    -2959		if output is None:
    -2960			if not os.path.exists(dir):
    -2961				os.makedirs(dir)
    -2962			if filename is None:
    -2963				filename = f'D{self._4x}_distribution_of_analyses.pdf'
    -2964			ppl.savefig(f'{dir}/{filename}')
    -2965			ppl.close(fig)
    -2966		elif output == 'ax':
    -2967			return ppl.gca()
    -2968		elif output == 'fig':
    -2969			return fig
    +2831		kw['mec'] = (0,0,0)
    +2832
    +2833		if one_or_more_singlets:
    +2834			kw['marker'] = 'x'
    +2835			kw['ms'] = 4
    +2836			kw['alpha'] = .2
    +2837			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
    +2838			ppl.plot([], [], **kw)
    +2839
    +2840		if one_or_more_multiplets:
    +2841			kw['marker'] = '+'
    +2842			kw['ms'] = 4
    +2843			kw['alpha'] = 1
    +2844			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
    +2845			ppl.plot([], [], **kw)
    +2846
    +2847		if hist:
    +2848			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
    +2849		else:
    +2850			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
    +2851		leg.set_zorder(-1000)
    +2852
    +2853		ppl.sca(ax1)
    +2854
    +2855		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
    +2856		ppl.xticks([])
    +2857		ppl.axis([-1, len(self), None, None])
    +2858
    +2859		if hist:
    +2860			ppl.sca(ax2)
    +2861			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
    +2862			ppl.hist(
    +2863				X,
    +2864				orientation = 'horizontal',
    +2865				histtype = 'stepfilled',
    +2866				ec = [.4]*3,
    +2867				fc = [.25]*3,
    +2868				alpha = .25,
    +2869				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
    +2870				)
    +2871			ppl.axis([None, None, ymin, ymax])
    +2872			ppl.text(0, 0,
    +2873				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
    +2874				size = 8,
    +2875				alpha = 1,
    +2876				va = 'center',
    +2877				ha = 'left',
    +2878				)
    +2879
    +2880			ppl.xticks([])
    +2881			ppl.yticks([])
    +2882# 			ax2.spines['left'].set_visible(False)
    +2883			ax2.spines['right'].set_visible(False)
    +2884			ax2.spines['top'].set_visible(False)
    +2885			ax2.spines['bottom'].set_visible(False)
    +2886
    +2887
    +2888		if not os.path.exists(dir):
    +2889			os.makedirs(dir)
    +2890		if filename is None:
    +2891			return fig
    +2892		elif filename == '':
    +2893			filename = f'D{self._4x}_residuals.pdf'
    +2894		ppl.savefig(f'{dir}/{filename}')
    +2895		ppl.close(fig)
    +2896				
    +2897
    +2898	def simulate(self, *args, **kwargs):
    +2899		'''
    +2900		Legacy function with warning message pointing to `virtual_data()`
    +2901		'''
    +2902		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
    +2903
    +2904	def plot_distribution_of_analyses(
    +2905		self,
    +2906		dir = 'output',
    +2907		filename = None,
    +2908		vs_time = False,
    +2909		figsize = (6,4),
    +2910		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
    +2911		output = None,
    +2912		):
    +2913		'''
    +2914		Plot temporal distribution of all analyses in the data set.
    +2915		
    +2916		**Parameters**
    +2917
    +2918		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
    +2919		'''
    +2920
    +2921		asamples = [s for s in self.anchors]
    +2922		usamples = [s for s in self.unknowns]
    +2923		if output is None or output == 'fig':
    +2924			fig = ppl.figure(figsize = figsize)
    +2925			ppl.subplots_adjust(*subplots_adjust)
    +2926		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    +2927		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    +2928		Xmax += (Xmax-Xmin)/40
    +2929		Xmin -= (Xmax-Xmin)/41
    +2930		for k, s in enumerate(asamples + usamples):
    +2931			if vs_time:
    +2932				X = [r['TimeTag'] for r in self if r['Sample'] == s]
    +2933			else:
    +2934				X = [x for x,r in enumerate(self) if r['Sample'] == s]
    +2935			Y = [-k for x in X]
    +2936			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
    +2937			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
    +2938			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
    +2939		ppl.axis([Xmin, Xmax, -k-1, 1])
    +2940		ppl.xlabel('\ntime')
    +2941		ppl.gca().annotate('',
    +2942			xy = (0.6, -0.02),
    +2943			xycoords = 'axes fraction',
    +2944			xytext = (.4, -0.02), 
    +2945            arrowprops = dict(arrowstyle = "->", color = 'k'),
    +2946            )
    +2947			
    +2948
    +2949		x2 = -1
    +2950		for session in self.sessions:
    +2951			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    +2952			if vs_time:
    +2953				ppl.axvline(x1, color = 'k', lw = .75)
    +2954			if x2 > -1:
    +2955				if not vs_time:
    +2956					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
    +2957			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    +2958# 			from xlrd import xldate_as_datetime
    +2959# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
    +2960			if vs_time:
    +2961				ppl.axvline(x2, color = 'k', lw = .75)
    +2962				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
    +2963			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
    +2964
    +2965		ppl.xticks([])
    +2966		ppl.yticks([])
    +2967
    +2968		if output is None:
    +2969			if not os.path.exists(dir):
    +2970				os.makedirs(dir)
    +2971			if filename is None:
    +2972				filename = f'D{self._4x}_distribution_of_analyses.pdf'
    +2973			ppl.savefig(f'{dir}/{filename}')
    +2974			ppl.close(fig)
    +2975		elif output == 'ax':
    +2976			return ppl.gca()
    +2977		elif output == 'fig':
    +2978			return fig
    +2979
    +2980
    +2981	def plot_bulk_compositions(
    +2982		self,
    +2983		samples = None,
    +2984		dir = 'output/bulk_compositions',
    +2985		figsize = (6,6),
    +2986		subplots_adjust = (0.15, 0.12, 0.95, 0.92),
    +2987		show = False,
    +2988		sample_color = (0,.5,1),
    +2989		analysis_color = (.7,.7,.7),
    +2990		labeldist = 0.3,
    +2991		radius = 0.05,
    +2992		):
    +2993		'''
    +2994		Plot δ13C_VPDB vs δ18O_VSMOW (of CO2) for all analyses.
    +2995		
    +2996		By default, creates a directory `./output/bulk_compositions` where plots for
    +2997		each sample are saved. Another plot named `__all__.pdf` shows all analyses together.
    +2998		
    +2999		
    +3000		**Parameters**
    +3001
    +3002		+ `samples`: Only these samples are processed (by default: all samples).
    +3003		+ `dir`: where to save the plots
    +3004		+ `figsize`: (width, height) of figure
    +3005		+ `subplots_adjust`: passed to `subplots_adjust()`
    +3006		+ `show`: whether to call `matplotlib.pyplot.show()` on the plot with all samples,
    +3007		allowing for interactive visualization/exploration in (δ13C, δ18O) space.
    +3008		+ `sample_color`: color used for sample markers/labels
    +3009		+ `analysis_color`: color used for analysis markers/labels
    +3010		+ `labeldist`: distance (in inches) from replicate markers to replicate labels
    +3011		+ `radius`: radius of the dashed circle providing scale. No circle if `radius = 0`.
    +3012		'''
    +3013
    +3014		from matplotlib.patches import Ellipse
    +3015
    +3016		if samples is None:
    +3017			samples = [_ for _ in self.samples]
    +3018
    +3019		saved = {}
    +3020
    +3021		for s in samples:
    +3022
    +3023			fig = ppl.figure(figsize = figsize)
    +3024			fig.subplots_adjust(*subplots_adjust)
    +3025			ax = ppl.subplot(111)
    +3026			ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
    +3027			ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
    +3028			ppl.title(s)
    +3029
    +3030
    +3031			XY = np.array([[_['d18O_VSMOW'], _['d13C_VPDB']] for _ in self.samples[s]['data']])
    +3032			UID = [_['UID'] for _ in self.samples[s]['data']]
    +3033			XY0 = XY.mean(0)
    +3034
    +3035			for xy in XY:
    +3036				ppl.plot([xy[0], XY0[0]], [xy[1], XY0[1]], '-', lw = 1, color = analysis_color)
    +3037				
    +3038			ppl.plot(*XY.T, 'wo', mew = 1, mec = analysis_color)
    +3039			ppl.plot(*XY0, 'wo', mew = 2, mec = sample_color)
    +3040			ppl.text(*XY0, f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
    +3041			saved[s] = [XY, XY0]
    +3042			
    +3043			x1, x2, y1, y2 = ppl.axis()
    +3044			x0, dx = (x1+x2)/2, (x2-x1)/2
    +3045			y0, dy = (y1+y2)/2, (y2-y1)/2
    +3046			dx, dy = [max(max(dx, dy), radius)]*2
    +3047
    +3048			ppl.axis([
    +3049				x0 - 1.2*dx,
    +3050				x0 + 1.2*dx,
    +3051				y0 - 1.2*dy,
    +3052				y0 + 1.2*dy,
    +3053				])			
    +3054
    +3055			XY0_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(XY0))
    +3056
    +3057			for xy, uid in zip(XY, UID):
    +3058
    +3059				xy_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(xy))
    +3060				vector_in_display_space = xy_in_display_space - XY0_in_display_space
    +3061
    +3062				if (vector_in_display_space**2).sum() > 0:
    +3063
    +3064					unit_vector_in_display_space = vector_in_display_space / ((vector_in_display_space**2).sum())**0.5
    +3065					label_vector_in_display_space = vector_in_display_space + unit_vector_in_display_space * labeldist
    +3066					label_xy_in_display_space = XY0_in_display_space + label_vector_in_display_space
    +3067					label_xy_in_data_space = ax.transData.inverted().transform(fig.dpi_scale_trans.transform(label_xy_in_display_space))
    +3068
    +3069					ppl.text(*label_xy_in_data_space, uid, va = 'center', ha = 'center', color = analysis_color)
    +3070
    +3071				else:
    +3072
    +3073					ppl.text(*xy, f'{uid}  ', va = 'center', ha = 'right', color = analysis_color)
    +3074
    +3075			if radius:
    +3076				ax.add_artist(Ellipse(
    +3077					xy = XY0,
    +3078					width = radius*2,
    +3079					height = radius*2,
    +3080					ls = (0, (2,2)),
    +3081					lw = .7,
    +3082					ec = analysis_color,
    +3083					fc = 'None',
    +3084					))
    +3085				ppl.text(
    +3086					XY0[0],
    +3087					XY0[1]-radius,
    +3088					f'\n± {radius*1e3:.0f} ppm',
    +3089					color = analysis_color,
    +3090					va = 'top',
    +3091					ha = 'center',
    +3092					linespacing = 0.4,
    +3093					size = 8,
    +3094					)
    +3095
    +3096			if not os.path.exists(dir):
    +3097				os.makedirs(dir)
    +3098			fig.savefig(f'{dir}/{s}.pdf')
    +3099			ppl.close(fig)
    +3100
    +3101		fig = ppl.figure(figsize = figsize)
    +3102		fig.subplots_adjust(*subplots_adjust)
    +3103		ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
    +3104		ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
    +3105
    +3106		for s in saved:
    +3107			for xy in saved[s][0]:
    +3108				ppl.plot([xy[0], saved[s][1][0]], [xy[1], saved[s][1][1]], '-', lw = 1, color = analysis_color)
    +3109			ppl.plot(*saved[s][0].T, 'wo', mew = 1, mec = analysis_color)
    +3110			ppl.plot(*saved[s][1], 'wo', mew = 1.5, mec = sample_color)
    +3111			ppl.text(*saved[s][1], f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
    +3112
    +3113		x1, x2, y1, y2 = ppl.axis()
    +3114		ppl.axis([
    +3115			x1 - (x2-x1)/10,
    +3116			x2 + (x2-x1)/10,
    +3117			y1 - (y2-y1)/10,
    +3118			y2 + (y2-y1)/10,
    +3119			])			
    +3120
    +3121
    +3122		if not os.path.exists(dir):
    +3123			os.makedirs(dir)
    +3124		fig.savefig(f'{dir}/__all__.pdf')
    +3125		if show:
    +3126			ppl.show()
    +3127		ppl.close(fig)
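
    A quick usage sketch for the new method (the file name and the `mydata`
    variable are illustrative, not part of this diff):

    from D47crunch import D47data

    mydata = D47data()
    mydata.read('rawdata.csv')  # CSV with UID, Session, Sample, d45, d46, d47 fields
    mydata.wg()                 # infer working-gas composition for each session
    mydata.crunch()             # compute bulk compositions and raw clumped anomalies
    mydata.plot_bulk_compositions(
    	dir = 'output/bulk_compositions',  # one PDF per sample, plus __all__.pdf
    	radius = 0.05,                     # dashed scale circle spanning ± 50 ppm
    	show = False,
    	)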
     
    @@ -7411,27 +7756,27 @@

    API Documentation

    -
    1017	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
    -1018		'''
    -1019		**Parameters**
    -1020
    -1021		+ `l`: a list of dictionaries, with each dictionary including at least the keys
    -1022		`Sample`, `d45`, `d46`, and `d47` or `d48`.
    -1023		+ `mass`: `'47'` or `'48'`
    -1024		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
    -1025		+ `session`: define session name for analyses without a `Session` key
    -1026		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
    -1027
    -1028		Returns a `D4xdata` object derived from `list`.
    -1029		'''
    -1030		self._4x = mass
    -1031		self.verbose = verbose
    -1032		self.prefix = 'D4xdata'
    -1033		self.logfile = logfile
    -1034		list.__init__(self, l)
    -1035		self.Nf = None
    -1036		self.repeatability = {}
    -1037		self.refresh(session = session)
    +            
    1026	def __init__(self, l = [], mass = '47', logfile = '', session = 'mySession', verbose = False):
    +1027		'''
    +1028		**Parameters**
    +1029
    +1030		+ `l`: a list of dictionaries, with each dictionary including at least the keys
    +1031		`Sample`, `d45`, `d46`, and `d47` or `d48`.
    +1032		+ `mass`: `'47'` or `'48'`
    +1033		+ `logfile`: if specified, write detailed logs to this file path when calling `D4xdata` methods.
    +1034		+ `session`: define session name for analyses without a `Session` key
    +1035		+ `verbose`: if `True`, print out detailed logs when calling `D4xdata` methods.
    +1036
    +1037		Returns a `D4xdata` object derived from `list`.
    +1038		'''
    +1039		self._4x = mass
    +1040		self.verbose = verbose
    +1041		self.prefix = 'D4xdata'
    +1042		self.logfile = logfile
    +1043		list.__init__(self, l)
    +1044		self.Nf = None
    +1045		self.repeatability = {}
    +1046		self.refresh(session = session)
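
    A minimal construction sketch matching this signature (numeric values are
    illustrative; `D47data` subclasses `D4xdata` and accepts the same arguments):

    from D47crunch import D47data

    mydata = D47data([
    	dict(UID = 'A01', Sample = 'ETH-1', d45 = 5.79, d46 = 11.33, d47 = 16.99),
    	dict(UID = 'A02', Sample = 'ETH-2', d45 = -6.06, d46 = -4.88, d47 = -11.17),
    	], session = 'Session_01', verbose = True)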
     
    @@ -7681,24 +8026,24 @@

    API Documentation

    -
    1040	def make_verbal(oldfun):
    -1041		'''
    -1042		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
    -1043		'''
    -1044		@wraps(oldfun)
    -1045		def newfun(*args, verbose = '', **kwargs):
    -1046			myself = args[0]
    -1047			oldprefix = myself.prefix
    -1048			myself.prefix = oldfun.__name__
    -1049			if verbose != '':
    -1050				oldverbose = myself.verbose
    -1051				myself.verbose = verbose
    -1052			out = oldfun(*args, **kwargs)
    -1053			myself.prefix = oldprefix
    -1054			if verbose != '':
    -1055				myself.verbose = oldverbose
    -1056			return out
    -1057		return newfun
    +            
    1049	def make_verbal(oldfun):
    +1050		'''
    +1051		Decorator: allow temporarily changing `self.prefix` and overriding `self.verbose`.
    +1052		'''
    +1053		@wraps(oldfun)
    +1054		def newfun(*args, verbose = '', **kwargs):
    +1055			myself = args[0]
    +1056			oldprefix = myself.prefix
    +1057			myself.prefix = oldfun.__name__
    +1058			if verbose != '':
    +1059				oldverbose = myself.verbose
    +1060				myself.verbose = verbose
    +1061			out = oldfun(*args, **kwargs)
    +1062			myself.prefix = oldprefix
    +1063			if verbose != '':
    +1064				myself.verbose = oldverbose
    +1065			return out
    +1066		return newfun
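
    Because `make_verbal` injects a transient `verbose` keyword into every
    decorated method, verbosity can be toggled per call (sketch, reusing the
    `mydata` object from above):

    mydata.wg(verbose = True)  # verbose for this call only
    mydata.crunch()            # reverts to self.verbose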
     
    @@ -7718,13 +8063,13 @@

    API Documentation

    -
    1060	def msg(self, txt):
    -1061		'''
    -1062		Log a message to `self.logfile`, and print it out if `verbose = True`
    -1063		'''
    -1064		self.log(txt)
    -1065		if self.verbose:
    -1066			print(f'{f"[{self.prefix}]":<16} {txt}')
    +            
    1069	def msg(self, txt):
    +1070		'''
    +1071		Log a message to `self.logfile`, and print it out if `verbose = True`
    +1072		'''
    +1073		self.log(txt)
    +1074		if self.verbose:
    +1075			print(f'{f"[{self.prefix}]":<16} {txt}')
     
    @@ -7744,12 +8089,12 @@

    API Documentation

    -
    1069	def vmsg(self, txt):
    -1070		'''
    -1071		Log a message to `self.logfile` and print it out
    -1072		'''
    -1073		self.log(txt)
    -1074		print(txt)
    +            
    1078	def vmsg(self, txt):
    +1079		'''
    +1080		Log a message to `self.logfile` and print it out
    +1081		'''
    +1082		self.log(txt)
    +1083		print(txt)
     
    @@ -7769,14 +8114,14 @@

    API Documentation

    -
    1077	def log(self, *txts):
    -1078		'''
    -1079		Log a message to `self.logfile`
    -1080		'''
    -1081		if self.logfile:
    -1082			with open(self.logfile, 'a') as fid:
    -1083				for txt in txts:
    -1084					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
    +            
    1086	def log(self, *txts):
    +1087		'''
    +1088		Log a message to `self.logfile`
    +1089		'''
    +1090		if self.logfile:
    +1091			with open(self.logfile, 'a') as fid:
    +1092				for txt in txts:
    +1093					fid.write(f'\n{dt.now().strftime("%Y-%m-%d %H:%M:%S")} {f"[{self.prefix}]":<16} {txt}')
     
    @@ -7796,13 +8141,13 @@

    API Documentation

    -
    1087	def refresh(self, session = 'mySession'):
    -1088		'''
    -1089		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
    -1090		'''
    -1091		self.fill_in_missing_info(session = session)
    -1092		self.refresh_sessions()
    -1093		self.refresh_samples()
    +            
    1096	def refresh(self, session = 'mySession'):
    +1097		'''
    +1098		Update `self.sessions`, `self.samples`, `self.anchors`, and `self.unknowns`.
    +1099		'''
    +1100		self.fill_in_missing_info(session = session)
    +1101		self.refresh_sessions()
    +1102		self.refresh_samples()
     
    @@ -7822,21 +8167,21 @@

    API Documentation

    -
    1096	def refresh_sessions(self):
    -1097		'''
    -1098		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
    -1099		to `False` for all sessions.
    -1100		'''
    -1101		self.sessions = {
    -1102			s: {'data': [r for r in self if r['Session'] == s]}
    -1103			for s in sorted({r['Session'] for r in self})
    -1104			}
    -1105		for s in self.sessions:
    -1106			self.sessions[s]['scrambling_drift'] = False
    -1107			self.sessions[s]['slope_drift'] = False
    -1108			self.sessions[s]['wg_drift'] = False
    -1109			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
    -1110			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
    +            
    1105	def refresh_sessions(self):
    +1106		'''
    +1107		Update `self.sessions` and set `scrambling_drift`, `slope_drift`, and `wg_drift`
    +1108		to `False` for all sessions.
    +1109		'''
    +1110		self.sessions = {
    +1111			s: {'data': [r for r in self if r['Session'] == s]}
    +1112			for s in sorted({r['Session'] for r in self})
    +1113			}
    +1114		for s in self.sessions:
    +1115			self.sessions[s]['scrambling_drift'] = False
    +1116			self.sessions[s]['slope_drift'] = False
    +1117			self.sessions[s]['wg_drift'] = False
    +1118			self.sessions[s]['d13C_standardization_method'] = self.d13C_STANDARDIZATION_METHOD
    +1119			self.sessions[s]['d18O_standardization_method'] = self.d18O_STANDARDIZATION_METHOD
     
    @@ -7857,16 +8202,16 @@

    API Documentation

    -
    1113	def refresh_samples(self):
    -1114		'''
    -1115		Define `self.samples`, `self.anchors`, and `self.unknowns`.
    -1116		'''
    -1117		self.samples = {
    -1118			s: {'data': [r for r in self if r['Sample'] == s]}
    -1119			for s in sorted({r['Sample'] for r in self})
    -1120			}
    -1121		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
    -1122		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
    +            
    1122	def refresh_samples(self):
    +1123		'''
    +1124		Define `self.samples`, `self.anchors`, and `self.unknowns`.
    +1125		'''
    +1126		self.samples = {
    +1127			s: {'data': [r for r in self if r['Sample'] == s]}
    +1128			for s in sorted({r['Sample'] for r in self})
    +1129			}
    +1130		self.anchors = {s: self.samples[s] for s in self.samples if s in self.Nominal_D4x}
    +1131		self.unknowns = {s: self.samples[s] for s in self.samples if s not in self.Nominal_D4x}
     
    @@ -7886,32 +8231,32 @@

    API Documentation

    -
    1125	def read(self, filename, sep = '', session = ''):
    -1126		'''
    -1127		Read file in csv format to load data into a `D47data` object.
    -1128
    -1129		In the csv file, spaces before and after field separators (`','` by default)
    -1130		are optional. Each line corresponds to a single analysis.
    -1131
    -1132		The required fields are:
    -1133
    -1134		+ `UID`: a unique identifier
    -1135		+ `Session`: an identifier for the analytical session
    -1136		+ `Sample`: a sample identifier
    -1137		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    -1138
    -1139		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    -1140		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    -1141		and `d49` are optional, and set to NaN by default.
    +            
    1134	def read(self, filename, sep = '', session = ''):
    +1135		'''
    +1136		Read file in csv format to load data into a `D47data` object.
    +1137
    +1138		In the csv file, spaces before and after field separators (`','` by default)
    +1139		are optional. Each line corresponds to a single analysis.
    +1140
    +1141		The required fields are:
     1142
    -1143		**Parameters**
    -1144
    -1145		+ `filename`: the path of the file to read
    -1146		+ `sep`: csv separator delimiting the fields
    -1147		+ `session`: set `Session` field to this string for all analyses
    -1148		'''
    -1149		with open(filename) as fid:
    -1150			self.input(fid.read(), sep = sep, session = session)
    +1143		+ `UID`: a unique identifier
    +1144		+ `Session`: an identifier for the analytical session
    +1145		+ `Sample`: a sample identifier
    +1146		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    +1147
    +1148		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    +1149		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    +1150		and `d49` are optional, and set to NaN by default.
    +1151
    +1152		**Parameters**
    +1153
    +1154		+ `filename`: the path of the file to read
    +1155		+ `sep`: csv separator delimiting the fields
    +1156		+ `session`: set `Session` field to this string for all analyses
    +1157		'''
    +1158		with open(filename) as fid:
    +1159			self.input(fid.read(), sep = sep, session = session)
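
    Illustrative input file and call (the path and values are made up):

    # contents of rawdata.csv:
    #   UID, Session, Sample, d45, d46, d47
    #   A01, 2023-01, ETH-1, 5.79, 11.33, 16.99
    #   A02, 2023-01, MYSAMPLE, -1.20, 2.45, 1.30
    mydata.read('rawdata.csv')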
     
    @@ -7955,42 +8300,42 @@

    API Documentation

    -
    1153	def input(self, txt, sep = '', session = ''):
    -1154		'''
    -1155		Read `txt` string in csv format to load analysis data into a `D47data` object.
    -1156
    -1157		In the csv string, spaces before and after field separators (`','` by default)
    -1158		are optional. Each line corresponds to a single analysis.
    -1159
    -1160		The required fields are:
    -1161
    -1162		+ `UID`: a unique identifier
    -1163		+ `Session`: an identifier for the analytical session
    -1164		+ `Sample`: a sample identifier
    -1165		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    -1166
    -1167		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    -1168		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    -1169		and `d49` are optional, and set to NaN by default.
    +            
    1162	def input(self, txt, sep = '', session = ''):
    +1163		'''
    +1164		Read `txt` string in csv format to load analysis data into a `D47data` object.
    +1165
    +1166		In the csv string, spaces before and after field separators (`','` by default)
    +1167		are optional. Each line corresponds to a single analysis.
    +1168
    +1169		The required fields are:
     1170
    -1171		**Parameters**
    -1172
    -1173		+ `txt`: the csv string to read
    -1174		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
    -1175		whichever appears most often in `txt`.
    -1176		+ `session`: set `Session` field to this string for all analyses
    -1177		'''
    -1178		if sep == '':
    -1179			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
    -1180		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
    -1181		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
    -1182
    -1183		if session != '':
    -1184			for r in data:
    -1185				r['Session'] = session
    -1186
    -1187		self += data
    -1188		self.refresh()
    +1171		+ `UID`: a unique identifier
    +1172		+ `Session`: an identifier for the analytical session
    +1173		+ `Sample`: a sample identifier
    +1174		+ `d45`, `d46`, and at least one of `d47` or `d48`: the working-gas delta values
    +1175
    +1176		Independently known oxygen-17 anomalies may be provided as `D17O` (in ‰ relative to
    +1177		VSMOW, λ = `self.LAMBDA_17`), and are otherwise assumed to be zero. Working-gas deltas `d47`, `d48`
    +1178		and `d49` are optional, and set to NaN by default.
    +1179
    +1180		**Parameters**
    +1181
    +1182		+ `txt`: the csv string to read
    +1183		+ `sep`: csv separator delimiting the fields. By default, use `,`, `;`, or `\t`,
    +1184		whichever appears most often in `txt`.
    +1185		+ `session`: set `Session` field to this string for all analyses
    +1186		'''
    +1187		if sep == '':
    +1188			sep = sorted(',;\t', key = lambda x: - txt.count(x))[0]
    +1189		txt = [[x.strip() for x in l.split(sep)] for l in txt.splitlines() if l.strip()]
    +1190		data = [{k: v if k in ['UID', 'Session', 'Sample'] else smart_type(v) for k,v in zip(txt[0], l) if v != ''} for l in txt[1:]]
    +1191
    +1192		if session != '':
    +1193			for r in data:
    +1194				r['Session'] = session
    +1195
    +1196		self += data
    +1197		self.refresh()
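
    The same records can be loaded from an in-memory string; the separator is
    auto-detected as whichever of `,`, `;` or tab occurs most often (sketch,
    with made-up values):

    mydata.input('''UID, Session, Sample, d45, d46, d47
    A01, 2023-01, ETH-1, 5.79, 11.33, 16.99
    A02, 2023-01, MYSAMPLE, -1.20, 2.45, 1.30''')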
     
    @@ -8036,95 +8381,95 @@

    API Documentation

    -
    1191	@make_verbal
    -1192	def wg(self, samples = None, a18_acid = None):
    -1193		'''
    -1194		Compute bulk composition of the working gas for each session based on
    -1195		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
    -1196		`self.Nominal_d18O_VPDB`.
    -1197		'''
    -1198
    -1199		self.msg('Computing WG composition:')
    -1200
    -1201		if a18_acid is None:
    -1202			a18_acid = self.ALPHA_18O_ACID_REACTION
    -1203		if samples is None:
    -1204			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
    -1205
    -1206		assert a18_acid, 'Acid fractionation factor should not be zero.'
    +            
    1200	@make_verbal
    +1201	def wg(self, samples = None, a18_acid = None):
    +1202		'''
    +1203		Compute bulk composition of the working gas for each session based on
    +1204		the carbonate standards defined in both `self.Nominal_d13C_VPDB` and
    +1205		`self.Nominal_d18O_VPDB`.
    +1206		'''
     1207
    -1208		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
    -1209		R45R46_standards = {}
    -1210		for sample in samples:
    -1211			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
    -1212			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
    -1213			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
    -1214			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
    -1215			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
    +1208		self.msg('Computing WG composition:')
    +1209
    +1210		if a18_acid is None:
    +1211			a18_acid = self.ALPHA_18O_ACID_REACTION
    +1212		if samples is None:
    +1213			samples = [s for s in self.Nominal_d13C_VPDB if s in self.Nominal_d18O_VPDB]
    +1214
    +1215		assert a18_acid, 'Acid fractionation factor should not be zero.'
     1216
    -1217			C12_s = 1 / (1 + R13_s)
    -1218			C13_s = R13_s / (1 + R13_s)
    -1219			C16_s = 1 / (1 + R17_s + R18_s)
    -1220			C17_s = R17_s / (1 + R17_s + R18_s)
    -1221			C18_s = R18_s / (1 + R17_s + R18_s)
    -1222
    -1223			C626_s = C12_s * C16_s ** 2
    -1224			C627_s = 2 * C12_s * C16_s * C17_s
    -1225			C628_s = 2 * C12_s * C16_s * C18_s
    -1226			C636_s = C13_s * C16_s ** 2
    -1227			C637_s = 2 * C13_s * C16_s * C17_s
    -1228			C727_s = C12_s * C17_s ** 2
    -1229
    -1230			R45_s = (C627_s + C636_s) / C626_s
    -1231			R46_s = (C628_s + C637_s + C727_s) / C626_s
    -1232			R45R46_standards[sample] = (R45_s, R46_s)
    -1233		
    -1234		for s in self.sessions:
    -1235			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
    -1236			assert db, f'No sample from {samples} found in session "{s}".'
    -1237# 			dbsamples = sorted({r['Sample'] for r in db})
    +1217		samples = [s for s in samples if s in self.Nominal_d13C_VPDB and s in self.Nominal_d18O_VPDB]
    +1218		R45R46_standards = {}
    +1219		for sample in samples:
    +1220			d13C_vpdb = self.Nominal_d13C_VPDB[sample]
    +1221			d18O_vpdb = self.Nominal_d18O_VPDB[sample]
    +1222			R13_s = self.R13_VPDB * (1 + d13C_vpdb / 1000)
    +1223			R17_s = self.R17_VPDB * ((1 + d18O_vpdb / 1000) * a18_acid) ** self.LAMBDA_17
    +1224			R18_s = self.R18_VPDB * (1 + d18O_vpdb / 1000) * a18_acid
    +1225
    +1226			C12_s = 1 / (1 + R13_s)
    +1227			C13_s = R13_s / (1 + R13_s)
    +1228			C16_s = 1 / (1 + R17_s + R18_s)
    +1229			C17_s = R17_s / (1 + R17_s + R18_s)
    +1230			C18_s = R18_s / (1 + R17_s + R18_s)
    +1231
    +1232			C626_s = C12_s * C16_s ** 2
    +1233			C627_s = 2 * C12_s * C16_s * C17_s
    +1234			C628_s = 2 * C12_s * C16_s * C18_s
    +1235			C636_s = C13_s * C16_s ** 2
    +1236			C637_s = 2 * C13_s * C16_s * C17_s
    +1237			C727_s = C12_s * C17_s ** 2
     1238
    -1239			X = [r['d45'] for r in db]
    -1240			Y = [R45R46_standards[r['Sample']][0] for r in db]
    -1241			x1, x2 = np.min(X), np.max(X)
    -1242
    -1243			if x1 < x2:
    -1244				wgcoord = x1/(x1-x2)
    -1245			else:
    -1246				wgcoord = 999
    +1239			R45_s = (C627_s + C636_s) / C626_s
    +1240			R46_s = (C628_s + C637_s + C727_s) / C626_s
    +1241			R45R46_standards[sample] = (R45_s, R46_s)
    +1242		
    +1243		for s in self.sessions:
    +1244			db = [r for r in self.sessions[s]['data'] if r['Sample'] in samples]
    +1245			assert db, f'No sample from {samples} found in session "{s}".'
    +1246# 			dbsamples = sorted({r['Sample'] for r in db})
     1247
    -1248			if wgcoord < -.5 or wgcoord > 1.5:
    -1249				# unreasonable to extrapolate to d45 = 0
    -1250				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    -1251			else :
    -1252				# d45 = 0 is reasonably well bracketed
    -1253				R45_wg = np.polyfit(X, Y, 1)[1]
    -1254
    -1255			X = [r['d46'] for r in db]
    -1256			Y = [R45R46_standards[r['Sample']][1] for r in db]
    -1257			x1, x2 = np.min(X), np.max(X)
    -1258
    -1259			if x1 < x2:
    -1260				wgcoord = x1/(x1-x2)
    -1261			else:
    -1262				wgcoord = 999
    +1248			X = [r['d45'] for r in db]
    +1249			Y = [R45R46_standards[r['Sample']][0] for r in db]
    +1250			x1, x2 = np.min(X), np.max(X)
    +1251
    +1252			if x1 < x2:
    +1253				wgcoord = x1/(x1-x2)
    +1254			else:
    +1255				wgcoord = 999
    +1256
    +1257			if wgcoord < -.5 or wgcoord > 1.5:
    +1258				# unreasonable to extrapolate to d45 = 0
    +1259				R45_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    +1260			else :
    +1261				# d45 = 0 is reasonably well bracketed
    +1262				R45_wg = np.polyfit(X, Y, 1)[1]
     1263
    -1264			if wgcoord < -.5 or wgcoord > 1.5:
    -1265				# unreasonable to extrapolate to d46 = 0
    -1266				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    -1267			else :
    -1268				# d46 = 0 is reasonably well bracketed
    -1269				R46_wg = np.polyfit(X, Y, 1)[1]
    -1270
    -1271			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
    +1264			X = [r['d46'] for r in db]
    +1265			Y = [R45R46_standards[r['Sample']][1] for r in db]
    +1266			x1, x2 = np.min(X), np.max(X)
    +1267
    +1268			if x1 < x2:
    +1269				wgcoord = x1/(x1-x2)
    +1270			else:
    +1271				wgcoord = 999
     1272
    -1273			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
    -1274
    -1275			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
    -1276			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
    -1277			for r in self.sessions[s]['data']:
    -1278				r['d13Cwg_VPDB'] = d13Cwg_VPDB
    -1279				r['d18Owg_VSMOW'] = d18Owg_VSMOW
    +1273			if wgcoord < -.5 or wgcoord > 1.5:
    +1274				# unreasonable to extrapolate to d46 = 0
    +1275				R46_wg = np.mean([y/(1+x/1000) for x,y in zip(X,Y)])
    +1276			else :
    +1277				# d46 = 0 is reasonably well bracketed
    +1278				R46_wg = np.polyfit(X, Y, 1)[1]
    +1279
    +1280			d13Cwg_VPDB, d18Owg_VSMOW = self.compute_bulk_delta(R45_wg, R46_wg)
    +1281
    +1282			self.msg(f'Session {s} WG:   δ13C_VPDB = {d13Cwg_VPDB:.3f}   δ18O_VSMOW = {d18Owg_VSMOW:.3f}')
    +1283
    +1284			self.sessions[s]['d13Cwg_VPDB'] = d13Cwg_VPDB
    +1285			self.sessions[s]['d18Owg_VSMOW'] = d18Owg_VSMOW
    +1286			for r in self.sessions[s]['data']:
    +1287				r['d13Cwg_VPDB'] = d13Cwg_VPDB
    +1288				r['d18Owg_VSMOW'] = d18Owg_VSMOW
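
    Typical call, then inspecting the inferred working-gas composition of each
    session (sketch, assuming anchors such as ETH-1/ETH-2 are present in every
    session):

    mydata.wg()
    for s in mydata.sessions:
    	print(s, mydata.sessions[s]['d13Cwg_VPDB'], mydata.sessions[s]['d18Owg_VSMOW'])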
     
    @@ -8146,36 +8491,36 @@

    API Documentation

    -
    1282	def compute_bulk_delta(self, R45, R46, D17O = 0):
    -1283		'''
    -1284		Compute δ13C_VPDB and δ18O_VSMOW,
    -1285		by solving the generalized form of equation (17) from
    -1286		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
    -1287		assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and
    -1288		solving the corresponding second-order Taylor polynomial.
    -1289		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014))
    -1290		'''
    -1291
    -1292		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
    -1293
    -1294		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
    -1295		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
    -1296		C = 2 * self.R18_VSMOW
    -1297		D = -R46
    -1298
    -1299		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
    -1300		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
    -1301		cc = A + B + C + D
    +            
    1291	def compute_bulk_delta(self, R45, R46, D17O = 0):
    +1292		'''
    +1293		Compute δ13C_VPDB and δ18O_VSMOW,
    +1294		by solving the generalized form of equation (17) from
    +1295		[Brand et al. (2010)](https://doi.org/10.1351/PAC-REP-09-01-05),
    +1296		assuming that δ18O_VSMOW is not too big (0 ± 50 ‰) and
    +1297		solving the corresponding second-order Taylor polynomial.
    +1298		(Appendix A of [Daëron et al., 2016](https://doi.org/10.1016/j.chemgeo.2016.08.014))
    +1299		'''
    +1300
    +1301		K = np.exp(D17O / 1000) * self.R17_VSMOW * self.R18_VSMOW ** -self.LAMBDA_17
     1302
    -1303		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
    -1304
    -1305		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
    -1306		R17 = K * R18 ** self.LAMBDA_17
    -1307		R13 = R45 - 2 * R17
    -1308
    -1309		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
    -1310
    -1311		return d13C_VPDB, d18O_VSMOW
    +1303		A = -3 * K ** 2 * self.R18_VSMOW ** (2 * self.LAMBDA_17)
    +1304		B = 2 * K * R45 * self.R18_VSMOW ** self.LAMBDA_17
    +1305		C = 2 * self.R18_VSMOW
    +1306		D = -R46
    +1307
    +1308		aa = A * self.LAMBDA_17 * (2 * self.LAMBDA_17 - 1) + B * self.LAMBDA_17 * (self.LAMBDA_17 - 1) / 2
    +1309		bb = 2 * A * self.LAMBDA_17 + B * self.LAMBDA_17 + C
    +1310		cc = A + B + C + D
    +1311
    +1312		d18O_VSMOW = 1000 * (-bb + (bb ** 2 - 4 * aa * cc) ** .5) / (2 * aa)
    +1313
    +1314		R18 = (1 + d18O_VSMOW / 1000) * self.R18_VSMOW
    +1315		R17 = K * R18 ** self.LAMBDA_17
    +1316		R13 = R45 - 2 * R17
    +1317
    +1318		d13C_VPDB = 1000 * (R13 / self.R13_VPDB - 1)
    +1319
    +1320		return d13C_VPDB, d18O_VSMOW
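
    Direct-call sketch with plausible but made-up isobar ratios:

    d13C_VPDB, d18O_VSMOW = mydata.compute_bulk_delta(R45 = 0.01180, R46 = 0.00417, D17O = 0.)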
     
    @@ -8201,16 +8546,16 @@

    API Documentation

    -
    1314	@make_verbal
    -1315	def crunch(self, verbose = ''):
    -1316		'''
    -1317		Compute bulk composition and raw clumped isotope anomalies for all analyses.
    -1318		'''
    -1319		for r in self:
    -1320			self.compute_bulk_and_clumping_deltas(r)
    -1321		self.standardize_d13C()
    -1322		self.standardize_d18O()
    -1323		self.msg(f"Crunched {len(self)} analyses.")
    +            
    1323	@make_verbal
    +1324	def crunch(self, verbose = ''):
    +1325		'''
    +1326		Compute bulk composition and raw clumped isotope anomalies for all analyses.
    +1327		'''
    +1328		for r in self:
    +1329			self.compute_bulk_and_clumping_deltas(r)
    +1330		self.standardize_d13C()
    +1331		self.standardize_d18O()
    +1332		self.msg(f"Crunched {len(self)} analyses.")
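
    Pipeline position (sketch): `crunch()` runs after `wg()` and before
    standardization, populating `d13C_VPDB`, `d18O_VSMOW` and the raw
    `D47raw`/`D48raw`/`D49raw` fields of every analysis:

    mydata.crunch()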
     
    @@ -8230,20 +8575,20 @@

    API Documentation

    -
    1326	def fill_in_missing_info(self, session = 'mySession'):
    -1327		'''
    -1328		Fill in optional fields with default values
    -1329		'''
    -1330		for i,r in enumerate(self):
    -1331			if 'D17O' not in r:
    -1332				r['D17O'] = 0.
    -1333			if 'UID' not in r:
    -1334				r['UID'] = f'{i+1}'
    -1335			if 'Session' not in r:
    -1336				r['Session'] = session
    -1337			for k in ['d47', 'd48', 'd49']:
    -1338				if k not in r:
    -1339					r[k] = np.nan
    +            
    1335	def fill_in_missing_info(self, session = 'mySession'):
    +1336		'''
    +1337		Fill in optional fields with default values
    +1338		'''
    +1339		for i,r in enumerate(self):
    +1340			if 'D17O' not in r:
    +1341				r['D17O'] = 0.
    +1342			if 'UID' not in r:
    +1343				r['UID'] = f'{i+1}'
    +1344			if 'Session' not in r:
    +1345				r['Session'] = session
    +1346			for k in ['d47', 'd48', 'd49']:
    +1347				if k not in r:
    +1348					r[k] = np.nan
     
    @@ -8263,25 +8608,25 @@

    API Documentation

    -
    1342	def standardize_d13C(self):
    -1343		'''
    -1344		Perform δ13C standardization within each session `s` according to
    -1345		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
    -1346		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
    -1347		may be redefined arbitrarily at a later stage.
    -1348		'''
    -1349		for s in self.sessions:
    -1350			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
    -1351				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
    -1352				X,Y = zip(*XY)
    -1353				if self.sessions[s]['d13C_standardization_method'] == '1pt':
    -1354					offset = np.mean(Y) - np.mean(X)
    -1355					for r in self.sessions[s]['data']:
    -1356						r['d13C_VPDB'] += offset				
    -1357				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
    -1358					a,b = np.polyfit(X,Y,1)
    -1359					for r in self.sessions[s]['data']:
    -1360						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
    +            
    1351	def standardize_d13C(self):
    +1352		'''
    +1353		Perform δ13C standardization within each session `s` according to
    +1354		`self.sessions[s]['d13C_standardization_method']`, which is defined by default
    +1355		by `D47data.refresh_sessions()` as equal to `self.d13C_STANDARDIZATION_METHOD`, but
    +1356		may be redefined arbitrarily at a later stage.
    +1357		'''
    +1358		for s in self.sessions:
    +1359			if self.sessions[s]['d13C_standardization_method'] in ['1pt', '2pt']:
    +1360				XY = [(r['d13C_VPDB'], self.Nominal_d13C_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d13C_VPDB]
    +1361				X,Y = zip(*XY)
    +1362				if self.sessions[s]['d13C_standardization_method'] == '1pt':
    +1363					offset = np.mean(Y) - np.mean(X)
    +1364					for r in self.sessions[s]['data']:
    +1365						r['d13C_VPDB'] += offset				
    +1366				elif self.sessions[s]['d13C_standardization_method'] == '2pt':
    +1367					a,b = np.polyfit(X,Y,1)
    +1368					for r in self.sessions[s]['data']:
    +1369						r['d13C_VPDB'] = a * r['d13C_VPDB'] + b
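
    A standalone numeric sketch of the two correction modes above (all values
    illustrative):

    import numpy as np

    X = np.array([2.01, 2.05, -9.95])   # measured d13C_VPDB of anchor analyses
    Y = np.array([2.02, 2.02, -10.17])  # nominal d13C_VPDB of the same anchors
    offset = Y.mean() - X.mean()        # '1pt': additive offset applied to all analyses
    a, b = np.polyfit(X, Y, 1)          # '2pt': affine correction x -> a*x + b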
     
    @@ -8304,26 +8649,26 @@

    API Documentation

    -
    1362	def standardize_d18O(self):
    -1363		'''
    -1364		Perform δ18O standardization within each session `s` according to
    -1365		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
    -1366		which is defined by default by `D47data.refresh_sessions()` as equal to
    -1367		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
    -1368		'''
    -1369		for s in self.sessions:
    -1370			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
    -1371				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
    -1372				X,Y = zip(*XY)
    -1373				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
    -1374				if self.sessions[s]['d18O_standardization_method'] == '1pt':
    -1375					offset = np.mean(Y) - np.mean(X)
    -1376					for r in self.sessions[s]['data']:
    -1377						r['d18O_VSMOW'] += offset				
    -1378				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
    -1379					a,b = np.polyfit(X,Y,1)
    -1380					for r in self.sessions[s]['data']:
    -1381						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
    +            
    1371	def standardize_d18O(self):
    +1372		'''
    +1373		Perform δ18O standardization within each session `s` according to
    +1374		`self.ALPHA_18O_ACID_REACTION` and `self.sessions[s]['d18O_standardization_method']`,
    +1375		which is defined by default by `D47data.refresh_sessions()` as equal to
    +1376		`self.d18O_STANDARDIZATION_METHOD`, but may be redefined arbitrarily at a later stage.
    +1377		'''
    +1378		for s in self.sessions:
    +1379			if self.sessions[s]['d18O_standardization_method'] in ['1pt', '2pt']:
    +1380				XY = [(r['d18O_VSMOW'], self.Nominal_d18O_VPDB[r['Sample']]) for r in self.sessions[s]['data'] if r['Sample'] in self.Nominal_d18O_VPDB]
    +1381				X,Y = zip(*XY)
    +1382				Y = [(1000+y) * self.R18_VPDB * self.ALPHA_18O_ACID_REACTION / self.R18_VSMOW - 1000 for y in Y]
    +1383				if self.sessions[s]['d18O_standardization_method'] == '1pt':
    +1384					offset = np.mean(Y) - np.mean(X)
    +1385					for r in self.sessions[s]['data']:
    +1386						r['d18O_VSMOW'] += offset				
    +1387				elif self.sessions[s]['d18O_standardization_method'] == '2pt':
    +1388					a,b = np.polyfit(X,Y,1)
    +1389					for r in self.sessions[s]['data']:
    +1390						r['d18O_VSMOW'] = a * r['d18O_VSMOW'] + b
     
    @@ -8346,43 +8691,43 @@

    API Documentation

    -
    1384	def compute_bulk_and_clumping_deltas(self, r):
    -1385		'''
    -1386		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
    -1387		'''
    -1388
    -1389		# Compute working gas R13, R18, and isobar ratios
    -1390		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
    -1391		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
    -1392		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
    -1393
    -1394		# Compute analyte isobar ratios
    -1395		R45 = (1 + r['d45'] / 1000) * R45_wg
    -1396		R46 = (1 + r['d46'] / 1000) * R46_wg
    -1397		R47 = (1 + r['d47'] / 1000) * R47_wg
    -1398		R48 = (1 + r['d48'] / 1000) * R48_wg
    -1399		R49 = (1 + r['d49'] / 1000) * R49_wg
    -1400
    -1401		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
    -1402		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
    -1403		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
    -1404
    -1405		# Compute stochastic isobar ratios of the analyte
    -1406		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
    -1407			R13, R18, D17O = r['D17O']
    -1408		)
    +            
    1393	def compute_bulk_and_clumping_deltas(self, r):
    +1394		'''
    +1395		Compute δ13C_VPDB, δ18O_VSMOW, and raw Δ47, Δ48, Δ49 values for a single analysis `r`.
    +1396		'''
    +1397
    +1398		# Compute working gas R13, R18, and isobar ratios
    +1399		R13_wg = self.R13_VPDB * (1 + r['d13Cwg_VPDB'] / 1000)
    +1400		R18_wg = self.R18_VSMOW * (1 + r['d18Owg_VSMOW'] / 1000)
    +1401		R45_wg, R46_wg, R47_wg, R48_wg, R49_wg = self.compute_isobar_ratios(R13_wg, R18_wg)
    +1402
    +1403		# Compute analyte isobar ratios
    +1404		R45 = (1 + r['d45'] / 1000) * R45_wg
    +1405		R46 = (1 + r['d46'] / 1000) * R46_wg
    +1406		R47 = (1 + r['d47'] / 1000) * R47_wg
    +1407		R48 = (1 + r['d48'] / 1000) * R48_wg
    +1408		R49 = (1 + r['d49'] / 1000) * R49_wg
     1409
    -1410		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
    -1411		# and log a warning if the corresponding anomalies exceed 0.05 ppm.
    -1412		if (R45 / R45stoch - 1) > 5e-8:
    -1413			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
    -1414		if (R46 / R46stoch - 1) > 5e-8:
    -1415			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
    -1416
    -1417		# Compute raw clumped isotope anomalies
    -1418		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
    -1419		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
    -1420		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
    +1410		r['d13C_VPDB'], r['d18O_VSMOW'] = self.compute_bulk_delta(R45, R46, D17O = r['D17O'])
    +1411		R13 = (1 + r['d13C_VPDB'] / 1000) * self.R13_VPDB
    +1412		R18 = (1 + r['d18O_VSMOW'] / 1000) * self.R18_VSMOW
    +1413
    +1414		# Compute stochastic isobar ratios of the analyte
    +1415		R45stoch, R46stoch, R47stoch, R48stoch, R49stoch = self.compute_isobar_ratios(
    +1416			R13, R18, D17O = r['D17O']
    +1417		)
    +1418
    +1419		# Check that R45/R45stoch and R46/R46stoch are indistinguishable from 1,
    +1420		# and log a warning if the corresponding anomalies exceed 0.05 ppm.
    +1421		if (R45 / R45stoch - 1) > 5e-8:
    +1422			self.vmsg(f'This is unexpected: R45/R45stoch - 1 = {1e6 * (R45 / R45stoch - 1):.3f} ppm')
    +1423		if (R46 / R46stoch - 1) > 5e-8:
    +1424			self.vmsg(f'This is unexpected: R46/R46stoch - 1 = {1e6 * (R46 / R46stoch - 1):.3f} ppm')
    +1425
    +1426		# Compute raw clumped isotope anomalies
    +1427		r['D47raw'] = 1000 * (R47 / R47stoch - 1)
    +1428		r['D48raw'] = 1000 * (R48 / R48stoch - 1)
    +1429		r['D49raw'] = 1000 * (R49 / R49stoch - 1)
     
    @@ -8402,51 +8747,51 @@

    API Documentation

    -
    1423	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
    -1424		'''
    -1425		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
    -1426		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
    -1427		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
    -1428		'''
    -1429
    -1430		# Compute R17
    -1431		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
    -1432
    -1433		# Compute isotope concentrations
    -1434		C12 = (1 + R13) ** -1
    -1435		C13 = C12 * R13
    -1436		C16 = (1 + R17 + R18) ** -1
    -1437		C17 = C16 * R17
    -1438		C18 = C16 * R18
    -1439
    -1440		# Compute stochastic isotopologue concentrations
    -1441		C626 = C16 * C12 * C16
    -1442		C627 = C16 * C12 * C17 * 2
    -1443		C628 = C16 * C12 * C18 * 2
    -1444		C636 = C16 * C13 * C16
    -1445		C637 = C16 * C13 * C17 * 2
    -1446		C638 = C16 * C13 * C18 * 2
    -1447		C727 = C17 * C12 * C17
    -1448		C728 = C17 * C12 * C18 * 2
    -1449		C737 = C17 * C13 * C17
    -1450		C738 = C17 * C13 * C18 * 2
    -1451		C828 = C18 * C12 * C18
    -1452		C838 = C18 * C13 * C18
    -1453
    -1454		# Compute stochastic isobar ratios
    -1455		R45 = (C636 + C627) / C626
    -1456		R46 = (C628 + C637 + C727) / C626
    -1457		R47 = (C638 + C728 + C737) / C626
    -1458		R48 = (C738 + C828) / C626
    -1459		R49 = C838 / C626
    -1460
    -1461		# Account for stochastic anomalies
    -1462		R47 *= 1 + D47 / 1000
    -1463		R48 *= 1 + D48 / 1000
    -1464		R49 *= 1 + D49 / 1000
    -1465
    -1466		# Return isobar ratios
    -1467		return R45, R46, R47, R48, R49
    +            
    1432	def compute_isobar_ratios(self, R13, R18, D17O=0, D47=0, D48=0, D49=0):
    +1433		'''
    +1434		Compute isobar ratios for a sample with isotopic ratios `R13` and `R18`,
    +1435		optionally accounting for non-zero values of Δ17O (`D17O`) and clumped isotope
    +1436		anomalies (`D47`, `D48`, `D49`), all expressed in permil.
    +1437		'''
    +1438
    +1439		# Compute R17
    +1440		R17 = self.R17_VSMOW * np.exp(D17O / 1000) * (R18 / self.R18_VSMOW) ** self.LAMBDA_17
    +1441
    +1442		# Compute isotope concentrations
    +1443		C12 = (1 + R13) ** -1
    +1444		C13 = C12 * R13
    +1445		C16 = (1 + R17 + R18) ** -1
    +1446		C17 = C16 * R17
    +1447		C18 = C16 * R18
    +1448
    +1449		# Compute stochastic isotopologue concentrations
    +1450		C626 = C16 * C12 * C16
    +1451		C627 = C16 * C12 * C17 * 2
    +1452		C628 = C16 * C12 * C18 * 2
    +1453		C636 = C16 * C13 * C16
    +1454		C637 = C16 * C13 * C17 * 2
    +1455		C638 = C16 * C13 * C18 * 2
    +1456		C727 = C17 * C12 * C17
    +1457		C728 = C17 * C12 * C18 * 2
    +1458		C737 = C17 * C13 * C17
    +1459		C738 = C17 * C13 * C18 * 2
    +1460		C828 = C18 * C12 * C18
    +1461		C838 = C18 * C13 * C18
    +1462
    +1463		# Compute stochastic isobar ratios
    +1464		R45 = (C636 + C627) / C626
    +1465		R46 = (C628 + C637 + C727) / C626
    +1466		R47 = (C638 + C728 + C737) / C626
    +1467		R48 = (C738 + C828) / C626
    +1468		R49 = C838 / C626
    +1469
    +1470		# Account for stochastic anomalies
    +1471		R47 *= 1 + D47 / 1000
    +1472		R48 *= 1 + D48 / 1000
    +1473		R49 *= 1 + D49 / 1000
    +1474
    +1475		# Return isobar ratios
    +1476		return R45, R46, R47, R48, R49
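
    A self-contained numeric check of the stochastic algebra above; R13 and R18
    are made up, and the VSMOW reference ratios and λ are the standard values
    assumed here:

    LAMBDA_17, R17_VSMOW, R18_VSMOW = 0.528, 0.00038475, 0.0020052
    R13, R18 = 0.01118, 0.00206
    R17 = R17_VSMOW * (R18 / R18_VSMOW) ** LAMBDA_17
    C12, C13 = 1 / (1 + R13), R13 / (1 + R13)
    C16 = 1 / (1 + R17 + R18)
    C17, C18 = C16 * R17, C16 * R18
    C626 = C12 * C16 ** 2
    R45 = (C13 * C16**2 + 2 * C12 * C16 * C17) / C626                        # 636 + 627
    R46 = (2 * C12 * C16 * C18 + 2 * C13 * C16 * C17 + C12 * C17**2) / C626  # 628 + 637 + 727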
     
    @@ -8468,30 +8813,30 @@

    API Documentation

    -
    1470	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
    -1471		'''
    -1472		Split unknown samples by UID (treat all analyses as different samples)
    -1473		or by session (treat analyses of a given sample in different sessions as
    -1474		different samples).
    -1475
    -1476		**Parameters**
    -1477
    -1478		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
    -1479		+ `grouping`: `by_uid` | `by_session`
    -1480		'''
    -1481		if samples_to_split == 'all':
    -1482			samples_to_split = [s for s in self.unknowns]
    -1483		gkeys = {'by_uid':'UID', 'by_session':'Session'}
    -1484		self.grouping = grouping.lower()
    -1485		if self.grouping in gkeys:
    -1486			gkey = gkeys[self.grouping]
    -1487		for r in self:
    -1488			if r['Sample'] in samples_to_split:
    -1489				r['Sample_original'] = r['Sample']
    -1490				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
    -1491			elif r['Sample'] in self.unknowns:
    -1492				r['Sample_original'] = r['Sample']
    -1493		self.refresh_samples()
    +            
    1479	def split_samples(self, samples_to_split = 'all', grouping = 'by_session'):
    +1480		'''
    +1481		Split unknown samples by UID (treat all analyses as different samples)
    +1482		or by session (treat analyses of a given sample in different sessions as
    +1483		different samples).
    +1484
    +1485		**Parameters**
    +1486
    +1487		+ `samples_to_split`: a list of samples to split, e.g., `['IAEA-C1', 'IAEA-C2']`
    +1488		+ `grouping`: `by_uid` | `by_session`
    +1489		'''
    +1490		if samples_to_split == 'all':
    +1491			samples_to_split = [s for s in self.unknowns]
    +1492		gkeys = {'by_uid':'UID', 'by_session':'Session'}
    +1493		self.grouping = grouping.lower()
    +1494		if self.grouping in gkeys:
    +1495			gkey = gkeys[self.grouping]
    +1496		for r in self:
    +1497			if r['Sample'] in samples_to_split:
    +1498				r['Sample_original'] = r['Sample']
    +1499				r['Sample'] = f"{r['Sample']}__{r[gkey]}"
    +1500			elif r['Sample'] in self.unknowns:
    +1501				r['Sample_original'] = r['Sample']
    +1502		self.refresh_samples()
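
    Usage sketch (sample name illustrative), to be paired with
    `unsplit_samples()`, documented below:

    mydata.split_samples(['MYSAMPLE'], grouping = 'by_session')
    mydata.standardize(method = 'pooled')
    mydata.unsplit_samples()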
     
    @@ -8520,61 +8865,61 @@

    API Documentation

    -
    1496	def unsplit_samples(self, tables = False):
    -1497		'''
    -1498		Reverse the effects of `D47data.split_samples()`.
    -1499		
    -1500		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
    -1501		
    -1502		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
    -1503		probably use `D4xdata.combine_samples()` instead to reverse the effects of
    -1504		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
    -1505		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
    -1506		that case session-averaged Δ4x values are statistically independent).
    -1507		'''
    -1508		unknowns_old = sorted({s for s in self.unknowns})
    -1509		CM_old = self.standardization.covar[:,:]
    -1510		VD_old = self.standardization.params.valuesdict().copy()
    -1511		vars_old = self.standardization.var_names
    -1512
    -1513		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
    -1514
    -1515		Ns = len(vars_old) - len(unknowns_old)
    -1516		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
    -1517		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
    -1518
    -1519		W = np.zeros((len(vars_new), len(vars_old)))
    -1520		W[:Ns,:Ns] = np.eye(Ns)
    -1521		for u in unknowns_new:
    -1522			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
    -1523			if self.grouping == 'by_session':
    -1524				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
    -1525			elif self.grouping == 'by_uid':
    -1526				weights = [1 for s in splits]
    -1527			sw = sum(weights)
    -1528			weights = [w/sw for w in weights]
    -1529			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
    -1530
    -1531		CM_new = W @ CM_old @ W.T
    -1532		V = W @ np.array([[VD_old[k]] for k in vars_old])
    -1533		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
    -1534
    -1535		self.standardization.covar = CM_new
    -1536		self.standardization.params.valuesdict = lambda : VD_new
    -1537		self.standardization.var_names = vars_new
    -1538
    -1539		for r in self:
    -1540			if r['Sample'] in self.unknowns:
    -1541				r['Sample_split'] = r['Sample']
    -1542				r['Sample'] = r['Sample_original']
    +            
    1505	def unsplit_samples(self, tables = False):
    +1506		'''
    +1507		Reverse the effects of `D47data.split_samples()`.
    +1508		
    +1509		This should only be used after `D4xdata.standardize()` with `method='pooled'`.
    +1510		
    +1511		After `D4xdata.standardize()` with `method='indep_sessions'`, one should
    +1512		probably use `D4xdata.combine_samples()` instead to reverse the effects of
    +1513		`D47data.split_samples()` with `grouping='by_uid'`, or `w_avg()` to reverse the
    +1514		effects of `D47data.split_samples()` with `grouping='by_session'` (because in
    +1515		that case session-averaged Δ4x values are statistically independent).
    +1516		'''
    +1517		unknowns_old = sorted({s for s in self.unknowns})
    +1518		CM_old = self.standardization.covar[:,:]
    +1519		VD_old = self.standardization.params.valuesdict().copy()
    +1520		vars_old = self.standardization.var_names
    +1521
    +1522		unknowns_new = sorted({r['Sample_original'] for r in self if 'Sample_original' in r})
    +1523
    +1524		Ns = len(vars_old) - len(unknowns_old)
    +1525		vars_new = vars_old[:Ns] + [f'D{self._4x}_{pf(u)}' for u in unknowns_new]
    +1526		VD_new = {k: VD_old[k] for k in vars_old[:Ns]}
    +1527
    +1528		W = np.zeros((len(vars_new), len(vars_old)))
    +1529		W[:Ns,:Ns] = np.eye(Ns)
    +1530		for u in unknowns_new:
    +1531			splits = sorted({r['Sample'] for r in self if 'Sample_original' in r and r['Sample_original'] == u})
    +1532			if self.grouping == 'by_session':
    +1533				weights = [self.samples[s][f'SE_D{self._4x}']**-2 for s in splits]
    +1534			elif self.grouping == 'by_uid':
    +1535				weights = [1 for s in splits]
    +1536			sw = sum(weights)
    +1537			weights = [w/sw for w in weights]
    +1538			W[vars_new.index(f'D{self._4x}_{pf(u)}'),[vars_old.index(f'D{self._4x}_{pf(s)}') for s in splits]] = weights[:]
    +1539
    +1540		CM_new = W @ CM_old @ W.T
    +1541		V = W @ np.array([[VD_old[k]] for k in vars_old])
    +1542		VD_new = {k:v[0] for k,v in zip(vars_new, V)}
     1543
    -1544		self.refresh_samples()
    -1545		self.consolidate_samples()
    -1546		self.repeatabilities()
    +1544		self.standardization.covar = CM_new
    +1545		self.standardization.params.valuesdict = lambda : VD_new
    +1546		self.standardization.var_names = vars_new
     1547
    -1548		if tables:
    -1549			self.table_of_analyses()
    -1550			self.table_of_samples()
    +1548		for r in self:
    +1549			if r['Sample'] in self.unknowns:
    +1550				r['Sample_split'] = r['Sample']
    +1551				r['Sample'] = r['Sample_original']
    +1552
    +1553		self.refresh_samples()
    +1554		self.consolidate_samples()
    +1555		self.repeatabilities()
    +1556
    +1557		if tables:
    +1558			self.table_of_analyses()
    +1559			self.table_of_samples()
     
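
    The heart of `unsplit_samples()` is the linear map `W`: merged Δ4x values are weighted
    averages of the split values, and the parameter covariance matrix is propagated through
    the same map as `W CM W^T`. A minimal, self-contained sketch of that propagation, using
    hypothetical numbers for two splits of a single sample:

    ```py
    import numpy as np

    # Hypothetical inputs: D47 values of two splits of one sample,
    # and their covariance matrix from the pooled standardization fit.
    V_old = np.array([0.301, 0.295])
    CM_old = np.array([[4e-4, 1e-4],
                       [1e-4, 4e-4]])

    # grouping = 'by_uid' -> equal weights; one merged sample -> one row in W
    W = np.array([[0.5, 0.5]])

    V_new = W @ V_old          # merged D47 value
    CM_new = W @ CM_old @ W.T  # merged (co)variance: CM_new = W CM_old W^T
    print(V_new[0], CM_new[0, 0]**0.5)  # value and its standard error
    ```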
    @@ -8602,25 +8947,25 @@

    API Documentation

    -
    1552	def assign_timestamps(self):
    -1553		'''
    -1554		Assign a time field `t` of type `float` to each analysis.
    -1555
    -1556		If `TimeTag` is one of the data fields, `t` is equal within a given session
    -1557		to `TimeTag` minus the mean value of `TimeTag` for that session.
    -1558		Otherwise, each analysis is by default assigned a `TimeTag` equal to its
    -1559		index within its session, and `t` is defined as above.
    -1560		'''
    -1561		for session in self.sessions:
    -1562			sdata = self.sessions[session]['data']
    -1563			try:
    -1564				t0 = np.mean([r['TimeTag'] for r in sdata])
    -1565				for r in sdata:
    -1566					r['t'] = r['TimeTag'] - t0
    -1567			except KeyError:
    -1568				t0 = (len(sdata)-1)/2
    -1569				for t,r in enumerate(sdata):
    -1570					r['t'] = t - t0
    +            
    1561	def assign_timestamps(self):
    +1562		'''
    +1563		Assign a time field `t` of type `float` to each analysis.
    +1564
    +1565		If `TimeTag` is one of the data fields, `t` is equal within a given session
    +1566		to `TimeTag` minus the mean value of `TimeTag` for that session.
    +1567		Otherwise, each analysis is by default assigned a `TimeTag` equal to its
    +1568		index within its session, and `t` is defined as above.
    +1569		'''
    +1570		for session in self.sessions:
    +1571			sdata = self.sessions[session]['data']
    +1572			try:
    +1573				t0 = np.mean([r['TimeTag'] for r in sdata])
    +1574				for r in sdata:
    +1575					r['t'] = r['TimeTag'] - t0
    +1576			except KeyError:
    +1577				t0 = (len(sdata)-1)/2
    +1578				for t,r in enumerate(sdata):
    +1579					r['t'] = t - t0
     
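
    A quick sketch of the fallback branch above (no `TimeTag` field): each analysis gets its
    index within the session, recentered so that `t` averages to zero over the session.

    ```py
    # Session of N analyses without 'TimeTag': t = index - (N-1)/2
    N = 5
    t0 = (N - 1) / 2
    print([k - t0 for k in range(N)])  # [-2.0, -1.0, 0.0, 1.0, 2.0]
    ```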
    @@ -8645,12 +8990,12 @@

    API Documentation

    -
    1573	def report(self):
    -1574		'''
    -1575		Prints a report on the standardization fit.
    -1576		Only applicable after `D4xdata.standardize(method='pooled')`.
    -1577		'''
    -1578		report_fit(self.standardization)
    +            
    1582	def report(self):
    +1583		'''
    +1584		Prints a report on the standardization fit.
    +1585		Only applicable after `D4xdata.standardize(method='pooled')`.
    +1586		'''
    +1587		report_fit(self.standardization)
     
    @@ -8671,43 +9016,43 @@

    API Documentation

    -
    1581	def combine_samples(self, sample_groups):
    -1582		'''
    -1583		Combine analyses of different samples to compute weighted average Δ4x
    -1584		and new error (co)variances corresponding to the groups defined by the `sample_groups`
    -1585		dictionary.
    -1586		
    -1587		Caution: samples are weighted by number of replicate analyses, which is a
    -1588		reasonable default behavior but is not always optimal (e.g., in the case of strongly
    -1589		correlated analytical errors for one or more samples).
    -1590		
    -1591		Returns a tuple of:
    -1592		
    -1593		+ the list of group names
    -1594		+ an array of the corresponding Δ4x values
    -1595		+ the corresponding (co)variance matrix
    -1596		
    -1597		**Parameters**
    -1598
    -1599		+ `sample_groups`: a dictionary of the form:
    -1600		```py
    -1601		{'group1': ['sample_1', 'sample_2'],
    -1602		 'group2': ['sample_3', 'sample_4', 'sample_5']}
    -1603		```
    -1604		'''
    -1605		
    -1606		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
    -1607		groups = sorted(sample_groups.keys())
    -1608		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
    -1609		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
    -1610		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
    -1611		W = np.array([
    -1612			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
    -1613			for j in groups])
    -1614		D4x_new = W @ D4x_old
    -1615		CM_new = W @ CM_old @ W.T
    -1616
    -1617		return groups, D4x_new[:,0], CM_new
    +            
    1590	def combine_samples(self, sample_groups):
    +1591		'''
    +1592		Combine analyses of different samples to compute weighted average Δ4x
    +1593		and new error (co)variances corresponding to the groups defined by the `sample_groups`
    +1594		dictionary.
    +1595		
    +1596		Caution: samples are weighted by number of replicate analyses, which is a
    +1597		reasonable default behavior but is not always optimal (e.g., in the case of strongly
    +1598		correlated analytical errors for one or more samples).
    +1599		
    +1600		Returns a tuple of:
    +1601		
    +1602		+ the list of group names
    +1603		+ an array of the corresponding Δ4x values
    +1604		+ the corresponding (co)variance matrix
    +1605		
    +1606		**Parameters**
    +1607
    +1608		+ `sample_groups`: a dictionary of the form:
    +1609		```py
    +1610		{'group1': ['sample_1', 'sample_2'],
    +1611		 'group2': ['sample_3', 'sample_4', 'sample_5']}
    +1612		```
    +1613		'''
    +1614		
    +1615		samples = [s for k in sorted(sample_groups.keys()) for s in sorted(sample_groups[k])]
    +1616		groups = sorted(sample_groups.keys())
    +1617		group_total_weights = {k: sum([self.samples[s]['N'] for s in sample_groups[k]]) for k in groups}
    +1618		D4x_old = np.array([[self.samples[x][f'D{self._4x}']] for x in samples])
    +1619		CM_old = np.array([[self.sample_D4x_covar(x,y) for x in samples] for y in samples])
    +1620		W = np.array([
    +1621			[self.samples[i]['N']/group_total_weights[j] if i in sample_groups[j] else 0 for i in samples]
    +1622			for j in groups])
    +1623		D4x_new = W @ D4x_old
    +1624		CM_new = W @ CM_old @ W.T
    +1625
    +1626		return groups, D4x_new[:,0], CM_new
     
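
    The weighting scheme above is easy to reproduce by hand: within each group, a sample's
    weight is its `N` divided by the group's total `N`, and the covariance matrix is
    propagated through the resulting matrix `W`. A self-contained sketch with hypothetical
    numbers:

    ```py
    import numpy as np

    samples = ['sample_1', 'sample_2', 'sample_3']
    N = {'sample_1': 4, 'sample_2': 8, 'sample_3': 6}
    sample_groups = {'group1': ['sample_1', 'sample_2'], 'group2': ['sample_3']}
    groups = sorted(sample_groups)

    D4x_old = np.array([[0.30], [0.32], [0.60]])  # hypothetical D47 values
    CM_old = np.diag([4e-4, 2e-4, 3e-4])          # hypothetical covariances

    # Each group row of W holds N-based weights summing to 1:
    W = np.array([
        [N[s] / sum(N[x] for x in sample_groups[g]) if s in sample_groups[g] else 0
            for s in samples]
        for g in groups])

    D4x_new = W @ D4x_old      # group-averaged values
    CM_new = W @ CM_old @ W.T  # propagated (co)variance matrix
    print(groups, D4x_new[:, 0], CM_new)
    ```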
    @@ -8754,238 +9099,238 @@

    API Documentation

    -
    1620	@make_verbal
    -1621	def standardize(self,
    -1622		method = 'pooled',
    -1623		weighted_sessions = [],
    -1624		consolidate = True,
    -1625		consolidate_tables = False,
    -1626		consolidate_plots = False,
    -1627		constraints = {},
    -1628		):
    -1629		'''
    -1630		Compute absolute Δ4x values for all replicate analyses and for sample averages.
    -1631		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
    -1632		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
    -1633		i.e. that their true Δ4x value does not change between sessions
    -1634		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
    -1635		`'indep_sessions'`, the standardization processes each session independently, based only
    -1636		on anchor analyses.
    -1637		'''
    -1638
    -1639		self.standardization_method = method
    -1640		self.assign_timestamps()
    -1641
    -1642		if method == 'pooled':
    -1643			if weighted_sessions:
    -1644				for session_group in weighted_sessions:
    -1645					if self._4x == '47':
    -1646						X = D47data([r for r in self if r['Session'] in session_group])
    -1647					elif self._4x == '48':
    -1648						X = D48data([r for r in self if r['Session'] in session_group])
    -1649					X.Nominal_D4x = self.Nominal_D4x.copy()
    -1650					X.refresh()
    -1651					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
    -1652					w = np.sqrt(result.redchi)
    -1653					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
    -1654					for r in X:
    -1655						r[f'wD{self._4x}raw'] *= w
    -1656			else:
    -1657				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
    -1658				for r in self:
    -1659					r[f'wD{self._4x}raw'] = 1.
    -1660
    -1661			params = Parameters()
    -1662			for k,session in enumerate(self.sessions):
    -1663				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
    -1664				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
    -1665				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
    -1666				s = pf(session)
    -1667				params.add(f'a_{s}', value = 0.9)
    -1668				params.add(f'b_{s}', value = 0.)
    -1669				params.add(f'c_{s}', value = -0.9)
    -1670				params.add(f'a2_{s}', value = 0.,
    -1671# 					vary = self.sessions[session]['scrambling_drift'],
    -1672					)
    -1673				params.add(f'b2_{s}', value = 0.,
    -1674# 					vary = self.sessions[session]['slope_drift'],
    -1675					)
    -1676				params.add(f'c2_{s}', value = 0.,
    -1677# 					vary = self.sessions[session]['wg_drift'],
    -1678					)
    -1679				if not self.sessions[session]['scrambling_drift']:
    -1680					params[f'a2_{s}'].expr = '0'
    -1681				if not self.sessions[session]['slope_drift']:
    -1682					params[f'b2_{s}'].expr = '0'
    -1683				if not self.sessions[session]['wg_drift']:
    -1684					params[f'c2_{s}'].expr = '0'
    -1685
    -1686			for sample in self.unknowns:
    -1687				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
    -1688
    -1689			for k in constraints:
    -1690				params[k].expr = constraints[k]
    -1691
    -1692			def residuals(p):
    -1693				R = []
    -1694				for r in self:
    -1695					session = pf(r['Session'])
    -1696					sample = pf(r['Sample'])
    -1697					if r['Sample'] in self.Nominal_D4x:
    -1698						R += [ (
    -1699							r[f'D{self._4x}raw'] - (
    -1700								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
    -1701								+ p[f'b_{session}'] * r[f'd{self._4x}']
    -1702								+	p[f'c_{session}']
    -1703								+ r['t'] * (
    -1704									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
    -1705									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    -1706									+	p[f'c2_{session}']
    -1707									)
    -1708								)
    -1709							) / r[f'wD{self._4x}raw'] ]
    -1710					else:
    -1711						R += [ (
    -1712							r[f'D{self._4x}raw'] - (
    -1713								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
    -1714								+ p[f'b_{session}'] * r[f'd{self._4x}']
    -1715								+	p[f'c_{session}']
    -1716								+ r['t'] * (
    -1717									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
    -1718									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    -1719									+	p[f'c2_{session}']
    -1720									)
    -1721								)
    -1722							) / r[f'wD{self._4x}raw'] ]
    -1723				return R
    -1724
    -1725			M = Minimizer(residuals, params)
    -1726			result = M.least_squares()
    -1727			self.Nf = result.nfree
    -1728			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    -1729			new_names, new_covar, new_se = _fullcovar(result)[:3]
    -1730			result.var_names = new_names
    -1731			result.covar = new_covar
    -1732
    -1733			for r in self:
    -1734				s = pf(r["Session"])
    -1735				a = result.params.valuesdict()[f'a_{s}']
    -1736				b = result.params.valuesdict()[f'b_{s}']
    -1737				c = result.params.valuesdict()[f'c_{s}']
    -1738				a2 = result.params.valuesdict()[f'a2_{s}']
    -1739				b2 = result.params.valuesdict()[f'b2_{s}']
    -1740				c2 = result.params.valuesdict()[f'c2_{s}']
    -1741				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    -1742
    -1743			self.standardization = result
    -1744
    -1745			for session in self.sessions:
    -1746				self.sessions[session]['Np'] = 3
    -1747				for k in ['scrambling', 'slope', 'wg']:
    -1748					if self.sessions[session][f'{k}_drift']:
    -1749						self.sessions[session]['Np'] += 1
    -1750
    -1751			if consolidate:
    -1752				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    -1753			return result
    -1754
    -1755
    -1756		elif method == 'indep_sessions':
    -1757
    -1758			if weighted_sessions:
    -1759				for session_group in weighted_sessions:
    -1760					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
    -1761					X.Nominal_D4x = self.Nominal_D4x.copy()
    -1762					X.refresh()
    -1763					# This is only done to assign r['wD47raw'] for r in X:
    -1764					X.standardize(method = method, weighted_sessions = [], consolidate = False)
    -1765					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
    -1766			else:
    -1767				self.msg('All weights set to 1 ‰')
    -1768				for r in self:
    -1769					r[f'wD{self._4x}raw'] = 1
    -1770
    -1771			for session in self.sessions:
    -1772				s = self.sessions[session]
    -1773				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
    -1774				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
    -1775				s['Np'] = sum(p_active)
    -1776				sdata = s['data']
    -1777
    -1778				A = np.array([
    -1779					[
    -1780						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
    -1781						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
    -1782						1 / r[f'wD{self._4x}raw'],
    -1783						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
    -1784						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
    -1785						r['t'] / r[f'wD{self._4x}raw']
    -1786						]
    -1787					for r in sdata if r['Sample'] in self.anchors
    -1788					])[:,p_active] # only keep columns for the active parameters
    -1789				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
    -1790				s['Na'] = Y.size
    -1791				CM = linalg.inv(A.T @ A)
    -1792				bf = (CM @ A.T @ Y).T[0,:]
    -1793				k = 0
    -1794				for n,a in zip(p_names, p_active):
    -1795					if a:
    -1796						s[n] = bf[k]
    -1797# 						self.msg(f'{n} = {bf[k]}')
    -1798						k += 1
    -1799					else:
    -1800						s[n] = 0.
    -1801# 						self.msg(f'{n} = 0.0')
    -1802
    -1803				for r in sdata :
    -1804					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
    -1805					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    -1806					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
    -1807
    -1808				s['CM'] = np.zeros((6,6))
    -1809				i = 0
    -1810				k_active = [j for j,a in enumerate(p_active) if a]
    -1811				for j,a in enumerate(p_active):
    -1812					if a:
    -1813						s['CM'][j,k_active] = CM[i,:]
    -1814						i += 1
    -1815
    -1816			if not weighted_sessions:
    -1817				w = self.rmswd()['rmswd']
    -1818				for r in self:
    -1819						r[f'wD{self._4x}'] *= w
    -1820						r[f'wD{self._4x}raw'] *= w
    -1821				for session in self.sessions:
    -1822					self.sessions[session]['CM'] *= w**2
    -1823
    -1824			for session in self.sessions:
    -1825				s = self.sessions[session]
    -1826				s['SE_a'] = s['CM'][0,0]**.5
    -1827				s['SE_b'] = s['CM'][1,1]**.5
    -1828				s['SE_c'] = s['CM'][2,2]**.5
    -1829				s['SE_a2'] = s['CM'][3,3]**.5
    -1830				s['SE_b2'] = s['CM'][4,4]**.5
    -1831				s['SE_c2'] = s['CM'][5,5]**.5
    +            
    1629	@make_verbal
    +1630	def standardize(self,
    +1631		method = 'pooled',
    +1632		weighted_sessions = [],
    +1633		consolidate = True,
    +1634		consolidate_tables = False,
    +1635		consolidate_plots = False,
    +1636		constraints = {},
    +1637		):
    +1638		'''
    +1639		Compute absolute Δ4x values for all replicate analyses and for sample averages.
    +1640		If the `method` argument is set to `'pooled'`, the standardization processes all sessions
    +1641		in a single step, assuming that all samples (anchors and unknowns alike) are homogeneous,
    +1642		i.e. that their true Δ4x value does not change between sessions
    +1643		([Daëron, 2021](https://doi.org/10.1029/2020GC009592)). If the `method` argument is set to
    +1644		`'indep_sessions'`, the standardization processes each session independently, based only
    +1645		on anchor analyses.
    +1646		'''
    +1647
    +1648		self.standardization_method = method
    +1649		self.assign_timestamps()
    +1650
    +1651		if method == 'pooled':
    +1652			if weighted_sessions:
    +1653				for session_group in weighted_sessions:
    +1654					if self._4x == '47':
    +1655						X = D47data([r for r in self if r['Session'] in session_group])
    +1656					elif self._4x == '48':
    +1657						X = D48data([r for r in self if r['Session'] in session_group])
    +1658					X.Nominal_D4x = self.Nominal_D4x.copy()
    +1659					X.refresh()
    +1660					result = X.standardize(method = 'pooled', weighted_sessions = [], consolidate = False)
    +1661					w = np.sqrt(result.redchi)
    +1662					self.msg(f'Session group {session_group} MRSWD = {w:.4f}')
    +1663					for r in X:
    +1664						r[f'wD{self._4x}raw'] *= w
    +1665			else:
    +1666				self.msg(f'All D{self._4x}raw weights set to 1 ‰')
    +1667				for r in self:
    +1668					r[f'wD{self._4x}raw'] = 1.
    +1669
    +1670			params = Parameters()
    +1671			for k,session in enumerate(self.sessions):
    +1672				self.msg(f"Session {session}: scrambling_drift is {self.sessions[session]['scrambling_drift']}.")
    +1673				self.msg(f"Session {session}: slope_drift is {self.sessions[session]['slope_drift']}.")
    +1674				self.msg(f"Session {session}: wg_drift is {self.sessions[session]['wg_drift']}.")
    +1675				s = pf(session)
    +1676				params.add(f'a_{s}', value = 0.9)
    +1677				params.add(f'b_{s}', value = 0.)
    +1678				params.add(f'c_{s}', value = -0.9)
    +1679				params.add(f'a2_{s}', value = 0.,
    +1680# 					vary = self.sessions[session]['scrambling_drift'],
    +1681					)
    +1682				params.add(f'b2_{s}', value = 0.,
    +1683# 					vary = self.sessions[session]['slope_drift'],
    +1684					)
    +1685				params.add(f'c2_{s}', value = 0.,
    +1686# 					vary = self.sessions[session]['wg_drift'],
    +1687					)
    +1688				if not self.sessions[session]['scrambling_drift']:
    +1689					params[f'a2_{s}'].expr = '0'
    +1690				if not self.sessions[session]['slope_drift']:
    +1691					params[f'b2_{s}'].expr = '0'
    +1692				if not self.sessions[session]['wg_drift']:
    +1693					params[f'c2_{s}'].expr = '0'
    +1694
    +1695			for sample in self.unknowns:
    +1696				params.add(f'D{self._4x}_{pf(sample)}', value = 0.5)
    +1697
    +1698			for k in constraints:
    +1699				params[k].expr = constraints[k]
    +1700
    +1701			def residuals(p):
    +1702				R = []
    +1703				for r in self:
    +1704					session = pf(r['Session'])
    +1705					sample = pf(r['Sample'])
    +1706					if r['Sample'] in self.Nominal_D4x:
    +1707						R += [ (
    +1708							r[f'D{self._4x}raw'] - (
    +1709								p[f'a_{session}'] * self.Nominal_D4x[r['Sample']]
    +1710								+ p[f'b_{session}'] * r[f'd{self._4x}']
    +1711								+	p[f'c_{session}']
    +1712								+ r['t'] * (
    +1713									p[f'a2_{session}'] * self.Nominal_D4x[r['Sample']]
    +1714									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    +1715									+	p[f'c2_{session}']
    +1716									)
    +1717								)
    +1718							) / r[f'wD{self._4x}raw'] ]
    +1719					else:
    +1720						R += [ (
    +1721							r[f'D{self._4x}raw'] - (
    +1722								p[f'a_{session}'] * p[f'D{self._4x}_{sample}']
    +1723								+ p[f'b_{session}'] * r[f'd{self._4x}']
    +1724								+	p[f'c_{session}']
    +1725								+ r['t'] * (
    +1726									p[f'a2_{session}'] * p[f'D{self._4x}_{sample}']
    +1727									+ p[f'b2_{session}'] * r[f'd{self._4x}']
    +1728									+	p[f'c2_{session}']
    +1729									)
    +1730								)
    +1731							) / r[f'wD{self._4x}raw'] ]
    +1732				return R
    +1733
    +1734			M = Minimizer(residuals, params)
    +1735			result = M.least_squares()
    +1736			self.Nf = result.nfree
    +1737			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1738			new_names, new_covar, new_se = _fullcovar(result)[:3]
    +1739			result.var_names = new_names
    +1740			result.covar = new_covar
    +1741
    +1742			for r in self:
    +1743				s = pf(r["Session"])
    +1744				a = result.params.valuesdict()[f'a_{s}']
    +1745				b = result.params.valuesdict()[f'b_{s}']
    +1746				c = result.params.valuesdict()[f'c_{s}']
    +1747				a2 = result.params.valuesdict()[f'a2_{s}']
    +1748				b2 = result.params.valuesdict()[f'b2_{s}']
    +1749				c2 = result.params.valuesdict()[f'c2_{s}']
    +1750				r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    +1751
    +1752			self.standardization = result
    +1753
    +1754			for session in self.sessions:
    +1755				self.sessions[session]['Np'] = 3
    +1756				for k in ['scrambling', 'slope', 'wg']:
    +1757					if self.sessions[session][f'{k}_drift']:
    +1758						self.sessions[session]['Np'] += 1
    +1759
    +1760			if consolidate:
    +1761				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    +1762			return result
    +1763
    +1764
    +1765		elif method == 'indep_sessions':
    +1766
    +1767			if weighted_sessions:
    +1768				for session_group in weighted_sessions:
    +1769					X = D4xdata([r for r in self if r['Session'] in session_group], mass = self._4x)
    +1770					X.Nominal_D4x = self.Nominal_D4x.copy()
    +1771					X.refresh()
    +1772					# This is only done to assign r['wD47raw'] for r in X:
    +1773					X.standardize(method = method, weighted_sessions = [], consolidate = False)
    +1774					self.msg(f'D{self._4x}raw weights set to {1000*X[0][f"wD{self._4x}raw"]:.1f} ppm for sessions in {session_group}')
    +1775			else:
    +1776				self.msg('All weights set to 1 ‰')
    +1777				for r in self:
    +1778					r[f'wD{self._4x}raw'] = 1
    +1779
    +1780			for session in self.sessions:
    +1781				s = self.sessions[session]
    +1782				p_names = ['a', 'b', 'c', 'a2', 'b2', 'c2']
    +1783				p_active = [True, True, True, s['scrambling_drift'], s['slope_drift'], s['wg_drift']]
    +1784				s['Np'] = sum(p_active)
    +1785				sdata = s['data']
    +1786
    +1787				A = np.array([
    +1788					[
    +1789						self.Nominal_D4x[r['Sample']] / r[f'wD{self._4x}raw'],
    +1790						r[f'd{self._4x}'] / r[f'wD{self._4x}raw'],
    +1791						1 / r[f'wD{self._4x}raw'],
    +1792						self.Nominal_D4x[r['Sample']] * r['t'] / r[f'wD{self._4x}raw'],
    +1793						r[f'd{self._4x}'] * r['t'] / r[f'wD{self._4x}raw'],
    +1794						r['t'] / r[f'wD{self._4x}raw']
    +1795						]
    +1796					for r in sdata if r['Sample'] in self.anchors
    +1797					])[:,p_active] # only keep columns for the active parameters
    +1798				Y = np.array([[r[f'D{self._4x}raw'] / r[f'wD{self._4x}raw']] for r in sdata if r['Sample'] in self.anchors])
    +1799				s['Na'] = Y.size
    +1800				CM = linalg.inv(A.T @ A)
    +1801				bf = (CM @ A.T @ Y).T[0,:]
    +1802				k = 0
    +1803				for n,a in zip(p_names, p_active):
    +1804					if a:
    +1805						s[n] = bf[k]
    +1806# 						self.msg(f'{n} = {bf[k]}')
    +1807						k += 1
    +1808					else:
    +1809						s[n] = 0.
    +1810# 						self.msg(f'{n} = 0.0')
    +1811
    +1812				for r in sdata :
    +1813					a, b, c, a2, b2, c2 = s['a'], s['b'], s['c'], s['a2'], s['b2'], s['c2']
    +1814					r[f'D{self._4x}'] = (r[f'D{self._4x}raw'] - c - b * r[f'd{self._4x}'] - c2 * r['t'] - b2 * r['t'] * r[f'd{self._4x}']) / (a + a2 * r['t'])
    +1815					r[f'wD{self._4x}'] = r[f'wD{self._4x}raw'] / (a + a2 * r['t'])
    +1816
    +1817				s['CM'] = np.zeros((6,6))
    +1818				i = 0
    +1819				k_active = [j for j,a in enumerate(p_active) if a]
    +1820				for j,a in enumerate(p_active):
    +1821					if a:
    +1822						s['CM'][j,k_active] = CM[i,:]
    +1823						i += 1
    +1824
    +1825			if not weighted_sessions:
    +1826				w = self.rmswd()['rmswd']
    +1827				for r in self:
    +1828						r[f'wD{self._4x}'] *= w
    +1829						r[f'wD{self._4x}raw'] *= w
    +1830				for session in self.sessions:
    +1831					self.sessions[session]['CM'] *= w**2
     1832
    -1833			if not weighted_sessions:
    -1834				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
    -1835			else:
    -1836				self.Nf = 0
    -1837				for sg in weighted_sessions:
    -1838					self.Nf += self.rmswd(sessions = sg)['Nf']
    -1839
    -1840			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1833			for session in self.sessions:
    +1834				s = self.sessions[session]
    +1835				s['SE_a'] = s['CM'][0,0]**.5
    +1836				s['SE_b'] = s['CM'][1,1]**.5
    +1837				s['SE_c'] = s['CM'][2,2]**.5
    +1838				s['SE_a2'] = s['CM'][3,3]**.5
    +1839				s['SE_b2'] = s['CM'][4,4]**.5
    +1840				s['SE_c2'] = s['CM'][5,5]**.5
     1841
    -1842			avgD4x = {
    -1843				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
    -1844				for sample in self.samples
    -1845				}
    -1846			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
    -1847			rD4x = (chi2/self.Nf)**.5
    -1848			self.repeatability[f'sigma_{self._4x}'] = rD4x
    -1849
    -1850			if consolidate:
    -1851				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
    +1842			if not weighted_sessions:
    +1843				self.Nf = len(self) - len(self.unknowns) - np.sum([self.sessions[s]['Np'] for s in self.sessions])
    +1844			else:
    +1845				self.Nf = 0
    +1846				for sg in weighted_sessions:
    +1847					self.Nf += self.rmswd(sessions = sg)['Nf']
    +1848
    +1849			self.t95 = tstudent.ppf(1 - 0.05/2, self.Nf)
    +1850
    +1851			avgD4x = {
    +1852				sample: np.mean([r[f'D{self._4x}'] for r in self if r['Sample'] == sample])
    +1853				for sample in self.samples
    +1854				}
    +1855			chi2 = np.sum([(r[f'D{self._4x}'] - avgD4x[r['Sample']])**2 for r in self])
    +1856			rD4x = (chi2/self.Nf)**.5
    +1857			self.repeatability[f'sigma_{self._4x}'] = rD4x
    +1858
    +1859			if consolidate:
    +1860				self.consolidate(tables = consolidate_tables, plots = consolidate_plots)
     
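
    For orientation, a usage sketch (not from the source): it assumes a `D47data` instance
    `mydata` that has already been populated and crunched, and a hypothetical session named
    `Session_1` whose name passes through `pf()` unchanged.

    ```py
    # Pooled fit (default): one model over all sessions, unknowns shared.
    mydata.standardize()

    # Independent fit: each session standardized from its anchors only.
    mydata.standardize(method = 'indep_sessions')

    # Constraints pin standardization parameters by name; session names are
    # filtered through pf(), so a hypothetical 'Session_1' yields e.g.:
    mydata.standardize(constraints = {'a_Session_1': '1'})
    ```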
    @@ -9011,33 +9356,33 @@

    API Documentation

    -
    1854	def standardization_error(self, session, d4x, D4x, t = 0):
    -1855		'''
    -1856		Compute standardization error for a given session and
    -1857		(δ47, Δ47) composition.
    -1858		'''
    -1859		a = self.sessions[session]['a']
    -1860		b = self.sessions[session]['b']
    -1861		c = self.sessions[session]['c']
    -1862		a2 = self.sessions[session]['a2']
    -1863		b2 = self.sessions[session]['b2']
    -1864		c2 = self.sessions[session]['c2']
    -1865		CM = self.sessions[session]['CM']
    -1866
    -1867		x, y = D4x, d4x
    -1868		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
    -1869# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
    -1870		dxdy = -(b+b2*t) / (a+a2*t)
    -1871		dxdz = 1. / (a+a2*t)
    -1872		dxda = -x / (a+a2*t)
    -1873		dxdb = -y / (a+a2*t)
    -1874		dxdc = -1. / (a+a2*t)
    -1875		dxda2 = -x * t / (a+a2*t)
    -1876		dxdb2 = -y * t / (a+a2*t)
    -1877		dxdc2 = -t / (a+a2*t)
    -1878		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
    -1879		sx = (V @ CM @ V.T) ** .5
    -1880		return sx
    +            
    1863	def standardization_error(self, session, d4x, D4x, t = 0):
    +1864		'''
    +1865		Compute standardization error for a given session and
    +1866		(δ47, Δ47) composition.
    +1867		'''
    +1868		a = self.sessions[session]['a']
    +1869		b = self.sessions[session]['b']
    +1870		c = self.sessions[session]['c']
    +1871		a2 = self.sessions[session]['a2']
    +1872		b2 = self.sessions[session]['b2']
    +1873		c2 = self.sessions[session]['c2']
    +1874		CM = self.sessions[session]['CM']
    +1875
    +1876		x, y = D4x, d4x
    +1877		z = a * x + b * y + c + a2 * x * t + b2 * y * t + c2 * t
    +1878# 		x = (z - b*y - b2*y*t - c - c2*t) / (a+a2*t)
    +1879		dxdy = -(b+b2*t) / (a+a2*t)
    +1880		dxdz = 1. / (a+a2*t)
    +1881		dxda = -x / (a+a2*t)
    +1882		dxdb = -y / (a+a2*t)
    +1883		dxdc = -1. / (a+a2*t)
    +1884		dxda2 = -x * t / (a+a2*t)
    +1885		dxdb2 = -y * t / (a+a2*t)
    +1886		dxdc2 = -t / (a+a2*t)
    +1887		V = np.array([dxda, dxdb, dxdc, dxda2, dxdb2, dxdc2])
    +1888		sx = (V @ CM @ V.T) ** .5
    +1889		return sx
     
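
    The returned `sx` is a first-order (delta-method) error: the vector `V` of partial
    derivatives of the standardized value with respect to the six session parameters is
    projected through the session covariance matrix, `sx = (V CM V^T)^0.5`. A self-contained
    numerical sketch with hypothetical values:

    ```py
    import numpy as np

    a, b, c, a2, b2, c2 = 1.02, 1e-3, -0.9, 0., 0., 0.
    t, x, y = 0., 0.3, 20.  # time, D4x, d4x (hypothetical)
    CM = np.diag([1e-4, 1e-8, 1e-4, 0., 0., 0.])  # hypothetical session covariance

    D = a + a2 * t
    # Partial derivatives of x w.r.t. (a, b, c, a2, b2, c2):
    V = np.array([-x/D, -y/D, -1./D, -x*t/D, -y*t/D, -t/D])
    sx = (V @ CM @ V) ** 0.5  # standard error of the standardized value
    print(sx)
    ```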
    @@ -9059,45 +9404,45 @@

    API Documentation

    -
    1883	@make_verbal
    -1884	def summary(self,
    -1885		dir = 'output',
    -1886		filename = None,
    -1887		save_to_file = True,
    -1888		print_out = True,
    -1889		):
    -1890		'''
    -1891		Print out and/or save to disk a summary of the standardization results.
    -1892
    -1893		**Parameters**
    -1894
    -1895		+ `dir`: the directory in which to save the table
    -1896		+ `filename`: the name of the csv file to write to
    -1897		+ `save_to_file`: whether to save the table to disk
    -1898		+ `print_out`: whether to print out the table
    -1899		'''
    -1900
    -1901		out = []
    -1902		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
    -1903		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
    -1904		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
    -1905		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
    -1906		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
    -1907		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
    -1908		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
    -1909		out += [['Model degrees of freedom', f"{self.Nf}"]]
    -1910		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
    -1911		out += [['Standardization method', self.standardization_method]]
    -1912
    -1913		if save_to_file:
    -1914			if not os.path.exists(dir):
    -1915				os.makedirs(dir)
    -1916			if filename is None:
    -1917				filename = f'D{self._4x}_summary.csv'
    -1918			with open(f'{dir}/{filename}', 'w') as fid:
    -1919				fid.write(make_csv(out))
    -1920		if print_out:
    -1921			self.msg('\n' + pretty_table(out, header = 0))
    +            
    1892	@make_verbal
    +1893	def summary(self,
    +1894		dir = 'output',
    +1895		filename = None,
    +1896		save_to_file = True,
    +1897		print_out = True,
    +1898		):
    +1899		'''
    +1900		Print out and/or save to disk a summary of the standardization results.
    +1901
    +1902		**Parameters**
    +1903
    +1904		+ `dir`: the directory in which to save the table
    +1905		+ `filename`: the name of the csv file to write to
    +1906		+ `save_to_file`: whether to save the table to disk
    +1907		+ `print_out`: whether to print out the table
    +1908		'''
    +1909
    +1910		out = []
    +1911		out += [['N samples (anchors + unknowns)', f"{len(self.samples)} ({len(self.anchors)} + {len(self.unknowns)})"]]
    +1912		out += [['N analyses (anchors + unknowns)', f"{len(self)} ({len([r for r in self if r['Sample'] in self.anchors])} + {len([r for r in self if r['Sample'] in self.unknowns])})"]]
    +1913		out += [['Repeatability of δ13C_VPDB', f"{1000 * self.repeatability['r_d13C_VPDB']:.1f} ppm"]]
    +1914		out += [['Repeatability of δ18O_VSMOW', f"{1000 * self.repeatability['r_d18O_VSMOW']:.1f} ppm"]]
    +1915		out += [[f'Repeatability of Δ{self._4x} (anchors)', f"{1000 * self.repeatability[f'r_D{self._4x}a']:.1f} ppm"]]
    +1916		out += [[f'Repeatability of Δ{self._4x} (unknowns)', f"{1000 * self.repeatability[f'r_D{self._4x}u']:.1f} ppm"]]
    +1917		out += [[f'Repeatability of Δ{self._4x} (all)', f"{1000 * self.repeatability[f'r_D{self._4x}']:.1f} ppm"]]
    +1918		out += [['Model degrees of freedom', f"{self.Nf}"]]
    +1919		out += [['Student\'s 95% t-factor', f"{self.t95:.2f}"]]
    +1920		out += [['Standardization method', self.standardization_method]]
    +1921
    +1922		if save_to_file:
    +1923			if not os.path.exists(dir):
    +1924				os.makedirs(dir)
    +1925			if filename is None:
    +1926				filename = f'D{self._4x}_summary.csv'
    +1927			with open(f'{dir}/{filename}', 'w') as fid:
    +1928				fid.write(make_csv(out))
    +1929		if print_out:
    +1930			self.msg('\n' + pretty_table(out, header = 0))
     
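
    Usage sketch (assuming `mydata` has been standardized as above):

    ```py
    # Print the summary table without writing anything to disk:
    mydata.summary(save_to_file = False)

    # Or write it to a custom location instead:
    mydata.summary(dir = 'output', filename = 'D47_summary.csv', print_out = False)
    ```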
    @@ -9127,81 +9472,81 @@

    API Documentation

    -
    1924	@make_verbal
    -1925	def table_of_sessions(self,
    -1926		dir = 'output',
    -1927		filename = None,
    -1928		save_to_file = True,
    -1929		print_out = True,
    -1930		output = None,
    -1931		):
    -1932		'''
    -1933		Print out and/or save to disk a table of sessions.
    -1934
    -1935		**Parameters**
    -1936
    -1937		+ `dir`: the directory in which to save the table
    -1938		+ `filename`: the name of the csv file to write to
    -1939		+ `save_to_file`: whether to save the table to disk
    -1940		+ `print_out`: whether to print out the table
    -1941		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -1942		    if set to `'raw'`: return a list of list of strings
    -1943		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -1944		'''
    -1945		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
    -1946		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
    -1947		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
    -1948
    -1949		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
    -1950		if include_a2:
    -1951			out[-1] += ['a2 ± SE']
    -1952		if include_b2:
    -1953			out[-1] += ['b2 ± SE']
    -1954		if include_c2:
    -1955			out[-1] += ['c2 ± SE']
    -1956		for session in self.sessions:
    -1957			out += [[
    -1958				session,
    -1959				f"{self.sessions[session]['Na']}",
    -1960				f"{self.sessions[session]['Nu']}",
    -1961				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
    -1962				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
    -1963				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
    -1964				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
    -1965				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
    -1966				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
    -1967				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
    -1968				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
    -1969				]]
    -1970			if include_a2:
    -1971				if self.sessions[session]['scrambling_drift']:
    -1972					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
    -1973				else:
    -1974					out[-1] += ['']
    -1975			if include_b2:
    -1976				if self.sessions[session]['slope_drift']:
    -1977					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
    -1978				else:
    -1979					out[-1] += ['']
    -1980			if include_c2:
    -1981				if self.sessions[session]['wg_drift']:
    -1982					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
    -1983				else:
    -1984					out[-1] += ['']
    -1985
    -1986		if save_to_file:
    -1987			if not os.path.exists(dir):
    -1988				os.makedirs(dir)
    -1989			if filename is None:
    -1990				filename = f'D{self._4x}_sessions.csv'
    -1991			with open(f'{dir}/{filename}', 'w') as fid:
    -1992				fid.write(make_csv(out))
    -1993		if print_out:
    -1994			self.msg('\n' + pretty_table(out))
    -1995		if output == 'raw':
    -1996			return out
    -1997		elif output == 'pretty':
    -1998			return pretty_table(out)
    +            
    1933	@make_verbal
    +1934	def table_of_sessions(self,
    +1935		dir = 'output',
    +1936		filename = None,
    +1937		save_to_file = True,
    +1938		print_out = True,
    +1939		output = None,
    +1940		):
    +1941		'''
    +1942		Print out and/or save to disk a table of sessions.
    +1943
    +1944		**Parameters**
    +1945
    +1946		+ `dir`: the directory in which to save the table
    +1947		+ `filename`: the name of the csv file to write to
    +1948		+ `save_to_file`: whether to save the table to disk
    +1949		+ `print_out`: whether to print out the table
    +1950		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +1951		    if set to `'raw'`: return a list of list of strings
    +1952		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +1953		'''
    +1954		include_a2 = any([self.sessions[session]['scrambling_drift'] for session in self.sessions])
    +1955		include_b2 = any([self.sessions[session]['slope_drift'] for session in self.sessions])
    +1956		include_c2 = any([self.sessions[session]['wg_drift'] for session in self.sessions])
    +1957
    +1958		out = [['Session','Na','Nu','d13Cwg_VPDB','d18Owg_VSMOW','r_d13C','r_d18O',f'r_D{self._4x}','a ± SE','1e3 x b ± SE','c ± SE']]
    +1959		if include_a2:
    +1960			out[-1] += ['a2 ± SE']
    +1961		if include_b2:
    +1962			out[-1] += ['b2 ± SE']
    +1963		if include_c2:
    +1964			out[-1] += ['c2 ± SE']
    +1965		for session in self.sessions:
    +1966			out += [[
    +1967				session,
    +1968				f"{self.sessions[session]['Na']}",
    +1969				f"{self.sessions[session]['Nu']}",
    +1970				f"{self.sessions[session]['d13Cwg_VPDB']:.3f}",
    +1971				f"{self.sessions[session]['d18Owg_VSMOW']:.3f}",
    +1972				f"{self.sessions[session]['r_d13C_VPDB']:.4f}",
    +1973				f"{self.sessions[session]['r_d18O_VSMOW']:.4f}",
    +1974				f"{self.sessions[session][f'r_D{self._4x}']:.4f}",
    +1975				f"{self.sessions[session]['a']:.3f} ± {self.sessions[session]['SE_a']:.3f}",
    +1976				f"{1e3*self.sessions[session]['b']:.3f} ± {1e3*self.sessions[session]['SE_b']:.3f}",
    +1977				f"{self.sessions[session]['c']:.3f} ± {self.sessions[session]['SE_c']:.3f}",
    +1978				]]
    +1979			if include_a2:
    +1980				if self.sessions[session]['scrambling_drift']:
    +1981					out[-1] += [f"{self.sessions[session]['a2']:.1e} ± {self.sessions[session]['SE_a2']:.1e}"]
    +1982				else:
    +1983					out[-1] += ['']
    +1984			if include_b2:
    +1985				if self.sessions[session]['slope_drift']:
    +1986					out[-1] += [f"{self.sessions[session]['b2']:.1e} ± {self.sessions[session]['SE_b2']:.1e}"]
    +1987				else:
    +1988					out[-1] += ['']
    +1989			if include_c2:
    +1990				if self.sessions[session]['wg_drift']:
    +1991					out[-1] += [f"{self.sessions[session]['c2']:.1e} ± {self.sessions[session]['SE_c2']:.1e}"]
    +1992				else:
    +1993					out[-1] += ['']
    +1994
    +1995		if save_to_file:
    +1996			if not os.path.exists(dir):
    +1997				os.makedirs(dir)
    +1998			if filename is None:
    +1999				filename = f'D{self._4x}_sessions.csv'
    +2000			with open(f'{dir}/{filename}', 'w') as fid:
    +2001				fid.write(make_csv(out))
    +2002		if print_out:
    +2003			self.msg('\n' + pretty_table(out))
    +2004		if output == 'raw':
    +2005			return out
    +2006		elif output == 'pretty':
    +2007			return pretty_table(out)
     
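
    A sketch of the three `output` modes (assuming a standardized `mydata`):

    ```py
    # Default: print and save, return nothing.
    mydata.table_of_sessions()

    # 'raw': a list of lists of strings; rows[0] is the header row.
    rows = mydata.table_of_sessions(save_to_file = False, print_out = False, output = 'raw')

    # 'pretty': the same table as a formatted text string.
    txt = mydata.table_of_sessions(save_to_file = False, print_out = False, output = 'pretty')
    ```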
    @@ -9234,63 +9579,63 @@

    API Documentation

    -
    2001	@make_verbal
    -2002	def table_of_analyses(
    -2003		self,
    -2004		dir = 'output',
    -2005		filename = None,
    -2006		save_to_file = True,
    -2007		print_out = True,
    -2008		output = None,
    -2009		):
    -2010		'''
    -2011		Print out and/or save to disk a table of analyses.
    -2012
    -2013		**Parameters**
    -2014
    -2015		+ `dir`: the directory in which to save the table
    -2016		+ `filename`: the name of the csv file to write to
    -2017		+ `save_to_file`: whether to save the table to disk
    -2018		+ `print_out`: whether to print out the table
    -2019		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -2020		    if set to `'raw'`: return a list of list of strings
    -2021		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2022		'''
    +            
    2010	@make_verbal
    +2011	def table_of_analyses(
    +2012		self,
    +2013		dir = 'output',
    +2014		filename = None,
    +2015		save_to_file = True,
    +2016		print_out = True,
    +2017		output = None,
    +2018		):
    +2019		'''
    +2020		Print out and/or save to disk a table of analyses.
    +2021
    +2022		**Parameters**
     2023
    -2024		out = [['UID','Session','Sample']]
    -2025		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
    -2026		for f in extra_fields:
    -2027			out[-1] += [f[0]]
    -2028		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
    -2029		for r in self:
    -2030			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
    -2031			for f in extra_fields:
    -2032				out[-1] += [f"{r[f[0]]:{f[1]}}"]
    -2033			out[-1] += [
    -2034				f"{r['d13Cwg_VPDB']:.3f}",
    -2035				f"{r['d18Owg_VSMOW']:.3f}",
    -2036				f"{r['d45']:.6f}",
    -2037				f"{r['d46']:.6f}",
    -2038				f"{r['d47']:.6f}",
    -2039				f"{r['d48']:.6f}",
    -2040				f"{r['d49']:.6f}",
    -2041				f"{r['d13C_VPDB']:.6f}",
    -2042				f"{r['d18O_VSMOW']:.6f}",
    -2043				f"{r['D47raw']:.6f}",
    -2044				f"{r['D48raw']:.6f}",
    -2045				f"{r['D49raw']:.6f}",
    -2046				f"{r[f'D{self._4x}']:.6f}"
    -2047				]
    -2048		if save_to_file:
    -2049			if not os.path.exists(dir):
    -2050				os.makedirs(dir)
    -2051			if filename is None:
    -2052				filename = f'D{self._4x}_analyses.csv'
    -2053			with open(f'{dir}/{filename}', 'w') as fid:
    -2054				fid.write(make_csv(out))
    -2055		if print_out:
    -2056			self.msg('\n' + pretty_table(out))
    -2057		return out
    +2024		+ `dir`: the directory in which to save the table
    +2025		+ `filename`: the name of the csv file to write to
    +2026		+ `save_to_file`: whether to save the table to disk
    +2027		+ `print_out`: whether to print out the table
    +2028		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +2029		    if set to `'raw'`: return a list of list of strings
    +2030		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2031		'''
    +2032
    +2033		out = [['UID','Session','Sample']]
    +2034		extra_fields = [f for f in [('SampleMass','.2f'),('ColdFingerPressure','.1f'),('AcidReactionYield','.3f')] if f[0] in {k for r in self for k in r}]
    +2035		for f in extra_fields:
    +2036			out[-1] += [f[0]]
    +2037		out[-1] += ['d13Cwg_VPDB','d18Owg_VSMOW','d45','d46','d47','d48','d49','d13C_VPDB','d18O_VSMOW','D47raw','D48raw','D49raw',f'D{self._4x}']
    +2038		for r in self:
    +2039			out += [[f"{r['UID']}",f"{r['Session']}",f"{r['Sample']}"]]
    +2040			for f in extra_fields:
    +2041				out[-1] += [f"{r[f[0]]:{f[1]}}"]
    +2042			out[-1] += [
    +2043				f"{r['d13Cwg_VPDB']:.3f}",
    +2044				f"{r['d18Owg_VSMOW']:.3f}",
    +2045				f"{r['d45']:.6f}",
    +2046				f"{r['d46']:.6f}",
    +2047				f"{r['d47']:.6f}",
    +2048				f"{r['d48']:.6f}",
    +2049				f"{r['d49']:.6f}",
    +2050				f"{r['d13C_VPDB']:.6f}",
    +2051				f"{r['d18O_VSMOW']:.6f}",
    +2052				f"{r['D47raw']:.6f}",
    +2053				f"{r['D48raw']:.6f}",
    +2054				f"{r['D49raw']:.6f}",
    +2055				f"{r[f'D{self._4x}']:.6f}"
    +2056				]
    +2057		if save_to_file:
    +2058			if not os.path.exists(dir):
    +2059				os.makedirs(dir)
    +2060			if filename is None:
    +2061				filename = f'D{self._4x}_analyses.csv'
    +2062			with open(f'{dir}/{filename}', 'w') as fid:
    +2063				fid.write(make_csv(out))
    +2064		if print_out:
    +2065			self.msg('\n' + pretty_table(out))
    +2066		return out
     
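
    The `extra_fields` logic above adds the optional columns (`SampleMass`,
    `ColdFingerPressure`, `AcidReactionYield`) only when the key appears in at least one
    analysis. A small sketch with a hypothetical record:

    ```py
    # Hypothetical analysis record: only 'SampleMass' is present.
    r = {'UID': 'A01', 'Session': 'S1', 'Sample': 'ETH-1', 'SampleMass': 5.214}

    extra_fields = [f for f in [('SampleMass', '.2f'), ('ColdFingerPressure', '.1f'),
        ('AcidReactionYield', '.3f')] if f[0] in r]
    print([f"{r[f[0]]:{f[1]}}" for f in extra_fields])  # ['5.21']
    ```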
    @@ -9323,56 +9668,56 @@

    API Documentation

    -
    2059	@make_verbal
    -2060	def covar_table(
    -2061		self,
    -2062		correl = False,
    -2063		dir = 'output',
    -2064		filename = None,
    -2065		save_to_file = True,
    -2066		print_out = True,
    -2067		output = None,
    -2068		):
    -2069		'''
    -2070		Print out, save to disk and/or return the variance-covariance matrix of D4x
    -2071		for all unknown samples.
    -2072
    -2073		**Parameters**
    -2074
    -2075		+ `dir`: the directory in which to save the csv
    -2076		+ `filename`: the name of the csv file to write to
    -2077		+ `save_to_file`: whether to save the csv
    -2078		+ `print_out`: whether to print out the matrix
    -2079		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
    -2080		    if set to `'raw'`: return a list of list of strings
    -2081		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2082		'''
    -2083		samples = sorted([u for u in self.unknowns])
    -2084		out = [[''] + samples]
    -2085		for s1 in samples:
    -2086			out.append([s1])
    -2087			for s2 in samples:
    -2088				if correl:
    -2089					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
    -2090				else:
    -2091					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
    -2092
    -2093		if save_to_file:
    -2094			if not os.path.exists(dir):
    -2095				os.makedirs(dir)
    -2096			if filename is None:
    +            
    2068	@make_verbal
    +2069	def covar_table(
    +2070		self,
    +2071		correl = False,
    +2072		dir = 'output',
    +2073		filename = None,
    +2074		save_to_file = True,
    +2075		print_out = True,
    +2076		output = None,
    +2077		):
    +2078		'''
    +2079		Print out, save to disk and/or return the variance-covariance matrix of D4x
    +2080		for all unknown samples.
    +2081
    +2082		**Parameters**
    +2083
    +2084		+ `dir`: the directory in which to save the csv
    +2085		+ `filename`: the name of the csv file to write to
    +2086		+ `save_to_file`: whether to save the csv
    +2087		+ `print_out`: whether to print out the matrix
    +2088		+ `output`: if set to `'pretty'`: return a pretty text matrix (see `pretty_table()`);
    +2089		    if set to `'raw'`: return a list of list of strings
    +2090		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2091		'''
    +2092		samples = sorted([u for u in self.unknowns])
    +2093		out = [[''] + samples]
    +2094		for s1 in samples:
    +2095			out.append([s1])
    +2096			for s2 in samples:
     2097				if correl:
    -2098					filename = f'D{self._4x}_correl.csv'
    +2098					out[-1].append(f'{self.sample_D4x_correl(s1, s2):.6f}')
     2099				else:
    -2100					filename = f'D{self._4x}_covar.csv'
    -2101			with open(f'{dir}/{filename}', 'w') as fid:
    -2102				fid.write(make_csv(out))
    -2103		if print_out:
    -2104			self.msg('\n'+pretty_table(out))
    -2105		if output == 'raw':
    -2106			return out
    -2107		elif output == 'pretty':
    -2108			return pretty_table(out)
    +2100					out[-1].append(f'{self.sample_D4x_covar(s1, s2):.8e}')
    +2101
    +2102		if save_to_file:
    +2103			if not os.path.exists(dir):
    +2104				os.makedirs(dir)
    +2105			if filename is None:
    +2106				if correl:
    +2107					filename = f'D{self._4x}_correl.csv'
    +2108				else:
    +2109					filename = f'D{self._4x}_covar.csv'
    +2110			with open(f'{dir}/{filename}', 'w') as fid:
    +2111				fid.write(make_csv(out))
    +2112		if print_out:
    +2113			self.msg('\n'+pretty_table(out))
    +2114		if output == 'raw':
    +2115			return out
    +2116		elif output == 'pretty':
    +2117			return pretty_table(out)
     
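
    Usage sketch (assuming a standardized `mydata`): the same method writes either matrix,
    with the default filename set accordingly.

    ```py
    # Variance-covariance matrix of the unknowns -> output/D47_covar.csv
    mydata.covar_table(print_out = False)

    # Correlation matrix instead -> output/D47_correl.csv
    mydata.covar_table(correl = True, print_out = False)
    ```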
    @@ -9406,64 +9751,64 @@

    API Documentation

    -
    2110	@make_verbal
    -2111	def table_of_samples(
    -2112		self,
    -2113		dir = 'output',
    -2114		filename = None,
    -2115		save_to_file = True,
    -2116		print_out = True,
    -2117		output = None,
    -2118		):
    -2119		'''
    -2120		Print out, save to disk and/or return a table of samples.
    -2121
    -2122		**Parameters**
    -2123
    -2124		+ `dir`: the directory in which to save the csv
    -2125		+ `filename`: the name of the csv file to write to
    -2126		+ `save_to_file`: whether to save the csv
    -2127		+ `print_out`: whether to print out the table
    -2128		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    -2129		    if set to `'raw'`: return a list of list of strings
    -2130		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    -2131		'''
    +            
    2119	@make_verbal
    +2120	def table_of_samples(
    +2121		self,
    +2122		dir = 'output',
    +2123		filename = None,
    +2124		save_to_file = True,
    +2125		print_out = True,
    +2126		output = None,
    +2127		):
    +2128		'''
    +2129		Print out, save to disk and/or return a table of samples.
    +2130
    +2131		**Parameters**
     2132
    -2133		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
    -2134		for sample in self.anchors:
    -2135			out += [[
    -2136				f"{sample}",
    -2137				f"{self.samples[sample]['N']}",
    -2138				f"{self.samples[sample]['d13C_VPDB']:.2f}",
    -2139				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    -2140				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
    -2141				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
    -2142				]]
    -2143		for sample in self.unknowns:
    +2133		+ `dir`: the directory in which to save the csv
    +2134		+ `filename`: the name of the csv file to write to
    +2135		+ `save_to_file`: whether to save the csv
    +2136		+ `print_out`: whether to print out the table
    +2137		+ `output`: if set to `'pretty'`: return a pretty text table (see `pretty_table()`);
    +2138		    if set to `'raw'`: return a list of list of strings
    +2139		    (e.g., `[['header1', 'header2'], ['0.1', '0.2']]`)
    +2140		'''
    +2141
    +2142		out = [['Sample','N','d13C_VPDB','d18O_VSMOW',f'D{self._4x}','SE','95% CL','SD','p_Levene']]
    +2143		for sample in self.anchors:
     2144			out += [[
     2145				f"{sample}",
     2146				f"{self.samples[sample]['N']}",
     2147				f"{self.samples[sample]['d13C_VPDB']:.2f}",
     2148				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    -2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",
    -2150				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
    -2151			f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
    -2152				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
    -2153				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
    -2154				]]
    -2155		if save_to_file:
    -2156			if not os.path.exists(dir):
    -2157				os.makedirs(dir)
    -2158			if filename is None:
    -2159				filename = f'D{self._4x}_samples.csv'
    -2160			with open(f'{dir}/{filename}', 'w') as fid:
    -2161				fid.write(make_csv(out))
    -2162		if print_out:
    -2163			self.msg('\n'+pretty_table(out))
    -2164		if output == 'raw':
    -2165			return out
    -2166		elif output == 'pretty':
    -2167			return pretty_table(out)
    +2149				f"{self.samples[sample][f'D{self._4x}']:.4f}",'','',
    +2150				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '', ''
    +2151				]]
    +2152		for sample in self.unknowns:
    +2153			out += [[
    +2154				f"{sample}",
    +2155				f"{self.samples[sample]['N']}",
    +2156				f"{self.samples[sample]['d13C_VPDB']:.2f}",
    +2157				f"{self.samples[sample]['d18O_VSMOW']:.2f}",
    +2158				f"{self.samples[sample][f'D{self._4x}']:.4f}",
    +2159				f"{self.samples[sample][f'SE_D{self._4x}']:.4f}",
    +2160			f"{self.samples[sample][f'SE_D{self._4x}'] * self.t95:.4f}",
    +2161				f"{self.samples[sample][f'SD_D{self._4x}']:.4f}" if self.samples[sample]['N'] > 1 else '',
    +2162				f"{self.samples[sample]['p_Levene']:.3f}" if self.samples[sample]['N'] > 2 else ''
    +2163				]]
    +2164		if save_to_file:
    +2165			if not os.path.exists(dir):
    +2166				os.makedirs(dir)
    +2167			if filename is None:
    +2168				filename = f'D{self._4x}_samples.csv'
    +2169			with open(f'{dir}/{filename}', 'w') as fid:
    +2170				fid.write(make_csv(out))
    +2171		if print_out:
    +2172			self.msg('\n'+pretty_table(out))
    +2173		if output == 'raw':
    +2174			return out
    +2175		elif output == 'pretty':
    +2176			return pretty_table(out)
     
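
    The `95% CL` column above is simply the sample's standard error scaled by Student's
    t-factor (`self.t95`), e.g. with hypothetical numbers:

    ```py
    SE_D47, t95 = 0.0085, 1.98      # hypothetical SE and t-factor
    print(f"± {SE_D47 * t95:.4f}")  # half-width of the 95 % confidence interval
    ```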
    @@ -9495,22 +9840,22 @@

    API Documentation

    -
    2170	def plot_sessions(self, dir = 'output', figsize = (8,8)):
    -2171		'''
    -2172		Generate session plots and save them to disk.
    -2173
    -2174		**Parameters**
    -2175
    -2176		+ `dir`: the directory in which to save the plots
    -2177		+ `figsize`: the width and height (in inches) of each plot
    -2178		'''
    -2179		if not os.path.exists(dir):
    -2180			os.makedirs(dir)
    -2181
    -2182		for session in self.sessions:
    -2183			sp = self.plot_single_session(session, xylimits = 'constant')
    -2184			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
    -2185			ppl.close(sp.fig)
    +            
    2179	def plot_sessions(self, dir = 'output', figsize = (8,8)):
    +2180		'''
    +2181		Generate session plots and save them to disk.
    +2182
    +2183		**Parameters**
    +2184
    +2185		+ `dir`: the directory in which to save the plots
    +2186		+ `figsize`: the width and height (in inches) of each plot
    +2187		'''
    +2188		if not os.path.exists(dir):
    +2189			os.makedirs(dir)
    +2190
    +2191		for session in self.sessions:
    +2192			sp = self.plot_single_session(session, xylimits = 'constant')
    +2193			ppl.savefig(f'{dir}/D{self._4x}_plot_{session}.pdf')
    +2194			ppl.close(sp.fig)
     
    @@ -9538,82 +9883,82 @@

    API Documentation

    -
    2188	@make_verbal
    -2189	def consolidate_samples(self):
    -2190		'''
    -2191		Compile various statistics for each sample.
    -2192
    -2193		For each anchor sample:
    -2194
    -2195		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
    -2196		+ `SE_D47` or `SE_D48`: set to zero by definition
    -2197
    -2198		For each unknown sample:
    -2199
    -2200		+ `D47` or `D48`: the standardized Δ4x value for this unknown
    -2201		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
    -2202
    -2203		For each anchor and unknown:
    -2204
    -2205		+ `N`: the total number of analyses of this sample
    -2206		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
    -2207		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
    -2208		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
    -2209		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
    -2210		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
    -2211		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
    -2212		'''
    -2213		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
    -2214		for sample in self.samples:
    -2215			self.samples[sample]['N'] = len(self.samples[sample]['data'])
    -2216			if self.samples[sample]['N'] > 1:
    -2217				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
    -2218
    -2219			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
    -2220			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
    -2221
    -2222			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
    -2223			if len(D4x_pop) > 2:
    -2224				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
    -2225
    -2226		if self.standardization_method == 'pooled':
    -2227			for sample in self.anchors:
    -2228				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    -2229				self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2230			for sample in self.unknowns:
    -2231				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
    -2232				try:
    -2233					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
    -2234				except ValueError:
    -2235					# when `sample` is constrained by self.standardize(constraints = {...}),
    -2236					# it is no longer listed in self.standardization.var_names.
    -2237					# Temporary fix: define SE as zero for now
    -2238					self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2239
    -2240		elif self.standardization_method == 'indep_sessions':
    -2241			for sample in self.anchors:
    -2242				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    -2243				self.samples[sample][f'SE_D{self._4x}'] = 0.
    -2244			for sample in self.unknowns:
    -2245				self.msg(f'Consolidating sample {sample}')
    -2246				self.unknowns[sample][f'session_D{self._4x}'] = {}
    -2247				session_avg = []
    -2248				for session in self.sessions:
    -2249					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
    -2250					if sdata:
    -2251						self.msg(f'{sample} found in session {session}')
    -2252						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
    -2253						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
    -2254						# !! TODO: sigma_s below does not account for temporal changes in standardization error
    -2255						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
    -2256						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
    -2257						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
    -2258						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
    -2259				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
    -2260				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
    -2261				wsum = sum([weights[s] for s in weights])
    -2262				for s in weights:
    -2263					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
    +            
    2197	@make_verbal
    +2198	def consolidate_samples(self):
    +2199		'''
    +2200		Compile various statistics for each sample.
    +2201
    +2202		For each anchor sample:
    +2203
    +2204		+ `D47` or `D48`: the nominal Δ4x value for this anchor, specified by `self.Nominal_D4x`
    +2205		+ `SE_D47` or `SE_D48`: set to zero by definition
    +2206
    +2207		For each unknown sample:
    +2208
    +2209		+ `D47` or `D48`: the standardized Δ4x value for this unknown
    +2210		+ `SE_D47` or `SE_D48`: the standard error of Δ4x for this unknown
    +2211
    +2212		For each anchor and unknown:
    +2213
    +2214		+ `N`: the total number of analyses of this sample
    +2215		+ `SD_D47` or `SD_D48`: the “sample” (in the statistical sense) standard deviation for this sample
    +2216		+ `d13C_VPDB`: the average δ13C_VPDB value for this sample
    +2217		+ `d18O_VSMOW`: the average δ18O_VSMOW value for this sample (as CO2)
    +2218		+ `p_Levene`: the p-value from a [Levene test](https://en.wikipedia.org/wiki/Levene%27s_test) of equal
    +2219		variance, indicating whether the Δ4x repeatability of this sample differs significantly from
    +2220		that observed for the reference sample specified by `self.LEVENE_REF_SAMPLE`.
    +2221		'''
    +2222		D4x_ref_pop = [r[f'D{self._4x}'] for r in self.samples[self.LEVENE_REF_SAMPLE]['data']]
    +2223		for sample in self.samples:
    +2224			self.samples[sample]['N'] = len(self.samples[sample]['data'])
    +2225			if self.samples[sample]['N'] > 1:
    +2226				self.samples[sample][f'SD_D{self._4x}'] = stdev([r[f'D{self._4x}'] for r in self.samples[sample]['data']])
    +2227
    +2228			self.samples[sample]['d13C_VPDB'] = np.mean([r['d13C_VPDB'] for r in self.samples[sample]['data']])
    +2229			self.samples[sample]['d18O_VSMOW'] = np.mean([r['d18O_VSMOW'] for r in self.samples[sample]['data']])
    +2230
    +2231			D4x_pop = [r[f'D{self._4x}'] for r in self.samples[sample]['data']]
    +2232			if len(D4x_pop) > 2:
    +2233				self.samples[sample]['p_Levene'] = levene(D4x_ref_pop, D4x_pop, center = 'median')[1]
    +2234
    +2235		if self.standardization_method == 'pooled':
    +2236			for sample in self.anchors:
    +2237				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    +2238				self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2239			for sample in self.unknowns:
    +2240				self.samples[sample][f'D{self._4x}'] = self.standardization.params.valuesdict()[f'D{self._4x}_{pf(sample)}']
    +2241				try:
    +2242					self.samples[sample][f'SE_D{self._4x}'] = self.sample_D4x_covar(sample)**.5
    +2243				except ValueError:
    +2244					# when `sample` is constrained by self.standardize(constraints = {...}),
    +2245					# it is no longer listed in self.standardization.var_names.
    +2246					# Temporary fix: define SE as zero for now
    +2247					self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2248
    +2249		elif self.standardization_method == 'indep_sessions':
    +2250			for sample in self.anchors:
    +2251				self.samples[sample][f'D{self._4x}'] = self.Nominal_D4x[sample]
    +2252				self.samples[sample][f'SE_D{self._4x}'] = 0.
    +2253			for sample in self.unknowns:
    +2254				self.msg(f'Consolidating sample {sample}')
    +2255				self.unknowns[sample][f'session_D{self._4x}'] = {}
    +2256				session_avg = []
    +2257				for session in self.sessions:
    +2258					sdata = [r for r in self.sessions[session]['data'] if r['Sample'] == sample]
    +2259					if sdata:
    +2260						self.msg(f'{sample} found in session {session}')
    +2261						avg_D4x = np.mean([r[f'D{self._4x}'] for r in sdata])
    +2262						avg_d4x = np.mean([r[f'd{self._4x}'] for r in sdata])
    +2263						# !! TODO: sigma_s below does not account for temporal changes in standardization error
    +2264						sigma_s = self.standardization_error(session, avg_d4x, avg_D4x)
    +2265						sigma_u = sdata[0][f'wD{self._4x}raw'] / self.sessions[session]['a'] / len(sdata)**.5
    +2266						session_avg.append([avg_D4x, (sigma_u**2 + sigma_s**2)**.5])
    +2267						self.unknowns[sample][f'session_D{self._4x}'][session] = session_avg[-1]
    +2268				self.samples[sample][f'D{self._4x}'], self.samples[sample][f'SE_D{self._4x}'] = w_avg(*zip(*session_avg))
    +2269				weights = {s: self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 for s in self.unknowns[sample][f'session_D{self._4x}']}
    +2270				wsum = sum([weights[s] for s in weights])
    +2271				for s in weights:
    +2272					self.unknowns[sample][f'session_D{self._4x}'][s] += [self.unknowns[sample][f'session_D{self._4x}'][s][1]**-2 / wsum]
     
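
In the `indep_sessions` branch above, each session contributes an average Δ4x with uncertainty σ = (σ_u² + σ_s²)^0.5, and `w_avg` combines the per-session values. The real `w_avg` is defined elsewhere in the module; a self-contained sketch of the inverse-variance weighted average it computes:

```python
import numpy as np

def w_avg_sketch(X, sX):
	'''
	Inverse-variance weighted average: weights are w_i = 1/sX_i**2,
	the average is sum(w_i * X_i) / sum(w_i), and its standard error
	is sum(w_i)**-0.5. This mirrors how the per-session averages above
	are combined into a single sample value.
	'''
	X, sX = np.asarray(X, dtype = float), np.asarray(sX, dtype = float)
	W = sX**-2
	return float((W * X).sum() / W.sum()), float(W.sum()**-0.5)

# e.g. two session averages of D47 (made-up values):
avg, se = w_avg_sketch([0.610, 0.602], [0.008, 0.012])
```

The normalized weights stored afterwards (`w_i / wsum`) are exactly the relative contributions of each session to this average.
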
    @@ -9659,127 +10004,127 @@

    API Documentation

    -
    2266	def consolidate_sessions(self):
    -2267		'''
    -2268		Compute various statistics for each session.
    -2269
    -2270		+ `Na`: Number of anchor analyses in the session
    -2271		+ `Nu`: Number of unknown analyses in the session
    -2272		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
    -2273		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
    -2274		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
    -2275		+ `a`: scrambling factor
    -2276		+ `b`: compositional slope
    -2277		+ `c`: WG offset
    -2278		+ `SE_a`: Model standard error of `a`
    -2279		+ `SE_b`: Model standard error of `b`
    -2280		+ `SE_c`: Model standard error of `c`
    -2281		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
    -2282		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
    -2283		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
    -2284		+ `a2`: scrambling factor drift
    -2285		+ `b2`: compositional slope drift
    -2286		+ `c2`: WG offset drift
    -2287		+ `Np`: Number of standardization parameters to fit
    -2288		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
    -2289		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
    -2290		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
    -2291		'''
    -2292		for session in self.sessions:
    -2293			if 'd13Cwg_VPDB' not in self.sessions[session]:
    -2294				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
    -2295			if 'd18Owg_VSMOW' not in self.sessions[session]:
    -2296				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
    -2297			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
    -2298			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
    -2299
    -2300			self.msg(f'Computing repeatabilities for session {session}')
    -2301			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
    -2302			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
    -2303			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
    -2304
    -2305		if self.standardization_method == 'pooled':
    -2306			for session in self.sessions:
    -2307
    -2308				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
    -2309				i = self.standardization.var_names.index(f'a_{pf(session)}')
    -2310				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
    -2311
    -2312				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
    -2313				i = self.standardization.var_names.index(f'b_{pf(session)}')
    -2314				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
    -2315
    -2316				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
    -2317				i = self.standardization.var_names.index(f'c_{pf(session)}')
    -2318				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
    -2319
    -2320				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
    -2321				if self.sessions[session]['scrambling_drift']:
    -2322					i = self.standardization.var_names.index(f'a2_{pf(session)}')
    -2323					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
    -2324				else:
    -2325					self.sessions[session]['SE_a2'] = 0.
    -2326
    -2327				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
    -2328				if self.sessions[session]['slope_drift']:
    -2329					i = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2330					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
    -2331				else:
    -2332					self.sessions[session]['SE_b2'] = 0.
    -2333
    -2334				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
    -2335				if self.sessions[session]['wg_drift']:
    -2336					i = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2337					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
    -2338				else:
    -2339					self.sessions[session]['SE_c2'] = 0.
    -2340
    -2341				i = self.standardization.var_names.index(f'a_{pf(session)}')
    -2342				j = self.standardization.var_names.index(f'b_{pf(session)}')
    -2343				k = self.standardization.var_names.index(f'c_{pf(session)}')
    -2344				CM = np.zeros((6,6))
    -2345				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
    -2346				try:
    -2347					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
    -2348					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
    -2349					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
    -2350					try:
    -2351						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2352						CM[3,4] = self.standardization.covar[i2,j2]
    -2353						CM[4,3] = self.standardization.covar[j2,i2]
    -2354					except ValueError:
    -2355						pass
    -2356					try:
    -2357						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2358						CM[3,5] = self.standardization.covar[i2,k2]
    -2359						CM[5,3] = self.standardization.covar[k2,i2]
    -2360					except ValueError:
    -2361						pass
    -2362				except ValueError:
    -2363					pass
    -2364				try:
    -2365					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    -2366					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
    -2367					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
    -2368					try:
    -2369						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2370						CM[4,5] = self.standardization.covar[j2,k2]
    -2371						CM[5,4] = self.standardization.covar[k2,j2]
    -2372					except ValueError:
    -2373						pass
    -2374				except ValueError:
    -2375					pass
    -2376				try:
    -2377					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    -2378					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
    -2379					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
    -2380				except ValueError:
    -2381					pass
    -2382
    -2383				self.sessions[session]['CM'] = CM
    -2384
    -2385		elif self.standardization_method == 'indep_sessions':
    -2386			pass # Not implemented yet
    +            
    2275	def consolidate_sessions(self):
    +2276		'''
    +2277		Compute various statistics for each session.
    +2278
    +2279		+ `Na`: Number of anchor analyses in the session
    +2280		+ `Nu`: Number of unknown analyses in the session
    +2281		+ `r_d13C_VPDB`: δ13C_VPDB repeatability of analyses within the session
    +2282		+ `r_d18O_VSMOW`: δ18O_VSMOW repeatability of analyses within the session
    +2283		+ `r_D47` or `r_D48`: Δ4x repeatability of analyses within the session
    +2284		+ `a`: scrambling factor
    +2285		+ `b`: compositional slope
    +2286		+ `c`: WG offset
    +2287		+ `SE_a`: Model standard error of `a`
    +2288		+ `SE_b`: Model standard error of `b`
    +2289		+ `SE_c`: Model standard error of `c`
    +2290		+ `scrambling_drift` (boolean): whether to allow a temporal drift in the scrambling factor (`a`)
    +2291		+ `slope_drift` (boolean): whether to allow a temporal drift in the compositional slope (`b`)
    +2292		+ `wg_drift` (boolean): whether to allow a temporal drift in the WG offset (`c`)
    +2293		+ `a2`: scrambling factor drift
    +2294		+ `b2`: compositional slope drift
    +2295		+ `c2`: WG offset drift
    +2296		+ `Np`: Number of standardization parameters to fit
    +2297		+ `CM`: model covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`)
    +2298		+ `d13Cwg_VPDB`: δ13C_VPDB of WG
    +2299		+ `d18Owg_VSMOW`: δ18O_VSMOW of WG
    +2300		'''
    +2301		for session in self.sessions:
    +2302			if 'd13Cwg_VPDB' not in self.sessions[session]:
    +2303				self.sessions[session]['d13Cwg_VPDB'] = self.sessions[session]['data'][0]['d13Cwg_VPDB']
    +2304			if 'd18Owg_VSMOW' not in self.sessions[session]:
    +2305				self.sessions[session]['d18Owg_VSMOW'] = self.sessions[session]['data'][0]['d18Owg_VSMOW']
    +2306			self.sessions[session]['Na'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.anchors])
    +2307			self.sessions[session]['Nu'] = len([r for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns])
    +2308
    +2309			self.msg(f'Computing repeatabilities for session {session}')
    +2310			self.sessions[session]['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors', sessions = [session])
    +2311			self.sessions[session]['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors', sessions = [session])
    +2312			self.sessions[session][f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', sessions = [session])
    +2313
    +2314		if self.standardization_method == 'pooled':
    +2315			for session in self.sessions:
    +2316
    +2317				self.sessions[session]['a'] = self.standardization.params.valuesdict()[f'a_{pf(session)}']
    +2318				i = self.standardization.var_names.index(f'a_{pf(session)}')
    +2319				self.sessions[session]['SE_a'] = self.standardization.covar[i,i]**.5
    +2320
    +2321				self.sessions[session]['b'] = self.standardization.params.valuesdict()[f'b_{pf(session)}']
    +2322				i = self.standardization.var_names.index(f'b_{pf(session)}')
    +2323				self.sessions[session]['SE_b'] = self.standardization.covar[i,i]**.5
    +2324
    +2325				self.sessions[session]['c'] = self.standardization.params.valuesdict()[f'c_{pf(session)}']
    +2326				i = self.standardization.var_names.index(f'c_{pf(session)}')
    +2327				self.sessions[session]['SE_c'] = self.standardization.covar[i,i]**.5
    +2328
    +2329				self.sessions[session]['a2'] = self.standardization.params.valuesdict()[f'a2_{pf(session)}']
    +2330				if self.sessions[session]['scrambling_drift']:
    +2331					i = self.standardization.var_names.index(f'a2_{pf(session)}')
    +2332					self.sessions[session]['SE_a2'] = self.standardization.covar[i,i]**.5
    +2333				else:
    +2334					self.sessions[session]['SE_a2'] = 0.
    +2335
    +2336				self.sessions[session]['b2'] = self.standardization.params.valuesdict()[f'b2_{pf(session)}']
    +2337				if self.sessions[session]['slope_drift']:
    +2338					i = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2339					self.sessions[session]['SE_b2'] = self.standardization.covar[i,i]**.5
    +2340				else:
    +2341					self.sessions[session]['SE_b2'] = 0.
    +2342
    +2343				self.sessions[session]['c2'] = self.standardization.params.valuesdict()[f'c2_{pf(session)}']
    +2344				if self.sessions[session]['wg_drift']:
    +2345					i = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2346					self.sessions[session]['SE_c2'] = self.standardization.covar[i,i]**.5
    +2347				else:
    +2348					self.sessions[session]['SE_c2'] = 0.
    +2349
    +2350				i = self.standardization.var_names.index(f'a_{pf(session)}')
    +2351				j = self.standardization.var_names.index(f'b_{pf(session)}')
    +2352				k = self.standardization.var_names.index(f'c_{pf(session)}')
    +2353				CM = np.zeros((6,6))
    +2354				CM[:3,:3] = self.standardization.covar[[i,j,k],:][:,[i,j,k]]
    +2355				try:
    +2356					i2 = self.standardization.var_names.index(f'a2_{pf(session)}')
    +2357					CM[3,[0,1,2,3]] = self.standardization.covar[i2,[i,j,k,i2]]
    +2358					CM[[0,1,2,3],3] = self.standardization.covar[[i,j,k,i2],i2]
    +2359					try:
    +2360						j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2361						CM[3,4] = self.standardization.covar[i2,j2]
    +2362						CM[4,3] = self.standardization.covar[j2,i2]
    +2363					except ValueError:
    +2364						pass
    +2365					try:
    +2366						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2367						CM[3,5] = self.standardization.covar[i2,k2]
    +2368						CM[5,3] = self.standardization.covar[k2,i2]
    +2369					except ValueError:
    +2370						pass
    +2371				except ValueError:
    +2372					pass
    +2373				try:
    +2374					j2 = self.standardization.var_names.index(f'b2_{pf(session)}')
    +2375					CM[4,[0,1,2,4]] = self.standardization.covar[j2,[i,j,k,j2]]
    +2376					CM[[0,1,2,4],4] = self.standardization.covar[[i,j,k,j2],j2]
    +2377					try:
    +2378						k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2379						CM[4,5] = self.standardization.covar[j2,k2]
    +2380						CM[5,4] = self.standardization.covar[k2,j2]
    +2381					except ValueError:
    +2382						pass
    +2383				except ValueError:
    +2384					pass
    +2385				try:
    +2386					k2 = self.standardization.var_names.index(f'c2_{pf(session)}')
    +2387					CM[5,[0,1,2,5]] = self.standardization.covar[k2,[i,j,k,k2]]
    +2388					CM[[0,1,2,5],5] = self.standardization.covar[[i,j,k,k2],k2]
    +2389				except ValueError:
    +2390					pass
    +2391
    +2392				self.sessions[session]['CM'] = CM
    +2393
    +2394		elif self.standardization_method == 'indep_sessions':
    +2395			pass # Not implemented yet
     
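
The nested `try`/`except ValueError` blocks above fill a fixed 6×6 covariance matrix for (`a`, `b`, `c`, `a2`, `b2`, `c2`), leaving the rows and columns of unfitted drift parameters at zero. The index gymnastics boil down to sub-matrix extraction; a toy sketch (positions and values are made up):

```python
import numpy as np

# Toy illustration of the sub-covariance extraction above: `covar` is the
# full model covariance, `idx` the (hypothetical) positions of a, b, c in
# `var_names`; drift terms stay zero unless they were actually fitted.
covar = np.diag([1.0, 2.0, 3.0, 4.0])	# toy 4-parameter covariance
idx = [0, 2, 3]							# made-up positions of a, b, c
CM = np.zeros((6, 6))
CM[:3, :3] = covar[np.ix_(idx, idx)]	# equivalent to covar[idx,:][:,idx]
```
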
    @@ -9824,19 +10169,19 @@

    API Documentation

    -
    2389	@make_verbal
    -2390	def repeatabilities(self):
    -2391		'''
    -2392		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
    -2393		(for all samples, for anchors, and for unknowns).
    -2394		'''
    -2395		self.msg('Computing repeatabilities for all sessions')
    -2396
    -2397		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
    -2398		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
    -2399		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
    -2400		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
    -2401		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
    +            
    2398	@make_verbal
    +2399	def repeatabilities(self):
    +2400		'''
    +2401		Compute analytical repeatabilities for δ13C_VPDB, δ18O_VSMOW, Δ4x
    +2402		(for all samples, for anchors, and for unknowns).
    +2403		'''
    +2404		self.msg('Computing repeatabilities for all sessions')
    +2405
    +2406		self.repeatability['r_d13C_VPDB'] = self.compute_r('d13C_VPDB', samples = 'anchors')
    +2407		self.repeatability['r_d18O_VSMOW'] = self.compute_r('d18O_VSMOW', samples = 'anchors')
    +2408		self.repeatability[f'r_D{self._4x}a'] = self.compute_r(f'D{self._4x}', samples = 'anchors')
    +2409		self.repeatability[f'r_D{self._4x}u'] = self.compute_r(f'D{self._4x}', samples = 'unknowns')
    +2410		self.repeatability[f'r_D{self._4x}'] = self.compute_r(f'D{self._4x}', samples = 'all samples')
     
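
After this call the results live in the `repeatability` dict, keyed by variable and sample group. A hypothetical usage sketch for a `D47data` instance `mydata`:

```python
mydata.repeatabilities()
# 'a' suffix = anchors only, 'u' suffix = unknowns only, no suffix = all samples
print(f"r_D47 (anchors):     {1e3 * mydata.repeatability['r_D47a']:.1f} ppm")
print(f"r_D47 (unknowns):    {1e3 * mydata.repeatability['r_D47u']:.1f} ppm")
print(f"r_D47 (all samples): {1e3 * mydata.repeatability['r_D47']:.1f} ppm")
```
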
    @@ -9858,23 +10203,23 @@

    API Documentation

    -
    2404	@make_verbal
    -2405	def consolidate(self, tables = True, plots = True):
    -2406		'''
    -2407		Collect information about samples, sessions and repeatabilities.
    -2408		'''
    -2409		self.consolidate_samples()
    -2410		self.consolidate_sessions()
    -2411		self.repeatabilities()
    -2412
    -2413		if tables:
    -2414			self.summary()
    -2415			self.table_of_sessions()
    -2416			self.table_of_analyses()
    -2417			self.table_of_samples()
    -2418
    -2419		if plots:
    -2420			self.plot_sessions()
    +            
    2413	@make_verbal
    +2414	def consolidate(self, tables = True, plots = True):
    +2415		'''
    +2416		Collect information about samples, sessions and repeatabilities.
    +2417		'''
    +2418		self.consolidate_samples()
    +2419		self.consolidate_sessions()
    +2420		self.repeatabilities()
    +2421
    +2422		if tables:
    +2423			self.summary()
    +2424			self.table_of_sessions()
    +2425			self.table_of_analyses()
    +2426			self.table_of_samples()
    +2427
    +2428		if plots:
    +2429			self.plot_sessions()
     
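
Both flags default to true; a sketch of recomputing the statistics without regenerating output files (same hypothetical `mydata` object):

```python
# Recompute sample/session statistics and repeatabilities only,
# skipping the tables and the per-session plots:
mydata.consolidate(tables = False, plots = False)
```
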
    @@ -9895,40 +10240,40 @@

    API Documentation

    -
    2423	@make_verbal
    -2424	def rmswd(self,
    -2425		samples = 'all samples',
    -2426		sessions = 'all sessions',
    -2427		):
    -2428		'''
    -2429		Compute the χ2, root mean squared weighted deviation
    -2430		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
    -2431		Δ4x values for samples in `samples` and sessions in `sessions`.
    -2432		
    -2433		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
    -2434		'''
    -2435		if samples == 'all samples':
    -2436			mysamples = [k for k in self.samples]
    -2437		elif samples == 'anchors':
    -2438			mysamples = [k for k in self.anchors]
    -2439		elif samples == 'unknowns':
    -2440			mysamples = [k for k in self.unknowns]
    -2441		else:
    -2442			mysamples = samples
    -2443
    -2444		if sessions == 'all sessions':
    -2445			sessions = [k for k in self.sessions]
    -2446
    -2447		chisq, Nf = 0, 0
    -2448		for sample in mysamples :
    -2449			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2450			if len(G) > 1 :
    -2451				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
    -2452				Nf += (len(G) - 1)
    -2453				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
    -2454		r = (chisq / Nf)**.5 if Nf > 0 else 0
    -2455		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
    -2456		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
    +            
    2432	@make_verbal
    +2433	def rmswd(self,
    +2434		samples = 'all samples',
    +2435		sessions = 'all sessions',
    +2436		):
    +2437		'''
    +2438		Compute the χ2, root mean squared weighted deviation
    +2439		(i.e. the square root of the reduced χ2), and corresponding degrees of freedom of the
    +2440		Δ4x values for samples in `samples` and sessions in `sessions`.
    +2441		
    +2442		Only used in `D4xdata.standardize()` with `method='indep_sessions'`.
    +2443		'''
    +2444		if samples == 'all samples':
    +2445			mysamples = [k for k in self.samples]
    +2446		elif samples == 'anchors':
    +2447			mysamples = [k for k in self.anchors]
    +2448		elif samples == 'unknowns':
    +2449			mysamples = [k for k in self.unknowns]
    +2450		else:
    +2451			mysamples = samples
    +2452
    +2453		if sessions == 'all sessions':
    +2454			sessions = [k for k in self.sessions]
    +2455
    +2456		chisq, Nf = 0, 0
    +2457		for sample in mysamples :
    +2458			G = [ r for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2459			if len(G) > 1 :
    +2460				X, sX = w_avg([r[f'D{self._4x}'] for r in G], [r[f'wD{self._4x}'] for r in G])
    +2461				Nf += (len(G) - 1)
    +2462				chisq += np.sum([ ((r[f'D{self._4x}']-X)/r[f'wD{self._4x}'])**2 for r in G])
    +2463		r = (chisq / Nf)**.5 if Nf > 0 else 0
    +2464		self.msg(f'RMSWD of r["D{self._4x}"] is {r:.6f} for {samples}.')
    +2465		return {'rmswd': r, 'chisq': chisq, 'Nf': Nf}
     
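
For each sample with more than one analysis, the χ² term compares every replicate to the inverse-variance weighted mean of that sample, and `Nf` grows by N−1. A self-contained toy version of the same statistic, with `w_avg` replaced by an explicit weighted mean:

```python
import numpy as np

def rmswd_sketch(groups):
	'''
	`groups` maps a sample name to (values, sigmas). For each sample with
	N > 1 replicates, take the inverse-variance weighted mean X, add
	sum(((x - X)/s)**2) to chisq and (N - 1) to Nf, then return
	rmswd = (chisq/Nf)**0.5, i.e. the square root of the reduced chi-square.
	'''
	chisq, Nf = 0.0, 0
	for X, sX in groups.values():
		X, sX = np.asarray(X, dtype = float), np.asarray(sX, dtype = float)
		if X.size > 1:
			w = sX**-2
			Xbar = (w * X).sum() / w.sum()
			chisq += (((X - Xbar) / sX)**2).sum()
			Nf += X.size - 1
	return {'rmswd': (chisq / Nf)**0.5 if Nf else 0.0, 'chisq': chisq, 'Nf': Nf}

# toy input (made-up numbers):
print(rmswd_sketch({'FOO': ([0.61, 0.60, 0.62], [0.01, 0.01, 0.01])}))
```
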
    @@ -9953,52 +10298,52 @@

    API Documentation

    -
    2459	@make_verbal
    -2460	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
    -2461		'''
    -2462		Compute the repeatability of `[r[key] for r in self]`
    -2463		'''
    -2464		# NB: it's debatable whether rD47 should be computed
    -2465		# with Nf = len(self)-len(self.samples) instead of
    -2466		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
    -2467
    -2468		if samples == 'all samples':
    -2469			mysamples = [k for k in self.samples]
    -2470		elif samples == 'anchors':
    -2471			mysamples = [k for k in self.anchors]
    -2472		elif samples == 'unknowns':
    -2473			mysamples = [k for k in self.unknowns]
    -2474		else:
    -2475			mysamples = samples
    +            
    2468	@make_verbal
    +2469	def compute_r(self, key, samples = 'all samples', sessions = 'all sessions'):
    +2470		'''
    +2471		Compute the repeatability of `[r[key] for r in self]`
    +2472		'''
    +2473		# NB: it's debatable whether rD47 should be computed
    +2474		# with Nf = len(self)-len(self.samples) instead of
    +2475		# Nf = len(self) - len(self.unknowns) - 3*len(self.sessions)
     2476
    -2477		if sessions == 'all sessions':
    -2478			sessions = [k for k in self.sessions]
    -2479
    -2480		if key in ['D47', 'D48']:
    -2481			chisq, Nf = 0, 0
    -2482			for sample in mysamples :
    -2483				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2484				if len(X) > 1 :
    -2485					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
    -2486					if sample in self.unknowns:
    -2487						Nf += len(X) - 1
    -2488					else:
    -2489						Nf += len(X)
    -2490			if samples in ['anchors', 'all samples']:
    -2491				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
    -2492			r = (chisq / Nf)**.5 if Nf > 0 else 0
    -2493
    -2494		else: # if key not in ['D47', 'D48']
    -2495			chisq, Nf = 0, 0
    -2496			for sample in mysamples :
    -2497				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    -2498				if len(X) > 1 :
    -2499					Nf += len(X) - 1
    -2500					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
    +2477		if samples == 'all samples':
    +2478			mysamples = [k for k in self.samples]
    +2479		elif samples == 'anchors':
    +2480			mysamples = [k for k in self.anchors]
    +2481		elif samples == 'unknowns':
    +2482			mysamples = [k for k in self.unknowns]
    +2483		else:
    +2484			mysamples = samples
    +2485
    +2486		if sessions == 'all sessions':
    +2487			sessions = [k for k in self.sessions]
    +2488
    +2489		if key in ['D47', 'D48']:
    +2490			chisq, Nf = 0, 0
    +2491			for sample in mysamples :
    +2492				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2493				if len(X) > 1 :
    +2494					chisq += np.sum([ (x-self.samples[sample][key])**2 for x in X ])
    +2495					if sample in self.unknowns:
    +2496						Nf += len(X) - 1
    +2497					else:
    +2498						Nf += len(X)
    +2499			if samples in ['anchors', 'all samples']:
    +2500				Nf -= sum([self.sessions[s]['Np'] for s in sessions])
     2501			r = (chisq / Nf)**.5 if Nf > 0 else 0
     2502
    -2503		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
    -2504		return r
    +2503		else: # if key not in ['D47', 'D48']
    +2504			chisq, Nf = 0, 0
    +2505			for sample in mysamples :
    +2506				X = [ r[key] for r in self if r['Sample'] == sample and r['Session'] in sessions ]
    +2507				if len(X) > 1 :
    +2508					Nf += len(X) - 1
    +2509					chisq += np.sum([ (x-np.mean(X))**2 for x in X ])
    +2510			r = (chisq / Nf)**.5 if Nf > 0 else 0
    +2511
    +2512		self.msg(f'Repeatability of r["{key}"] is {1000*r:.1f} ppm for {samples}.')
    +2513		return r
     
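
For keys other than `D47`/`D48`, the repeatability above is a pooled standard deviation: squared deviations from each sample's own mean, divided by the pooled degrees of freedom. (For `D47`/`D48`, deviations are taken from the standardized sample value instead, and `Nf` is further reduced by the number of fitted standardization parameters.) A toy numeric sketch of the generic branch:

```python
import numpy as np

# Pooled repeatability for a generic key, with made-up replicate values:
replicates = {'A': [1.00, 1.02, 0.98], 'B': [2.50, 2.54]}
chisq, Nf = 0.0, 0
for X in replicates.values():
	X = np.asarray(X, dtype = float)
	if X.size > 1:
		chisq += ((X - X.mean())**2).sum()	# deviations from the sample mean
		Nf += X.size - 1					# N - 1 degrees of freedom per sample
r = (chisq / Nf)**0.5 if Nf else 0.0
print(f'{1000 * r:.1f} ppm')
```
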
    @@ -10018,46 +10363,46 @@

    API Documentation

    -
    2506	def sample_average(self, samples, weights = 'equal', normalize = True):
    -2507		'''
    -2508		Weighted average Δ4x value of a group of samples, accounting for covariance.
    -2509
    -2510		Returns the weighted average Δ4x value and associated SE
    -2511		of a group of samples. Weights are equal by default. If `normalize` is
    -2512		true, `weights` will be rescaled so that their sum equals 1.
    -2513
    -2514		**Examples**
    -2515
    -2516		```python
    -2517		self.sample_average(['X','Y'], [1, 2])
    -2518		```
    -2519
    -2520		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
    -2521		where Δ4x(X) and Δ4x(Y) are the average Δ4x
    -2522		values of samples X and Y, respectively.
    -2523
    -2524		```python
    -2525		self.sample_average(['X','Y'], [1, -1], normalize = False)
    -2526		```
    -2527
    -2528		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
    -2529		'''
    -2530		if weights == 'equal':
    -2531			weights = [1/len(samples)] * len(samples)
    +            
    2515	def sample_average(self, samples, weights = 'equal', normalize = True):
    +2516		'''
    +2517		Weighted average Δ4x value of a group of samples, accounting for covariance.
    +2518
    +2519		Returns the weighted average Δ4x value and associated SE
    +2520		of a group of samples. Weights are equal by default. If `normalize` is
    +2521		true, `weights` will be rescaled so that their sum equals 1.
    +2522
    +2523		**Examples**
    +2524
    +2525		```python
    +2526		self.sample_average(['X','Y'], [1, 2])
    +2527		```
    +2528
    +2529		returns the value and SE of [Δ4x(X) + 2 Δ4x(Y)]/3,
    +2530		where Δ4x(X) and Δ4x(Y) are the average Δ4x
    +2531		values of samples X and Y, respectively.
     2532
    -2533		if normalize:
    -2534			s = sum(weights)
    -2535			if s:
    -2536				weights = [w/s for w in weights]
    -2537
    -2538		try:
    -2539# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
    -2540# 			C = self.standardization.covar[indices,:][:,indices]
    -2541			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
    -2542			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
    -2543			return correlated_sum(X, C, weights)
    -2544		except ValueError:
    -2545			return (0., 0.)
    +2533		```python
    +2534		self.sample_average(['X','Y'], [1, -1], normalize = False)
    +2535		```
    +2536
    +2537		returns the value and SE of the difference Δ4x(X) - Δ4x(Y).
    +2538		'''
    +2539		if weights == 'equal':
    +2540			weights = [1/len(samples)] * len(samples)
    +2541
    +2542		if normalize:
    +2543			s = sum(weights)
    +2544			if s:
    +2545				weights = [w/s for w in weights]
    +2546
    +2547		try:
    +2548# 			indices = [self.standardization.var_names.index(f'D47_{pf(sample)}') for sample in samples]
    +2549# 			C = self.standardization.covar[indices,:][:,indices]
    +2550			C = np.array([[self.sample_D4x_covar(x, y) for x in samples] for y in samples])
    +2551			X = [self.samples[sample][f'D{self._4x}'] for sample in samples]
    +2552			return correlated_sum(X, C, weights)
    +2553		except ValueError:
    +2554			return (0., 0.)
     
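
The heavy lifting is in `correlated_sum` (defined elsewhere in the module): for weights w, values X and error covariance C, the weighted sum has value w·X and variance wᵀCw. A sketch under that assumption, with a made-up covariance matrix:

```python
import numpy as np

def correlated_sum_sketch(X, C, weights):
	'''
	Weighted sum of correlated values: returns (w @ X, (w @ C @ w)**0.5),
	which is presumably what `correlated_sum` computes from the sample
	covariance matrix assembled above.
	'''
	X, C, w = (np.asarray(_, dtype = float) for _ in (X, C, weights))
	return float(w @ X), float(w @ C @ w)**0.5

# e.g. the difference D4x(X) - D4x(Y), with a toy covariance matrix:
value, se = correlated_sum_sketch(
	[0.61, 0.59],
	[[1.0e-4, 2.0e-5], [2.0e-5, 1.5e-4]],
	[1, -1],
	)
```
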
    @@ -10099,44 +10444,44 @@

    API Documentation

    -
    2548	def sample_D4x_covar(self, sample1, sample2 = None):
    -2549		'''
    -2550		Covariance between Δ4x values of samples
    -2551
    -2552		Returns the error covariance between the average Δ4x values of two
    -2553		samples. If only `sample1` is specified, or if `sample1 == sample2`,
    -2554		returns the Δ4x variance for that sample.
    -2555		'''
    -2556		if sample2 is None:
    -2557			sample2 = sample1
    -2558		if self.standardization_method == 'pooled':
    -2559			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
    -2560			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
    -2561			return self.standardization.covar[i, j]
    -2562		elif self.standardization_method == 'indep_sessions':
    -2563			if sample1 == sample2:
    -2564				return self.samples[sample1][f'SE_D{self._4x}']**2
    -2565			else:
    -2566				c = 0
    -2567				for session in self.sessions:
    -2568					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
    -2569					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
    -2570					if sdata1 and sdata2:
    -2571						a = self.sessions[session]['a']
    -2572						# !! TODO: CM below does not account for temporal changes in standardization parameters
    -2573						CM = self.sessions[session]['CM'][:3,:3]
    -2574						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
    -2575						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
    -2576						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
    -2577						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
    -2578						c += (
    -2579							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
    -2580							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
    -2581							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
    -2582							@ CM
    -2583							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
    -2584							) / a**2
    -2585				return float(c)
    +            
    2557	def sample_D4x_covar(self, sample1, sample2 = None):
    +2558		'''
    +2559		Covariance between Δ4x values of samples
    +2560
    +2561		Returns the error covariance between the average Δ4x values of two
    +2562		samples. If only `sample1` is specified, or if `sample1 == sample2`,
    +2563		returns the Δ4x variance for that sample.
    +2564		'''
    +2565		if sample2 is None:
    +2566			sample2 = sample1
    +2567		if self.standardization_method == 'pooled':
    +2568			i = self.standardization.var_names.index(f'D{self._4x}_{pf(sample1)}')
    +2569			j = self.standardization.var_names.index(f'D{self._4x}_{pf(sample2)}')
    +2570			return self.standardization.covar[i, j]
    +2571		elif self.standardization_method == 'indep_sessions':
    +2572			if sample1 == sample2:
    +2573				return self.samples[sample1][f'SE_D{self._4x}']**2
    +2574			else:
    +2575				c = 0
    +2576				for session in self.sessions:
    +2577					sdata1 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample1]
    +2578					sdata2 = [r for r in self.sessions[session]['data'] if r['Sample'] == sample2]
    +2579					if sdata1 and sdata2:
    +2580						a = self.sessions[session]['a']
    +2581						# !! TODO: CM below does not account for temporal changes in standardization parameters
    +2582						CM = self.sessions[session]['CM'][:3,:3]
    +2583						avg_D4x_1 = np.mean([r[f'D{self._4x}'] for r in sdata1])
    +2584						avg_d4x_1 = np.mean([r[f'd{self._4x}'] for r in sdata1])
    +2585						avg_D4x_2 = np.mean([r[f'D{self._4x}'] for r in sdata2])
    +2586						avg_d4x_2 = np.mean([r[f'd{self._4x}'] for r in sdata2])
    +2587						c += (
    +2588							self.unknowns[sample1][f'session_D{self._4x}'][session][2]
    +2589							* self.unknowns[sample2][f'session_D{self._4x}'][session][2]
    +2590							* np.array([[avg_D4x_1, avg_d4x_1, 1]])
    +2591							@ CM
    +2592							@ np.array([[avg_D4x_2, avg_d4x_2, 1]]).T
    +2593							) / a**2
    +2594				return float(c)
     
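
In the `indep_sessions` branch, each session's contribution is a quadratic form: with the standardization model Δ4x_raw = a·Δ4x + b·δ4x + c, the sensitivity of a standardized sample average to (a, b, c) is proportional to [Δ4x, δ4x, 1]/a, so the parameter covariance CM propagates through both samples' gradient vectors. A toy sketch of one session's term (all values made up):

```python
import numpy as np

# g1 and g2 are the (D4x, d4x, 1) sensitivity vectors of each sample's
# session average, CM the session's (a, b, c) covariance, `a` the
# scrambling factor, and w1, w2 the session weights from above.
CM = np.diag([1.0e-6, 1.0e-8, 1.0e-6])
a, w1, w2 = 0.9, 0.5, 0.5
g1 = np.array([0.61, 10.0, 1.0])
g2 = np.array([0.35, -4.0, 1.0])
c = w1 * w2 * float(g1 @ CM @ g2) / a**2
```
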
    @@ -10160,19 +10505,19 @@

    API Documentation

    -
    2587	def sample_D4x_correl(self, sample1, sample2 = None):
    -2588		'''
    -2589		Correlation between Δ4x errors of samples
    -2590
    -2591		Returns the error correlation between the average Δ4x values of two samples.
    -2592		'''
    -2593		if sample2 is None or sample2 == sample1:
    -2594			return 1.
    -2595		return (
    -2596			self.sample_D4x_covar(sample1, sample2)
    -2597			/ self.unknowns[sample1][f'SE_D{self._4x}']
    -2598			/ self.unknowns[sample2][f'SE_D{self._4x}']
    -2599			)
    +            
    2596	def sample_D4x_correl(self, sample1, sample2 = None):
    +2597		'''
    +2598		Correlation between Δ4x errors of samples
    +2599
    +2600		Returns the error correlation between the average Δ4x values of two samples.
    +2601		'''
    +2602		if sample2 is None or sample2 == sample1:
    +2603			return 1.
    +2604		return (
    +2605			self.sample_D4x_covar(sample1, sample2)
    +2606			/ self.unknowns[sample1][f'SE_D{self._4x}']
    +2607			/ self.unknowns[sample2][f'SE_D{self._4x}']
    +2608			)
     
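
The correlation is simply the covariance rescaled by the two standard errors; a toy check with made-up numbers:

```python
covar, se1, se2 = 2.0e-5, 0.008, 0.009
correl = covar / se1 / se2	# ≈ 0.28; dimensionless, in [-1, 1]
```
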
    @@ -10194,104 +10539,104 @@

    API Documentation

    -
    2601	def plot_single_session(self,
    -2602		session,
    -2603		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
    -2604		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
    -2605		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
    -2606		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
    -2607		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
    -2608		xylimits = 'free', # | 'constant'
    -2609		x_label = None,
    -2610		y_label = None,
    -2611		error_contour_interval = 'auto',
    -2612		fig = 'new',
    -2613		):
    -2614		'''
    -2615		Generate plot for a single session
    -2616		'''
    -2617		if x_label is None:
    -2618			x_label = f'δ$_{{{self._4x}}}$ (‰)'
    -2619		if y_label is None:
    -2620			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
    -2621
    -2622		out = _SessionPlot()
    -2623		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
    -2624		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
    -2625		
    -2626		if fig == 'new':
    -2627			out.fig = ppl.figure(figsize = (6,6))
    -2628			ppl.subplots_adjust(.1,.1,.9,.9)
    -2629
    -2630		out.anchor_analyses, = ppl.plot(
    -2631			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    -2632			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    -2633			**kw_plot_anchors)
    -2634		out.unknown_analyses, = ppl.plot(
    -2635			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    -2636			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    -2637			**kw_plot_unknowns)
    -2638		out.anchor_avg = ppl.plot(
    -2639			np.array([ np.array([
    -2640				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    -2641				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    -2642				]) for sample in anchors]).T,
    -2643			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
    -2644			**kw_plot_anchor_avg)
    -2645		out.unknown_avg = ppl.plot(
    -2646			np.array([ np.array([
    -2647				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    -2648				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    -2649				]) for sample in unknowns]).T,
    -2650			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
    -2651			**kw_plot_unknown_avg)
    -2652		if xylimits == 'constant':
    -2653			x = [r[f'd{self._4x}'] for r in self]
    -2654			y = [r[f'D{self._4x}'] for r in self]
    -2655			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
    -2656			w, h = x2-x1, y2-y1
    -2657			x1 -= w/20
    -2658			x2 += w/20
    -2659			y1 -= h/20
    -2660			y2 += h/20
    -2661			ppl.axis([x1, x2, y1, y2])
    -2662		elif xylimits == 'free':
    -2663			x1, x2, y1, y2 = ppl.axis()
    -2664		else:
    -2665			x1, x2, y1, y2 = ppl.axis(xylimits)
    -2666				
    -2667		if error_contour_interval != 'none':
    -2668			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
    -2669			XI,YI = np.meshgrid(xi, yi)
    -2670			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
    -2671			if error_contour_interval == 'auto':
    -2672				rng = np.max(SI) - np.min(SI)
    -2673				if rng <= 0.01:
    -2674					cinterval = 0.001
    -2675				elif rng <= 0.03:
    -2676					cinterval = 0.004
    -2677				elif rng <= 0.1:
    -2678					cinterval = 0.01
    -2679				elif rng <= 0.3:
    -2680					cinterval = 0.03
    -2681				elif rng <= 1.:
    -2682					cinterval = 0.1
    -2683				else:
    -2684					cinterval = 0.5
    -2685			else:
    -2686				cinterval = error_contour_interval
    -2687
    -2688			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
    -2689			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
    -2690			out.clabel = ppl.clabel(out.contour)
    -2691
    -2692		ppl.xlabel(x_label)
    -2693		ppl.ylabel(y_label)
    -2694		ppl.title(session, weight = 'bold')
    -2695		ppl.grid(alpha = .2)
    -2696		out.ax = ppl.gca()		
    -2697
    -2698		return out
    +            
    2610	def plot_single_session(self,
    +2611		session,
    +2612		kw_plot_anchors = dict(ls='None', marker='x', mec=(.75, 0, 0), mew = .75, ms = 4),
    +2613		kw_plot_unknowns = dict(ls='None', marker='x', mec=(0, 0, .75), mew = .75, ms = 4),
    +2614		kw_plot_anchor_avg = dict(ls='-', marker='None', color=(.75, 0, 0), lw = .75),
    +2615		kw_plot_unknown_avg = dict(ls='-', marker='None', color=(0, 0, .75), lw = .75),
    +2616		kw_contour_error = dict(colors = [[0, 0, 0]], alpha = .5, linewidths = 0.75),
    +2617		xylimits = 'free', # | 'constant'
    +2618		x_label = None,
    +2619		y_label = None,
    +2620		error_contour_interval = 'auto',
    +2621		fig = 'new',
    +2622		):
    +2623		'''
    +2624		Generate plot for a single session
    +2625		'''
    +2626		if x_label is None:
    +2627			x_label = f'δ$_{{{self._4x}}}$ (‰)'
    +2628		if y_label is None:
    +2629			y_label = f'Δ$_{{{self._4x}}}$ (‰)'
    +2630
    +2631		out = _SessionPlot()
    +2632		anchors = [a for a in self.anchors if [r for r in self.sessions[session]['data'] if r['Sample'] == a]]
    +2633		unknowns = [u for u in self.unknowns if [r for r in self.sessions[session]['data'] if r['Sample'] == u]]
    +2634		
    +2635		if fig == 'new':
    +2636			out.fig = ppl.figure(figsize = (6,6))
    +2637			ppl.subplots_adjust(.1,.1,.9,.9)
    +2638
    +2639		out.anchor_analyses, = ppl.plot(
    +2640			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    +2641			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.anchors],
    +2642			**kw_plot_anchors)
    +2643		out.unknown_analyses, = ppl.plot(
    +2644			[r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    +2645			[r[f'D{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] in self.unknowns],
    +2646			**kw_plot_unknowns)
    +2647		out.anchor_avg = ppl.plot(
    +2648			np.array([ np.array([
    +2649				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    +2650				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    +2651				]) for sample in anchors]).T,
    +2652			np.array([ np.array([0, 0]) + self.Nominal_D4x[sample] for sample in anchors]).T,
    +2653			**kw_plot_anchor_avg)
    +2654		out.unknown_avg = ppl.plot(
    +2655			np.array([ np.array([
    +2656				np.min([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) - 1,
    +2657				np.max([r[f'd{self._4x}'] for r in self.sessions[session]['data'] if r['Sample'] == sample]) + 1
    +2658				]) for sample in unknowns]).T,
    +2659			np.array([ np.array([0, 0]) + self.unknowns[sample][f'D{self._4x}'] for sample in unknowns]).T,
    +2660			**kw_plot_unknown_avg)
    +2661		if xylimits == 'constant':
    +2662			x = [r[f'd{self._4x}'] for r in self]
    +2663			y = [r[f'D{self._4x}'] for r in self]
    +2664			x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
    +2665			w, h = x2-x1, y2-y1
    +2666			x1 -= w/20
    +2667			x2 += w/20
    +2668			y1 -= h/20
    +2669			y2 += h/20
    +2670			ppl.axis([x1, x2, y1, y2])
    +2671		elif xylimits == 'free':
    +2672			x1, x2, y1, y2 = ppl.axis()
    +2673		else:
    +2674			x1, x2, y1, y2 = ppl.axis(xylimits)
    +2675				
    +2676		if error_contour_interval != 'none':
    +2677			xi, yi = np.linspace(x1, x2), np.linspace(y1, y2)
    +2678			XI,YI = np.meshgrid(xi, yi)
    +2679			SI = np.array([[self.standardization_error(session, x, y) for x in xi] for y in yi])
    +2680			if error_contour_interval == 'auto':
    +2681				rng = np.max(SI) - np.min(SI)
    +2682				if rng <= 0.01:
    +2683					cinterval = 0.001
    +2684				elif rng <= 0.03:
    +2685					cinterval = 0.004
    +2686				elif rng <= 0.1:
    +2687					cinterval = 0.01
    +2688				elif rng <= 0.3:
    +2689					cinterval = 0.03
    +2690				elif rng <= 1.:
    +2691					cinterval = 0.1
    +2692				else:
    +2693					cinterval = 0.5
    +2694			else:
    +2695				cinterval = error_contour_interval
    +2696
    +2697			cval = np.arange(np.ceil(SI.min() / .001) * .001, np.ceil(SI.max() / .001 + 1) * .001, cinterval)
    +2698			out.contour = ppl.contour(XI, YI, SI, cval, **kw_contour_error)
    +2699			out.clabel = ppl.clabel(out.contour)
    +2700
    +2701		ppl.xlabel(x_label)
    +2702		ppl.ylabel(y_label)
    +2703		ppl.title(session, weight = 'bold')
    +2704		ppl.grid(alpha = .2)
    +2705		out.ax = ppl.gca()		
    +2706
    +2707		return out
     
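
The returned `_SessionPlot` exposes the figure, axes and plot handles for further tweaking. A hypothetical usage sketch (session name and output path are made up, and `./output` is assumed to exist):

```python
sp = mydata.plot_single_session('Session01', xylimits = 'constant')
sp.ax.set_title('Session01 (reprocessed)', weight = 'bold')	# override the default title
sp.fig.savefig('output/Session01.pdf')
```
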
    @@ -10311,193 +10656,193 @@

    API Documentation

    -
    2700	def plot_residuals(
    -2701		self,
    -2702		hist = False,
    -2703		binwidth = 2/3,
    -2704		dir = 'output',
    -2705		filename = None,
    -2706		highlight = [],
    -2707		colors = None,
    -2708		figsize = None,
    -2709		):
    -2710		'''
    -2711		Plot residuals of each analysis as a function of time (actually, as a function of
    -2712		the order of analyses in the `D4xdata` object)
    -2713
    -2714		+ `hist`: whether to add a histogram of residuals
    -2715		+ `binwidth`: the width of the bins in the histogram of residuals
    -2716		+ `dir`: the directory in which to save the plot
    -2717		+ `highlight`: a list of samples to highlight
    -2718		+ `colors`: a dict of `{<sample>: <color>}` for all samples
    -2719		+ `figsize`: (width, height) of figure
    -2720		'''
    -2721		# Layout
    -2722		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
    -2723		if hist:
    -2724			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
    -2725			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
    -2726		else:
    -2727			ppl.subplots_adjust(.08,.05,.78,.8)
    -2728			ax1 = ppl.subplot(111)
    -2729		
    -2730		# Colors
    -2731		N = len(self.anchors)
    -2732		if colors is None:
    -2733			if len(highlight) > 0:
    -2734				Nh = len(highlight)
    -2735				if Nh == 1:
    -2736					colors = {highlight[0]: (0,0,0)}
    -2737				elif Nh == 3:
    -2738					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
    -2739				elif Nh == 4:
    -2740					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    -2741				else:
    -2742					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
    -2743			else:
    -2744				if N == 3:
    -2745					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
    -2746				elif N == 4:
    -2747					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    -2748				else:
    -2749					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
    -2750
    -2751		ppl.sca(ax1)
    -2752		
    -2753		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
    -2754
    -2755		session = self[0]['Session']
    -2756		x1 = 0
    -2757# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
    -2758		x_sessions = {}
    -2759		one_or_more_singlets = False
    -2760		one_or_more_multiplets = False
    -2761		multiplets = set()
    -2762		for k,r in enumerate(self):
    -2763			if r['Session'] != session:
    -2764				x2 = k-1
    -2765				x_sessions[session] = (x1+x2)/2
    -2766				ppl.axvline(k - 0.5, color = 'k', lw = .5)
    -2767				session = r['Session']
    -2768				x1 = k
    -2769			singlet = len(self.samples[r['Sample']]['data']) == 1
    -2770			if not singlet:
    -2771				multiplets.add(r['Sample'])
    -2772			if r['Sample'] in self.unknowns:
    -2773				if singlet:
    -2774					one_or_more_singlets = True
    -2775				else:
    -2776					one_or_more_multiplets = True
    -2777			kw = dict(
    -2778				marker = 'x' if singlet else '+',
    -2779				ms = 4 if singlet else 5,
    -2780				ls = 'None',
    -2781				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
    -2782				mew = 1,
    -2783				alpha = 0.2 if singlet else 1,
    -2784				)
    -2785			if highlight and r['Sample'] not in highlight:
    -2786				kw['alpha'] = 0.2
    -2787			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
    -2788		x2 = k
    -2789		x_sessions[session] = (x1+x2)/2
    -2790
    -2791		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
    -2792		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
    -2793		if not hist:
    -2794			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
    -2795			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
    -2796
    -2797		xmin, xmax, ymin, ymax = ppl.axis()
    -2798		for s in x_sessions:
    -2799			ppl.text(
    -2800				x_sessions[s],
    -2801				ymax +1,
    -2802				s,
    -2803				va = 'bottom',
    -2804				**(
    -2805					dict(ha = 'center')
    -2806					if len(self.sessions[s]['data']) > (0.15 * len(self))
    -2807					else dict(ha = 'left', rotation = 45)
    -2808					)
    -2809				)
    -2810
    -2811		if hist:
    -2812			ppl.sca(ax2)
    -2813
    -2814		for s in colors:
    -2815			kw['marker'] = '+'
    -2816			kw['ms'] = 5
    -2817			kw['mec'] = colors[s]
    -2818			kw['label'] = s
    -2819			kw['alpha'] = 1
    -2820			ppl.plot([], [], **kw)
    -2821
    -2822		kw['mec'] = (0,0,0)
    -2823
    -2824		if one_or_more_singlets:
    -2825			kw['marker'] = 'x'
    -2826			kw['ms'] = 4
    -2827			kw['alpha'] = .2
    -2828			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
    +            
    2709	def plot_residuals(
    +2710		self,
    +2711		hist = False,
    +2712		binwidth = 2/3,
    +2713		dir = 'output',
    +2714		filename = None,
    +2715		highlight = [],
    +2716		colors = None,
    +2717		figsize = None,
    +2718		):
    +2719		'''
    +2720		Plot residuals of each analysis as a function of time (actually, as a function of
    +2721		the order of analyses in the `D4xdata` object)
    +2722
    +2723		+ `hist`: whether to add a histogram of residuals
    +2724		+ `binwidth`: the width of the bins in the histogram of residuals
    +2725		+ `dir`: the directory in which to save the plot
    +2726		+ `highlight`: a list of samples to highlight
    +2727		+ `colors`: a dict of `{<sample>: <color>}` for all samples
    +2728		+ `figsize`: (width, height) of figure
    +2729		'''
    +2730		# Layout
    +2731		fig = ppl.figure(figsize = (8,4) if figsize is None else figsize)
    +2732		if hist:
    +2733			ppl.subplots_adjust(left = .08, bottom = .05, right = .98, top = .8, wspace = -0.72)
    +2734			ax1, ax2 = ppl.subplot(121), ppl.subplot(1,15,15)
    +2735		else:
    +2736			ppl.subplots_adjust(.08,.05,.78,.8)
    +2737			ax1 = ppl.subplot(111)
    +2738		
    +2739		# Colors
    +2740		N = len(self.anchors)
    +2741		if colors is None:
    +2742			if len(highlight) > 0:
    +2743				Nh = len(highlight)
    +2744				if Nh == 1:
    +2745					colors = {highlight[0]: (0,0,0)}
    +2746				elif Nh == 3:
    +2747					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0)])}
    +2748				elif Nh == 4:
    +2749					colors = {a: c for a,c in zip(highlight, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    +2750				else:
    +2751					colors = {a: hls_to_rgb(k/Nh, .4, 1) for k,a in enumerate(highlight)}
    +2752			else:
    +2753				if N == 3:
    +2754					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0)])}
    +2755				elif N == 4:
    +2756					colors = {a: c for a,c in zip(self.anchors, [(0,0,1), (1,0,0), (0,2/3,0), (.75,0,.75)])}
    +2757				else:
    +2758					colors = {a: hls_to_rgb(k/N, .4, 1) for k,a in enumerate(self.anchors)}
    +2759
    +2760		ppl.sca(ax1)
    +2761		
    +2762		ppl.axhline(0, color = 'k', alpha = .25, lw = 0.75)
    +2763
    +2764		session = self[0]['Session']
    +2765		x1 = 0
    +2766# 		ymax = np.max([1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self])
    +2767		x_sessions = {}
    +2768		one_or_more_singlets = False
    +2769		one_or_more_multiplets = False
    +2770		multiplets = set()
    +2771		for k,r in enumerate(self):
    +2772			if r['Session'] != session:
    +2773				x2 = k-1
    +2774				x_sessions[session] = (x1+x2)/2
    +2775				ppl.axvline(k - 0.5, color = 'k', lw = .5)
    +2776				session = r['Session']
    +2777				x1 = k
    +2778			singlet = len(self.samples[r['Sample']]['data']) == 1
    +2779			if not singlet:
    +2780				multiplets.add(r['Sample'])
    +2781			if r['Sample'] in self.unknowns:
    +2782				if singlet:
    +2783					one_or_more_singlets = True
    +2784				else:
    +2785					one_or_more_multiplets = True
    +2786			kw = dict(
    +2787				marker = 'x' if singlet else '+',
    +2788				ms = 4 if singlet else 5,
    +2789				ls = 'None',
    +2790				mec = colors[r['Sample']] if r['Sample'] in colors else (0,0,0),
    +2791				mew = 1,
    +2792				alpha = 0.2 if singlet else 1,
    +2793				)
    +2794			if highlight and r['Sample'] not in highlight:
    +2795				kw['alpha'] = 0.2
    +2796			ppl.plot(k, 1e3 * (r['D47'] - self.samples[r['Sample']]['D47']), **kw)
    +2797		x2 = k
    +2798		x_sessions[session] = (x1+x2)/2
    +2799
    +2800		ppl.axhspan(-self.repeatability['r_D47']*1000, self.repeatability['r_D47']*1000, color = 'k', alpha = .05, lw = 1)
    +2801		ppl.axhspan(-self.repeatability['r_D47']*1000*self.t95, self.repeatability['r_D47']*1000*self.t95, color = 'k', alpha = .05, lw = 1)
    +2802		if not hist:
    +2803			ppl.text(len(self), self.repeatability['r_D47']*1000, f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm", size = 9, alpha = 1, va = 'center')
    +2804			ppl.text(len(self), self.repeatability['r_D47']*1000*self.t95, f"   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm", size = 9, alpha = 1, va = 'center')
    +2805
    +2806		xmin, xmax, ymin, ymax = ppl.axis()
    +2807		for s in x_sessions:
    +2808			ppl.text(
    +2809				x_sessions[s],
    +2810				ymax +1,
    +2811				s,
    +2812				va = 'bottom',
    +2813				**(
    +2814					dict(ha = 'center')
    +2815					if len(self.sessions[s]['data']) > (0.15 * len(self))
    +2816					else dict(ha = 'left', rotation = 45)
    +2817					)
    +2818				)
    +2819
    +2820		if hist:
    +2821			ppl.sca(ax2)
    +2822
    +2823		for s in colors:
    +2824			kw['marker'] = '+'
    +2825			kw['ms'] = 5
    +2826			kw['mec'] = colors[s]
    +2827			kw['label'] = s
    +2828			kw['alpha'] = 1
     2829			ppl.plot([], [], **kw)
     2830
    -2831		if one_or_more_multiplets:
    -2832			kw['marker'] = '+'
    -2833			kw['ms'] = 4
    -2834			kw['alpha'] = 1
    -2835			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
    -2836			ppl.plot([], [], **kw)
    -2837
    -2838		if hist:
    -2839			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
    -2840		else:
    -2841			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
    -2842		leg.set_zorder(-1000)
    -2843
    -2844		ppl.sca(ax1)
    -2845
    -2846		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
    -2847		ppl.xticks([])
    -2848		ppl.axis([-1, len(self), None, None])
    -2849
    -2850		if hist:
    -2851			ppl.sca(ax2)
    -2852			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
    -2853			ppl.hist(
    -2854				X,
    -2855				orientation = 'horizontal',
    -2856				histtype = 'stepfilled',
    -2857				ec = [.4]*3,
    -2858				fc = [.25]*3,
    -2859				alpha = .25,
    -2860				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
    -2861				)
    -2862			ppl.axis([None, None, ymin, ymax])
    -2863			ppl.text(0, 0,
    -2864				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
    -2865				size = 8,
    -2866				alpha = 1,
    -2867				va = 'center',
    -2868				ha = 'left',
    -2869				)
    -2870
    -2871			ppl.xticks([])
    -2872			ppl.yticks([])
    -2873# 			ax2.spines['left'].set_visible(False)
    -2874			ax2.spines['right'].set_visible(False)
    -2875			ax2.spines['top'].set_visible(False)
    -2876			ax2.spines['bottom'].set_visible(False)
    -2877
    -2878
    -2879		if not os.path.exists(dir):
    -2880			os.makedirs(dir)
    -2881		if filename is None:
    -2882			return fig
    -2883		elif filename == '':
    -2884			filename = f'D{self._4x}_residuals.pdf'
    -2885		ppl.savefig(f'{dir}/{filename}')
    -2886		ppl.close(fig)
    +2831		kw['mec'] = (0,0,0)
    +2832
    +2833		if one_or_more_singlets:
    +2834			kw['marker'] = 'x'
    +2835			kw['ms'] = 4
    +2836			kw['alpha'] = .2
    +2837			kw['label'] = 'other (N$\\,$=$\\,$1)' if one_or_more_multiplets else 'other'
    +2838			ppl.plot([], [], **kw)
    +2839
    +2840		if one_or_more_multiplets:
    +2841			kw['marker'] = '+'
    +2842			kw['ms'] = 4
    +2843			kw['alpha'] = 1
    +2844			kw['label'] = 'other (N$\\,$>$\\,$1)' if one_or_more_singlets else 'other'
    +2845			ppl.plot([], [], **kw)
    +2846
    +2847		if hist:
    +2848			leg = ppl.legend(loc = 'upper right', bbox_to_anchor = (1, 1), bbox_transform=fig.transFigure, borderaxespad = 1.5, fontsize = 9)
    +2849		else:
    +2850			leg = ppl.legend(loc = 'lower right', bbox_to_anchor = (1, 0), bbox_transform=fig.transFigure, borderaxespad = 1.5)
    +2851		leg.set_zorder(-1000)
    +2852
    +2853		ppl.sca(ax1)
    +2854
    +2855		ppl.ylabel('Δ$_{47}$ residuals (ppm)')
    +2856		ppl.xticks([])
    +2857		ppl.axis([-1, len(self), None, None])
    +2858
    +2859		if hist:
    +2860			ppl.sca(ax2)
    +2861			X = [1e3 * (r['D47'] - self.samples[r['Sample']]['D47']) for r in self if r['Sample'] in multiplets]
    +2862			ppl.hist(
    +2863				X,
    +2864				orientation = 'horizontal',
    +2865				histtype = 'stepfilled',
    +2866				ec = [.4]*3,
    +2867				fc = [.25]*3,
    +2868				alpha = .25,
    +2869				bins = np.linspace(-9e3*self.repeatability['r_D47'], 9e3*self.repeatability['r_D47'], int(18/binwidth+1)),
    +2870				)
    +2871			ppl.axis([None, None, ymin, ymax])
    +2872			ppl.text(0, 0,
    +2873				f"   SD = {self.repeatability['r_D47']*1000:.1f} ppm\n   95% CL = ± {self.repeatability['r_D47']*1000*self.t95:.1f} ppm",
    +2874				size = 8,
    +2875				alpha = 1,
    +2876				va = 'center',
    +2877				ha = 'left',
    +2878				)
    +2879
    +2880			ppl.xticks([])
    +2881			ppl.yticks([])
    +2882# 			ax2.spines['left'].set_visible(False)
    +2883			ax2.spines['right'].set_visible(False)
    +2884			ax2.spines['top'].set_visible(False)
    +2885			ax2.spines['bottom'].set_visible(False)
    +2886
    +2887
    +2888		if not os.path.exists(dir):
    +2889			os.makedirs(dir)
    +2890		if filename is None:
    +2891			return fig
    +2892		elif filename == '':
    +2893			filename = f'D{self._4x}_residuals.pdf'
    +2894		ppl.savefig(f'{dir}/{filename}')
    +2895		ppl.close(fig)
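A minimal usage sketch for `plot_residuals()` (session and sample names below are placeholders; the `refresh`/`wg`/`crunch` sequence mirrors the example and test scripts at the end of this diff, and `standardize()` is assumed to have populated the `repeatability` and sample Δ47 values used by the plot):

```py
from D47crunch import D47data, virtual_data

mydata = D47data(virtual_data(
	session = 'mysession',
	samples = [
		dict(Sample = 'ETH-1', N = 4),
		dict(Sample = 'ETH-2', N = 4),
		dict(Sample = 'ETH-3', N = 4),
		dict(Sample = 'MYSAMPLE', N = 8, D47 = 0.6),
		], seed = 123))

mydata.refresh()
mydata.wg()
mydata.crunch()
mydata.standardize()

# Save 'output/D47_residuals.pdf' with a histogram panel; passing
# filename = None instead would return the figure without saving it.
mydata.plot_residuals(hist = True, filename = '')
```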
     
    @@ -10527,11 +10872,11 @@

    API Documentation

    -
    2889	def simulate(self, *args, **kwargs):
    -2890		'''
    -2891		Legacy function with warning message pointing to `virtual_data()`
    -2892		'''
    -2893		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
    +            
    2898	def simulate(self, *args, **kwargs):
    +2899		'''
    +2900		Legacy function with warning message pointing to `virtual_data()`
    +2901		'''
    +2902		raise DeprecationWarning('D4xdata.simulate is deprecated and has been replaced by virtual_data()')
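The legacy call now raises immediately; a migration sketch (names arbitrary):

```py
from D47crunch import D47data, virtual_data

# Instead of D47data.simulate(), generate synthetic analyses with
# virtual_data() and feed them to the constructor:
rawdata = virtual_data(
	session = 'mysession',
	samples = [dict(Sample = 'ETH-1', N = 4), dict(Sample = 'ETH-2', N = 4)],
	seed = 123)
mydata = D47data(rawdata)
```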
     
    @@ -10551,81 +10896,81 @@

    API Documentation

    -
    2895	def plot_distribution_of_analyses(
    -2896		self,
    -2897		dir = 'output',
    -2898		filename = None,
    -2899		vs_time = False,
    -2900		figsize = (6,4),
    -2901		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
    -2902		output = None,
    -2903		):
    -2904		'''
    -2905		Plot temporal distribution of all analyses in the data set.
    -2906		
    -2907		**Parameters**
    -2908
    -2909		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
    -2910		'''
    -2911
    -2912		asamples = [s for s in self.anchors]
    -2913		usamples = [s for s in self.unknowns]
    -2914		if output is None or output == 'fig':
    -2915			fig = ppl.figure(figsize = figsize)
    -2916			ppl.subplots_adjust(*subplots_adjust)
    -2917		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    -2918		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    -2919		Xmax += (Xmax-Xmin)/40
    -2920		Xmin -= (Xmax-Xmin)/41
    -2921		for k, s in enumerate(asamples + usamples):
    -2922			if vs_time:
    -2923				X = [r['TimeTag'] for r in self if r['Sample'] == s]
    -2924			else:
    -2925				X = [x for x,r in enumerate(self) if r['Sample'] == s]
    -2926			Y = [-k for x in X]
    -2927			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
    -2928			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
    -2929			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
    -2930		ppl.axis([Xmin, Xmax, -k-1, 1])
    -2931		ppl.xlabel('\ntime')
    -2932		ppl.gca().annotate('',
    -2933			xy = (0.6, -0.02),
    -2934			xycoords = 'axes fraction',
    -2935			xytext = (.4, -0.02), 
    -2936            arrowprops = dict(arrowstyle = "->", color = 'k'),
    -2937            )
    -2938			
    -2939
    -2940		x2 = -1
    -2941		for session in self.sessions:
    -2942			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    -2943			if vs_time:
    -2944				ppl.axvline(x1, color = 'k', lw = .75)
    -2945			if x2 > -1:
    -2946				if not vs_time:
    -2947					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
    -2948			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    -2949# 			from xlrd import xldate_as_datetime
    -2950# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
    -2951			if vs_time:
    -2952				ppl.axvline(x2, color = 'k', lw = .75)
    -2953				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
    -2954			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
    -2955
    -2956		ppl.xticks([])
    -2957		ppl.yticks([])
    -2958
    -2959		if output is None:
    -2960			if not os.path.exists(dir):
    -2961				os.makedirs(dir)
    -2962			if filename == None:
    -2963				filename = f'D{self._4x}_distribution_of_analyses.pdf'
    -2964			ppl.savefig(f'{dir}/{filename}')
    -2965			ppl.close(fig)
    -2966		elif output == 'ax':
    -2967			return ppl.gca()
    -2968		elif output == 'fig':
    -2969			return fig
    +            
    2904	def plot_distribution_of_analyses(
    +2905		self,
    +2906		dir = 'output',
    +2907		filename = None,
    +2908		vs_time = False,
    +2909		figsize = (6,4),
    +2910		subplots_adjust = (0.02, 0.13, 0.85, 0.8),
    +2911		output = None,
    +2912		):
    +2913		'''
    +2914		Plot temporal distribution of all analyses in the data set.
    +2915		
    +2916		**Parameters**
    +2917
    +2918		+ `vs_time`: if `True`, plot as a function of `TimeTag` rather than sequentially.
    +2919		'''
    +2920
    +2921		asamples = [s for s in self.anchors]
    +2922		usamples = [s for s in self.unknowns]
    +2923		if output is None or output == 'fig':
    +2924			fig = ppl.figure(figsize = figsize)
    +2925			ppl.subplots_adjust(*subplots_adjust)
    +2926		Xmin = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    +2927		Xmax = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self)])
    +2928		Xmax += (Xmax-Xmin)/40
    +2929		Xmin -= (Xmax-Xmin)/41
    +2930		for k, s in enumerate(asamples + usamples):
    +2931			if vs_time:
    +2932				X = [r['TimeTag'] for r in self if r['Sample'] == s]
    +2933			else:
    +2934				X = [x for x,r in enumerate(self) if r['Sample'] == s]
    +2935			Y = [-k for x in X]
    +2936			ppl.plot(X, Y, 'o', mec = None, mew = 0, mfc = 'b' if s in usamples else 'r', ms = 3, alpha = .75)
    +2937			ppl.axhline(-k, color = 'b' if s in usamples else 'r', lw = .5, alpha = .25)
    +2938			ppl.text(Xmax, -k, f'   {s}', va = 'center', ha = 'left', size = 7, color = 'b' if s in usamples else 'r')
    +2939		ppl.axis([Xmin, Xmax, -k-1, 1])
    +2940		ppl.xlabel('\ntime')
    +2941		ppl.gca().annotate('',
    +2942			xy = (0.6, -0.02),
    +2943			xycoords = 'axes fraction',
    +2944			xytext = (.4, -0.02), 
    +2945            arrowprops = dict(arrowstyle = "->", color = 'k'),
    +2946            )
    +2947			
    +2948
    +2949		x2 = -1
    +2950		for session in self.sessions:
    +2951			x1 = min([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    +2952			if vs_time:
    +2953				ppl.axvline(x1, color = 'k', lw = .75)
    +2954			if x2 > -1:
    +2955				if not vs_time:
    +2956					ppl.axvline((x1+x2)/2, color = 'k', lw = .75, alpha = .5)
    +2957			x2 = max([r['TimeTag'] if vs_time else j for j,r in enumerate(self) if r['Session'] == session])
    +2958# 			from xlrd import xldate_as_datetime
    +2959# 			print(session, xldate_as_datetime(x1, 0), xldate_as_datetime(x2, 0))
    +2960			if vs_time:
    +2961				ppl.axvline(x2, color = 'k', lw = .75)
    +2962				ppl.axvspan(x1,x2,color = 'k', zorder = -100, alpha = .15)
    +2963			ppl.text((x1+x2)/2, 1, f' {session}', ha = 'left', va = 'bottom', rotation = 45, size = 8)
    +2964
    +2965		ppl.xticks([])
    +2966		ppl.yticks([])
    +2967
    +2968		if output is None:
    +2969			if not os.path.exists(dir):
    +2970				os.makedirs(dir)
+2971			if filename is None:
    +2972				filename = f'D{self._4x}_distribution_of_analyses.pdf'
    +2973			ppl.savefig(f'{dir}/{filename}')
    +2974			ppl.close(fig)
    +2975		elif output == 'ax':
    +2976			return ppl.gca()
    +2977		elif output == 'fig':
    +2978			return fig
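A usage sketch, assuming `mydata` is a standardized `D47data` object and that each analysis carries a `TimeTag` field whenever `vs_time = True`:

```py
# Default call: plot analyses in sequence order and save the figure
# to 'output/D47_distribution_of_analyses.pdf':
mydata.plot_distribution_of_analyses()

# Plot against 'TimeTag' instead, returning the figure without saving:
fig = mydata.plot_distribution_of_analyses(vs_time = True, output = 'fig')
```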
     
    @@ -10639,6 +10984,190 @@

    API Documentation

+            
    2981	def plot_bulk_compositions(
    +2982		self,
    +2983		samples = None,
    +2984		dir = 'output/bulk_compositions',
    +2985		figsize = (6,6),
    +2986		subplots_adjust = (0.15, 0.12, 0.95, 0.92),
    +2987		show = False,
    +2988		sample_color = (0,.5,1),
    +2989		analysis_color = (.7,.7,.7),
    +2990		labeldist = 0.3,
    +2991		radius = 0.05,
    +2992		):
    +2993		'''
+2994		Plot δ13C_VPDB vs δ18O_VSMOW (of CO2) for all analyses.
    +2995		
    +2996		By default, creates a directory `./output/bulk_compositions` where plots for
    +2997		each sample are saved. Another plot named `__all__.pdf` shows all analyses together.
    +2998		
    +2999		
    +3000		**Parameters**
    +3001
    +3002		+ `samples`: Only these samples are processed (by default: all samples).
    +3003		+ `dir`: where to save the plots
    +3004		+ `figsize`: (width, height) of figure
    +3005		+ `subplots_adjust`: passed to `subplots_adjust()`
    +3006		+ `show`: whether to call `matplotlib.pyplot.show()` on the plot with all samples,
    +3007		allowing for interactive visualization/exploration in (δ13C, δ18O) space.
+3008		+ `sample_color`: color used for sample markers/labels
+3009		+ `analysis_color`: color used for replicate (analysis) markers/labels
    +3010		+ `labeldist`: distance (in inches) from replicate markers to replicate labels
    +3011		+ `radius`: radius of the dashed circle providing scale. No circle if `radius = 0`.
    +3012		'''
    +3013
    +3014		from matplotlib.patches import Ellipse
    +3015
    +3016		if samples is None:
    +3017			samples = [_ for _ in self.samples]
    +3018
    +3019		saved = {}
    +3020
    +3021		for s in samples:
    +3022
    +3023			fig = ppl.figure(figsize = figsize)
    +3024			fig.subplots_adjust(*subplots_adjust)
    +3025			ax = ppl.subplot(111)
    +3026			ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
    +3027			ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
    +3028			ppl.title(s)
    +3029
    +3030
    +3031			XY = np.array([[_['d18O_VSMOW'], _['d13C_VPDB']] for _ in self.samples[s]['data']])
    +3032			UID = [_['UID'] for _ in self.samples[s]['data']]
    +3033			XY0 = XY.mean(0)
    +3034
    +3035			for xy in XY:
    +3036				ppl.plot([xy[0], XY0[0]], [xy[1], XY0[1]], '-', lw = 1, color = analysis_color)
    +3037				
    +3038			ppl.plot(*XY.T, 'wo', mew = 1, mec = analysis_color)
    +3039			ppl.plot(*XY0, 'wo', mew = 2, mec = sample_color)
    +3040			ppl.text(*XY0, f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
    +3041			saved[s] = [XY, XY0]
    +3042			
    +3043			x1, x2, y1, y2 = ppl.axis()
    +3044			x0, dx = (x1+x2)/2, (x2-x1)/2
    +3045			y0, dy = (y1+y2)/2, (y2-y1)/2
    +3046			dx, dy = [max(max(dx, dy), radius)]*2
    +3047
    +3048			ppl.axis([
    +3049				x0 - 1.2*dx,
    +3050				x0 + 1.2*dx,
    +3051				y0 - 1.2*dy,
    +3052				y0 + 1.2*dy,
    +3053				])			
    +3054
    +3055			XY0_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(XY0))
    +3056
    +3057			for xy, uid in zip(XY, UID):
    +3058
    +3059				xy_in_display_space = fig.dpi_scale_trans.inverted().transform(ax.transData.transform(xy))
    +3060				vector_in_display_space = xy_in_display_space - XY0_in_display_space
    +3061
    +3062				if (vector_in_display_space**2).sum() > 0:
    +3063
    +3064					unit_vector_in_display_space = vector_in_display_space / ((vector_in_display_space**2).sum())**0.5
    +3065					label_vector_in_display_space = vector_in_display_space + unit_vector_in_display_space * labeldist
    +3066					label_xy_in_display_space = XY0_in_display_space + label_vector_in_display_space
    +3067					label_xy_in_data_space = ax.transData.inverted().transform(fig.dpi_scale_trans.transform(label_xy_in_display_space))
    +3068
    +3069					ppl.text(*label_xy_in_data_space, uid, va = 'center', ha = 'center', color = analysis_color)
    +3070
    +3071				else:
    +3072
    +3073					ppl.text(*xy, f'{uid}  ', va = 'center', ha = 'right', color = analysis_color)
    +3074
    +3075			if radius:
    +3076				ax.add_artist(Ellipse(
    +3077					xy = XY0,
    +3078					width = radius*2,
    +3079					height = radius*2,
    +3080					ls = (0, (2,2)),
    +3081					lw = .7,
    +3082					ec = analysis_color,
    +3083					fc = 'None',
    +3084					))
    +3085				ppl.text(
    +3086					XY0[0],
    +3087					XY0[1]-radius,
    +3088					f'\n± {radius*1e3:.0f} ppm',
    +3089					color = analysis_color,
    +3090					va = 'top',
    +3091					ha = 'center',
    +3092					linespacing = 0.4,
    +3093					size = 8,
    +3094					)
    +3095
    +3096			if not os.path.exists(dir):
    +3097				os.makedirs(dir)
    +3098			fig.savefig(f'{dir}/{s}.pdf')
    +3099			ppl.close(fig)
    +3100
    +3101		fig = ppl.figure(figsize = figsize)
    +3102		fig.subplots_adjust(*subplots_adjust)
    +3103		ppl.xlabel('$δ^{18}O_{VSMOW}$ of $CO_2$ (‰)')
    +3104		ppl.ylabel('$δ^{13}C_{VPDB}$ (‰)')
    +3105
    +3106		for s in saved:
    +3107			for xy in saved[s][0]:
    +3108				ppl.plot([xy[0], saved[s][1][0]], [xy[1], saved[s][1][1]], '-', lw = 1, color = analysis_color)
    +3109			ppl.plot(*saved[s][0].T, 'wo', mew = 1, mec = analysis_color)
    +3110			ppl.plot(*saved[s][1], 'wo', mew = 1.5, mec = sample_color)
    +3111			ppl.text(*saved[s][1], f'  {s}', va = 'center', ha = 'left', color = sample_color, weight = 'bold')
    +3112
    +3113		x1, x2, y1, y2 = ppl.axis()
    +3114		ppl.axis([
    +3115			x1 - (x2-x1)/10,
    +3116			x2 + (x2-x1)/10,
    +3117			y1 - (y2-y1)/10,
    +3118			y2 + (y2-y1)/10,
    +3119			])			
    +3120
    +3121
    +3122		if not os.path.exists(dir):
    +3123			os.makedirs(dir)
    +3124		fig.savefig(f'{dir}/__all__.pdf')
    +3125		if show:
    +3126			ppl.show()
    +3127		ppl.close(fig)
    +
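Because label offsets are computed in display space through `fig.dpi_scale_trans`, `labeldist` is a physical distance in inches, independent of the axis limits. A usage sketch (the sample names passed to `samples` are hypothetical):

```py
mydata.plot_bulk_compositions(
	samples = ['ETH-1', 'MYSAMPLE'],  # hypothetical subset; by default all samples are plotted
	radius = 0.1,      # dashed scale circle labeled '± 100 ppm'
	labeldist = 0.5,   # replicate labels 0.5 inches from the sample centroid
	show = True,       # also display the '__all__' figure interactively
	)
```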
    Inherited Members
    @@ -10671,94 +11200,94 @@
    Inherited Members
    -
    2972class D47data(D4xdata):
    -2973	'''
    -2974	Store and process data for a large set of Δ47 analyses,
    -2975	usually comprising more than one analytical session.
    -2976	'''
    -2977
    -2978	Nominal_D4x = {
    -2979		'ETH-1':   0.2052,
    -2980		'ETH-2':   0.2085,
    -2981		'ETH-3':   0.6132,
    -2982		'ETH-4':   0.4511,
    -2983		'IAEA-C1': 0.3018,
    -2984		'IAEA-C2': 0.6409,
    -2985		'MERCK':   0.5135,
    -2986		} # I-CDES (Bernasconi et al., 2021)
    -2987	'''
    -2988	Nominal Δ47 values assigned to the Δ47 anchor samples, used by
    -2989	`D47data.standardize()` to normalize unknown samples to an absolute Δ47
    -2990	reference frame.
    -2991
    -2992	By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)):
    -2993	```py
    -2994	{
    -2995		'ETH-1'   : 0.2052,
    -2996		'ETH-2'   : 0.2085,
    -2997		'ETH-3'   : 0.6132,
    -2998		'ETH-4'   : 0.4511,
    -2999		'IAEA-C1' : 0.3018,
    -3000		'IAEA-C2' : 0.6409,
    -3001		'MERCK'   : 0.5135,
    -3002	}
    -3003	```
    -3004	'''
    -3005
    -3006
    -3007	@property
    -3008	def Nominal_D47(self):
    -3009		return self.Nominal_D4x
    -3010	
    -3011
    -3012	@Nominal_D47.setter
    -3013	def Nominal_D47(self, new):
    -3014		self.Nominal_D4x = dict(**new)
    -3015		self.refresh()
    -3016
    -3017
    -3018	def __init__(self, l = [], **kwargs):
    -3019		'''
    -3020		**Parameters:** same as `D4xdata.__init__()`
    -3021		'''
    -3022		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
    -3023
    -3024
    -3025	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
    -3026		'''
    -3027		Find all samples for which `Teq` is specified, compute equilibrium Δ47
    -3028		value for that temperature, and add treat these samples as additional anchors.
    -3029
    -3030		**Parameters**
    -3031
    -3032		+ `fCo2eqD47`: Which CO2 equilibrium law to use
    -3033		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
    -3034		`wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)).
    -3035		+ `priority`: if `replace`: forget old anchors and only use the new ones;
    -3036		if `new`: keep pre-existing anchors but update them in case of conflict
    -3037		between old and new Δ47 values;
    -3038		if `old`: keep pre-existing anchors but preserve their original Δ47
    -3039		values in case of conflict.
    -3040		'''
    -3041		f = {
    -3042			'petersen': fCO2eqD47_Petersen,
    -3043			'wang': fCO2eqD47_Wang,
    -3044			}[fCo2eqD47]
    -3045		foo = {}
    -3046		for r in self:
    -3047			if 'Teq' in r:
    -3048				if r['Sample'] in foo:
    -3049					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
    -3050				else:
    -3051					foo[r['Sample']] = f(r['Teq'])
    -3052			else:
    -3053					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
    -3054
    -3055		if priority == 'replace':
    -3056			self.Nominal_D47 = {}
    -3057		for s in foo:
    -3058			if priority != 'old' or s not in self.Nominal_D47:
    -3059				self.Nominal_D47[s] = foo[s]
    +            
    3131class D47data(D4xdata):
    +3132	'''
    +3133	Store and process data for a large set of Δ47 analyses,
    +3134	usually comprising more than one analytical session.
    +3135	'''
    +3136
    +3137	Nominal_D4x = {
    +3138		'ETH-1':   0.2052,
    +3139		'ETH-2':   0.2085,
    +3140		'ETH-3':   0.6132,
    +3141		'ETH-4':   0.4511,
    +3142		'IAEA-C1': 0.3018,
    +3143		'IAEA-C2': 0.6409,
    +3144		'MERCK':   0.5135,
    +3145		} # I-CDES (Bernasconi et al., 2021)
    +3146	'''
    +3147	Nominal Δ47 values assigned to the Δ47 anchor samples, used by
    +3148	`D47data.standardize()` to normalize unknown samples to an absolute Δ47
    +3149	reference frame.
    +3150
    +3151	By default equal to (after [Bernasconi et al. (2021)](https://doi.org/10.1029/2020GC009588)):
    +3152	```py
    +3153	{
    +3154		'ETH-1'   : 0.2052,
    +3155		'ETH-2'   : 0.2085,
    +3156		'ETH-3'   : 0.6132,
    +3157		'ETH-4'   : 0.4511,
    +3158		'IAEA-C1' : 0.3018,
    +3159		'IAEA-C2' : 0.6409,
    +3160		'MERCK'   : 0.5135,
    +3161	}
    +3162	```
    +3163	'''
    +3164
    +3165
    +3166	@property
    +3167	def Nominal_D47(self):
    +3168		return self.Nominal_D4x
    +3169	
    +3170
    +3171	@Nominal_D47.setter
    +3172	def Nominal_D47(self, new):
    +3173		self.Nominal_D4x = dict(**new)
    +3174		self.refresh()
    +3175
    +3176
    +3177	def __init__(self, l = [], **kwargs):
    +3178		'''
    +3179		**Parameters:** same as `D4xdata.__init__()`
    +3180		'''
    +3181		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
    +3182
    +3183
    +3184	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
    +3185		'''
    +3186		Find all samples for which `Teq` is specified, compute equilibrium Δ47
+3187	value for that temperature, and treat these samples as additional anchors.
    +3188
    +3189		**Parameters**
    +3190
    +3191		+ `fCo2eqD47`: Which CO2 equilibrium law to use
    +3192		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
+3193		`wang`: [Wang et al. (2004)](https://doi.org/10.1016/j.gca.2004.05.039)).
    +3194		+ `priority`: if `replace`: forget old anchors and only use the new ones;
    +3195		if `new`: keep pre-existing anchors but update them in case of conflict
    +3196		between old and new Δ47 values;
    +3197		if `old`: keep pre-existing anchors but preserve their original Δ47
    +3198		values in case of conflict.
    +3199		'''
    +3200		f = {
    +3201			'petersen': fCO2eqD47_Petersen,
    +3202			'wang': fCO2eqD47_Wang,
    +3203			}[fCo2eqD47]
    +3204		foo = {}
    +3205		for r in self:
    +3206			if 'Teq' in r:
    +3207				if r['Sample'] in foo:
    +3208					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
    +3209				else:
    +3210					foo[r['Sample']] = f(r['Teq'])
    +3211			else:
+3212				assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
    +3213
    +3214		if priority == 'replace':
    +3215			self.Nominal_D47 = {}
    +3216		for s in foo:
    +3217			if priority != 'old' or s not in self.Nominal_D47:
    +3218				self.Nominal_D47[s] = foo[s]
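Since `Nominal_D47` is a property wrapping `Nominal_D4x`, assigning a new dict copies it and triggers `refresh()`. A sketch restricting standardization to the four ETH anchors, using the I-CDES values listed above (`rawdata` stands for analyses obtained elsewhere):

```py
mydata = D47data(rawdata)
mydata.Nominal_D47 = {
	'ETH-1': 0.2052,
	'ETH-2': 0.2085,
	'ETH-3': 0.6132,
	'ETH-4': 0.4511,
	}
```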
     
    @@ -10777,11 +11306,11 @@
    Inherited Members
    -
    3018	def __init__(self, l = [], **kwargs):
    -3019		'''
    -3020		**Parameters:** same as `D4xdata.__init__()`
    -3021		'''
    -3022		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
    +            
    3177	def __init__(self, l = [], **kwargs):
    +3178		'''
    +3179		**Parameters:** same as `D4xdata.__init__()`
    +3180		'''
    +3181		D4xdata.__init__(self, l = l, mass = '47', **kwargs)
     
    @@ -10833,41 +11362,41 @@
    Inherited Members
    -
    3025	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
    -3026		'''
    -3027		Find all samples for which `Teq` is specified, compute equilibrium Δ47
    -3028		value for that temperature, and add treat these samples as additional anchors.
    -3029
    -3030		**Parameters**
    -3031
    -3032		+ `fCo2eqD47`: Which CO2 equilibrium law to use
    -3033		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
    -3034		`wang`: [Wang et al. (2019)](https://doi.org/10.1016/j.gca.2004.05.039)).
    -3035		+ `priority`: if `replace`: forget old anchors and only use the new ones;
    -3036		if `new`: keep pre-existing anchors but update them in case of conflict
    -3037		between old and new Δ47 values;
    -3038		if `old`: keep pre-existing anchors but preserve their original Δ47
    -3039		values in case of conflict.
    -3040		'''
    -3041		f = {
    -3042			'petersen': fCO2eqD47_Petersen,
    -3043			'wang': fCO2eqD47_Wang,
    -3044			}[fCo2eqD47]
    -3045		foo = {}
    -3046		for r in self:
    -3047			if 'Teq' in r:
    -3048				if r['Sample'] in foo:
    -3049					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
    -3050				else:
    -3051					foo[r['Sample']] = f(r['Teq'])
    -3052			else:
    -3053					assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
    -3054
    -3055		if priority == 'replace':
    -3056			self.Nominal_D47 = {}
    -3057		for s in foo:
    -3058			if priority != 'old' or s not in self.Nominal_D47:
    -3059				self.Nominal_D47[s] = foo[s]
    +            
    3184	def D47fromTeq(self, fCo2eqD47 = 'petersen', priority = 'new'):
    +3185		'''
    +3186		Find all samples for which `Teq` is specified, compute equilibrium Δ47
+3187		value for that temperature, and treat these samples as additional anchors.
    +3188
    +3189		**Parameters**
    +3190
    +3191		+ `fCo2eqD47`: Which CO2 equilibrium law to use
    +3192		(`petersen`: [Petersen et al. (2019)](https://doi.org/10.1029/2018GC008127);
+3193		`wang`: [Wang et al. (2004)](https://doi.org/10.1016/j.gca.2004.05.039)).
    +3194		+ `priority`: if `replace`: forget old anchors and only use the new ones;
    +3195		if `new`: keep pre-existing anchors but update them in case of conflict
    +3196		between old and new Δ47 values;
    +3197		if `old`: keep pre-existing anchors but preserve their original Δ47
    +3198		values in case of conflict.
    +3199		'''
    +3200		f = {
    +3201			'petersen': fCO2eqD47_Petersen,
    +3202			'wang': fCO2eqD47_Wang,
    +3203			}[fCo2eqD47]
    +3204		foo = {}
    +3205		for r in self:
    +3206			if 'Teq' in r:
    +3207				if r['Sample'] in foo:
    +3208					assert foo[r['Sample']] == f(r['Teq']), f'Different values of `Teq` provided for sample `{r["Sample"]}`.'
    +3209				else:
    +3210					foo[r['Sample']] = f(r['Teq'])
    +3211			else:
+3212				assert r['Sample'] not in foo, f'`Teq` is inconsistently specified for sample `{r["Sample"]}`.'
    +3213
    +3214		if priority == 'replace':
    +3215			self.Nominal_D47 = {}
    +3216		for s in foo:
    +3217			if priority != 'old' or s not in self.Nominal_D47:
    +3218				self.Nominal_D47[s] = foo[s]
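A sketch of the intended workflow, assuming a hypothetical equilibrated-gas sample `EQGAS-25` whose analyses must all carry the same `Teq` value, expressed in the temperature units expected by the selected CO2 equilibrium function:

```py
for r in mydata:
	if r['Sample'] == 'EQGAS-25':  # hypothetical equilibrated-gas sample
		r['Teq'] = 25.

# Compute its equilibrium Δ47 and add it to the anchors, keeping
# pre-existing anchors but updating them in case of conflict:
mydata.D47fromTeq(fCo2eqD47 = 'petersen', priority = 'new')
```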
     
    @@ -10949,6 +11478,7 @@
    Inherited Members
    plot_residuals
    simulate
    plot_distribution_of_analyses
+plot_bulk_compositions
    builtins.list
    @@ -10979,55 +11509,55 @@
    Inherited Members
    -
    3064class D48data(D4xdata):
    -3065	'''
    -3066	Store and process data for a large set of Δ48 analyses,
    -3067	usually comprising more than one analytical session.
    -3068	'''
    -3069
    -3070	Nominal_D4x = {
    -3071		'ETH-1':  0.138,
    -3072		'ETH-2':  0.138,
    -3073		'ETH-3':  0.270,
    -3074		'ETH-4':  0.223,
    -3075		'GU-1':  -0.419,
    -3076		} # (Fiebig et al., 2019, 2021)
    -3077	'''
    -3078	Nominal Δ48 values assigned to the Δ48 anchor samples, used by
    -3079	`D48data.standardize()` to normalize unknown samples to an absolute Δ48
    -3080	reference frame.
    -3081
    -3082	By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019),
    -3083	Fiebig et al. (in press)):
    -3084
    -3085	```py
    -3086	{
    -3087		'ETH-1' :  0.138,
    -3088		'ETH-2' :  0.138,
    -3089		'ETH-3' :  0.270,
    -3090		'ETH-4' :  0.223,
    -3091		'GU-1'  : -0.419,
    -3092	}
    -3093	```
    -3094	'''
    -3095
    -3096
    -3097	@property
    -3098	def Nominal_D48(self):
    -3099		return self.Nominal_D4x
    -3100
    -3101	
    -3102	@Nominal_D48.setter
    -3103	def Nominal_D48(self, new):
    -3104		self.Nominal_D4x = dict(**new)
    -3105		self.refresh()
    -3106
    -3107
    -3108	def __init__(self, l = [], **kwargs):
    -3109		'''
    -3110		**Parameters:** same as `D4xdata.__init__()`
    -3111		'''
    -3112		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
    +            
    3223class D48data(D4xdata):
    +3224	'''
    +3225	Store and process data for a large set of Δ48 analyses,
    +3226	usually comprising more than one analytical session.
    +3227	'''
    +3228
    +3229	Nominal_D4x = {
    +3230		'ETH-1':  0.138,
    +3231		'ETH-2':  0.138,
    +3232		'ETH-3':  0.270,
    +3233		'ETH-4':  0.223,
    +3234		'GU-1':  -0.419,
    +3235		} # (Fiebig et al., 2019, 2021)
    +3236	'''
    +3237	Nominal Δ48 values assigned to the Δ48 anchor samples, used by
    +3238	`D48data.standardize()` to normalize unknown samples to an absolute Δ48
    +3239	reference frame.
    +3240
    +3241	By default equal to (after [Fiebig et al. (2019)](https://doi.org/10.1016/j.chemgeo.2019.05.019),
    +3242	Fiebig et al. (in press)):
    +3243
    +3244	```py
    +3245	{
    +3246		'ETH-1' :  0.138,
    +3247		'ETH-2' :  0.138,
    +3248		'ETH-3' :  0.270,
    +3249		'ETH-4' :  0.223,
    +3250		'GU-1'  : -0.419,
    +3251	}
    +3252	```
    +3253	'''
    +3254
    +3255
    +3256	@property
    +3257	def Nominal_D48(self):
    +3258		return self.Nominal_D4x
    +3259
    +3260	
    +3261	@Nominal_D48.setter
    +3262	def Nominal_D48(self, new):
    +3263		self.Nominal_D4x = dict(**new)
    +3264		self.refresh()
    +3265
    +3266
    +3267	def __init__(self, l = [], **kwargs):
    +3268		'''
    +3269		**Parameters:** same as `D4xdata.__init__()`
    +3270		'''
    +3271		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
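Apart from its nominal anchor values and `mass = '48'`, `D48data` behaves like `D47data`. A sketch processing the same raw analyses for both masses, assuming `rawdata` includes the δ48 measurements required for Δ48 standardization:

```py
mydata47 = D47data(rawdata)
mydata48 = D48data(rawdata)
for d in (mydata47, mydata48):
	d.crunch()
	d.standardize()
```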
     
    @@ -11046,11 +11576,11 @@
    Inherited Members
    -
    3108	def __init__(self, l = [], **kwargs):
    -3109		'''
    -3110		**Parameters:** same as `D4xdata.__init__()`
    -3111		'''
    -3112		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
    +            
    3267	def __init__(self, l = [], **kwargs):
    +3268		'''
    +3269		**Parameters:** same as `D4xdata.__init__()`
    +3270		'''
    +3271		D4xdata.__init__(self, l = l, mass = '48', **kwargs)
     
    @@ -11148,6 +11678,7 @@
    Inherited Members
    plot_residuals
    simulate
    plot_distribution_of_analyses
+plot_bulk_compositions
    builtins.list
diff --git a/examples/plot_bulk_compositions/example_plot_bulk_compositions.png b/examples/plot_bulk_compositions/example_plot_bulk_compositions.png
new file mode 100644
index 0000000..a4a7623
Binary files /dev/null and b/examples/plot_bulk_compositions/example_plot_bulk_compositions.png differ
diff --git a/examples/plot_bulk_compositions/example_plot_bulk_compositions.py b/examples/plot_bulk_compositions/example_plot_bulk_compositions.py
new file mode 100755
index 0000000..5aeb015
--- /dev/null
+++ b/examples/plot_bulk_compositions/example_plot_bulk_compositions.py
@@ -0,0 +1,18 @@
+#! /usr/bin/env python3
+
+from lmfit import report_fit
+from D47crunch import *
+
+mydata = D47data(virtual_data(
+	session = 'mysession',
+	samples = [
+		dict(Sample = 'ETH-1', N = 4),
+		dict(Sample = 'ETH-2', N = 4),
+		dict(Sample = 'ETH-3', N = 4),
+		dict(Sample = 'MYSAMPLE', N = 8, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
+		], seed = 123))
+
+mydata.refresh()
+mydata.wg()
+mydata.crunch()
+mydata.plot_bulk_compositions()
diff --git a/tests/test_plot_bulk_compositions.py b/tests/test_plot_bulk_compositions.py
new file mode 100755
index 0000000..1abe7f8
--- /dev/null
+++ b/tests/test_plot_bulk_compositions.py
@@ -0,0 +1,31 @@
+#! /usr/bin/env python3
+
+from lmfit import report_fit
+from D47crunch import *
+
+mydata1 = virtual_data(
+	session = 'mysession1',
+	samples = [
+		dict(Sample = 'ETH-1', N = 4),
+		dict(Sample = 'ETH-2', N = 4),
+		dict(Sample = 'ETH-3', N = 4),
+		dict(Sample = 'FOO', N = 4, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
+		dict(Sample = 'BAR', N = 4, D47 = 0.5, D48 = 0.1, d13C_VPDB = -14.0, d18O_VPDB = -22.0),
+		], seed = 123)
+
+mydata2 = virtual_data(
+	session = 'mysession2',
+	samples = [
+		dict(Sample = 'ETH-1', N = 4),
+		dict(Sample = 'ETH-2', N = 4),
+		dict(Sample = 'ETH-3', N = 4),
+		dict(Sample = 'FOO', N = 4, D47 = 0.6, D48 = 0.1, d13C_VPDB = -4.0, d18O_VPDB = -12.0),
+		dict(Sample = 'BAR', N = 4, D47 = 0.5, D48 = 0.1, d13C_VPDB = -14.0, d18O_VPDB = -22.0),
+		], seed = 456)
+
+mydata = D47data(mydata1+mydata2, verbose = True)
+
+mydata.refresh()
+mydata.wg()
+mydata.crunch()
+mydata.plot_bulk_compositions()
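As documented above for `plot_bulk_compositions()`, running either script saves one PDF per sample plus `__all__.pdf` under `./output/bulk_compositions/`.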