1 Notes about data acquisition and processing

This R Markdown file performs calibrations for dissolved inorganic carbon (\(\sum\text{CO}_2\)) concentration (\(c\)) and isotopic composition (\(\delta^{13}\text{C}\)) measured by acidifying water samples and transferring the resultant \(\text{CO}_2\) via a Thermo GasBench to an isotope ratio mass spectrometer. Peak amplitudes are fit with an exponential decay model to check for proper functioning of the GasBench needle, and only peaks derived from good injections are retained. Exponential fits are then performed on all analyses to project back to the m/z 44 amplitude that would have resulted from an injection occurring at the moment of needle puncture. The expected pCO\(_2\) of the standards is calculated from the mass of CaCO\(_3\) loaded into each vial together with the volumes of the vial, the liquid, and the H\(_3\)PO\(_4\) added. A calibration curve is constructed by plotting the expected pCO\(_2\) of the standards vs. their m/z 44 amplitude at t\(_0\), and \(\sum\text{CO}_2\) of the original water sample is calculated from this calibration. \(\delta^{13}\)C is corrected for drift, linearity, and isotopic discrimination.
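In brief, three relationships underpin the processing below: exponential dilution of the headspace CO\(_2\) by the He flow, a linear calibration of the projected t\(_0\) amplitude against expected pCO\(_2\) (slope \(a\) and intercept \(b\) fitted below), and Henry's-law partitioning of CO\(_2\) between headspace and liquid:

\[
A(t) = A_0\,e^{-kt},
\qquad
A_0 = a\,p_{\text{CO}_2} + b,
\qquad
H_{cc} = \frac{c_{\text{aq}}}{c_{\text{g}}},
\]

where \(A\) is the m/z 44 amplitude, \(k\) is the He-dilution rate constant fitted below, and \(H_{cc}\) is the dimensionless Henry's constant corrected to laboratory temperature and ionic strength.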

2 Load libraries

# load libraries
library(tidyverse) # dplyr, tidyr, ggplot
library(isoreader) # reading isotope data files
library(isoprocessor) # processing isotope data files
library(plotly) # interactive plots
library(knitr) # generating reports
library(ggpmisc) # add equations of best fit to ggplot
library(chemCal) # calculations from calibration curve

# source all relevant scripting files
source(file.path("scripts", "plotting_functions.R"))

# global knitting options for automatic saving of all plots as .png and .pdf
knitr::opts_chunk$set(
  dev = c("png", "pdf"), fig.keep = "all",
  dev.args = list(pdf = list(encoding = "WinAnsi", useDingbats = FALSE)),
  fig.path = file.path("fig_output/", paste0(gsub("\\.[Rr]md", "/", knitr::current_input()))),
  cache.path = file.path("cache/", paste0(gsub("\\.[Rr]md", "/", knitr::current_input())))
)

Data were processed using the packages isoreader (version 1.3.0) and isoprocessor (version 0.6.11).

3 Load Data

iso_files_raw <- 
  file.path(
    "data_raw/180228_DBN_DIC", "180228_DBN_DIC_data.cf.rds"
  ) %>%
  # read data files in parallel for fast read
  iso_read_continuous_flow(parallel = TRUE)

4 Process file info & peak table

# process
iso_files <- iso_files_raw %>% 
  # set peak table from vendor data table
  iso_set_peak_table_from_auto_vendor_data_table() %>% 
  # rename key file info columns
  iso_rename_file_info(id1 = `Identifier 1`, type = `Identifier 2`) %>%
  # parse text info into numbers
  iso_parse_file_info(number = Analysis) %>%
  # process other file information that is specific to the naming conventions
  # of this particular sequence
  iso_mutate_file_info(
    # what was the mass of carbonate standard loaded?
    mass_loaded = parse_number(Comment) %>% iso_double_with_units("ug")
  )
## Warning: 'iso_set_peak_table_from_auto_vendor_data_table' has been renamed
## to 'iso_set_peak_table_automatically_from_vendor_data_table'. Please call the
## latter function directly to avoid this warning.
## Info: setting peak table for 47 file(s) from vendor data table with the following renames:
## - for 47 file(s): 'Nr.'->'peak_nr', 'Is Ref.?'->'is_ref', 'Start'->'rt_start', 'Rt'->'rt', 'End'->'rt_end', 'Ampl 44'->'amp44', 'Ampl 45'->'amp45', 'Ampl 46'->'amp46', 'BGD 44'->'bgrd44_start', 'BGD 45'->'bgrd45_start', 'BGD 46'->'bgrd46_start', 'BGD 44'->'bgrd44_end', 'BGD 45'->'bgrd45_end', 'BGD 46'->'bgrd46_end', 'rIntensity 44'->'area44', 'rIntensity 45'->'area45', 'rIntensity 46'->'area46', 'rR 45CO2/44CO2'->'r45/44', 'rR 46CO2/44CO2'->'r46/44', 'rd 45CO2/44CO2'->'rd45/44', 'rd 46CO2/44CO2'->'rd46/44', 'd 45CO2/44CO2'->'d45/44', 'd 46CO2/44CO2'->'d46/44', 'd 13C/12C'->'d13C', 'd 18O/16O'->'d18O', 'd 17O/16O'->'d17O', 'AT% 13C/12C'->'at13C', 'AT% 18O/16O'->'at18O'
## Info: renaming the following file info across 47 data file(s): 'Identifier 1'->'id1', 'Identifier 2'->'type'
## Info: parsing 1 file info columns for 47 data file(s):
##  - to number: 'Analysis'
## Info: mutating file info for 47 data file(s)

4.1 Chromatograms

Display chromatograms of all samples and standards. The first four peaks are reference peaks. The smaller sharp peak after that is a half-inject used to clear the sample loop.

chroms <- iso_files %>% 
  iso_plot_continuous_flow_data(
    data = c(44),
    color = NULL
  ) +
  theme(legend.position = "bottom")

chroms

4.2 Peak maps

# note: we do not consider the first two sample inject peaks as analyte peaks, to be sure that there is no carryover between samples

peak_maps <- 
  tibble::tribble(
    ~compound,          ~ref_nr,    ~`rt`,
    # peak map data (row-by-row)
    "CO2 ref",          1,      26,
    "CO2 ref",          2,      51,
    "CO2 ref",          3,      75,
    "CO2 ref",          4,      100,
    "CO2 half inject",  NA,     148,
    "CO2 first sample peak",      NA,     169,
    "CO2 analyte",      NA,     219,
    "CO2 analyte",      NA,     269,
    "CO2 analyte",      NA,     319,
    "CO2 analyte",      NA,     368,
    "CO2 analyte",      NA,     418,
    "CO2 analyte",      NA,     468,
    "CO2 analyte",      NA,     518,
    "CO2 analyte",      NA,     568,
    "CO2 analyte",      NA,     617
  )
peak_maps %>% knitr::kable(digits = 0)
compound ref_nr rt
CO2 ref 1 26
CO2 ref 2 51
CO2 ref 3 75
CO2 ref 4 100
CO2 half inject NA 148
CO2 first sample peak NA 169
CO2 analyte NA 219
CO2 analyte NA 269
CO2 analyte NA 319
CO2 analyte NA 368
CO2 analyte NA 418
CO2 analyte NA 468
CO2 analyte NA 518
CO2 analyte NA 568
CO2 analyte NA 617

4.3 Fetch peak table

# identify peaks
peak_table_w_ids <- iso_files %>% 
  iso_map_peaks(peak_maps) %>%
  # peak table
  iso_get_peak_table(include_file_info = everything())
## Info: 666 of 670 peaks in 47 files were successfully mapped using a single peak map. 4 peak(s) could not be mapped. 39 expected peak(s) are missing.
## Info: aggregating peak table from 47 data file(s), including file info 'everything()'

Display an example chromatogram with peaks labeled.

chrom_labeled <- iso_files %>%
  iso_filter_files(id1 == "146 ug LSVEC dis3") %>% 
  iso_plot_continuous_flow_data(
    # select data and aesthetics
    data = c(44),
    color = id1,
    # provide our peak table with ids
    peak_table = peak_table_w_ids, 
    # define peak labels, this can be any valid expression
    peak_label = iso_format(id = peak_info)
  ) +
  theme(
    legend.position = "bottom"
  )
## Info: applying file filter, keeping 1 of 47 files
chrom_labeled

4.4 Select analyte peaks

# focus on analyte peaks
peak_table_analytes <- peak_table_w_ids %>% 
  # omit reference peaks and half inject for further processing (i.e. analyte peaks only)
  filter(compound == "CO2 analyte")

# print
peak_table_analytes

5 Concentration calibrations

5.1 Quality control of peaks

Add a column to the data frame for the time between the GasBench needle puncturing the vial and each injection onto the GC-IRMS.

sample_transfer_t_s <- 53 # seconds between needle stabbing vial and start of acquisition

rt_CO2_s <- 150 # actual CO2 retention time from start of inject to m/z 44 peak on mass spec

peak_table_analytes <- peak_table_analytes %>% mutate(t_stab_to_inject_s = rt - rt_CO2_s + sample_transfer_t_s) # make column for time between stabbing of gasbench needle and injection
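As a worked example using the peak map above, the first analyte peak (rt = 219 s) corresponds to

\[
t_{\text{stab}\to\text{inject}} = 219\ \text{s} - 150\ \text{s} + 53\ \text{s} = 122\ \text{s}
\]

between needle puncture and injection.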

Find and plot a model chromatogram with good injections.

iso_files %>% iso_filter_files(id1 == "207 ug YULE drift1") %>% 
  iso_plot_continuous_flow_data(
    data = c("44"),
    color = file_id
  ) +
  theme(legend.position = "bottom")
## Info: applying file filter, keeping 1 of 47 files

Plot the peak amplitudes of the training chromatogram vs. time from start of He dilution. A non-linear least squares model of exponential decay of m/z 44 amplitude vs. time from needle puncture to inject provides a good fit to the data. This is consistent with physical reality since the sample CO\(_2\) is in a constant volume and is being diluted with a constant flow of He.
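A brief justification of the exponential form: treating the vial headspace as a well-mixed volume \(V\) flushed by He at a constant flow rate \(Q\), a mass balance on CO\(_2\) gives

\[
V\,\frac{dC}{dt} = -Q\,C
\quad\Longrightarrow\quad
C(t) = C_0\,e^{-kt}, \qquad k = \frac{Q}{V},
\]

and, assuming the m/z 44 amplitude scales with the CO\(_2\) mole fraction sampled by the loop, the amplitude should follow the same decay.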

training_chrom <- peak_table_analytes %>% filter(id1 == "207 ug YULE drift1")

training_chrom %>% 
   ggplot() +
  aes(
    x = t_stab_to_inject_s,
    y = amp44
    ) +
  geom_point() +
  geom_line(
    data = function(df) mutate(
      df,
      amp44 = nls(amp44 ~ A * exp(-k * t_stab_to_inject_s),
                  start = c(A = 10000, k = 0.0001), data = df) %>% predict()
    ),
    alpha = 0.4
  ) +
    scale_x_continuous(name = "time from GasBench needle puncture until injection on GC-IRMS [s]", limits = c(0, 600), expand = c(0,0)) +
  scale_y_continuous(name = ("m/z 44 peak amplitude [mV]"))+
  theme_bw()

Fit a non-linear least squares model to the ‘training chromatogram’.

exp_model <- nls(data = training_chrom, formula = amp44 ~ A*exp(-k*t_stab_to_inject_s), start = c(A = 10000, k = 0.0001)) # make and save model

summary(exp_model) # print summary of the model
## 
## Formula: amp44 ~ A * exp(-k * t_stab_to_inject_s)
## 
## Parameters:
##    Estimate Std. Error t value Pr(>|t|)    
## A 1.567e+04  1.283e+01  1221.4  < 2e-16 ***
## k 7.805e-04  2.544e-06   306.8 1.03e-15 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 11.93 on 7 degrees of freedom
## 
## Number of iterations to convergence: 4 
## Achieved convergence tolerance: 1.259e-06

Save the exponential decay constant, k.

k <- as.numeric(coef(exp_model)[2]) # save k

k # print k
## [1] 0.0007804918
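With the decay constant in hand, any measured amplitude can be projected back to the moment of needle puncture,

\[
A_0 = A(t)\,e^{kt};
\]

for example, a peak injected 122 s after puncture is scaled up by \(e^{(7.8\times10^{-4}\ \text{s}^{-1})(122\ \text{s})} \approx 1.10\). This is the per-peak correction applied in the chunks below.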

For demonstration purposes, find and plot a model chromatogram with some bad injections.

We can see in the following chromatogram that the first few peaks don’t follow the expected trend of decreasing amplitude with time. This indicates partial needle clogging, probably due to water condensation on the underside of the septum, which caused some weak injections in this sample.

iso_files %>% iso_filter_files(id1 == "251 ug YULE lin") %>% 
  iso_plot_continuous_flow_data(
    data = c(44),
    color = file_id
  ) +
  theme(legend.position = "bottom")
## Info: applying file filter, keeping 1 of 47 files

For demonstration purposes, now re-plot, for the same chromatogram as above, the m/z 44 peak amplitude vs. the time between the GasBench needle puncturing the vial and each injection onto the GC-IRMS.

weak_inject_chrom <- peak_table_analytes %>% filter(id1 == "251 ug YULE lin")

weak_inject_chrom %>% 
   ggplot() +
  aes(
    x = t_stab_to_inject_s,
    y = amp44
    ) +
  geom_point()+
    scale_x_continuous(name = "time from GasBench needle puncture until injection on GC-IRMS [s]", limits = c(0, 600), expand = c(0,0)) +
  scale_y_continuous(name = ("m/z 44 peak amplitude [mV]"))+
  theme_bw()

Now, plot all peaks alongside the fit anchored to the peak with the largest projected m/z 44 amplitude at t\(_0\). It is visually clear which injections were affected by partial needle obstruction.

weak_inject_chrom <- weak_inject_chrom %>% mutate(amp44_t0_per_peak = amp44 * exp(k*t_stab_to_inject_s)) # For each peak, calculate the projected amplitude at t0 in the chromatogram

weak_inject_chrom_max_amp44_t0  <- weak_inject_chrom %>% filter(amp44_t0_per_peak == max(amp44_t0_per_peak)) # select largest projected amplitude at t0 in the chromatogram
 
max_amp44_t0 <- weak_inject_chrom_max_amp44_t0$amp44_t0_per_peak # save largest projected amplitude at t0

t_max_amp44_t0 <- weak_inject_chrom_max_amp44_t0$t_stab_to_inject_s # save the time of the peak with largest projected amplitude at t0
 
weak_inject_chrom_fit_function <- function (x) max_amp44_t0*exp(-k*x) # write function to calculate amp44 as a function of t based on the projected amplitude at t0 estimated from the largest peak of the weak inject chromatogram and the k value from the good chromatogram

# plot the peaks and the fit based on the point with the largest projected amp44 at t0
weak_inject_chrom %>%
  ggplot() +
      stat_function(data = data.frame(amp44=c(1, 600)), aes(x=amp44), fun = weak_inject_chrom_fit_function, geom="line") +
  aes(
    x = t_stab_to_inject_s,
    y = amp44
    ) +
  geom_point(color="blue") +
    scale_x_continuous(name = "time from GasBench needle puncture until injection on GC-IRMS [s]", limits = c(0, 600), expand = c(0,0)) +
  scale_y_continuous(name = ("m/z 44 peak amplitude [mV]"))+
  theme_bw()

Estimate the m/z 44 peak amplitude that each peak would have had if a proper injection had occurred, based on the maximum projected amplitude at t\(_0\) for each analysis.

peak_table_analytes_max_peaks <- peak_table_analytes %>%
  # for each peak, calculate the projected amplitude at t0 in the chromatogram
  mutate(amp44_t0_per_peak = amp44 * exp(k * t_stab_to_inject_s)) %>%
  group_by(file_id) %>%
  mutate(
    # largest projected m/z 44 peak amplitude at t0 for a given analysis
    max_amp44_t0 = max(amp44_t0_per_peak),
    # t_stab_to_inject for the peak with the largest projected m/z 44 peak amplitude at t0
    t_of_max_amp44_t0_copied = max(ifelse(amp44_t0_per_peak == max_amp44_t0, t_stab_to_inject_s, 0)),
    # estimate what amp44 would have been based on that peak and the previously calculated value of k
    amp44_expected = max_amp44_t0 * exp(-k * t_stab_to_inject_s),
    # expected amp44 + 5% and - 5%
    amp44_expected_plus5percent = amp44_expected + amp44_expected * .05,
    amp44_expected_minus5percent = amp44_expected - amp44_expected * .05
  )

Plot the measured m/z 44 peak amplitude as well as the expected peak amplitude ± 5%. By playing around with the interactive plot, it should be clear that normal injections are consistently within ±5% of the expected value, and the bad injections are obviously outside this range. The 5% is an arbitrarily selected tuning factor.

It’s worth noting that the ~12 ml Exetainer vials generally require a somewhat larger tolerance than the ~119 ml vials that I sometimes use for lower-concentration samples (i.e. 5% as opposed to 1.5%). The larger vials have a larger headspace and therefore dilute more slowly, so the peak amplitudes are less sensitive to the exact fitting parameters and a tighter tolerance works for the bigger vials.

amp44_measured_v_expected <- peak_table_analytes_max_peaks %>%
   ggplot() +
  aes(
    x = t_stab_to_inject_s,
    y = amp44,
    color = file_id
    ) +
geom_pointrange(aes(y = amp44_expected, ymin = amp44_expected_minus5percent, ymax = amp44_expected_plus5percent, label = "expected_amp44"), size = 2, alpha=0.5)+
  geom_point(size=1, aes(label = d13C))+
      scale_x_continuous(name = "time from GasBench needle puncture until injection on GC-IRMS [s]", limits = c(0, 600), expand = c(0,0)) +
  scale_y_continuous(name = ("m/z 44 peak amplitude [mV]"))+
    theme_bw()
## Warning: Ignoring unknown aesthetics: label

## Warning: Ignoring unknown aesthetics: label
amp44_measured_v_expected %>% ggplotly()
## Don't know how to automatically pick scale for object of type iso_double_with_units/vctrs_vctr. Defaulting to continuous.

5.1.1 Filter out bad peaks

Filter out peaks that are not within 5% of the expected value for amp44

peak_table_analytes_max_peaks_filtered <- peak_table_analytes_max_peaks %>% filter(amp44 > amp44_expected_minus5percent & amp44 < amp44_expected_plus5percent) # filter out peaks that are not within 5% of the expected value for amp44

Plot peaks filtered for good injections

filtered_for_good_injects <- peak_table_analytes_max_peaks_filtered  %>%
   ggplot() +
  aes(
    x = t_stab_to_inject_s,
    y = amp44,
    color = file_id
    )+
        scale_x_continuous(name = "time from GasBench needle puncture until injection on GC-IRMS [s]", limits = c(0, 600), expand = c(0,0)) +
  scale_y_continuous(name = ("m/z 44 peak amplitude [mV]"))+
  geom_point(size=2)+
    theme_bw()

filtered_for_good_injects  %>% ggplotly()

Write a function for calculating the amplitude of a signal at time 0, given vectors of time and signal values.

# function for calculating the amplitude of a signal at time 0 given vectors of time and signal
exp_decay_t0 <- function(time, signal, A_guess = 5000, k_guess = k) {
  # fit an exponential decay and return the fitted amplitude at t = 0
  fit <- nls(signal ~ A * exp(-k * time), start = c(A = A_guess, k = k_guess))
  coef(fit)[["A"]]
}
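As a quick sanity check (a minimal usage sketch, not part of the original workflow), applying the function to the training chromatogram from above should return an amplitude at t\(_0\) close to the A coefficient fitted earlier (approximately 1.567 × 10\(^4\) mV):

# check the helper against the training chromatogram: should recover ~A from exp_model
exp_decay_t0(time = training_chrom$t_stab_to_inject_s, signal = training_chrom$amp44)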

Count peaks left after filtering

peak_counts <- peak_table_analytes_max_peaks_filtered  %>% group_by(file_id) %>% summarise(n=n()) # summarise how many peaks are left after filtering for bad injects

peak_counts #print

Only keep samples with at least 4 peaks after quality filtering

peak_table_analytes_summarise <- peak_table_analytes_max_peaks_filtered  %>% group_by(file_id) %>% filter(n()>=4) # only keep samples with at least 4 peaks after quality filtering

5.1.2 Calculate the m/z 44 peak amplitude at t\(_{0}\) more exactly, based on all peaks in an analysis

peak_table_analytes_summarise <- peak_table_analytes_summarise %>% group_by(file_id) %>% mutate(amp44_t0 = exp_decay_t0(time = t_stab_to_inject_s, signal = amp44)) # calculate more exactly amp44 at t0 based on all peaks in the model

5.1.3 Condense multi-peak dataframe into summary dataframe with one row per analysis.

data <- 
  peak_table_analytes_summarise %>% 
  group_by(file_id, id1, type, mass_loaded, amp44_t0) %>% 
  summarize(
    num.peaks=n(),
    d13C.measured=mean(d13C),
    d13C.sd=sd(d13C),
    amp44_mean=mean(amp44),
    amp44.sd=sd(amp44),
    inv.amp44=1/amp44_mean,
    file_datetime=mean(file_datetime)
  )
## `summarise()` has grouped output by 'file_id', 'id1', 'type', 'mass_loaded'. You can override using the `.groups` argument.
data <- data %>% ungroup()

Add column for data type

data <- data %>% mutate(type_general = ifelse(type == "sample", "sample", "standard"))

5.1.4 Calculate limit of quantitation (LOQ)

# Select method blanks
method_blanks <- data %>% filter(str_detect(file_id, "1 ml milliQ acidified"))

method_blanks <- method_blanks %>% ungroup()

select(method_blanks, file_id, amp44_t0) %>% kable()
file_id amp44_t0
10073__1 ml milliQ acidified 20180123 RU YF rep 1-0000.dxf 73.17363
10074__1 ml milliQ acidified 20180123 RU YF rep 2-0000.dxf 56.69220
10075__1 ml milliQ acidified 20180123 RU YF rep 3-0000.dxf 57.21363
10076__1 ml milliQ acidified 20180227 RU YF rep 1-0000.dxf 226.65427
10077__1 ml milliQ acidified 20180227 RU YF rep 2-0000.dxf 566.16338
10078__1 ml milliQ acidified 20180227 RU YF rep 3-0000.dxf 424.86666
10128__1 ml milliQ acidified 20180227 RU YF rep 5-0000.dxf 78.19520
# mean signal of method blanks
S_mb <- mean(method_blanks$amp44_t0)

S_mb # print
## [1] 211.8513
# standard deviation of signal of method blanks
sd_mb <- sd(method_blanks$amp44_t0)

sd_mb # print
## [1] 206.6456
# calculate the signal for limit of quantitation
# eq. 4.7.4 https://chem.libretexts.org/Bookshelves/Analytical_Chemistry/Book%3A_Analytical_Chemistry_2.0_(Harvey)/04_Evaluating_Analytical_Data/4.7%3A_Detection_Limits
# The ability to detect the analyte with confidence is not the same as the ability to report with confidence its concentration, or to distinguish between its concentration in two samples. For this reason the American Chemical Society’s Committee on Environmental Analytical Chemistry recommends the limit of quantitation, (SA)LOQ.

S_A_LOQ <- S_mb + 10 * sd_mb

S_A_LOQ # print 
## [1] 2278.307
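Written out with the values above, the quantitation threshold is

\[
S_{\text{LOQ}} = \bar{S}_{\text{mb}} + 10\,s_{\text{mb}} = 211.9\ \text{mV} + 10 \times 206.6\ \text{mV} \approx 2278\ \text{mV},
\]

so any analysis whose projected m/z 44 amplitude at t\(_0\) falls below ~2278 mV is flagged as below the LOQ.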

Make units explicit for subsequent calculations; add_row() in the following chunk can’t handle the double-with-units data class.

data <- data %>% iso_make_units_explicit()

# print
data

Check whether the data are quantitatable (i.e. above the limit of quantitation)

# insert dummy row for LOQ
# have to make units explicit here because add_row() can't handle double with units
data <- data %>% iso_make_units_explicit() %>% add_row(file_id = "LOQ", id1 = "LOQ", type_general = "sample", amp44_t0 = S_A_LOQ)

# add general names for data
data <- data %>% mutate(name = case_when(
  str_detect(id1, "5 min He purge 20180123") == TRUE ~ "5 min He purge 20180123",
  str_detect(id1, "5 min He purge 20171108") == TRUE ~ "5 min He purge 20171108",
  str_detect(id1, "1 ml milliQ acidified 20180123") == TRUE ~ "1 ml milliQ acidified 20180123",
  str_detect(id1, "1 ml milliQ acidified 20180227") == TRUE ~ "1 ml milliQ acidified 20180227",
  str_detect(id1, "BA1A_5566") == TRUE ~ "BA1A_55_66",
  str_detect(id1, "BA1A_100") == TRUE ~ "BA1A_100_400",
  str_detect(id1, "LOQ") == TRUE ~ "LOQ",
  str_detect(id1, "YULE") == TRUE ~ "YULE",
  str_detect(id1, "HIS") == TRUE ~ "HIS",
  str_detect(id1, "LSVEC") == TRUE ~ "LSVEC"
))

# add column for samples above or below limit of quantitation
data <- data %>% mutate(quantitatable = ifelse(amp44_t0 >= S_A_LOQ, TRUE, FALSE))

# select relevant data and print
data %>% select(file_id, amp44_t0, quantitatable) %>% kable()
file_id amp44_t0 quantitatable
10067__5 min He purge 20180123 rep 1-0000.dxf 66.86527 FALSE
10068__5 min He purge 20180123 rep 2-0000.dxf 55.64026 FALSE
10069__5 min He purge 20180123 rep 3-0000.dxf 66.63987 FALSE
10070__5 min He purge 20171108 rep 1-0000.dxf 145.28541 FALSE
10071__5 min He purge 20171108 rep 2-0000.dxf 130.42173 FALSE
10072__5 min He purge 20171108 rep 3-0000.dxf 178.70127 FALSE
10073__1 ml milliQ acidified 20180123 RU YF rep 1-0000.dxf 73.17363 FALSE
10074__1 ml milliQ acidified 20180123 RU YF rep 2-0000.dxf 56.69220 FALSE
10075__1 ml milliQ acidified 20180123 RU YF rep 3-0000.dxf 57.21363 FALSE
10076__1 ml milliQ acidified 20180227 RU YF rep 1-0000.dxf 226.65427 FALSE
10077__1 ml milliQ acidified 20180227 RU YF rep 2-0000.dxf 566.16338 FALSE
10078__1 ml milliQ acidified 20180227 RU YF rep 3-0000.dxf 424.86666 FALSE
10079__28 ug YULE lin-0000.dxf 2047.77838 FALSE
10080__49 ug YULE lin-0000.dxf 1925.23421 FALSE
10081__92 ug YULE lin-0000.dxf 6888.47340 TRUE
10082__163 ug YULE lin-0000.dxf 12307.90025 TRUE
10083__191 ug YULE lin-0000.dxf 14358.00831 TRUE
10084__251 ug YULE lin-0000.dxf 19143.10166 TRUE
10085__301 ug YULE lin-0000.dxf 22998.61980 TRUE
10086__346 ug YULE lin-0000.dxf 26932.84838 TRUE
10087__407 ug YULE lin-0000.dxf 31028.35070 TRUE
10088__207 ug YULE drift1-0000.dxf 15669.61336 TRUE
10089__198 ug HIS mon1-0000.dxf 14700.23145 TRUE
10090__203 ug YULE dis1-0000.dxf 15249.13180 TRUE
10091__196 ug HIS dis2-0000.dxf 14887.51164 TRUE
10092__146 ug LSVEC dis3-0000.dxf 15004.60880 TRUE
10093__192 ug YULE drift2-0000.dxf 14415.33815 TRUE
10094__192 ug HIS mon2-0000.dxf 14911.69404 TRUE
10097__BA1A_5566 rep1-0000.dxf 10080.30400 TRUE
10098__BA1A_5566 rep2-0000.dxf 9922.64221 TRUE
10100__186 ug YULE drift3-0000.dxf 13578.65647 TRUE
10101__212 ug HIS mon3-0000.dxf 16143.71122 TRUE
10107__206 ug YULE drift4-0000.dxf 15443.03516 TRUE
10108__207 ug HIS mon4-0000.dxf 15454.17212 TRUE
10113__BA1A_100 rep1-0000.dxf 515.82689 FALSE
10114__198 ug YULE drift5-0000.dxf 14781.00122 TRUE
10115__201 ug HIS drift5-0000.dxf 15410.26673 TRUE
10116__BA1A_100 rep2-0000.dxf 528.20726 FALSE
10121__200 ug YULE drift6-0000.dxf 15204.99615 TRUE
10122__205 ug HIS mon6-0000.dxf 15063.61293 TRUE
10125__198 ug YULE drift7-0000.dxf 15065.95432 TRUE
10126__202 ug HIS mon7-0000.dxf 15269.63375 TRUE
10128__1 ml milliQ acidified 20180227 RU YF rep 5-0000.dxf 78.19520 FALSE
LOQ 2278.30687 TRUE

5.2 Create calibration curve

Correct data types for calculations

5.3 Adjust some constants depending on sample preparation. User input needed.

vol_vial_ml <- 11.7 # volume of Exetainer in ml with septum screwed down
vol_H2O_sample_ml <- 1 # volume of water sample in ml
vol_H3PO4_added_ml <- .1 # volume of concentrated H3PO4 added to samples and standards

Select standards for the calibration curve

linC <- data %>% filter(type == "lin.std") # filter for linearity standards

Plot the calibration curve based on mass loaded

calib_DIC <-
ggplot (linC, aes(x=`mass_loaded [ug]`, y=amp44_t0, label = num.peaks)) +
  geom_point() +
          scale_x_continuous(name = "mass CaCO3 loaded [µg]") +
  scale_y_continuous(name = ("m/z 44 peak amplitude t0 [mV]"))+
  theme_bw()

calib_DIC %>% ggplotly()

Something appears wrong with the 49 µg standard. Perhaps there was an error in weighing and transferring the standard, or a piece of standard got stuck on the side of the Exetainer and did not dissolve, or the cap was not properly closed, allowing CO\(_2\) to diffuse out. The chromatogram below looks fine, so it’s unclear what the problem was.

iso_files %>% iso_filter_files(id1 == "49 ug YULE lin") %>% 
  iso_plot_continuous_flow_data(
    data = c(44),
    color = id1
  )
## Info: applying file filter, keeping 1 of 47 files

Cull 49 µg YULE from linearity standards

linC <- linC %>% filter(id1 != "49 ug YULE lin") # low yield for 49 ug sample 

Replot calibration curve with 49 µg standard culled

calib_mass_loaded_summ <- summary(lm(linC$amp44_t0 ~ linC$`mass_loaded [ug]`)) # summarize regression of m/z 44 amplitude at t0 vs. mass loaded

calib_DIC_2  <- 
  ggplot(linC, aes(x=`mass_loaded [ug]`, y=amp44_t0)) +
  geom_smooth(method="lm", color = "blue") +
  geom_point(shape=21, fill="black", size = 2)+
  stat_poly_eq(aes(label =  paste(stat(eq.label), stat(rr.label), sep = "~~~~")),
               formula = y ~ x, parse = TRUE, rr.digits = 6, color = "blue")+
 scale_x_continuous(name = latex2exp::TeX("mass CaCO$_3$ loaded $\\[$µg$\\]$"))+
 scale_y_continuous(name = latex2exp::TeX("m/z 44 peak amplitude t$_0$ $\\[$mV$\\]$"))+
theme_bw()

calib_DIC_2
## `geom_smooth()` using formula 'y ~ x'

Prepare constants and equations to calculate pCO\(_2\) from µg CaCO\(_3\)

# calculating henry's constant at lab conditions

R <- 0.083144598 # R (l * bar * K−1 * mol−1)
Pa_bar  <- 1e5 # Pa/bar
l_m3 <- 1e3 # l m^-3

Hcp_CO2_25C_DI <- 3.30E-04 # Henry's constant (Hcp) @ 298.15 K in deionized water (Sander, 2015) [mol m^-3 Pa^-1]
#eqn:  Hcc = c(aq) / c(g)
#Hcc = Hcp * R * T
Hcp_lit_temp_K <- 298.15 # temp in K of literature henry constant

Hcp_CO2_25C_DI_bar <- Hcp_CO2_25C_DI * Pa_bar / l_m3 # Hcp mol L^-1 bar^-1
Hcc_CO2_25C_DI <- Hcp_CO2_25C_DI_bar * R * Hcp_lit_temp_K # dimensionless Hcc

Hcp_temp_correct_factor  <- 2400 #dlnHcp/d(1/T) [K] temperature correction factor (Sander, 2015)

lab_temp_C <- 21
lab_temp_K <- lab_temp_C + 273.15

Hcp_CO2_lab_temp_DI <- Hcp_CO2_25C_DI * exp(Hcp_temp_correct_factor * (1/lab_temp_K - 1/Hcp_lit_temp_K)) #Henry constant at lab temp in DI water [mol m^-3 Pa^-1]

Hcp_CO2_lab_temp_DI_bar <- Hcp_CO2_lab_temp_DI * Pa_bar / l_m3 # Hcp mol L^-1 bar^-1

Hcc_CO2_lab_temp_DI <- Hcp_CO2_lab_temp_DI_bar * R * lab_temp_K # dimensionless Hcc

PO4_stock_M <- 14.8 # moles / liter of phosphate in concentrated stock sol'n (85 wt %) https://www.sigmaaldrich.com/chemistry/stockroom-reagents/learning-center/technical-library/reagent-concentrations.html

vol_l_ml <- vol_H2O_sample_ml + vol_H3PO4_added_ml # volume of water + acid in ml

water_H3PO4_ratio <- vol_H2O_sample_ml / vol_H3PO4_added_ml    # ratio of water to concentrated H3PO4 (85 wt%) in the DIC prep method
dilution_factor_H3PO4 <- 1/(1 + water_H3PO4_ratio) # dilution factor of concentrated H3PO4 during acidification of water sample

ci  <- PO4_stock_M * dilution_factor_H3PO4 # concentration of total phosphate and its protonated forms in acidified water sample [kmol m^-3 aka mol/l]
hi_H2PO4 <- 0.1025 # m^3kmol^-1 ion-specific parameter (schumpe 1993)
hg_CO2 <- -0.0183 # m^3kmol^-1 gas-specific parameter (schumpe 1993)

Hcc_CO2_lab_temp_and_ionic_strength <- Hcc_CO2_lab_temp_DI * 10^-((hi_H2PO4 + hg_CO2) * ci)
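In equation form, the corrections computed above are the van 't Hoff temperature correction, the conversion from \(H^{cp}\) to the dimensionless \(H^{cc}\) (with consistent units), and the Schumpe salting-out correction:

\[
H^{cp}(T) = H^{cp}(T_0)\,\exp\!\left[2400\ \text{K}\left(\frac{1}{T}-\frac{1}{T_0}\right)\right],
\qquad
H^{cc} = H^{cp} R T,
\qquad
H^{cc}_{\text{sample}} = H^{cc}_{\text{DI}} \cdot 10^{-\left(h_{\text{H}_2\text{PO}_4^-} + h_{\text{CO}_2}\right)\,c_{\text{PO}_4}},
\]

with \(T_0 = 298.15\ \text{K}\), \(T = 294.15\ \text{K}\), and \(c_{\text{PO}_4} = 14.8\ \text{M} \times \tfrac{0.1}{1.1} \approx 1.35\ \text{M}\) in the acidified sample.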

Calculate expected pCO\(_2\) from µg CaCO\(_3\) loaded

MM_CaCO3 <- 100.0869 #g/mol

linC <- linC %>% mutate(mol_CO2_total_expected = `mass_loaded [ug]` * 1e-6 / MM_CaCO3) # add column for total moles CO2 expected

linC <- linC %>% mutate(mol_ratio_CO2_g_aq = (vol_vial_ml - vol_l_ml) * 1e-3 / (vol_l_ml*1e-3 * Hcc_CO2_lab_temp_and_ionic_strength)) # add column for mole ratio of CO2 gas / aqueous

linC <- linC %>% mutate(mol_CO2_g = mol_CO2_total_expected / (1+ (1/mol_ratio_CO2_g_aq))) # add column for total moles CO2 in gas phase

linC <- linC %>% mutate(p_CO2_expected_bar = mol_CO2_g * R * lab_temp_K / ((vol_vial_ml - vol_l_ml)*1e-3)) # add column for expected pCO2
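Written out, the expected headspace pCO\(_2\) of a standard follows from closed-system partitioning of the CO\(_2\) liberated from the CaCO\(_3\):

\[
n_{\text{tot}} = \frac{m_{\text{CaCO}_3}}{M_{\text{CaCO}_3}},
\qquad
\frac{n_g}{n_{aq}} = \frac{V_g}{H^{cc}\,V_l},
\qquad
n_g = \frac{n_{\text{tot}}}{1 + n_{aq}/n_g},
\qquad
p_{\text{CO}_2} = \frac{n_g R T}{V_g},
\]

where \(V_g\) is the headspace volume (vial volume minus liquid plus acid) and \(V_l\) is the liquid volume (water plus acid).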

5.3.1 Re-plot calibration curve in terms of pCO\(_2\)

calib_DIC_3  <- 
  ggplot(linC, aes(x=p_CO2_expected_bar, y=amp44_t0)) +
  geom_smooth(method="lm", color = "blue") +
  geom_point(shape=21, fill="black", size = 2)+
  stat_poly_eq(aes(label =  paste(stat(eq.label), stat(rr.label), sep = "~~~~")),
               formula = y ~ x, parse = TRUE, rr.digits = 6, color = "blue")+
   scale_x_continuous(name = latex2exp::TeX("pCO$_2$ expected $\\[$bar$\\]$"))+
 scale_y_continuous(name = latex2exp::TeX("m/z 44 peak amplitude t$_0$ $\\[$mV$\\]$"))+
theme_bw()

calib_DIC_3 # show plot
## `geom_smooth()` using formula 'y ~ x'

Generate linear regression of calibration against pCO\(_2\)

calib_fit_pCO2 <- lm(linC$amp44_t0 ~ linC$p_CO2_expected_bar) # make linear regression of m/z 44 amplitude at t0 vs. expected pCO2

calib_fit_summ_pCO2  <- summary(calib_fit_pCO2) # summarize regression statistics

calib_fit_summ_pCO2 # print regression statistics
## 
## Call:
## lm(formula = linC$amp44_t0 ~ linC$p_CO2_expected_bar)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -200.75  -95.69  -35.88   25.56  417.21 
## 
## Coefficients:
##                          Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                -219.8      156.2  -1.407    0.209    
## linC$p_CO2_expected_bar 3593193.1    28736.2 125.041 1.76e-11 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 210.2 on 6 degrees of freedom
## Multiple R-squared:  0.9996, Adjusted R-squared:  0.9996 
## F-statistic: 1.564e+04 on 1 and 6 DF,  p-value: 1.764e-11
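As a hand check on the inverse prediction used below, inverting the fitted line at the LOQ amplitude gives

\[
p_{\text{CO}_2} = \frac{A_0 - b}{a} = \frac{2278.3 - (-219.8)}{3.593\times10^{6}\ \text{mV bar}^{-1}} \approx 7.0\times10^{-4}\ \text{bar},
\]

which matches the LOQ row in the calibrated sample table below.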

5.3.2 Calculate pCO\(_2\) of samples

Filter for samples

samples <- filter(data, type_general=="sample") # filter for samples

5.3.3 Apply calibration

# use inverse.predict function from chemCal to predict X based on Y
samples_calibrated <- samples %>% group_by(name) %>%  mutate(pCO2_bar = as.numeric(inverse.predict(object = calib_fit_pCO2, newdata = amp44_t0, alpha = 0.05)[1]))

# use inverse.predict function from chemCal to calculate the 95% confidence interval for the prediction
samples_calibrated <- samples_calibrated %>%  mutate(pCO2_bar_95_confidence = as.numeric(inverse.predict(object = calib_fit_pCO2, newdata = amp44_t0, alpha = 0.05)[2]))

# summarize calibrated samples 
samples_calibrated_summ <- samples_calibrated %>% group_by(name) %>% summarise(n = n(), `amp44_mean [mV] mean` = mean(`amp44_mean [mV]`), amp44_t0 = mean(amp44_t0), pCO2_bar = first(pCO2_bar), pCO2_bar_95_confidence = first(pCO2_bar_95_confidence), quantitatable = ifelse(any(quantitatable == FALSE) == TRUE, FALSE, TRUE))

samples_calibrated_summ %>% kable(digits = 6) # print
name n amp44_mean [mV] mean amp44_t0 pCO2_bar pCO2_bar_95_confidence quantitatable
1 ml milliQ acidified 20180123 3 50.02926 62.35982 0.000079 5.5e-05 FALSE
1 ml milliQ acidified 20180227 4 256.86973 323.96988 0.000151 5.2e-05 FALSE
5 min He purge 20171108 3 122.69898 151.46947 0.000103 5.4e-05 FALSE
5 min He purge 20180123 3 50.64277 63.04847 0.000079 5.5e-05 FALSE
BA1A_100_400 2 403.18256 522.01707 0.000206 5.9e-05 FALSE
BA1A_55_66 2 7889.09799 10001.47310 0.002845 4.9e-05 TRUE
LOQ 1 NA 2278.30687 0.000695 7.0e-05 TRUE

Now, just for demonstration purposes, convert mass CaCO\(_3\) loaded to \(c_{\sum\text{CO}_2}\) and re-plot. This is a simpler, more common, and slightly less exact/representative way to make such a calibration curve.

MM_CaCO3 <- 100.0869 #g/mol

linC <- linC %>% mutate(mol_CO2_total_expected = `mass_loaded [ug]` * 1e-6 / MM_CaCO3) # add column for total moles CO2 expected

linC <- linC %>% mutate(DIC_uM = mol_CO2_total_expected / (vol_H2O_sample_ml *1e-3) * 1e6) # dissolved inorganic carbon concentration of initial water sample by dividing total moles CO2 by volume of water

calib_DIC_4  <- 
  ggplot(linC, aes(x=DIC_uM, y=amp44_t0)) +
  geom_smooth(method="lm", color = "blue") +
  geom_point(shape=21, fill="black", size = 2)+
  stat_poly_eq(aes(label =  paste(stat(eq.label), stat(rr.label), sep = "~~~~")),
               formula = y ~ x, parse = TRUE, rr.digits = 6, color = "blue")+
 scale_x_continuous(name = latex2exp::TeX("estimated $\\textit{c}_{\\sum CO_2}$ $\\[$µmol$\\cdot$L$^{-1}\\]$"))+
 scale_y_continuous(name = latex2exp::TeX("m/z 44 peak amplitude t$_0$ $\\[$mV$\\]$"))+
  theme_bw()

calib_DIC_4
## `geom_smooth()` using formula 'y ~ x'

# make interactive plot
calib_DIC_5  <- 
  ggplot(linC, aes(x=DIC_uM, y=amp44_t0, label=id1))+
  geom_point()+
theme_bw()

calib_DIC_5 %>% ggplotly()

5.3.4 Calculate \(c_{\sum\text{CO}_2}\) of samples

### for concentration

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_g = pCO2_bar * (vol_vial_ml - vol_l_ml) * 1e-3 / (R * lab_temp_K)) # calculate moles CO2 in gas phase

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_aq = mol_CO2_g * vol_l_ml*1e-3 * Hcc_CO2_lab_temp_and_ionic_strength / ((vol_vial_ml - vol_l_ml)*1e-3)) #calculated moles CO2 in aqueous phase

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_tot = mol_CO2_g + mol_CO2_aq) # sum aqueous and gaseous CO2

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(DIC_uM = mol_CO2_tot / (vol_H2O_sample_ml * 1e-3) * 1e6) # convert total moles CO2 to dissolved inorganic C concentration of initial water sample

### for confidence interval

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_g_95_confidence = pCO2_bar_95_confidence * (vol_vial_ml - vol_l_ml) * 1e-3 / (R * lab_temp_K)) # calculate moles CO2 in gas phase

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_aq_95_confidence = mol_CO2_g_95_confidence * vol_l_ml*1e-3 * Hcc_CO2_lab_temp_and_ionic_strength / ((vol_vial_ml - vol_l_ml)*1e-3)) #calculated moles CO2 in aqueous phase

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(mol_CO2_tot_95_confidence = mol_CO2_g_95_confidence + mol_CO2_aq_95_confidence) # sum aqueous and gaseous CO2

samples_calibrated_summ <- samples_calibrated_summ %>% mutate(DIC_uM_95_confidence = mol_CO2_tot_95_confidence / (vol_H2O_sample_ml * 1e-3) * 1e6) # convert total moles CO2 to dissolved inorganic C concentration of initial water sample

# add column in which DIC_uM is rounded to the nearest ten µM
samples_calibrated_summ <- samples_calibrated_summ %>% mutate(`DIC_uM rounded tens` = round(DIC_uM, -1))

### clean and print
samples_select <- samples_calibrated_summ %>% select(name, n, `amp44_mean [mV] mean`, amp44_t0, pCO2_bar, pCO2_bar_95_confidence, DIC_uM, DIC_uM_95_confidence, `DIC_uM rounded tens`, quantitatable) 

samples_select %>% kable(caption = "DIC concentration from calibration of amp44 t0 vs. pCO2 expected")
DIC concentration from calibration of amp44 t0 vs. pCO2 expected
name n amp44_mean [mV] mean amp44_t0 pCO2_bar pCO2_bar_95_confidence DIC_uM DIC_uM_95_confidence DIC_uM rounded tens quantitatable
1 ml milliQ acidified 20180123 3 50.02926 62.35982 0.0000785 5.46e-05 36.48100 25.37534 40 FALSE
1 ml milliQ acidified 20180227 4 256.86973 323.96988 0.0001513 5.15e-05 70.30823 23.93714 70 FALSE
5 min He purge 20171108 3 122.69898 151.46947 0.0001033 5.45e-05 48.00323 25.31196 50 FALSE
5 min He purge 20180123 3 50.64277 63.04847 0.0000787 5.46e-05 36.57004 25.37485 40 FALSE
BA1A_100_400 2 403.18256 522.01707 0.0002064 5.90e-05 95.91653 27.39700 100 FALSE
BA1A_55_66 2 7889.09799 10001.47310 0.0028446 4.88e-05 1321.64833 22.65752 1320 TRUE
LOQ 1 NA 2278.30687 0.0006952 7.01e-05 323.01187 32.57864 320 TRUE
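In equation form, the back-calculation applied in the chunk above inverts the same partitioning used for the standards:

\[
n_g = \frac{p_{\text{CO}_2}\,V_g}{R T},
\qquad
n_{aq} = n_g\,\frac{H^{cc}\,V_l}{V_g},
\qquad
c_{\sum\text{CO}_2} = \frac{n_g + n_{aq}}{V_{\text{sample}}};
\]

for BA1A_55_66, for example, this reproduces the tabulated ~1322 µM.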

Plot the samples to check that their amplitudes roughly make sense given their calculated \(c_{\sum\text{CO}_2}\). Note that on this plot, the standards’ \(c_{\sum\text{CO}_2}\) was calculated simply from the mass loaded, whereas the samples’ values come from the pCO\(_2\) calibration. The samples plot as expected.

Dashed line = LOQ.

LOQ_DIC_um <- as.numeric(samples_select %>% filter(name == "LOQ") %>% select(DIC_uM))

amp44.DIC.sample.stnd.check <- ggplot(samples_calibrated_summ, aes(x=amp44_t0, y=DIC_uM, label=name, color = "samples")) +
  geom_hline(yintercept = LOQ_DIC_um, linetype = "dashed", alpha = 0.3)+
  geom_point()+
  geom_point(data = linC, aes(x=amp44_t0, y=DIC_uM, label=file_id, color="standards")) +
            scale_y_continuous(name = "DIC (µM)") +
  scale_x_continuous(name = ("m/z 44 amplitude t0 (mV)"))+
  theme_bw()
## Warning: Ignoring unknown aesthetics: label
amp44.DIC.sample.stnd.check %>% ggplotly()

6 \(\delta^{13}\)C calibrations

6.1 Initial dataset checks, plots, and culling

The first round of culling examines the standard deviations of the stable isotope values of the individual peaks (typically there are 10 peaks, prior to the earlier culling of bad injections) and uses that information to identify analytical outliers or analyses with problems or too few peaks. The cutoff for outliers is typically a standard deviation of 0.075–0.1 permil.

Make summary plots of reproducibility of isotopic values.

data <- data %>% filter(amp44_t0 >= S_A_LOQ) # keep only analyses with signal greater than or equal to the LOQ for concentration

sd.hist <- data %>% ggplot(aes(x=d13C.sd, fill = amp44_t0)) +
  geom_histogram(binwidth=.01) +
  theme_bw()+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))

sd.values <- data %>% filter(!is.na(d13C.sd)) %>% ggplot(aes(x=file_id, y=d13C.sd, label=id1, color = `amp44_mean [mV]`)) +
  geom_point() +
  theme_bw()+
  theme(axis.text.x = element_text(angle = 90, hjust = 1))+
  scale_color_gradientn(colours = c("red", "blue", "blue"), values = c(0, 0.2, 1))

sd.v.amp44 <- data %>% filter(!is.na(d13C.sd)) %>% ggplot(aes(x=`amp44_mean [mV]`, y=d13C.sd, fill=factor(num.peaks), label=file_id)) +
  geom_point(size=3, shape=21)+
  theme_bw()+
  scale_fill_discrete(name="# peaks")

sd.hist
## Warning: Removed 1 rows containing non-finite values (stat_bin).

sd.values %>% ggplotly()
sd.v.amp44 %>% ggplotly()

Remove any data points that did not replicate within uncertainty across the individual peaks and redo the plots. This creates a “culled data” table that shows samples and standards that shouldn’t be used. In this analysis, no samples needed to be culled.

d13C.sd.cutoff <- 0.1 # set a standard deviation on d13C measurements between peaks that you deem acceptable

culled.data <- subset(data, d13C.sd>d13C.sd.cutoff)  # subset data that don't meet the acceptability threshold for d13C standard deviation
wo.culled <- subset(data, d13C.sd<=d13C.sd.cutoff) # subset data that do meet the threshold (boundary values kept)

#print
culled.data

Plot the yields of the standards using interactive plots. Use these to cull more standards if need be, by looking for statistical outliers that coincide with yield problems. Note: LSVEC is Li\(_2\)CO\(_3\), whereas the other standards are CaCO\(_3\), so LSVEC is expected to have a different ratio of amplitude to mass loaded. With that in mind, all plotted standards look fine in this analytical session.

# make a data frame of standards
stds1 <- subset(wo.culled, type_general == "standard")

stds1 <- stds1 %>% mutate(standard = case_when(
  str_detect(id1, "YULE") == TRUE ~ "YULE",
  str_detect(id1, "HIS") == TRUE ~ "HIS",
  str_detect(id1, "LSVEC") == TRUE ~ "LSVEC"
))

yield.stds <- ggplot(stds1, aes(x = `mass_loaded [ug]`, y = `amp44_mean [mV]`, label=file_id)) +
  stat_smooth(method="lm") +
  geom_point(aes(color=standard)) +
  theme_bw()

# note: LSVEC is Li2CO3, not CaCO3, so it is expected to be above the yield of the other standards

d13C.stds <- 
  ggplot(stds1, aes(label=file_id)) +
  geom_point(shape=21, mapping = aes(x =`amp44_mean [mV]`, y = `d13C.measured [permil]`, fill = standard)) +
  facet_grid(standard ~ ., scales = "free") +
  theme_bw()

ggplotly(yield.stds)
## `geom_smooth()` using formula 'y ~ x'
ggplotly(d13C.stds )

6.2 Isotope standard values

6.2.1 Load isotope standards

standards <- 
  tibble::tribble(
    ~name,           ~true_d13C,
    "HIS",               -4.80,
    "LSVEC",               -46.6,
    "YULE",            -3.12
  ) %>% 
  mutate(
    true_d13C = iso_double_with_units(true_d13C, "permil")
  )
standards %>% knitr::kable(digits = 2)
name true_d13C
HIS -4.80
LSVEC -46.60
YULE -3.12

6.2.2 Add isotope standards

wo.culled_w_stds <- 
  wo.culled %>% 
  iso_add_standards(stds = standards, match_by = c(name)) 
## Info: matching standards by 'name' - added 3 standard entries to 24 out of 26 rows, added new column 'is_std_peak' to identify standard peaks

6.3 Generate a calibration with linear regression

calibs <- wo.culled_w_stds %>%
  # prepare for calibration
  iso_prepare_for_calibration() %>% 
  # run calibrations
  iso_generate_calibration(
    model = c(
      # reference scale correction
      delta_only = lm(`d13C.measured [permil]` ~ true_d13C),
      # multivariate with delta and amplitude
      delta_and_ampl = lm(`d13C.measured [permil]` ~ true_d13C + `amp44_mean [mV]`),
      # + the delta and amplitude cross term
      delta_cross_ampl = lm(`d13C.measured [permil]` ~ true_d13C * `amp44_mean [mV]`),
      # multivariate with delta and the datetime (i.e. checking for temporal drift)
      delta_and_time = lm(`d13C.measured [permil]` ~ true_d13C + file_datetime),
      delta_cross_time = lm(`d13C.measured [permil]` ~ true_d13C * file_datetime),
      # multivariate with delta, amplitude and datetime
      delta_and_ampl_and_time = lm(`d13C.measured [permil]` ~ true_d13C + `amp44_mean [mV]` + file_datetime),
      # multivariate with delta cross amplitude and datetime
      delta_cross_ampl_and_time = lm(`d13C.measured [permil]` ~ true_d13C * `amp44_mean [mV]` + file_datetime)
    ), 
    # specify which peaks to include in the calibration, here:
    # - all std_peaks (this filter should always be included!)
    use_in_calib = is_std_peak
  ) 
## Info: preparing data for calibration by nesting the entire dataset
## Info: generating calibration based on 7 models (delta_only = 'lm(`d13C.measured [permil]` ~ true_d13C)', delta_and_ampl = 'lm(`d13C.measured [permil]` ~ true_d13C + `amp44_mean [mV]`)', delta_cross_ampl = 'lm(`d13C.measured [permil]` ~ true_d13C * `amp44_mean [mV]`)', delta_and_time = 'lm(`d13C.measured [permil]` ~ true_d13C + file_datetime)', delta_cross_time = 'lm(`d13C.measured [permil]` ~ true_d13C * file_datetime)', delta_and_ampl_and_time = 'lm(...)', delta_cross_ampl_and_time = 'lm(...)') for 1 data group(s) with standards filter 'is_std_peak'. Storing residuals in new column 'resid'. Storing calibration info in new column 'in_calib'.

6.3.1 Coefficients

# look at coefficients and summary
calibs %>% 
  # unnest calibration parameters
  iso_get_calibration_parameters(
    select_from_coefs = 
      c(term, estimate, SE = std.error, signif),
    select_from_summary = 
      c(fit_R2 = adj.r.squared, fit_RMSD = deviance, residual_df = df.residual)) %>%
  arrange(term) %>% 
  knitr::kable(digits = 4)
## Info: retrieving coefficient column(s) 'c(term, estimate, SE = std.error, signif)' for calibration
## Info: retrieving summary column(s) 'c(fit_R2 = adj.r.squared, fit_RMSD = deviance, residual_df = df.residual)' for calibration
calib calib_ok calib_points term estimate SE signif fit_R2 fit_RMSD residual_df
delta_only TRUE 24 (Intercept) 0.2718 0.0344 *** (p < 0.001) 0.9997 0.4436 22
delta_and_ampl TRUE 24 (Intercept) -0.1318 0.0584 * (p < 0.05) 0.9999 0.1254 21
delta_cross_ampl TRUE 24 (Intercept) -1.5722 1.6558 - 0.9999 0.1208 20
delta_and_time TRUE 24 (Intercept) 6116.5549 3274.6750 . (p < 0.1) 0.9998 0.3804 21
delta_cross_time TRUE 24 (Intercept) 11960.3653 14641.0025 - 0.9998 0.3773 20
delta_and_ampl_and_time TRUE 24 (Intercept) 3714.5306 1775.0484 * (p < 0.05) 0.9999 0.1028 20
delta_cross_ampl_and_time TRUE 24 (Intercept) 3524.2228 1896.4103 . (p < 0.1) 0.9999 0.1022 19
delta_and_ampl TRUE 24 amp44_mean [mV] 0.0000 0.0000 *** (p < 0.001) 0.9999 0.1254 21
delta_cross_ampl TRUE 24 amp44_mean [mV] 0.0002 0.0001 - 0.9999 0.1208 20
delta_and_ampl_and_time TRUE 24 amp44_mean [mV] 0.0000 0.0000 *** (p < 0.001) 0.9999 0.1028 20
delta_cross_ampl_and_time TRUE 24 amp44_mean [mV] 0.0001 0.0001 - 0.9999 0.1022 19
delta_and_time TRUE 24 file_datetime 0.0000 0.0000 . (p < 0.1) 0.9998 0.3804 21
delta_cross_time TRUE 24 file_datetime 0.0000 0.0000 - 0.9998 0.3773 20
delta_and_ampl_and_time TRUE 24 file_datetime 0.0000 0.0000 * (p < 0.05) 0.9999 0.1028 20
delta_cross_ampl_and_time TRUE 24 file_datetime 0.0000 0.0000 . (p < 0.1) 0.9999 0.1022 19
delta_only TRUE 24 true_d13C 0.9883 0.0034 *** (p < 0.001) 0.9997 0.4436 22
delta_and_ampl TRUE 24 true_d13C 0.9874 0.0018 *** (p < 0.001) 0.9999 0.1254 21
delta_cross_ampl TRUE 24 true_d13C 0.5238 0.5325 - 0.9999 0.1208 20
delta_and_time TRUE 24 true_d13C 0.9887 0.0032 *** (p < 0.001) 0.9998 0.3804 21
delta_cross_time TRUE 24 true_d13C 1518.5454 3701.7177 - 0.9998 0.3773 20
delta_and_ampl_and_time TRUE 24 true_d13C 0.9876 0.0017 *** (p < 0.001) 0.9999 0.1028 20
delta_cross_ampl_and_time TRUE 24 true_d13C 0.8055 0.5249 - 0.9999 0.1022 19
delta_cross_ampl TRUE 24 true_d13C:amp44_mean [mV] 0.0000 0.0000 - 0.9999 0.1208 20
delta_cross_ampl_and_time TRUE 24 true_d13C:amp44_mean [mV] 0.0000 0.0000 - 0.9999 0.1022 19
delta_cross_time TRUE 24 true_d13C:file_datetime 0.0000 0.0000 - 0.9998 0.3773 20

6.3.2 Visualize Calibration Parameters

The visualization of the calibration parameters reveals that, as expected, the scale contraction (true_d13C) and amplitude terms are highly statistically significant (*** = p < 0.001). The datetime (drift) term is also statistically significant (* = p < 0.05).

calibs %>% iso_plot_calibration_parameters()
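For reference, writing the regression coefficients as \(\beta_i\), the calibration applied in the next section (delta_and_ampl_and_time) has the form

\[
\delta^{13}\text{C}_{\text{measured}} = \beta_0 + \beta_1\,\delta^{13}\text{C}_{\text{true}} + \beta_2\,A_{44} + \beta_3\,t,
\]

where \(A_{44}\) is the mean m/z 44 amplitude and \(t\) the file datetime; iso_apply_calibration then inverts this relationship to infer \(\delta^{13}\text{C}_{\text{true}}\) (stored as true_d13C_pred) for all analyses.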

6.4 Apply global calibration

calibs_applied <- 
  calibs %>% 
  # which calibration to use? can include multiple if desired to see the result
  # in this case, the amplitude- and time-conscious calibration is applied
  filter(calib == "delta_and_ampl_and_time") %>% 
  # apply calibration indication what should be calculated
  iso_apply_calibration(true_d13C, calculate_error = TRUE)
## Info: applying calibration to infer 'true_d13C' for 1 data group(s); storing resulting value in new column 'true_d13C_pred' and estimated error in new column 'true_d13C_pred_se'. This may take a moment... finished.
# calibration ranges
calibs_with_ranges <-
  calibs_applied %>% 
  # evaluate calibration range for the measured amplitude and predicted d13C
  iso_evaluate_calibration_range(`amp44_mean [mV]`, true_d13C_pred) 
## Info: evaluating range for terms 'amp44_mean [mV]' and 'true_d13C_pred' in calibration for 1 data group(s); storing resulting summary for each data entry in new column 'in_range'.
# show calibration ranges
calibs_with_ranges %>% 
  iso_get_calibration_range() %>% 
  iso_remove_list_columns() %>% 
  knitr::kable(d = 2)
## Info: retrieving all calibration range information for calibration
calib calib_ok calib_points term units min max
delta_and_ampl_and_time TRUE 24 amp44_mean [mV] NA 5547.16 25223.78
delta_and_ampl_and_time TRUE 24 true_d13C_pred permil -46.59 -2.96
# create calibrated peak table
peak_table_calibrated <- calibs_with_ranges %>% 
  iso_get_calibration_data()
## Info: retrieving all data

7 Evaluation of isotope calibration

7.1 Overview

All reported samples are within the calibrated range.

# replicate earlier overview plot but now with the calibrated delta values
# and with a highlight of the calibration ranges and which points are in range
peak_table_calibrated %>% 
  # visualize with convenience function iso_plot_data
  iso_plot_data(
    # choose x and y (multiple y possible)
    x = `amp44_mean [mV]`, y = true_d13C_pred,
    # choose aesthetics
    color = in_range, shape = is_std_peak, size = 3,
    # decide what geoms to include
    points = TRUE
  ) %>% 
  # highlight calibration range
  iso_mark_calibration_range() +
  # legend
  theme(legend.position = "bottom", legend.direction = "vertical")

7.2 Summary

# generate data summary
peak_data <- 
  peak_table_calibrated

# summarize replicates
peak_data_summary <- 
  peak_data %>% 
  # summarize for each sample and compound
  group_by(name) %>% 
  iso_summarize_data_table(`amp44_mean [mV]`, true_d13C_pred, true_d13C_pred_se) %>% select(-`true_d13C_pred_se sd`)

# add column in which d13C is rounded to hundredth of permil place
peak_data_summary <- peak_data_summary %>% mutate(`d13C rounded hundredth` = round(`true_d13C_pred mean`, 2))

# print
peak_data_summary %>% iso_make_units_explicit() %>% knitr::kable(d = 2)
name n amp44_mean [mV] mean amp44_mean [mV] sd true_d13C_pred mean [permil] true_d13C_pred sd true_d13C_pred_se mean [permil] d13C rounded hundredth [permil]
BA1A_55_66 2 8040.70 94.76 -14.64 0.30 0.08 -14.64
HIS 8 12207.70 353.16 -4.82 0.08 0.07 -4.82
LSVEC 1 12008.30 NA -46.59 NA 0.10 -46.59
YULE 15 13568.38 4869.74 -3.11 0.06 0.08 -3.11
# add data about DIC concentration to d13C-calibrated samples
samples_summ_w_concs <- peak_data_summary %>% left_join(samples_select %>% select(amp44_t0, DIC_uM, DIC_uM_95_confidence, `DIC_uM rounded tens`, quantitatable, name), by = "name")

samples_summ_w_concs %>% iso_make_units_explicit() %>% knitr::kable(d = 2)
name n amp44_mean [mV] mean amp44_mean [mV] sd true_d13C_pred mean [permil] true_d13C_pred sd true_d13C_pred_se mean [permil] d13C rounded hundredth [permil] amp44_t0 DIC_uM DIC_uM_95_confidence DIC_uM rounded tens quantitatable
BA1A_55_66 2 8040.70 94.76 -14.64 0.30 0.08 -14.64 10001.47 1321.65 22.66 1320 TRUE
HIS 8 12207.70 353.16 -4.82 0.08 0.07 -4.82 NA NA NA NA NA
LSVEC 1 12008.30 NA -46.59 NA 0.10 -46.59 NA NA NA NA NA
YULE 15 13568.38 4869.74 -3.11 0.06 0.08 -3.11 NA NA NA NA NA
# add data about LOQ and samples below LOQ
samples_summ_w_concs_w_LOQ <- union(samples_summ_w_concs, peak_data_summary %>% full_join(samples_select %>% select(n, amp44_t0, `amp44_mean [mV] mean`, DIC_uM, DIC_uM_95_confidence, `DIC_uM rounded tens`, quantitatable, name)), by = "name")
## Joining, by = c("name", "n", "amp44_mean [mV] mean")
# arrange summary data by the column describing whether each entry was above the limit of quantitation
samples_summ_w_concs_w_LOQ <- samples_summ_w_concs_w_LOQ %>% arrange(-quantitatable)

# print
samples_summ_w_concs_w_LOQ %>% iso_make_units_explicit() %>% knitr::kable(d = 2)
name n amp44_mean [mV] mean amp44_mean [mV] sd true_d13C_pred mean [permil] true_d13C_pred sd true_d13C_pred_se mean [permil] d13C rounded hundredth [permil] amp44_t0 DIC_uM DIC_uM_95_confidence DIC_uM rounded tens quantitatable
BA1A_55_66 2 8040.70 94.76 -14.64 0.30 0.08 -14.64 10001.47 1321.65 22.66 1320 TRUE
BA1A_55_66 2 7889.10 NA NA NA NA NA 10001.47 1321.65 22.66 1320 TRUE
LOQ 1 NA NA NA NA NA NA 2278.31 323.01 32.58 320 TRUE
1 ml milliQ acidified 20180123 3 50.03 NA NA NA NA NA 62.36 36.48 25.38 40 FALSE
1 ml milliQ acidified 20180227 4 256.87 NA NA NA NA NA 323.97 70.31 23.94 70 FALSE
5 min He purge 20171108 3 122.70 NA NA NA NA NA 151.47 48.00 25.31 50 FALSE
5 min He purge 20180123 3 50.64 NA NA NA NA NA 63.05 36.57 25.37 40 FALSE
BA1A_100_400 2 403.18 NA NA NA NA NA 522.02 95.92 27.40 100 FALSE
HIS 8 12207.70 353.16 -4.82 0.08 0.07 -4.82 NA NA NA NA NA
LSVEC 1 12008.30 NA -46.59 NA 0.10 -46.59 NA NA NA NA NA
YULE 15 13568.38 4869.74 -3.11 0.06 0.08 -3.11 NA NA NA NA NA
BA1A_55_66 2 8040.70 94.76 -14.64 0.30 0.08 -14.64 NA NA NA NA NA

8 Export

Save data to xlsx spreadsheet.

# export the global calibration with all its information and data to Excel
peak_table_calibrated %>% 
  iso_export_calibration_to_excel(
    filepath = format(Sys.Date(), "data_output/%Y%m%d_180228_DBN_DIC_calibrated.xlsx"),
    # include data summary as an additional useful tab
    `data summary` = samples_summ_w_concs_w_LOQ
  )
## Info: exporting calibrations into Excel '20210721_180228_DBN_DIC_calibrated.xlsx'...
## Info: retrieving all data
## Info: retrieving all coefficient information for calibration
## Info: retrieving all summary information for calibration
## Info: retrieving all calibration range information for calibration
## Info: export complete, created tabs 'data summary', 'all data', 'calib coefs', 'calib summary' and 'calib range'.