sah.deuterium

Description

This module contains methods to calculate deuteration levels from INS spectra.

Index

impulse_approx() Calculate the deuteration levels from INS spectra with the Impulse Approximation
ins_peaks() Estimate amine deuteration by integrating the partially deuterated peaks from INS
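A minimal workflow sketch (assumptions: the import path follows the module name, and the `ins`, `material_H`, `material_D` and `peaks` objects are prepared beforehand as described in each function's documentation below):

```python
# Sketch only: `ins`, `material_H`, `material_D` and `peaks` must be
# prepared beforehand; see each function's documentation below.
from sah import deuterium

deuteration, error = deuterium.impulse_approx(ins, material_H, material_D)  # tuple of floats
summary = deuterium.ins_peaks(ins, peaks)  # 'deuteration +- error' string
```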

  1"""
  2# Description
  3
  4This module contains methods to calculate deuteration levels from INS spectra.
  5
  6
  7# Index
  8
  9| | |
 10| --- | --- |
 11| `impulse_approx()` | Calculate the deuteration levels from INS spectra with the Impulse Approximation |
 12| `ins_peaks()`      | Estimates amine deuteration by integrating the partially-deuterated peaks from INS |
 13
 14---
 15"""


from copy import deepcopy
import numpy as np
import aton.alias as alias
from .classes import Spectra, Material
from .fit import area_under_peak, ratio_areas, plateau


def impulse_approx(
        ins: Spectra,
        material_H: Material,
        material_D: Material,
        threshold: float = 600,
        H_df_index: int = 0,
        D_df_index: int = 1,
    ) -> tuple:
 33    """Calculate the deuteration levels from INS spectra
 34    with the *Impulse Approximation*, see
 35    [Andreani et al., Advances in Physics 66, 1-73 (2017)](https://www.tandfonline.com/doi/full/10.1080/00018732.2017.1317963).
 36
 37    Protonated and deuterated materials must be specified
 38    as `sah.classes.Material` objects.
 39    Note that this approximation is very sensitive to the mass sample.
 40    The threshold controls the start of the plateau (in meV)
 41    to start considering Deep Inelastic Neutron Scattering (DINS).
 42    The protonated and deuterated dataframe indexes are specified
 43    by `H_df_index` and `D_df_index`, respectively.
 44
 45    In this approximation, the ideal ratio between
 46    the cross-sections and the experimental ratio between
 47    the pleteaus at high energies should be the same:
 48    $$
 49    \\frac{\\text{plateau_D}}{\\text{plateau_H}} \\approx \\frac{\\text{cross_section_D}}{\\text{cross_section_H}}
 50    $$
 51    Taking this into account, the deuteration is estimated as:
 52    $$
 53    \\text{Deuteration} = \\frac{1-\\text{real_ratio}}{1-\\text{ideal_ratio}}
 54    $$
 55    """
    ins = deepcopy(ins)
    material_H = deepcopy(material_H)
    material_D = deepcopy(material_D)
    material_H_grams = 1.0 if material_H.grams is None else material_H.grams
    material_H.grams = material_H_grams
    material_H.set()
    material_D_grams = 1.0 if material_D.grams is None else material_D.grams
    material_D.grams = material_D_grams
    material_D.set()

    material_H.print()
    material_D.print()

    # Make sure units are in meV
    units_in = ins.units
    if units_in not in alias.units['meV']:
        ins.set_units('meV', units_in)
    # The plateaus extracted below are normalized per mol of material
    # before comparing them.

    plateau_H, plateau_H_error = plateau(ins, [threshold, None], H_df_index)
    plateau_D, plateau_D_error = plateau(ins, [threshold, None], D_df_index)

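    # Uncertainties are propagated in quadrature: for q = x / y,
    # dq = |q| * sqrt((dx/x)**2 + (dy/y)**2).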
    plateau_H_normalized = plateau_H / material_H.mols
    plateau_H_normalized_error = plateau_H_normalized * np.sqrt((plateau_H_error / plateau_H)**2 + (material_H.mols_error / material_H.mols)**2)
    plateau_D_normalized = plateau_D / material_D.mols
    plateau_D_normalized_error = plateau_D_normalized * np.sqrt((plateau_D_error / plateau_D)**2 + (material_D.mols_error / material_D.mols)**2)

    # ratio if fully protonated = 1.0
    ratio = plateau_D_normalized / plateau_H_normalized  # ratio_ideal < ratio < 1.0
    ratio_ideal = material_D.cross_section / material_H.cross_section  # 0.0 < ratio_ideal < 1.0

    ratio_error = abs(ratio * np.sqrt((plateau_H_normalized_error / plateau_H_normalized)**2 + (plateau_D_normalized_error / plateau_D_normalized)**2))

    deuteration = (1 - ratio) / (1 - ratio_ideal)
    deuteration_error = abs(deuteration * np.sqrt((ratio_error / ratio)**2))

    print(f'Plateau H:                 {plateau_H} +- {plateau_H_error}')
    print(f'Plateau H / mols:          {plateau_H_normalized} +- {plateau_H_normalized_error}')
    print(f'Plateau D:                 {plateau_D} +- {plateau_D_error}')
    print(f'Plateau D / mols:          {plateau_D_normalized} +- {plateau_D_normalized_error}')
    print(f'Ratio D/H plateaus:        {ratio} +- {ratio_error}')
    print(f'Ratio D/H cross sections:  {ratio_ideal}')

    print(f"\nDeuteration: {deuteration:.2f} +- {deuteration_error:.2f}\n")
    return round(deuteration, 2), round(deuteration_error, 2)


def ins_peaks(
        ins: Spectra,
        peaks: dict,
        df_index: int = 0,
    ) -> str:
111    """Estimates partial deuteration by integrating the INS disrotatory peaks.
112    The amines should not interfere with each other, as in the case of CH$_3$NH$_3$PbI$_3$.
113
114    In the MAPbI3 example, the INS disrotatory peaks of CH3NH3 appear at ~38 meV for the fully protonated sample.
115    Note that `peaks` must be a dictionary with the peak limits
116    and the baseline, as in the example below:
117    ```python
118    peaks = {
119        'baseline' : None,
120        'baseline_error' : None,
121        'h6d0' : [41, 43],
122        'h5d1' : [41, 43],
123        'h4d2' : [41, 43],
124        'h3d3' : [34.7, 37.3],
125        'h2d4' : [31.0, 33.0],
126        'h1d5' : [28.0, 30.5],
127        'h0d6' : [26.5, 28.0],
128        }
129    ```
130    Peak keywords required for selective deuteration (only C or only N):
131    `h6d0`, `h5d1`, `h4d2`, `h3d3`.
132
133    Additional peak keywords required for total deuteration:
134    `h2d4`, `h1d5`, `h0d6`.
135
136    If some peak is not present in your sample,
137    just set the limits to a small baseline plateau.
138    """

    peak_data = deepcopy(ins)

    baseline = 0.0
    baseline_error = 0.0
    if peaks.get('baseline') is not None:
        baseline = peaks['baseline']
    if peaks.get('baseline_error') is not None:
        baseline_error = peaks['baseline_error']

    h6d0_limits = peaks.get('h6d0')
    h5d1_limits = peaks.get('h5d1')
    h4d2_limits = peaks.get('h4d2')
    h3d3_limits = peaks.get('h3d3')
    h2d4_limits = peaks.get('h2d4')
    h1d5_limits = peaks.get('h1d5')
    h0d6_limits = peaks.get('h0d6')

    run_partial = None not in (h6d0_limits, h5d1_limits, h4d2_limits, h3d3_limits)
    run_total = None not in (h3d3_limits, h2d4_limits, h1d5_limits, h0d6_limits)

    if not run_partial:
        raise ValueError('No peaks to integrate. Remember to assign peak limits as a dictionary with the keys: h6d0, h5d1, h4d2, h3d3, h2d4, h1d5, h0d6.')

    h6d0_area, h6d0_area_error = area_under_peak(peak_data, [h6d0_limits[0], h6d0_limits[1], baseline, baseline_error], df_index, True)
    h5d1_area, h5d1_area_error = area_under_peak(peak_data, [h5d1_limits[0], h5d1_limits[1], baseline, baseline_error], df_index, True)
    h4d2_area, h4d2_area_error = area_under_peak(peak_data, [h4d2_limits[0], h4d2_limits[1], baseline, baseline_error], df_index, True)
    h3d3_area, h3d3_area_error = area_under_peak(peak_data, [h3d3_limits[0], h3d3_limits[1], baseline, baseline_error], df_index, True)
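    # Normalize each peak area by the number of H atoms contributing to it,
    # so that areas from different deuteration levels are directly comparable.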
    h6d0_area /= 6
    h5d1_area /= 5
    h4d2_area /= 4
    h3d3_area /= 3
    h6d0_area_error /= 6
    h5d1_area_error /= 5
    h4d2_area_error /= 4
    h3d3_area_error /= 3

    if not run_total:
        total_area = h6d0_area + h5d1_area + h4d2_area + h3d3_area
        total_area_error = np.sqrt(h6d0_area_error**2 + h5d1_area_error**2 + h4d2_area_error**2 + h3d3_area_error**2)

        h6d0_ratio, h6d0_error = ratio_areas(h6d0_area, total_area, h6d0_area_error, total_area_error)
        h5d1_ratio, h5d1_error = ratio_areas(h5d1_area, total_area, h5d1_area_error, total_area_error)
        h4d2_ratio, h4d2_error = ratio_areas(h4d2_area, total_area, h4d2_area_error, total_area_error)
        h3d3_ratio, h3d3_error = ratio_areas(h3d3_area, total_area, h3d3_area_error, total_area_error)

        deuteration = 0 * h6d0_ratio + (1/3) * h5d1_ratio + (2/3) * h4d2_ratio + 1 * h3d3_ratio
        protonation = 1 * h6d0_ratio + (2/3) * h5d1_ratio + (1/3) * h4d2_ratio + 0 * h3d3_ratio

        deuteration_error = np.sqrt((1/3 * h5d1_error)**2 + (2/3 * h4d2_error)**2 + (1 * h3d3_error)**2)
        protonation_error = np.sqrt((1 * h6d0_error)**2 + (2/3 * h5d1_error)**2 + (1/3 * h4d2_error)**2)

    if run_total:
        h2d4_area, h2d4_area_error = area_under_peak(peak_data, [h2d4_limits[0], h2d4_limits[1], baseline, baseline_error], df_index, True)
        h1d5_area, h1d5_area_error = area_under_peak(peak_data, [h1d5_limits[0], h1d5_limits[1], baseline, baseline_error], df_index, True)
        h0d6_area, h0d6_area_error = area_under_peak(peak_data, [h0d6_limits[0], h0d6_limits[1], baseline, baseline_error], df_index, True)
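        # h2d4 has two H atoms and h1d5 has one; h0d6 has none, so its
        # area is kept as-is rather than divided by zero.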
        h2d4_area /= 2
        h1d5_area /= 1
        h0d6_area /= 1
        h2d4_area_error /= 2
        h1d5_area_error /= 1
        h0d6_area_error /= 1

        total_area_CDND = h6d0_area + h5d1_area + h4d2_area + h3d3_area + h2d4_area + h1d5_area + h0d6_area
        total_area_error_CDND = np.sqrt(h6d0_area_error**2 + h5d1_area_error**2 + h4d2_area_error**2 + h3d3_area_error**2 + h2d4_area_error**2 + h1d5_area_error**2 + h0d6_area_error**2)

        h6d0_ratio_CDND, h6d0_error_CDND = ratio_areas(h6d0_area, total_area_CDND, h6d0_area_error, total_area_error_CDND)
        h5d1_ratio_CDND, h5d1_error_CDND = ratio_areas(h5d1_area, total_area_CDND, h5d1_area_error, total_area_error_CDND)
        h4d2_ratio_CDND, h4d2_error_CDND = ratio_areas(h4d2_area, total_area_CDND, h4d2_area_error, total_area_error_CDND)
        h3d3_ratio_CDND, h3d3_error_CDND = ratio_areas(h3d3_area, total_area_CDND, h3d3_area_error, total_area_error_CDND)
        h2d4_ratio_CDND, h2d4_error_CDND = ratio_areas(h2d4_area, total_area_CDND, h2d4_area_error, total_area_error_CDND)
        h1d5_ratio_CDND, h1d5_error_CDND = ratio_areas(h1d5_area, total_area_CDND, h1d5_area_error, total_area_error_CDND)
        h0d6_ratio_CDND, h0d6_error_CDND = ratio_areas(h0d6_area, total_area_CDND, h0d6_area_error, total_area_error_CDND)

        deuteration_CDND = 0 * h6d0_ratio_CDND + (1/6) * h5d1_ratio_CDND + (2/6) * h4d2_ratio_CDND + (3/6) * h3d3_ratio_CDND + (4/6) * h2d4_ratio_CDND + (5/6) * h1d5_ratio_CDND + 1 * h0d6_ratio_CDND
        deuteration_CDND_error = np.sqrt((1/6 * h5d1_error_CDND)**2 + (2/6 * h4d2_error_CDND)**2 + (3/6 * h3d3_error_CDND)**2 + (4/6 * h2d4_error_CDND)**2 + (5/6 * h1d5_error_CDND)**2 + (1 * h0d6_error_CDND)**2)

        protonation_CDND = 1 * h6d0_ratio_CDND + (5/6) * h5d1_ratio_CDND + (4/6) * h4d2_ratio_CDND + (3/6) * h3d3_ratio_CDND + (2/6) * h2d4_ratio_CDND + (1/6) * h1d5_ratio_CDND + 0 * h0d6_ratio_CDND
        protonation_CDND_error = np.sqrt((1 * h6d0_error_CDND)**2 + (5/6 * h5d1_error_CDND)**2 + (4/6 * h4d2_error_CDND)**2 + (3/6 * h3d3_error_CDND)**2 + (2/6 * h2d4_error_CDND)**2 + (1/6 * h1d5_error_CDND)**2)

        deuteration_CDND_amine = 0 * h3d3_ratio_CDND + (1/3) * h2d4_ratio_CDND + (2/3) * h1d5_ratio_CDND + 1 * h0d6_ratio_CDND
        deuteration_CDND_amine_error = np.sqrt((1/3 * h2d4_error_CDND)**2 + (2/3 * h1d5_error_CDND)**2 + (1 * h0d6_error_CDND)**2)

        protonation_CDND_amine = 1 * h3d3_ratio_CDND + (2/3) * h2d4_ratio_CDND + (1/3) * h1d5_ratio_CDND + 0 * h0d6_ratio_CDND
        protonation_CDND_amine_error = np.sqrt((1 * h3d3_error_CDND)**2 + (2/3 * h2d4_error_CDND)**2 + (1/3 * h1d5_error_CDND)**2)

    print()
    if hasattr(ins, "plotting") and ins.plotting.legend is not None:
        print(f'Sample:  {ins.plotting.legend[df_index]}')
    else:
        print(f'Sample:  {ins.files[df_index]}')
    print(f'Corrected baseline: {round(baseline, 2)} +- {round(baseline_error, 2)}')
    if not run_total:
        print(f"HHH {h6d0_limits}:  {round(h6d0_ratio, 2)}  +-  {round(h6d0_error, 2)}")
        print(f"DHH {h5d1_limits}:  {round(h5d1_ratio, 2)}  +-  {round(h5d1_error, 2)}")
        print(f"DDH {h4d2_limits}:  {round(h4d2_ratio, 2)}  +-  {round(h4d2_error, 2)}")
        print(f"DDD {h3d3_limits}:  {round(h3d3_ratio, 2)}  +-  {round(h3d3_error, 2)}")
        print(f"Amine deuteration:  {round(deuteration, 2)}  +-  {round(deuteration_error, 2)}")
        print(f"Amine protonation:  {round(protonation, 2)}  +-  {round(protonation_error, 2)}")
        print()
        return f"{deuteration:.2f} +- {deuteration_error:.2f}"
    else:
        print(f"HHH-HHH {h6d0_limits}:  {round(h6d0_ratio_CDND, 2)}  +-  {round(h6d0_error_CDND, 2)}")
        print(f"DHH-HHH {h5d1_limits}:  {round(h5d1_ratio_CDND, 2)}  +-  {round(h5d1_error_CDND, 2)}")
        print(f"DDH-HHH {h4d2_limits}:  {round(h4d2_ratio_CDND, 2)}  +-  {round(h4d2_error_CDND, 2)}")
        print(f"DDD-HHH {h3d3_limits}:  {round(h3d3_ratio_CDND, 2)}  +-  {round(h3d3_error_CDND, 2)}")
        print(f"DDD-DHH {h2d4_limits}:  {round(h2d4_ratio_CDND, 2)}  +-  {round(h2d4_error_CDND, 2)}")
        print(f"DDD-DDH {h1d5_limits}:  {round(h1d5_ratio_CDND, 2)}  +-  {round(h1d5_error_CDND, 2)}")
        print(f"DDD-DDD {h0d6_limits}:  {round(h0d6_ratio_CDND, 2)}  +-  {round(h0d6_error_CDND, 2)}")
        print(f"Total deuteration:  {round(deuteration_CDND, 2)}  +-  {round(deuteration_CDND_error, 2)}")
        print(f"Total protonation:  {round(protonation_CDND, 2)}  +-  {round(protonation_CDND_error, 2)}")
        print(f"Amine deuteration:  {round(deuteration_CDND_amine, 2)}  +-  {round(deuteration_CDND_amine_error, 2)}")
        print(f"Amine protonation:  {round(protonation_CDND_amine, 2)}  +-  {round(protonation_CDND_amine_error, 2)}")
        print()
        return f"{deuteration_CDND_amine:.2f} +- {deuteration_CDND_amine_error:.2f} / {deuteration_CDND:.2f} +- {deuteration_CDND_error:.2f}"
def impulse_approx(ins: sah.classes.Spectra, material_H: sah.classes.Material, material_D: sah.classes.Material, threshold: float = 600, H_df_index: int = 0, D_df_index: int = 1) -> tuple:

Calculate the deuteration levels from INS spectra with the Impulse Approximation, see Andreani et al., Advances in Physics 66, 1-73 (2017).

Protonated and deuterated materials must be specified as sah.classes.Material objects. Note that this approximation is very sensitive to the sample mass. The threshold sets the energy (in meV) above which the spectrum is treated as the Deep Inelastic Neutron Scattering (DINS) plateau. The protonated and deuterated dataframe indexes are specified by H_df_index and D_df_index, respectively.

In this approximation, the experimental ratio between the plateaus at high energies should match the ideal ratio between the cross sections:

$$
\frac{\text{plateau}_D}{\text{plateau}_H} \approx \frac{\text{cross section}_D}{\text{cross section}_H}
$$

Taking this into account, the deuteration is estimated as:

$$
\text{Deuteration} = \frac{1-\text{ratio}_\text{real}}{1-\text{ratio}_\text{ideal}}
$$
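As a quick numeric check of these formulas (a hedged sketch: the plateau and mol values below are invented for illustration; only the cross sections are tabulated total bound scattering values for H and D, roughly 82.02 and 7.64 barn):

```python
# Invented example values; only the cross sections are tabulated data.
plateau_H, plateau_D = 12.0, 3.0    # DINS plateaus above the threshold
mols_H, mols_D = 0.010, 0.010       # mols of each measured sample
ratio = (plateau_D / mols_D) / (plateau_H / mols_H)  # experimental ratio = 0.25
ratio_ideal = 7.64 / 82.02          # cross section D / H, ~0.093
deuteration = (1 - ratio) / (1 - ratio_ideal)
print(f'Deuteration: {deuteration:.2f}')  # ~0.83
```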

def ins_peaks(ins: sah.classes.Spectra, peaks: dict, df_index: int = 0) -> str:

Estimate partial deuteration by integrating the INS disrotatory peaks. The amines should not interfere with each other, as in the case of CH$_3$NH$_3$PbI$_3$.

In the MAPbI$_3$ example, the INS disrotatory peaks of CH$_3$NH$_3$ appear at ~38 meV for the fully protonated sample. Note that peaks must be a dictionary with the peak limits and the baseline, as in the example below:

```python
peaks = {
    'baseline' : None,
    'baseline_error' : None,
    'h6d0' : [41, 43],
    'h5d1' : [41, 43],
    'h4d2' : [41, 43],
    'h3d3' : [34.7, 37.3],
    'h2d4' : [31.0, 33.0],
    'h1d5' : [28.0, 30.5],
    'h0d6' : [26.5, 28.0],
    }
```

Peak keywords required for selective deuteration (only C or only N): h6d0, h5d1, h4d2, h3d3.

Additional peak keywords required for total deuteration: h2d4, h1d5, h0d6.

If some peak is not present in your sample, just set the limits to a small baseline plateau.
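
A minimal usage sketch for the selective (amine-only) case, assuming `ins` is a sah.classes.Spectra already loaded with the partially deuterated sample at df_index=0; the peak limits are illustrative, not measured values. With only the h6d0 to h3d3 keys present, the total-deuteration branch is skipped and a single "deuteration +- error" string is returned:

```python
# Illustrative peak limits; `ins` is assumed to be an already-loaded
# sah.classes.Spectra with the partially deuterated sample at df_index=0.
from sah.deuterium import ins_peaks

peaks_partial = {
    'baseline' : None,
    'baseline_error' : None,
    'h6d0' : [41.0, 43.0],
    'h5d1' : [38.5, 41.0],  # hypothetical limits for this sketch
    'h4d2' : [36.5, 38.5],
    'h3d3' : [34.7, 37.3],
}
result = ins_peaks(ins, peaks_partial, df_index=0)
print(result)  # e.g. '0.42 +- 0.05'
```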