diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..443e7a0 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Francesco Bruno + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..5cd823e --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include klassez/test/* +include klassez/docs/* diff --git a/PKG-INFO b/PKG-INFO new file mode 100644 index 0000000..25fee52 --- /dev/null +++ b/PKG-INFO @@ -0,0 +1,31 @@ +Metadata-Version: 2.1 +Name: klassez +Version: 0.1a1 +Summary: A collection of functions for NMR data handling. Documentation: klassez.pdf in "docs" subfolder of your install dir. 
+Home-page: https://test.pypi.org/legacy/klassez +Author: Francesco Bruno +Author-email: bruno@cerm.unifi.it +License: LICENSE.txt +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: OS Independent +Classifier: License :: OSI Approved :: MIT License +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE.txt + +# **KLASSEZ** + +The *klassez* package is a collection of functions and classes to handle NMR data. + +It is organized in modules, each of which contains functions dedicated to specific tasks. + +Full documentation is available, after installation of the package, through the function **open_doc**. + +Some of the functions employed by *klassez* are taken from the NMRGLUE package and modified to suit the needs of the *klassez* package itself. + +--- + +*klassez* is developed and tested on *Ubuntu 22.04 LTS*. Other OS should encounter no issues; however, if it raises any errors, please notify. + + diff --git a/README.md b/README.md index ccc3857..abe1064 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,13 @@ -# klassez -A package for the management of NMR data +# **KLASSEZ** + +The *klassez* package is a collection of functions and classes to handle NMR data. + +It is organized in modules, each of which contains functions dedicated to specific tasks. + +Full documentation is available, after installation of the package, through the function **open_doc**. + +Some of the functions employed by *klassez* are taken from the NMRGLUE package and modified to suit the needs of the *klassez* package itself. + +--- + +*klassez* is developed and tested on *Ubuntu 22.04 LTS*. Other OS should encounter no issues; however, if it raises any errors, please notify. 
diff --git a/klassez.egg-info/PKG-INFO b/klassez.egg-info/PKG-INFO new file mode 100644 index 0000000..25fee52 --- /dev/null +++ b/klassez.egg-info/PKG-INFO @@ -0,0 +1,31 @@ +Metadata-Version: 2.1 +Name: klassez +Version: 0.1a1 +Summary: A collection of functions for NMR data handling. Documentation: klassez.pdf in "docs" subfolder of your install dir. +Home-page: https://test.pypi.org/legacy/klassez +Author: Francesco Bruno +Author-email: bruno@cerm.unifi.it +License: LICENSE.txt +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: OS Independent +Classifier: License :: OSI Approved :: MIT License +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE.txt + +# **KLASSEZ** + +The *klassez* package is a collection of functions and classes to handle NMR data. + +It is organized in modules, each of which contains functions dedicated to specific tasks. + +Full documentation is available, after installation of the package, through the function **open_doc**. + +Some of the functions employed by *klassez* are taken from the NMRGLUE package and modified to suit the needs of the *klassez* package itself. + +--- + +*klassez* is developed and tested on *Ubuntu 22.04 LTS*. Other OS should encounter no issues; however, if it raises any errors, please notify. 
+ + diff --git a/klassez.egg-info/SOURCES.txt b/klassez.egg-info/SOURCES.txt new file mode 100644 index 0000000..0f672aa --- /dev/null +++ b/klassez.egg-info/SOURCES.txt @@ -0,0 +1,23 @@ +LICENSE.txt +MANIFEST.in +README.md +pyproject.toml +setup.py +klassez/Spectra.py +klassez/__init__.py +klassez/config.py +klassez/figures.py +klassez/fit.py +klassez/misc.py +klassez/processing.py +klassez/qsin.py +klassez/sim.py +klassez.egg-info/PKG-INFO +klassez.egg-info/SOURCES.txt +klassez.egg-info/dependency_links.txt +klassez.egg-info/requires.txt +klassez.egg-info/top_level.txt +klassez/docs/klassez.pdf +klassez/test/acqus_1D +klassez/test/acqus_2D +klassez/test/test.py \ No newline at end of file diff --git a/klassez.egg-info/dependency_links.txt b/klassez.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/klassez.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/klassez.egg-info/requires.txt b/klassez.egg-info/requires.txt new file mode 100644 index 0000000..174f80c --- /dev/null +++ b/klassez.egg-info/requires.txt @@ -0,0 +1,7 @@ +csaps +lmfit +matplotlib +nmrglue +numpy +scipy +seaborn diff --git a/klassez.egg-info/top_level.txt b/klassez.egg-info/top_level.txt new file mode 100644 index 0000000..bfff284 --- /dev/null +++ b/klassez.egg-info/top_level.txt @@ -0,0 +1 @@ +klassez diff --git a/klassez/Spectra.py b/klassez/Spectra.py new file mode 100644 index 0000000..048388f --- /dev/null +++ b/klassez/Spectra.py @@ -0,0 +1,1562 @@ +#! 
/usr/bin/env python3 + +import os +import sys +import numpy as np +from scipy import linalg, stats +from scipy.spatial import ConvexHull +import random +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from matplotlib.widgets import Slider, Button, RadioButtons, TextBox, CheckButtons, Cursor, LassoSelector +from matplotlib.path import Path +import seaborn as sns +import nmrglue as ng +import lmfit as l +from datetime import datetime +import warnings + +from . import fit, misc, sim, figures, processing +#from .__init__ import CM +from .config import CM + + +warnings.filterwarnings(action='ignore', category=UserWarning) + +""" +Classes for the management of NMR data. +""" +class Spectrum_1D: + """ + Class: 1D NMR spectrum + """ + def __str__(self): + doc = '-'*64 + doc += '\nSpectrum_1D object.\n' + if 'ngdic' in self.__dict__.keys(): + doc += f'Read from "{self.datadir}"\n' + else: + doc += f'Simulated from "{self.datadir}"\n' + N = self.fid.shape[-1] + doc += f'It is a {self.acqus["nuc"]} spectrum recorded over a\nsweep width of {self.acqus["SWp"]} ppm, centered at {self.acqus["o1p"]} ppm.\n' + doc += f'The FID is {N} points long.\n' + doc += '-'*64 + + return doc + + def __len__(self): + if 'S' in self.__dict__.keys(): + return self.S.shape[-1] + else: + return self.fid.shape[-1] + + def __init__(self, in_file, pv=False, isexp=True): + """ + Initialize the class. + Simulation of the dataset (i.e. isexp=False) employs sim.sim_1D. 
+ ------- + Parameters: + - in_file: str + path to file to read, or to the folder of the spectrum + - pv: bool + True if you want to use pseudo-voigt lineshapes for simulation, False for Voigt + - isexp: bool + True if this is an experimental dataset, False if it is simulated + """ + self.datadir = in_file + if isexp is False: # Simulate the dataset + self.acqus = sim.load_sim_1D(in_file) + self.fid = sim.sim_1D(in_file, pv=pv) + else: + warnings.filterwarnings("ignore") # Suppress errors due to CONVDTA in TopSpin + dic, data = ng.bruker.read(in_file, cplex=True) + self.fid = data + self.acqus = misc.makeacqus_1D(dic) + self.BYTORDA = dic['acqus']['BYTORDA'] + self.DTYPA = dic['acqus']['DTYPA'] + self.ngdic = dic # NMRGLUE dictionary of parameters + del dic + del data + # Look for group delay points: if there is not, put it to 0 + try: + self.grpdly = int(self.ngdic['acqus']['GRPDLY']) + except: + self.grpdly = 0 + + # Initalize the procs dictionary with default values + # DEFAULT VALUES + # ----------------------------------------------- + proc_keys_1D = ['wf', 'zf', 'fcor', 'tdeff'] + wf0 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + proc_init_1D = (wf0, None, 0.5, 0) + # ----------------------------------------------- + + self.procs = { + } + for k, key in enumerate(proc_keys_1D): + self.procs[key] = proc_init_1D[k] # Processing parameters + self.procs['wf']['sw'] = round(self.acqus['SW'], 4) + # Then, phases + self.procs['p0'] = 0 + self.procs['p1'] = 0 + self.procs['pv'] = round(self.acqus['o1p'], 2) + + def convdta(self, scaling=1): + """ Call processing.convdta using attribute self.grpdly """ + self.fid = processing.convdta(self.fid, self.grpdly, scaling) + + def process(self, interactive=False): + """ + Performs the processing of the FID. The parameters are read from self.procs. 
+ Calls processing.interactive_fp or processing.fp using self.acqus and self.procs + Writes the result is self.S, then unpacks it in self.r and self.i + Calculates frequency and ppm scales. + Initializes self.F with fit.Voigt_Fit class using the current parameters + -------- + Parameters: + - interactive: bool + True if you want to open the interactive panel, False to read the parameters from self.procs. + """ + if interactive is True: + self.S, self.procs = processing.interactive_fp(self.fid, self.acqus, self.procs) + else: + self.S = processing.fp(self.fid, wf=self.procs['wf'], zf=self.procs['zf'], fcor=self.procs['fcor'], tdeff=self.procs['tdeff']) + if self.acqus['SFO1'] < 0: + self.S = self.S[::-1] + self.r = self.S.real + self.i = self.S.imag + + # Calculate frequency and ppm scales + self.freq = processing.make_scale(self.r.shape[0], dw=self.acqus['dw']) + if self.acqus['SFO1'] < 0: + self.freq = self.freq[::-1] + self.ppm = misc.freq2ppm(self.freq, B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + + # Initializes the F attribute + self.F = fit.Voigt_Fit(self.ppm, self.S, self.acqus['t1'], self.acqus['SFO1'], self.acqus['o1p'], self.acqus['nuc']) + self.baseline = np.zeros_like(self.ppm) + self.integrals = {} + + def inv_process(self): + """ + Performs the inverse processing of the spectrum according to the given parameters. + Overwrites the S attribute!! + Calls processing.inv_fp + """ + if self.acqus['SFO1'] < 0: + self.S = self.S[::-1] + self.S = processing.inv_fp(self.S, wf=self.procs['wf'], size=self.acqus['TD'], fcor=self.procs['fcor']) + + def mc(self): + """ + Calculates the magnitude of the spectrum and overwrites self.S, self.r, self.i + """ + self.S = (self.S.real**2 + self.S.imag**2)**0.5 + self.r = self.S.real + self.i = self.S.imag + + self.F.S = self.r + + def adjph(self, p0=None, p1=None, pv=None): + """ + Adjusts the phases of the spectrum according to the given parameters, or interactively if they are left as default. 
+ Calls for processing.ps + ------- + Parameters: + - p0: float or None + 0-th order phase correction /° + - p1: float or None + 1-st order phase correction /° + - pv: float or None + 1-st order pivot /ppm + """ + # Adjust the phases + self.S, values = processing.ps(self.S, self.ppm, p0=p0, p1=p1, pivot=pv) + self.r = self.S.real + self.i = self.S.imag + self.procs['p0'] += round(values[0], 2) + self.procs['p1'] += round(values[1], 2) + if values[2] is not None: + self.procs['pv'] = round(values[2], 5) + + self.F.S = self.r + + def cal(self, offset=None, isHz=False): + """ + Calibrates the ppm and frequency scale according to a given value, or interactively. + Calls processing.calibration + ------- + Parameters: + - offset: float or None + scale shift value + - isHz: bool + True if offset is in frequency units, False if offset is in ppm + """ + in_ppm = np.copy(self.ppm) + in_S = np.copy(self.r) + if offset is None: + offppm = processing.calibration(in_ppm, in_S) + offhz = misc.ppm2freq(offppm, self.acqus['SFO1'], self.acqus['o1p']) + else: + if isHz: + offhz = offset + offppm = misc.freq2ppm(offhz, self.acqus['SFO1'], self.acqus['o1p']) + else: + offppm = offset + offhz = misc.ppm2freq(offppm, self.acqus['SFO1'], self.acqus['o1p']) + self.freq += offhz + self.ppm += offppm + + def save_acqus(self, path='sim_in_1D'): + """ + Write the acqus dictionary in a file. + Calls misc.write_acqus_1D + -------- + Parameters: + - path: str + Filename + """ + misc.write_acqus_1D(self.acqus, path=path) + + def write_ser(self, path=None): + """ + Writes the FID in binary format. + Calls misc.write_ser + -------- + Parameters: + - path: str or None + Path where to save the binary file. If it is None, the original binary file is overwritten, so BE CAREFUL!!! + """ + if path is None: + path = self.datadir + misc.write_ser(path, self.fid, self.BYTORDA, self.DTYPA) + + def plot(self): + """ + Plots the real part of the spectrum. 
+ """ + n_xticks = 10 + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(15,8) + plt.subplots_adjust(left=0.10, bottom=0.15, right=0.95, top=0.90) # Make room for the sliders + ax = fig.add_subplot(1,1,1) + # Auto-adjusts the limits for the y-axis + misc.set_ylim(ax, self.r) + # Make pretty x-scale + xsx, xdx = max(self.ppm), min(self.ppm) + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + + # Pretty y-axis numbers + spect, = ax.plot(self.ppm, self.r, lw=0.8) + # Create sliders for moving the borders + + X_label = '$\delta\ $'+misc.nuc_format(self.acqus['nuc'])+' /ppm' + ax.set_xlabel(X_label) + + misc.mathformat(ax) + misc.set_fontsizes(ax, 14) + cursor = Cursor(ax, useblit=True, c='tab:red', lw=0.8, horizOn=False) + + + plt.show() + plt.close() + + def qfil(self, u=None, s=None): + """ + Gaussian filter to suppress signals. + Tries to read self.procs['qfil'], which is + { 'u': u, 's': s } + Calls processing.qfil + --------- + Parameters: + - u: float + Position /ppm + - s: float + Width (standard deviation) /ppm + """ + if 'qfil' not in self.procs.keys(): + self.procs['qfil'] = {'u': u, 's': s} + for key, value in self.procs['qfil'].items(): + if value is None: + self.procs['qfil']['u'], self.procs['qfil']['s'] = processing.interactive_qfil(self.ppm, self.r) + break + self.S = processing.qfil(self.ppm, self.S, self.procs['qfil']['u'], self.procs['qfil']['s']) + self.r = self.S.real + self.i = self.S.imag + + + def basl(self, basl_file='spectrum.basl', winlim=None): + """ + Correct the baseline of the spectrum, according to a pre-existing file or interactively. + Calls processing.baseline_correction or processing.load_baseline + ------- + Parameters: + - basl_file: str + Path to the baseline file. If it already exists, the baseline will be built according to this file; otherwise this will be the destination file of the baseline. + - winlim: tuple or None + Limits of the baseline. If it is None, it will be interactively set. 
If basl_file exists, it will be read from there. Else, (ppm1, ppm2). + """ + if not os.path.exists(basl_file): + processing.baseline_correction(self.ppm, self.r, basl_file=basl_file, winlim=winlim) + self.baseline = processing.load_baseline(basl_file, self.ppm, self.r) + + def integrate(self, lims=None): + """ + Integrate the spectrum with a dedicated GUI. + Calls fit.integrate and writes in self.integrals + """ + X_label = '$\delta\,$'+misc.nuc_format(self.acqus['nuc'])+' /ppm' + if lims is None: + integrals = fit.integrate(self.ppm, self.r, X_label=X_label) + for key, value in integrals.items(): + self.integrals[key] = value + else: + self.integrals[f'{lims[0]:.2f}:{lims[1]:.2f}'] = processing.integrate(self.r, self.ppm, lims) + + def write_integrals(self, filename='integrals.dat'): + """ + Write the integrals in a file named filename. + ------- + Parameters: + - filename: str + name of the file where to write the integrals. + """ + f = open(filename, 'w') + for key, value in self.integrals.items(): + if 'total' in key: + f.write('{:12}\t\t{:.4e}\n'.format(key, value)) + elif 'ref' in key: + if 'pos' in key: + f.write('{:12}\t\t{}\n'.format(key, value)) + elif 'int' in key: + f.write('{:12}\t\t{:.4e}\n'.format(key, value)) + elif 'val' in key: + f.write('{:12}\t\t{:.3f}\n'.format(key, value)) + else: + f.write('{:12}\t{:.8f}\n'.format(key, value)) + f.close() + + +class pSpectrum_1D(Spectrum_1D): + """ + Subclass of Spectrum_1D that allows to handle processed 1D NMR spectra. + Useful when dealing with traces of 2D spectra. + """ + def __init__(self, in_file, acqus=None, procs=None, istrace=False): + """ + Initialize the class. + ------- + Parameters: + - in_file: str or 1darray + If istrace is True, in_file is the NMR spectrum, real part. If istrace is False, in_file is the directory of the processed data. + - acqus: dict or None + If istrace is True, you must pass the associated 'acqus' dictionary. 
If istrace is False, it is not necessary as it is read from the input directory + - procs: dict or None + You can pass the dictionary of processing parameters, if you want. Otherwise, it is initialized as empty. + - istrace: bool + Declare the object as trace extracted from a 2D (True) or true experimental spectrum (False) + """ + if istrace is True: + self.r = in_file + self.S = self.r + self.acqus = acqus + + else: + warnings.filterwarnings("ignore") + dic, data = ng.bruker.read_pdata(in_file) + _, self.r = ng.bruker.read_pdata(in_file, bin_files=['1r']) + _, self.i = ng.bruker.read_pdata(in_file, bin_files=['1i']) + self.S = self.r + 1j * self.i + self.acqus = misc.makeacqus_1D(dic) + self.BYTORDA = dic['acqus']['BYTORDA'] + self.DTYPA = dic['acqus']['DTYPA'] + self.ngdic = dic + del dic + del data + try: + self.grpdly = int(self.ngdic['acqus']['GRPDLY']) + except: + self.grpdly = 0 + + if procs is None: + proc_keys_1D = ['wf', 'zf', 'fcor', 'tdeff'] + wf0 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + proc_init_1D = (wf0, None, 0.5, 0) + + self.procs = { + } + for k, key in enumerate(proc_keys_1D): + self.procs[key] = proc_init_1D[k] # Processing parameters + self.procs['wf']['sw'] = round(self.acqus['SW'], 4) + # Then, phases + self.procs['p0'] = 0 + self.procs['p1'] = 0 + self.procs['pv'] = self.acqus['o1p'] + else: + self.procs = procs + + # Calculate frequency and ppm scales + self.freq = processing.make_scale(self.r.shape[0], dw=self.acqus['dw']) + if self.acqus['SFO1'] < 0: + self.freq = self.freq[::-1] + self.ppm = misc.freq2ppm(self.freq, B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + + self.F = fit.Voigt_Fit(self.ppm, self.S, self.acqus['t1'], self.acqus['SFO1'], self.acqus['o1p'], self.acqus['nuc']) + + def write_ser(self): + """Overwrite the original function to prevent writing of the binary file. 
It does nothing!""" + pass + + +class Spectrum_2D: + """ + Class: 2D NMR spectrum + """ + def __str__(self): + doc = '-'*64 + doc += '\nSpectrum_2D object.\n' + if 'ngdic' in self.__dict__.keys(): + doc += f'Read from "{self.datadir}"\n' + else: + doc += f'Simulated from "{self.datadir}"\n' + N = self.fid.shape + doc += f'It is a {self.acqus["nuc1"]}-{self.acqus["nuc2"]} spectrum recorded over a \nsweep width of \n{self.acqus["SW1p"]} ppm centered at {self.acqus["o1p"]} ppm in F1, and\n{self.acqus["SW2p"]} ppm centered at {self.acqus["o2p"]} ppm in F2.\n' + doc += f'The FID is {N[0]}x{N[1]} points long.\n' + doc += '-'*64 + + return doc + + def __len__(self): + if 'S' in self.__dict__.keys(): + return self.S.shape[-1] + else: + return self.fid.shape[-1] + + def __init__(self, in_file, pv=False, isexp=True, is_pseudo=False): + """ + Initialize the class. + ------- + Parameters: + - in_file: str + path to file to read, or to the folder of the spectrum + - isexp: bool + True if this is an experimental dataset, False if it is simulated + - pv: bool + True if you want to use pseudo-voigt lineshapes for simulation, False for Voigt + - is_pseudo: bool + True if it is a pseudo-2D. 
+ """ + self.datadir = in_file + if isexp is False: + self.acqus = sim.load_sim_2D(in_file) + if is_pseudo: + self.acqus['FnMODE'] = 'No' + else: + self.acqus['FnMODE'] = 'States-TPPI' + self.fid = sim.sim_2D(in_file, pv=pv) + else: + warnings.filterwarnings("ignore") + dic, data = ng.bruker.read(in_file, cplex=True) + self.ngdic = dic + self.fid = data + self.acqus = misc.makeacqus_2D(dic) + self.BYTORDA = dic['acqus']['BYTORDA'] + self.DTYPA = dic['acqus']['DTYPA'] + FnMODE_flag = dic['acqu2s']['FnMODE'] + FnMODEs = ['Undefined', 'QF', 'QSEC', 'TPPI', 'States', 'States-TPPI', 'Echo-Antiecho'] + self.acqus['FnMODE'] = FnMODEs[FnMODE_flag] + # put a flag to say "shuffle" + if self.acqus['FnMODE'] == 'Echo-Antiecho': + self.eaeflag = 1 + else: + self.eaeflag = 0 + del dic + del data + + try: + self.grpdly = int(self.ngdic['acqus']['GRPDLY']) + except: + self.grpdly = 0 + + # initialize the procs dictionary with default values + wf1 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + wf2 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + proc_init_2D = ( + [wf1, wf2], # window function + [None, None], # zero-fill + [0.5, 0.5], # fcor + [0,0] # tdeff + ) + + proc_keys_1D = ['wf', 'zf', 'fcor', 'tdeff'] + self.procs = {} + for k, key in enumerate(proc_keys_1D): + self.procs[key] = proc_init_2D[k] # Processing parameters + self.procs['wf'][0]['sw'] = round(self.acqus['SW1'], 4) + self.procs['wf'][1]['sw'] = round(self.acqus['SW2'], 4) + + # Then, phases + self.procs['p0_1'] = 0 + self.procs['p1_1'] = 0 + self.procs['pv_1'] = round(self.acqus['o1p'], 2) + self.procs['p0_2'] = 0 + self.procs['p1_2'] = 0 + self.procs['pv_2'] = round(self.acqus['o2p'], 2) + + # Create empty dictionary where to save the projections + self.trf1 = {} + self.trf2 = {} + self.Trf1 = {} + self.Trf2 = {} + + def convdta(self, scaling=1): + """ Calls processing.convdta """ + self.fid = processing.convdta(self.fid, self.grpdly, scaling) + + def 
eae(self): + """ Calls processing.EAE to shuffle the data. """ + self.fid = processing.EAE(self.fid) + self.eaeflag = 0 + + def xf2(self): + """ + Process only the direct dimension. + Calls processing.fp using procs[keys][1] + freq_f1 and ppm_f1 are assigned with the indexes of the transients. + """ + if self.procs['zf'][1] is None: + self.S = np.zeros_like(self.fid) + else: + self.S = np.zeros((self.fid.shape[0], self.procs['zf'][1])) + + for k in range(self.fid.shape[0]): + self.S[k] = processing.fp(self.fid[k], wf=self.procs['wf'][1], zf=self.procs['zf'][1], fcor=self.procs['fcor'][1], tdeff=self.procs['tdeff'][1]) + + self.freq_f2 = processing.make_scale(self.S.shape[1], dw=self.acqus['dw2']) + if self.acqus['SFO2'] < 0: + self.freq_f2 = self.freq_f2[::-1] + self.ppm_f2 = misc.freq2ppm(self.freq_f2, B0=self.acqus['SFO2'], o1p=self.acqus['o2p']) + + if self.acqus['SFO2'] < 0: + self.S = self.S[:,::-1] + + self.rr = self.S.real + self.ii = self.S.imag + + self.freq_f1 = np.arange(self.S.shape[0]) + self.ppm_f1 = np.arange(self.S.shape[0]) + + def xf1(self): + """ + Process only the indirect dimension. + Transposes the spectrum in hypermode or normally if FnMODE != QF, then calls for processing.fp using self.procs[keys][0], then transposes it back. 
+ """ + if self.acqus['FnMODE']=='QF': + self.fid = self.fid.T + else: + self.fid = processing.tp_hyper(self.fid) + + if self.procs['zf'][0] is None: + self.S = np.zeros_like(self.fid) + else: + self.S = np.zeros((self.fid.shape[0], self.procs['zf'][0])) + + for k in range(self.fid.shape[0]): + self.S[k] = processing.fp(self.fid[k], wf=self.procs['wf'][0], zf=self.procs['zf'][0], fcor=self.procs['fcor'][0], tdeff=self.procs['tdeff'][0]) + + if self.acqus['FnMODE']=='QF': + self.fid = self.fid.T + self.S = self.S.T + else: + self.fid = processing.tp_hyper(self.fid) + self.S = processing.tp_hyper(self.S) + + self.freq_f1 = processing.make_scale(self.S.shape[0], dw=self.acqus['dw1']) + if self.acqus['SFO1'] < 0: + self.freq_f1 = self.freq_f1[::-1] + self.ppm_f1 = misc.freq2ppm(self.freq_f1, B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + + self.rr = np.copy(self.S.real) + self.ppm_f2 = np.arange(self.S.shape[1]) + + def process(self, interactive=False, **int_kwargs): + """ + Performs the processing of the FID. The parameters are read from self.procs. + If interactive is True, calls processing.interactive_xfb with **int_kwargs, else calls processing.xfb + -------- + Parameters: + - interactive: bool + True if you want to open the interactive panel, False to read the parameters from self.procs. + - int_kwargs: + - lvl0: float + For interactive processing. Set the starting contour values. + - show_cnt: bool + For interactive processing. If it is True shows the contours of the spectrum, if it is False shows the heatmap. 
+ """ + + # If Echo-Antiecho, pre-process the FID to get the correct spectral arrangement + if self.acqus['FnMODE'] == 'Echo-Antiecho' and self.eaeflag == 1: + self.fid = processing.EAE(self.fid) + + if interactive is True: + self.S, self.procs = processing.interactive_xfb(self.fid, self.acqus, self.procs, **int_kwargs) + else: + self.S = processing.xfb(self.fid, wf=self.procs['wf'], zf=self.procs['zf'], fcor=self.procs['fcor'], tdeff=self.procs['tdeff'], FnMODE=self.acqus['FnMODE'], u=False) + + # For EAE, correct the 90° phase shift in F1 + if self.acqus['FnMODE'] == 'Echo-Antiecho': + self.S = processing.tp_hyper(self.S) + self.S = processing.ps(self.S, p0=-90)[0] + self.S = processing.tp_hyper(self.S) + + if self.acqus['SFO2'] < 0: + self.S = self.S[:,::-1] + if self.acqus['SFO1'] < 0: + # Reversing the spectrum in the indirect dimension causes a 90° dephasing + self.S = self.S[::-1,:] + if self.acqus['FnMODE'] == 'QF': + self.S = self.S.T + else: + self.S = processing.tp_hyper(self.S) + self.S = processing.ps(self.S, p0=-90)[0] #...that has to be corrected + if self.acqus['FnMODE'] == 'QF': + self.S = self.S.T + else: + self.S = processing.tp_hyper(self.S) + + if self.acqus['FnMODE'] == 'QF': + self.rr = self.S.real + self.ii = self.S.imag + else: + rr, ir, ri, ii = processing.unpack_2D(self.S) + self.rr = rr + self.ri = ri + self.ir = ir + self.ii = ii + + # Calculates the frequency and ppm scales + self.freq_f1 = processing.make_scale(self.rr.shape[0], dw=self.acqus['dw1']) + if self.acqus['SFO1'] < 0: + self.freq_f1 = self.freq_f1[::-1] + self.ppm_f1 = misc.freq2ppm(self.freq_f1, B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + self.freq_f2 = processing.make_scale(self.rr.shape[1], dw=self.acqus['dw2']) + if self.acqus['SFO2'] < 0: + self.freq_f2 = self.freq_f2[::-1] + self.ppm_f2 = misc.freq2ppm(self.freq_f2, B0=self.acqus['SFO2'], o1p=self.acqus['o2p']) + + + def inv_process(self): + """ + Performs the inverse processing of the spectrum according to the 
given parameters. + Overwrites the S attribute!! + Calls inv_xfb + """ + + # For EAE, correct the 90° phase shift in F1 + if self.acqus['FnMODE'] == 'Echo-Antiecho': + self.S = processing.tp_hyper(self.S) + self.S = processing.ps(self.S, p0=90)[0] + self.S = processing.tp_hyper(self.S) + + if self.acqus['SFO2'] < 0: + self.S = self.S[:,::-1] + if self.acqus['SFO1'] < 0: + self.S = self.S[::-1,:] + self.S = processing.tp_hyper(self.S) + self.S = processing.ps(self.S, p0=-90)[0] + self.S = processing.tp_hyper(self.S) + + self.S = processing.inv_xfb(self.S, wf=self.procs['wf'], size=[self.acqus['TD1'], self.acqus['TD2']], fcor=self.procs['fcor'], FnMODE=self.acqus['FnMODE']) + + + def mc(self): + """ Compute the magnitude of the spectrum. """ + self.S = (self.S.real**2 + self.S.imag**2 )**0.5 + if self.acqus['FnMODE'] == 'QF': + self.rr = self.S.real + self.ii = self.S.imag + else: + rr, ir, ri, ii = processing.unpack_2D(self.S) + self.rr = rr + self.ri = ri + self.ir = ir + self.ii = ii + + def adjph(self, p01=None, p11=None, pv1=None, p02=None, p12=None, pv2=None): + """ + Adjusts the phases of the spectrum according to the given parameters, or interactively if they are left as default. + ------- + Parameters: + - p01: float or None + 0-th order phase correction /° of the indirect dimension + - p11: float or None + 1-st order phase correction /° of the indirect dimension + - pv1: float or None + 1-st order pivot /ppm of the indirect dimension + - p02: float or None + 0-th order phase correction /° of the direct dimension + - p12: float or None + 1-st order phase correction /° of the direct dimension + - pv2: float or None + 1-st order pivot /ppm of the direct dimension + """ + interactive = True # by default + # Set pivot to carrier if not specified + if pv1 is None: + pv1 = self.acqus['o1p'] + if pv2 is None: + pv2 = self.acqus['o2p'] + ph = [p01, p11, p02, p12] # for easier handling + for p in ph: + # If a phase is specified, interactive is set to False... 
+ if p is not None: + interactive = False + if interactive is False: + # ... and the not-set phases are put to 0 + for i, p in enumerate(ph): + if p is None: + ph[i] = 0 + # Adjust the phases according to the given values + + self.S, values_f2 = processing.ps(self.S, self.ppm_f2, p0=ph[2], p1=ph[3], pivot=pv2) + if self.acqus['FnMODE'] == 'No': + pass + elif self.acqus['FnMODE'] == 'QF': + self.S = self.S.T + self.S, values_f1 = processing.ps(self.S, self.ppm_f1, p0=ph[0], p1=ph[1], pivot=pv1) + self.S = self.S.T + else: + self.S = processing.tp_hyper(self.S) + self.S, values_f1 = processing.ps(self.S, self.ppm_f1, p0=ph[0], p1=ph[1], pivot=pv1) + self.S = processing.tp_hyper(self.S) + else: + # Call interactive phase correction + if self.acqus['FnMODE'] == 'QF' or self.acqus['FnMODE'] == 'No': + self.S, values_f1, values_f2 = processing.interactive_phase_2D(self.ppm_f1, self.ppm_f2, self.S, False) + else: + self.S, values_f1, values_f2 = processing.interactive_phase_2D(self.ppm_f1, self.ppm_f2, self.S) + # Unpack the phased spectrum + if self.acqus['FnMODE'] == 'QF' or self.acqus['FnMODE'] == 'No': + self.rr = self.S.real + self.ii = self.S.imag + else: + rr, ir, ri, ii = processing.unpack_2D(self.S) + self.rr = rr + self.ri = ri + self.ir = ir + self.ii = ii + # update procs + self.procs['p0_2'] += round(values_f2[0], 2) + self.procs['p1_2'] += round(values_f2[1], 2) + if values_f2[2] is not None: + self.procs['pv_2'] = round(values_f2[2], 5) + self.procs['p0_1'] += round(values_f1[0], 2) + self.procs['p1_1'] += round(values_f1[1], 2) + if values_f1[2] is not None: + self.procs['pv_1'] = round(values_f1[2], 5) + + + def qfil(self, which=None, u=None, s=None): + """ + Suppress signals using qfil. + 'Which' is the number of the trace to be used. 
+ Edits only 'rr' if FnMODE is phase-sensitive + Calls processing.qfil + """ + if 'qfil' not in self.procs.keys(): + self.procs['qfil'] = {'u': u, 's': s} + if which is None: + which_list = misc.select_traces(self.ppm_f1, self.ppm_f2, self.rr, Neg=False, grid=False) + print(which_list) + which, _ = misc.ppmfind(self.ppm_f1, which_list[0][1]) + print(which, self.ppm_f1[which]) + + for key, value in self.procs['qfil'].items(): + if value is None: + self.procs['qfil']['u'], self.procs['qfil']['s'] = processing.interactive_qfil(self.ppm_f2, self.rr[which]) + break + self.S = processing.qfil(self.ppm_f2, self.S, self.procs['qfil']['u'], self.procs['qfil']['s']) + if self.acqus['FnMODE'] == 'QF': + self.rr = self.S.real + self.ii = self.S.imag + else: + self.rr, self.ir, self.ri, self.ii = processing.unpack_2D(self.S) + + def cal(self, offset=[None,None], isHz=False): + """ + Calibration of the ppm and frequency scales according to a given value, or interactively. In this latter case, a reference peak must be chosen. 
+ Calls processing.calibration + -------- + Parameters: + - offset: tuple + (scale shift F1, scale shift F2) + - isHz: tuple of bool + True if offset is in frequency units, False if offset is in ppm + """ + + def _calibrate(ppm, trace, SFO1, o1p): + offppm = processing.calibration(ppm, trace) + offhz = misc.ppm2freq(offppm, SFO1, o1p) + return offppm, offhz + + if offset[0] is None or offset[1] is None: + coord = misc.select_traces(self.ppm_f1, self.ppm_f2, self.rr, Neg=False, grid=False) + ix, iy = coord[0][0], coord[0][1] + X = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, iy, column=False) + Y = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, ix, column=True) + + if offset[1] is None: + ppm_f2 = np.copy(self.ppm_f2) + offp2, offh2 = _calibrate(ppm_f2, X, self.acqus['SFO2'], self.acqus['o2p']) + else: + if isHz: + offh2 = offset[1] + offp2 = misc.freq2ppm(offh2, self.acqus['SFO2'], self.acqus['o2p']) + else: + offp2 = offset[1] + offh2 = misc.ppm2freq(offp2, self.acqus['SFO2'], self.acqus['o2p']) + + if offset[0] is None: + ppm_f1 = np.copy(self.ppm_f1) + offp1, offh1 = _calibrate(ppm_f1, Y, self.acqus['SFO1'], self.acqus['o1p']) + else: + if isHz: + offh1 = offset[0] + offp1 = misc.freq2ppm(offh1, self.acqus['SFO1'], self.acqus['o1p']) + else: + offp1 = offset[0] + offh1 = misc.ppm2freq(offp1, self.acqus['SFO1'], self.acqus['o1p']) + + self.freq_f2 += offh2 + self.ppm_f2 += offp2 + self.freq_f1 += offh1 + self.ppm_f1 += offp1 + + + def calf2(self, value=None, isHz=False): + """ + Calibrates the ppm and frequency scale of the direct dimension according to a given value, or interactively. 
+ Calls self.cal on F2 only + ------- + Parameters: + - value: float or None + scale shift value + - isHz: bool + True if offset is in frequency units, False if offset is in ppm + """ + offset = [0, value] + self.cal(offset, isHz) + + def calf1(self, value=None, isHz=False): + """ + Calibrates the ppm and frequency scale of the indirect dimension according to a given value, or interactively. + Calls self.cal on F1 only. + ------- + Parameters: + - value: float or None + scale shift value + - isHz: bool + True if offset is in frequency units, False if offset is in ppm + """ + offset = [value, 0] + self.cal(offset, isHz) + + + + def save_acqus(self, path='sim_in_2D'): + """ + Write the acqus dictionary in a file. + Calls misc.write_acqus_2D + -------- + Parameters: + - path: str + Filename + """ + misc.write_acqus_2D(self.acqus, path=path) + + def write_ser(self, path=None): + """ + Writes the FID in binary format. + Calls misc.write_ser + -------- + Parameters: + - path: str or None + Path where to save the binary file. If it is None, the original binary file is overwritten, so BE CAREFUL!!! + """ + if path is None: + path = self.datadir + misc.write_ser(self.fid, path, self.BYTORDA, self.DTYPA) + + def projf1(self, a, b=None): + """ + Calculates the sum trace of the indirect dimension, from a to b in F2. + Store the trace in the dictionary trf1 and as 1D spectrum in Trf1. The key is 'a' or 'a:b' + Calls misc.get_trace on self.rr with column=True + ------- + Parameters: + - a: float + ppm F2 value where to extract the trace. + - b: float or None. + If it is None, extract the trace in a. Else, sum from a to b in F2. 
+ """ + # make dictionary label + if b is None: + label = str(a) + else: + label = str(a)+':'+str(b) + f1 = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, a, b, column=True) + self.trf1[label] = f1 + self.Trf1[label] = pSpectrum_1D(f1, acqus=misc.split_acqus_2D(self.acqus)[0], procs=misc.split_procs_2D(self.procs)[0], istrace=True) + + def projf2(self, a, b=None): + """ + Calculates the sum trace of the direct dimension, from a to b in F1. + Store the trace in the dictionary trf2 and as 1D spectrum in Trf2. The key is 'a' or 'a:b' + Calls misc.get_trace on self.rr with column=False + ------- + Parameters: + - a: float + ppm F1 value where to extract the trace. + - b: float or None. + If it is None, extract the trace in a. Else, sum from a to b in F1. + """ + # make dictionary label + if b is None: + label = str(a) + else: + label = str(a)+':'+str(b) + f2 = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, a, b, column=False) + self.trf2[label] = f2 + self.Trf2[label] = pSpectrum_1D(f2, acqus=misc.split_acqus_2D(self.acqus)[1], procs=misc.split_procs_2D(self.procs)[1], istrace=True) + + def integrate(self, **kwargs): + """ + Integrate the spectrum with a dedicated GUI. + Calls fit.integrate_2D + """ + self.integrals = fit.integrate_2D(self.ppm_f1, self.ppm_f2, self.rr, self.acqus['SFO1'], self.acqus['SFO2'], **kwargs) + + def write_integrals(self, filename='integrals.dat'): + """ + Write the integrals in a file named filename. + ------- + Parameters: + - filename: str + name of the file where to write the integrals. + """ + f = open(filename, 'w') + f.write('{:12}\t{:12}\t\t{:20}\n'.format('ppm F2', 'ppm F1', 'Value')) + f.write('-'*60+'\n') + for key, value in self.integrals.items(): + ppm2, ppm1 = tuple(key.split(':')) + f.write('{:12}\t{:12}\t\t{:20.5e}\n'.format(ppm2, ppm1, value)) + f.close() + + def plot(self, Neg=True, lvl0=0.2): + """ + Plots the real part of the spectrum. 
+ ------- + Parameters: + - Neg: bool + Plot (True) or not (False) the negative contours. + - lvl0: float + Starting contour value. + """ + warnings.filterwarnings("ignore", message="No contour levels were found within the data range.") + # Plots data, set Neg=True to see negative contours + S = self.rr + n_xticks, n_yticks = 10, 10 + + X_label = '$\delta\ $'+misc.nuc_format(self.acqus['nuc2'])+' /ppm' + Y_label = '$\delta\ $'+misc.nuc_format(self.acqus['nuc1'])+' /ppm' + + cmaps = [cm.Blues_r, cm.Reds_r] + + # flags for the activation of scroll zoom + lvlstep = 0.02 + + # define boxes for sliders + iz_box = plt.axes([0.925, 0.80, 0.05, 0.05]) + dz_box = plt.axes([0.925, 0.75, 0.05, 0.05]) + + # Functions connected to the sliders + def increase_zoom(event): + nonlocal lvlstep + lvlstep *= 2 + + def decrease_zoom(event): + nonlocal lvlstep + lvlstep /= 2 + + def on_scroll(event): + nonlocal livello, cnt + if Neg: + nonlocal Ncnt + + if event.button == 'up': + livello += lvlstep + elif event.button == 'down': + livello += -lvlstep + if livello <= 0: + livello = 1e-6 + elif livello > 1: + livello = 1 + + if Neg: + cnt, Ncnt = figures.redraw_contours(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cnt=cnt, Neg=Neg, Ncnt=Ncnt, lw=0.5, cmap=[cmaps[0], cmaps[1]]) + else: + cnt, _ = figures.redraw_contours(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cnt=cnt, Neg=Neg, Ncnt=None, lw=0.5, cmap=[cmaps[0], cmaps[1]]) + + misc.pretty_scale(ax, (max(self.ppm_f2), min(self.ppm_f2)), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (max(self.ppm_f1), min(self.ppm_f1)), axis='y', n_major_ticks=n_yticks) + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + misc.set_fontsizes(ax, 14) + print('{:.3f}'.format(livello), end='\r') + fig.canvas.draw() + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(15,8) + plt.subplots_adjust(left = 0.10, bottom=0.10, right=0.90, top=0.95) + ax = fig.add_subplot(1,1,1) + + contour_num = 16 + contour_factor = 1.40 + + livello = 
lvl0 + + cnt = figures.ax2D(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cmap=cmaps[0]) + if Neg: + Ncnt = figures.ax2D(ax, self.ppm_f2, self.ppm_f1, -S, lvl=livello, cmap=cmaps[1]) + + # Make pretty x-scale + misc.pretty_scale(ax, (max(self.ppm_f2), min(self.ppm_f2)), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (max(self.ppm_f1), min(self.ppm_f1)), axis='y', n_major_ticks=n_yticks) + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + + scale_factor = 1 + + # Create buttons + iz_button = Button(iz_box, label='$\\uparrow$') + dz_button = Button(dz_box, label='$\downarrow$') + + # Connect the widgets to functions + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + + iz_button.on_clicked(increase_zoom) + dz_button.on_clicked(decrease_zoom) + + misc.set_fontsizes(ax, 14) + + cursor = Cursor(ax, useblit=True, c='tab:red', lw=0.8) + + plt.show() + plt.close() + + +class pSpectrum_2D(Spectrum_2D): + """ + Subclass of Spectrum_2D that allows to handle processed 2D NMR spectra. + Reads the processed spectrum from Bruker. + """ + + def __init__(self, in_file): + """ + Initialize the class. + ------- + Parameters: + - in_file: str + Path to the spectrum. Here, the 'pdata/#' folder must be specified. 
+ """ + if in_file[-1] != '/': + in_file = in_file+'/' + warnings.filterwarnings("ignore") + dic, data = ng.bruker.read(in_file.split('pdata')[0], cplex=True) + _, self.rr = ng.bruker.read_pdata(in_file, bin_files=['2rr']) + _, self.ii = ng.bruker.read_pdata(in_file, bin_files=['2ii']) + if os.path.exists(in_file+'2ir') and os.path.exists(in_file+'2ri'): + _, self.ir = ng.bruker.read_pdata(in_file, bin_files=['2ir']) + _, self.ri = ng.bruker.read_pdata(in_file, bin_files=['2ri']) + self.S = processing.repack_2D(self.rr, self.ir, self.ri, self.ii) + else: + self.ir = np.array(np.copy(self.rr)) + self.ri = np.copy(self.ii) + self.S = self.rr + 1j*self.ii + + self.acqus = misc.makeacqus_2D(dic) + self.BYTORDA = dic['acqus']['BYTORDA'] + self.DTYPA = dic['acqus']['DTYPA'] + self.ngdic = dic + del dic + del data + + try: + self.grpdly = int(self.ngdic['acqus']['GRPDLY']) + except: + self.grpdly = 0 + + # initialize the procs dictionary with default values + wf1 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + wf2 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + proc_init_2D = ( + [wf1, wf2], # window function + [None, None], # zero-fill + [0.5, 0.5], # fcor + [0,0] # tdeff + ) + + proc_keys_1D = ['wf', 'zf', 'fcor', 'tdeff'] + self.procs = {} + for k, key in enumerate(proc_keys_1D): + self.procs[key] = proc_init_2D[k] # Processing parameters + self.procs['wf'][0]['sw'] = round(self.acqus['SW1'], 4) + self.procs['wf'][1]['sw'] = round(self.acqus['SW2'], 4) + + # Then, phases + self.procs['p0_1'] = 0 + self.procs['p1_1'] = 0 + self.procs['pv_1'] = round(self.acqus['o1p'], 2) + self.procs['p0_2'] = 0 + self.procs['p1_2'] = 0 + self.procs['pv_2'] = round(self.acqus['o2p'], 2) + + # Calculates the frequency and ppm scales + self.freq_f1 = processing.make_scale(self.rr.shape[0], dw=self.acqus['dw1']) + if self.acqus['SFO1'] < 0: + self.freq_f1 = self.freq_f1[::-1] + self.ppm_f1 = misc.freq2ppm(self.freq_f1, 
B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + + self.freq_f2 = processing.make_scale(self.rr.shape[1], dw=self.acqus['dw2']) + if self.acqus['SFO2'] < 0: + self.freq_f2 = self.freq_f2[::-1] + self.ppm_f2 = misc.freq2ppm(self.freq_f2, B0=self.acqus['SFO2'], o1p=self.acqus['o2p']) + + # Create empty dictionary where to save the projections + self.trf1 = {} + self.trf2 = {} + self.Trf1 = {} + self.Trf2 = {} + + def write_ser(self): + """Overwrite the original function to prevent writing of the binary file. It does nothing!""" + pass + +class Pseudo_2D(Spectrum_2D): + """ Pseudo_2D experiment """ + + def __str__(self): + doc = '-'*64 + doc += '\nPseudo_2D object.\n' + if 'ngdic' in self.__dict__.keys(): + doc += f'Read from "{self.datadir}"\n' + else: + doc += f'Simulated from "{self.datadir}"\n' + doc += f'It is a {self.acqus["nuc"]} spectrum recorded over a\nsweep width of {self.acqus["SWp"]} ppm, centered at {self.acqus["o1p"]} ppm.\n' + if self.fid is None: + doc += 'The FID is not present yet.' + else: + N = self.fid.shape + doc += f'The FID consists of {N[0]} experiments, each one is {N[1]} points long.\n' + doc += '-'*64 + return doc + + def __init__(self, in_file, fid=None, pv=False, isexp=True): + """ + Initialize the class. + ------- + Parameters: + - in_file: str + path to file to read, or to the folder of the spectrum + - fid: 2darray or None + Array that replaces self.fid. 
+ - isexp: bool + True if this is an experimental dataset, False if it is simulated + - pv: bool + True if you want to use pseudo-voigt lineshapes for simulation, False for Voigt + """ + self.datadir = in_file + if isexp is False: + self.acqus = sim.load_sim_1D(in_file) + self.fid = fid + else: + dic, data = ng.bruker.read(in_file, cplex=True) + self.fid = data + self.acqus = misc.makeacqus_1D(dic) + self.BYTORDA = dic['acqus']['BYTORDA'] + self.DTYPA = dic['acqus']['DTYPA'] + self.ngdic = dic + del dic + del data + + try: + self.grpdly = int(self.ngdic['acqus']['GRPDLY']) + except: + self.grpdly = 0 + + # Initalize the procs dictionary with default values + proc_keys_1D = ['wf', 'zf', 'fcor', 'tdeff'] + wf0 = { + 'mode':None, + 'ssb':2, + 'lb':5, + 'gb':10, + 'gc':0, + 'sw':None + } + proc_init_1D = (wf0, None, 0.5, 0) + + self.procs = { + } + for k, key in enumerate(proc_keys_1D): + self.procs[key] = proc_init_1D[k] # Processing parameters + self.procs['wf']['sw'] = round(self.acqus['SW'], 4) + # Then, phases + self.procs['p0'] = 0 + self.procs['p1'] = 0 + self.procs['pv'] = round(self.acqus['o1p'], 2) + + def convdta(self, scaling=1): + """ Calls processing.convdta """ + self.fid = processing.convdta(self.fid, self.grpdly, scaling) + + def process(self): + """ + Process only the direct dimension. 
+ Calls processing.fp on each transient + """ + if self.procs['zf'] is None: + self.S = np.zeros_like(self.fid) + else: + self.S = np.zeros((self.fid.shape[0], self.procs['zf'])).astype(self.fid.dtype) + + for k in range(self.fid.shape[0]): + self.S[k] = processing.fp(self.fid[k], wf=self.procs['wf'], zf=self.procs['zf'], fcor=self.procs['fcor'], tdeff=self.procs['tdeff']) + + self.freq_f2 = processing.make_scale(self.S.shape[1], dw=self.acqus['dw']) + if self.acqus['SFO1'] < 0: + self.freq_f2 = self.freq_f2[::-1] + self.ppm_f2 = misc.freq2ppm(self.freq_f2, B0=self.acqus['SFO1'], o1p=self.acqus['o1p']) + + if self.acqus['SFO1'] < 0: + self.S = self.S[:,::-1] + + self.rr = self.S.real + self.ii = self.S.imag + + self.freq_f1 = np.arange(self.S.shape[0]) + self.ppm_f1 = np.arange(self.S.shape[0]) + + self.integrals = {} + + # Create empty dictionary where to save the projections + self.trf1 = {} + self.trf2 = {} + self.Trf1 = {} + self.Trf2 = {} + + + def adjph(self, expno=0, p0=None, p1=None, pv=None): + """ + Adjusts the phases of the spectrum according to the given parameters, or interactively if they are left as default. + ------- + Parameters: + - expno: int + Number of the experiment (python numbering) to use in the interactive panel + - p0: float or None + 0-th order phase correction /° + - p1: float or None + 1-st order phase correction /° + - pv: float or None + 1-st order pivot /ppm + """ + S = self.S[expno] + # Adjust the phases + _, values = processing.ps(S, self.ppm_f2, p0=p0, p1=p1, pivot=pv) + self.S, _ = processing.ps(self.S, self.ppm_f2, *values) + + self.rr = self.S.real + self.ii = self.S.imag + + self.procs['p0'] += round(values[0], 2) + self.procs['p1'] += round(values[1], 2) + if values[2] is not None: + self.procs['pv'] = round(values[2], 5) + + + def projf1(self, a, b=None): + """ + Calculates the sum trace of the indirect dimension, from a to b in F2. + Store the trace in the dictionary trf1 and as 1D spectrum in Trf1. 
The key is 'a' or 'a:b' + ------- + Parameters: + - a: float + ppm F2 value where to extract the trace. + - b: float or None. + If it is None, extract the trace in a. Else, sum from a to b in F2. + """ + # make dictionary label + if b is None: + label = str(a) + else: + label = str(a)+':'+str(b) + f1 = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, a, b, column=True) + self.trf1[label] = f1 + self.Trf1[label] = pSpectrum_1D(f1, acqus=self.acqus, procs=self.procs, istrace=True) + self.Trf1[label].freq = np.copy(self.freq_f1) + self.Trf1[label].ppm = np.copy(self.ppm_f1) + + def projf2(self, a, b=None): + """ + Calculates the sum trace of the direct dimension, from a to b in F1. + Store the trace in the dictionary trf2 and as 1D spectrum in Trf2. The key is 'a' or 'a:b' + ------- + Parameters: + - a: float + ppm F1 value where to extract the trace. + - b: float or None. + If it is None, extract the trace in a. Else, sum from a to b in F1. + """ + # make dictionary label + if b is None: + label = str(a) + else: + label = str(a)+':'+str(b) + f2 = misc.get_trace(self.rr, self.ppm_f2, self.ppm_f1, a, b, column=False) + self.trf2[label] = f2 + self.Trf2[label] = pSpectrum_1D(f2, acqus=self.acqus, procs=self.procs, istrace=True) + + def plot(self, Neg=True, lvl0=0.2, Y_label=''): + """ + Plots the real part of the spectrum. + ------- + Parameters: + - Neg: bool + Plot (True) or not (False) the negative contours. + - lvl0: float + Starting contour value. 
+ """ + warnings.filterwarnings("ignore", message="No contour levels were found within the data range.") + # Plots data, set Neg=True to see negative contours + S = np.copy(self.rr) + n_xticks, n_yticks = 10, 10 + + X_label = '$\delta\ $'+misc.nuc_format(self.acqus['nuc'])+' /ppm' + + cmaps = [cm.Blues_r, cm.Reds_r] + + # flags for the activation of scroll zoom + lvlstep = 0.02 + + # define boxes for sliders + iz_box = plt.axes([0.925, 0.80, 0.05, 0.05]) + dz_box = plt.axes([0.925, 0.75, 0.05, 0.05]) + + # Functions connected to the sliders + def increase_zoom(event): + nonlocal lvlstep + lvlstep *= 2 + + def decrease_zoom(event): + nonlocal lvlstep + lvlstep /= 2 + + def on_scroll(event): + nonlocal livello, cnt + if Neg: + nonlocal Ncnt + + if event.button == 'up': + livello += lvlstep + elif event.button == 'down': + livello += -lvlstep + if livello <= 0: + livello = 1e-6 + elif livello > 1: + livello = 1 + + if Neg: + cnt, Ncnt = figures.redraw_contours(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cnt=cnt, Neg=Neg, Ncnt=Ncnt, lw=0.5, cmap=[cmaps[0], cmaps[1]]) + else: + cnt, _ = figures.redraw_contours(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cnt=cnt, Neg=Neg, Ncnt=None, lw=0.5, cmap=[cmaps[0], cmaps[1]]) + + misc.pretty_scale(ax, (max(self.ppm_f2), min(self.ppm_f2)), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (max(self.ppm_f1), min(self.ppm_f1)), axis='y', n_major_ticks=n_yticks) + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + misc.set_fontsizes(ax, 14) + print('{:.3f}'.format(livello), end='\r') + fig.canvas.draw() + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(15,8) + plt.subplots_adjust(left = 0.10, bottom=0.10, right=0.90, top=0.95) + ax = fig.add_subplot(1,1,1) + + contour_num = 16 + contour_factor = 1.40 + + livello = lvl0 + + cnt = figures.ax2D(ax, self.ppm_f2, self.ppm_f1, S, lvl=livello, cmap=cmaps[0]) + if Neg: + Ncnt = figures.ax2D(ax, self.ppm_f2, self.ppm_f1, -S, lvl=livello, cmap=cmaps[1]) + + # Make 
pretty x-scale + misc.pretty_scale(ax, (max(self.ppm_f2), min(self.ppm_f2)), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (max(self.ppm_f1), min(self.ppm_f1)), axis='y', n_major_ticks=n_yticks) + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + + scale_factor = 1 + + # Create buttons + iz_button = Button(iz_box, label='$\\uparrow$') + dz_button = Button(dz_box, label='$\downarrow$') + + # Connect the widgets to functions + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + + iz_button.on_clicked(increase_zoom) + dz_button.on_clicked(decrease_zoom) + + misc.set_fontsizes(ax, 14) + + cursor = Cursor(ax, useblit=True, c='tab:red', lw=0.8) + + plt.show() + plt.close() + + def plot_md(self, which='all', lims=None): + """ + Plot a number of experiments, superimposed. + -------- + Parameters: + - which: str + List of experiment indexes, so that eval(which) is meaningful + - lims: tuple + Region of the spectrum to show (ppm1, ppm2) + """ + if 'all' in which: + which_exp = np.arange(self.rr.shape[0]) + else: + which_exp = eval(which) + ppm = np.copy(self.ppm_f2) + S = [np.copy(self.rr[w]) for w in which_exp] + + if lims is not None: + for k, s in enumerate(S): + _, S[k] = misc.trim_data(ppm, s, *lims) + ppm, _ = misc.trim_data(ppm, s, *lims) + + figures.dotmd(ppm, S, labels=[f'{w}' for w in which_exp]) + + def plot_stacked(self, which='all', lims=None): + """ + Plot a number of experiments, stacked. 
+ -------- + Parameters: + - which: str + List of experiment indexes, so that eval(which) is meaningful + - lims: tuple + Region of the spectrum to show (ppm1, ppm2) + """ + if 'all' in which: + which_exp = np.arange(self.rr.shape[0]) + else: + which_exp = eval(which) + ppm = np.copy(self.ppm_f2) + S = [np.copy(self.rr[w]) for w in which_exp] + + if lims is not None: + for k, s in enumerate(S): + _, S[k] = misc.trim_data(ppm, s, *lims) + ppm, _ = misc.trim_data(ppm, s, *lims) + + X_label = '$\delta\ $'+misc.nuc_format(self.acqus['nuc'])+' /ppm' + + figures.stacked_plot( + ppm, S, + X_label=X_label, Y_label='Normalized intensity /a.u.', + labels=[f'{w}' for w in which_exp]) + + + def integrate(self, which=0, lims=None): + """ + Integrate the spectrum with a dedicated GUI. + Calls processing.integral on each experiment, then saves the results in self.integrals. + If lims is not given, calls fit.integrate to select the regions to integrate. + -------- + Parameters: + - which: int + Experiment index to show in interactive panel + - lims: tuple + Region of the spectrum to integrate (ppm1, ppm2) + """ + if lims is None: + X_label = '$\delta\,$'+misc.nuc_format(self.acqus['nuc'])+' /ppm' + integrals = fit.integrate(self.ppm_f2, self.rr[which], X_label=X_label) + for key, _ in integrals.items(): + if ':' in key: + lims = [eval(q) for q in key.split(':')] # trasforma stringa in float!!! + self.integrals[key] = [processing.integral(self.rr[k], self.ppm_f2, lims)[-1] for k in range(self.rr.shape[0])] + else: + self.integrals[key] = np.array(integrals[key]) + + else: + self.integrals[f'{lims[0]:.2f}:{lims[1]:.2f}'] = np.array(processing.integral(self.rr, self.ppm_f2, lims)[...,-1]) + + + + + + + + diff --git a/klassez/__init__.py b/klassez/__init__.py new file mode 100644 index 0000000..d789410 --- /dev/null +++ b/klassez/__init__.py @@ -0,0 +1,26 @@ +#! 
/usr/bin/env python3 + +import os +import sys +import numpy as np +from scipy import linalg, stats +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.cm as cm +import seaborn as sns +from pprint import pprint as Print + +from . import fit, misc, sim, figures, processing +from .Spectra import Spectrum_1D, pSpectrum_1D, Spectrum_2D, pSpectrum_2D, Pseudo_2D + +__version__ = '0.1a.1' + +# Use seaborn's colormaps and save it to a dictionary +from .config import CM, COLORS + +def open_doc(): + """ Open the documentation .pdf file in the browser. """ + import webbrowser + webbrowser.open_new(__doc__) + +__doc__ = f'{__path__[0]}/docs/klassez.pdf' diff --git a/klassez/config.py b/klassez/config.py new file mode 100644 index 0000000..b21a460 --- /dev/null +++ b/klassez/config.py @@ -0,0 +1,42 @@ +#! /usr/bin/env python3 + +import seaborn as sns + + +# Use seaborn's colormaps and save it to a dictionary +CMapsNames = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r', 'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r', 'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r', 'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r', 'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r', 'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBuGn', 'PuBuGn_r', 'PuBu_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r', 'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r', 'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r', 'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r', 'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Wistia', 'Wistia_r', 'YlGn', 'YlGnBu', 'YlGnBu_r', 'YlGn_r', 'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r', 'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r', 'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r', 'cividis', 'cividis_r', 'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r', 'cubehelix', 'cubehelix_r', 'flag', 'flag_r', 'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r', 
'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r', 'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r', 'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r', 'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r', 'icefire', 'icefire_r', 'inferno', 'inferno_r', 'magma', 'magma_r', 'mako', 'mako_r', 'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r', 'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r', 'rainbow', 'rainbow_r', 'rocket', 'rocket_r', 'seismic', 'seismic_r', 'spring', 'spring_r', 'summer', 'summer_r', 'tab10', 'tab10_r', 'tab20', 'tab20_r', 'tab20b', 'tab20b_r', 'tab20c', 'tab20c_r', 'terrain', 'terrain_r', 'twilight', 'twilight_r', 'twilight_shifted', 'twilight_shifted_r', 'viridis', 'viridis_r', 'vlag', 'vlag_r', 'winter', 'winter_r'] + +global CM, COLORS +CM = {} +for key in CMapsNames: + CM[key] = sns.color_palette(key, as_cmap=True) + + +# List of colors + + +colors = [ +'tab:blue', +'tab:red', +'tab:green', +'tab:orange', +'tab:cyan', +'tab:purple', +'tab:pink', +'tab:gray', +'tab:brown', +'tab:olive', +'salmon', +'indigo', +'m', +'c', +'g', +'r', +'b', +'k', +] + +for w in range(10): + colors += colors +COLORS = tuple(colors) + diff --git a/klassez/docs/klassez.pdf b/klassez/docs/klassez.pdf new file mode 100644 index 0000000..32f0bcc Binary files /dev/null and b/klassez/docs/klassez.pdf differ diff --git a/klassez/figures.py b/klassez/figures.py new file mode 100644 index 0000000..a6b7510 --- /dev/null +++ b/klassez/figures.py @@ -0,0 +1,1469 @@ +#! 
/usr/bin/env python3 + +import os +import sys +import numpy as np +from scipy import linalg, stats +from scipy.spatial import ConvexHull +import random +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from matplotlib.widgets import Slider, Button, RadioButtons, TextBox, CheckButtons, Cursor, LassoSelector +from matplotlib.path import Path +import seaborn as sns +import nmrglue as ng +import lmfit as l +from datetime import datetime +import warnings + +from . import fit, misc, sim, figures, processing +#from .__init__ import CM +s_colors=[ 'tab:cyan', 'tab:red', 'tab:green', 'tab:purple', 'tab:pink', 'tab:gray', 'tab:brown', 'tab:olive', 'salmon', 'indigo' ] + +from .config import CM, COLORS + +figsize_small = (3.59, 2.56) +figsize_large = (15, 8) + +warnings.filterwarnings("ignore", message="No contour levels were found within the data range.") + + +def heatmap(data, zlim='auto', z_sym=True, cmap=None, xscale=None, yscale=None, rev=(False, False), n_xticks=10, n_yticks=10, n_zticks=10, fontsize=10, name=None): + """ + Computes a heatmap of data. + -------- + Parameters: + - data: 2darray + Input data + - zlim: tuple or 'auto' or 'abs' + Vertical limits of the heatmap, that determines the extent of the colorbar. 'auto' means (min(data), max(data)), 'abs' means(min(|data|), max(|data|)). + - z_sym: bool + True to symmetrize the vertical scale around 0. + - cmap: matplotlib.cm object + Colormap of the heatmap. + - xscale: 1darray or None + x-scale. None means np.arange(data.shape[1]) + - yscale: 1darray or None + y-scale. None means np.arange(data.shape[0]) + - rev: tuple of bool + Reverse scale (x, y). + - n_xticks: int + Number of ticks of the x axis + - n_yticks: int + Number of ticks of the y axis + - n_zticks: int + Number of ticks of the color bar + - fontsize: float + Biggest font size to apply to the figure. + - name: str or None + Filename for the figure. Set to None to show the figure. 
+ """ + print('Computing heatmap...', end='\r') + + # Check if data is real + if np.iscomplexobj(data): + data = data.real + + # Set zlim + if zlim == 'auto': + zlim = np.min(data), np.max(data) + elif zlim == 'abs': + zlim = np.min(np.abs(data)), np.max(np.abs(data)) + + # Symmetrize z + if z_sym is True: + zlim = -max(zlim), max(zlim) + + # Set default cmap + if cmap is None: + cmap = CM['icefire_r'] + + # Set default scales + if xscale is None: + xscale = np.arange(data.shape[-1]) + if yscale is None: + yscale = np.arange(data.shape[0]) + + # Set extent according to rev + if rev == (False, False): # do not reverse + extent = min(xscale), max(xscale), min(yscale), max(yscale) + elif rev == (True, False): # reverse only x + extent = max(xscale), min(xscale), min(yscale), max(yscale) + elif rev == (False, True): # reverse only y + extent = min(xscale), max(xscale), max(yscale), min(yscale) + elif rev == (True, True): # reverse both + extent = max(xscale), min(xscale), max(yscale), min(yscale) + + # Create figure panel + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.15, top=0.90, right=0.85) + ax = fig.add_subplot() + + # Divide the ax subplot to make space for the colorbar + from mpl_toolkits.axes_grid1 import make_axes_locatable + divider = make_axes_locatable(ax) + cax = divider.append_axes('right', size='2.5%', pad=0.10) + + # Plot data + im = ax.imshow(data, aspect='auto', cmap=cmap, vmin=zlim[0], vmax=zlim[1], extent=extent) + + # Make colorbar + plt.colorbar(im, cax=cax, orientation='vertical') + + # Customize appearance + # x-axis + misc.pretty_scale(ax, (extent[0], extent[1]), axis='x', n_major_ticks=n_xticks) + # y-axis + misc.pretty_scale(ax, (extent[2], extent[3]), axis='y', n_major_ticks=n_yticks) + # colorbar y-axis + misc.pretty_scale(cax, zlim, axis='y', n_major_ticks=n_zticks) + misc.mathformat(cax) + # fontsizes + misc.set_fontsizes(ax, fontsize) + misc.set_fontsizes(cax, fontsize) + + if name: + 
# Save the figure + print('Saving {}.png...'.format(name), end='\r') + plt.savefig(name+'.png', dpi=600) + print('{}.png saved.\n'.format(name)) + else: + # Make figure larger + fig.set_size_inches(figsize_large) + # Increase fontsize + misc.set_fontsizes(ax, 14) + misc.set_fontsizes(cax, 14) + # Show + plt.show() + print('\n') + plt.close() + + +def ax_heatmap(ax, data, zlim='auto', z_sym=True, cmap=None, xscale=None, yscale=None, rev=(False, False), n_xticks=10, n_yticks=10, n_zticks=10, fontsize=10): + """ + Computes a heatmap of data on the given 'ax' + -------- + Parameters: + - ax: matplotlib.Subplot object + Panel where to draw the heatmap + - data: 2darray + Input data + - zlim: tuple or 'auto' or 'abs' + Vertical limits of the heatmap, that determines the extent of the colorbar. 'auto' means (min(data), max(data)), 'abs' means(min(|data|), max(|data|)). + - z_sym: bool + True to symmetrize the vertical scale around 0. + - cmap: matplotlib.cm object + Colormap of the heatmap. + - xscale: 1darray or None + x-scale. None means np.arange(data.shape[1]) + - yscale: 1darray or None + y-scale. None means np.arange(data.shape[0]) + - rev: tuple of bool + Reverse scale (x, y). + - n_xticks: int + Number of ticks of the x axis + - n_yticks: int + Number of ticks of the y axis + - n_zticks: int + Number of ticks of the color bar + - fontsize: float + Biggest font size to apply to the figure. 
+ ------- + Returns: + - im: matplotlib.AxesImage + The heatmap + - cax: figure panel where the colorbar is drawn + """ + + # Check if data is real + if np.iscomplexobj(data): + data = data.real + + # Set zlim + if zlim == 'auto': + zlim = np.min(data), np.max(data) + elif zlim == 'abs': + zlim = np.min(np.abs(data)), np.max(np.abs(data)) + + # Symmetrize z + if z_sym is True: + zlim = -max(zlim), max(zlim) + + # Set default cmap + if cmap is None: + cmap = CM['icefire_r'] + + # Set default scales + if xscale is None: + xscale = np.arange(data.shape[-1]) + if yscale is None: + yscale = np.arange(data.shape[0]) + + # Set extent according to rev + if rev == (False, False): # do not reverse + extent = min(xscale), max(xscale), min(yscale), max(yscale) + elif rev == (True, False): # reverse only x + extent = max(xscale), min(xscale), min(yscale), max(yscale) + elif rev == (False, True): # reverse only y + extent = min(xscale), max(xscale), max(yscale), min(yscale) + elif rev == (True, True): # reverse both + extent = max(xscale), min(xscale), max(yscale), min(yscale) + + # Divide the ax subplot to make space for the colorbar + from mpl_toolkits.axes_grid1 import make_axes_locatable + from matplotlib.ticker import StrMethodFormatter + divider = make_axes_locatable(ax) + cax = divider.append_axes('right', size='2.5%', pad=0.10) + + # Plot data + im = ax.imshow(data, aspect='auto', cmap=cmap, vmin=zlim[0], vmax=zlim[1], extent=extent) + + # Make colorbar + plt.colorbar(im, cax=cax, orientation='vertical') + + # Customize appearance + # x-axis + misc.pretty_scale(ax, (extent[0], extent[1]), axis='x', n_major_ticks=n_xticks) + # y-axis + misc.pretty_scale(ax, (extent[2], extent[3]), axis='y', n_major_ticks=n_yticks) + # colorbar y-axis + misc.pretty_scale(cax, zlim, axis='y', n_major_ticks=n_zticks) + misc.mathformat(cax) + # fontsizes + misc.set_fontsizes(ax, fontsize) + misc.set_fontsizes(cax, fontsize) + + # Return the heatmap and the colorbar axis + return im, cax + + + 
+def sns_heatmap(data, name=None):
+    """
+    Computes a heatmap of "data", which is a matrix.
+    Specify "name" if you want to save the figure.
+    """
+    # Discard the imaginary part: only real values can be mapped to colors
+    data = data.real
+
+    fig = plt.figure()
+    fig.set_size_inches(figsize_small)
+    ax = fig.add_subplot(1,1,1)
+    # Use scientific notation on the colorbar for values outside 10^-2 .. 10^2
+    formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)
+    formatter.set_scientific(True)
+    formatter.set_powerlimits((-2, 2))
+
+    # center=0 pins the colormap midpoint at zero intensity
+    ax = sns.heatmap(data, center=0, linewidth=0, cbar_kws={'format': formatter})
+    ax.tick_params(labelsize=7)
+    ax.set_xlabel('F2', fontsize=8)
+    ax.set_ylabel('F1', fontsize=8)
+    # axes[-1] is the colorbar axis that seaborn appends to the figure
+    ax.figure.axes[-1].yaxis.get_offset_text().set_size(7)
+    ax.figure.axes[-1].tick_params(labelsize=7)
+    fig.tight_layout()
+    if name:
+        plt.savefig(name+'.png', format='png', dpi=600)
+    else:
+        plt.show()
+    plt.close()
+
+
+def plot_fid_re(fid, scale=None, c='b', lims=None, name=None):
+    """
+    Makes a single-panel figure that shows either the real or the imaginary part of the FID.
+    The x-scale and y-scale are automatically adjusted.
+ """ + + + size = fid.shape[-1] + fid = fid.flatten() + n_trans = fid.shape[-1]//size + if lims is None: + if n_trans >1: + lims = (0,n_trans) + else: + lims = (0,size) + + if scale is None: + if n_trans > 1: + scale = np.empty(1) + for i in range(n_trans): + temp_scale = i + np.linspace(0, 1, size) + scale = np.concatenate((scale, temp_scale), axis=-1) + scale = np.delete(scale, 0) + else: + scale = np.arange(size) + + + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.90) + ax1 = fig.add_subplot(1,1,1) + ax1.axhline(0, ls='-', c='k', lw=0.2) + ax1.plot(scale, fid.real, c=c, lw=0.5) + ax1.set_xlim(lims) + ax1.set_xlabel('# points', fontsize=8) + ax1.set_ylabel('Intensity /a.u.', fontsize=8) + + misc.set_ylim(ax1, [np.abs(fid.real), -np.abs(fid.real)]) + misc.mathformat(ax1, axis='y') + + + if name: + misc.set_fontsizes(ax1, 10) + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(ax1, 14) + plt.show() + plt.close() + +def plot_fid(fid, name=None): + """ + Makes a two-panel figure that shows on the left the real part of the FID, on the right the imaginary part. + The x-scale and y-scale are automatically adjusted. 
+ """ + + size = fid.shape[-1] + fid = fid.flatten() + n_trans = fid.shape[-1]//size + scale = np.empty(1) + for i in range(n_trans): + temp_scale = i + np.linspace(0, 1, size) + scale = np.concatenate((scale, temp_scale), axis=-1) + scale = np.delete(scale, 0) + + fig = plt.figure() + fig.set_size_inches(5.50, 2.56) + plt.subplots_adjust(left=0.1, bottom=0.1, top=0.95, right=0.95, wspace=0.20) + ax1 = fig.add_subplot(1,2,1) + ax2 = fig.add_subplot(1,2,2) + + ax1.set_title('Real channel', fontsize=8) + ax2.set_title('Imaginary channel', fontsize=8) + + ax1.axhline(0, ls='-', c='k', lw=0.2) + ax1.plot(scale, fid.real, c='b', lw=0.1) + ax2.axhline(0, ls='-', c='k', lw=0.2) + ax2.plot(scale, fid.imag, c='r', lw=0.1) + + ax1.set_xticks(np.linspace(0, n_trans, n_trans+1)) + ax1.set_xticks(np.arange(0, n_trans, 0.2), minor=True) + ax1.set_xlim(0, n_trans) + ax2.set_xticks(np.linspace(0, n_trans, n_trans+1)) + ax2.set_xticks(np.arange(0, n_trans, 0.2), minor=True) + ax2.set_xlim(0, n_trans) + ax2.tick_params(axis='y', labelleft=False) + + misc.set_ylim(ax1, [np.abs(fid.real), -np.abs(fid.real)]) + misc.set_ylim(ax2, [np.abs(fid.real), -np.abs(fid.real)]) + misc.mathformat(ax1, axis='y') + misc.mathformat(ax2, axis='y') + + if name: + misc.set_fontsizes(10) + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(14) + plt.show() + plt.close() + +def figure2D(ppm_f2, ppm_f1, datax, xlims=None, ylims=None, cmap=None, c_fac=1.4, lvl=0.09, name=None, X_label='$\delta\ $ F2 /ppm', Y_label='$\delta\ $ F1 /ppm', lw=0.5, Negatives=False, cmapneg=None, n_xticks=10, n_yticks=10): + """ + Creates the contour plot of a 2D NMR spectrum. 
It requires: + - ppm_f2 and ppm_f1: ppm scales of the direct and indirect dimension, respectively; + - datax: the 2D NMR spectrum; + - xsx, xdx, ysx, ydx: axis limits; + - lvl: height respect to maximum at which the contour are computed; + - name: filename of the figure, if it has to be saved; + - X_label, Y_label: text of the X and Y axis; + - lw: linewidth of the contours + - Negatives: set it to "True" if you want to see the negative part of the spectrum, in red. + - spacex, spacey: spaces between the ticks for the axes + """ + + swapped_scales = len(ppm_f2) == datax.shape[0] and len(ppm_f1) == datax.shape[1] + if swapped_scales: + raise AssertionError('Swapped scales!') + + if cmap is None: + cmap = cm.Greys_r + + if xlims is None: + xsx, xdx = max(ppm_f2), min(ppm_f2) + else: + xsx, xdx = max(xlims), min(xlims) + if ylims is None: + ysx, ydx = max(ppm_f1), min(ppm_f1) + else: + ysx, ydx = max(ylims), min(ylims) + + norm = np.max(datax) + contour_start = norm*lvl + contour_num = 16 + contour_factor = c_fac + # calculate contour levels + cl = contour_start * contour_factor ** np.arange(contour_num) + + + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.95) + ax = fig.add_subplot(1,1,1) + ax.contour(ppm_f2, ppm_f1, datax, cl, cmap=cmap, extent=(min(ppm_f2), max(ppm_f2), max(ppm_f1), min(ppm_f1)), linewidths=lw) + + if Negatives: # Plot the negative part of the spectrum + if cmapneg is None: + cmapneg = cm.Reds_r + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="No contour levels were found within the data range.") + ax.contour(ppm_f2, ppm_f1, -datax, cl, cmap=cmapneg, extent=(min(ppm_f2), max(ppm_f2), max(ppm_f1), min(ppm_f1)), linewidths=lw) + + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + + misc.set_fontsizes(ax, 10) + 
+ if name: + print( 'Saving '+name+'.png...') + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(ax, 14) + plt.show() + plt.close() + print( 'Done.') + +def ax2D(ax, ppm_f2, ppm_f1, datax, xlims=None, ylims=None, cmap=None, c_fac=1.4, lvl=0.1, lw=0.5, X_label='$\delta\,$F2 /ppm', Y_label='$\delta\,$F1 /ppm', title=None, n_xticks=10, n_yticks=10, fontsize=10): + """ + Adds a 2D plot in the 'ax' subplot. Allows for modular figures setup. + """ + + swapped_scales = len(ppm_f2) == datax.shape[0] and len(ppm_f1) == datax.shape[1] + if swapped_scales: + raise AssertionError('Swapped scales!') + + if cmap is None: + cmap = cm.Greys_r + + if xlims is None: + xsx, xdx = max(ppm_f2), min(ppm_f2) + else: + xsx, xdx = max(xlims), min(xlims) + if ylims is None: + ysx, ydx = max(ppm_f1), min(ppm_f1) + else: + ysx, ydx = max(ylims), min(ylims) + + norm = np.max(np.abs(datax)) + contour_start = norm*lvl + contour_num = 16 + contour_factor = c_fac + # calculate contour levels + cl = contour_start * contour_factor ** np.arange(contour_num) + + cnt = ax.contour(ppm_f2, ppm_f1, datax, cl, cmap=cmap, extent=(min(ppm_f2), max(ppm_f2), max(ppm_f1), min(ppm_f1)), linewidths=lw) + + if X_label is not None: + ax.set_xlabel(X_label) + if Y_label is not None: + ax.set_ylabel(Y_label) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + + if title: + ax.set_title(title) + misc.set_fontsizes(ax, fontsize) + + return cnt + + + + +def figure2D_multi(ppm_f2, ppm_f1, datax, xlims=None, ylims=None, lvl='default', name=None, X_label='$\delta\ $ F2 /ppm', Y_label='$\delta\ $ F1 /ppm', lw=0.5, Negatives=False, n_xticks=10, n_yticks=10, labels=None): + """ + Creates the contour plot of a 2D NMR spectrum. 
It requires: + - ppm_f2 and ppm_f1: ppm scales of the direct and indirect dimension, respectively; + - datax: the 2D NMR spectrum; + - xsx, xdx, ysx, ydx: axis limits; + - lvl: height respect to maximum at which the contour are computed; + - name: filename of the figure, if it has to be saved; + - X_label, Y_label: text of the X and Y axis; + - lw: linewidth of the contours + - Negatives: set it to "True" if you want to see the negative part of the spectrum, in red. + - spacex, spacey: spaces between the ticks for the axes + """ + + nsp = len(datax) + cmaps = [cm.Blues_r, cm.Reds_r, cm.Greens_r, cm.Greys_r, cm.Purples_r, cm.Oranges_r, cm.YlOrBr_r, cm.YlOrRd_r, cm.OrRd_r, cm.PuRd_r, cm.RdPu_r, cm.BuPu_r, cm.GnBu_r, cm.PuBu_r, cm.YlGnBu_r, cm.PuBuGn_r, cm.BuGn_r, cm.YlGn] + + # Labels of the spectra that appear in the legend + if not labels: + labels = [] + for k in range(nsp): + labels.append(str(k+1)) + + if xlims is None: + xsx, xdx = max(ppm_f2), min(ppm_f2) + else: + xsx, xdx = max(xlims), min(xlims) + if ylims is None: + ysx, ydx = max(ppm_f1), min(ppm_f1) + else: + ysx, ydx = max(ylims), min(ylims) + + + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.2) + ax = fig.add_subplot(1,1,1) + + contour_num = 16 + contour_factor = 1.40 + if lvl == 'default': + lvl = np.ones(nsp) * 0.1 + norm = [] + contour_start = [] + cl = [] + cnt = [] + for k in range(nsp): + norm.append(np.max(np.abs(datax[k]))) + contour_start.append(norm[k] * lvl[k]) + # calculate contour levels + cl.append(norm[k] * lvl[k] * contour_factor ** np.arange(contour_num)) + cntt = ax.contour(ppm_f2, ppm_f1, datax[k], cl[k], cmap=cmaps[k], extent=(min(ppm_f2), max(ppm_f2), max(ppm_f1), min(ppm_f1)), linewidths=0.5) + cnt.append(cntt) + for i in range(len(labels)): + cnt[i].collections[i].set_label(labels[i]) + + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + 
misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + ax.legend() + + if name: + misc.set_fontsizes(10) + print( 'Saving '+name+'.png...') + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(14) + plt.show() + plt.close() + print( 'Done.') + + + + + + +def figure1D(ppm, data, norm=False, xlims=None, ylims=None, c='b', lw=0.5, name=None, X_label='$\delta\ $ F1 /ppm', Y_label='Intensity /a.u.', n_xticks=10, n_yticks=10, hideylabels=False): + """ + Creates the contour plot of a 2D NMR spectrum. It requires: + - ppm_f2 and ppm_f1: ppm scales of the direct and indirect dimension, respectively; + - datax: the 2D NMR spectrum; + - xlims, ylims: tuple of axis limits; + - lvl: height respect to maximum at which the contour are computed; + - name: filename of the figure, if it has to be saved; + - lw = linewith of the line + - X_label, Y_label: text of the X and Y axis; + - Negatives: set it to "True" if you want to see the negative part of the spectrum, in red. + """ + if np.iscomplexobj(data): + data = np.copy(data.real) + + if xlims is None: + xsx, xdx = max(ppm), min(ppm) + else: + xsx, xdx = max(xlims[0], xlims[1]), min(xlims[0], xlims[1]) + + if norm: + data = data/np.max(np.abs(data)) + if Y_label=='Intensity /a.u.': + Y_label='Normalized Intensity /a.u.' 
+ + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.90) + ax = fig.add_subplot(1,1,1) + ax.plot(ppm, data, lw=lw, c=c) + + misc.set_ylim(ax, data) + + if ylims is None: + ysx, ydx = ax.get_ylim() + else: + ysx, ydx = min(ylims), max(ylims) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + + if hideylabels: + ax.tick_params(axis='y', which='both', left=False, labelleft=False) + + ax.set_xlabel(X_label) + if not hideylabels: + ax.set_ylabel(Y_label) + misc.mathformat(ax, axis='y') + + if name: + misc.set_fontsizes(ax, 10) + print( 'Saving '+name+'.png...') + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(ax, 14) + plt.show() + plt.close() + print( 'Done.') + + +def ax1D(ax, ppm, data, norm=False, xlims=None, ylims=None, c='b', lw=0.5, X_label='$\delta\ $ F1 /ppm', Y_label='Intensity /a.u.', n_xticks=10, n_yticks=10, label=None, fontsize=10): + """ + Creates the contour plot of a 2D NMR spectrum. It requires: + - ppm_f2 and ppm_f1: ppm scales of the direct and indirect dimension, respectively; + - datax: the 2D NMR spectrum; + - xlims, ylims: tuple of axis limits; + - lvl: height respect to maximum at which the contour are computed; + - name: filename of the figure, if it has to be saved; + - lw = linewith of the line + - X_label, Y_label: text of the X and Y axis; + - Negatives: set it to "True" if you want to see the negative part of the spectrum, in red. + """ + if np.iscomplexobj(data): + data = np.copy(data.real) + + if xlims is None: + xsx, xdx = max(ppm), min(ppm) + else: + xsx, xdx = max(xlims), min(xlims) + + if norm: + data = data/np.max(data) + if Y_label=='Intensity /a.u.': + Y_label='Normalized Intensity /a.u.' 
+ + line, = ax.plot(ppm, data, lw=lw, c=c) + if isinstance(label, str): + line.set_label(label) + + misc.set_ylim(ax, data) + + if ylims is None: + ysx, ydx = ax.get_ylim() + else: + ysx, ydx = min(ylims), max(ylims) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + misc.mathformat(ax, axis='y') + + ax.set_xlabel(X_label) + misc.set_fontsizes(ax, fontsize) + + + return line + + +def figure1D_multi(ppm0, data0, xlims=None, ylims=None, norm=False, c=None, name=None, X_label='$\delta\ $ F1 /ppm', Y_label='Intensity /a.u.', n_xticks=10, n_yticks=10, hideylabels=False, labels=None): + """ + Creates the superimposed plot of a series of 1D NMR spectra. + ------- + Parameters: + - ppm0: list or 1darray + ppm scale of the spectra + - data0: list + List containing the spectra to be plotted + - xlims: tuple or None + Limits for the x-axis. If None, the whole scale is used. + - ylims: tuple or None + Limits for the y-axis. If None, they are automatically set. + - norm: False or float or str + If it is False, it does nothing. If it is float, divides all spectra for that number. If it is str('#'), normalizes all the spectra to the '#' spectrum. If it is whatever else string, normalizes all spectra to themselves. + - c: tuple or None + List of the colors to use for the traces. None uses the default ones. + - name: str or None + Filename of the figure, if it has to be saved. If it is None, the figure is shown instead. + - X_label: str + text of the x-axis label + - Y_label: str + text of the y-axis label + - n_xticks: int + Number of numbered ticks on the x-axis of the figure + - n_yticks: int + Number of numbered ticks on the x-axis of the figure + - hideylabels: bool + if True, does not show label and tick labels of the y axis. + - labels: list or None or False + List of the labels to be shown in the legend. If it is None, the default entries are used (i.e., '1, 2, 3,...'). 
If it is False, the legend is not shown. + """ + + # Check input data format and transform into a list if it is not already + if isinstance(data0, list): + nsp = len(data0) + elif isinstance(data0, np.ndarray): + if len(data0.shape) == 1: + nsp = 1 + elif len(data0.shape) == 2: + nsp = data0.shape[0] + else: + raise ValueError('Unknown input data. Aborting...') + + # Check ppm scale format and transform into a list if it is not already + if isinstance(ppm0, np.ndarray): + if len(ppm0.shape) == 1: + ppm = [ppm0 for k in range(nsp)] + elif len(ppm0.shape) ==2: + ppm = [ppm0[k] for k in ppm0.shape[0]] + else: + raise ValueError('Unknown input scale. Aborting...') + elif isinstance(ppm0, list): + if len(ppm0) == nsp: + ppm = [ppm0[k] for k in range(nsp)] + else: + raise ValueError('The provided ppm scales do not match the number of spectra') + else: + raise ValueError('Unknown input scale. Aborting...') + + # Build the labels if not given + if labels is None: + labels = ['{}'.format(w+1) for w in range(nsp)] + elif labels is False: + pass + elif len(labels) == nsp: + pass + else: + raise ValueError('The number of provided labels do not match the number of spectra') + + # Build the list of spectra + if nsp == 1: + print('You provided only one spectrum. 
You should call figure1D instead.') + return 0 + else: + data = [data0[k] for k in range(nsp)] # copy to prevent overwriting + + # Delete the imaginary part of the spectra, if there is + for k, spectrum in enumerate(data): + if np.iscomplexobj(spectrum): + data[k] = np.copy(spectrum.real) + + # Handle the 'norm' flag + if norm is not False: + if isinstance(norm, float) or isinstance(norm, int): # norm is a number + normval = [norm for k in range(nsp)] # normalize for that number + print('Spectra were normalized to {}.'.format(normval[0])) + elif isinstance(norm, str): # norm is a string + if xlims is None: + idx1 = [0 for k in range(nsp)] + idx2 = [len(ppm[k]) for k in range(nsp)] + else: + idx1 = [misc.ppmfind(ppm[k], max(xlims))[0] for k in range(nsp)] + idx2 = [misc.ppmfind(ppm[k], min(xlims))[0] for k in range(nsp)] + try: # Check if norm can be interpreted as a list index + idx = int(eval(norm)) - 1 + # If so, normalize all the spectra to the maximum of the norm-th spectrum (ordinary numbering) + normval = [np.max(data[idx][min(idx1[k], idx2[k]):max(idx1[k],idx2[k])]) for k in range(nsp)] + print('Spectra were normalized to the {}° spectrum'.format(norm)) + except: # If you write anything else + # normalize all spectra to themselves + normval = [np.max(data[k][min(idx1[k], idx2[k]):max(idx1[k],idx2[k])]) for k in range(nsp)] + print('Spectra were normalized to themselves.') + data = [data[k]/normval[k] for k in range(nsp)] + # Correct the Y-label if left to the default one + if Y_label == 'Intensity /a.u.': + Y_label = 'Normalized intensity /a.u.' 
+ + # Set the colors + if isinstance(c, tuple) or isinstance(c, list): + if len(c) < nsp: + raise ValueError('The provided colors are not enough for the spectra.') + else: + c = COLORS + # If the default colors are not enough, cycle between them + while len(c) < nsp: + c = list(c) + c += list(COLORS) + c = tuple(c) + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figsize_small) + ax = fig.add_subplot(1,1,1) + plt.subplots_adjust(left=0.20, bottom=0.15, right=0.95, top=0.90) + # Add the traces + for k, s in enumerate(data): + line = figures.ax1D(ax, ppm[k], data[k], norm=False, xlims=None, ylims=None, c=c[k], lw=0.5, X_label='', Y_label='', n_xticks=10, n_yticks=10) + if labels is not False: + line.set_label(labels[k]) + + # Adjust the limits + misc.set_ylim(ax, data) + + if xlims is None: + xsx, xdx = ax.get_xlim() + else: + xsx, xdx = max(xlims), min(xlims) + + if ylims is None: + ysx, ydx = ax.get_ylim() + else: + ysx, ydx = min(ylims), max(ylims) + + # Make pretty scales + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, (ysx, ydx), axis='y', n_major_ticks=n_yticks) + + # Set the labels for the axes + if hideylabels: + ax.tick_params(axis='y', which='both', left=False, labelleft=False) + + ax.set_xlabel(X_label) + if not hideylabels: + ax.set_ylabel(Y_label) + misc.mathformat(ax, axis='y') + + # Legend + if labels is not False: + ax.legend() + + # Save / Show the figure + if name: + misc.set_fontsizes(ax, 10) + print( 'Saving '+name+'.png...') + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + plt.subplots_adjust(left=0.10) + misc.set_fontsizes(ax, 14) + plt.show() + plt.close() + print( 'Done.') + + +def fitfigure(S, ppm_scale, t_AQ, V, C=False, SFO1=701.125, o1p=0, limits=None, s_labels=None, X_label='$\delta\,$ F1 /ppm', n_xticks=10, name=None): + """ + Makes the figure to show the result of a quantitative fit. 
+ -------- + Parameters: + - S : 1darray + Spectrum to be fitted + - ppm_scale : 1darray + Self-explanatory + - V : 2darray + matrix (# signals, parameters) + - C : 1darray or False + Coefficients of the polynomion to be used as baseline correction. If the 'baseline' checkbox in the interactive figure panel is not checked, C_f is False. + - limits : tuple or None + Trim limits for the spectrum (left, right). If None, the whole spectrum is used. + - s_labels : list or None or False + Legend entries for the single components. If None, they are computed automatically as 1, 2, 3, etc. If False, they are not shown in the legend. + - X_label : str + label for the x-axis. + - n_xticks : int + number of numbered ticks that will appear in the ppm scale. An oculated choice can be very satisfying. + - name : str or None + Name with which to save the figure. If None, the picture is shown instead of being saved. + """ + N = S.shape[-1] + + # Set the limits + if limits is None: + limits = (max(ppm_scale), min(ppm_scale)) + + # Get limit indexes + lim1 = misc.ppmfind(ppm_scale, limits[0])[0] + lim2 = misc.ppmfind(ppm_scale, limits[1])[0] + lim1, lim2 = min(lim1, lim2), max(lim1, lim2) + + # Compute legend labels, if not already present + if s_labels is None: + s_labels = [str(w+1) for w in np.arange(V.shape[0])] + + x = np.linspace(0, 1, ppm_scale[lim1:lim2].shape[-1])[::-1] + # Make the polynomion only if C contains its coefficients + if C is False: + y = np.zeros_like(x) + else: + y = misc.polyn(x, C) + + # Make the signals + sgn = [] + Total = np.zeros_like(x) + for i in range(V.shape[0]): + sgn.append(fit.make_signal(t_AQ, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5], SFO1=SFO1, o1p=o1p, N=N)) + Total += sgn[i][lim1:lim2].real + + # Initial figure + fig = plt.figure(1) + fig.set_size_inches(figsize_small) + plt.subplots_adjust(bottom=0.15, top=0.90, left=0.15, right=0.95) + ax = fig.add_subplot(1,1,1) + + # Experimental and total + ax.plot(ppm_scale[lim1:lim2], 
S[lim1:lim2], label='Experimental', lw=0.8, c='k') + ax.plot(ppm_scale[lim1:lim2], y+Total, label = 'Fit', c='tab:blue', lw=0.7) + + # Single components + for i in range(V.shape[0]): + s_plot, = ax.plot(ppm_scale[lim1:lim2], sgn[i][lim1:lim2].real, c=s_colors[i], lw=0.4, ls='--') + if bool(s_labels[i]): + s_plot.set_label(s_labels[i]) + + # Baseline + if C is not False: + ax.plot(ppm_scale[lim1:lim2], y, label = 'Baseline', lw=0.4, c='tab:orange', ls='-.') + + # Customize picture appearance + misc.pretty_scale(ax, limits, axis='x', n_major_ticks=n_xticks) + + ax.set_xlabel(X_label) + ax.set_ylabel('Intensity /a.u.') + + misc.mathformat(ax, axis='y') + + ax.legend() + # Save/show the figure + if name: + misc.set_fontsizes(ax, 10) + plt.savefig(name+'.png', dpi=600) + else: + fig.set_size_inches(figsize_large) + misc.set_fontsizes(ax, 14) + plt.show() + plt.close() + + + +def stacked_plot(ppmscale, S, xlims=None, lw=0.5, name=None, X_label='$\delta\ $ F1 /ppm', Y_label='Normalized intensity /a.u.', n_xticks=10, labels=None): + """ + Creates a stacked plot of all the spectra contained in the list S. Note that S MUST BE a list. All the spectra must share the same scale. 
+ """ + nsp = len(S) # number of spectra in the lsit + if not labels: # auto-builds the labels for the spectra if not specified + labels=[] + for k in range(nsp): + labels.append(str(k+1)) + + # Normalizes all the spectra to the biggest value of the series + norm_factor = np.max(np.abs(np.array(S))) + for k in range(nsp): + S[k] /= norm_factor + + # Define limits for the x-scale + if xlims is None: + xsx=max(ppmscale) + xdx=min(ppmscale) + else: + xsx, xdx = max(xlims), min(xlims) + + # Define the figure + fig = plt.figure() + fig.set_size_inches(figsize_small) + plt.subplots_adjust(left=0.15, bottom=0.2, right=0.95, top=0.95) + ax = fig.add_subplot(1,1,1) + # plot the spectra + for k in range(nsp): + ax.plot(ppmscale, S[k]+k, lw=lw, label=labels[k]) + + misc.pretty_scale(ax, (xsx, xdx), axis='x', n_major_ticks=n_xticks) + misc.mathformat(ax) + + # Decorate the axes + ax.set_xlabel(X_label) + ax.set_ylabel(Y_label) + + ax.set_ylim(-0.25, nsp+0.25) + ax.legend() + + # Shows or saves the figure + if name: + misc.set_fontsizes(ax, 10) + print( 'Saving '+name+'.png...') + plt.savefig(name+'.png', format='png', dpi=600) + else: + fig.set_size_inches(figsize_large) + plt.subplots_adjust(left=0.10, bottom=0.1, right=0.95, top=0.95) + misc.set_fontsizes(ax, 14) + cursor = Cursor(ax, useblit=True, horizOn=False, c='tab:red', lw=0.8) + plt.show() + plt.close() + print( 'Done.') + + +def dotmd(ppmscale, S, labels=None, lw=0.8, n_xticks=10): + """ + Interactive display of multiple 1D spectra. They have to share the same scale. + ------- + Parameters + - ppmscale: 1darray + ppm scale of the spectra + - S: list or 1darray or 2darray + spectra to be plotted. If it is a 2darray, the spectra to be plotted are the rows of S + - labels: list + labels to be put in the legend. 
+ - n_xticks: int + Number of numbered ticks on the x-axis of the figure + """ + + if isinstance(S, list): + S = [S[w].real for w in range(len(S))] + elif isinstance(S, np.ndarray): + if len(S.shape) == 1: + S = [S.real] + elif len(S.shape) == 2: + S = [S[w].real for w in range(S.shape[0])] + if len(S.shape) == 3: + raise ValueError('Maybe you should use figures.dotmd_2D') + else: + raise ValueError('{}D arrays are not allowed.'.format(len(S.shape))) + + nsp = len(S) + + if isinstance(ppmscale, np.ndarray): + if len(ppmscale.shape) == 2: + if ppmscale.shape[0] != nsp: + raise ValueError('Number of scales do not match the number of spectra') + elif len(ppmscale.shape) == 1: + ppmscale = [ppmscale for w in range(nsp)] + else: + raise ValueError('There is a problem in the shape of the scale.') + + # flags for the activation of scroll zoom + flags = np.ones(nsp) + lvlstep = 0.1 + + # Labels of the spectra that appear in the legend + if not labels: + labels = [] + for k in range(nsp): + labels.append(str(k+1)) + elif len(labels) != nsp: + raise ValueError('Shape mismatch: you provided {} labels for {} spectra.'.format(len(labels), nsp)) + + # define boxes for sliders + u_box = plt.axes([0.025, 0.85, 0.080, 0.05]) + d_box = plt.axes([0.025, 0.25, 0.080, 0.05]) + adj_box = plt.axes([0.025, 0.55, 0.080, 0.05]) + iz_box = plt.axes([0.025, 0.10, 0.05, 0.05]) + dz_box = plt.axes([0.025, 0.05, 0.05, 0.05]) + check_box = plt.axes([0.87, 0.20, 0.12, 0.04*nsp]) + + # Functions connected to the sliders + + def increase_zoom(event): + nonlocal lvlstep + lvlstep *= 2 + + def decrease_zoom(event): + nonlocal lvlstep + lvlstep /= 2 + + def y_autoscale(val): + misc.set_ylim(ax, np.concatenate([s * scale_factor[k] for k, s in enumerate(S)])) + D, U = ax.get_ylim() + u_tb.set_val('{:.3e}'.format(U)) + d_tb.set_val('{:.3e}'.format(D)) + misc.pretty_scale(ax, ax.get_xlim(), axis='x', n_major_ticks=10) + fig.canvas.draw() + + def update_ylim(val): + U = eval(u_tb.text) + D = eval(d_tb.text) 
+ misc.pretty_scale(ax, (D,U), axis='y', n_major_ticks=n_xticks) + misc.pretty_scale(ax, ax.get_xlim(), axis='x', n_major_ticks=10) + fig.canvas.draw() + + def on_scroll(event): + nonlocal scale_factor + for k in range(nsp): + if flags[k]: + if event.button == 'up': + scale_factor[k] += lvlstep + if event.button == 'down': + scale_factor[k] += -lvlstep + if scale_factor[k] < 0: + scale_factor[k] = 0 + for k in range(nsp): + spectrum[k].set_ydata(S[k].real * scale_factor[k]) + scale_text[k].set_text(f'{scale_factor[k]:.2f}') + misc.pretty_scale(ax, ax.get_xlim(), axis='x', n_major_ticks=10) + fig.canvas.draw() + + def radioflag(label): + nonlocal flags + status = radio.get_status() + for k, stat in enumerate(status): + flags[k] = stat + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(figsize_large) + plt.subplots_adjust(left = 0.15, bottom=0.10, right=0.85, top=0.95) # Make room for the sliders + ax = fig.add_subplot(1,1,1) + + # Auto-adjusts the limits for the y-axis + misc.set_ylim(ax, np.concatenate(S)) + # Make pretty scales + misc.pretty_scale(ax, (np.max(np.concatenate(ppmscale)), np.min(np.concatenate(ppmscale))), axis='x', n_major_ticks=n_xticks) + misc.pretty_scale(ax, ax.get_ylim(), axis='y', n_major_ticks=10) + + # Pretty y-axis numbers + misc.mathformat(ax) + # Adjust fonts + misc.set_fontsizes(ax, 14) + + scale_factor = np.ones(nsp) + spectrum = [] + # Plot the data + for k in range(nsp): + spect, = ax.plot(ppmscale[k], S[k].real*scale_factor[k], c=COLORS[k], lw=lw) + spectrum.append(spect) + for k, spectr in enumerate(spectrum): + spectr.set_label(labels[k]) + ax.legend(loc='upper right') + + # TextBoxes to set the ylims + y_l = ax.get_ylim() + u_tb = TextBox(ax=u_box, label='', initial='{:.3e}'.format(y_l[1]), textalignment='center') + d_tb = TextBox(ax=d_box, label='', initial='{:.3e}'.format(y_l[0]), textalignment='center') + + # Create labels for the checkbox + checklabels = [] + for k in range(nsp): + 
checklabels.append(spectrum[k].get_label()[:12]) + radio = CheckButtons(check_box, checklabels, list(np.ones(nsp))) + HBOX = check_box.dataLim.bounds[-1] + misc.edit_checkboxes(radio, xadj=0, yadj=0.005, length=0.1, height=(HBOX-0.2*HBOX)/nsp, + color=[spec.get_color() for spec in spectrum]) + + lbl_y = [ Q.get_position()[1] for Q in radio.labels] + scale_text = [] + for Y, value in zip(lbl_y, scale_factor): + scale_text.append(ax.text(0.995, Y, f'{value:.3f}', + ha='right', va='center', transform=check_box.transAxes, fontsize=10)) + + # Create buttons + iz_button = Button(iz_box, label='$\\uparrow$') #!!! + dz_button = Button(dz_box, label='$\downarrow$') #!!! + adj_button = Button(adj_box, label='Adjust') #!!! + + # Connect the widgets to functions + radio.on_clicked(radioflag) + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + + u_tb.on_submit(update_ylim) + d_tb.on_submit(update_ylim) + adj_button.on_clicked(y_autoscale) + iz_button.on_clicked(increase_zoom) + dz_button.on_clicked(decrease_zoom) + cursor = Cursor(ax, useblit=True, color='red', horizOn=False, linewidth=0.4) + + plt.show() + plt.close() + + + +def dotmd_2D(ppm_f1, ppm_f2, S0, labels=None, name='dotmd_2D', X_label='$\delta\ $ F2 /ppm', Y_label='$\delta\ $ F1 /ppm', n_xticks=10, n_yticks=10, Neg=False): + """ + Interactive display of multiple 2D spectra. They have to share the same scales. + ------- + Parameters: + - ppm_f1: 1darray + ppm scale of the indirect dimension. If only one scale is supplied, all the spectra are plotted using the same scale. Otherwise, each spectrum is plotted using its scale. There is a 1:1 correspondance between ppm_f1 and S. + - ppm_f2: 1darray + ppm scale of the direct dimension. If only one scale is supplied, all the spectra are plotted using the same scale. Otherwise, each spectrum is plotted using its scale. There is a 1:1 correspondance between ppm_f2 and S. + - S: list + spectra to be plotted + - labels: list + labels to be put in the legend. 
+ - name: str + If you choose to save the figure, this is its filename. + - X_label: str + text of the x-axis label; + - Y_label: str + text of the y-axis label; + - n_xticks: int + Number of numbered ticks on the x-axis of the figure + - n_yticks: int + Number of numbered ticks on the x-axis of the figure + - Neg: bool + If True, show the negative contours. + """ + + cmaps = [CM[key] for key in CM.keys() if '_r' in key] # Use only _r cmaps otherwise you don't see a thing + + # Checks on dimensions of S0 + if isinstance(S0, list): + S = [S0[w].real for w in range(len(S0))] + elif isinstance(S0, np.ndarray): + if len(S0.shape) == 1: + raise ValueError('Maybe you should use figures.dotmd') + elif len(S0.shape) == 2: + S = [S0.real] + elif len(S.shape) == 3: + S = [S0[w].real for w in range(S0.shape[0])] + else: + raise ValueError('{}D arrays are not allowed.'.format(len(S0.shape))) + + nsp = len(S) # Number of SPectra + + # Checks on scales dimensions + if isinstance(ppm_f1, np.ndarray): + if len(ppm_f1.shape) == 2: + if ppm_f1.shape[0] != nsp: + raise ValueError('Number of scales do not match the number of spectra') + elif len(ppm_f1.shape) == 1: + ppm_f1 = [ppm_f1 for w in range(nsp)] + else: + raise ValueError('There is a problem in the shape of the scale.') + if isinstance(ppm_f2, np.ndarray): + if len(ppm_f2.shape) == 2: + if ppm_f2.shape[0] != nsp: + raise ValueError('Number of scales do not match the number of spectra') + elif len(ppm_f2.shape) == 1: + ppm_f2 = [ppm_f2 for w in range(nsp)] + else: + raise ValueError('There is a problem in the shape of the scale.') + # ---------------------------------------------------------------------------------- + + # flags for the activation of scroll zoom + flags = np.ones(nsp) + scale_factor = np.ones(nsp) + # Start level contour + lvl = [0.1 for k in range(nsp)] + # Initialize lvlstep + lvlstep = 0.02 + + # Labels of the spectra that appear in the legend + if not labels: + labels = [] + for k in range(nsp): + 
labels.append(str(k+1)) + elif len(labels) != nsp: + raise ValueError('Shape mismatch: you provided {} labels for {} spectra.'.format(len(labels), nsp)) + + # define boxes for sliders + iz_box = plt.axes([0.025, 0.10, 0.05, 0.05]) + dz_box = plt.axes([0.025, 0.05, 0.05, 0.05]) + check_box = plt.axes([0.87, 0.20, 0.12, 0.04*nsp]) + save_box = plt.axes([0.15, 0.90, 0.10, 0.05]) + + # ---------------------------------------------------------------------------------- + # Functions connected to the sliders + def increase_zoom(event): + """ double it """ + nonlocal lvlstep + lvlstep *= 2 + + def decrease_zoom(event): + """ halve it """ + nonlocal lvlstep + lvlstep /= 2 + + def on_scroll(event): + """ What happens when you scroll """ + nonlocal lvl, cnt + if Neg: + nonlocal Ncnt + # Get limits of the figure, to reset them later + xsx, xdx = ax.get_xlim() + ysx, ydx = ax.get_ylim() + # Move only the active spectra + for k in range(nsp): + if flags[k]: + if event.button == 'up': + lvl[k] += lvlstep + if event.button == 'down': + lvl[k] += -lvlstep + if lvl[k] < 1e-5: + lvl[k] = 1e-5 + if lvl[k] > 1: + lvl[k] = 1 + # Clear ax because cnt cannot be overwritten as list + ax.cla() + # Redraw the contours + cnt = [figures.ax2D(ax, ppm_f2[k], ppm_f1[k], S[k], + xlims=(max(ppm_f2[k]), min(ppm_f2[k])), ylims=(max(ppm_f1[k]), min(ppm_f1[k])), + cmap=cmaps[k], c_fac=1.4, lvl=lvl[k], lw=0.5, X_label=X_label, Y_label=Y_label) + for k in range(nsp)] + if Neg: + Ncnt = [figures.ax2D(ax, ppm_f2[k], ppm_f1[k], -S[k], + xlims=(max(ppm_f2[k]), min(ppm_f2[k])), ylims=(max(ppm_f1[k]), min(ppm_f1[k])), + cmap=cmaps[k], c_fac=1.4, lvl=lvl[k], lw=0.5, X_label=X_label, Y_label=Y_label) + for k in range(nsp)] + else: + Ncnt = None + # Redraw the legend because of ax.cla() + ax.legend(legend, Labels, loc='upper right', fontsize=14) + + # Set the limits as they were before + misc.pretty_scale(ax, (xsx, xdx), 'x') + misc.pretty_scale(ax, (ysx, ydx), 'y') + + # Update the zoom values in the legend + 
[scale_text[k].set_text(f'{value:.3f}') for k, value in enumerate(lvl)] + # Bigger fonts + misc.set_fontsizes(ax, 14) + fig.canvas.draw() + + def radioflag(label): + """ Change the flags array according to the checkbox """ + nonlocal flags + status = radio.get_status() + for k, stat in enumerate(status): + flags[k] = stat + + def makefigure(event): + """ Make a figure """ + if nsp == 1: + figures.figure2D(ppm_f2[0], ppm_f1[0], S[0], xlims=(l_slider.val, r_slider.val), ylims=(u_slider.val, d_slider.val), lvl=lvl[0], name=name, X_label=X_label, Y_label=Y_label, n_xticks=10, n_yticks=10) + else: + figures.figure2D_multi(ppm_f2, ppm_f1, S, xlims=(l_slider.val, r_slider.val), ylims=(u_slider.val, d_slider.val), lvl=lvl, name=name, X_label=X_label, Y_label=Y_label, n_xticks=10, n_yticks=10, labels=labels) + + # ---------------------------------------------------------------------------------- + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(figsize_large) + plt.subplots_adjust(left = 0.15, bottom=0.10, right=0.85, top=0.95) # Make room for the sliders + ax = fig.add_subplot(1,1,1) + + # Draw the contours + cnt = [figures.ax2D(ax, ppm_f2[k], ppm_f1[k], S[k], xlims=(max(ppm_f2[k]), min(ppm_f2[k])), ylims=(max(ppm_f1[k]), min(ppm_f1[k])), cmap=cmaps[k], c_fac=1.4, lvl=lvl[k], lw=0.5, X_label=X_label, Y_label=Y_label) + for k in range(nsp)] + if Neg: + Ncnt = [figures.ax2D(ax, ppm_f2[k], ppm_f1[k], -S[k], xlims=(max(ppm_f2[k]), min(ppm_f2[k])), ylims=(max(ppm_f1[k]), min(ppm_f1[k])), cmap=cmaps[k], c_fac=1.4, lvl=lvl[k], lw=0.5, X_label=X_label, Y_label=Y_label) + for k in range(nsp)] + else: + Ncnt = None + + # Set the legend + legend = [] + for i in range(len(labels)): + h,_ = cnt[i].legend_elements() + legend.append(h[0]) + if Neg: + for i in range(len(labels)): + Nh,_ = Ncnt[i].legend_elements() + legend.append(Nh[0]) + # Draw the legend + if Neg: + Nlabels = ['$-$ '+labels[w] for w in range(len(labels))] + Labels = [*labels, *Nlabels] + else: + Labels = 
labels + ax.legend(legend, Labels, loc='upper right', fontsize=14) + + # Make pretty x-scale + xsx, xdx = max(np.concatenate(ppm_f2)), min(np.concatenate(ppm_f2)) + ysx, ydx = max(np.concatenate(ppm_f1)), min(np.concatenate(ppm_f1)) + misc.pretty_scale(ax, (xsx, xdx), axis='x') + misc.pretty_scale(ax, (ysx, ydx), axis='y') + + + # Create labels for the checkbox + checklabels = [] + for k in range(nsp): + checklabels.append(labels[k][:12]) + radio = CheckButtons(check_box, checklabels, list(np.ones(nsp))) + HBOX = check_box.dataLim.bounds[-1] + misc.edit_checkboxes(radio, xadj=0, yadj=0.005, length=0.1, height=(HBOX-0.2*HBOX)/nsp) + + lbl_y = [ Q.get_position()[1] for Q in radio.labels] + scale_text = [] + for Y, value in zip(lbl_y, scale_factor): + scale_text.append(check_box.text(0.995, Y, f'{value:.3f}', + ha='right', va='center', transform=check_box.transAxes, fontsize=10)) + + # Create buttons + iz_button = Button(iz_box, label='$\\uparrow$') + dz_button = Button(dz_box, label='$\downarrow$') + save_button = Button(ax=save_box, label='Make\nfigure') + + # Connect the widgets to functions + radio.on_clicked(radioflag) + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + + iz_button.on_clicked(increase_zoom) + dz_button.on_clicked(decrease_zoom) + save_button.on_clicked(makefigure) + + cursor = Cursor(ax, useblit=True, color='red', linewidth=0.4) + + misc.set_fontsizes(ax, 14) + + plt.show() + plt.close() + + +def redraw_contours(ax, ppm_f2, ppm_f1, S, lvl, cnt, Neg=False, Ncnt=None, lw=0.5, cmap=[None, None], verb=False): + """ + Redraws the contours in interactive 2D visualizations. 
+ -------- + Parameters: + - ax: matplotlib.Subplot Object + Panel of the figure where to draw the contours + - ppm_f2: 1darray + ppm scale of the direct dimension + - ppm_f1: 1darray + ppm scale of the indirect dimension + - S: 2darray + Spectrum + - lvl: float + Level at which to draw the contours + - cnt: matplotlib.contour.QuadContourSet object + Pre-existing contours + - Neg: bool + Choose if to draw the negative contours (True) or not (False) + - Ncnt: matplotlib.contour.QuadContourSet object + Pre-existing negative contours + - lw: float + Linewidth + - cmap: list + Colour of the contours. [cmap +, cmap -] + ------- + Returns: + - cnt: matplotlib.contour.QuadContourSet object + Updated contours + - Ncnt: matplotlib.contour.QuadContourSet object or None + Updated negative contours if Neg is True, None otherwise + """ + + # Suppress the 'I cannot find the contours' warning + warnings.filterwarnings("ignore", message="No contour levels were found within the data range.") + + for c in cnt.collections: + # try to remove the positive contours + try: + c.remove() + except Exception as e: + if verb: + print(e) + pass + if Neg: + # try to remove the negative contours + for Nc in Ncnt.collections: + try: + Nc.remove() + except Exception as e: + if verb: + print(e) + pass + # Draw new positive contours + cnt = figures.ax2D(ax, ppm_f2, ppm_f1, S, lvl=lvl, cmap=cmap[0]) + if Neg: + # Draw new negative contours + Ncnt = figures.ax2D(ax, ppm_f2, ppm_f1, -S, lvl=lvl, cmap=cmap[1]) + else: + Ncnt = None + + # Return things + return cnt, Ncnt + diff --git a/klassez/fit.py b/klassez/fit.py new file mode 100644 index 0000000..d6b4a2c --- /dev/null +++ b/klassez/fit.py @@ -0,0 +1,2267 @@ +#! 
#! /usr/bin/env python3

import os
import sys
import numpy as np
from scipy import linalg, stats
from scipy.spatial import ConvexHull
from scipy import interpolate
from csaps import csaps
import random
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.widgets import Slider, Button, RadioButtons, TextBox, CheckButtons, Cursor, LassoSelector, SpanSelector
from matplotlib.path import Path
import seaborn as sns
import nmrglue as ng
import lmfit as l
from datetime import datetime
import warnings

from . import fit, misc, sim, figures, processing
from .config import CM


"""
Functions for performing fits.
"""

s_colors = ['tab:cyan', 'tab:red', 'tab:green', 'tab:purple', 'tab:pink', 'tab:gray', 'tab:brown', 'tab:olive', 'salmon', 'indigo']


def f_t1(t, A, B, T1):
    """ Build-up of magnetization due to T1 relaxation: A*(1 - exp(-t/T1)) + B """
    f = A * (1 - np.exp(-t/T1)) + B
    return f

def f_t2(t, A, B, T2):
    """ Decay of magnetization due to T2 relaxation: A*exp(-t/T2) + B """
    f = A * np.exp(-t/T2) + B
    return f

def histogram(data, nbins=100, density=True, f_lims=None, xlabel=None, x_symm=False, name=None):
    """
    Computes an histogram of 'data' and overlays the gaussian lineshape whose
    parameters (mean, std) are calculated analytically directly from 'data'.
    --------
    Parameters:
    - data : ndarray
        the data to be binned (flattened if multidimensional)
    - nbins : int
        number of bins to be calculated
    - density : bool
        True to normalize the histogram
    - f_lims : tuple or None
        limits for the x axis of the figure
    - xlabel : str or None
        Text to be displayed under the x axis
    - x_symm : bool
        set it to True to make symmetric x-axis with respect to 0
    - name : str or None
        filename for the figure to be saved; if None, the figure is shown instead
    -------
    Returns:
    - m : float
        Mean of data
    - s : float
        Standard deviation of data.
    """

    if len(data.shape) > 1:
        data = data.flatten()

    if x_symm:
        lims = (-max(np.abs(data)), max(np.abs(data)))
    else:
        lims = (min(data), max(data))

    hist, bin_edges = np.histogram(data, bins=nbins, range=lims, density=density)   # Computes the bins for the histogram

    lnspc = np.linspace(lims[0], lims[1], len(data))    # Scale for a smooth gaussian
    m, s = stats.norm.fit(data)     # Get mean and standard deviation of 'data'

    if density:
        A = 1
    else:
        A = np.trapz(hist, dx=bin_edges[1]-bin_edges[0])    # Integral
    fit_g = A / (np.sqrt(2 * np.pi) * s) * np.exp(-0.5 * ((lnspc - m) / s)**2)  # Gaussian lineshape

    fig = plt.figure()
    fig.set_size_inches(3.96, 2.78)
    ax = fig.add_subplot(1,1,1)
    ax.hist(data, color='tab:blue', density=density, bins=bin_edges)
    ax.plot(lnspc, fit_g, c='r', lw=0.6, label='$\\mu = ${:.3g}'.format(m)+'\n$\\sigma = ${:.3g}'.format(s))
    ax.tick_params(labelsize=7)
    ax.ticklabel_format(axis='both', style='scientific', scilimits=(-3,3), useMathText=True)
    ax.yaxis.get_offset_text().set_size(7)
    ax.xaxis.get_offset_text().set_size(7)
    if density:
        ax.set_ylabel('Normalized count', fontsize=8)
    else:
        ax.set_ylabel('Count', fontsize=8)
    if xlabel:
        ax.set_xlabel(xlabel, fontsize=8)
    if f_lims:
        ax.set_xlim(f_lims)
    ax.legend(loc='upper right', fontsize=6)
    fig.tight_layout()
    if name:
        plt.savefig(name+'.png', format='png', dpi=600)
    else:
        plt.show()
    plt.close()

    return m, s

def ax_histogram(ax, data0, nbins=100, density=True, f_lims=None, xlabel=None, x_symm=False, barcolor='tab:blue'):
    """
    Computes an histogram of 'data0' in the given panel and overlays the gaussian
    lineshape whose parameters (mean, std) are calculated analytically from 'data0'.
    --------
    Parameters:
    - ax : matplotlib.subplot Object
        panel of the figure where to put the histogram
    - data0 : ndarray
        the data to be binned (flattened if multidimensional)
    - nbins : int
        number of bins to be calculated
    - density : bool
        True to normalize the histogram
    - f_lims : tuple or None
        limits for the x axis of the figure
    - xlabel : str or None
        Text to be displayed under the x axis
    - x_symm : bool
        set it to True to make symmetric x-axis with respect to 0
    - barcolor : str
        color of the histogram bars
    -------
    Returns:
    - m : float
        Mean of data
    - s : float
        Standard deviation of data.
    """

    if len(data0.shape) > 1:
        data = data0.flatten()
    else:
        data = np.copy(data0)

    if x_symm:
        lims = (-max(np.abs(data)), max(np.abs(data)))
    else:
        lims = (min(data), max(data))

    hist, bin_edges = np.histogram(data, bins=nbins, range=lims, density=density)   # Computes the bins for the histogram

    lnspc = np.linspace(lims[0], lims[1], len(data))    # Scale for a smooth gaussian
    m, s = stats.norm.fit(data)     # Get mean and standard deviation of 'data'

    if density:
        A = 1
    else:
        A = np.trapz(hist, dx=bin_edges[1]-bin_edges[0])    # Integral
    fit_g = A / (np.sqrt(2 * np.pi) * s) * np.exp(-0.5 * ((lnspc - m) / s)**2)  # Gaussian lineshape

    ax.hist(data, color=barcolor, density=density, bins=bin_edges)
    ax.plot(lnspc, fit_g, c='r', lw=0.6, label='Theoretical values:\n$\\mu = ${:.3g}'.format(m)+'\n$\\sigma = ${:.3g}'.format(s))

    if density:
        ax.set_ylabel('Normalized count')
    else:
        ax.set_ylabel('Count')
    if xlabel:
        ax.set_xlabel(xlabel)

    if f_lims:
        misc.pretty_scale(ax, f_lims, 'x')
    else:
        misc.pretty_scale(ax, ax.get_xlim(), 'x')
    misc.pretty_scale(ax, ax.get_ylim(), 'y')

    misc.mathformat(ax, limits=(-3,3))

    ax.legend(loc='upper right')

    misc.set_fontsizes(ax, 10)

    return m, s

def bin_data(data0, nbins=100, density=True, x_symm=False):
    """
    Computes the histogram of data0, sampling it into nbins bins.
    --------
    Parameters:
    - data0 : ndarray
        the data to be binned (flattened if multidimensional)
    - nbins : int
        number of bins to be calculated
    - density : bool
        True to normalize the histogram
    - x_symm : bool
        set it to True to make symmetric x-axis with respect to 0
    -------
    Returns:
    - hist: 1darray
        The bin intensity
    - bin_scale: 1darray
        Scale built with the mean value of the bin widths.
    """
    if len(data0.shape) > 1:
        data = data0.flatten()
    else:
        data = np.copy(data0)

    if x_symm:
        lims = (-max(np.abs(data)), max(np.abs(data)))
    else:
        lims = (min(data), max(data))

    hist, bin_edges = np.histogram(data, bins=nbins, range=lims, density=density)   # Computes the bins for the histogram
    # Bin centers: midpoint of each pair of consecutive edges
    bin_scale = (bin_edges[:-1] + bin_edges[1:]) / 2
    return hist, bin_scale
def fit_int(y, y_c):
    """
    Calculate the intensity according to the least square fit as:
        I = sum( obs * calc ) / sum( calc^2 )
    --------
    Parameters:
    - y: ndarray
        Observed data.
    - y_c: ndarray
        Calculated data
    --------
    Returns:
    - I: float
        Calculated intensity
    """
    I = np.sum(y * y_c, axis=-1) / np.sum(y_c**2, axis=-1)
    return I

def get_region(ppmscale, S, rev=True):
    """
    Interactively select the spectral region to be fitted.
    Returns the border ppm values.
    -------
    Parameters:
    - ppmscale: 1darray
        The ppm scale of the spectrum
    - S: 1darray
        The spectrum to be trimmed
    - rev: bool
        Choose if to reverse the ppm scale and data (True) or not (False).
    -------
    Returns:
    - left: float
        Left border of the selected spectral window
    - right: float
        Right border of the selected spectral window
    """

    # Set the slider initial values
    if rev:
        left = max(ppmscale)
        right = min(ppmscale)
    else:
        right = max(ppmscale)
        left = min(ppmscale)
    res = misc.calcres(ppmscale)

    # Make the boxes
    #   for sliders
    box_left = plt.axes([0.1, 0.15, 0.80, 0.02])
    box_t_left = plt.axes([0.1, 0.05, 0.05, 0.03])
    box_right = plt.axes([0.1, 0.10, 0.80, 0.02])
    box_t_right = plt.axes([0.85, 0.05, 0.05, 0.03])
    #   for buttons
    box_button = plt.axes([0.45, 0.925, 0.1, 0.04])
    # Make the sliders
    left_slider = Slider(ax=box_left, label='Left', valmin=min(ppmscale), valmax=max(ppmscale), valinit=left, valstep=res, color='tab:red')
    right_slider = Slider(ax=box_right, label='Right', valmin=min(ppmscale), valmax=max(ppmscale), valinit=right, valstep=res, color='tab:green')
    # Make the buttons
    button = Button(box_button, 'SAVE', hovercolor='0.975')
    l_tbox = TextBox(box_t_left, '', textalignment='center')
    r_tbox = TextBox(box_t_right, '', textalignment='center')

    # Definition of the 'update' functions
    def update_region(val):
        """ Move the region selectors and rescale the view on the enclosed data """
        left = left_slider.val
        right = right_slider.val
        LB, RB = misc.ppmfind(ppmscale, left)[0], misc.ppmfind(ppmscale, right)[0]
        data_inside = S[min(LB,RB):max(LB,RB)]

        # A vertical line has two points: pass a 2-element sequence
        # (scalar set_xdata is deprecated since matplotlib 3.7)
        L.set_xdata([left, left])
        R.set_xdata([right, right])
        if rev:
            ax.set_xlim(left+25*res, right-25*res)
        else:
            ax.set_xlim(left-25*res, right+25*res)
        T = max(data_inside.real)
        B = min(data_inside.real)
        ax.set_ylim(B - 0.05*T, T + 0.05*T)

    def on_submit_l(v):
        """ Set the left slider from the textbox content """
        # SECURITY NOTE: eval() on user-typed text; acceptable for a local
        # interactive tool, but do not expose to untrusted input.
        V = eval(v)
        left_slider.set_val(V)
        update_region(0)

    def on_submit_r(v):
        """ Set the right slider from the textbox content """
        # SECURITY NOTE: eval() on user-typed text (see above).
        V = eval(v)
        right_slider.set_val(V)
        update_region(0)

    def save(event):
        """ Saves the values to be returned """
        nonlocal left, right
        left = left_slider.val
        right = right_slider.val

    # Creation of interactive figure panel
    fig = plt.figure(1)
    fig.set_size_inches(15,8)
    plt.subplots_adjust(left=0.10, bottom=0.25, right=0.90, top=0.90)   # Make room for the sliders
    ax = fig.add_subplot(1,1,1)

    misc.pretty_scale(ax, (left, right))
    if rev:
        ax.set_xlim(left+25*res, right-25*res)
    else:
        ax.set_xlim(left-25*res, right+25*res)

    ax.plot(ppmscale, S.real, c='b', lw=0.8)    # Plot the data
    misc.mathformat(ax, 'y')
    ax.set_xlabel('$\\delta\\,$ /ppm')
    ax.set_ylabel('Intensity /a.u.')
    L = ax.axvline(x=left, lw=0.5, c='r')       # Left selector
    R = ax.axvline(x=right, lw=0.5, c='g')      # Right selector

    # Call the 'update' functions upon interaction with the widgets
    left_slider.on_changed(update_region)
    right_slider.on_changed(update_region)
    button.on_clicked(save)
    l_tbox.on_submit(on_submit_l)
    r_tbox.on_submit(on_submit_r)

    misc.set_fontsizes(ax, 14)

    plt.show()
    plt.close(1)

    return left, right
def make_signal(t, u, s, k, x_g, phi, A, SFO1=701.125, o1p=0, N=None):
    """
    Generates a voigt signal on the basis of the passed parameters in the time domain.
    Then, makes the Fourier transform and returns it.
    -------
    Parameters:
    - t : ndarray
        acquisition timescale
    - u : float
        chemical shift /ppm
    - s : float
        full-width at half-maximum /Hz
    - k : float
        relative intensity
    - x_g : float
        fraction of gaussianity
    - phi : float
        phase of the signal, in degrees
    - A : float
        total intensity
    - SFO1 : float
        Larmor frequency /MHz
    - o1p : float
        pulse carrier frequency /ppm
    - N : int or None
        length of the final signal. If None, signal is not zero-filled before to be transformed.
    -------
    Returns:
    - sgn : 1darray
        generated signal in the frequency domain
    """
    U = misc.ppm2freq(u, SFO1, o1p)     # conversion to frequency units
    S = s * 2 * np.pi                   # conversion to radians
    phi = phi * np.pi / 180             # conversion to radians
    sgn = sim.t_voigt(t, U, S, A=A*k, phi=phi, x_g=x_g)     # make the signal
    if isinstance(N, int):
        sgn = processing.zf(sgn, N)     # zero-fill it
    sgn = processing.ft(sgn)            # transform it
    return sgn


def voigt_fit(S, ppm_scale, V, C, t_AQ, limits=None, SFO1=701.125, o1p=0, utol=0.5, vary_phi=False, vary_xg=True, vary_basl=True, hist_name=None, write_out='fit.out', test_res=True):
    """
    Fits an NMR spectrum with a set of signals, whose parameters are specified in the V matrix.
    There is the possibility to use a baseline through the parameter C.
    The signals are computed in the time domain and then Fourier transformed.
    --------
    Parameters:
    - S : 1darray
        Spectrum to be fitted
    - ppm_scale : 1darray
        Self-explanatory
    - V : 2darray
        matrix (# signals, parameters)
    - C : 1darray or False
        Coefficients of the polynomion to be used as baseline correction. False means no baseline.
    - t_AQ : 1darray
        Acquisition timescale
    - limits : tuple or None
        Trim limits for the spectrum (left, right). If None, the whole spectrum is used.
    - SFO1 : float
        Larmor frequency /MHz
    - o1p : float
        pulse carrier frequency /ppm
    - utol : float
        tolerance for the chemical shift. The peak center can move in the range [u-utol, u+utol].
    - vary_phi : bool
        If it is False, the phase of each signal cannot be varied during the fit.
    - vary_xg : bool
        If it is False, the parameter x_g cannot be varied during the fitting procedure.
        Useful when fitting with pure Gaussians or pure Lorentzians.
    - vary_basl : bool
        If it is False, the baseline is kept fixed at the initial parameters.
    - hist_name : str or None
        If given, a figure of the residual and its histogram is saved as <hist_name>.png.
    - write_out : str or None
        If it is a string, the final parameters are saved in a file with that name.
    - test_res : bool
        If True, test the residual for gaussianity and print the fit statistics.
    -------
    Returns:
    - V_f : 2darray
        matrix (# signals, parameters) after the fit
    - C_f : 1darray or False
        Coefficients of the baseline polynomion after the fit, or False if not used.
    - result : lmfit.fit_result
        container of all information on the fit
    - runtime : datetime.timedelta
        Total time employed by the fit
    """

    ns = V.shape[0]     # Number of signals
    # the baseline is used if C is an array
    if isinstance(C, list):
        C = np.array(C)
    use_basl = isinstance(C, np.ndarray)

    # Compute limits and get indexes on ppm_scale
    if limits is None:
        limits = [max(ppm_scale), min(ppm_scale)]
    lim1 = misc.ppmfind(ppm_scale, limits[0])[0]
    lim2 = misc.ppmfind(ppm_scale, limits[1])[0]
    lim1, lim2 = min(lim1, lim2), max(lim1, lim2)

    # Total integral and spectral width, to be used as limits for the fit
    I = np.trapz(S[lim1:lim2], dx=misc.calcres(ppm_scale))
    SW = misc.ppm2freq(np.abs(ppm_scale[0]-ppm_scale[-1]), B0=SFO1, o1p=o1p)

    # polynomion x-scale
    x = np.linspace(0, 1, ppm_scale[lim1:lim2].shape[-1])[::-1]

    # Filling up the Parameters dictionary
    param = l.Parameters()
    peak_names = ['u', 'fwhm', 'k', 'x_g', 'phi']   # signal entries
    poly_names = ['a', 'b', 'c', 'd', 'e']          # baseline polynomion coefficients

    minima = np.array([             # lower thresholds, shape (ns, 5) after .T
        [u-utol for u in V[:,0]],       # chemical shift
        [0 for s in range(ns)],         # fwhm
        [0 for k in range(ns)],         # rel int
        [0-1e-5 for xg in range(ns)],   # xg
        [-180 for phi in range(ns)]     # phase angle
        ]).T
    maxima = np.array([             # upper thresholds, shape (ns, 5) after .T
        [u+utol for u in V[:,0]],       # chemical shift
        [SW for s in range(ns)],        # fwhm
        [5 for k in range(ns)],         # rel int
        [1+1e-5 for xg in range(ns)],   # xg
        [180 for phi in range(ns)]      # phase angle
        ]).T

    for i in range(V.shape[0]):     # put variables in the dictionary
        idx = str(i+1)
        for j in range(len(peak_names)):
            param.add(peak_names[j]+idx, value=V[i,j], min=minima[i,j], max=maxima[i,j])
        param['x_g'+idx].set(vary=vary_xg)
        param['phi'+idx].set(vary=vary_phi)
    param.add('A', value=V[0,-1], vary=False)   # Unique, got from first row

    if C is not False:      # Add polynomion
        lim_poly = np.array([1e1, 1e1, 1e1, 1e1, 1e1])
        for i in range(len(poly_names)):
            param.add(poly_names[i], value=C[i], min=C[i]-lim_poly[i], max=C[i]+lim_poly[i], vary=vary_basl)

    def f2min_real(param, S, use_basl=False):
        """ Cost function for the fit of a real spectrum """
        N = S.shape[-1]

        # unpack V and C from dictionary
        param = param.valuesdict()
        V = fit.dic2mat(param, peak_names, ns, param['A'])
        if use_basl is not False:
            C_in = np.array([param[w] for w in poly_names])
            y = misc.polyn(x, C_in)
        else:
            y = np.zeros_like(x)

        # Compute only total signal
        sgn = np.zeros(len(x))
        for i in range(V.shape[0]):
            temp_sgn = make_signal(t_AQ, *V[i], SFO1, o1p, N).real
            sgn += temp_sgn[lim1:lim2]

        # Calculate residual
        R = y + sgn - S[lim1:lim2]
        return R

    def f2min_cplx(param, S, use_basl=False):
        """ Cost function for the fit of a complex spectrum """
        N = S.shape[-1]

        # unpack V and C from dictionary
        param = param.valuesdict()
        V = fit.dic2mat(param, peak_names, ns, param['A'])
        if use_basl is not False:
            C_in = np.array([param[w] for w in poly_names])
            y = misc.polyn(x, C_in) + 1j*misc.polyn(x, C_in)
        else:
            y = np.zeros_like(x)

        # Compute only total signal
        sgn = np.zeros(len(x)).astype(S.dtype)
        for i in range(V.shape[0]):
            temp_sgn = make_signal(t_AQ, *V[i], SFO1, o1p, N)
            sgn += temp_sgn[lim1:lim2]

        # Calculate residual: stack real and imaginary parts for leastsq
        R = y + sgn - S[lim1:lim2]
        R_tot = np.concatenate((R.real, R.imag), axis=-1)
        return R_tot

    # Fit
    print('Starting fit...')
    start_time = datetime.now()
    if np.iscomplexobj(S):
        minner = l.Minimizer(f2min_cplx, param, fcn_args=(S, use_basl))
    else:
        minner = l.Minimizer(f2min_real, param, fcn_args=(S, use_basl))
    result = minner.minimize(method='leastsq', max_nfev=10000, xtol=1e-15, ftol=1e-15)
    end_time = datetime.now()
    runtime = end_time - start_time

    print('{} Total runtime: {}.\nNumber of function evaluations: {:5.0f}'.format(result.message, runtime, result.nfev))

    popt = result.params.valuesdict()   # final parameters

    # Put all the results in final variables
    V_f = dic2mat(popt, peak_names, ns, popt['A'])
    if use_basl is True:
        C_f = np.array([popt[w] for w in poly_names])
    else:
        C_f = False

    # Print the parameters
    print_par(V_f, C_f, limits=limits)
    if isinstance(write_out, str):
        print('These values are saved in: {}'.format(write_out))
        write_par(V_f, C_f, limits=limits, filename=write_out)

    # Check for the gaussianity of the residual
    if test_res is True:
        # Get the info
        if np.iscomplexobj(S):
            Npt = len(result.residual)
            R = result.residual[:Npt//2] + 1j*result.residual[Npt//2:]
            SYSDEV, Q_G = fit.test_residuals(R.real)
        else:
            R = result.residual
            SYSDEV, Q_G = fit.test_residuals(R)

        # Make the figure of the residual
        if hist_name:
            fig = plt.figure()
            fig.set_size_inches(6.60, 2.56)
            plt.subplots_adjust(left=0.10, bottom=0.15, top=0.90, right=0.95, wspace=0.30)
            axr = fig.add_subplot(1,2,1)
            axh = fig.add_subplot(1,2,2)

            axr.set_title('Fit residual')
            axr.plot(ppm_scale[lim1:lim2], R.real, c='tab:blue', lw=0.5)
            axr.axhline(0, c='k', lw=0.25)
            axr.set_xlabel('$\\delta\\,$ /ppm')
            axr.set_ylabel('Intensity /a.u.')
            misc.mathformat(axr)
            misc.pretty_scale(axr, (ppm_scale[lim1], ppm_scale[lim2]))
            misc.set_ylim(axr, [S, R, -S, -R])
            misc.pretty_scale(axr, axr.get_ylim(), axis='y', n_major_ticks=8)

            axh.set_title('Histogram of residual')
            # Compute the number of bins: ~1 bin per 10 points, within [20, 2500]
            if R.shape[-1] < 200:
                n_bins = 20
            else:
                n_bins = R.shape[-1] // 10
                if n_bins > 2500:
                    n_bins = 2500
            m_R, s_R = fit.ax_histogram(axh, R.real, nbins=n_bins, density=False, x_symm=True, barcolor='tab:blue')
            axh.axvline(0, c='k', lw=0.25)
            axh.set_xlabel('Intensity /a.u.')
            misc.pretty_scale(axh, axh.get_xlim(), axis='x', n_major_ticks=8)
            misc.pretty_scale(axh, axh.get_ylim(), axis='y')
            misc.mathformat(axh, 'both')

            misc.set_fontsizes(axr, 10)
            misc.set_fontsizes(axh, 10)
            plt.savefig(hist_name+'.png', dpi=300)
            plt.close()
        else:
            # Histogram computed only to obtain the residual statistics
            fig = plt.figure()
            ax = fig.add_subplot()
            m_R, s_R = fit.ax_histogram(ax, R.real, nbins=100, density=False, x_symm=True, barcolor='tab:blue')
            plt.close()

        # Print the statistics
        print('-' * 60)
        print('{:^60}'.format('Statistics of the fit'))
        print('{:<30} = {:=9.3e} | Optimal : 0'.format('Mean of residuals', m_R))
        print('{:<30} = {:=9.3e} | Optimal : 0'.format('Mean/STD of residuals', m_R / s_R))
        print('{:<30} = {:+9.3e} | Optimal : 1'.format('Systematic deviation', SYSDEV))
        print('{:<30} = {:+9.3e} | Optimal : 1'.format('Gaussianity of residuals', Q_G))
        print('-' * 60)

    return V_f, C_f, result, runtime
+ """ + # Reverse x and y if x is descending + if x[0] > x[-1]: + x_o = np.copy(x[::-1]) + y_o = np.copy(y[::-1]) + if weights is not None: + weights = weights[::-1] + else: + x_o = np.copy(x) + y_o = np.copy(y) + + # If size is not given, make the spline with the same size as the observed data + if size: + x_s = np.linspace(x_o[0], x_o[-1], size) + else: + x_s = np.linspace(x_o[0], x_o[-1], x.shape[-1]) + + # Compute the spline + if np.iscomplexobj(y_o): # Treat real and imaginary part separately, then join them together + y_sr = csaps(x_o, y_o.real, x_s, weights=weights, smooth=s_f) + y_si = csaps(x_o, y_o.imag, x_s, weights=weights, smooth=s_f) + y_s = y_sr + 1j*y_si + else: # Normal spline smoothing + y_s = csaps(x_o, y_o, x_s, weights=weights, smooth=s_f) + + # Reverse the spline if you reversed the observed data + if x[0] > x[-1]: + x_s = x_s[::-1] + y_s = y_s[::-1] + return x_s, y_s + +def interactive_smoothing(x, y, cmap='RdBu'): + """ + Interpolate the given data with a 3rd-degree spline. Type the desired smoothing factor in the box and see the outcome directly on the figure. + When the panel is closed, the smoothed function is returned. 
+ ------- + Parameters: + - x: 1darray + Scale of the data + - y: 1darray + Data to be smoothed + - cmap: str + Name of the colormap to be used to represent the weights + -------- + Returns: + - sx: 1darray + Location of the spline points + - sy: 1darray + Smoothed y + - s_f: float + Employed smoothing factor for the spline + - weights: 1darray + Weights vector + """ + cmap = CM[f'{cmap}'] # Read the colormap + + # Get the limits for the figure + lims = x[0], x[-1] + + # Initialize data + s_f = 0.95 # Smoothing factor + size = x.shape[-1] # Spline size + weights = np.ones_like(x) * 0.5 # Weights vector + sx, sy = fit.smooth_spl(x, y, size=size, s_f=s_f, weights=weights) # Calculate starting spline + + # Make the widgets + # Smoothing factor textbox + sf_box = plt.axes([0.25, 0.04, 0.1, 0.06]) + sf_tb = TextBox(sf_box, 'Insert\nSmoothing factor', textalignment='center') + + # Size textbox + size_box = plt.axes([0.60, 0.04, 0.1, 0.06]) + size_tb = TextBox(size_box, 'Insert\nSize', textalignment='center') + + # Weights slider + slider_box = plt.axes([0.90, 0.15, 0.01, 0.8]) + weight_slider = Slider( + ax=slider_box, + label = 'Weight', + valmin = 1e-5, + valmax = 1, + valinit = 0.5, + valstep = 0.05, + orientation = 'vertical' + ) + + # Colorbar for the weights + cbar_box = plt.axes([0.94, 0.15, 0.02, 0.8]) + norm = matplotlib.colors.Normalize(vmin=0, vmax=1) # Dummy values to plot the colorbar + plt.colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cbar_box, orientation='vertical') + + # -------------------------------------------------------------------------------------------------------- + # Functions connected to the widgets + + def update_plot(): + """ Redraw the spline """ + sx, sy = fit.smooth_spl(x, y, size=size, s_f=s_f, weights=weights) + s_plot.set_data(sx, sy.real) + plt.draw() + + def update_size(text): + """ Update size, write it, call update_plot""" + nonlocal size + try: + size = int(eval(text)) + except: + pass + 
size_text.set_text('Size:\n{:.0f}'.format(size)) + update_plot() + + def update_sf(text): + """ Update s_f, write it, call update_plot""" + nonlocal s_f + try: + s_f = eval(text) + except: + pass + s_text.set_text('Smoothing factor:\n{:.4f}'.format(s_f)) + update_plot() + + def onselect(*event): + """ Stupid function connected to both mouse click and spanselector """ + if len(event) > 1: # = Selector, hence OK + span.set_visible(True) + else: # = Mousebutton? + event, = event # It is a tuple! Unpack + if event.inaxes == ax and event.button == 1: + # Only if you click inside the figure with left button + span.set_visible(True) + fig.canvas.draw() + + def update_bg_color(weights): + """ Draw the figure background according to the weight vector """ + [fill.set_fc(cmap(q)) for fill, q in zip(tmp_fill, weights)] + fig.canvas.draw() + + def press_space(key): + """ When you press 'space' """ + if key.key == ' ': + nonlocal weights + span.set_visible(False) # Hide the spanselector + xmin, xmax = span.extents # Get the shaded area + # Get indexes on x of the shaded area, and sort them + imin, _ = misc.ppmfind(x, xmin) + imax, _ = misc.ppmfind(x, xmax) + imin, imax = min(imin, imax), max(imin, imax) + # Set the weights according to the value set on the slider + weights[imin:imax] = weight_slider.val + # Draw the background and the spline + update_bg_color(weights) + update_plot() + + def mouse_scroll(event): + """ Control slider with the mouse scroll """ + valstep = 0.05 + sl_lims = 1e-5, 1 + if event.button == 'up': + if weight_slider.val < sl_lims[1]: + weight_slider.set_val(weight_slider.val + valstep) + else: + weight_slider.set_val(sl_lims[1]) + elif event.button == 'down': + if weight_slider.val > sl_lims[0]: + weight_slider.set_val(weight_slider.val - valstep) + else: + weight_slider.set_val(sl_lims[0]) + + # -------------------------------------------------------------------------------------------------------- + + # Make the figure + fig = plt.figure(1) + 
fig.set_size_inches(figures.figsize_large) + plt.subplots_adjust(left=0.1, right=0.85, top=0.95, bottom=0.15) + ax = fig.add_subplot() + + ax.set_title('Press SPACE to set the weights') + + # Background + tmp_fill = [ax.axvspan(x[k-1], x[k], ec=None, fc=cmap(q), alpha=0.25) for k, q in enumerate(weights) if k != 0] + + # Plot things + ax.plot(x, y.real, c='tab:blue', lw=0.9, label='Original') + s_text = plt.text(0.45, 0.07, 'Smoothing factor:\n{:.5f}'.format(s_f), fontsize=16, ha='center', va='center', transform=fig.transFigure) + size_text = plt.text(0.75, 0.07, 'Size:\n{:.0f}'.format(size), fontsize=16, ha='center', va='center', transform=fig.transFigure) + s_plot, = ax.plot(sx, sy.real, c='tab:red', lw=0.8, label='Smoothed') + + # Adjust figure display + misc.pretty_scale(ax, lims, 'x') + misc.pretty_scale(ax, ax.get_ylim(), 'y') + misc.mathformat(ax) + misc.set_fontsizes(ax, 16) + ax.legend(fontsize=12) + + # Connect widget to function + sf_tb.on_submit(update_sf) + size_tb.on_submit(update_size) + + # Declare span selector + span = SpanSelector(ax, onselect, "horizontal", useblit=True, + props=dict(alpha=0.25, facecolor="tab:blue"), interactive=True, drag_from_anywhere=True) + + # Press space and mouse left button + fig.canvas.mpl_connect('key_press_event', press_space) + fig.canvas.mpl_connect('button_press_event', onselect) + fig.canvas.mpl_connect('scroll_event', mouse_scroll) + + plt.show() + + # Compute final output + sx, sy = fit.smooth_spl(x, y, size=size, s_f=s_f, weights=weights) + + return sx, sy, s_f, weights + +def build_baseline(ppm_scale, C, L=None): + """ + Builds the baseline calculating the polynomion with the given coefficients, and summing up to the right position. + ------- + Parameters: + - ppm_scale: 1darray + ppm scale of the spectrum + - C: list + Parameters coefficients. No baseline corresponds to False. + - L: list + List of window regions. 
def build_baseline(ppm_scale, C, L=None):
    """
    Builds the baseline by computing the polynomion with the given coefficients, then summing it up at the right position.
    -------
    Parameters:
    - ppm_scale: 1darray
        ppm scale of the spectrum
    - C: list
        Parameters coefficients, one entry per window. No baseline corresponds to False.
    - L: list
        List of window regions, as (left, right) tuples /ppm.
        If it is None, the baseline is built on the whole ppm_scale
    -------
    Returns:
    - baseline: 1darray
        Baseline, same shape as ppm_scale.
    """
    # Just in case you have a unique polynomion: wrap the bare inputs in lists
    if not isinstance(C, list):
        C = [np.array(C)]
    if L is None:
        L = ppm_scale[0], ppm_scale[-1]
    if not isinstance(L, list):
        L = [L]

    baseline = np.zeros_like(ppm_scale)
    for k, coeff in enumerate(C):
        if coeff is False:  # No baseline for this window
            continue
        # Find the indexes of the window borders on ppm_scale, and sort them to avoid stupid mistakes
        lims = misc.ppmfind(ppm_scale, L[k][0])[0], misc.ppmfind(ppm_scale, L[k][1])[0]
        lims = min(lims), max(lims)
        size_x = int(np.abs(lims[0] - lims[1]))     # Size of the polynomion scale
        # Build the polynomion scale as the one of the fit: from 1 down to 0
        x = np.linspace(0, 1, size_x)[::-1]
        # Compute the polynomion and sum it to the baseline in the correct position
        baseline[lims[0]:lims[1]] += misc.polyn(x, coeff)
    return baseline
+ """ + if isinstance(joined_name, str): + f = open(joined_name, 'w') + + V = [] # Values + C = [] # Polynomion + L = [] # Window limits + + for k in range(len(filenames)): + # Read file + tmp = fit.read_par(filenames[k]) + V.append(tmp[0]) + C.append(tmp[1]) + L.append(tmp[2]) + + if isinstance(joined_name, str): + for k in range(len(filenames)): + fit.write_par(V[k], C[k], L[k], f) + if k < len(filenames) - 1: # Add separator + f.write('\n*{}*\n\n'.format('-'*64)) + else: # Add closing statement + f.write('\n***{:^60}***'.format('END OF FILE')) + f.close() + print('Joined parameters are saved in {}'.format(joined_name)) + + # Check if the regions superimpose + sup = np.zeros_like(ppm_scale) + for k in range(len(L)): + lims = misc.ppmfind(ppm_scale, L[k][0])[0], misc.ppmfind(ppm_scale, L[k][1])[0] # Find the indexes on ppm_scale + lims = min(lims), max(lims) # Sort them to avoid stupid mistakes + sup[lims[0]:lims[1]] += 1 # Add 1 in the highlighted region + + # If there are superimposed regions, do not compute the baseline + if np.any(sup > 1): + print('Warning: Superimposed regions detected! Baseline not computed.') + baseline = np.zeros_like(ppm_scale) + else: + baseline = fit.build_baseline(ppm_scale, C, L) + + # Stack the signal parameters to make a unique matrix + V = np.concatenate(V, axis=0) + + return V, C, L, baseline + + +def calc_fit_lines(ppm_scale, limits, t_AQ, SFO1, o1p, N, V, C=False): + """ + Given the values extracted from a fit input/output file, calculates the signals, the total fit function, and the baseline. + ------- + Parameters: + - ppm_scale: 1darray + PPM scale of the spectrum + - limits: tuple + (left, right) in ppm + - t_AQ: 1darray + Acquisition timescale + - SFO1: float + Larmor frequency of the nucleus /ppm + - o1p: float + Pulse carrier frequency /ppm + - N: int + Size of the final spectrum. + - V: 2darray + Matrix containing the values to build the signals. + - C: 1darray + Baseline polynomion coefficients. 
def calc_fit_lines(ppm_scale, limits, t_AQ, SFO1, o1p, N, V, C=False):
    """
    Given the values extracted from a fit input/output file, calculates the signals, the total fit function, and the baseline.
    -------
    Parameters:
    - ppm_scale: 1darray
        PPM scale of the spectrum
    - limits: tuple
        (left, right) in ppm
    - t_AQ: 1darray
        Acquisition timescale
    - SFO1: float
        Larmor frequency of the nucleus /MHz
    - o1p: float
        Pulse carrier frequency /ppm
    - N: int
        Size of the final spectrum.
    - V: 2darray
        Matrix containing the values to build the signals.
    - C: 1darray
        Baseline polynomion coefficients. False (or None) to not use the baseline
    -------
    Returns:
    - sgn: list
        Voigt signals built using V
    - Total: 1darray
        sum of all the sgn, trimmed to the fit window
    - baseline: 1darray
        Polynomion built using C. Zeros if C is False.
    """
    # Find the indexes of the limits on ppm_scale and sort them
    lim1 = misc.ppmfind(ppm_scale, limits[0])[0]
    lim2 = misc.ppmfind(ppm_scale, limits[1])[0]
    lim1, lim2 = min(lim1, lim2), max(lim1, lim2)

    # Baseline scale, as the one of the fit: from 1 down to 0
    x = np.linspace(0, 1, ppm_scale[lim1:lim2].shape[-1])[::-1]
    # Make the polynomion only if C contains its coefficients
    # (None is accepted as a synonym of False for robustness)
    if C is False or C is None:
        baseline = np.zeros_like(x)
    else:
        baseline = misc.polyn(x, C)

    # Make the signals: each row of V is (u, s, k, x_g, phi, A)
    sgn = []
    Total = np.zeros(x.shape[-1], dtype=complex)    # Sum of all signals, in the fit window
    for row in V:
        sgn.append(fit.make_signal(t_AQ, row[0], row[1], row[2], row[3], row[4], row[5], SFO1=SFO1, o1p=o1p, N=N))
        Total += sgn[-1][lim1:lim2]

    return sgn, Total, baseline
+ """ + + # Copy to prevent overwriting + ppm = np.copy(ppm0) + data = np.copy(data0) + + # Calculate the total integral function + int_f = processing.integral(data, ppm) + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figures.figsize_large) + plt.subplots_adjust(left=0.10, bottom=0.15, top=0.90, right=0.80) + ax = fig.add_subplot() + + # Make boxes for buttons + add_box = plt.axes([0.875, 0.80, 0.05, 0.06]) + setref_box = plt.axes([0.825, 0.72, 0.075, 0.06]) + save_box = plt.axes([0.875, 0.20, 0.05, 0.06]) + # Make box for tbox + reftb_box = plt.axes([0.925, 0.72, 0.05, 0.06]) + # Make buttons + add_button = Button(add_box, 'ADD', hovercolor='0.875') + save_button = Button(save_box, 'SAVE', hovercolor='0.875') + setref_button = Button(setref_box, 'SET REF', hovercolor='0.875') + # Make tbox + ref_tbox = TextBox(ax=reftb_box, label='', initial='{}'.format(1), textalignment='center') + + # Declare variables + f_vals = { # Initialize output variable + 'total': float(0), # Total integrated area + 'ref_pos': '{:.2f}:{:.2f}'.format(ppm[0], ppm[-1]), # Position of the reference signal /ppm1:ppm2 + 'ref_val': float(1), # For how many nuclei the reference peak integrates + 'ref_int': int_f[-1], # Reference peak integral, absolute value + } + abs_vals = {} # dictionary: integrals of the peaks, absolute values + text_integrals={} # dictionary: labels to keep record of the integrals + + # --------------------------------------------------------------------------------------- + # Functions connected to the widgets + def redraw_labels(f_vals): + """ Computes the relative integrals and updates the texts on the plot """ + corr_func = f_vals['ref_val'] / f_vals['ref_int'] # Correction + + # Update all the integral texts according to the new total_integral value + tmp_text.set_text('{:.5f}'.format(tmp_plot.get_ydata()[-1] * corr_func)) # Relative value of the integral: under the red label on the right + for key, value in abs_vals.items(): + 
text_integrals[key].set_text('{:.4f}'.format(abs_vals[key] * corr_func)) + + fig.canvas.draw() + + def set_ref_val(xxx): + """ Function of the textbox """ + nonlocal f_vals + f_vals['ref_val'] = eval(xxx) + redraw_labels(f_vals) + + def set_ref_int(event): + nonlocal f_vals + tmp_plot.set_visible(False) # Set the integral function as invisible so that it does not overlay with the permanent one + xdata, ydata = tmp_plot.get_data() # Get the data from the red curve + + f_vals['ref_int'] = ydata[-1] # Calculate the integral and cast it to the correct entry in f_vals + f_vals['ref_pos'] = '{:.2f}:{:.2f}'.format(xdata[0], xdata[-1]) # Get reference peak position from the plot and save it in f_vals + + # Update the plot + ref_plot.set_data(xdata, ydata) # Draw permanent integral function, in blue + ref_plot.set_visible(True) # Because at the beginning it is invisible + + ref_text.set_text('{:.4e}'.format(f_vals['ref_int'])) # Update label under the blue label on the right + redraw_labels(f_vals) + + + def onselect(vsx, vdx): + """ When you drag and release """ + tmp_total_integral = np.copy(f_vals['total']) # Copy total_integral inside + + corr_func = f_vals['ref_val'] / f_vals['ref_int'] # Correction function + + sx, dx = max(vsx, vdx), min(vsx, vdx) # Sort the borders of the selected window + # Take indexes of the borders of the selected window and sort them + isx = misc.ppmfind(ppm, sx)[0] + idx = misc.ppmfind(ppm, dx)[0] + isx, idx = min(isx, idx), max(isx, idx) + + # Compute the integral + int_fun = processing.integral(data, ppm, (sx, dx)) # Integral function + int_val = int_fun[-1] # Value of the integral + tmp_total_integral += int_val # Update the total integral, but only inside + + # Update the plot + tmp_plot.set_data(ppm[isx:idx], int_fun) # Plot the integral function on the peak, normalized + tmp_plot.set_visible(True) # Set this plot as visible, if it is not + + tmp_text.set_text('{:.5f}'.format(int_val * corr_func)) # Relative value of the integral: under 
the red label on the right + tot_text.set_text('{:.4e}'.format(tmp_total_integral)) # Total integrated area: under the green label on the right + fig.canvas.draw() + + def f_add(event): + """ When you click 'ADD' """ + nonlocal f_vals, abs_vals + + tmp_plot.set_visible(False) # Set the integral function as invisible so that it does not overlay with the permanent one + xdata, ydata = tmp_plot.get_data() # Get the data from the red curve + + # Update the variables + f_vals['total'] += ydata[-1] + abs_vals['{:.2f}:{:.2f}'.format(xdata[0], xdata[-1])] = ydata[-1] + + # Update the plot + ax.plot(xdata, ydata, c='tab:green', lw=0.8) # Draw permanent integral function + tot_text.set_text('{:.4e}'.format(f_vals['total'])) # Text under green label on the right + + xtext = (xdata[0] + xdata[-1]) / 2 # x coordinate of the text: centre of the selected window + text_integrals['{:.2f}:{:.2f}'.format(xdata[0], xdata[-1])] = ax.text(xtext, ax.get_ylim()[-1], '{:.5f}'.format(ydata[-1]), horizontalalignment='center', verticalalignment='bottom', fontsize=10, rotation=60) # Add whatever to the label + + # Update all the integral texts according to the new total_integral value + redraw_labels(f_vals) + + def f_save(event): + """ When you click 'SAVE' """ + nonlocal f_vals # to update the output variable + # Append in the dictionary the relative values of the integrals + for key, value in abs_vals.items(): + f_vals[key] = value * f_vals['ref_val'] / f_vals['ref_int'] + plt.close() + + # --------------------------------------------------------------------------------------- + + # Add things to the figure panel + + ax.plot(ppm, data, c='tab:blue', lw=0.8) # Spectrum + tmp_plot, = ax.plot(ppm, int_f/max(int_f)*max(data), c='tab:red', lw=0.8, visible=False) # Draw the total integral function but set to invisible because it is useless, needed as placeholder for the red curve + ref_plot, = ax.plot(ppm, int_f/max(int_f)*max(data), c='b', lw=0.8, visible=False) # Draw the total integral 
function but set to invisible because it is useless, needed as placeholder for the blue curve + + # Draw text labels in the figure, on the right + ax.text(0.90, 0.68, 'Current integral (normalized)', horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14, color='tab:red') + tmp_text = ax.text(0.90, 0.65, '0', horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14) + ax.text(0.90, 0.60, 'Total integral', horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14, color='tab:green') + tot_text = ax.text(0.90, 0.55, '0', horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14) + ax.text(0.90, 0.50, 'Reference integral', horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14, color='b') + ref_text = ax.text(0.90, 0.45, '{:.4e}'.format(f_vals['ref_int']), horizontalalignment='center', verticalalignment='center', transform=fig.transFigure, fontsize=14) + + # Fancy shit + ax.set_xlim(ppm[0], ppm[-1]) + ax.set_xlabel(X_label) + ax.set_ylabel('Intensity /a.u.') + misc.pretty_scale(ax, ax.get_xlim(), 'x') + misc.pretty_scale(ax, ax.get_ylim(), 'y') + misc.mathformat(ax, 'y') + misc.set_fontsizes(ax, 14) + + # Add more widgets and connect the buttons to their functions + cursor = Cursor(ax, c='tab:red', lw=0.8, horizOn=False) # Vertical line that follows the cursor + span = SpanSelector(ax, onselect, 'horizontal', props=dict(facecolor='tab:red', alpha=0.5)) # Draggable window + add_button.on_clicked(f_add) + save_button.on_clicked(f_save) + setref_button.on_clicked(set_ref_int) + ref_tbox.on_submit(set_ref_val) + + # Show the figure + plt.show() + + return f_vals + + +def integrate_2D(ppm_f1, ppm_f2, data, SFO1, SFO2, fwhm_1=200, fwhm_2=200, utol_1=0.5, utol_2=0.5, plot_result=False): + """ + Function to select and integrate 2D peaks of a spectrum, using dedicated GUIs. 
    Calls integral_2D to do the dirty job.
    ---------
    Parameters:
    - ppm_f1: 1darray
        PPM scale of the indirect dimension
    - ppm_f2: 1darray
        PPM scale of the direct dimension
    - data: 2darray
        real part of the spectrum
    - SFO1: float
        Larmor frequency of the nucleus in the indirect dimension
    - SFO2: float
        Larmor frequency of the nucleus in the direct dimension
    - fwhm_1: float
        Starting FWHM /Hz in the indirect dimension
    - fwhm_2: float
        Starting FWHM /Hz in the direct dimension
    - utol_1: float
        Allowed tolerance for u_1 during the fit. (u_1-utol_1, u_1+utol_1)
    - utol_2: float
        Allowed tolerance for u_2 during the fit. (u_2-utol_2, u_2+utol_2)
    - plot_result: bool
        True to show how the program fitted the traces.
    --------
    Returns:
    - I: dict
        Computed integrals. The keys are 'ppm_f2:ppm_f1' peak positions with 2 decimal figures.
    """

    # Get all the information that integral_2D needs: one entry per selected peak
    peaks = misc.select_for_integration(ppm_f1, ppm_f2, data, Neg=True)

    I = {}      # Declare empty dictionary
    for P in peaks:
        # Extract trace F1: the column of the spectrum at the F2 peak position
        T1 = misc.get_trace(data, ppm_f2, ppm_f1, P['f2']['u'], column=True)
        x_T1, y_T1 = misc.trim_data(ppm_f1, T1, *P['f1']['lim'])    # Trim according to the rectangle
        # Extract trace F2: the row of the spectrum at the F1 peak position
        T2 = misc.get_trace(data, ppm_f2, ppm_f1, P['f1']['u'], column=False)
        x_T2, y_T2 = misc.trim_data(ppm_f2, T2, *P['f2']['lim'])    # Trim according to the rectangle

        # Compute the integrals
        I_p = processing.integral_2D(x_T1, y_T1, SFO1, x_T2, y_T2, SFO2,
                u_1=P['f1']['u'], fwhm_1=fwhm_1, utol_1=utol_1,
                u_2=P['f2']['u'], fwhm_2=fwhm_2, utol_2=utol_2,
                plot_result=plot_result)

        # Store the integral in the dictionary
        I[f'{P["f2"]["u"]:.2f}:{P["f1"]["u"]:.2f}'] = I_p
    return I


def make_iguess(S_in, ppm_scale, t_AQ, limits=None, SFO1=701.125, o1p=0, rev=True, name='i_guess.inp'):
    """
    Compute the initial guess for the quantitative fit of 1D NMR spectrum in an interactive manner.
    When the panel is closed, the values are saved in a file.
    -------
    Parameters:
    - S : 1darray
        Spectrum to be fitted
    - ppm_scale : 1darray
        Self-explanatory
    - t_AQ : 1darray
        Acquisition timescale
    - limits : tuple or None
        Trim limits for the spectrum (left, right). If None, the whole spectrum is used.
    - SFO1 : float
        Larmor frequency /MHz
    - o1p : float
        pulse carrier frequency /ppm
    - rev : bool
        choose if you want to reverse the x-axis scale (True) or not (False).
    - name : str
        name of the file where to save the parameters
    -------
    Returns:
    - V_f : 2darray
        matrix (# signals, parameters)
    - C_f : 1darray or False
        Coefficients of the polynomion to be used as baseline correction. If the 'baseline' checkbox in the interactive figure panel is not checked, C_f is False.
    """
    # Work on the real part only
    if np.iscomplexobj(S_in):
        S = np.copy(S_in).real
    else:
        S = np.copy(S_in)

    N = S.shape[-1]

    # Set limits according to rev
    if limits is None:
        if rev is True:
            limits = [max(ppm_scale), min(ppm_scale)]
        else:
            limits = [min(ppm_scale), max(ppm_scale)]


    # Get index for the limits
    lim1 = misc.ppmfind(ppm_scale, limits[0])[0]
    lim2 = misc.ppmfind(ppm_scale, limits[1])[0]
    # Total integral of the window: used to set the starting intensities
    I = np.trapz(S[lim1:lim2], dx=misc.calcres(ppm_scale))/SFO1

    # make boxes for widgets
    poly_box = plt.axes([0.72, 0.10, 0.10, 0.3])
    slider_box = plt.axes([0.68, 0.10, 0.01, 0.65])
    peak_box = plt.axes([0.72, 0.45, 0.10, 0.3])
    su_box = plt.axes([0.815, 0.825, 0.08, 0.075])
    giu_box = plt.axes([0.894, 0.825, 0.08, 0.075])
    save_box = plt.axes([0.7, 0.825, 0.085, 0.04])
    reset_box = plt.axes([0.7, 0.865, 0.085, 0.04])
    p_or_s_box = plt.axes([0.73, 0.78, 0.04, 0.03])
    check_box = plt.axes([0.85, 0.1, 0.1, 0.7])

    # Make widgets
    #   Buttons
    up_button = Button(su_box, '$\\uparrow$', hovercolor = '0.975')
    down_button = Button(giu_box, '$\\downarrow$', hovercolor = '0.975')
    save_button = Button(save_box, 'SAVE', hovercolor = '0.975')
    reset_button = Button(reset_box, 'RESET', hovercolor = '0.975')

    #   Radio
    poly_name = ['a', 'b', 'c', 'd', 'e']
    poly_radio = RadioButtons(poly_box, poly_name, activecolor='tab:orange')        # Polynomion
    peak_name = ['$\delta$ /ppm', '$\Gamma$ /Hz', '$k$', '$x_{g}$', '$\phi$', '$A$']
    peak_radio = RadioButtons(peak_box, peak_name, activecolor='tab:blue')          # Signal parameters

    #   Sliders
    slider = Slider(ax = slider_box, label = 'Active\nSignal', valmin = 1, valmax = 10, valinit = 1, valstep = 1, orientation='vertical', color='tab:blue')
    # p_or_s switches the up/down buttons and the scroll between peaks (0) and polynomion (1)
    p_or_s = Slider(p_or_s_box, '', valmin=0, valmax=1, valinit=0, valstep=1, track_color='tab:blue', color='tab:orange')

    #   Checkbox: 10 signals + the baseline
    check_name = [str(w+1) for w in range(10)]+['Basl']
    check_status = [True] + [False for w in range(10)]
    check = CheckButtons(check_box, check_name, check_status)

    # Create variable for the 'active' status
    stats = [np.zeros(len(peak_name)), np.zeros(len(poly_name))]
    #        u  s  k  xg  phi  A
    #        a  b  c  d   e
    stats[0][0] = 1
    stats[1][0] = 1

    # Initial values
    #   Polynomion coefficients
    C = np.zeros(len(poly_name))
    #   Parameters of the peaks: (u, s, k, x_g, phi, A)
    V = np.array([[(limits[0]+limits[1])/2, SFO1*0.5, 1, 1, 0, I/10] for w in range(10)])
    V_init = np.copy(V)     # Save for reset

    # Increase step for the polynomion (order of magnitude)
    om = np.zeros(len(poly_name))
    # Increase step for the peak parameters
    sens = np.array([0.1, 20, 0.05, 0.1, 5, I/100])
    sens_init = np.copy(sens)   # Save for reset

    # Functions connected to the widgets
    def statmod(label):
        # Sets 'label' as active modifying 'stats'
        nonlocal stats
        if label in peak_name:      # if signal
            stats[0] = np.zeros(len(peak_name))
            for k, L in enumerate(peak_name):
                if label == L:
                    stats[0][k] = 1
        elif label in poly_name:    # if baseline
            stats[1] = np.zeros(len(poly_name))
            for k, L in enumerate(poly_name):
                if label == L:
                    stats[1][k] = 1
        update(0)   # Call update to redraw the figure

    def roll_up_p(event):
        # Increase polynomion with mouse scroll
        nonlocal C
        for k in range(len(poly_name)):
            if stats[1][k]:
                C[k]+=10**om[k]

    def roll_down_p(event):
        # Decrease polynomion with mouse scroll
        nonlocal C
        for k in range(len(poly_name)):
            if stats[1][k]:
                C[k]-=10**om[k]

    def up_om(event):
        # Increase the om of the active coefficient by 1
        nonlocal om
        for k in range(len(poly_name)):
            if stats[1][k]:
                om[k] += 1

    def down_om(event):
        # Decrease the om of the active coefficient by 1
        nonlocal om
        for k in range(len(poly_name)):
            if stats[1][k]:
                om[k] -= 1

    def roll_up_s(event):
        # Increase signal with mouse scroll, clamping at the allowed maxima
        nonlocal V
        maxima = [np.inf, np.inf, np.inf, 1, 180, np.inf]
        for k in range(len(peak_name)):
            if stats[0][k]:
                V[slider.val-1,k]+=sens[k]
                if V[slider.val-1,k]>=maxima[k]:
                    V[slider.val-1,k]=maxima[k]
        # Propagate the intensity of the active signal to all of them
        V[:,-1] = V[slider.val-1,-1]

    def roll_down_s(event):
        # Decrease signal with mouse scroll, clamping at the allowed minima
        nonlocal V
        minima = [-np.inf, 0, 0, 0, -180, 0]
        for k in range(len(peak_name)):
            if stats[0][k]:
                V[slider.val-1,k]-=sens[k]
                if V[slider.val-1,k]<=minima[k]:
                    V[slider.val-1,k]=minima[k]
        # Propagate the intensity of the active signal to all of them
        V[:,-1] = V[slider.val-1,-1]

    def up_sens(event):
        # Doubles increase step
        nonlocal sens
        for k in range(len(peak_name)):
            if stats[0][k]:
                sens[k] *= 2

    def down_sens(event):
        # Halves increase step
        nonlocal sens
        for k in range(len(peak_name)):
            if stats[0][k]:
                sens[k] /= 2

    def switch_up(event):
        # Fork function for mouse scroll up
        if p_or_s.val == 0:
            up_sens(event)
        elif p_or_s.val == 1:
            up_om(event)

    def switch_down(event):
        # Fork function for mouse scroll down
        if p_or_s.val == 0:
            down_sens(event)
        elif p_or_s.val == 1:
            down_om(event)

    def on_scroll(event):
        # Mouse scroll
        if event.button == 'up':
            if p_or_s.val==0:
                roll_up_s(event)
            elif p_or_s.val==1:
                roll_up_p(event)
        elif event.button == 'down':
            if p_or_s.val==0:
                roll_down_s(event)
            elif p_or_s.val==1:
                roll_down_p(event)
        update(0)

    def set_visible(label):
        # Set line visible or invisible according to the checkbox
        index = check_name.index(label)
        if index < 10:      # a signal
            s_plot[index].set_visible(not s_plot[index].get_visible())
            if s_plot[index].get_visible():
                slider.set_val(index+1)
        elif index == 10:   # the baseline
            poly_plot.set_visible(not poly_plot.get_visible())
        update(0)

    def head_color(null):
        # Color the header according to the active element (signal or baseline)
        if p_or_s.val:
            head_print.set_color(s_colors[-1])
        else:
            w = slider.val - 1
            head_print.set_color(s_colors[w])
        plt.draw()


    # polynomion scale: from 1 down to 0
    x = np.linspace(0, 1, ppm_scale[lim1:lim2].shape[-1])[::-1]
    y = np.zeros_like(x)

    # Signals
    sgn = np.zeros((V.shape[0], ppm_scale.shape[-1]))   # array for the single signals
    Total = np.zeros(len(ppm_scale[lim1:lim2]))         # total function
    for i in range(V.shape[0]):
        sgn[i] = make_signal(t_AQ, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5], SFO1, o1p, N).real
        if check_status[i]:
            Total += sgn[i][lim1:lim2].real


    # Initial figure
    fig = plt.figure(1)
    fig.set_size_inches(15,8)
    plt.subplots_adjust(bottom=0.10, top=0.90, left=0.05, right=0.65)
    ax = fig.add_subplot(1,1,1)

    ax.plot(ppm_scale[lim1:lim2], S[lim1:lim2], label='Experimental', lw=1.0, c='k')    # experimental

    # signals, total, polynomion
    s_plot=[]
    for i in range(V.shape[0]):
        temp, = ax.plot(ppm_scale[lim1:lim2], sgn[i][lim1:lim2].real, c=s_colors[i], lw=1.0, ls='--')
        s_plot.append(temp)
    total_plot, = ax.plot(ppm_scale[lim1:lim2], y+Total, label = 'Fit', c='tab:blue', lw=1.0)
    poly_plot, = ax.plot(ppm_scale[lim1:lim2], y, label = 'Baseline', lw=0.8, c='tab:orange')
    s_colors.append(poly_plot.get_color())

    # make pretty scale
    ax.set_xlim(max(limits[0],limits[1]),min(limits[0],limits[1]))
    misc.pretty_scale(ax, ax.get_xlim(), axis='x', n_major_ticks=10)

    # Header for current values print
    head_print = ax.text(0.1, 0.04,
            '{:_>7s} ,{:_>5} ,{:_>5} ,{:_>5} ,{:_>7} ,{:_>11} | {:_^11}, {:_^11}, {:_^11}, {:_^11}, {:_^11}'.format(
                'u', 's', 'k', 'xg', 'phi', 'A', 'a', 'b', 'c', 'd', 'e'),
            ha='left', va='bottom', transform=fig.transFigure, fontsize=10, color=s_colors[0])
    values_print = ax.text(0.1, 0.01,
            '{:+7.2f}, {:5.0f}, {:5.3f}, {:5.2f}, {:+07.2f}, {:5.2e} | {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}'.format(
                V[0,0], V[0,1],V[0,2],V[0,3],V[0,4],V[0,5], C[0], C[1], C[2], C[3], C[4]),
            ha='left', va='bottom', transform=fig.transFigure, fontsize=10)

    # Customize checkbox appearance
    #   make boxes more squared
    HBOX = check_box.dataLim.bounds[-1]
    misc.edit_checkboxes(check, xadj=0, yadj=0.001, length=0.1, height=(HBOX-0.5*HBOX)/len(check_name), color=s_colors)

    def update(val):
        # Calculates and draws all the figure elements
        Total_inside = np.zeros_like(Total)
        check_status = check.get_status()
        sgn = []
        if check_status[-1]:    # baseline check
            y = misc.polyn(x, C)
        else:
            y = np.zeros_like(x)

        # Make the signals
        for i in range(V.shape[0]):
            if check_status[i]:
                sgn.append(make_signal(t_AQ, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5], SFO1, o1p, N))
                Total_inside += sgn[i][lim1:lim2].real
            else:
                sgn.append(np.zeros_like(ppm_scale))

        # Update the plot
        for i in range(V.shape[0]):
            if check_status[i]:
                s_plot[i].set_ydata(sgn[i][lim1:lim2].real)
        poly_plot.set_ydata(y)
        total_plot.set_ydata(y+Total_inside)
        w = slider.val - 1
        # print the current values
        values_print.set_text(
                '{:+7.2f}, {:5.0f}, {:5.3f}, {:5.2f}, {:+07.2f}, {:5.2e} | {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}'.format(
                    V[w,0], V[w,1],V[w,2],V[w,3],V[w,4],V[w,5], C[0], C[1], C[2], C[3], C[4]))
        plt.draw()

    def reset(event):
        # Sets all the widgets to their starting values
        nonlocal C, V, om, sens
        C = np.zeros(len(poly_name))
        V = np.copy(V_init)
        om = np.zeros_like(C)
        sens = np.copy(sens_init)
        update(0)   # to update the figure

    # Declare variables to store the final values
    V_f = []
    C_f = np.zeros_like(C)
    def save(event):
        # Put current values in the final variables that are returned
        nonlocal V_f, C_f
        V_f = []
        check_status=check.get_status()
        for i in range(len(check_status)-1):    # last one is baseline
            if check_status[i]:
                V_f.append(V[i])
        V_f = np.array(V_f)
        # NOTE(review): 'i' here is the leftover loop index (signal 10), not the
        # baseline entry — the baseline flag is check_status[-1]. Likely a bug; verify.
        if check_status[i]:
            C_f = np.copy(C)


    # Connect widgets to functions
    poly_radio.on_clicked(statmod)
    peak_radio.on_clicked(statmod)
    up_button.on_clicked(switch_up)
    down_button.on_clicked(switch_down)
    scroll = fig.canvas.mpl_connect('scroll_event', on_scroll)
    check.on_clicked(set_visible)
    save_button.on_clicked(save)
    reset_button.on_clicked(reset)
    p_or_s.on_changed(head_color)
    slider.on_changed(head_color)

    # Ruler for slider
    for i, H in enumerate(np.linspace(0.10, 0.75, 10)):
        plt.text(0.685, H, '$-$', ha='center', va='center', fontsize=20, color=s_colors[i], transform=fig.transFigure)

    # Set visibility
    for i in range(V.shape[0]):
        s_plot[i].set_visible(check_status[i])
    poly_plot.set_visible(False)

    ax.legend()
    plt.show()

    # correct the intensities: normalize k and scale the total intensity accordingly
    V_f[:,2], Acorr = misc.molfrac(V_f[:,2])
    V_f[:,-1] *= Acorr


    # Write the input file and return the values
    check_status=check.get_status()
    if check_status[-1]:
        write_par(V_f, C_f, limits, filename=name)
        return V_f, C_f
    else:
        write_par(V_f, False, limits, filename=name)
        return V_f, False



# --------------------------------------------------------------------

def read_par(filename):
    """
    Reads the input file of the fit and returns the values.
    --------
    Parameters:
    - filename: str
        directory and name of the input file to be read
    --------
    Returns:
    - V : 2darray
        matrix (# signals, parameters)
    - C : 1darray or False
        Coefficients of the polynomion to be used as baseline correction. If the 'baseline' checkbox in the interactive figure panel is not checked, C_f is False.
    - limits : tuple or None
        Trim limits for the spectrum (left, right). If None, the whole spectrum is used.
+ """ + # Declare empty variables + V = [] + C = False + limits = None + + f = open(filename, 'r') + L = f.readlines() + V_flag = 0 # Am I reading the signal parameter section? + B_flag = 0 # Am I reading the baseline parameter section? + L_flag = 0 # Am I reading the window limits? + for i, line in enumerate(L): + if line[0] == '#' or line == '\n': # Comment or empty line + continue + line = line.strip() + if line == '***{:^60}***'.format('SIGNAL PARAMETERS'): + V_flag = 1 + continue + if line == '***{:^60}***'.format('END OF SIGNAL PARAMETERS'): + V_flag = 0 + continue + if line == '***{:^60}***'.format('BASELINE PARAMETERS'): + B_flag = 1 + continue + if line == '***{:^60}***'.format('WINDOW DELIMITERS /ppm'): + L_flag = 1 + continue + if line == '***{:^60}***'.format('END OF FILE'): + f.close() + break + + + if L_flag: + v = line.split('\t') + limits = float(v[-2].replace(' ','')), float(v[-1].replace(' ', '')) + L_flag = 0 + + if V_flag: + v = line.split('\t') + V.append(np.array([float(w.replace(' ', '')) for w in v[1:]])) # [1:] because first column is signal index + + if B_flag: + v = line.split('\t') + C = np.array([float(w.replace(' ', '')) for w in v]) + B_flag = 0 + + V = np.array(V) + return V, C, limits + + +def write_par(V, C, limits, filename='i_guess.inp'): + """ + Write the parameters of the fit, whether they are input or output. + -------- + Parameters: + - V : 2darray + matrix (# signals, parameters) + - C : 1darray or False + Coefficients of the polynomion to be used as baseline correction. If the 'baseline' checkbox in the interactive figure panel is not checked, C_f is False. + - limits : tuple + Trim limits for the spectrum (left, right). 
def write_par(V, C, limits, filename='i_guess.inp'):
    """
    Write the parameters of the fit, whether they are input or output.
    --------
    Parameters:
    - V : 2darray
        matrix (# signals, parameters)
    - C : 1darray, False or None
        Coefficients of the polynomion to be used as baseline correction. False (or None) means "no baseline": the baseline section is not written.
    - limits : tuple
        Trim limits for the spectrum (left, right).
    - filename: str or file-like
        directory and name of the file to be written, or an already-opened file object to write into (in this case, the caller keeps ownership: no closing statement is written and the file is not closed).
    """
    if isinstance(filename, str):
        f = open(filename, 'w')
    else:   # Already-opened handle: do not close it here
        f = filename
    f.write('***{:^60}***\n'.format('WINDOW DELIMITERS /ppm'))
    f.write('{:=7.2f}\t{:=7.2f}\n\n'.format(limits[0], limits[1]))

    f.write('***{:^60}***\n'.format('SIGNAL PARAMETERS'))
    f.write('{:<4}\t{:>7}\t{:>5}\t{:>5}\t{:>5}\t{:>5}\t{:<9}\n'.format('#', 'u', 's', 'k', 'x_g', 'phi', 'A'))
    for i in range(V.shape[0]):
        f.write('{:<4.0f}\t{:=7.2f}\t{:5.0f}\t{:5.3f}\t{:5.2f}\t{: 5.2f}\t{:5.2e}\n'.format( i+1, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5]))
    f.write('***{:^60}***\n'.format('END OF SIGNAL PARAMETERS'))

    # Write baseline coefficients only if explicitly given.
    # None is accepted as a synonym of False: 'C is not False' alone would
    # crash on C=None while trying to index it.
    if C is not False and C is not None:
        f.write('\n***{:^60}***\n'.format('BASELINE PARAMETERS'))
        f.write('#\t{:^9}\t{:^9}\t{:^9}\t{:^9}\t{:^9}\n'.format('a', 'b', 'c', 'd', 'e'))
        f.write(' \t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\n'.format(C[0], C[1], C[2], C[3], C[4]))
        f.write('\n***{:^60}***\n'.format('END OF BASELINE PARAMETERS'))

    if isinstance(filename, str):
        f.write('\n***{:^60}***\n'.format('END OF FILE'))
        f.close()


def print_par(V, C, limits=(None, None)):
    """
    Prints on screen the same thing that write_par writes in a file.
    --------
    Parameters:
    - V : 2darray
        matrix (# signals, parameters)
    - C : 1darray, False or None
        Coefficients of the polynomion to be used as baseline correction. False (or None) means "no baseline": the baseline section is not printed.
    - limits : tuple
        Trim limits for the spectrum (left, right). Printed only if both entries are set.
    """
    print('***{:^60}***'.format('SIGNAL PARAMETERS'))
    print('{:<4}\t{:>7}\t{:>5}\t{:>5}\t{:>5}\t{:>5}\t{:<9}'.format('#', 'u', 's', 'k', 'x_g', 'phi', 'A'))
    for i in range(V.shape[0]):
        print('{:<4.0f}\t{:=7.2f}\t{:5.0f}\t{:5.3f}\t{:5.2f}\t{: 5.2f}\t{:5.2e}'.format( i+1, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5]))

    if C is not False and C is not None:
        print('***{:^60}***\n'.format('BASELINE PARAMETERS'))
        print('#\t{:^9}\t{:^9}\t{:^9}\t{:^9}\t{:^9}'.format('a', 'b', 'c', 'd', 'e'))
        print(' \t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}'.format(C[0], C[1], C[2], C[3], C[4]))
    # Print the window only when it is actually given: the default (None, None)
    # cannot be formatted as float and would raise a TypeError
    if limits[0] is not None and limits[1] is not None:
        print('#\tWINDOW DELIMITERS /ppm')
        print('{:=7.2f}\t{:=7.2f}'.format(limits[0], limits[1]))
+ """ + print('***{:^60}***'.format('SIGNAL PARAMETERS')) + print('{:<4}\t{:>7}\t{:>5}\t{:>5}\t{:>5}\t{:>5}\t{:<9}'.format('#', 'u', 's', 'k', 'x_g', 'phi', 'A')) + for i in range(V.shape[0]): + print('{:<4.0f}\t{:=7.2f}\t{:5.0f}\t{:5.3f}\t{:5.2f}\t{: 5.2f}\t{:5.2e}'.format( i+1, V[i,0], V[i,1], V[i,2], V[i,3], V[i,4], V[i,5])) + + if C is not False: + print('***{:^60}***\n'.format('BASELINE PARAMETERS')) + print('#\t{:^9}\t{:^9}\t{:^9}\t{:^9}\t{:^9}'.format('a', 'b', 'c', 'd', 'e')) + print(' \t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}\t{: 5.2e}'.format(C[0], C[1], C[2], C[3], C[4])) + print('#\tWINDOW DELIMITERS /ppm') + print('{:=7.2f}\t{:=7.2f}'.format(limits[0], limits[1])) + +def dic2mat(dic, peak_names, ns, A=None): + """ + This is used to make the matrix of the parameters starting from a dictionary like the one produced by l. + The column of the total intensity is not added, unless the parameter 'A' is passed. In this case, the third column (which is the one with the relative intesities) is corrected using the function molfrac. + -------- + Parameters: + - dic : dict + input dictionary + - peak_names : list + list of the parameter entries to be looked for + - ns : int + number of signals to unpack + - A : float or None + Total intensity. + ------- + Returns: + - V : 2darray + Matrix containing the parameters. + """ + V = [] + # u s k xg phi A + for i in range(ns): + V.append([]) + for j in range(len(peak_names)): + V[i].append(dic[peak_names[j]+str(i+1)]) + V = np.array(V) + if A is None: + return V + else: + V[:,2], Acorr = misc.molfrac(V[:,2]) + A_arr = Acorr * np.array([A for w in range(ns)]) + V = np.concatenate((V, A_arr.reshape(-1, 1)), axis=-1) + return V + + + +# -------------------------------------------------------------------- + + +def test_residuals(R, nbins=100, density=False): + """ + Test the residuals of a fit to see if it was reliable. + Returns two values: SYSDEV and Q_G. 
+ SYSDEV is inspired by Svergun's Gnom, and it tells if there are systematic deviations basing on the number of sign changes in the residual. Optimal value must be 1. + Q_G is to see the discrepancy between a gaussian function built with the mean and standard deviation of the residuals and the gaussian built fitting the histogram of the residuals. Values go from 0 (worst case) to 1 (best case). + ------- + Parameters: + - R : 1darray + Array of the residuals + - nbins : int + number of bins of the histogram, i.e. the number of points that will be used to fit the histogram. + - density : bool + True to normalize the histogram, False otherwise. + ------- + Returns: + - SYSDEV : float + Read full caption + - Q_G : float + Read full caption + """ + # Get theoretical mean and std of the residue + m_t, s_t = np.mean(R), np.std(R) + + # Calculate SYSDEV + N_s = np.sum((np.diff(np.sign(R)) != 0)*1) + SYSDEV = N_s / (len(R)/2) + + # Make histogram + hist, bin_edges = np.histogram(R, bins=nbins, density=density) # Computes the bins for the histogram + + # Set A according to density + if density: + A_t = 1 + else: + A_t = np.trapz(hist, dx=bin_edges[1]-bin_edges[0]) # Integral + + # center point of the bin bar + x = np.array( [(bin_edges[i]+bin_edges[i+1])/2 for i in range(len(bin_edges)-1)]) + + # Theoretical gaussian function and its integral + G_t = sim.f_gaussian(x, m_t, s_t, A_t) + I_t = np.trapz(G_t, dx=misc.calcres(x)) + + # Fitted gaussian and its integral + m_f, s_f, A_f = gaussian_fit(x, hist) + G_f = sim.f_gaussian(x, m_f, s_f, A_f) + I_f = np.trapz(G_f, dx=misc.calcres(x)) + + # Calculate Q_G + Q_G = np.trapz(np.abs(G_t - G_f), dx=misc.calcres(x)) + # Normalize it. 1- is to make it similar to SYSDEV + Q_G = 1 - (Q_G / (I_t + I_f)) + + return SYSDEV, Q_G + +def write_log(input_file, output_file, limits, V_i, C_i, V_f, C_f, result, runtime, test_res=True, log_file='fit.log'): + """ + Write a log file with all the information of the fit. 
+ ------- + Parameters: + - input_file: str + Location and filename of the input file + - output_file: str + Location and filename of the output file + - limits: tuple + Delimiters of the spectral region that was fitted. (left, right) + - V_i: 2darray + Initial parameters of the fit + - C_i: 1darray or False + Coefficients of the starting polynomion used for baseline correction. If False, it was not used. + - V_f: 2darray + Final parameters of the fit + - C_f: 1darray or False + Coefficients of the final polynomion used for baseline correction. If False, it was not used. + - result: lmfit.FitResult Object + Object returned by lmfit after the fit. + - runtime: datetime.datetime Object + Time taken for the fit + - test_res: bool + Choose if to test the residual with the fit.test_residual function (True) or not (False) + - log_file: str + Filename of the log file to be saved. + """ + now = datetime.now() + date_and_time = now.strftime("%d/%m/%Y at %H:%M:%S") + f = open(log_file, 'w') + + f.write('***{:^60}***\n\n'.format('FIT LOG')) + f.write('Fit performed by {} on {}\n\n'.format(os.getlogin(), date_and_time)) + f.write('-'*60) + f.write('\n\n') + + f.write('{:<12}{:>}\n'.format('Input file:', os.path.abspath(input_file))) + write_par(V_i, C_i, limits=limits, filename=f) + + f.write('-'*60) + f.write('\n\n') + + f.write('{:<12}{:>}\n'.format('Output file:', os.path.abspath(output_file))) + write_par(V_f, C_f, limits=limits, filename=f) + + f.write('-'*60) + f.write('\n') + + f.write('{}\nTotal runtime: {}.\nNumber of function evaluations: {:5.0f}\n\n'.format(result.message, runtime, result.nfev)) + + # Check for the gaussianity of the residual + if test_res is True: + R = result.residual + m_R = np.mean(R) + SYSDEV, Q_G = test_residuals(R) + f.write('{:^60}\n'.format('Statistics of the fit')) + f.write('{:<30} = {:=9.2e} | Optimal : 0\n'.format('Mean of residuals', m_R)) + f.write('{:<30} = {:9.6f} | Optimal : 1\n'.format('Systematic deviation', SYSDEV)) + 
def gaussian_fit(x, y, s_in=None):
    """
    Fit 'y' with a gaussian function, built using 'x' as independent variable
    -------
    Parameters:
    - x : 1darray
        x-scale
    - y : 1darray
        data to be fitted
    - s_in : float or None
        If given, starting value for the standard deviation of the gaussian.
    -------
    Returns:
    - u : float
        mean
    - s : float
        standard deviation
    - A : float
        Integral
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Starting guess for the integral: trapezoid integration of the data
    A0 = np.trapz(y, dx=misc.calcres(x))

    # Make parameter dictionary.
    # 'u' is the center of the gaussian, hence it lies on the x-scale:
    # start from the abscissa of the maximum of y and bound it inside x
    # (the previous guess used mean/min/max of y, i.e. the wrong axis).
    param = l.Parameters()
    param.add('u', value=x[np.argmax(y)], min=np.min(x), max=np.max(x))
    param.add('s', value=np.std(x), min=0, max=np.inf)
    if s_in is not None:    # honor an explicit starting width, even 0
        param['s'].set(value=s_in)
    param.add('A', value=A0, min=0, max=5*A0)

    def f2min(param, x, y):
        # Cost function: residual between the data and the model gaussian
        par = param.valuesdict()
        G = sim.f_gaussian(x, par['u'], par['s'], par['A'])
        return y - G

    minner = l.Minimizer(f2min, param, fcn_args=(x, y))
    result = minner.minimize(method='leastsq', max_nfev=10000, xtol=1e-15, ftol=1e-15)

    # Return the optimized values
    popt = result.params.valuesdict()
    return popt['u'], popt['s'], popt['A']
+ - input_file : str + filename of the input file + - output_file : str + filename of the output file + - log_file : str + filename of the log file + - limits : tuple + borders of the fitting window + - Vi : 2darray + array with the values of the signals used as initial guess + - Ci : 1darray + coefficients of the baseline polynomion as initial guess + - Vf : 2darray + array with the values of the signals after the fit + - Cf : 1darray + coefficients of the baseline polynomion after the fit + - s_labels : list + legend entries for the single signals. + -------- + Methods: + - __init__(self, ppm_scale, S, t_AQ, SFO1, o1p, nuc=None): + Add common variables + - iguess(self, input_file, limits=None): + Create initial guess and writes the input file if not present + - dofit(self, log_file='fit.log', output_file='fit.out', utol=0.5, vary_phi=False, vary_xg=False, res_hist_name='histogram_of_residuals', test_res=True): + Fit the data, writes the output and log file + - plot(self, what, name=None, s_labels=None, X_label='$\delta\, $F1 /ppm', n_major_ticks=10): + plot either the initial guess or the fitted data + """ + + def __init__(self, ppm_scale, S, t_AQ, SFO1, o1p, nuc=None): + self.ppm_scale = ppm_scale + self.S = S + self.t_AQ = t_AQ + self.SFO1 = SFO1 + self.o1p = o1p + if nuc is None: + self.X_label = '$\delta\,$ /ppm' + elif isinstance(nuc, str): + fnuc = misc.nuc_format(nuc) + self.X_label = '$\delta$ ' + fnuc +' /ppm' + + def iguess(self, input_file=None, limits=None): + # If input_file is not given, set in_file_exist to False. + # If input_file is passed as argument, check if there is already, and set in_file_exist accordingly. + if input_file is None: + in_file_exist = False + else: + in_file_exist = os.path.exists(input_file) + + + if in_file_exist is True: # Read everything you need from the file + self.Vi, self.Ci, self.limits = fit.read_par(input_file) + else: # Make the initial guess interactively and save the file. 
+ # Get the limits interactively if they are not given, and the input file is to be created + if limits is None: + self.limits = fit.get_region(self.ppm_scale, self.S) + else: + self.limits = limits + if input_file is None: # If the input file name was not passed, set a default name + input_file = f'inp_{self.limits[0]:.2f}:{self.limits[1]:.2f}' + self.Vi, self.Ci = fit.make_iguess(self.S, self.ppm_scale, self.t_AQ, self.limits, self.SFO1, self.o1p, name=input_file) + + self.input_file = input_file + print(f'{input_file} loaded as input file.') + + def load_fit(self, output_file=None): + out_file_exist = os.path.exists(output_file) + if out_file_exist is True: # Read everything you need from the file + self.Vf, self.Cf, self.limits = fit.read_par(output_file) + self.output_file = output_file + print(f'{output_file} loaded as output file.') + else: + raise NameError(f'{output_file} does not exist.') + + def dofit(self, input_file=None, output_file=None, log_file='fit.log', on_cplex=True, **kwargs): + """ + kwargs: + - utol + - vary_phi + - vary_xg + - test_res + - hist_name + """ + if input_file is None: + input_file = self.input_file + if output_file is None: + output_file = f'out_{self.limits[0]:.2f}:{self.limits[1]:.2f}' + + self.output_file = output_file + if np.iscomplexobj(self.S) and on_cplex: + S = np.copy(self.S) + else: + S = np.copy(self.S.real) + + self.Vf, self.Cf, self._result, self._runtime = fit.voigt_fit( + S, self.ppm_scale, self.Vi, self.Ci, self.t_AQ, self.limits, self.SFO1, self.o1p, + write_out=self.output_file, **kwargs) + + fit.write_log(input_file=self.input_file, output_file=self.output_file, + limits=self.limits, V_i=self.Vi, C_i=self.Ci, V_f=self.Vf, C_f=self.Cf, + result=self._result, runtime=self._runtime, test_res=True, log_file=log_file) + + def plot(self, what='fit', s_labels=None, **kwargs): + if what == 'iguess': + V = self.Vi + C = self.Ci + elif what == 'fit': + V = self.Vf + C = self.Cf + limits = self.limits + + if s_labels is 
not None: + self.s_labels = s_labels + + if np.iscomplexobj(self.S): + S = np.copy(self.S.real) + else: + S = np.copy(self.S) + + + figures.fitfigure(S, self.ppm_scale, self.t_AQ, + V, C, SFO1=self.SFO1, o1p=self.o1p, limits=limits, + s_labels=s_labels, X_label=self.X_label, **kwargs) + + def _join(self, files, joined_name=None, flag=0): + joined = fit.join_par(files, self.ppm_scale, joined_name) + self.limits = joined[2] + if flag == 0: #input + self.Vi = joined[0] + self.Ci = joined[1] + self.ibasl = joined[3] + elif flag == 1: #output + self.Vf = joined[0] + self.Cf = joined[1] + self.fbasl = joined[3] + + def get_fit_lines(self, what='fit'): + + if what == 'iguess': + V = self.Vi + C = self.Ci + elif what == 'fit': + V = self.Vf + C = self.Cf + + N = self.S.shape[-1] + + signals, Total, baseline = fit.calc_fit_lines(self.ppm_scale, self.limits, self.t_AQ, self.SFO1, self.o1p, N=N, V=V, C=C) + + return signals, Total, baseline + + +def gen_iguess(x, experimental, param, model, model_args=[]): + """ + GUI for the interactive setup of a Parameters object to be used in a fitting procedure. + Once you initialized the Parameters object with the name of the parameters and a dummy value, you are allowed to set the value, minimum, maximum and vary status through the textboxes given in the right column, and see their effects in real time. + Upon closure of the figure, the Parameters object with the updated entries is returned. + A maximum of 18 parameters will fit the figure. + --------- + Parameters: + - x: 1darray + Independent variable + - experimental: 1darray + The objective values you are trying to fit + - param: lmfit.Parameters Object + Initialized parameters object + - model: function + Function to be used for the generation of the fit model. Param must be the first argument. 
+ - model_args: list + List of args to be passed to model, after param + --------- + Returns: + - param: lmfit.Parameters Object + Updated Parameters Object + """ + + # Declare some stuff to be used multiple times + L_box = 0.08 # Length of the textboxes + H_box = 0.04 # Height of the textboxes + + # Y position of the rows + y0box = 0.85 + list_Y_box = [] + for k in range(len(param)): + space = H_box + 0.01 + list_Y_box.append(y0box - k*space) + + + # --------------------------------------------------------------------------------------- + # Functions connected to the widgets + def update(text): + """ Called upon writing something in the textboxes """ + def get_val(tb): + """ Overwrite inf with np.inf otherwise raises error """ + if 'inf' in tb.text: + return eval(tb.text.replace('inf', 'np.inf')) + else: + return eval(tb.text) + + nonlocal param + # Read all textboxes at once and set Parameters accordingly + for p, tb_val, tb_min, tb_max in zip(labels, val_tb, min_tb, max_tb): + param[p].set(value=get_val(tb_val), min=get_val(tb_min), max=get_val(tb_max)) + + # Compute and redraw the model function + newmodel = model(param, *model_args) + model_plot.set_ydata(newmodel) + plt.draw() + + def set_vary(null): + """ Called by the checkboxes """ + nonlocal param + # Read all textboxes at once, set Parameters.vary accordingly + for cb, p in zip(var_cb, labels): + param[f'{p}'].set(vary=cb.get_status()[0]) + + # --------------------------------------------------------------------------------------- + + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figures.figsize_large) + plt.subplots_adjust(left=0.1, right=0.6, top=0.9, bottom=0.1) + ax = fig.add_subplot(1,1,1) + + # Draw the widgets + # Header row + [plt.text(X, 0.925, f'{head}', ha='center', transform=fig.transFigure) + for X, head in zip((0.635, 0.72, 0.81, 0.90, 0.96), ('Parameter', 'Value', 'Min', 'Max', 'Vary'))] + # First column + labels = [f'{p}' for p in param] # Name of the parameters + # Write 
them in the first column, right-aligned + [plt.text(0.675, Y_box+H_box/2, f'{label}', ha='right', va='center', transform=fig.transFigure) for Y_box, label in zip(list_Y_box, labels)] + # Textboxes for 'value' + val_boxes = [plt.axes([0.68, Y_box, L_box, H_box]) for Y_box in list_Y_box] + val_tb = [TextBox(box, '', textalignment='center', initial=f'{param[p].value}') for box, p in zip(val_boxes, labels)] + # Textboxes for 'min' + min_boxes = [plt.axes([0.77, Y_box, L_box, H_box]) for Y_box in list_Y_box] + min_tb = [TextBox(box, '', textalignment='center', initial=f'{param[p].min}') for box, p in zip(min_boxes, labels)] + # Textboxes for 'max' + max_boxes = [plt.axes([0.86, Y_box, L_box, H_box]) for Y_box in list_Y_box] + max_tb = [TextBox(box, '', textalignment='center', initial=f'{param[p].max}') for box, p in zip(max_boxes, labels)] + # Checkboxes for 'vary' + var_boxes = [plt.axes([0.95, Y_box, 0.025, H_box]) for Y_box in list_Y_box] + var_cb = [CheckButtons(box, labels=[''], actives=[f'{param[p].vary}']) for box, p in zip(var_boxes, labels)] + [misc.edit_checkboxes(cb, 0.2, 0.2, 0.6, 0.6, color='tab:blue') for cb in var_cb] # make bigger squares + + # Plot the data and the model + ax.plot(x, experimental, '.', markersize=2, c='tab:red', label='Observed data') + model_plot, = ax.plot(x, model(param, *model_args), c='tab:blue', label='Model') + + # Fancy shit + misc.pretty_scale(ax, ax.get_xlim(), 'x') + misc.pretty_scale(ax, ax.get_ylim(), 'y') + misc.mathformat(ax) + ax.legend() + misc.set_fontsizes(ax, 15) + + # Connect the widgets to their functions + for column in zip(val_tb, min_tb, max_tb): + for box in column: + box.on_submit(update) + for cb in var_cb: + cb.on_clicked(set_vary) + + plt.show() + + return param + diff --git a/klassez/misc.py b/klassez/misc.py new file mode 100644 index 0000000..986d422 --- /dev/null +++ b/klassez/misc.py @@ -0,0 +1,1481 @@ +#! 
def noise_std(y):
    """
    Calculates the standard deviation of the noise using the Bruker formula. Taken y as an array of N points, and yi its i-th entry:
    -------
    Parameters
    - y : 1darray
        The spectral region you would like to use to calculate the standard deviation of the noise.
    -------
    Returns
    - noisestd : float
        The standard deviation of the noise.
    """
    N = len(y)
    n = N // 2 + 1
    # Squared sum of all the points
    W = sum(y[k] for k in range(N)) ** 2
    # Sum of the squared points
    Y = sum(y[k] ** 2 for k in range(N))
    # Index-weighted antisymmetric cross term
    X = sum(k * (y[-k] - y[k - 1]) for k in range(1, n))
    # Bruker noise formula
    return (N - 1) ** (-0.5) * np.sqrt(Y - 1 / N * (W + 3 * X ** 2 / (N ** 2 - 1)))
+ # n_reg is a list/tuple of 2 values that delimitates the noise region + if signal is None: + signal = np.max(data) + + if n_reg is None: + y = data + else: + y = data[min(n_reg[0], n_reg[1]):max(n_reg[0], n_reg[1])] + snr = signal / (2 * misc.noise_std(y)) + return snr + +def SNR_2D(data, n_reg=None): + """ + Computes the signal to noise ratio of a 2D spectrum. + ------- + Parameters + - data : 1darray + The spectrum of which you want to compute the SNR + - n_reg : list or tuple + If provided, the points of F1 scale and F2 scale, respectively, of which to extract the projections. + Otherwise, opens the tool for interactive selection. + -------- + Returns + - snr_f1 : float + The SNR of the indirect dimension + - snr_f2 : float + The SNR of the direct dimension + """ + # Computes the SNR of a 2D spectrum. + # n_reg is: (ppmf1 for f2 trace, ppmf2 for f1 trace) + if n_reg is None: + x_scale = np.arange(data.shape[-1]) + y_scale = np.arange(data.shape[0]) + coord = misc.select_traces(y_scale, x_scale, data) + n_reg = (coord[0][0], coord[0][1]) + print('index for SNR (F1 | F2): ',n_reg) + + f1_trace = data[:,n_reg[0]] + f2_trace = data[n_reg[1],:] + + snr_f1 = misc.SNR(f1_trace, signal=np.max(data)) + snr_f2 = misc.SNR(f2_trace, signal=np.max(data)) + + return snr_f1, snr_f2 + +def makeacqus_1D(dic): + """ + Given a NMRGLUE dictionary from a 1D spectrum (generated by ng.bruker.read), this function builds the acqus file with only the "important" parameters. 
+ ------- + Parameters + - dic: dict + NMRglue dictionary returned by ng.bruker.read + ------- + Returns + - acqus : dict + Dictionary with only few parameters + """ + acqus = {} + acqus['nuc'] = dic['acqus']['NUC1'] + acqus['SFO1'] = dic['acqus']['SFO1'] + acqus['SWp'] = dic['acqus']['SW'] + acqus['TD'] = int(dic['acqus']['TD'])//2 # Fuckin' Bruker + acqus['o1'] = dic['acqus']['O1'] + + acqus['B0'] = acqus['SFO1'] / sim.gamma[acqus['nuc']] + acqus['o1p'] = acqus['o1'] / acqus['SFO1'] + acqus['SW'] = acqus['SWp'] * np.abs(acqus['SFO1']) + acqus['dw'] = 1 / acqus['SW'] + acqus['t1'] = np.linspace(0, acqus['TD']*acqus['dw'], acqus['TD']) + acqus['AQ'] = acqus['t1'][-1] + return acqus + +def makeacqus_2D(dic): + """ + Given a NMRGLUE dictionary from a 2D spectrum (generated by ng.bruker.read ), this function builds the acqus file with only the "important" parameters. + ------- + Parameters + - dic: dict + NMRglue dictionary returned by ng.bruker.read + ------- + Returns + - acqus : dict + Dictionary with only few parameters + """ + acqus = {} + acqus['nuc1'] = dic['acqu2s']['NUC1'] + acqus['nuc2'] = dic['acqus']['NUC1'] + acqus['SFO1'] = dic['acqu2s']['SFO1'] + acqus['SFO2'] = dic['acqus']['SFO1'] + acqus['SW1p'] = dic['acqu2s']['SW'] + acqus['SW2p'] = dic['acqus']['SW'] + acqus['TD1'] = int(dic['acqu2s']['TD']) # Indirect evolution is not /2 + acqus['TD2'] = int(dic['acqus']['TD'])//2 # Fuckin' Bruker + acqus['o1'] = dic['acqu2s']['O1'] + acqus['o2'] = dic['acqus']['O1'] + + acqus['B0'] = acqus['SFO2'] / sim.gamma[acqus['nuc2']] + acqus['o1p'] = acqus['o1'] / acqus['SFO1'] + acqus['o2p'] = acqus['o2'] / acqus['SFO2'] + acqus['SW1'] = acqus['SW1p'] * np.abs(acqus['SFO1']) + acqus['SW2'] = acqus['SW2p'] * np.abs(acqus['SFO2']) + acqus['dw1'] = 1 / acqus['SW1'] + acqus['dw2'] = 1 / acqus['SW2'] + acqus['t1'] = np.linspace(0, acqus['TD1']*acqus['dw1'], acqus['TD1']) + acqus['t2'] = np.linspace(0, acqus['TD2']*acqus['dw2'], acqus['TD2']) + acqus['AQ1'] = acqus['t1'][-1] + 
def write_acqus_1D(acqus, path='sim_in_1D'):
    """
    Writes the input file for a simulated spectrum, basing on a dictionary of parameters.
    Keys starting with 't' (the time scales) are skipped; list/tuple values are written
    as comma-separated entries on a single line.
    -------
    Parameters
    - acqus : dict
        The dictionary containing the parameters for the simulation
    - path : str, optional
        Directory where the file will be saved.
    """
    # 'with' guarantees the file is closed even if a write raises
    with open(path, 'w') as f:
        for key, value in acqus.items():
            if key.startswith('t'):     # skip the time scales (t1, t2, ...)
                continue
            if isinstance(value, (list, tuple)):
                # key<TAB>v1, v2, ..., vn,
                f.write('{}\t'.format(key))
                for w in value:
                    f.write('{}, '.format(w))
                f.write('\n')
            else:
                f.write('{}\t{}\n'.format(key, value))
def find_nearest(array, value):
    """
    Finds the value in array which is the nearest to value .
    -------
    Parameters
    - array : 1darray
        Self-explanatory
    - value : float
        Value to be found
    -------
    Returns
    - val :float
        The closest value in array tovalue
    """
    arr = np.asarray(array)
    # Index of the smallest absolute deviation from the target
    closest_idx = np.argmin(np.abs(arr - value))
    return arr[closest_idx]
def ppm2freq(x, B0=701.125, o1p=0):
    """
    Converts xfrom ppm to Hz.
    -------
    Parameters
    - x :float
        Value to be converted
    - B0 :float
        Field frequency, in MHz. Default: 700 MHz
    - o1p : float
        Carrier frequency, in ppm. Default: 0.
    -------
    Returns
    - y :float
        The converted value
    """
    # Offset from the carrier (ppm) scaled by the spectrometer
    # frequency (MHz) yields the offset in Hz.
    return (x - o1p) * B0
def procpar(txt):
    """
    Takes as input the path of a file containing a "key" in the first column and a "value" in the second
    column, separated by a tab. Returns a dictionary of shape "key" : "value".
    Lines starting with '#' are treated as comments; blank lines are skipped.
    -------
    Parameters
    - txt :str
        Path to a file that contains "key" in first column and "value" in the second
    -------
    Returns
    - procpars : dict
        Dictionary of shape "key":"value", with the values converted to float
    """
    procpars = {}
    # 'with' closes the handle even on a parsing error (the original
    # opened the file without ever closing it)
    with open(txt) as f:
        for line in f:
            if not line.strip():    # blank line: would crash on parts[1]
                continue
            if line[0] == '#':      # Skip comments
                continue
            parts = line.split('\t')
            procpars[parts[0]] = float(parts[1].strip())
    return procpars
+ ------- + Returns + - y :1darray + Computed projection + """ + if not b: + b = a + + if column: + A = int(misc.ppmfind(ppm_f2, a)[0]) + B = int(misc.ppmfind(ppm_f2, b)[0]) + if A==B: + y = data[:,A] + else: + y = np.sum(data[:, min(A,B):max(A,B)],axis=1) + else: + A = int(misc.ppmfind(ppm_f1, a)[0]) + B = int(misc.ppmfind(ppm_f1, b)[0]) + if A==B: + y = data[A,:] + else: + y = np.sum(data[min(A,B):max(A,B), :],axis=0) + return y + + +def select_traces(ppm_f1, ppm_f2, data, Neg=True, grid=False): + """ + Select traces from a 2D spectrum, save the coordinates in a list. + Left click to select a point, right click to remove it. + ------- + Parameters + - ppm_f1 : 1darray + ppm scale of the indirect dimension + - ppm_f2 : 1darray + ppm scale of the direct dimension + - data : 2darray + Spectrum + - Neg : bool + Choose if to show the negative contours ( True) or not ( False ) + - grid : bool + Choose if to display the grid ( True) or not ( False ) + ------- + Returns + - coord: list + List containing the ’[x,y]’ coordinates of the selected points. 
+ """ + cmaps = CM['Blues_r'], CM['Reds_r'] + # Select traces from a 2D spectrum, save the coordinates in a list + lvlstep = 0.02 # for mouse scroll + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figures.figsize_large) + ax = fig.add_subplot(1,1,1) + ax.set_title('Left double click to add point, right click to remove point') + plt.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.90) + + # Set figure borders + xsx = max(ppm_f2) + xdx = min(ppm_f2) + ysx = max(ppm_f1) + ydx = min(ppm_f1) + + # set level for contour + livello = 0.2 + cnt = figures.ax2D(ax, ppm_f2, ppm_f1, data, xlims=(xsx, xdx), ylims=(ysx, ydx), cmap=cmaps[0], c_fac=1.4, lvl=livello, lw=0.5, X_label='', Y_label='') + if Neg: + Ncnt = figures.ax2D(ax, ppm_f2, ppm_f1, -data, xlims=(xsx, xdx), ylims=(ysx, ydx), cmap=cmaps[0], c_fac=1.4, lvl=livello, lw=0.5) + else: + Ncnt = None + + # Make pretty scales + misc.pretty_scale(ax, (xsx, xdx), 'x') + misc.pretty_scale(ax, (ysx, ydx), 'y') + + xgrid = ppm_f2 + ygrid = ppm_f1 + if grid: # Set grid to visible + for i in xgrid: + ax.axvline(i, color='grey', lw=0.1) + for j in ygrid: + ax.axhline(j, color='grey', lw=0.1) + + # Parameters to save coordinates + coord = [] # Final list of coordinates + dot = [] # Bullets in figure + dothline = [] # Horizontal lines + dotvline = [] # Vertical lines + + def on_click(event): + # What happens if you click? 
def select_for_integration(ppm_f1, ppm_f2, data, Neg=True):
    """
    Select the peaks of a 2D spectrum to integrate.
    First, select the area where your peak is located by dragging the red square.
    Then, select the center of the peak by right-clicking.
    Finally, click 'ADD' to store the peak. Repeat the procedure for as many peaks as you want.
    -------
    Parameters
    - ppm_f1 : 1darray
        ppm scale of the indirect dimension
    - ppm_f2 : 1darray
        ppm scale of the direct dimension
    - data : 2darray
        Spectrum
    - Neg : bool
        Choose if to show the negative contours ( True) or not ( False )
    -------
    Returns
    - peaks: list of dict
        For each peak there are two keys, 'f1' and 'f2', whose meaning is obvious.
        For each of these keys, you have 'u': center of the peak /ppm, and 'lim': the limits of the square you drew before.
    """

    cmaps = CM['Blues_r'], CM['Reds_r']
    lvlstep = 0.02      # Increase step for contours when scrolling the mouse

    # Make an underlying grid to snap the pointer
    xgrid = np.copy(ppm_f2)
    ygrid = np.copy(ppm_f1)
    # Parameters to save coordinates
    coord = []          # Final list of coordinates
    rekt = []           # Rectangles
    # Set figure borders
    xsx, xdx = max(ppm_f2), min(ppm_f2)
    ysx, ydx = max(ppm_f1), min(ppm_f1)
    # Set base level for contour
    lvl0 = 0.2

    # -----------------------------------------------------------------------------------------------------------------
    # Functions connected to the widgets
    def add_crosshair(coord, ix, iy):
        """ Add blue crosshair in (ix, iy) """
        if [ix, iy] not in coord:   # Avoid superimposed peaks
            coord.append([ix, iy])  # Update list
            ax.plot(ix, iy, 'bo', markersize=2)     # add dot
            ax.axhline(iy, c='b', lw=0.4)           # add horizontal line
            ax.axvline(ix, c='b', lw=0.4)           # add vertical line
            for obj in (tmp_dot, tmp_hline, tmp_vline):
                obj.set_visible(False)              # Set the red crosshair invisible
        return coord

    def on_click(event):
        """ Right click moves the red crosshair """
        x, y = event.xdata, event.ydata     # x,y position of cursor
        if event.inaxes == ax:              # You are inside the figure
            # Snap to the grid
            ix, iy = misc.find_nearest(xgrid, x), misc.find_nearest(ygrid, y)
            if str(event.button) == 'MouseButton.RIGHT':
                # Update figure:
                # NOTE(review): set_data with scalars is deprecated in newer matplotlib;
                # may need set_data([ix], [iy]) — confirm against the targeted version.
                tmp_dot.set_data(ix, iy)
                tmp_hline.set_ydata(iy)
                tmp_vline.set_xdata(ix)
                # Make visible the red crosshair
                for obj in (tmp_dot, tmp_hline, tmp_vline):
                    obj.set_visible(True)
            else:
                pass
            fig.canvas.draw()

    def on_scroll(event):
        """ Redraw contours with more/less levels """
        nonlocal lvl0, cnt
        if Neg:
            nonlocal Ncnt

        # Read the input
        if event.button == 'up':
            lvl0 += lvlstep
        if event.button == 'down':
            lvl0 += -lvlstep
        if lvl0 < 0:
            lvl0 = 1e-10
        if lvl0 > 1:
            lvl0 = 1

        # Redraw contours
        if Neg:
            cnt, Ncnt = figures.redraw_contours(ax, ppm_f2, ppm_f1, data, lvl=lvl0, cnt=cnt, Neg=Neg, Ncnt=Ncnt, lw=0.5, cmap=[cmaps[0], cmaps[1]])
        else:
            cnt, _ = figures.redraw_contours(ax, ppm_f2, ppm_f1, data, lvl=lvl0, cnt=cnt, Neg=Neg, Ncnt=None, lw=0.5, cmap=[cmaps[0], cmaps[1]])
        # Draw the pretty things again
        misc.pretty_scale(ax, (xsx, xdx), 'x')
        misc.pretty_scale(ax, (ysx, ydx), 'y')
        misc.set_fontsizes(ax, 14)
        fig.canvas.draw()

    def onselect(epress, erelease):
        """ Drag rectangle """
        if epress.button == 1:  # left click
            # Vertices of the rectangle, counterclockwise
            X = np.array(span.extents[0:2])
            Y = np.array(span.extents[2:4])
            vertX = X[0], X[1], X[1], X[0]
            vertY = Y[0], Y[0], Y[1], Y[1]

            # Make visible the red rectangle
            if not tmp_rekt.get_visible():
                tmp_rekt.set_visible(True)
            tmp_rekt.set_xy(np.array((vertX, vertY)).T)     # .T because (vertX, vertY).shape = (2, 4)
        else:
            pass
        fig.canvas.draw()

    def add_func(event):
        """ ADD button """
        nonlocal tmp_rekt, coord
        # Draw blue crosshair reading data from the red dot
        ix, iy = tmp_dot.get_data()
        coord = add_crosshair(coord, ix, iy)    # Update coord with the new peak

        # Draw blue rectangle reading data from the red rectangle
        verts = np.array(tmp_rekt.get_xy())[:-1]    # Skip the latter because it knows it has to close the perimeter
        dummy_rekt, = ax.fill(verts[:, 0], verts[:, 1], 'tab:blue', alpha=0.25)
        rekt.append(dummy_rekt)
        # Set red rectangle to invisible
        tmp_rekt.set_visible(False)
        fig.canvas.draw()

    # -----------------------------------------------------------------------------------------------------------------

    # Make the figure
    fig = plt.figure()
    fig.set_size_inches(figures.figsize_large)
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title('Drag with left peak for region; select peak with right click')
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.875, top=0.90)

    # ADD button
    add_box = plt.axes([0.925, 0.70, 0.05, 0.05])
    add_button = Button(add_box, 'ADD', hovercolor='0.975')

    # Draw contour
    cnt = figures.ax2D(ax, ppm_f2, ppm_f1, data, cmap=cmaps[0], c_fac=1.4, lvl=lvl0, lw=0.5)
    if Neg:
        Ncnt = figures.ax2D(ax, ppm_f2, ppm_f1, -data, cmap=cmaps[1], c_fac=1.4, lvl=lvl0, lw=0.5)

    # Initialize the red curves
    tmp_rekt, = ax.fill(np.array([0.1, 0.2, 0.3]), np.array([0.1, 0.2, 0.3]), 'tab:red', alpha=0.25, visible=False)  # Rectangle
    tmp_dot, = ax.plot(0, 0, 'ro', markersize=2, visible=False)     # Dot
    tmp_hline = ax.axhline(0, 0, c='r', lw=0.4, visible=False)      # Horizontal line
    tmp_vline = ax.axvline(0, 0, c='r', lw=0.4, visible=False)      # Vertical line

    # Pretty things
    misc.pretty_scale(ax, (xsx, xdx), 'x')
    misc.pretty_scale(ax, (ysx, ydx), 'y')
    misc.set_fontsizes(ax, 14)

    # Widgets
    cursor = Cursor(ax, useblit=True, color='red', linewidth=0.4)       # Moving crosshair
    mouse = fig.canvas.mpl_connect('button_press_event', on_click)      # Right click
    scroll = fig.canvas.mpl_connect('scroll_event', on_scroll)          # Mouse scroll
    span = RectangleSelector(ax, onselect, useblit=False, props=dict(facecolor='tab:red', alpha=0.5))   # Draggable rectangle
    add_button.on_clicked(add_func)     # Button

    plt.show()
    plt.close()

    # -----------------------------------------------------------------------------------------------------------------

    # collect results
    peaks = []

    def calc_borders(rect):
        """ Calculate the limits of the rectangle """
        vert = rect.get_xy()
        vertX, vertY = vert[:, 0], vert[:, 1]
        x_lims = min(vertX), max(vertX)
        y_lims = min(vertY), max(vertY)
        return x_lims, y_lims

    for dot, rec in zip(coord, rekt):
        x_lims, y_lims = calc_borders(rec)
        # Create an entry for each peak as stated in the description
        peaks.append({
            'f1': {
                'u': dot[1],
                'lim': y_lims,
            },
            'f2': {
                'u': dot[0],
                'lim': x_lims,
            },
        })
    return peaks


def polyn(x, c):
    """
    Computes p(x), polynomial of degree n-1, where n is the number of provided coefficients.
    -------
    Parameters
    - x : 1darray
        Scale upon which to build the polynomial
    - c : list or 1darray
        Sequence of the polynomial coefficients, starting from the 0-th order coefficient
    -------
    Returns
    - px : 1darray
        Polynomial of degree n-1.
    """
    x = np.asarray(x)
    # Accumulate with out-of-place '+' instead of '+=' so that an integer x
    # does not raise an unsafe-casting error against float coefficients,
    # and complex coefficients promote the result correctly.
    px = np.zeros_like(x, dtype=np.result_type(x.dtype, float))
    for k, ck in enumerate(c):
        px = px + ck * x**k
    return px


def write_ser(fid, path='./', BYTORDA=0, DTYPA=0, overwrite=True):
    """
    Writes the FID file in directory 'path', in a TopSpin-readable way (i.e. little endian, int32).
    The binary file is named 'fid' if 1D, 'ser' if multiD.
    The parameters BYTORDA and DTYPA can be found in the acqus file.
    - BYTORDA = 1 => big endian => '>'
    - BYTORDA = 0 => little endian => '<'
    - DTYPA = 0 => int32 => 'i4'
    - DTYPA = 2 => float64 => 'f8'
    -------
    Parameters
    - fid : ndarray
        FID array to be written
    - path : str
        Directory where to save the file
    - BYTORDA : int
        Endianness flag, as found in acqus (0 = little endian, 1 = big endian)
    - DTYPA : int
        Data-type flag, as found in acqus (0 = int32, 2 = float64)
    - overwrite : bool
        If True, silently replace an existing file; if False, ask for confirmation first
    """
    if BYTORDA == 0:
        endian = '<'
    elif BYTORDA == 1:
        endian = '>'
    else:
        raise ValueError('Endianness not defined')

    if DTYPA == 0:
        dtype = 'i4'
    elif DTYPA == 2:
        dtype = 'f8'
    else:
        raise ValueError('Data type not defined')

    def uncomplexify_data(data_in):
        # Interleave (real, imag) pairs on the last axis into an int32 array
        size = list(data_in.shape)
        size[-1] = size[-1] * 2
        data_out = np.empty(size, dtype="int32")
        data_out[..., ::2] = data_in.real
        data_out[..., 1::2] = data_in.imag
        return data_out

    def open_towrite(filename):
        # Open filename for writing, creating parent directories if needed
        p, fn = os.path.split(filename)
        if p != '' and os.path.exists(p) is False:
            os.makedirs(p)
        return open(filename, 'wb')

    # 1D data goes into 'fid', multidimensional data into 'ser'
    filename = 'fid' if len(fid.shape) == 1 else 'ser'
    # os.path.join handles the trailing-separator cases portably
    fullpath = os.path.join(path, filename)

    if os.path.exists(fullpath):
        if overwrite is True:
            os.remove(fullpath)
        else:
            # BUG FIX: the prompt used to show 'filename+path' (reversed order);
            # also, indexing .lower()[0] crashed on empty input.
            what_to_do = input('{} already exists. Overwrite it? [YES/no]'.format(fullpath))
            if what_to_do.strip().lower().startswith('n'):
                return 0
            os.remove(fullpath)
    f = open_towrite(fullpath)
    if np.iscomplexobj(fid):
        fid = uncomplexify_data(fid)
    print('Writing \'{}\' file in {}...'.format(filename, path))
    f.write(fid.astype(endian + dtype).tobytes())
    f.close()
    print('Done.')
+ - BYTORDA = 1 => big endian => '>' + - BYTORDA = 0 => little endian => '<' + - DTYPA = 0 => int32 => 'i4' + - DTYPA = 2 => float64 => 'f8' + ------- + Parameters + - fid :ndarray + FID array to be written + - path : str + Directory where to save the file + """ + + if BYTORDA == 0: + endian = '<' + elif BYTORDA == 1: + endian = '>' + else: + raise ValueError('Endianness not defined') + + if DTYPA == 0: + dtype = 'i4' + elif DTYPA == 2: + dtype = 'f8' + else: + raise ValueError('Data type not defined') + + + def uncomplexify_data(data_in): + # Uncomplexify data (pack real,imag) into a int32 array + size = list(data_in.shape) + size[-1] = size[-1] * 2 + data_out = np.empty(size, dtype="int32") + data_out[..., ::2] = data_in.real + data_out[..., 1::2] = data_in.imag + return data_out + + def open_towrite(filename): + # Open filename for writing and return file object + p, fn = os.path.split(filename) # split into filename and path + # create directories if needed + if p != '' and os.path.exists(p) is False: + os.makedirs(p) + return open(filename, 'wb') + + if path[-1] != '/': + path = path+'/' + if len(fid.shape) == 1: + filename = 'fid' + else: + filename = 'ser' + + if os.path.exists(path + filename): + if overwrite is True: + os.remove(path + filename) + else: + what_to_do = input('{} already exists. Overwrite it? [YES/no]'.format(filename+path)) + if what_to_do.lower()[0] == 'n': + return 0 + else: + os.remove(path + filename) + f = open_towrite(path + filename) + if np.iscomplexobj(fid): + fid = uncomplexify_data(fid) + print('Writing \'{}\' file in {}...'.format(filename, path)) + f.write(fid.astype(endian+dtype).tobytes()) + f.close() + print('Done.') + + +def pretty_scale(ax, limits, axis='x', n_major_ticks=10): + """ + This function computes a pretty scale for your plot. Calculates and sets a scale made of 'n_major_ticks' numbered ticks, spaced by 5*n_major_ticks unnumbered ticks. After that, the plot borders are trimmed according to the given limits. 
+ -------- + Parameters: + - ax: matplotlib.AxesSubplot object + Panel of the figure of which to calculate the scale + - limits: tuple + limits to apply of the given axis. (left, right) + - axis: str + 'x' for x-axis, 'y' for y-axis + - n_major_ticks: int + Number of numbered ticks in the final scale. An oculated choice gives very pleasant results. + """ + + import matplotlib.ticker as TKR + + if axis=='x': + ax.set_xlim(limits) + sx, dx = ax.get_xlim() + elif axis=='y': + ax.set_ylim(limits) + sx, dx = ax.get_ylim() + else: + raise ValueError('Unknown options for "axis".') + + # Compute major ticks + steps = [1, 2, 4, 5, 10] + majorlocs = TKR.MaxNLocator(nbins=n_major_ticks, steps=steps).tick_values(sx, dx) + + # Compute minor ticks manually because matplotlib is complicated + ndivs = 5 + majorstep = majorlocs[1] - majorlocs[0] + minorstep = majorstep / ndivs + + vmin, vmax = sx, dx + if vmin > vmax: + vmin, vmax = vmax, vmin + + t0 = majorlocs[0] + tmin = ((vmin - t0) // minorstep + 1) * minorstep + tmax = ((vmax - t0) // minorstep + 1) * minorstep + minorlocs = np.arange(tmin, tmax, minorstep) + t0 + + # Set the computed ticks and update the limits + if axis == 'x': + ax.set_xticks(majorlocs) + ax.set_xticks(minorlocs, minor=True) + ax.set_xlim(sx,dx) + elif axis == 'y': + ax.set_yticks(majorlocs) + ax.set_yticks(minorlocs, minor=True) + ax.set_ylim(sx,dx) + + +def molfrac(n): + """ + Computes the "molar fraction" 'x' of the array 'n'. + Returns also the total amount. + ------- + Parameters: + - n: list or 1darray + list of values + ------- + Returns: + - x: list or 1darray + molar fraction array + - N: float + sum of all the elements in 'n' + """ + if isinstance(n, list): + n = np.array(n) + N = np.sum(n) + x = [n[i]/N for i in range(len(n))] + if isinstance(n, list): + x = np.array(x) + return x, N + +def split_acqus_2D(acqus): + """ + Split the acqus dictionary of a 2D spectrum into two separate 1D-like acqus dictionaries. 
+ -------- + Parameters: + - acqus: dict + acqus dictionary of a 2D spectrum + -------- + Returns: + - acqu1s: dict + acqus dictionary of the indirect dimension + - acqu2s: dict + acqus dictionary of the direct dimension + """ + keys = ['B0', 'nuc', 'o1p', 'SWp', 'TD', 'SFO1', 'SW', 'dw', 't1', 'o1', 'AQ1'] + acqu1v = [ + acqus['B0'], + acqus['nuc1'], + acqus['o1p'], + acqus['SW1p'], + acqus['TD1'], + acqus['SFO1'], + acqus['SW1'], + acqus['dw1'], + acqus['t1'], + acqus['AQ1'], + acqus['o1']] + acqu2v = [ + acqus['B0'], + acqus['nuc2'], + acqus['o2p'], + acqus['SW2p'], + acqus['TD2'], + acqus['SFO2'], + acqus['SW2'], + acqus['dw2'], + acqus['t2'], + acqus['AQ2'], + acqus['o2']] + acqu1s = {} + acqu2s = {} + for k, key in enumerate(keys): + acqu1s[key] = acqu1v[k] + acqu2s[key] = acqu2v[k] + return acqu1s, acqu2s + +def split_procs_2D(procs): + """ + Split the procs dictionary of a 2D spectrum into two separate 1D-like procs dictionaries. + -------- + Parameters: + - procs: dict + procs dictionary of a 2D spectrum + -------- + Returns: + - proc1s: dict + procs dictionary of the indirect dimension + - proc2s: dict + procs dictionary of the direct dimension + """ + keys = ['wf', 'zf', 'fcor', 'tdeff', 'p0', 'p1', 'pv'] + proc1v = [ + procs['wf'][0], + procs['zf'][0], + procs['fcor'][0], + procs['tdeff'][0], + procs['p0_1'], + procs['p1_1'], + procs['pv_1']] + proc2v = [ + procs['wf'][1], + procs['zf'][1], + procs['fcor'][1], + procs['tdeff'][1], + procs['p0_2'], + procs['p1_2'], + procs['pv_2']] + proc1s = {} + proc2s = {} + for k, key in enumerate(keys): + proc1s[key] = proc1v[k] + proc2s[key] = proc2v[k] + return proc1s, proc2s + +def nuc_format(nuc): + """ + Converts the 'nuc' key you may find in acqus in the formatted label, e.g. '13C' -> '$^{13}$C' + -------- + Parameters: + - nuc: str + Unformatted string + -------- + Returns: + - fnuc: str + Formatted string. 
+ """ + import re + fnuc = re.split('(\D+)',nuc) + f_nuc = '$^{' + str(fnuc[0]) + '}$'+str(fnuc[1]) + return f_nuc + +def set_ylim(ax, data_inp): + """ + Sets the y-limits of ax as follows: + Bottom: min(data) - 5% max(data) + Top: max(data) + 5% max(data) + ------- + Parameters: + - ax: matplotlib.Subplot Object + Panel of the figure where to apply this scale + - data_inp: ndarray or list + Input data. If it is a list, data_inp is converted to array. + """ + if isinstance(data_inp, list): + datain = np.concatenate(data_inp) + else: + datain = np.copy(data_inp) + + T = np.max(datain.real) + B = np.min(datain.real) + try: # T and B can raise errors in certain situations + ax.set_ylim(B - 0.05*T, T + 0.05*T) + except: + pass + +def get_ylim(data_inp): + """ + Calculates the y-limits of ax as follows: + Bottom: min(data) - 5% max(data) + Top: max(data) + 5% max(data) + ------- + Parameters: + - data_inp: ndarray or list + Input data. If it is a list, data_inp is converted to array. + """ + if isinstance(data_inp, list): + datain = np.concatenate(data_inp) + else: + datain = np.copy(data_inp) + + T = np.max(datain.real) + B = np.min(datain.real) + return B, T + + +def mathformat(ax, axis='y', limits=(-2,2)): + """ + Apply exponential formatting to the given axis of the given figure panel. The offset text size is uniformed to the tick labels' size. + ------- + Parameters: + - ax: matplotlib.Subplot Object + Panel of the figure to edit + - axis: str + 'x', 'y' or 'both'. + - limits: tuple + tuple of ints that indicate the order of magnitude range outside which the exponential format is applied. 
+ """ + ax.ticklabel_format(axis=axis, style='scientific', scilimits=limits, useMathText=True) + if axis=='y' or axis=='both': + tmp = (ax.get_yticklabels()) + fontsize = tmp[0].get_fontsize() + ax.yaxis.get_offset_text().set_size(fontsize) + + if axis=='x' or axis=='both': + tmp = (ax.get_xticklabels()) + fontsize = tmp[0].get_fontsize() + ax.xaxis.get_offset_text().set_size(fontsize) + + +def set_fontsizes(ax, fontsize=10): + """ + Automatically adjusts the fontsizes of all the figure elements. + In particular: + - title = fontsize + - axis labels = fontsize - 2 + - ticks labels = fontsize - 3 + - legend entries = fontsize - 4 + -------- + Parameters: + - ax: matplotlib.Subplot Object + Subplot of interest + - fontsize: float + Starting fontsize + ------- + """ + + # --------------------------------------------------------------------- + def _modify_legend(ax, **kwargs): + """ + Copied from StackOverflow: + https://stackoverflow.com/questions/23689728/how-to-modify-matplotlib-legend-after-it-has-been-created + """ + + l = ax.legend_ + defaults = dict( + loc = l._loc, + numpoints = l.numpoints, + markerscale = l.markerscale, + scatterpoints = l.scatterpoints, + scatteryoffsets = l._scatteryoffsets, + prop = l.prop, + borderpad = l.borderpad, + labelspacing = l.labelspacing, + handlelength = l.handlelength, + handleheight = l.handleheight, + handletextpad = l.handletextpad, + borderaxespad = l.borderaxespad, + columnspacing = l.columnspacing, + ncol = l._ncol, + mode = l._mode, + fancybox = type(l.legendPatch.get_boxstyle())==matplotlib.patches.BoxStyle.Round, + shadow = l.shadow, + title = l.get_title().get_text() if l._legend_title_box.get_visible() else None, + framealpha = l.get_frame().get_alpha(), + bbox_to_anchor = l.get_bbox_to_anchor()._bbox, + bbox_transform = l.get_bbox_to_anchor()._transform, + #frameon = l._drawFrame, + frameon = l.draw_frame, + handler_map = l._custom_handler_map, + ) + + if "fontsize" in kwargs and "prop" not in kwargs: + 
defaults["prop"].set_size(kwargs["fontsize"]) + + ax.legend(**dict(list(defaults.items()) + list(kwargs.items()))) + # --------------------------------------------------------------------- + + # Set the dimensions + title_font = fontsize + label_font = fontsize - 2 + ticks_font = fontsize - 3 + legen_font = fontsize - 4 + + ax.title.set_fontsize(title_font) # title + ax.xaxis.label.set_fontsize(label_font) # xlabel + ax.yaxis.label.set_fontsize(label_font) # xlabel + # Ticks + for label in (ax.get_xticklabels() + ax.get_yticklabels()): + label.set_fontsize(ticks_font) + # Offset text + ax.xaxis.get_offset_text().set_size(ticks_font) + ax.yaxis.get_offset_text().set_size(ticks_font) + + # Legend + if ax.legend_ is not None: + _modify_legend(ax, fontsize=legen_font) + + +def hankel(data, n=None): + """ + Computes a Hankel matrix from data. + If data is a 1darray of length N, computes the correspondant Hankel matrix of dimensions (N-n+1, n). + If data id a 2darray, computes the closest Hankel matrix in the Frobenius norm sense by averaging the values on the antidiagonals. + ------- + Parameters: + - data: 1darray + Vector to be Hankel-ized, of length N + - n: int + Number of columns that the Hankel matrix will have + Returns: + - H: 2darray + Hankel matrix of dimensions (N-n+1, n) + """ + if isinstance(data, np.ndarray): + if len(data.shape) == 1: + if n is None: + raise ValueError('You must specify the number of columns of the Hankel matrix.') + H = linalg.hankel(data[:n], data[n-1:]).T + elif len(data.shape) == 2: + H = misc.avg_antidiag(data) + else: + raise ValueError('{}D arrays are not supported.'.format(len(data.shape))) + else: + raise ValueError('Input data is not an array.') + return H + +def unhankel(H): + """ + Concatenates the first row and the last column of the matrix H, which should have Hankel-like structure, so to build the array of independent parameters. 
+ ------ + Parameters: + - H: 2darray + Hankel-like matrix + ------ + Returns: + - h: 1darray + First row and last column, concatenated + """ + h = np.concatenate((H[0,:], H[1:, -1]), axis=-1) + return h + + +def avg_antidiag(X): + """ + Given a matrix X without any specific structure, finds the closest Hankel matrix in the Frobenius norm sense by averaging the antidiagonals. + --------- + Parameters: + - X: 2darray + Input matrix + -------- + Returns: + - Xp: 2darray + Hankel matrix obtained from X + """ + m, n = X.shape # Get dimensions of X + N = m + n - 1 # Degrees of freedom that Xp will have + data = np.array([np.mean(np.diag(X[:, ::-1], w)) for w in range(-N+n, n)])[::-1] # Mean on the antidiagonals + Xp = misc.hankel(data, n) # Transform the "data" array into a matrix + + return Xp + +def write_help(request, file=None): + """ + Gets the documentation of request, and tries to save it in a text file. + ------- + Parameters: + - request: function or class or package + Whatever you need documentation of + - file: str or None or False + Name of the output documentation file. If it is None, a default name is given. If it is False, the output is printed on screen. + """ + import pydoc + if file is None: + file = request.__name__+'.hlp' + hlp_text = pydoc.render_doc(request, renderer=pydoc.plaintext) + if bool(file): + with open(file, 'w') as F: + F.write(hlp_text) + else: + print(hlp_text) + + +def print_list(mylist): + """ + Prints a list, one entry per row. + ------- + Parameters: + - mylist: list + The list you want to print + ------- + Returns: + - outstring: str + The printed text formatted as single string + """ + outstring = '' + for entry in mylist: + outstring += '{}\n'.format(entry) + print(outstring) + return outstring + +def print_dict(mydict): + """ + Prints a dictionary one entry per row, in the format key: value. 
Nested dictionaries are printed with an indentation + ------- + Parameters: + - mydict: dict + The dictionary you want to print + ------- + Returns: + - outstring: str + The printed text formatted as single string + """ + outstring = '' + for key, value in mydict.items(): + if isinstance(value, dict): + outstring += '{}:\n'.format(key) + for inner_key, inner_value in value.items(): + outstring += '\t{:10}: {}\n'.format(inner_key, inner_value) + else: + outstring += '{:10}: {}\n'.format(key, value) + print(outstring) + return outstring + + +def show_cmap(cmap, N=10, start=0, end=1): + """ + Plot the colors extracted from a colormap. + ------- + Parameters: + - cmap: matplotlib.Colormap Object + The colormap from which you want to extract the list of colors + - N: int + Number of samples to extract + - start: float + Start point of the sampling. 0 = beginning of the cmap; 1 = end of the cmap. + - end: float + End point of the sampling. 0 = beginning of the cmap; 1 = end of the cmap. + """ + + x = np.linspace(start, end, N) + colors = cmap2list(cmap, N, start, end) + + # To fill the space + width = (end - start) / (N - 1) + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figures.figsize_large) + fig.subplots_adjust(left=0.01, right=0.99) + ax = fig.add_subplot(1,1,1) + + ax.set_title(cmap.name) + + # Draw the colors + ax.bar(x, 1, width=width, bottom=None, align='center', data=None, color=colors) + + # Remove the white spaces + ax.set_xlim(start-width/2, end+width/2) + ax.set_ylim(0,1) + ax.tick_params(axis='y', left=False, labelleft=False) + + plt.show() + plt.close() + + +def cmap2list(cmap, N=10, start=0, end=1): + """ + Extract the colors from a colormap and return it as a list. + ------- + Parameters: + - cmap: matplotlib.Colormap Object + The colormap from which you want to extract the list of colors + - N: int + Number of samples to extract + - start: float + Start point of the sampling. 0 = beginning of the cmap; 1 = end of the cmap. 
+ - end: float + End point of the sampling. 0 = beginning of the cmap; 1 = end of the cmap. + ------- + Returns: + - colors: list + List of the extracted colors. + """ + x = np.linspace(start, end, N) + colors = cmap(x) + return colors + +def edit_checkboxes(checkbox, xadj=0, yadj=0, length=None, height=None, color=None): + """ + Edit the size of the box to be checked, and adjust the lines accordingly. + ----------- + Parameters: + - checkbox: matplotlib.widgets.CheckBox Object + The checkbox to edit + - xadj: float + modifier value for bottom left corner x-coordinate of the rectangle, in checkbox.ax coordinates + - yadj: float + modifier value for bottom left corner y-coordinate of the rectangle, in checkbox.ax coordinates + - length: float + length of the rectangle, in checkbox.ax coordinates + - height: float + height of the rectangle, in checkbox.ax coordinates + - color: str or list or None + If it is not None, change color to the lines + """ + for rekt, lnz in zip(checkbox.rectangles, checkbox.lines): + orig = rekt.get_xy() + x, y = orig[0]+xadj, orig[1]+yadj + rekt.set_xy((x, y)) + if length is not None: + rekt.set_width(length) + if height is not None: + rekt.set_height(height) + length, height = rekt.get_width(), rekt.get_height() + + lx = x, x+length + ly = y, y+height + + lnz[0].set_data(lx, ly[::-1]) + lnz[1].set_data(lx, ly) + + if color is not None: + if isinstance(color, (tuple, list)): + for col, lnz in zip(color, checkbox.lines): + [line.set_color(col) for line in lnz] + else: + for lnz in checkbox.lines: + [line.set_color(color) for line in lnz] + +def binomial_triangle(n): + """ + Calculates the n-th row of the binomial triangle. The first row is n=1, not 0. + Example: + In: > binomial_triangle(4) + > 1 3 3 1 + -------- + Parameters: + n: int + Row index + -------- + Returns: + row: 1darray + The n-th row of binomial triangle. 
+ """ + row = [] + n -= 1 + for k in range(n+1): + row.append( + np.math.factorial(n) / ( np.math.factorial(k) * np.math.factorial(n-k) ) + ) + return np.array(row) diff --git a/klassez/processing.py b/klassez/processing.py new file mode 100644 index 0000000..71cec25 --- /dev/null +++ b/klassez/processing.py @@ -0,0 +1,3987 @@ +#! /usr/bin/env python3 + +import os +import sys +import numpy as np +from scipy import linalg, stats +from scipy.spatial import ConvexHull +import random +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from matplotlib.widgets import Slider, Button, RadioButtons, TextBox, CheckButtons, Cursor, LassoSelector +from matplotlib.path import Path +import seaborn as sns +import nmrglue as ng +import lmfit as l +from datetime import datetime +import warnings + +from . import fit, misc, sim, figures, processing + +#from .__init__ import CM +#CM = __init__.CM +from .config import CM +""" +Contains a series of processing functions for different purposes +""" + + +# CPMG processing +def sum_echo_train(datao, n, n_echoes, i_p=0): + """ + "datao" is a 1D or 2D dataset whose direct dimension + has been acquired with CPMG, i.e. it is made of + echoes separated one from each other by "n" points. + The first good point of the FID is "i_p". + This function sums the first "n_echoes" echoes and + returns the resulting FID. 
+ """ + try: + if len(datao[:,0])>1: + transients = len(datao[:,0]) + except: + transients = 1 + datao = np.reshape(datao, (1,-1)) + + data = datao[ 0:transients, i_p : n*n_echoes+i_p+1 ] + if np.mod(n,2) == 0: + nm = int(n/2) + else: + nm = int(n/2)+1 + + data_p = np.zeros((transients, nm), dtype='complex64') + datad = np.zeros_like(data_p) + datar = np.zeros_like(data_p) + + for i in range(n_echoes): + a = i * n + b1 = i * n + nm + b2 = b1 + if np.mod(n, 2) == 0: + b2 = b1 + 1 + c = i * n + n + 1 + + datad[:, 0:nm] += data[:, a:b1] #dritto + datar[:,0:nm] += data[:, b2:c][:,::-1] #rovescio + + datar = datar.real - 1j * datar.imag + data_p = datad + datar + + if transients == 1: + data_p = np.reshape(data_p, -1) + + return data_p + +def split_echo_train(datao, n, n_echoes, i_p=0): + """ + "datao" is a 1D or 2D dataset whose direct dimension + has been acquired with CPMG, i.e. it is made of + echoes separated one from each other by "n" points. + The first good point of the FID is "i_p". + This function separates the first "n_echoes" echoes and + store them in a tensor of shape + (n_echoes, len(datao[:,0]), n/2). 
+ """ + + try: + if len(datao[:,0])>1: + transients = len(datao[:,0]) + except: + transients = 1 + datao = np.reshape(datao, (1,-1)) + + data = datao[ 0:transients, i_p : n*n_echoes+i_p+1 ] + if np.mod(n,2) == 0: + nm = int(n/2) + else: + nm = int(n/2)+1 + + datad = np.zeros((transients, nm), dtype='complex64') + datar = np.zeros((transients, nm), dtype='complex64') + + for i in range(n_echoes): + a = i * n + b1 = i * n + nm + b2 = b1 + if np.mod(n, 2) == 0: + b2 = b1 + 1 + c = i * n + n + 1 + + datad[:, 0:nm] = data[:, a:b1] #dritto + datar[:,0:nm] = data[:, b2:c][:,::-1] #rovescio + + datar = datar.real - 1j * datar.imag + datap = datad + datar + datap = np.reshape(datap, (1, len(datap[:,0]), len(datap[0,:]))) + if i==0: + data_p = datap + else: + data_p = np.vstack((data_p, datap)) + + if transients == 1: + data_p = np.reshape(data_p, (n_echoes,-1)) + + return data_p + +# ----------------------------------------------------------------------- + +# fid adjustment +def quad(fid): + """ + Subtracts from the FID the arithmetic mean of its last quarter. The real and imaginary channels are treated separately. + ------- + Parameters + - fid : ndarray + Self-explanatory. + ------- + Returns + - fid : ndarray + Processed FID. + """ + size = fid.shape[-1] + qsize = size//4 + avg_re = np.average(fid[...,-qsize:].real) + avg_im = np.average(fid[...,-qsize:].imag) + fid.real = fid.real - avg_re + fid.imag = fid.imag - avg_im + return fid + +def qpol(fid): + """ + Fits the FID with a 4-th degree polynomion, then subtracts it from the original FID. The real and imaginary channels are treated separately. + ------- + Parameters: + - fid : ndarray + Self-explanatory. 
+ ------- + Returns: + - fid : ndarray + Processed FID + """ + # Fits the FID with a 4th degree polinomion + size = fid.shape[-1] + x = np.linspace(0, size, size) + def p5(x, par): + a = par['a'] + b = par['b'] + c = par['c'] + d = par['d'] + e = par['e'] + f = par['f'] + p = a + b*x + c*x**2 + d*x**3 + e*x**4 + f*x**5 + return p + + def fcn_min(params, x, fid): + par = params.valuesdict() + p = p5(x, par) + r = fid - p + return r + + params_re = l.Parameters() + params_re.add('a', value=0) + params_re.add('b', value=0) + params_re.add('c', value=0) + params_re.add('d', value=0) + params_re.add('e', value=0) + params_re.add('f', value=0) + + params_im = l.Parameters() + params_im.add('a', value=0) + params_im.add('b', value=0) + params_im.add('c', value=0) + params_im.add('d', value=0) + params_im.add('e', value=0) + params_im.add('f', value=0) + + m_re = l.Minimizer(fcn_min, params_re, fcn_args=(x, fid.real)) + result_re = m_re.minimize(method='leastsq', max_nfev=75000, xtol=1e-12, ftol=1e-12) + m_im = l.Minimizer(fcn_min, params_im, fcn_args=(x, fid.imag)) + result_im = m_im.minimize(method='leastsq', max_nfev=75000, xtol=1e-12, ftol=1e-12) + + coeff_re = result_re.params.valuesdict() + coeff_im = result_im.params.valuesdict() + + fid.real -= p5(x, coeff_re) + fid.imag -= p5(x, coeff_im) + return fid + +# ------------------------------------------------------------------------------------------------------------------------------------------------------- +# window functions +def qsin(data, ssb): + """ + Sine-squared apodization. + """ + + if ssb == 0 or ssb == 1: + off = 0 + else: + off = 1/ssb + end = 1 + size = data.shape[-1] + apod = np.power(np.sin(np.pi * off + np.pi * (end - off) * np.arange(size) / (size)).astype(data.dtype), 2).astype(data.dtype) + return apod * data + +def sin(data, ssb): + """ + Sine apodization. 
+ """ + if ssb == 0 or ssb == 1: + off = 0 + else: + off = 1/ssb + end = 1 + size = data.shape[-1] + apod = np.sin(np.pi * off + np.pi * (end - off) * np.arange(size) / (size)).astype(data.dtype) + return apod * data + +def em(data, lb, sw): + """ + Exponential apodization + --------- + Parameters: + - data: ndarray + Input data + - lb: float + Lorentzian broadening. It should be positive. + - sw: float + Spectral width /Hz + """ + lb = lb / (2*sw) + apod = np.exp(-np.pi * np.arange(data.shape[-1]) * lb).astype(data.dtype) + return apod * data + +def gm(data, lb, gb, sw, gc=0): + """ + Gaussian apodization + ------- + Parameters: + - data: ndarray + Input data + - lb: float + Lorentzian broadening /Hz. It should be negative. + - gb: float + Gaussian broadening /Hz. It should be positive. + - sw: float + Spectral width /Hz + - gc: float + Gaussian center, relatively to the FID length: 0 <= gc <= 1 + Returns: + - pdata: ndarray + Processed data + """ + size = data.shape[-1] + a = np.pi * lb / sw * np.arange(size) + b = 0.6 * np.pi * (gb / sw) * (gc * (size-1) - np.arange(size) ) + apod = np.exp(a - b**2) + return apod * data + +def gmb(data, lb, gb, sw): + """ + Gaussian apodization, Bruker-like + """ + size = data.shape[-1] + t = np.arange(size) / sw + aq = size / sw + a = np.pi * lb + b = - a / (2 * gb * aq) + apod = np.exp(a * t - b**2 * t**2) + return apod * data + +# zero-filling +def zf(data, size): + # zero-filling of data up to size + def zf_pad(data, pad): + size = list(data.shape) + size[-1] = int(pad) + z = np.zeros(size, dtype=data.dtype) + return np.concatenate((data, z), axis=-1) + zpad = size - data.shape[-1] + if zpad <= 0 : + zpad = 0 + datazf = zf_pad(data, pad=zpad) + return datazf + +# Fourier transform +def ft(data0, alt=False, fcor=0.5, Numpy=True): + """ + Fourier transform in NMR sense, i.e. with positive exponential. + This means to perform IFT reverting the 1/N scaling. 
# Fourier transform
def ft(data0, alt=False, fcor=0.5, Numpy=True):
    """
    Fourier transform in the NMR sense, i.e. with positive exponential.
    This means to perform IFT reverting the 1/N scaling.
    ------------
    Parameters:
    - data0: ndarray
        Input FID. A copy is made: the original array is not modified.
    - alt: bool
        Negates the sign of the odd points, then takes the complex conjugate.
        Required for States-TPPI processing.
    - fcor: float
        Weighting factor for the FID 1st point. Default value (0.5) prevents baseline offset.
    - Numpy: bool
        If True (STRONGLY ADVISED) uses numpy's FFT algorithm; if False, uses
        the explicit definition of the discrete FT (O(N^2)).
    ------------
    Returns:
    - dataft: ndarray
        Transformed data
    """
    fid = np.copy(data0)
    if not np.iscomplexobj(fid):
        print('WARNING! The input array is not complex.')
    npts = fid.shape[-1]
    fid[..., 0] = fid[..., 0] * fcor            # weight first point
    if fid.dtype != "complex64":
        fid = fid.astype("complex64")
    if alt:
        # States-TPPI: negate odd points, then conjugate
        fid[..., 1::2] = fid[..., 1::2] * -1
        fid.imag = fid.imag * -1
    if Numpy is True:
        dataft = np.fft.fftshift(np.fft.ifft(fid, axis=-1).astype(fid.dtype), -1) * npts
    else:
        dataft = np.zeros_like(fid)
        for j in range(npts):
            for m in range(npts):
                dataft[..., m] += np.exp((1j / npts) * 2 * np.pi * j * (m - (npts / 2))) * fid[..., j]
    return dataft

def ift(data0, alt=False, fcor=0.5, Numpy=True):
    """
    Inverse Fourier transform in the NMR sense, i.e. with negative exponential.
    This means to perform FT adding the "times-N" scaling.
    ------------
    Parameters:
    - data0: ndarray
        Input spectrum. A copy is made: the original array is not modified.
    - alt: bool
        Negates the sign of the odd points, then takes the complex conjugate.
        Required for States-TPPI processing.
    - fcor: float
        Weighting factor for the FID 1st point. Default value (0.5) prevents baseline offset.
    - Numpy: bool
        If True (STRONGLY ADVISED) uses numpy's FFT algorithm; if False, uses
        the explicit definition of the discrete IFT (O(N^2)).
    ------------
    Returns:
    - dataft: ndarray
        Transformed data
    """
    spectrum = np.copy(data0)
    if not np.iscomplexobj(spectrum):
        print('WARNING! The input array is not complex.')
    npts = spectrum.shape[-1]
    if Numpy:
        scale = 1 / npts
        dataft = np.fft.fft(np.fft.ifftshift(spectrum, -1), axis=-1).astype(spectrum.dtype) * scale
    else:
        dataft = np.zeros_like(spectrum)
        for j in range(npts):
            for m in range(npts):
                dataft[..., m] += 1 / npts * np.exp((-1j / npts) * 2 * np.pi * m * (j - (npts / 2))) * spectrum[..., j]
    if alt:
        # Revert the States-TPPI manipulation done by ft
        dataft[..., 1::2] = dataft[..., 1::2] * -1
        dataft.imag = dataft.imag * -1
    dataft[..., 0] = dataft[..., 0] / fcor      # revert first-point weighting
    return dataft

def rev(data):
    """Reverse data along the last dimension."""
    return data[..., ::-1]
def ps(data, ppmscale=None, p0=None, p1=None, pivot=None, interactive=False):
    """
    Applies phase correction on the last dimension of data.
    The pivot is set at the center of the spectrum by default.
    Missing parameters will be inserted interactively.
    -------
    Parameters:
    - data: ndarray
        Input data
    - ppmscale: 1darray or None
        PPM scale of the spectrum. Required for pivot and interactive phase correction
    - p0: float
        Zero-order phase correction angle /degrees
    - p1: float
        First-order phase correction angle /degrees
    - pivot: float or None.
        First-order phase correction pivot /ppm. If None, it is the center of the spectrum.
    - interactive: bool
        If True, all the parameters will be ignored and the interactive phase correction panel will be opened.
    --------
    Returns:
    - datap: ndarray
        Phased data
    - final_values: tuple
        Employed values of the phase correction. (p0, p1, pivot)
    --------
    Raises:
    - ValueError: if ppmscale is needed (interactive 1D mode, or a pivot is
      given) but not supplied.
    """
    # Fall back to interactive mode if no phase angle at all is given
    if p0 is None and p1 is None:
        interactive = True
    elif p0 is None:
        p0 = 0
    elif p1 is None:
        p1 = 0

    # The ppm scale is mandatory whenever it is actually used: in the 1D
    # interactive panel, or to locate a user-supplied pivot.
    # BUGFIX: the original guard raised only when BOTH interactive was True
    # AND a pivot was given, letting the other cases crash downstream
    # (misc.ppmfind / interactive_phase_1D called with a None scale).
    needs_scale = (interactive is True and len(data.shape) < 2) or pivot is not None
    if ppmscale is None and needs_scale:
        raise ValueError('PPM scale not supplied. Aborting...')

    if interactive is True and len(data.shape) < 2:
        datap, final_values = processing.interactive_phase_1D(ppmscale, data)
    else:
        p0 = p0 * np.pi / 180
        p1 = p1 * np.pi / 180
        size = data.shape[-1]
        pvscale = np.arange(size) / size
        if pivot is None:
            pv = 0.5                                        # center of the spectrum
        else:
            pv = (misc.ppmfind(ppmscale, pivot)[0] / size)  # pivot index, normalized
        apod = np.exp(1j * (p0 + p1 * (pvscale - pv))).astype(data.dtype)
        datap = data * apod
        final_values = p0 * 180 / np.pi, p1 * 180 / np.pi, pivot
    return datap, final_values


def EAE(data):
    """
    Shuffles data if the spectrum is acquired with FnMODE = Echo-Antiecho.
    NOTE: introduces -90° phase shift in F1, to be corrected after the processing
    """
    pdata = np.zeros_like(data)
    # Even rows: echo - antiecho; odd rows: the same combination rotated by -90°
    pdata[::2] = (data[::2].real - data[1::2].real) + 1j * (data[::2].imag - data[1::2].imag)
    pdata[1::2] = -(data[::2].imag + data[1::2].imag) + 1j * (data[::2].real + data[1::2].real)
    return pdata
+ """ + def ri2c(data): + s = list(data.shape) + s[-1] = s[-1]*2 + n = np.empty(s, data.real.dtype) + n[..., ::2] = data.real + n[..., 1::2] = data.imag + return n + def c2ri(data): + temp = np.array(data.flat[0] + 1j*data.flat[1]) + s = list(data.shape) + s[-1] = s[-1] // 2 + n = np.empty(s, temp.dtype) + del temp + n.real = data.real[..., ::2] + n.imag = data.real[..., 1::2] + return n + datatp = np.array(c2ri(ri2c(data).T), dtype='complex64') + return datatp + +def unpack_2D(data): + # Separates fully processed 2D NMR data into 4 distinct ser files + rr = data.real[::2] + ir = data.imag[::2] + ri = data.real[1::2] + ii = data.imag[1::2] + return rr, ir, ri, ii + +def repack_2D(rr, ir, ri, ii): + # Renconstruct hypercomplex 2D NMR data given the 4 sers + data = np.empty((2*rr.shape[0],rr.shape[1]), dtype='complex64') + data.real[::2] = rr + data.imag[::2] = ir + data.real[1::2] = ri + data.imag[1::2] = ii + return data + +def td_eff(data, tdeff): + """ + Uses only the first tdeff points of data. tdeff must be a list as long as the dimensions: + tdeff = [F1, F2, ..., Fn] + """ + def trim(data, n): + return data[...,:n] + + ndim = len(data.shape) + # if tdeff is a number, make it list + if isinstance(tdeff, int): + L = tdeff + tdeff = [] + tdeff.append(L) + del L + + tdeff = tdeff[::-1] # to obtain correct final shape + + if len(tdeff) != ndim: # Check + raise ValueError('Shape mismatch between data and tdeff') + + X = tuple(np.roll(np.arange(ndim),1)) # Roll the dimensions to the right + + for k in range(ndim): + if tdeff[k]: + data = trim(data, tdeff[k]) + data = np.transpose(data, X) + + return data + + + +def fp(data, wf=None, zf=None, fcor=0.5, tdeff=0): + """ + Performs the full processing of a 1D NMR FID (data). 
def fp(data, wf=None, zf=None, fcor=0.5, tdeff=0):
    """
    Performs the full processing of a 1D NMR FID (data).
    -------
    Parameters:
    - data: ndarray
        Input FID
    - wf: dict or None
        {'mode': window function to be used, plus its parameters}
    - zf: int or None
        Final size of the spectrum
    - fcor: float
        Weighting factor for the FID first point
    - tdeff: int
        Number of points of the FID to be used for the processing
    -------
    Returns:
    - data: ndarray
        Processed spectrum
    """
    data = processing.td_eff(data, tdeff)
    # Window function: dispatch on the requested mode
    if wf is not None:
        mode = wf['mode']
        if mode == 'qsin':
            data = processing.qsin(data, ssb=wf['ssb'])
        elif mode == 'sin':
            data = processing.sin(data, ssb=wf['ssb'])
        elif mode == 'em':
            data = processing.em(data, lb=wf['lb'], sw=wf['sw'])
        elif mode == 'gm':
            data = processing.gm(data, lb=wf['lb'], gb=wf['gb'], sw=wf['sw'], gc=wf['gc'])
        elif mode == 'gmb':
            data = processing.gmb(data, lb=wf['lb'], gb=wf['gb'], sw=wf['sw'])
    # Zero-filling
    if zf is not None:
        data = processing.zf(data, zf)
    # Fourier transform
    data = processing.ft(data, fcor=fcor)
    return data
+ ------- + Parameters: + - fid0: 1darray + FID to process + - acqus: dict + Dictionary of acquisition parameters + - procs: dict + Dictionary of processing parameters + ------- + Returns: + - pdata: 1darray + Processed spectrum + - procs: dict + Updated dictionary of processing parameters + """ + + def get_apod(size, procs): + """ Calculate the window function on the basis of 'procs' """ + Y = np.ones(size, dtype='complex64') # array of ones + # Process the array of ones and then revert FT to get everything but the processing + apodf = processing.ift(processing.fp(Y, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff'])) + apodf = apodf.real + # Adjust the dimension to size + if apodf.shape[-1] < size: # if shorter than size, zero-fill + apodf = processing.zf(apodf, size) + if apodf.shape[-1] > size: # if longet than size, trim + apodf = processing.td_eff(apodf, size) + return apodf + + # Copy initial FID to prevent overwriting + fid = np.copy(fid0) + fid0 = np.copy(fid) + Y = np.ones_like(fid0) + + # Calculate starting values + data = processing.fp(fid, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff']) + + + # Get WF + apodf = get_apod(fid0.shape[-1], procs) + + # Calculate the ppm scale + fq_scale = processing.make_scale(data.shape[-1], acqus['dw'], rev=True) + ppm_scale = misc.freq2ppm(fq_scale, acqus['SFO1'], acqus['o1p']) + + + # Define useful things + modes = ['No', 'em', 'sin', 'qsin', 'gm', 'gmb'] # entries for the radiobuttons + act_keys = { # Active parameters + 'No': [], + 'em': ['lb'], + 'sin': ['ssb'], + 'qsin': ['ssb'], + 'gm': ['lb', 'gb', 'gc'], + 'gmb': ['lb', 'gb'], + } + tx = {} # Dictionary of the texts + + # Draw boxes for widgets + SI_box = plt.axes([0.85, 0.85, 0.07, 0.04]) + tdeff_box = plt.axes([0.85, 0.80, 0.07, 0.04]) + mode_box = plt.axes([0.825, 0.5, 0.15, 0.25]) + ssb_box = plt.axes([0.85, 0.25, 0.07, 0.04]) + lb_box = plt.axes([0.85, 0.20, 0.07, 0.04]) + gb_box = plt.axes([0.85, 0.15, 0.07, 0.04]) + gc_box = plt.axes([0.85, 0.1, 
0.07, 0.04]) + + # Define widgets + SI_tb = TextBox(SI_box, 'SI', textalignment='center') + tdeff_tb = TextBox(tdeff_box, 'TDeff', textalignment='center') + mode_radio = RadioButtons(mode_box, modes, active=0) + ssb_tb = TextBox(ssb_box, 'SSB', textalignment='center') + lb_tb = TextBox(lb_box, 'LB', textalignment='center') + gb_tb = TextBox(gb_box, 'GB', textalignment='center') + gc_tb = TextBox(gc_box, 'GC', textalignment='center') + + + # Functions connected to widgets + def update(): + # Redraw the plot + fid = np.copy(fid0) # Starting value + # Process data according to the new values + data = processing.fp(fid, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff']) + apodf = get_apod(fid0.shape[-1], procs) # Get window functions + # Recalculate the scales + fq_scale = processing.make_scale(data.shape[-1], acqus['dw'], rev=True) + ppm_scale = misc.freq2ppm(fq_scale, acqus['SFO1'], acqus['o1p']) + # Update the plot + tx['SI'].set_text('{:.0f}'.format(data.shape[-1])) + line.set_data(ppm_scale, data.real) # Spectrum + fidp.set_ydata((fid0 * apodf).real / max(fid0.real)) # FID (blue) + apodp.set_ydata(apodf) # WF (red) + # Update the limits + misc.set_ylim(ax, data.real) + misc.set_ylim(axf, (apodf, -apodf)) + plt.draw() + + def update_SI(v): + nonlocal procs + try: + SI = eval(v) + procs['zf'] = SI + except: + pass + update() + + def update_tdeff(v): + nonlocal procs + try: + val = eval(v) + procs['tdeff'] = int(val) + except: + pass + tx['tdeff'].set_text('{:.0f}'.format(procs['tdeff'])) + update() + + def update_mode(label): + nonlocal procs + for key, value in tx.items(): + value.set_color('k') + if label == 'No': + procs['wf']['mode'] = None + else: + procs['wf']['mode'] = label + for key in act_keys[label]: + tx[key].set_color('tab:blue') + update() + + def update_lb(v): + nonlocal procs + try: + lb = eval(v) + procs['wf']['lb'] = lb + except: + pass + tx['lb'].set_text('{:.0f}'.format(procs['wf']['lb'])) + update() + + def update_gb(v): + nonlocal procs + 
try: + gb = eval(v) + procs['wf']['gb'] = gb + except: + pass + tx['gb'].set_text('{:.2f}'.format(procs['wf']['gb'])) + update() + + def update_gc(v): + nonlocal procs + try: + gc = eval(v) + procs['wf']['gc'] = gc + except: + pass + tx['gc'].set_text('{:.2f}'.format(procs['wf']['gc'])) + update() + + def update_ssb(v): + nonlocal procs + try: + ssb = eval(v) + procs['wf']['ssb'] = ssb + except: + pass + tx['ssb'].set_text('{:.0f}'.format(procs['wf']['ssb'])) + update() + + + + # Draw the figure panel + fig = plt.figure(1) + fig.set_size_inches(15,9) + plt.subplots_adjust(left=0.1, bottom=0.05, right=0.8, top=0.95, hspace=0.4) + ax = fig.add_subplot(4,1,(1,3)) # spectrum + axf = fig.add_subplot(4,1,4) # fid + + ax.axhline(0, c='k', lw=0.4) # baseline + axf.axhline(0, c='k', lw=0.4) # baseline + line, = ax.plot(ppm_scale, data.real, c='tab:blue') # Spectrum + fidp, = axf.plot(np.arange(fid.shape[-1]), fid0.real/max(fid0.real), c='tab:blue', lw=0.6) # FID + fidp.set_label('Normalized FID') + apodp, = axf.plot(np.arange(fid.shape[-1]), apodf, c='tab:red', lw=1.0) # Window function + apodp.set_label('Window function') + + axf.legend() + + def calcy(box): + """ y_coordinate + (box_height / 2) """ + pos = box.get_position().bounds + y = round(pos[1] + pos[3]/2, 2) + return y + + # Write text alongside figures + tx['SI'] = plt.text(0.93, calcy(SI_box), '{:.0f}'.format(data.shape[-1]), ha='left', va='center', transform=fig.transFigure) + tx['tdeff'] = plt.text(0.93, calcy(tdeff_box), '{:.0f}'.format(procs['tdeff']), ha='left', va='center', transform=fig.transFigure) + tx['ssb'] = plt.text(0.93, calcy(ssb_box), '{:.0f}'.format(procs['wf']['ssb']), ha='left', va='center', transform=fig.transFigure) + tx['lb'] = plt.text(0.93, calcy(lb_box), '{:.0f}'.format(procs['wf']['lb']), ha='left', va='center', transform=fig.transFigure) + tx['gb'] = plt.text(0.93, calcy(gb_box), '{:.2f}'.format(procs['wf']['gb']), ha='left', va='center', transform=fig.transFigure) + tx['gc'] = 
plt.text(0.93, calcy(gc_box), '{:.2f}'.format(procs['wf']['gc']), ha='left', va='center', transform=fig.transFigure) + + # Customize appearance + ax.set_xlabel('$\delta $ {} /ppm'.format(misc.nuc_format(acqus['nuc']))) + ax.set_ylabel('Intensity /a.u.') + misc.set_ylim(ax, data.real) + misc.set_ylim(axf, (-1,1)) + misc.mathformat(ax) + misc.mathformat(axf) + misc.pretty_scale(ax, (max(ppm_scale), min(ppm_scale))) + misc.pretty_scale(axf, (0, fid.shape[-1])) + misc.set_fontsizes(ax, 14) + misc.set_fontsizes(axf, 14) + + # Connect function to widgets + SI_tb.on_submit(update_SI) + mode_radio.on_clicked(update_mode) + tdeff_tb.on_submit(update_tdeff) + ssb_tb.on_submit(update_ssb) + lb_tb.on_submit(update_lb) + gb_tb.on_submit(update_gb) + gc_tb.on_submit(update_gc) + + plt.show() + + # Calculate final spectrum, return it + datap = processing.fp(fid0, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff']) + + return datap, procs + + + + + + +def inv_fp(data, wf=None, size=None, fcor=0.5): + """ + Performs the full inverse processing of a 1D NMR spectrum (data). 
def inv_fp(data, wf=None, size=None, fcor=0.5):
    """
    Performs the full inverse processing of a 1D NMR spectrum (data).
    -------
    Parameters:
    - data: ndarray
        Input spectrum
    - wf: dict or None
        {'mode': function to be used, plus its parameters}: the window to be reverted
    - size: int or None
        Initial size of the FID (reverts the zero-filling)
    - fcor: float
        Weighting factor for the FID first point
    -------
    Returns:
    - data: ndarray
        Recovered FID
    """
    # IFT
    data = processing.ift(data, fcor=fcor)
    # Reverse zero-filling
    if size is not None:
        data = processing.td_eff(data, size)
    # Reverse window function: recompute the apodization profile by applying
    # the window to the data itself and dividing it out.
    if wf is not None:
        # BUGFIX: 'apod' was previously unbound (NameError) for mode 'gmb' or
        # any unrecognized mode; default to "no apodization" instead.
        apod = np.ones_like(data)
        if wf['mode'] == 'qsin':
            apod = processing.qsin(data, ssb=wf['ssb']) / data
        elif wf['mode'] == 'sin':
            apod = processing.sin(data, ssb=wf['ssb']) / data
        elif wf['mode'] == 'em':
            apod = processing.em(data, lb=wf['lb'], sw=wf['sw']) / data
        elif wf['mode'] == 'gm':
            # BUGFIX: pass gc through, consistently with processing.fp
            apod = processing.gm(data, lb=wf['lb'], gb=wf['gb'], sw=wf['sw'], gc=wf.get('gc', 0)) / data
        elif wf['mode'] == 'gmb':
            # gmb was applied by fp but could not be reverted here before
            apod = processing.gmb(data, lb=wf['lb'], gb=wf['gb'], sw=wf['sw']) / data
        data = data / apod
    return data
+ """ + + data = processing.td_eff(data, tdeff) + + # Processing the direct dimension + # Window function + if wf[1] is not None: + if wf[1]['mode'] == 'qsin': + data = processing.qsin(data, ssb=wf[1]['ssb']) + if wf[1]['mode'] == 'sin': + data = processing.sin(data, ssb=wf[1]['ssb']) + if wf[1]['mode'] == 'em': + data = processing.em(data, lb=wf[1]['lb'], sw=wf[1]['sw']) + if wf[1]['mode'] == 'gm': + data = processing.gm(data, lb=wf[1]['lb'], gb=wf[1]['gb'], sw=wf[1]['sw']) + # Zero-filling + if zf[1] is not None: + data = processing.zf(data, zf[1]) + # FT + data = processing.ft(data, fcor=fcor[1]) + + # Processing the indirect dimension + # If FnMODE is 'QF', do normal transpose instead of hyper + if FnMODE == 'QF': + data = data.T + else: + data = processing.tp_hyper(data) + + # Window function + if wf[0] is not None: + if wf[0]['mode'] == 'qsin': + data = processing.qsin(data, ssb=wf[0]['ssb']) + if wf[0]['mode'] == 'sin': + data = processing.sin(data, ssb=wf[0]['ssb']) + if wf[0]['mode'] == 'em': + data = processing.em(data, lb=wf[0]['lb'], sw=wf[0]['sw']) + if wf[0]['mode'] == 'gm': + data = processing.gm(data, lb=wf[0]['lb'], gb=wf[0]['gb'], sw=wf[0]['sw']) + # Zero-filling + if zf[0] is not None: + data = processing.zf(data, zf[0]) + # FT + # Discriminate between F1 acquisition modes + if FnMODE == 'States-TPPI': + data = processing.ft(data, alt=True, fcor=fcor[0]) + elif FnMODE == 'Echo-Antiecho' or FnMODE == 'QF': + data = processing.ft(data, fcor=fcor[0]) + else: + raise NotImplementedError('Unknown acquisition mode in F1. 
Aborting...') + if FnMODE == 'States-TPPI' or FnMODE == 'QF': + data = processing.rev(data) # reverse data + # Transpose back + if FnMODE == 'QF': + data = data.T + else: + data = processing.tp_hyper(data) + # Unpack and/or return processed data + if u: # unpack or not + if FnMODE == 'QF': + return data.real, data.imag + else: + return processing.unpack_2D(data) # rr, ir, ri, ii + else: + return data + + + + +def interactive_xfb(fid0, acqus, procs, lvl0=0.1, show_cnt=True): + """ + Perform the processing of a 2D NMR spectrum interactively. The GUI offers the opportunity to test different window functions, as well as different tdeff values and final sizes. + The active parameters appear as blue text. + When changing the parameters, give it some time to compute. The figure panel is quite heavy. + ------- + Parameters: + - fid0: 2darray + FID to process + - acqus: dict + Dictionary of acquisition parameters + - procs: dict + Dictionary of processing parameters + - lvl0: float + Starting level of the contours + - show_cnt: bool + Choose if to display data using contours (True) or heatmap (False) + ------- + Returns: + - pdata: 2darray + Processed spectrum + - procs: dict + Updated dictionary of processing parameters + """ + + def get_apod(size, procs): + """ Calculate the window function on the basis of 'procs' """ + Y = np.ones(size, dtype='complex64') # array of ones + # Process the array of ones and then revert FT to get everything but the processing + apodf = processing.ift(processing.fp(Y, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff'])) + apodf = apodf.real + # Adjust the dimension to size + if apodf.shape[-1] < size: # if shorter than size, zero-fill + apodf = processing.zf(apodf, size) + if apodf.shape[-1] > size: # if longet than size, trim + apodf = processing.td_eff(apodf, size) + return apodf + + CNT = bool(show_cnt) + + # Copy initial FID to prevent overwriting and create new variables + fid = np.copy(fid0) + fid0 = np.copy(fid) + A1 = 
np.ones_like(fid0[:,0]) # WF F1 + A2 = np.ones_like(fid0[0,:]) # WF F2 + + # Split acqus and procs from 2D version in two 1D-like dictionaries + acqu1s, acqu2s = misc.split_acqus_2D(acqus) + proc1s, proc2s = misc.split_procs_2D(procs) + + # Calculate starting values, get only rr + data = processing.xfb(fid, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff'], FnMODE=acqus['FnMODE'])[0] + + # Get WFs + apodf1 = get_apod(fid0.shape[0], proc1s) + apodf2 = get_apod(fid0.shape[1], proc2s) + + # Calculate the ppm scales + # F1 + fq1_scale = processing.make_scale(data.shape[0], acqu1s['dw'], rev=True) + ppm_f1 = misc.freq2ppm(fq1_scale, acqu1s['SFO1'], acqu1s['o1p']) + # F2 + fq2_scale = processing.make_scale(data.shape[1], acqu2s['dw'], rev=True) + ppm_f2 = misc.freq2ppm(fq2_scale, acqu2s['SFO1'], acqu2s['o1p']) + + # Define useful things + modes = ['No', 'em', 'sin', 'qsin', 'gm', 'gmb'] # entries for the radiobuttons + act_keys = { # Active parameters + 'No': [], + 'em': ['lb'], + 'sin': ['ssb'], + 'qsin': ['ssb'], + 'gm': ['lb', 'gb', 'gc'], + 'gmb': ['lb', 'gb'], + } + tx = [{},{}] # Dictionary of the texts. [Left column i.e. F2, Right column i.e. 
F1] + + # Draw boxes for widgets + SI_box = [ # Sizes + plt.axes([0.76, 0.90, 0.05, 0.04]), + plt.axes([0.89, 0.90, 0.05, 0.04])] + tdeff_box = [ # TDEFF + plt.axes([0.76, 0.85, 0.05, 0.04]), + plt.axes([0.89, 0.85, 0.05, 0.04])] + mode_box = [ # WF mode + plt.axes([0.76, 0.52, 0.09, 0.30]), + plt.axes([0.89, 0.52, 0.09, 0.30])] + ssb_box = [ # SSB + plt.axes([0.76, 0.45, 0.05, 0.04]), + plt.axes([0.89, 0.45, 0.05, 0.04])] + lb_box = [ # LB + plt.axes([0.76, 0.40, 0.05, 0.04]), + plt.axes([0.89, 0.40, 0.05, 0.04])] + gb_box = [ # GB + plt.axes([0.76, 0.35, 0.05, 0.04]), + plt.axes([0.89, 0.35, 0.05, 0.04])] + gc_box = [ # GC + plt.axes([0.76, 0.3, 0.05, 0.04]), + plt.axes([0.89, 0.3, 0.05, 0.04])] + + # Define widgets + SI_tb = [ # SI + TextBox(SI_box[0], 'SI', textalignment='center'), + TextBox(SI_box[1], '', textalignment='center')] + tdeff_tb = [ # TDEFF + TextBox(tdeff_box[0], 'TDeff', textalignment='center'), + TextBox(tdeff_box[1], '', textalignment='center')] + mode_radio = [ # WF mode + RadioButtons(mode_box[0], modes, active=0), + RadioButtons(mode_box[1], modes, active=0)] + ssb_tb = [ # SSB + TextBox(ssb_box[0], 'SSB', textalignment='center'), + TextBox(ssb_box[1], '', textalignment='center')] + lb_tb = [ # LB + TextBox(lb_box[0], 'LB', textalignment='center'), + TextBox(lb_box[1], '', textalignment='center')] + gb_tb = [ # GB + TextBox(gb_box[0], 'GB', textalignment='center'), + TextBox(gb_box[1], '', textalignment='center')] + gc_tb = [ # GC + TextBox(gc_box[0], 'GC', textalignment='center'), + TextBox(gc_box[1], '', textalignment='center')] + + # Functions connected to widgets + def update(): + # Redraws the plot + nonlocal cnt + proc1s, proc2s = misc.split_procs_2D(procs) # split procs for WFs + fid = np.copy(fid0) # Starting value + fid02 = np.copy(fid0[0,:]) # F2 FID + fid01 = np.copy(fid0[:,0]) # F1 FID + fidp = np.copy(fid0) # Whole FID for heatmap + + # Calculate the processed FID before FT, as processing.xfb does but without FTs + fidp = 
processing.ift(processing.fp(fidp, wf=proc2s['wf'], zf=fid0.shape[-1], tdeff=procs['tdeff'])) + if acqus['FnMODE'] == 'QF': + fidp = fidp.T + else: + fidp = processing.tp_hyper(fidp) + fidp = processing.ift(processing.fp(fidp, wf=proc1s['wf'], zf=fid0.shape[0], tdeff=procs['tdeff'])) + if acqus['FnMODE'] == 'QF': + fidp = fidp.T + else: + fidp = processing.tp_hyper(fidp) + + # Process data according to the new values + data = processing.xfb(fid, wf=procs['wf'], zf=procs['zf'], tdeff=procs['tdeff'], FnMODE=acqus['FnMODE'])[0] + + # Get WFs + apodf1 = get_apod(fid01.shape[-1], proc1s) + apodf2 = get_apod(fid02.shape[-1], proc2s) + + # Recalculate the scales + fq1_scale = processing.make_scale(data.shape[0], acqu1s['dw'], rev=True) + ppm_f1 = misc.freq2ppm(fq1_scale, acqu1s['SFO1'], acqu1s['o1p']) + fq2_scale = processing.make_scale(data.shape[1], acqu2s['dw'], rev=True) + ppm_f2 = misc.freq2ppm(fq2_scale, acqu2s['SFO1'], acqu2s['o1p']) + + # Update SI text with the actual size of data + tx[0]['SI'].set_text('{:.0f}'.format(data.shape[-1])) + tx[1]['SI'].set_text('{:.0f}'.format(data.shape[0])) + + # Update the plot + # Spectrum + if CNT: + cnt, _ = figures.redraw_contours(ax, ppm_f2, ppm_f1, data, lvl0, cnt, Neg=False, Ncnt=None, lw=0.5, cmap=[None, None]) + else: + cnt.set_data(data) + + # F2 FID + fidp2.set_ydata((fid02 * apodf2).real / np.max(fid02.real)) # FID (blue) + apodp2.set_ydata(apodf2) # WF (red) + # F1 FID + fidp1.set_ydata((fid01 * apodf1).real / np.max(fid01.real)) # FID (blue) + apodp1.set_ydata(apodf1) # WF (red) + + # Whole FID heatmap + hm.set_data(fidp.real) + + # Update the limits and make figure pretty + ax.set_xlabel('$\delta $ {} /ppm'.format(misc.nuc_format(acqu2s['nuc']))) + ax.set_ylabel('$\delta $ {} /ppm'.format(misc.nuc_format(acqu1s['nuc']))) + misc.set_ylim(axf2, (apodf2, -apodf2)) + misc.set_ylim(axf1, (apodf1, -apodf1)) + misc.set_fontsizes(ax, 14) + + # Redraw + fig.canvas.draw() + + # 
-------------------------------------------------- + # update_SI = [update_SI_f2, update_SI_f1] + def update_SI_f2(v): + nonlocal procs + try: + SI = eval(v) + procs['zf'][1] = SI + except: + pass + update() + def update_SI_f1(v): + nonlocal procs + try: + SI = eval(v) + procs['zf'][0] = SI + except: + pass + update() + update_SI = [update_SI_f2, update_SI_f1] + + # -------------------------------------------------- + # update_tdeff = [update_tdeff_f2, update_tdeff_f1] + def update_tdeff_f2(v): + nonlocal procs + try: + val = eval(v) + procs['tdeff'][1] = int(val) + except: + pass + tx[0]['tdeff'].set_text('{:.0f}'.format(procs['tdeff'][1])) + update() + def update_tdeff_f1(v): + nonlocal procs + try: + val = eval(v) + procs['tdeff'][0] = int(val) + except: + pass + tx[1]['tdeff'].set_text('{:.0f}'.format(procs['tdeff'][0])) + update() + update_tdeff = [update_tdeff_f2, update_tdeff_f1] + + # -------------------------------------------------- + # update_mode = [update_mode_f2, update_mode_f1] + def update_mode_f2(label): + nonlocal procs + for key, value in tx[0].items(): + value.set_color('k') + if label == 'No': + procs['wf'][1]['mode'] = None + else: + procs['wf'][1]['mode'] = label + for key in act_keys[label]: + tx[0][key].set_color('tab:blue') + update() + def update_mode_f1(label): + nonlocal procs + for key, value in tx[1].items(): + value.set_color('k') + if label == 'No': + procs['wf'][0]['mode'] = None + else: + procs['wf'][0]['mode'] = label + for key in act_keys[label]: + tx[1][key].set_color('tab:blue') + update() + update_mode = [update_mode_f2, update_mode_f1] + + # -------------------------------------------------- + # update_ssb = [update_ssb_f2, update_ssb_f1] + def update_ssb_f2(v): + nonlocal procs + try: + ssb = eval(v) + procs['wf'][1]['ssb'] = ssb + except: + pass + tx[0]['ssb'].set_text('{:.0f}'.format(procs['wf'][1]['ssb'])) + update() + def update_ssb_f1(v): + nonlocal procs + try: + ssb = eval(v) + procs['wf'][0]['ssb'] = ssb + except: + 
pass + tx[1]['ssb'].set_text('{:.0f}'.format(procs['wf'][0]['ssb'])) + update() + update_ssb = [update_ssb_f2, update_ssb_f1] + + # -------------------------------------------------- + # update_lb = [update_lb_f2, update_lb_f1] + def update_lb_f2(v): + nonlocal procs + try: + lb = eval(v) + procs['wf'][1]['lb'] = lb + except: + pass + tx[0]['lb'].set_text('{:.0f}'.format(procs['wf'][1]['lb'])) + update() + def update_lb_f1(v): + nonlocal procs + try: + lb = eval(v) + procs['wf'][0]['lb'] = lb + except: + pass + tx[1]['lb'].set_text('{:.0f}'.format(procs['wf'][0]['lb'])) + update() + update_lb = [update_lb_f2, update_lb_f1] + + # -------------------------------------------------- + # update_gb = [update_gb_f2, update_gb_f1] + def update_gb_f2(v): + nonlocal procs + try: + gb = eval(v) + procs['wf'][1]['gb'] = gb + except: + pass + tx[0]['gb'].set_text('{:.2f}'.format(procs['wf'][1]['gb'])) + update() + def update_gb_f1(v): + nonlocal procs + try: + gb = eval(v) + procs['wf'][0]['gb'] = gb + except: + pass + tx[1]['gb'].set_text('{:.2f}'.format(procs['wf'][0]['gb'])) + update() + update_gb = [update_gb_f2, update_gb_f1] + + # -------------------------------------------------- + # update_gc = [update_gc_f2, update_gc_f1] + def update_gc_f2(v): + nonlocal procs + try: + gc = eval(v) + procs['wf'][1]['gc'] = gc + except: + pass + tx[0]['gc'].set_text('{:.2f}'.format(procs['wf'][1]['gc'])) + update() + def update_gc_f1(v): + nonlocal procs + try: + gc = eval(v) + procs['wf'][0]['gc'] = gc + except: + pass + tx[1]['gc'].set_text('{:.2f}'.format(procs['wf'][0]['gc'])) + update() + update_gc = [update_gc_f2, update_gc_f1] + + # ------------------------------------------------------------------------------------------------------------ + # ------------------------------------------------------------------------------------------------------------ + + + # Draw the figure panel + fig = plt.figure(1) + fig.set_size_inches(15,9) + plt.subplots_adjust(left=0.1, bottom=0.05, 
right=0.725, top=0.95, hspace=0.75, wspace=0.25) + ax = fig.add_subplot(4,3,(1,9)) # spectrum + axf2 = fig.add_subplot(4,3,10) # fid F2 + axf1 = fig.add_subplot(4,3,11) # fid F1 + axhm = fig.add_subplot(4,3,12) # fid total + + + # Spectrum plot + ax.set_title('Spectrum') + if CNT: + cnt = figures.ax2D(ax, ppm_f2, ppm_f1, data, lvl=lvl0, X_label='', Y_label='', fontsize=14) + else: + cnt, axcbar = figures.ax_heatmap(ax, data, zlim='auto', z_sym=True, cmap=None, xscale=ppm_f2, yscale=ppm_f1, rev=(True,True), n_xticks=10, n_yticks=10, n_zticks=10, fontsize=14) + axcbar.tick_params(axis='y', labelright=False) # Turn off the ticks of the colorbar otherwise it is ugly as shit + + # FID F2 plot + axf2.set_title('F2 FID') + axf2.axhline(0, c='k', lw=0.4) # baseline + fidp2, = axf2.plot(np.arange(fid.shape[-1]), fid0[0].real/max(fid0[0].real), c='tab:blue', lw=0.6) # FID + fidp2.set_label('Normalized FID') + apodp2, = axf2.plot(np.arange(fid.shape[-1]), apodf2, c='tab:red', lw=1.0) # Window function + apodp2.set_label('Window function') + axf2.legend() + + # FID F1 plot + axf1.set_title('F1 FID') + axf1.axhline(0, c='k', lw=0.4) # baseline + fidp1, = axf1.plot(np.arange(fid.shape[0]), fid0[:,0].real/max(fid0[:,0].real), c='tab:blue', lw=0.6) # FID + fidp1.set_label('Normalized FID') + apodp1, = axf1.plot(np.arange(fid.shape[0]), apodf1, c='tab:red', lw=1.0) # Window function + apodp1.set_label('Window function') + axf1.legend() + + # Whole FID heatmap plot + axhm.set_title('FID') + hm, _ = figures.ax_heatmap(axhm, fid0.real, zlim='auto', z_sym=True, cmap=None, rev=(False, True), n_xticks=5, n_yticks=5, n_zticks=5, fontsize=14) + + # Write text alongside figures + # Define a function to calculate the y coordinate given the box coordinates + def calcy(box): + """ y_coordinate + (box_height / 2) """ + pos = box.get_position().bounds + y = round(pos[1] + pos[3]/2, 2) + return y + + # Write the text + # Header + plt.text(0.80, 0.97, 'F2', rotation=0, ha='center', va='center', 
transform=fig.transFigure, fontsize=14) + plt.text(0.93, 0.97, 'F1', rotation=0, ha='center', va='center', transform=fig.transFigure, fontsize=14) + # Left column, F2 + tx[0]['SI'] = plt.text(0.82, calcy(SI_box[0]), '{:.0f}'.format(data.shape[-1]), ha='left', va='center', transform=fig.transFigure) + tx[0]['tdeff'] = plt.text(0.82, calcy(tdeff_box[0]), '{:.0f}'.format(proc2s['tdeff']), ha='left', va='center', transform=fig.transFigure) + tx[0]['ssb'] = plt.text(0.82, calcy(ssb_box[0]), '{:.0f}'.format(proc2s['wf']['ssb']), ha='left', va='center', transform=fig.transFigure) + tx[0]['lb'] = plt.text(0.82, calcy(lb_box[0]), '{:.0f}'.format(proc2s['wf']['lb']), ha='left', va='center', transform=fig.transFigure) + tx[0]['gb'] = plt.text(0.82, calcy(gb_box[0]), '{:.2f}'.format(proc2s['wf']['gb']), ha='left', va='center', transform=fig.transFigure) + tx[0]['gc'] = plt.text(0.82, calcy(gc_box[0]), '{:.2f}'.format(proc2s['wf']['gc']), ha='left', va='center', transform=fig.transFigure) + # Right column, F1 + tx[1]['SI'] = plt.text(0.95, calcy(SI_box[0]), '{:.0f}'.format(data.shape[0]), ha='left', va='center', transform=fig.transFigure) + tx[1]['tdeff'] = plt.text(0.95, calcy(tdeff_box[0]), '{:.0f}'.format(proc1s['tdeff']), ha='left', va='center', transform=fig.transFigure) + tx[1]['ssb'] = plt.text(0.95, calcy(ssb_box[0]), '{:.0f}'.format(proc1s['wf']['ssb']), ha='left', va='center', transform=fig.transFigure) + tx[1]['lb'] = plt.text(0.95, calcy(lb_box[0]), '{:.0f}'.format(proc1s['wf']['lb']), ha='left', va='center', transform=fig.transFigure) + tx[1]['gb'] = plt.text(0.95, calcy(gb_box[0]), '{:.2f}'.format(proc1s['wf']['gb']), ha='left', va='center', transform=fig.transFigure) + tx[1]['gc'] = plt.text(0.95, calcy(gc_box[0]), '{:.2f}'.format(proc1s['wf']['gc']), ha='left', va='center', transform=fig.transFigure) + + # Add other elements to the figure + # Vertical line between F1 and F2 + plt.text(0.87, 0.63, '$-$'*55, rotation=90, ha='left', va='center', 
def _inv_window(data, w):
    """
    Computes the apodization profile that the window function described by
    the dictionary 'w' applied on 'data', so that it can be divided out.
    The profile is recovered by applying the window to 'data' and dividing
    by 'data' itself. A mode of None, or an unrecognized mode, yields a
    flat (no-op) profile instead of leaving the result undefined.
    -------
    Parameters:
    - data: 2darray
        Data whose apodization profile must be computed
    - w: dict
        Window function description. Key 'mode' selects the function;
        the remaining keys ('ssb', 'lb', 'gb', 'sw') are its parameters.
    -------
    Returns:
    - apod: 2darray
        Apodization profile, same shape as data
    """
    if w['mode'] == 'qsin':
        return processing.qsin(data, ssb=w['ssb']) / data
    if w['mode'] == 'sin':
        return processing.sin(data, ssb=w['ssb']) / data
    if w['mode'] == 'em':
        return processing.em(data, lb=w['lb'], sw=w['sw']) / data
    if w['mode'] == 'gm':
        return processing.gm(data, lb=w['lb'], gb=w['gb'], sw=w['sw']) / data
    # mode is None or unrecognized: nothing to revert.
    # (The original code left 'apod' undefined for unknown modes -> NameError)
    return np.ones_like(data)


def inv_xfb(data, wf=[None, None], size=[None, None], fcor=[0.5,0.5], FnMODE='States-TPPI'):
    """
    Reverts the processing of a 2D NMR spectrum (data), i.e. performs the
    inverse of processing.xfb: inverse FT in both dimensions, removal of the
    zero-filling, and division by the window functions.
    -------
    Parameters:
    - data: 2darray
        Input data (hypercomplex, unless FnMODE='QF')
    - wf: list of dict
        list of two entries [F1, F2]. Each entry is a dictionary describing the
        window function to revert, or None to skip the window reversal
    - size: list
        list of two entries [size F1, size F2]: sizes to which the data is
        trimmed in order to revert the zero-filling. None entries are skipped
    - fcor: list
        first fid point weighting factor [F1, F2]
    - FnMODE: str
        Acquisition mode in F1: 'States-TPPI', 'Echo-Antiecho' or 'QF'
    --------
    Returns:
    - data: 2darray
        Reverted (time-domain) data
    --------
    Raises:
    - NotImplementedError
        If FnMODE is not one of the supported acquisition modes
    """

    # Processing the indirect dimension
    # If FnMODE is 'QF', do normal transpose instead of hyper
    if FnMODE == 'QF':
        data = data.T
    else:
        data = processing.tp_hyper(data)

    if FnMODE == 'States-TPPI' or FnMODE == 'QF':
        data = processing.rev(data)     # reverse data

    # IFT on F1
    # Discriminate between F1 acquisition modes
    if FnMODE == 'States-TPPI':
        data = processing.ift(data, alt=True, fcor=fcor[0])
    elif FnMODE == 'Echo-Antiecho' or FnMODE == 'QF':
        data = processing.ift(data, fcor=fcor[0])
    else:
        raise NotImplementedError('Unknown acquisition mode in F1. Aborting...')

    # Revert zero-filling in F1
    if size[0] is not None:
        data = processing.td_eff(data, size[0])

    # Reverse window function in F1
    if wf[0] is not None:
        data = data / _inv_window(data, wf[0])

    # Transpose back
    if FnMODE == 'QF':
        data = data.T
    else:
        data = processing.tp_hyper(data)

    # IFT on F2
    data = processing.ift(data, fcor=fcor[1])

    # Revert zero-filling in F2
    if size[1] is not None:
        data = processing.td_eff(data, size[1])

    # Reverse window function in F2
    if wf[1] is not None:
        data = data / _inv_window(data, wf[1])

    return data
def make_scale(size, dw, rev=True):
    """
    Computes the frequency scale of the NMR spectrum, given the number of
    points and the employed dwell time (the REAL one, not the TopSpin one!).
    'rev'=True is required for the correct frequency arrangement in the
    NMR sense (decreasing frequency from left to right).
    """
    # Raw FFT frequency bins, then shift zero frequency to the centre
    scale = np.fft.fftfreq(size, d=dw)
    scale = np.fft.fftshift(scale)
    # Reverse for the conventional NMR axis orientation, if requested
    return scale[::-1] if rev else scale
def tabula_rasa(data, lvl=0.05, cmap=cm.Blues_r):
    """
    This function is to be used in SIFT algorithm.
    Allows interactive selection using a Lasso widget of the region of the spectrum
    which contain signal. Returns a masking matrix, of the same shape as data, whose entries
    are 1 inside the selection and 0 outside.
    -------
    Parameters:
    - data: 2darray
        Spectrum on which to draw the selection
    - lvl: float
        Level of the lowest contour, as fraction of the maximum of data
    - cmap: matplotlib colormap
        Colormap used to draw the contours
    -------
    Returns:
    - mask: 2darray
        Masking matrix: 1 inside the selected regions, 0 outside.
        If nothing was selected, a matrix of ones (i.e. a no-op mask).
    """
    # Define grid: point indices of the two dimensions of data
    xscale = np.arange(data.shape[1])
    yscale = np.arange(data.shape[0])

    thesignal = []  # List of the selected regions
    sgn_reg = 0     # Temporary storage of selected region
    mask = np.zeros_like(data)      # Mask matrix of zeros
    # NOTE(review): np.zeros_like inherits data's dtype, so a complex 'data'
    # produces a complex mask — confirm callers expect that.

    # Define 'save' button
    box = plt.axes([0.8, 0.025, 0.10, 0.07])
    button = Button(box, 'SAVE')

    def onselect(verts):
        # Function connected to the lasso.
        # 'verts' is the list of (x, y) vertices drawn by the user.
        nonlocal sgn_reg
        # raw selection of data: collect every grid point inside the lasso path
        path = Path(verts, closed=True)
        selected = []
        for i in yscale:
            for j in xscale:
                if path.contains_point((float(j),float(i))):
                    selected.append([j,i])

        # Create convex hull around the raw lasso
        CH = ConvexHull(np.array(selected))
        # Create delimiting wall: hull vertices, closed by repeating the first one
        xhull = list(CH.points[CH.vertices,0])
        xhull.append(CH.points[CH.vertices[0],0])
        xhull = np.array(xhull)
        yhull = list(CH.points[CH.vertices,1])
        yhull.append(CH.points[CH.vertices[0],1])
        yhull = np.array(yhull)

        # Update the plot: show the hull outline
        hull.set(visible=True)
        hull.set_data(xhull, yhull)

        # Fine selection of points on the basis of the hull
        # (re-test every grid point against the convex hull, not the raw lasso)
        path = Path(CH.points[CH.vertices], closed=True)
        selected = []
        for i in yscale:
            for j in xscale:
                if path.contains_point((float(j),float(i))):
                    selected.append([j,i])
        # Store the selected points in a non-local variable
        sgn_reg = np.array(selected)

        plt.draw()

    def save(event):
        # Function connected to the button: commits the current lasso
        # selection to 'thesignal' and draws its outline permanently.

        nonlocal thesignal
        thesignal.append(sgn_reg)       # Save the region

        CH = ConvexHull(sgn_reg)        # Compute convex hull
        # Create the walls (closed outline of the saved region)
        xhull = list(CH.points[CH.vertices,0])
        xhull.append(CH.points[CH.vertices[0],0])
        xhull = np.array(xhull)
        yhull = list(CH.points[CH.vertices,1])
        yhull.append(CH.points[CH.vertices[0],1])
        yhull = np.array(yhull)

        ax.plot(xhull, yhull, 'g')      # Plot the region walls on the figure forever
        hull.set(visible=False)         # Turn off the lasso

    # Parameters for contour: geometric progression of 16 levels
    # starting from lvl * max(data)
    norm = np.max(data)
    contour_start = norm * lvl
    contour_num = 16
    contour_factor = 1.40
    cl = contour_start * contour_factor**np.arange(contour_num)

    # Make the figure
    fig = plt.figure(1)
    fig.set_size_inches(12,8)
    plt.subplots_adjust(left=0.15, bottom=0.15)
    ax = fig.add_subplot(1,1,1)
    ax.contour(xscale, yscale, data, cl, cmap=cmap, linewidths=0.5)     # plot the contours

    hull, = ax.plot(0,0, visible=False)     # Create variable for the lasso selection on screen

    # Set limits
    #ax.set_xlim(data.shape[1], 0)
    ax.set_ylim(data.shape[0], 0)

    # Widgets
    lasso = LassoSelector(ax, onselect)
    button.on_clicked(save)

    plt.show()
    plt.close()

    # Fill the masking matrix on the basis of the selected region
    # If you selected something, set as '1' the highlighted points
    if len(thesignal) > 0:
        thesignal = np.concatenate(thesignal)
        for k in range(thesignal.shape[0]):
            # thesignal rows are [x, y] -> index as mask[y, x]
            mask[thesignal[k,1], thesignal[k,0]] = 1
    # If you did not select anything, the masking matrix does not alter the spectrum
    else:
        mask = np.ones_like(data)
    return mask
# Phase correction
def interactive_phase_1D(ppmscale, S):
    """
    This function allow to adjust the phase of 1D spectra interactively. Use the mouse scroll to regulate the values.
    Press 'z' to toggle the automatic vertical zoom adjustment.
    -------
    Parameters:
    - ppmscale: 1darray
        ppm scale of the spectrum. Used to regulate the pivot position
    - S: 1darray
        Spectrum to be phased. Must be complex!
    -------
    Returns:
    - phased_data: 1darray
        Phased spectrum
    - final_values: tuple
        (p0 /degrees, p1 /degrees, pivot /ppm) actually applied
    """

    def phase(data, p0=0, p1=0, pivot=None):
        """ This is the actual phase function """
        if data.dtype != 'complex64':
            data = data.astype('complex64')
        size = data.shape[-1]
        # convert to radians
        p0 = p0 * np.pi / 180
        p1 = p1 * np.pi / 180
        # Define axis for pivot that goes from 0 to 1
        pvscale = np.arange(size) / size
        if pivot is None:
            pv = 0.5
        else:
            # Fractional position of the pivot along the spectrum
            pv = misc.ppmfind(ppmscale, pivot)[0]/size
        # First-order phase ramp centred on the pivot
        apod = np.exp(1j * (p0 + p1 * (pvscale - pv))).astype(data.dtype)
        return apod * data

    # sensitivity (step per scroll click) for p0, p1, pivot
    # NOTE(review): 4 entries but only the first 3 are ever indexed — the
    # last one looks like a leftover; confirm before removing.
    sens = [5,5, 0.1, 0.1]

    # create empty variables for the phases and pivot to be returned
    p0_f = 0
    p1_f = 0
    pivot_f = round(np.mean([min(ppmscale),max(ppmscale)]), 2)

    # Boxes for widgets
    box_us = plt.axes([0.815, 0.825, 0.08, 0.075])      # increase sensitivity
    box_ds = plt.axes([0.905, 0.825, 0.08, 0.075])      # decrease sensitivity
    box_l = plt.axes([0.025, 0.15, 0.015, 0.7])         # left border
    box_r = plt.axes([0.060, 0.15, 0.015, 0.7])         # right border
    box_save = plt.axes([0.81, 0.15, 0.085, 0.04])      # save button
    box_reset = plt.axes([1-0.095, 0.15, 0.085, 0.04])  # reset button
    box_sande = plt.axes([0.81, 0.10, 0.18, 0.04])      # save and exit button
    box_radio = plt.axes([0.81, 0.55, 0.18, 0.25])      # radio buttons

    radiolabels = [     # labels for the radio buttons
            '0$^{th}$-order\nphase correction',
            '1$^{st}$-order\nphase correction',
            '1$^{st}$-order\npivot'
            ]


    # Make widgets
    # Sliders (trim the visible ppm window)
    l = Slider(ax=box_l, label='Left', valmin=min(ppmscale), valmax=max(ppmscale), valinit=max(ppmscale), orientation='vertical')
    r = Slider(ax=box_r, label='Right', valmin=min(ppmscale), valmax=max(ppmscale), valinit=min(ppmscale), orientation='vertical')
    # Buttons
    up_button = Button(box_us, '$\\uparrow$', hovercolor='0.975')
    down_button = Button(box_ds, '$\\downarrow$', hovercolor='0.975')
    save_button = Button(box_save, 'SAVE', hovercolor='0.975')
    reset_button = Button(box_reset, 'RESET', hovercolor='0.975')
    saveandexit = Button(box_sande, 'SAVE AND EXIT', hovercolor='0.975')
    # Radiobuttons (select which parameter the scroll wheel controls)
    radio = RadioButtons(box_radio, radiolabels)

    # Array 'status': 1 means active, 0 means inactive.
    stat = np.array([1, 0, 0])
    # values: p0   p1    pivot
    P = np.array([0, 0, round(np.mean([min(ppmscale),max(ppmscale)]), 2) ] )

    # Toggle for the automatic vertical zoom (key 'z')
    zoom_adj = True

    def statmod(label):
        # changes the 'stat' array according to the radiobutton
        nonlocal stat
        stat = np.zeros(3)
        for k, L in enumerate(radiolabels):
            if label == L:
                stat[k] = 1

    def roll_up(event):
        # Increase the active value of its 'sens'
        nonlocal P
        for k in range(3):
            if stat[k]:
                P[k] += sens[k]

    def roll_down(event):
        # Decrease the active value of its 'sens'
        nonlocal P
        for k in range(3):
            if stat[k]:
                P[k] -= sens[k]

    def sens_up(event):
        # Doubles the active 'sens'
        nonlocal sens
        for k in range(3):
            if stat[k]:
                sens[k] = sens[k]*2
    def sens_down(event):
        # Halves the active 'sens'
        nonlocal sens
        for k in range(3):
            if stat[k]:
                sens[k] = sens[k]/2

    def on_scroll(event):
        # When you move the mouse scroll: update the active parameter,
        # re-phase the spectrum and redraw everything.
        if event.button == 'up':
            roll_up(event)
        if event.button == 'down':
            roll_down(event)

        # Print the actual values
        phases_text.set_text('p0={:7.2f} | p1={:7.2f} | pv={:7.2f}'.format(*P))

        # Set the values
        p0 = P[0]
        p1 = P[1]
        pivot = P[2]

        data_inside = phase(S, p0, p1, pivot)       # phase the spectrum
        spectrum.set_ydata(data_inside.real)        # update plot
        pivot_bar.set_xdata(pivot)                  # update pivot bar
        # Interactively update the vertical limits
        if zoom_adj:
            T = max(data_inside.real)
            B = min(data_inside.real)
            ax.set_ylim(B - 0.05*T, T + 0.05*T)
        # Update
        fig.canvas.draw()


    def update_lim(val):
        # Trim the figure according to the border sliders
        L = l.val
        R = r.val
        ax.set_xlim(L,R)

    def reset(event):
        # Reset the phase and pivot values to their starting point
        # NOTE(review): unlike the initial P, the pivot here is rounded to an
        # integer (no decimals argument) — confirm this asymmetry is intended.
        nonlocal P
        P = np.array([0, 0, round(np.mean([min(ppmscale),max(ppmscale)])) ] )
        on_scroll(event)

    def save(event):
        # Write the actual P values in the final variables
        nonlocal p0_f, p1_f, pivot_f
        p0_f = P[0]
        p1_f = P[1]
        pivot_f = P[2]

    def save_and_exit(event):
        # Function for the SAVE AND EXIT button:
        # Calls the 'save' function, then closes the figure
        save(event)
        plt.close()

    def zoom_onoff(event):
        # Key handler: 'z' toggles the automatic vertical zoom
        nonlocal zoom_adj
        if event.key == 'z':
            zoom_adj = not(zoom_adj)

    # Make the figure
    fig = plt.figure(1)
    fig.set_size_inches(15,8)
    plt.subplots_adjust(left = 0.125, bottom=0.10, right=0.8, top=0.9)      # Make room for the sliders
    ax = fig.add_subplot(1,1,1)

    # Set borders and scale (ppm axis decreasing, NMR convention)
    ax.set_xlim(max(ppmscale), min(ppmscale))
    T = max(S.real)
    B = min(S.real)
    ax.set_ylim(B - 0.01*T, T + 0.01*T)
    # Make pretty scale
    misc.pretty_scale(ax, (max(ppmscale), min(ppmscale)))


    # Write axis label
    plt.text(0.5, 0.05, '$\delta$ /ppm', ha='center', va='center', fontsize=20, transform=fig.transFigure)

    # Live read-out of the current phase parameters
    phases_text = plt.text(0.75, 0.015,
            'p0={:7.2f} | p1={:7.2f} | pv={:7.2f}'.format(*P),
            ha='center', va='bottom', transform=fig.transFigure, fontsize=10)

    ax.axhline(0, c='k', lw=0.2)    # baseline guide

    spectrum, = ax.plot(ppmscale, S.real, c='b', lw=0.8)    # Plot the data
    pivot_bar = ax.axvline((min(ppmscale)+max(ppmscale))/2, c='r', lw=0.5)  # Plot the pivot bar

    # Link widgets to functions
    l.on_changed(update_lim)
    r.on_changed(update_lim)
    up_button.on_clicked(sens_up)
    down_button.on_clicked(sens_down)
    radio.on_clicked(statmod)
    reset_button.on_clicked(reset)
    save_button.on_clicked(save)
    saveandexit.on_clicked(save_and_exit)
    scroll = fig.canvas.mpl_connect('scroll_event', on_scroll)
    fig.canvas.mpl_connect('key_press_event', zoom_onoff)


    plt.show()

    # Apply the last SAVED parameters (not necessarily the last on screen)
    phased_data = phase(S, p0=p0_f, p1=p1_f, pivot=pivot_f)
    final_values = p0_f, p1_f, pivot_f
    print('p0: {:.3f}, p1: {:.3f}, pv: {:.3f}\n'.format(*final_values))
    return phased_data, final_values
def interactive_phase_2D(ppm_f1, ppm_f2, S, hyper=True):
    """
    Interactively adjust the phases of a 2D spectrum.
    S must be hypercomplex, so BEFORE TO UNPACK (unless hyper=False, in which
    case S is treated as an ordinary complex 2darray).
    -------
    Parameters:
    - ppm_f1: 1darray
        ppm scale of the indirect dimension
    - ppm_f2: 1darray
        ppm scale of the direct dimension
    - S: 2darray
        Spectrum to phase
    - hyper: bool
        True if S is hypercomplex, False if it is plain complex
    -------
    Returns:
    - S: 2darray
        Phased spectrum
    - final_values_f1: tuple
        (p0, p1, pivot) applied in F1
    - final_values_f2: tuple
        (p0, p1, pivot) applied in F2
    """

    # Unpack the hyperser
    if hyper:
        S_rr, S_ri, S_ir, S_ii = processing.unpack_2D(S)
    else:
        S_rr, S_ii = S.real, S.imag

    # Toggle for the automatic vertical zoom (key 'z')
    zoom_adj = True

    def phase(data, p0=0, p1=0, pivot=None, dim='f2'):
        """This is the actual phase function """
        # as 1D
        if data.dtype != 'complex64':
            data = data.astype('complex64')
        size = data.shape[-1]
        # convert to radians
        p0 = p0 * np.pi / 180
        p1 = p1 * np.pi / 180
        # Define axis for pivot that goes from 0 to 1
        pvscale = np.arange(size) / size
        if pivot is None:
            pv = 0.5
        elif dim == 'f2':
            pv = misc.ppmfind(ppm_f2, pivot)[0]/size
        elif dim == 'f1':
            pv = misc.ppmfind(ppm_f1, pivot)[0]/size
        apod = np.exp(1j * (p0 + p1 * (pvscale - pv))).astype(data.dtype)
        return apod * data

    def maketraces(coord, S, ppm_f2, ppm_f1, hyper=True):
        # Extract the traces according to the 'coord' list
        # coord[i] = (ppm in F2, ppm in F1) of the i-th selected point
        if hyper:
            S_rr, S_ri, S_ir, S_ii = processing.unpack_2D(S)
        else:
            S_rr, S_ii = S.real, S.imag
        # Create empty lists for the traces
        f1, f2 = [], []
        npk = len(coord)
        for i in range(npk):
            # Column through coord[i][0] -> F1 trace
            y = misc.get_trace(S_rr, ppm_f2, ppm_f1, coord[i][0], column=True)
            f1.append(y)
            # Row through coord[i][1] -> F2 trace
            x = misc.get_trace(S_rr, ppm_f2, ppm_f1, coord[i][1], column=False)
            f2.append(x)
        return f1, f2

    # Get the traces on which to see the effects of phase adjustment
    coord = misc.select_traces(ppm_f1, ppm_f2, S_rr)
    npk = len(coord)

    # Get the traces
    f1, f2 = maketraces(coord, S, ppm_f2, ppm_f1, hyper)

    # Set initial values

    # Create boxes
    # for sentitivity sliders
    box_us = plt.axes([0.815, 0.825, 0.08, 0.075])      # increase sensitivity
    box_ds = plt.axes([0.905, 0.825, 0.08, 0.075])      # decrease sensitivity
    # for zoom sliders
    box_l_f2 = plt.axes([0.025, 0.15, 0.015, 0.30])
    box_r_f2 = plt.axes([0.060, 0.15, 0.015, 0.30])
    box_l_f1 = plt.axes([0.025, 0.60, 0.015, 0.30])
    box_r_f1 = plt.axes([0.060, 0.60, 0.015, 0.30])
    # for buttons
    box_save = plt.axes([0.81, 0.15, 0.085, 0.04])      # save button
    box_reset = plt.axes([1-0.095, 0.15, 0.085, 0.04])  # reset button
    box_sande = plt.axes([0.81, 0.10, 0.18, 0.04])      # save and exit button
    box_radio = plt.axes([0.81, 0.55, 0.18, 0.25])      # radio buttons
    box_dimen = plt.axes([0.81, 0.35, 0.18, 0.18])      # radio buttons

    radiolabels = [     # labels for the radio buttons
            '0$^{th}$-order\nphase correction',
            '1$^{st}$-order\nphase correction',
            '1$^{st}$-order\npivot'
            ]

    # Make the sliders
    # for sensitivity
    up_button = Button(box_us, '$\\uparrow$', hovercolor='0.975')
    down_button = Button(box_ds, '$\\downarrow$', hovercolor='0.975')
    # for zoom
    l_f2 = Slider(ax=box_l_f2, label='Left', valmin=min(ppm_f2), valmax=max(ppm_f2), valinit=max(ppm_f2), orientation='vertical')
    r_f2 = Slider(ax=box_r_f2, label='Right', valmin=min(ppm_f2), valmax=max(ppm_f2), valinit=min(ppm_f2), orientation='vertical')
    l_f1 = Slider(ax=box_l_f1, label='Left', valmin=min(ppm_f1), valmax=max(ppm_f1), valinit=max(ppm_f1), orientation='vertical')
    r_f1 = Slider(ax=box_r_f1, label='Right', valmin=min(ppm_f1), valmax=max(ppm_f1), valinit=min(ppm_f1), orientation='vertical')
    # Make the buttons
    save_button = Button(box_save, 'SAVE', hovercolor='0.975')
    reset_button = Button(box_reset, 'RESET', hovercolor='0.975')
    saveandexit = Button(box_sande, 'SAVE AND EXIT', hovercolor='0.975')
    # Radiobuttons: which parameter, and which dimension, the scroll controls
    radio = RadioButtons(box_radio, radiolabels)
    seldim = RadioButtons(box_dimen, ['F2', 'F1'])

    # Array "sensitivity": step per scroll click
    sens = [#p0 p1 pivot
            [5, 5, 0.1],    #F2
            [5, 5, 0.1]     #F1
            ]

    # "status" arrays: 1 = active, 0 = inactive
    stat = np.array([1, 0, 0])      # p0, p1, pivot
    statf = np.array([1, 0])        # f2, f1

    P = np.array([      # Values
        [0, 0, round(np.mean([min(ppm_f2),max(ppm_f2)]), 2) ],  #F2
        [0, 0, round(np.mean([min(ppm_f1),max(ppm_f1)]), 2) ]   #F1
        ])
    # For reset
    P0 = np.copy(P)

    # Initialize final variables with starting values
    p0_f2 = P0[0][0]
    p1_f2 = P0[0][1]
    pivot_f2 = P0[0][2]
    p0_f1 = P0[1][0]
    p1_f1 = P0[1][1]
    pivot_f1 = P0[1][2]


    # Functions connected to widgets
    def statmod(label):
        # changes the 'stat' array according to the radiobutton
        nonlocal stat
        stat = np.zeros(3)
        for k, L in enumerate(radiolabels):
            if label == L:
                stat[k] = 1

    def change_dim(label):
        # selects the dimension (F2 or F1) the scroll acts on
        nonlocal statf
        if label == 'F2':
            statf = np.array([1,0])
        if label == 'F1':
            statf = np.array([0,1])

    def roll_up(event):
        # Increase the active value of its 'sens'
        nonlocal P
        for i in range(2):
            for k in range(3):
                if statf[i] and stat[k]:
                    P[i,k] += sens[i][k]
        # Manage out-of-border: clamp the pivots inside the ppm scales
        if P[0][2] > max(ppm_f2):
            P[0][2] = round(np.floor(max(ppm_f2)), 2)
        if P[1][2] > max(ppm_f1):
            P[1][2] = round(np.floor(max(ppm_f1)), 2)

    def roll_down(event):
        # Decrease the active value of its 'sens'
        nonlocal P
        for i in range(2):
            for k in range(3):
                if statf[i] and stat[k]:
                    P[i][k] -= sens[i][k]
        # Manage out-of-border: clamp the pivots inside the ppm scales
        if P[0][2] < min(ppm_f2):
            P[0][2] = round(np.ceil(min(ppm_f2)), 2)
        if P[1][2] < min(ppm_f1):
            P[1][2] = round(np.ceil(min(ppm_f1)), 2)

    def sens_up(event):
        # Doubles the active 'sens'
        nonlocal sens
        for i in range(2):
            for k in range(3):
                if statf[i] and stat[k]:
                    sens[i][k] = sens[i][k]*2
    def sens_down(event):
        # Halves the active 'sens'
        nonlocal sens
        for i in range(2):
            for k in range(3):
                if statf[i] and stat[k]:
                    sens[i][k] = sens[i][k]/2

    def on_scroll(event):
        # When you move the mouse scroll: update the active parameter,
        # re-phase the whole 2D, re-extract the traces and redraw.
        if event.button == 'up':
            roll_up(event)
        if event.button == 'down':
            roll_down(event)

        # Print the actual values
        phases_text.set_text(
                'p02={:7.2f} | p12={:7.2f} | pv2={:7.2f} || p01={:7.2f} | p11={:7.2f} | pv1={:7.2f}'.format(*P[0], *P[1]))

        # phase the entire 2D: F2 first, then transpose and phase F1
        Sp = phase(S, p0=P[0][0], p1=P[0][1], pivot=P[0][2], dim='f2')
        if hyper:
            Sp = processing.tp_hyper(Sp)
        else:
            Sp = Sp.T
        Sp = phase(Sp, p0=P[1][0], p1=P[1][1], pivot=P[1][2], dim='f1')
        if hyper:
            Sp = processing.tp_hyper(Sp)
        else:
            Sp = Sp.T

        # Get the traces
        f1, f2 = maketraces(coord, Sp, ppm_f2, ppm_f1, hyper)

        for i in range(npk):
            # take the traces
            y_f1 = f1[i]
            y_f2 = f2[i]
            # update plots
            t_f2[i].set_ydata(y_f2.real)
            t_f1[i].set_ydata(y_f1.real)
            p_f2[i].set_xdata(P[0][2])
            p_f1[i].set_xdata(P[1][2])
            # Update zoom
            if zoom_adj:
                misc.set_ylim(ax[2*i], y_f2.real)
                misc.set_ylim(ax[2*i+1], y_f1.real)
        fig.canvas.draw()

    def zoom_onoff(event):
        # Key handler: 'z' toggles the automatic vertical zoom
        nonlocal zoom_adj
        if event.key == 'z':
            zoom_adj = not(zoom_adj)

    def update_lim(val):
        # Update zoom: trim the visible windows of the trace panels
        L2 = l_f2.val
        R2 = r_f2.val
        L1 = l_f1.val
        R1 = r_f1.val
        for i in range(npk):
            ax[2*i].set_xlim(L2,R2)
            ax[2*i+1].set_xlim(L1,R1)

    def reset(event):
        # Reset the sliders to the starting values
        nonlocal P
        P = np.copy(P0)
        on_scroll(event)

    def save(event):
        # Save the values
        nonlocal p0_f2, p1_f2, pivot_f2, p0_f1, p1_f1, pivot_f1
        p0_f2 = P[0][0]
        p1_f2 = P[0][1]
        pivot_f2 = P[0][2]
        p0_f1 = P[1][0]
        p1_f1 = P[1][1]
        pivot_f1 = P[1][2]

    def save_and_exit(event):
        # Function for the SAVE AND EXIT button:
        # Calls the 'save' function, then closes the figure
        save(event)
        plt.close()

    # Make the figure
    fig = plt.figure(1)
    fig.set_size_inches(15,8)
    plt.subplots_adjust(left = 0.125, bottom=0.125, right=0.8, top=0.9, wspace=0.10, hspace=0.20)    # Make room for the sliders
    # Create figure panels: one for each trace
    # (even indices = F2 traces, odd indices = F1 traces)
    ax = []
    for i in range(2*npk):
        ax.append(fig.add_subplot(npk, 2, i+1))

    # Set axis limits (ppm decreasing, NMR convention)
    for i in range(2*npk):
        if np.mod(i+1,2)!=0:
            ax[i].set_xlim(max(ppm_f2), min(ppm_f2))
        else:
            ax[i].set_xlim(max(ppm_f1), min(ppm_f1))
    # Set vertical limits
    for i in range(npk):
        for j in range(2):
            if j==0:    # left
                T = max(f2[i].real)
                B = min(f2[i].real)
                panel = 2 * i
                ax[panel].set_title('$\delta\,$F1: {:.1f} ppm'.format(coord[i][1]))
            else:       # right
                T = max(f1[i].real)
                B = min(f1[i].real)
                panel = 2 * i + 1
                ax[panel].set_title('$\delta\,$F2: {:.1f} ppm'.format(coord[i][0]))


            ax[panel].set_ylim(B - 0.01*T, T + 0.01*T)
            # Make pretty scale
            xsx, xdx = ax[panel].get_xlim()

            misc.pretty_scale(ax[panel], ax[panel].get_xlim(), axis='x', n_major_ticks=10)

            misc.mathformat(ax[panel])
            # Plot ticks only in the bottom row
            if i != npk-1:
                ax[panel].tick_params(axis='x', labelbottom=False)

    # Create empty lists for traces plots
    t_f2 = []
    t_f1 = []
    p_f2 = []
    p_f1 = []
    # Plot the traces and append to the correct list
    for i in range(npk):
        tf2, = ax[2*i].plot(ppm_f2, f2[i], c='b', lw=0.8)       # Plot the data
        t_f2.append(tf2)
        pivot_bar_f2 = ax[2*i].axvline(P[0][2], c='r', lw=0.5)
        p_f2.append(pivot_bar_f2)
        tf1, = ax[2*i+1].plot(ppm_f1, f1[i], c='b', lw=0.8)     # Plot the data
        t_f1.append(tf1)
        pivot_bar_f1 = ax[2*i+1].axvline(P[1][2], c='r', lw=0.5)
        p_f1.append(pivot_bar_f1)
        ax[2*i].axhline(0, c='k', lw=0.2)       # baseline guide
        ax[2*i+1].axhline(0, c='k', lw=0.2)     # baseline guide

    plt.text(0.30, 0.050, '$\delta$ F2 /ppm', ha='center', va='bottom', fontsize=18, transform=fig.transFigure)
    plt.text(0.65, 0.050, '$\delta$ F1 /ppm', ha='center', va='bottom', fontsize=18, transform=fig.transFigure)

    # Live read-out of the current phase parameters in both dimensions
    phases_text = plt.text(0.975, 0.015,
            'p02={:7.2f} | p12={:7.2f} | pv2={:7.2f} || p01={:7.2f} | p11={:7.2f} | pv1={:7.2f}'.format(*P[0], *P[1]),
            ha='right', va='bottom', transform=fig.transFigure, fontsize=10)

    # Connect the widgets to the functions
    l_f2.on_changed(update_lim)
    r_f2.on_changed(update_lim)
    l_f1.on_changed(update_lim)
    r_f1.on_changed(update_lim)
    reset_button.on_clicked(reset)
    save_button.on_clicked(save)
    saveandexit.on_clicked(save_and_exit)

    up_button.on_clicked(sens_up)
    down_button.on_clicked(sens_down)
    radio.on_clicked(statmod)
    seldim.on_clicked(change_dim)
    scroll = fig.canvas.mpl_connect('scroll_event', on_scroll)
    fig.canvas.mpl_connect('key_press_event', zoom_onoff)


    plt.show()

    # Phase the spectrum with the final (last SAVED) parameters:
    # F2 first, then transpose, phase F1, and transpose back
    S = phase(S, p0=p0_f2, p1=p1_f2, pivot=pivot_f2, dim='f2')
    if hyper:
        S = processing.tp_hyper(S)
    else:
        S = S.T
    S = phase(S, p0=p0_f1, p1=p1_f1, pivot=pivot_f1, dim='f1')
    if hyper:
        S = processing.tp_hyper(S)
    else:
        S = S.T

    final_values_f1 = p0_f1, p1_f1, pivot_f1
    final_values_f2 = p0_f2, p1_f2, pivot_f2
    print('F2 - p0: {:.3f}, p1: {:.3f}, pv: {:.3f}'.format(*final_values_f2))
    print('F1 - p0: {:.3f}, p1: {:.3f}, pv: {:.3f}\n'.format(*final_values_f1))

    return S, final_values_f1, final_values_f2
def integral(fx, x=None, lims=None):
    """
    Calculates the primitive of fx. If fx is a multidimensional array, the integrals are computed along the last dimension.
    -------
    Parameters:
    - fx: ndarray
        Function (array) to integrate
    - x: 1darray or None
        Independent variable. Determines the integration step. If None, it is the point scale
    - lims: tuple or None
        Integration range. If None, the whole function is integrated.
    -------
    Returns:
    - Fx: ndarray
        Integrated function.
    """
    # Work on copies so the caller's arrays are never modified
    y = np.copy(fx)
    x_axis = np.arange(fx.shape[-1]) if x is None else np.copy(x)

    # The integration step is the resolution of the independent variable
    step = misc.calcres(x_axis)

    # Restrict to the requested window, if any
    if lims is None:    # whole range
        x_win, y_win = np.copy(x_axis), np.copy(y)
    else:
        x_win, y_win = misc.trim_data(x_axis, y, *lims)

    # The running sum times the step approximates the primitive
    return np.cumsum(y_win, axis=-1) * step
def integral_2D(ppm_f1, t_f1, SFO1, ppm_f2, t_f2, SFO2, u_1=None, fwhm_1=200, utol_1=0.5, u_2=None, fwhm_2=200, utol_2=0.5, plot_result=False):
    """
    Calculate the integral of a 2D peak.
    The idea is to extract the traces correspondent to the peak center and fit them with a gaussian function in each dimension. Then, once got the intensity of each of the two gaussians, multiply them together in order to obtain the 2D integral.
    This procedure should be equivalent to what CARA does.
    ---------
    Parameters:
    - ppm_f1: 1darray
        PPM scale of the indirect dimension
    - t_f1: 1darray
        Trace of the indirect dimension, real part
    - SFO1: float
        Larmor frequency of the nucleus in the indirect dimension
    - ppm_f2: 1darray
        PPM scale of the direct dimension
    - t_f2: 1darray
        Trace of the direct dimension, real part
    - SFO2: float
        Larmor frequency of the nucleus in the direct dimension
    - u_1: float
        Chemical shift in F1 /ppm. Defaults to the center of the scale
    - fwhm_1: float
        Starting FWHM /Hz in the indirect dimension
    - utol_1: float
        Allowed tolerance for u_1 during the fit. (u_1-utol_1, u_1+utol_1)
    - u_2: float
        Chemical shift in F2 /ppm. Defaults to the center of the scale
    - fwhm_2: float
        Starting FWHM /Hz in the direct dimension
    - utol_2: float
        Allowed tolerance for u_2 during the fit. (u_2-utol_2, u_2+utol_2)
    - plot_result: bool
        True to show how the program fitted the traces.
    --------
    Returns:
    - I_tot: float
        Computed integral.
    """

    def f2min(param, T, x, SFO1):
        """ Cost function """
        par = param.valuesdict()
        # 2.355 ~ 2*sqrt(2*ln 2): converts FWHM to standard deviation
        sigma = misc.freq2ppm(par['fwhm'], np.abs(SFO1)) / 2.355        # Convert FWHM to ppm and then to std
        model = sim.f_gaussian(x, par['u'], sigma, A=par['I'])          # Compute gaussian
        # 'I' is held fixed by the minimizer; its optimal value is computed
        # analytically at every iteration instead of being fitted
        par['I'] = fit.fit_int(T, model)        # Calculate integral
        residual = par['I'] * model - T
        return residual

    def fitting(ppm, T, SFO1, u_0, fwhm_0, utol=0.5):
        """ Main function: fits one trace with a gaussian lineshape """
        param = l.Parameters()
        param.add('u', value=u_0, min=u_0-utol, max=u_0+utol)
        param.add('fwhm', value=fwhm_0, min=0)
        param.add('I', value=1, vary=False)     # Do not vary as it is adjusted during the fit

        minner = l.Minimizer(f2min, param, fcn_args=(T, ppm, SFO1))
        result = minner.minimize(method='leastsq', max_nfev=10000, xtol=1e-10, ftol=1e-10)
        popt = result.params.valuesdict()
        res = result.residual

        # Calculate the model, update the popt dictionary
        # ('I' in popt is still the dummy value 1: recompute it on the
        # best-fit lineshape exactly as f2min does)
        sigma = misc.freq2ppm(popt['fwhm'], np.abs(SFO1)) / 2.355
        model_0 = sim.f_gaussian(ppm, popt['u'], sigma, A=popt['I'])
        popt['I'] = fit.fit_int(T, model_0)
        model_0 *= popt['I']

        return popt, model_0

    # Calculate u_0 if not given: default to the center of each scale
    if u_1 is None:
        u_1 = np.mean(ppm_f1)
    if u_2 is None:
        u_2 = np.mean(ppm_f2)

    # Fit both traces using the function above
    popt_f2, fit_f2 = fitting(ppm_f2, t_f2, SFO2, u_2, fwhm_2, utol_2)
    popt_f1, fit_f1 = fitting(ppm_f1, t_f1, SFO1, u_1, fwhm_1, utol_1)

    if plot_result:     # Do the plot
        xlim = [(max(ppm_f2), min(ppm_f2)),
                (max(ppm_f1), min(ppm_f1))]

        # Make the figure
        fig = plt.figure()
        fig.set_size_inches(figures.figsize_large)
        plt.subplots_adjust(left=0.05, right=0.95, bottom=0.10, top=0.90, wspace=0.20)

        axes = [fig.add_subplot(1,2,w+1) for w in range(2)]
        axes[0].set_title('FIT F2')
        axes[1].set_title('FIT F1')
        axes[0].plot(ppm_f2, t_f2, c='tab:blue', label='Trace F2')
        axes[0].plot(ppm_f2, fit_f2, c='tab:red', lw=0.9, label='Fit F2')
        axes[0].plot(ppm_f2, t_f2-fit_f2, c='green', lw=0.6, label='residual')
        axes[1].plot(ppm_f1, t_f1, c='tab:blue', label='Trace F1')
        axes[1].plot(ppm_f1, fit_f1, c='tab:red', lw=0.9, label='Fit F1')
        axes[1].plot(ppm_f1, t_f1-fit_f1, c='green', lw=0.6, label='residual')

        # Fancy shit
        for k, ax in enumerate(axes):
            misc.pretty_scale(ax, xlim[k], 'x')
            misc.pretty_scale(ax, ax.get_ylim(), 'y')
            misc.mathformat(ax)
            ax.set_xlabel(r'$\delta$ /ppm')
            ax.legend()
            misc.set_fontsizes(ax, 16)

        plt.show()
        plt.close()

    # Calculate integral: product of the two 1D gaussian intensities
    I_tot = popt_f1['I'] * popt_f2['I']
    return I_tot
def pknl(data, grpdly=0, onfid=False):
    """
    Compensate for the Bruker group delay at the beginning of FID through a first-order phase correction of
    p1 = 360 * GRPDLY
    This should be applied after apodization and zero-filling.
    -------
    Parameters:
    - data: ndarray
        Input data. Be sure it is complex!
    - grpdly: int
        Number of points that make the group delay.
    - onfid: bool
        If it is True, performs FT before to apply the phase correction, and IFT after.
    -------
    Returns:
    - datap: ndarray
        Corrected data
    -------
    Raises:
    - AssertionError
        If data is not a complex array
    """
    # Safety check: the correction is meaningless on real data.
    # The original used "assert cond, print(msg)": print() returns None, so
    # the AssertionError carried no message. Pass the string directly.
    assert np.iscomplexobj(data), 'Input data is not complex'

    if onfid is True:   # FT, ps, IFT
        data_ft = processing.ft(data)
        datap_ft, *_ = processing.ps(data_ft, p1=-360*grpdly)
        datap = processing.ift(datap_ft)
        return datap
    else:               # Just ps: data is already in the frequency domain
        datap, *_ = processing.ps(data, p1=-360*grpdly)
        return datap
def convdta(data, grpdly=0, scaling=1):
    """
    Removes the digital filtering to obtain a spectrum similar to the command CONVDTA performed by TopSpin.
    However, they will differ a little bit because of the digitization.
    These differences are not invisible to human's eye.
    -------
    Parameters:
    - data: ndarray
        FID with digital filter
    - grpdly: int
        Number of points that the digital filter consists of. Key $GRPDLY in acqus file
    - scaling: float
        Scaling factor of the resulting FID. Needed to match TopSpin's intensities.
    -------
    Returns:
    - data_in: ndarray
        FID without the digital filter. It will have grpdly points less than data.
    """
    # Safety copy
    data_in = np.copy(data)

    # Nothing to remove: return the (scaled) copy unchanged.
    # The slicing below misbehaves with grpdly=0 ([..., -0:] is the whole
    # array and [..., :-0] is empty), so handle this case explicitly.
    if grpdly == 0:
        return data_in * scaling

    # Circular shift to put the digital filter at the end of FID
    data_in = np.roll(data_in, -grpdly, axis=-1)
    # Digital filter, reversed along the LAST axis to make it look like a FID.
    # (the original reversed axis 0, which corrupted multi-dimensional input)
    dig_filt = data_in[..., -grpdly:][..., ::-1]

    # Subtract the digital filter, reversed, from the start of the FID
    data_in[..., :grpdly] -= dig_filt
    # Trim the digital filter at the end of FID
    data_in = data_in[..., :-grpdly]
    # Correct the intensities
    data_in *= scaling
    return data_in
+ """ + + #initialize values + if ppmscale[0] < ppmscale[-1]: + S = S[::-1] + ppmscale = ppmscale[::-1] + ppmscale0 = np.copy(ppmscale) # save original scale for reset + + offset = 0 # initialize returned value + calstep = 0.25 # calibration step + + radio_flag = 1 # radiobutton status + + # Initialize guidelines positions + # Fixed one + g_idx = len(ppmscale)//2 + g_pos = ppmscale[g_idx] + # Mobile one + d_idx = len(ppmscale)//2 + d_pos = ppmscale[g_idx] + + # Boxes and widgets + # Sliders + box_left = plt.axes([0.1, 0.15, 0.80, 0.02]) + left_slider = Slider(box_left, 'Left', 0, len(ppmscale)-1, 0, valstep=1) + box_right = plt.axes([0.1, 0.10, 0.80, 0.02]) + right_slider = Slider(box_right, 'Right', 0, len(ppmscale)-1, len(ppmscale)-1, valstep=1) + + # Buttons + box_save = plt.axes([0.905, 0.475, 0.07, 0.08]) + button = Button(box_save, 'SAVE\nAND\nEXIT', hovercolor='0.975') + box_reset = plt.axes([0.825, 0.475, 0.07, 0.08]) + reset_button = Button(box_reset, 'RESET', hovercolor='0.975') + box_up = plt.axes([0.905, 0.675, 0.07, 0.08]) + up_button = Button(box_up, '$\\uparrow$', hovercolor='0.975') + box_down = plt.axes([0.825, 0.675, 0.07, 0.08]) + down_button = Button(box_down, '$\\downarrow$', hovercolor='0.975') + + # RadioButtons + box_radio = plt.axes([0.825, 0.25, 0.15, 0.2]) + radio_labels = ['Reference signal', 'Calibration value'] + radio = RadioButtons(box_radio, radio_labels, active=0) + + + # Functions connected to the widgets + def radio_val(label): + # Switch the status of the radiobutton + nonlocal radio_flag + if label==radio_labels[0]: + radio_flag = 1 + elif label==radio_labels[1]: + radio_flag = 0 + + def increase_step(event): + # up + nonlocal calstep + calstep *= 2 + + def decrease_step(event): + # down + nonlocal calstep + calstep /= 2 + + def update(val): + left = left_slider.val + right = right_slider.val + ppm_in = ppmscale[left], ppmscale[right] + if np.abs(ppm_in[0]-ppm_in[1]) > 1: + misc.pretty_scale(ax, ppm_in) + else: + 
ax.set_xlim(ppm_in) + + S_in = S[min(left,right):max(left,right)] + T = np.max(np.array(S_in).real) + B = np.min(np.array(S_in).real) + ax.set_ylim(B - 0.01*T, T + 0.01*T) + + def save(event): + # Calculate the calibration offset and close figure + nonlocal offset + offset = ppmscale[0] - ppmscale0[0] + plt.close() + + def reset(event): + nonlocal calstep, ppmscale + calstep = 0.25 + ppmscale = np.copy(ppmscale0) + on_scroll(event) + fig.canvas.draw() + + + def mouse_click(event): + if radio_flag: + move_fixed(event) + else: + move_mobile(event) + + def move_fixed(event): + # set position of the red bar + x = event.xdata + if x is not None: + if event.dblclick and str(event.button) == 'MouseButton.LEFT': + nonlocal g_pos, g_idx + g_pos = x + g_idx = misc.ppmfind(ppmscale, g_pos)[0] + guide.set_xdata(x) + gtext.set_text('Ref: {: 9.3f}'.format(g_pos)) + fig.canvas.draw() + + def move_mobile(event): + # set position of the green bar + x = event.xdata + if x is not None: + if event.dblclick and str(event.button) == 'MouseButton.LEFT': + nonlocal d_pos, d_idx + d_pos = x + d_idx = misc.ppmfind(ppmscale, d_pos)[0] + dguide.set_xdata(x) + dtext.set_text('Cal: {: 9.3f}'.format(d_pos)) + fig.canvas.draw() + + def on_scroll(event): + # move the scale + nonlocal ppmscale + if event.button == 'up': + ppmscale += calstep + if event.button == 'down': + ppmscale -= calstep + spect.set_xdata(ppmscale) + guide.set_xdata(ppmscale[g_idx]) + dguide.set_xdata(d_pos) + gtext.set_text('Ref: {: 9.3f}'.format(ppmscale[g_idx])) + dtext.set_text('Cal: {: 9.3f}'.format(d_pos)) + update(0) + fig.canvas.draw() + + # Make the figure + fig = plt.figure(1) + fig.set_size_inches(15,8) + plt.subplots_adjust(left = 0.1, bottom=0.25, right=0.80, top=0.90) + ax = fig.add_subplot(1,1,1) + + spect, = ax.plot(ppmscale, S.real, c='tab:blue', lw=0.8) # plot spectrum + + # Plot the guidelines + guide = ax.axvline(x=g_pos, lw=0.7, c='tab:red') # static + dguide = ax.axvline(x=d_pos, lw=0.7, c='tab:green') # 
dynamic + # green and red lines position + gtext = plt.text(0.925, 0.89, 'Ref: {: 9.3f}'.format(g_pos), ha='right', va='top', fontsize=20, transform=fig.transFigure, c='tab:red') + dtext = plt.text(0.925, 0.85, 'Cal: {: 9.3f}'.format(d_pos), ha='right', va='top', fontsize=20, transform=fig.transFigure, c='tab:green') + + # Make cool figure + T = np.max(np.array(S).real) + B = np.min(np.array(S).real) + ax.set_ylim(B - 0.01*T, T + 0.01*T) + + ax.ticklabel_format(axis='y', style='scientific', scilimits=(-2,2), useMathText=True) + misc.pretty_scale(ax, (max(ppmscale), min(ppmscale))) + + # Connect widgets to functions + left_slider.on_changed(update) + right_slider.on_changed(update) + button.on_clicked(save) + reset_button.on_clicked(reset) + up_button.on_clicked(increase_step) + down_button.on_clicked(decrease_step) + radio.on_clicked(radio_val) + cursor = Cursor(ax, useblit=True, horizOn=False, color='k', linewidth=0.4) + mouse = fig.canvas.mpl_connect('button_press_event', mouse_click) + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + + plt.show() + plt.close(1) + + print('Offset: {: .3f} /ppm'.format(offset)) + + return offset + +#----------------------------------------------------------------------------------------- +# MCR and related + + +def stack_MCR(input_data, H=True): + """ + Performs matrix augmentation converting input_data from dimensions (X, Y, Z) to (Y, X * Z) if H=True, or (X * Y, Z) if H=False. + ------- + Parameters: + - input_data: 3darray + Contains the spectra to be stacked together. The index that runs on the datasets must be the first one. + - H: bool + True for horizontal stacking, False for vertical stacking. + ------- + Returns: + - data: 2darray + Augmented data matrix. 
+ """ + if isinstance(input_data, list): + nds = len(input_data) + Q = input_data + else: + nds = input_data.shape[0] + Q = [input_data[w] for w in range(nds)] + if H: + #data = np.concatenate([input_data[w] for w in range(nds)], axis=1).astype('complex128') + data = np.concatenate(Q, axis=1).astype('complex128') + else: + #data = np.concatenate([input_data[w] for w in range(nds)], axis=0).astype('complex128') + data = np.concatenate(Q, axis=0).astype('complex128') + return data + + +def MCR_unpack(C, S, nds, H=True): + # Reverts matrix augmentation of stack_MCR. + # if H is True, converts C from dimensions (Y, n) to (X, Y, n) + # and S from dimensions (n, X*Z) to (X, n, Z) + # if H is False converts C from dimensions (Y, n) to (X, Y, n) + # and S from dimensions (n, X*Z) to (X, n, Z) + if H: + C_f = np.array([C for w in range(nds)]) + S_f = np.array(np.split(S, nds, axis=1)) + else: + C_f = np.array(np.split(C, nds, axis=0)) + S_f = np.array([S for w in range(nds)]) + return C_f, S_f + +def calc_nc(data, s_n): + """ + Calculates the optimal number of components, given the standard deviation of the noise. + The threshold value is calculated as stated in Theorem 1 of reference: https://arxiv.org/abs/1710.09787v2 + ------- + Parameters: + - data: 2darray + Input data + - s_n: float + Noise standard deviation + ------- + Returns: + - n_c: int + Number of components + """ + M, N = data.shape + + S = linalg.svdvals(data) + + b = M/N + c = (1/2**0.5) * ( 1 + b + (1 + 14*b + b**2)**0.5 )**0.5 + threshold = s_n * ( (c + 1/c) * (c + b/c))**0.5 + + threshold *= S[0] + for k in range(len(S)): + if S[k] < threshold: + n_c = k+1 + break + + return n_c + + +def SIMPLISMA(D, nc, f=10, oncols=True): + """ + Finds the first nc purest components of matrix D using the SIMPLISMA algorithm, proposed by Windig and Guilment (DOI: 10.1021/ac00014a016 ). If oncols=True, this function estimates S with SIMPLISMA, then calculates C = DS+ . 
If oncols=False, this function estimates C with SIMPLISMA, then calculates S = C+ D. f defines the percentage of allowed noise. + ------- + Parameters: + - D: 2darray + Input data, of dimensions m x n + - nc: int + Number of components to be found. This determines the final size of the C and S matrices. + - f: float + Percentage of allowed noise. + - oncols: bool + If True, SIMPLISMA estimates the S matrix, otherwise estimates C. + ------- + Returns: + - C: 2darray + Estimation of the C matrix, of dimensions m x nc. + - S: 2darray + Estimation of the S matrix, of dimensions nc x n. + """ + + rows = D.shape[0] # number of rows of D + cols = D.shape[1] # number of columns of D + + if oncols: + # on columns + m = np.zeros(rows).astype(D.dtype) + s = np.zeros(rows).astype(D.dtype) + + for i in range(rows): + m[i] = np.mean(D[i,:]) # mean of the i-th row + s[i] = np.std(D[i,:]) # STD of the i-th row + + # Correction factor for the noise 'alpha' + a = 0.01 * f * max(m) + + print('Computing 1° purest variable...', end='\r') + p1 = s / (m + a) # First purity spectrum + pv, ipv = [], [] # Purest variables and correspondant index + + # 1st purest variable + pv.append(max(p1)) + ipv.append(np.argmax(p1)) + + # Rescaling of data for lambda: makes determinant of COO + # proportional only to the independance between variables + l = ( s**2 + (m + a)**2 )**0.5 # lambda corrected for alpha + Dl = np.zeros_like(D) + for i in range(rows): + Dl[i,:] = D[i,:] / l[i] + + Q = (1/cols) * Dl @ Dl.T # Correlation-around-origin matrix + + # Calculation of the weighting factors: + # express the independency between the variables + w = np.zeros((rows, nc)).astype(D.dtype) # Weights + p_s = np.zeros((rows, nc)).astype(D.dtype) # Pure components spectra + s_s = np.zeros((rows, nc)).astype(D.dtype) # STD spectra + + # First weight + w[:,0] = (s**2 + m**2) / (s**2 + (m + a)**2) + p_s[:,0] = w[:,0] * p1 + s_s[:,0] = w[:,0] * s + + # Matrix for computing the determinants + # It has the following 
structure, where Q denotes the COO matrix + # and p# the index of the # purest component: + """ + Q[i,i] Q[i,p1] Q[i,p2] ... Q[i,p(i-1)] + Q[p1,i] Q[p1,p1] Q[p1,p2] ... Q[p1,p(i-1)] + Q[p2,i] Q[p2,p1] Q[p2,p2] ... Q[p2,p(i-1)] + ... ... ... ... ... + Q[p(i-1),i] Q[p(i-1),p1] Q[p(i-1),p2] ... Q[p(i-1),p(i-1)] + """ + for c in range(1, nc): # 'c' cycles on number of components + print('Computing '+str(c+1)+'° purest variable...', end='\r') + for i in range(rows): # i cycles on the number of rows + W = np.zeros((c+1,c+1)).astype(D.dtype) + W[0,0] = Q[i,i] + for k in range(1, c+1): # cycles inside W + W[0,k] = Q[i,ipv[k-1]] # first row \{0,0} + W[k,0] = Q[ipv[k-1],i] # first column \{0,0} + for q in range(1, c+1): + W[k,q] = Q[ipv[k-1],ipv[q-1]] # all the rest, going row per row + w[i,c] = linalg.det(W) + + p_s[:,c] = p_s[:,0] * w[:,c] # Create pure spectrum of c-th component + s_s[:,c] = s_s[:,0] * w[:,c] # Create STD spectrum of c-th component + pv.append(max(p_s[:,c])) # Update pure component + ipv.append(np.argmax(p_s[:,c])) # Update pure variable + + print('Purest variables succesfully found.\n') + for c in range(nc): + print('{}° purest variable:\t\t{}'.format(c+1, ipv[c])) + + # MCR "S" matrix (D = CS + E) + S = np.zeros((nc, cols)).astype(D.dtype) + for c in range(nc): + S[c,:] = D[ipv[c],:] + C = D @ linalg.pinv(S) + + else: + # on rows + m = np.zeros((cols)).astype(D.dtype) + s = np.zeros((cols)).astype(D.dtype) + + for j in range(cols): + m[j] = np.mean(D[:,j]) # mean of the i-th row + s[j] = np.std(D[:,j]) # STD of the i-th row + + # Correction factor for the noise 'alpha' + a = 0.01 * f * max(m) + + print('Computing 1° purest variable...', end='\r') + # First purity spectrum + p1 = s / (m + a) # First purity spectrum + pv, ipv = [], [] # Purest variables and correspondant index + + # 1st purest variable + pv.append(max(p1)) + ipv.append(np.argmax(p1)) + + # Rescaling of data for lambda: makes determinant of COO + # proportional only to the independance 
between variables + l = ( s**2 + (m + a)**2 )**0.5 # lambda corrected for alpha + Dl = np.zeros_like(D) + for j in range(cols): + Dl[:,j] = D[:,j] / l[j] + + Q = (1/rows) * Dl.T @ Dl # Correlation-around-origin matrix + + # Calculation of the weighting factors: + # express the independency between the variables + + w = np.zeros((cols, nc)).astype(D.dtype) # Weights + p_s = np.zeros((cols, nc)).astype(D.dtype) # Pure components spectra + s_s = np.zeros((cols, nc)).astype(D.dtype) # STD spectra + + # First weight + w[:,0] = (s**2 + m**2) / (s**2 + (m + a)**2) + p_s[:,0] = w[:,0] * p1 + s_s[:,0] = w[:,0] * s + + # Matrix for computing the determinants + # It has the following structure, where Q denotes the COO matrix + # and p# the index of the # purest component: + """ + Q[j,j] Q[j,p1] Q[j,p2] ... Q[j,p(j-1)] + Q[p1,j] Q[p1,p1] Q[p1,p2] ... Q[p1,p(j-1)] + Q[p2,j] Q[p2,p1] Q[p2,p2] ... Q[p2,p(j-1)] + ... ... ... ... ... + Q[p(j-1),j] Q[p(j-1),p1] Q[p(j-1),p2] ... Q[p(j-1),p(j-1)] + """ + for c in range(1, nc): # 'c' cycles on number of components + print('Computing '+str(c+1)+'° purest variable...', end='\r') + for j in range(cols): # j cycles on the number of colums + W = np.zeros((c+1,c+1)).astype(D.dtype) + W[0,0] = Q[j,j] + for k in range(1, c+1): # cycles inside W + W[0,k] = Q[j,ipv[k-1]] # first row \{0,0} + W[k,0] = Q[ipv[k-1],j] # first column \{0,0} + for q in range(1, c+1): + W[k,q] = Q[ipv[k-1],ipv[q-1]] # all the rest, going row per row + w[j,c] = linalg.det(W) + + p_s[:,c] = p_s[:,0] * w[:,c] # Create pure spectrum of c-th component + s_s[:,c] = s_s[:,0] * w[:,c] # Create STD spectrum of c-th component + pv.append(max(p_s[:,c])) # Update pure component + ipv.append(np.argmax(p_s[:,c])) # Update pure variable + + print('Purest variables succesfully found.\n') + for c in range(nc): + print('{}° purest variable:\t\t{}'.format(c+1, ipv[c])) + + # MCR "C" matrix (D = CS + E) + C = np.zeros((rows, nc)).astype(D.dtype) + for c in range(nc): + C[:,c] = 
D[:,ipv[c]] + S = linalg.pinv(C) @ D + + return C, S + + +def MCR_ALS(D, C, S, itermax=10000, tol=1e-5): + """ + Performs alternating least squares to get the final C and S matrices. Being the fundamental MCR equation: + D = CS + E + At the k-th step of the iterative cycle: + 1. C(k) = DS+(k−1) + 2. S(k) = C+(k) D + 3. E(k) = D − C(k) S(k) + Defined rC and rS as the Frobenius norm of the difference of C and S matrices between two subsequent steps: + rC = || C(k) − C(k−1) || + rS = || S(k) − S(k−1) || + The convergence is reached when: + rC <= tol && rS <= tol + ------- + Parameters: + - D: 2darray + Input data, of dimensions m × n + - C: 2darray + Estimation of the C matrix, of dimensions m x nc. + - S: 2darray + Estimation of the S matrix, of dimensions nc x n. + - itermax: int + Maximum number of iterations + - tol: float + Threshold for the arrest criterion. + ------- + Returns + - C: 2darray + Optimized C matrix, of dimensions m x nc. + - S: 2darray + Optimized S matrix, of dimensions nc x n. + """ + + itermax = int(itermax) + E = D - C @ S + + start_time = datetime.now() + print('\n-----------------------------------------------------\n') + print(' MCR optimization running... 
\n') + + convergence_flag = 0 + print( '# \tC convergence\tS convergence') + for kk in range(itermax): + # Copy from previous cycle + C0 = np.copy(C) + E0 = np.copy(E) + S0 = np.copy(S) + + # Compute new C, S and E + C = D @ linalg.pinv(S) + S = linalg.pinv(C) @ D + E = D - C @ S + + # Compute the Frobenius norm of the difference matrices + # between two subsequent cycles + rC = linalg.norm(C - C0) + rS = linalg.norm(S - S0) + + # Ongoing print of the residues + print(str(kk+1)+' \t{:.5e}'.format(rC)+ '\t'+'{:.5e}'.format(rS), end='\r') + + # Arrest criterion + if (rC < tol) and (rS < tol) and kk: + end_time = datetime.now() + print( '\n\n\tMCR converges in '+str(kk+1)+' steps.') + convergence_flag = 1 # Set to 1 if the arrest criterion is reached + break + + if not convergence_flag: + print ('\n\n\tMCR does not converge.') + end_time = datetime.now() + print( '\tTotal runtime: {}'.format(end_time - start_time)) + + return C, S + +def new_MCR_ALS(D, C, S, itermax=10000, tol=1e-5, reg_f=None, reg_fargs=[]): + + itermax = int(itermax) + E = D - C @ S + + start_time = datetime.now() + print('\n-----------------------------------------------------\n') + print(' MCR optimization running... 
\n') + + convergence_flag = 0 + print( '# \tC convergence\tS convergence') + reg_fargs.append(None) + for kk in range(itermax): + # Copy from previous cycle + C0 = np.copy(C) + E0 = np.copy(E) + S0 = np.copy(S) + + + # Compute new C, S and E + C = D @ linalg.pinv(S) + + # Regularization + if reg_f is None: + pass + else: + C, S, prev_param = reg_f(C, S, *reg_fargs, cycle=kk) + reg_fargs[-1] = prev_param + + S = linalg.pinv(C) @ D + if reg_f is not None: + for i in range(S.shape[0]): + S[i] /= 1#np.max(S[i]) + E = D - C @ S + + # Compute the Frobenius norm of the difference matrices + # between two subsequent cycles + rC = linalg.norm(C - C0) + rS = linalg.norm(S - S0) + + # Ongoing print of the residues + print(str(kk+1)+' \t{:.5e}'.format(rC)+ '\t'+'{:.5e}'.format(rS), end='\r') + + # Arrest criterion + if (rC < tol) and (rS < tol): + end_time = datetime.now() + print( '\n\n\tMCR converges in '+str(kk+1)+' steps.') + convergence_flag = 1 # Set to 1 if the arrest criterion is reached + break + + if not convergence_flag: + print ('\n\n\tMCR does not converge.') + end_time = datetime.now() + print( '\tTotal runtime: {}'.format(end_time - start_time)) + + return C, S + +def MCR_WALS(D, C, S, errmat, itermax=10000, tol=1e-5): + itermax = int(itermax) + Vc = np.zeros((errmat.shape[0])).astype(D.dtype) + Vs = np.zeros((errmat.shape[1])).astype(D.dtype) + for i in range(D.shape[0]): + Vc[i] = np.std(errmat[i])**2 + for j in range(D.shape[1]): + Vs[j] = np.std(errmat[:,j])**2 + print(Vc.shape, Vs.shape) + plt.plot(Vc) + plt.show() + E = D - C @ S + + start_time = datetime.now() + print('\n-----------------------------------------------------\n') + print(' MCR optimization running... 
\n') + + convergence_flag = 0 + print( '# \tC convergence\tS convergence') + X = np.zeros_like(D) + """ + for i in range(D.shape[0]): + X[i,:] = D[i,:] * Vs + """ + for j in range(D.shape[1]): + X[:,j] = D[:,j] * Vc + for kk in range(itermax): + # Copy from previous cycle + C0 = np.copy(C) + E0 = np.copy(E) + S0 = np.copy(S) + + """ + # Projection of D in S space + # Compute new C + Sp = np.array([S[w,:] * Vs for w in range(S.shape[0])]) + C = X @ linalg.pinv(Sp) + # Compute new S + S = linalg.pinv(C) @ D + """ + Cp = np.array([C[:,w] * Vc for w in range(C.shape[1])]).T + S = linalg.pinv(Cp) @ X + C = D @ linalg.pinv(S) + + E = D - C @ S + + # Compute the Frobenius norm of the difference matrices + # between two subsequent cycles + rC = linalg.norm(C - C0) + rS = linalg.norm(S - S0) + + # Ongoing print of the residues + print(str(kk+1)+' \t{:.5e}'.format(rC)+ '\t'+'{:.5e}'.format(rS), end='\r') + + # Arrest criterion + if (rC < tol) and (rS < tol): + end_time = datetime.now() + print( '\n\n\tMCR converges in '+str(kk+1)+' steps.') + convergence_flag = 1 # Set to 1 if the arrest criterion is reached + break + + if not convergence_flag: + print ('\n\n\tMCR does not converge.') + end_time = datetime.now() + print( '\tTotal runtime: {}'.format(end_time - start_time)) + + + return C, S + + + +def MCR(input_data, nc, f=10, tol=1e-5, itermax=1e4, H=True, oncols=True): + """ + This is an implementation of Multivariate Curve Resolution for the denoising of 2D NMR data. + Let us consider a matrix D, of dimensions m x n, where the starting data are stored. The final purpose of MCR is to decompose the D matrix as follows: + D = CS + E + where C and S are matrices of dimension m x nc and nc x n, respectively, and E contains the part of the data that are not reproduced by the factorization. + Being D the FID of a NMR spectrum, C will contain time evolutions of the indirect dimension, and S will contain transients in the direct dimension. 
+ + The total MCR workflow can be separated in two parts: a first algorithm that produces an initial guess for the three matrices C, S and E (SIMPLISMA), and an optimization step that aims at the removal of the unwanted features of the data by iteratively filling the E matrix (MCR ALS). + This function returns the denoised datasets, CS, and the single C and S matrices. + ------- + Parameters + - input_data: 2darray or 3darray + a 3D array containing the set of 2D NMR datasets to be coprocessed stacked along the first dimension. A single 2D array can be passed, if the denoising of a single dataset is desired. + - nc: int + number of purest components to be looked for; + - f: float + percentage of allowed noise; + - tol: float + tolerance for the arrest criterion; + - itermax: int + maximum number of allowed iterations + - H: bool + True for horizontal stacking of data (default), False for vertical; + - oncols: bool + True to estimate S with processing.SIMPLISMA, False to estimate C. + ------- + Returns + - CS_f: 2darray or 3darray + Final denoised data matrix + - C_f: 2darray or 3darray + Final C matrix + - S_f: 2darray or 3darray + Final S matrix + """ + + # Get number of datasets (nds) from the shape of the input tensor + if isinstance(input_data, list): + nds = len(input_data) + else: + if len(input_data.shape) == 3: + nds = input_data.shape[0] + elif len(input_data.shape) == 2: + nds = 1 + input_data = np.reshape(input_data, (1, input_data.shape[0], input_data.shape[1])) + else: + print('Input data is not a matrix!') + exit() + + + print('\n*****************************************************') + print('* *') + print('* Multivariate Curve Resolution *') + print('* *') + print('*****************************************************\n') + + D = processing.stack_MCR(input_data, H=H) # Matrix augmentation + + # Get initial estimation of C, S and E + C0, S0 = processing.SIMPLISMA(D, nc, f, oncols=oncols) + + # Optimize C and S matrix through Alternating Least Squares 
    C, S = processing.MCR_ALS(D, C0, S0, itermax=itermax, tol=tol)

    # Revert matrix augmentation
    C_f, S_f = processing.MCR_unpack(C, S, nds, H)

    # Obtain the denoised data of the same shape as the input
    if isinstance(input_data, list):
        CS_f = []
        for j in range(nds):
            CS_f.append(C_f[j] @ S_f[j])
    else:
        CS_f = np.zeros_like(input_data).astype(input_data.dtype)
        for j in range(nds):
            CS_f[j] = C_f[j] @ S_f[j]

    # Reshape if no matrix augmentation is performed
    if nds == 1:
        CS_f = CS_f[0]
        C_f = C_f[0]
        S_f = S_f[0]

    print('\n*****************************************************\n')

    return CS_f, C_f, S_f

# ---------------------------------------------------------------------------------------- #



def new_MCR(input_data, nc, f=10, tol=1e-5, itermax=1e4, H=True, oncols=True, our_function=None, fargs=[], our_function2=None, f2args=[]):
    # This is an implementation of Multivariate Curve Resolution
    # for the denoising of 2D NMR data. It requires:
    # - input_data: a tensor containing the set of 2D NMR datasets to be coprocessed
    #   stacked along the first dimension;
    # - nc        : number of purest components;
    # - f         : percentage of allowed noise;
    # - tol       : tolerance for the arrest criterion;
    # - itermax   : maximum number of allowed iterations, default 10000
    # - H         : True for horizontal stacking of data (default), False for vertical;
    # - oncols    : True to estimate S with purest components, False to estimate C
    # - our_function / fargs   : optional replacement of SIMPLISMA for the initial guess
    # - our_function2 / f2args : optional regularization passed to new_MCR_ALS
    # NOTE(review): fargs/f2args are mutable defaults — safe only as long as
    # they are never mutated here; confirm before modifying this function.
    # This function returns the denoised datasets, 'CS', and the 'C' and 'S' matrices.
+ + # Get number of datasets (nds) from the shape of the input tensor + if isinstance(input_data, list): + nds = len(input_data) + else: + if len(input_data.shape) == 3: + nds = input_data.shape[0] + elif len(input_data.shape) == 2: + nds = 1 + input_data = np.reshape(input_data, (1, input_data.shape[0], input_data.shape[1])) + else: + print('Input data is not a matrix!') + exit() + + + print('\n*****************************************************') + print('* *') + print('* Multivariate Curve Resolution *') + print('* *') + print('*****************************************************\n') + + D = processing.stack_MCR(input_data, H=H) # Matrix augmentation + + # Get initial estimation of C, S and E + if our_function is None: + C0, S0 = processing.SIMPLISMA(D, nc, f, oncols=oncols) + else: + C0, S0, nc = our_function(D, *fargs) + + # Optimize C and S matrix through Alternating Least Squares + if our_function2 is None: + C, S = processing.MCR_ALS(D, C0, S0, itermax=itermax, tol=tol) + else: + C, S = processing.new_MCR_ALS(D, C0, S0, itermax, tol, our_function2, f2args) + + # Revert matrix augmentation + C_f, S_f = processing.MCR_unpack(C, S, nds, H) + + # Obtain the denoised data of the same shape as the input + if isinstance(input_data, list): + CS_f = [] + for j in range(nds): + CS_f.append(C_f[j] @ S_f[j]) + else: + CS_f = np.zeros_like(input_data).astype(input_data.dtype) + for j in range(nds): + CS_f[j] = C_f[j] @ S_f[j] + + # Reshape if no matrix augmentation is performed + if nds == 1: + CS_f = CS_f[0] + C_f = C_f[0] + S_f = S_f[0] + + print('\n*****************************************************\n') + + return CS_f, C_f, S_f + +def LRD(data, nc): + """ + Denoising method based on Low-Rank Decomposition. + The algorithm performs a singular value decomposition on data, then keeps only the first nc singular values while setting all the others to 0. + Finally, rebuilds the data matrix using the modified singular values. 
+ ------- + Parameters: + - data: 2darray + Data to be denoised + - nc: int + Number of components, i.e. number of singular values to keep + ------- + Returns: + - data_out: 2darray + Denoised data + """ + # Safety check on data dimension + if len(data.shape) != 2: + raise ValueError('Input data is not 2D. Aborting...') + + print('\n*****************************************************') + print('* *') + print('* Low Rank Denoising *') + print('* *') + print('*****************************************************\n') + + # Make SVD + print('Performing SVD. This might take a while...') + U, svals, V = linalg.svd(data) + print('Done.\n') + # Apply hard-thresholding + svals_p = np.zeros_like(svals) + svals_p[:nc] = svals[:nc] + # Reconstruct the denoised data + data_out = U @ linalg.diagsvd(svals_p, U.shape[1], V.shape[0]) @ V + print('Low-Rank Denosing completed.') + print('\n*****************************************************\n') + return data_out + +def Cadzow(data, n, nc, print_head=True): + """ + Performs Cadzow denoising on data, which is a 1D array of N points. + The algorithm works as follows: + 1. Transform data in a Hankel matrix H of dimensions (N-n, n) + 2. Make SVD on H = U S V + 3. Keep only the first nc singular values, and put all the rest to 0 (S -> S') + 4. Rebuild H' = U S' V + 5. Average the antidiagonals to rebuild the Hankel-type structure, then make 1D array + + Set print_head=True to display the fancy heading. 
+ """ + if print_head is True: + print('\n*****************************************************') + print('* *') + print('* Cadzow denoising *') + print('* *') + print('*****************************************************\n') + + N = data.shape[-1] + + # Builds a Hankel-type matrix containing in the first row "data" up to index "n-1" + # and as last column "data" from index "n" to the end + H = linalg.hankel(data[:n], data[n-1:]).T + + U, s, V = linalg.svd(H) # Make SVD + sp = np.zeros_like(s) # Create empty array for singular values + sp[:nc] = s[:nc] # Keep only the first nc singular values + + Hp = U @ linalg.diagsvd(sp, H.shape[0], H.shape[1]) @ V # Rebuild the new data matrix + datap = np.array([np.mean(np.diag(Hp[:, ::-1], w)) for w in range(-N+n, n)])[::-1] # Mean on the antidiagonals + + return datap + + +def iterCadzow(data, n, nc, itermax=100, f=0.005, print_head=True, print_time=True): + """ + Performs Cadzow denoising on data, which is a 1D array of N points, in an iterative manner. + The algorithm works as follows: + 1. Transform data in a Hankel matrix H of dimensions (N-n, n) + 2. Make SVD on H = U S V + 3. Keep only the first nc singular values, and put all the rest to 0 (S -> S') + 4. Rebuild H' = U S' V + 5. Average the antidiagonals to rebuild the Hankel-type structure, then make 1D array + 6. Check arrest criterion: if it is not reached, go to 1, else exit. + + The arrest criterion is: + | S(step k-1)[nc-1] / S(step k-1)[0] - S(step k)[nc-1] / S(step k)[0] | < f * S(step 0)[nc-1] / S(step 0)[0] + + - itermax: max number of iterations allowed + - f: factor that appears in the arrest criterion + - print_time: set it to True to show the time it took + - print_head: set it to True to display the fancy heading. 
+ """ + + if print_head is True: + print('\n*****************************************************') + print('* *') + print('* Cadzow denoising *') + print('* *') + print('*****************************************************\n') + + def check_arrcrit(s_0, s_1, nc, tol): + """ + Arrest criterion: + check if the difference of the ratio [max(s) / min(s)] between two subsequent iterations is below tol + """ + r_0 = s_0[0] / s_1[0] + r_c = s_0[nc-1] / s_1[nc-1] + R = np.abs(r_0 - r_c) + + if R < tol: + return R, True + else: + return R, False + + def calc_tol(s, nc, f=0.01): + tol = (s[nc] / s[0]) * f + return tol + + + start_time = datetime.now() + + N = data.shape[-1] + + data0 = data + # Builds a Hankel-type matrix containing in the first row "data" up to index "n-1" + # and as last column "data" from index "n" to the end + H0 = linalg.hankel(data[:n], data[n-1:]).T + + s0 = linalg.svdvals(H0) # Calculate the singular values of H0 + sp = np.zeros_like(s0) # Create empty array to store the singular values to be kept + + + tol = calc_tol(s0, nc, f=f) + + print( '#\tControl value\t|\tTarget') + for k in range(itermax): + H0 = linalg.hankel(data0[:n], data0[n-1:]).T # Make Hankel + U, s, V = linalg.svd(H0) # Make SVD + sp[:nc] = s[:nc] # Keep only the first nc singular values + + Hp = U @ linalg.diagsvd(sp, H0.shape[0], H0.shape[1]) @ V # Rebuild the new data matrix + datap = np.array([np.mean(np.diag(Hp[:, ::-1], w)) for w in range(-N+n, n)])[::-1] # Mean on the antidiagonals + + # Check convergence + R, Cond = check_arrcrit(s0, s, nc, tol) + # Print status + print( str(k+1)+'\t{:.5e}\t|\t{:.5e}'.format(R, tol), end='\r') + if Cond and k: + print('Cadzow converges in '+str(k+1)+' steps.'+' '*20) + break + else: + s0 = s + data0 = datap + + end_time = datetime.now() + if k+1 == itermax: + print('\tCadzow does not converge.') + if print_time is True: + print( 'Total runtime: {}'.format(end_time - start_time)) + + return datap + +def Cadzow_2D(data, n, nc, i=True, f=0.005, 
              itermax=100, print_time=True):
    """
    Performs the Cadzow denoising method on a 2D spectrum, one transient at the time. This function calls either Cadzow or iterCadzow, depending on the parameter 'i': True for iterCadzow, False for normal Cadzow.

    """
    start_time = datetime.now()
    print('\n*****************************************************')
    print('*                                                   *')
    print('*                  Cadzow denoising                 *')
    print('*                                                   *')
    print('*****************************************************\n')

    datap = np.zeros_like(data)
    # Denoise one transient (row) at a time
    for k in range(data.shape[0]):
        print('Processing of transient '+str(k+1)+' of '+str(data.shape[0]), end='\r')
        if i:
            datap[k] = processing.iterCadzow(data[k], n=n, nc=nc, f=f, itermax=itermax, print_head=False, print_time=False)
        else:
            datap[k] = processing.Cadzow(data[k], n=n, nc=nc, print_head=False)
    print('Processing has ended!\n', end='\r')
    end_time = datetime.now()
    if print_time is True:
        print( 'Total runtime: {}'.format(end_time - start_time))

    return datap










#-------------------------------------------------------------------------------------------------------------------

# BASELINE


def interactive_basl_windows(ppm, data):
    """
    Allows for interactive partitioning of a spectrum in windows.
    Double left click to add a bar, double right click to remove it.
    Returns the location of the red bars as a list.
    -------
    Parameters:
    - ppm: 1darray
        PPM scale of the spectrum
    - data: 1darray
        Spectrum to be partitioned
    -------
    Returns
    - coord: list
        List containing the coordinates of the windows, plus ppm[0] and ppm[-1]
    """

    # Make the figure
    fig = plt.figure()
    fig.set_size_inches(15,8)
    ax = fig.add_subplot(1,1,1)
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.95)

    ax.set_title('Divide the spectrum into windows. Double click to set a wall, right click to remove it')

    # Set figure borders

    spectrum = figures.ax1D(ax, ppm, data)

    # Parameters to save coordinates
    coord = []      # Final list of coordinates
    dotvline = []   # Vertical lines

    def on_click(event):
        # What happens if you click?
        if event.inaxes == ax:
            pass
        else:
            return None

        x = event.xdata     # x,y position of cursor
        if x is not None:   # You are inside the figure
            idx, ix = misc.ppmfind(ppm, x)
            if str(event.button) == 'MouseButton.LEFT' and event.dblclick:  # Left click: add point
                if ix not in coord:     # Avoid superimposed peaks
                    coord.append(ix)    # Update list
                    # Update figure:
                    # add bullet
                    dotvline.append(ax.axvline(ix, c='r', lw=0.4))
            if str(event.button) == 'MouseButton.RIGHT':    # Right click: remove point
                if ix in coord:     # only if the point is already selected
                    # Remove coordinates and all figure elements
                    i = coord.index(ix)
                    coord.remove(ix)
                    killv = dotvline.pop(i)
                    killv.remove()

        fig.canvas.draw()

    misc.set_fontsizes(ax, 14)
    # Widgets
    cursor = Cursor(ax, useblit=True, color='k', linewidth=0.2)
    mouse = fig.canvas.mpl_connect('button_press_event', on_click)

    plt.show()
    plt.close()


    # Append initial and final values of the ppm scale
    coord.append(ppm[0])
    coord.append(ppm[-1])
    # Sort the coordinates
    coord = sorted(coord)

    return coord


def make_polynomion_baseline(ppm, data, limits):
    """
    Interactive baseline correction with 4th degree polynomion.
    -------
    Parameters:
    - ppm: 1darray
        PPM scale of the spectrum
    - data: 1darray
        spectrum
    - limits: tuple
        Window limits (left, right).
+ ------- + Returns: + - mode: str + Baseline correction mode: 'polynomion' as default, 'spline' if you press the button + - C_f: 1darray or str + Baseline polynomion coefficients, or 'callintsmooth' if you press the spline button + """ + + # Initialize mode + mode = 'polynomion' + + # Lenght of data + N = data.shape[-1] + + # Get index for the limits + lim1 = misc.ppmfind(ppm, limits[0])[0] + lim2 = misc.ppmfind(ppm, limits[1])[0] + lim1, lim2 = min(lim1, lim2), max(lim1, lim2) + + # make boxes for widgets + poly_box = plt.axes([0.87, 0.10, 0.10, 0.3]) + su_box = plt.axes([0.815, 0.825, 0.08, 0.075]) + giu_box = plt.axes([0.894, 0.825, 0.08, 0.075]) + callspline_box = plt.axes([0.825, 0.625, 0.15, 0.075]) + save_box = plt.axes([0.88, 0.725, 0.085, 0.04]) + reset_box = plt.axes([0.88, 0.765, 0.085, 0.04]) + + # Make widgets + # Buttons + up_button = Button(su_box, '$\\uparrow$', hovercolor = '0.975') + down_button = Button(giu_box, '$\\downarrow$', hovercolor = '0.975') + save_button = Button(save_box, 'SAVE', hovercolor = '0.975') + reset_button = Button(reset_box, 'RESET', hovercolor = '0.975') + callspline_button = Button(callspline_box, 'SPLINE BASELINE\nCORRECTION', hovercolor = '0.975') + + # Radio + poly_name = ['a', 'b', 'c', 'd', 'e'] + poly_radio = RadioButtons(poly_box, poly_name, activecolor='tab:orange') # Polynomion + + # Create variable for the 'active' status + stats = np.zeros(len(poly_name)) + # a b c d e + stats[0] = 1 + + # Initial values + # Polynomion coefficients + C = np.zeros(len(poly_name)) + # Increase step for the polynomion (order of magnitude) + om = np.zeros(len(poly_name)) + + # Functions connected to the widgets + def statmod(label): + # Sets 'label' as active modifying 'stats' + nonlocal stats + if label in poly_name: # if baseline + stats = np.zeros(len(poly_name)) + for k, L in enumerate(poly_name): + if label == L: + stats[k] = 1 + update(0) # Call update to redraw the figure + + def roll_up_p(event): + # Increase polynomion 
with mouse scroll + nonlocal C + for k in range(len(poly_name)): + if stats[k]: + C[k]+=10**om[k] + + def roll_down_p(event): + # Decrease polynomion with mouse scroll + nonlocal C + for k in range(len(poly_name)): + if stats[k]: + C[k]-=10**om[k] + + def up_om(event): + # Increase the om of the active coefficient by 1 + nonlocal om + for k in range(len(poly_name)): + if stats[k]: + om[k] += 1 + + def down_om(event): + # Decrease the om of the active coefficient by 1 + nonlocal om + for k in range(len(poly_name)): + if stats[k]: + om[k] -= 1 + + def on_scroll(event): + # Mouse scroll + if event.button == 'up': + roll_up_p(event) + elif event.button == 'down': + roll_down_p(event) + update(0) + + # polynomion + x = np.linspace(0, 1, ppm[lim1:lim2].shape[-1])[::-1] + y = np.zeros_like(x) + + + # Initial figure + fig = plt.figure(1) + fig.set_size_inches(15,8) + plt.subplots_adjust(bottom=0.10, top=0.90, left=0.05, right=0.80) + ax = fig.add_subplot(1,1,1) + + ax.plot(ppm[lim1:lim2], data[lim1:lim2], label='Spectrum', lw=1.0, c='tab:blue') # experimental + + poly_plot, = ax.plot(ppm[lim1:lim2], y, label = 'Baseline', lw=0.8, c='tab:orange') # Polynomion + + # make pretty scale + ax.set_xlim(max(limits),min(limits)) + misc.pretty_scale(ax, ax.get_xlim(), axis='x', n_major_ticks=10) + misc.set_ylim(ax, data[lim1:lim2]) + + + def update(val): + # Calculates and draws all the figure elements + y = misc.polyn(x, C) + poly_plot.set_ydata(y) + values_print.set_text('{:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}'.format(C[0], C[1], C[2], C[3], C[4])) + plt.draw() + + def reset(event): + # Sets all the widgets to their starting values + nonlocal C, om + C = np.zeros(len(poly_name)) + om = np.zeros_like(C) + update(0) # to update the figure + + # Declare variables to store the final values + C_f = np.zeros_like(C) + def save(event): + # Put current values in the final variables that are returned + nonlocal C_f + C_f = np.copy(C) + + def use_spline_instead(X): + # Close 
everything and return + nonlocal mode, C_f + plt.close() + mode = 'spline' + C_f = 'callintsmooth' + + # Header for current values print + plt.text(0.1, 0.04, + '{:_^11}, {:_^11}, {:_^11}, {:_^11}, {:_^11}'.format('a', 'b', 'c', 'd', 'e'), + ha='left', va='bottom', transform=fig.transFigure, fontsize=10) + values_print = plt.text(0.1, 0.01, + '{:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}, {:+5.2e}'.format(*C), + ha='left', va='bottom', transform=fig.transFigure, fontsize=10) + misc.set_fontsizes(ax, 14) + + # Connect widgets to functions + poly_radio.on_clicked(statmod) + up_button.on_clicked(up_om) + down_button.on_clicked(down_om) + scroll = fig.canvas.mpl_connect('scroll_event', on_scroll) + save_button.on_clicked(save) + reset_button.on_clicked(reset) + callspline_button.on_clicked(use_spline_instead) + + ax.legend() + plt.show() + plt.close() + + return mode, C_f + + +def write_basl_info(f, limits, mode, data): + """ + Writes the baseline parameters of a certain window in a file. + -------- + Parameters: + - f: TextIO object + File where to write the parameters + - limits: tuple + Limits of the spectral window. 
(left, right) + - mode: str + Baseline correction mode: 'polynomion' or 'spline' + - data: float or 1darray + It can be either the spline smoothing factor or the polynomion coefficients + """ + f.write('***{:^54}***\n'.format('WINDOW LIMITS /PPM')) + f.write('{: 8.3f}\t{: 8.3f}\n'.format(limits[0], limits[1])) + f.write('***{:^54}***\n'.format('BASELINE CORRECTION MODE')) + f.write('{}\n'.format(mode)) + f.write('***{:^54}***\n'.format('POLYNOMION COEFFICIENTS')) + if mode == 'polynomion': + N = len(data) + for k, c in enumerate(data): + if k < N - 1: + f.write('{: 5.2e}\t'.format(c)) + else: + f.write('{: 5.2e}\n'.format(c)) + break + else: + N = 5 + for k, c in enumerate(np.zeros(5)): + if k < N - 1: + f.write('{: 5.2e}\t'.format(c)) + else: + f.write('{: 5.2e}\n'.format(c)) + break + f.write('***{:^54}***\n'.format('SPLINE SMOOTHING FACTOR')) + if mode == 'spline': + f.write('{:5.3e}\n'.format(data)) + else: + f.write('{:5.3e}\n'.format(0)) + f.write('***{:^54}***\n'.format('-'*50)) + + +def baseline_correction(ppm, data, basl_file='spectrum.basl', winlim=None): + """ + Interactively corrects the baseline of a given spectrum and saves the parameters in a file. + The program starts with an interface to partition the spectrum in windows to correct separately. + Then, for each window, an interactive panel opens to allow the user to compute the baseline. + -------- + Parameters: + - ppm: 1darray + PPM scale of the spectrum + - data: 1darray + The spectrum of which to adjust the baseline + - basl_file: str + Name for the baseline parameters file + - winlim: list or str or None + List of the breakpoints for the window. If it is str, indicates the location of a file to be read with np.loadtxt. If it is None, the partitioning is done interactively. + """ + + # Check if winlim is passed as list + if isinstance(winlim, list): + coord = winlim + elif isinstance(winlim, str): + # It means it is a file. 
Try to read it + if os.path.exists(winlim): + coord = list(np.loadtxt(winlim)) + else: + raise NameError('File {} not found.'.format(winlim)) + else: + # Interactive partitioning + coord = processing.interactive_basl_windows(ppm, data) + + # Clear the file + if os.path.exists(basl_file): + os.remove(basl_file) + + # Open the file + F = open(basl_file, 'a') + for i, _ in enumerate(coord): + if i == len(coord) - 1: + break # Stop before it raises error + limits = coord[i], coord[i+1] + mode, C_f = processing.make_polynomion_baseline(ppm, data, limits) # Interactive polynomion + if isinstance(C_f, str): # If you press "use spline" in the polynomion interactive figure + # Get the limits + lim1 = misc.ppmfind(ppm, limits[0])[0] + lim2 = misc.ppmfind(ppm, limits[1])[0] + lim1, lim2 = min(lim1, lim2), max(lim1, lim2) + # trim ppm and data + xdata, ydata = ppm[lim1:lim2], data[lim1:lim2] + # Calculate the spline + _, C_f = fit.interactive_smoothing(xdata, ydata) + # Write the section in the file + processing.write_basl_info(F, limits, mode, C_f) + F.close() + +def load_baseline(filename, ppm, data): + """ + Read the baseline parameters from a file and builds the baseline itself. 
+ ------- + Parameters: + - filename: str + Location of the baseline file + - ppm: 1darray + PPM scale of the spectrum + - data: 1darray + Spectrum of which to correct the baseline + ------- + Returns: + - baseline: 1darray + Computed baseline + """ + + # Opens the file + f = open(filename, 'r') + r = f.readlines() + + # Initialize the lists of the variables + limits = [] # Window limits + mode = [] # Baseline correction mode + C = [] # Polynomion coefficients + S = [] # Spline smoothing factor + + tmpmode = None # Correction mode for the active section + for k, line in enumerate(r): + # Read the limits + if 'WINDOW LIMITS /PPM' in line: + Q = r[k+1] + Q = Q.replace('\t', ', ') + limits.append(eval(Q)) + continue + # Read mode + if 'BASELINE CORRECTION MODE' in line: + tmpmode = r[k+1].strip() + mode.append(tmpmode) + continue + # Read the polynomion coefficients + if 'POLYNOMION COEFFICIENTS' in line: + if tmpmode == 'polynomion': + Q = r[k+1] + Q = Q.replace('\t', ',') + C.append(np.array(eval('['+Q+']'))) + else: + C.append(np.zeros(5)) + continue + # Read the spline smoothing factor + if 'SPLINE SMOOTHING FACTOR' in line: + if tmpmode == 'spline': + Q = r[k+1] + S.append(eval(Q)) + else: + S.append(0) + continue + # Reset tmpmode + if '-----' in line: + tmpmode = None + continue + + # Now, make the baseline + + # Initialize flat baseline + baseline = np.zeros_like(ppm) + n_w = len(limits) # Number of windows + + for k in range(n_w): + # Translate the limits in points + lim1 = misc.ppmfind(ppm, limits[k][0])[0] + lim2 = misc.ppmfind(ppm, limits[k][1])[0] + lim1, lim2 = min(lim1, lim2), max(lim1, lim2) + + if mode[k] == 'polynomion': # Compute polynomion in the active region + x = np.linspace(0, 1, ppm[lim1:lim2].shape[-1])[::-1] + tmpbasl = misc.polyn(x, C[k]) + elif mode[k] == 'spline': # Fit the spectrum in the active region with a spline + y = data[lim1:lim2] + tmpbasl = fit.smooth_spl(y, S[k]) + # Put the just computed baseline in the corresponding region + 
baseline[lim1:lim2] = tmpbasl + + return baseline + +def qfil(ppm, data, u, s): + """ + Suppress signals in the spectrum using a gaussian filter. + --------- + Parameters: + - ppm: 1darray + Scale on which to build the filter + - data: ndarray + Data to be processed. The filter is applied on the last dimension + - u: float + Position of the filter + - s: float + Width of the filter (standard deviation) + -------- + Returns: + - pdata: ndarray + Filtered data + """ + G = sim.gaussian_filter(ppm, u, s) + datap = np.zeros_like(data) + datap[...,:] = data[...,:] * G + return datap + +def interactive_qfil(ppm, data_in): + """ + Interactive function to design a gaussian filter with the aim of suppressing signals in the spectrum. + You can adjust position and width of the filter scrolling with the mouse. + --------- + Parameters: + - ppm: 1darray + Scale on which the filter will be built + - data_in: 1darray + Spectrum on which to apply the filter. + --------- + Returns: + - u: float + Position of the gaussian filter + - s: float + Width of the gaussian filter (Standard deviation) + """ + + # Safe copy + data = np.copy(data_in.real) + + # Initialize the values: u at the center of the spectrum, s as 100 points + u = np.mean(ppm) + s = 100 * misc.calcres(ppm) + + sens = 0.2 # one mouse 'tick' + stat = 0 # move u + + # Make the filter with start values + G = sim.f_gaussian(ppm, u, s) + G /= max(G) # Normalize it to preserve intensities + + # Make the figure + fig = plt.figure() + fig.set_size_inches(figures.figsize_large) + plt.subplots_adjust(left=0.10, bottom=0.15, right=0.85, top=0.90) + ax = fig.add_subplot(1,1,1) + + # Plot + # Original spectrum + figures.ax1D(ax, ppm, data, c='tab:blue', lw=0.8, X_label='$\delta\, $/ppm', Y_label='Intensity /a.u.', label='Original') + # Filter + G_plot, = ax.plot(ppm, G*np.max(data), c='tab:orange', lw=0.6, ls='--', label='Filter') + # Processed data + pdata = data * (1 - G) # Compute it + p_spect, = ax.plot(ppm, pdata, c='tab:red', 
lw=0.7, label='Processed') + + # -------------------------------------------------- + + # WIDGETS + # Radio-buttons to select which value to modify + radio_box = plt.axes([0.875, 0.40, 0.10, 0.20]) + radio_labels = ['u', 's'] + radio = RadioButtons(radio_box, radio_labels) + + # Modify sensitivity buttons + up_box = plt.axes([0.875, 0.70, 0.05, 0.05]) + up_button = Button(up_box, r'$\uparrow$') + dn_box = plt.axes([0.925, 0.70, 0.05, 0.05]) + dn_button = Button(dn_box, r'$\downarrow$') + + # FUNCTIONS CONNECTED TO WIDGETS + def up_sens(event): + """ Double sens """ + nonlocal sens + sens *= 2 + def dn_sens(event): + """ Halves sens """ + nonlocal sens + sens /= 2 + + def radio_func(label): + """ Change the variable 'stats' according to the radiobutton """ + nonlocal stat + if label == radio_labels[0]: # u + stat = 0 + elif label == radio_labels[1]: # s + stat = 1 + + def on_scroll(event): + """ On mouse scroll, modify the correspondant value, then redraw the figure """ + nonlocal u, s + if event.button == 'up': + if stat: # s + s += sens + else: # u + u += sens + elif event.button == 'down': + if stat: # s + s -= sens + if s < 0: # Safety check + s = 0 + else: # u + u -= sens + update() + + def update(): + """ Redraw the figure """ + # Compute the filter with the new values + G_in = sim.f_gaussian(ppm, u, s) + G_in /= max(G_in) + # Multiply * max(data) to make it visible + G_plot.set_ydata(G_in*np.max(data)) + # Compute processed data + pdata = data * (1 - G_in) + p_spect.set_ydata(pdata) + plt.draw() + + # -------------------------------------------------- + + # CONNECT WIDGETS TO THE FUNCTIONS + up_button.on_clicked(up_sens) + dn_button.on_clicked(dn_sens) + radio.on_clicked(radio_func) + fig.canvas.mpl_connect('scroll_event', on_scroll) + + # -------------------------------------------------- + + # Adjust figure appearence + ax.legend(loc='upper right', fontsize=12) + misc.mathformat(ax) + misc.set_fontsizes(ax, 14) + plt.show() + plt.close() + + return u, s + 
diff --git a/klassez/qsin.py b/klassez/qsin.py new file mode 100644 index 0000000..042e759 --- /dev/null +++ b/klassez/qsin.py @@ -0,0 +1,17 @@ +#! /usr/bin/env python3 + +import numpy as np + +def qsin(data, ssb): + """ + Sine-squared apodization. + """ + + if ssb == 0 or ssb == 1: + off = 0 + else: + off = 1/ssb + end = 1 + size = data.shape[-1] + apod = np.power(np.sin(np.pi * off + np.pi * (end - off) * np.arange(size) / (size)).astype(data.dtype), 2).astype(data.dtype) + return apod * data diff --git a/klassez/sim.py b/klassez/sim.py new file mode 100644 index 0000000..e1f9b70 --- /dev/null +++ b/klassez/sim.py @@ -0,0 +1,963 @@ +#! /usr/bin/env python3 + +import os +import sys +import numpy as np +from scipy import linalg, stats +from scipy.spatial import ConvexHull +import random +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.cm as cm +from matplotlib.widgets import Slider, Button, RadioButtons, TextBox, CheckButtons, Cursor, LassoSelector +from matplotlib.path import Path +import seaborn as sns +import nmrglue as ng +import lmfit as l +from datetime import datetime +import warnings + +from . 
import fit, misc, sim, figures, processing +from .config import CM +#from .__init__ import CM + + +gamma = { # gyromagnetic ratio of all NMR active nuclei in MHz/T + '1H' : 42.57748, + '2H' : 6.53564, + '3H' : 45.41461, + '3He' : 32.43467, + '6Li' : 6.2657, + '7Li' : 16.54646, + '9Be' : -5.98384, + '10B' : 4.57538, + '11B' : 13.66056, + '13C' : 10.70611, + '14N' : 3.0758, + '15N' : -4.31438, + '17O' : -5.77223, + '19F' : 40.02581, + '21Ne' : 3.36107, + '23Na' : 11.26259, + '25Mg' : 2.60532, + '27Al' : 11.09441, + '29Si' : -8.45802, + '31P' : 17.23579, + '33S' : 3.26527, + '35Cl' : 4.17174, + '37Cl' : 3.47219, + '39K' : 1.98667, + '41K' : 1.09041, + '43Ca' : -2.86461, + '45Sc' : 10.34335, + '47Ti' : -2.40009, + '49Ti' : -2.40052, + '50V' : 4.24497, + '51V' : 11.19277, + '53Cr' : 2.40648, + '55Mn' : 10.45975, + '57Fe' : 1.37568, + '59Co' : -10.05425, + '61Ni' : -3.80472, + '63Cu' : 11.28559, + '65Cu' : 12.08945, + '67Zn' : 2.66322, + '69Ga' : 10.21987, + '71Ga' : 12.98443, + '73Ge' : -1.4851, + '75As' : 7.29224, + '77Se' : 8.11825, + '79Br' : 8.53849, + '81Br' : 11.49847, + '83Kr' : -1.63838, + '85Rb' : 4.11086, + '87Rb' : 4.11086, + '87Sr' : 13.93135, + '89Y' : -2.08587, + '91Zr' : -3.9729, + '93Nb' : 10.40764, + '95Mo' : 2.77392, + '97Mo' : -2.83225, + '99Ru' : -1.96452, + '99Tc' : 9.58334, + '101Ru' : -2.20211, + '103Rh' : 1.33991, + '105Pd' : -1.94835, + '107Ag' : -1.72311, + '109Ag' : -1.9807, + '111Cd' : -9.02855, + '113Cd' : -9.44496, + '113In' : 9.31042, + '115In' : 9.33043, + '117Sn' : -15.16865, + '119Sn' : -15.86948, + '121Sb' : 10.18922, + '123Sb' : 5.51762, + '123Te' : -11.15871, + '125Te' : -13.45065, + '127I' : 8.51848, + '129Xe' : 11.77736, + '131Xe' : 3.49093, + '133Cs' : 5.58489, + '135Ba' : 4.22965, + '137Ba' : 4.73164, + '138La' : 5.61725, + '139La' : 6.01449, + '141Pr' : 12.50032, + '143Nd' : -2.31494, + '145Nd' : -1.42209, + '147Sm' : -1.75845, + '149Sm' : -1.39995, + '151Eu' : 10.55921, + '153Eu' : 4.66266, + '155Gd' : -1.61582, + '157Gd' : 
-1.99986, + '159Tb' : 9.6604, + '161Dy' : -1.4025, + '163Dy' : 1.99859, + '165Ho' : 8.73009, + '167Er' : -1.23049, + '169Tm' : -3.51988, + '171Yb' : 7.49917, + '173Yb' : -2.06586, + '175Lu' : 4.85681, + '177Hf' : 1.3199, + '177Lu' : 1.10276, + '179Hf' : -0.60002, + '181Ta' : 5.0961, + '183W' : 1.77165, + '185Re' : 9.58589, + '187Os' : 0.73542, + '187Re' : 9.68382, + '189Os' : 3.30359, + '191Ir' : 0.54893, + '193Ir' : 0.59747, + '195Pt' : 9.15331, + '197Au' : 0.54701, + '199Hg' : 7.59029, + '201Hg' : -2.81011, + '203Tl' : 24.33303, + '205Tl' : 24.57061, + '207Pb' : 8.90806, + '209Bi' : 6.8422, + '235U' : 0.5716, + } + +def calc_splitting(u0, I0, m=1, J=0): + """ + Calculate the frequency and the intensities of a NMR signal splitted by scalar coupling. + ------- + Parameters: + - u0: float + Frequency of the non-splitted signal (Hz) + - I0: float + Total intensity of the non-splitted signal. + - m: int + Multiplicity, i.e. number of expected signals after the splitting + - J: float + Scalar coupling constant (Hz) + ------ + Returns: + - u_s: 1darray + Frequencies of the splitted signal (Hz) + - I_s: Intensities of the splitted signal + """ + # FREQUENCIES + u_s = [] + # if m=2 => J_pattern = -0.5 +0.5 + # if m=3 => J_pattern = -1 0 1 + J_pattern = np.arange(m) - np.mean(np.arange(m)) + for k in J_pattern: + u_s.append(u0 + k*J) + u_s = np.array(u_s) + + # INTENSITIES + base_int = misc.binomial_triangle(m) / (2**(m-1)) + I_s = base_int * I0 + + return u_s, I_s + + +def multiplet(u, I, m='s', J=[]): + """ + Split a given signal according to a scalar coupling pattern. + ------- + Parameters: + - u: float + Frequency of the non-splitted signal (Hz) + - I: float + Intensity of the non-splitted signal + - m: str + Organic chemistry-like multiplet, i.e. s, d, dqt, etc. + - J: float or list + Scalar coupling constants. 
The number of constants should match the number of coupling branches + -------- + Returns: + - u_in: list + List of the splitted frequencies (Hz) + - I_in: list + Intensities of the splitted signal + """ + n_splitting = len(m) # Number of splittings + + # Adjust the variables to make them fit in the loop + if m=='s': # Singlet: J is useless + J_in = [0] + elif isinstance(J, (list, tuple, np.ndarray)): + J_in = J + else: + J_in = [J] + + u_in = [u] + I_in = [I] + for n in range(n_splitting): # Loop in the number of splitting + u_ret, I_ret = [], [] # Declare empty lists + for k, _ in enumerate(u_in): # u_in expands according to the splitting tree + if m[n] == 'd': # doublet + mult = 2 + elif m[n] == 't': # triplet + mult = 3 + elif m[n] == 'q': # quartet + mult = 4 + else: # anything else is useless + mult = 1 + + # Compute the splitting + u_s, I_s = sim.calc_splitting(u_in[k], I_in[k], mult, J_in[n]) + + for w, v in zip(u_s, I_s): # Fill the _ret lists with the splitted signals + u_ret.append(w) + I_ret.append(v) + + # Replace the input variables with the splitted ones + u_in = u_ret + I_in = I_ret + + return u_in, I_in + + +def load_sim_1D(File): + """ + Creates a dictionary from the spectral parameters listed in the input file. + ------- + Parameters: + - File: str + Path to the input file location + ------- + Returns: + - dic: dict + Dictionary of the parameters, ready to be read from the simulation functions. 
+ """ + + inp = open(File, 'r').readlines() + keys = [] + vals = [] + for i in range(len(inp)): + if inp[i] == '\n' or inp[i][0] == '#': + continue # skip empty lines or comments + line = inp[i].split('\t', 1) # separate key from the rest + if ' ' in line[0]: + line = line[0].split(' ', 1) + keys.append(line[0]) + + rest = line[1].strip() + rest = rest.split('\t') + rest = rest[0].split('#') + try: + value = eval(rest[0]) + except: + value = (f'{rest[0]}') + vals.append(value) + + + dic = {} + for i, key in enumerate(keys): + if 'nuc' in key: # Remove unwanted spaces + vals[i] = vals[i].replace(' ', '') + dic[key] = vals[i] + if 'phases' not in keys: + dic['phases'] = tuple([0 for w in dic['shifts']]) + else: + dic['phases'] = tuple([w * np.pi / 180 for w in dic['phases']]) + if 'mult' not in keys: # Multiplicity + dic['mult'] = tuple(['s' for w in dic['shifts']]) + else: + dic['mult'] = tuple(dic['mult'].strip(',').replace(' ', '').split(',')) + if 'Jconst' not in keys: # Coupling constants + dic['Jconst'] = tuple([0 for w in dic['shifts']]) + + dic['TD'] = int(dic['TD']) + dic['SFO1'] = dic['B0'] * sim.gamma[dic['nuc']] + dic['SW'] = dic['SWp'] * np.abs(dic['SFO1']) + dic['dw'] = 1/dic['SW'] + dic['t1'] = np.linspace(0, dic['TD']*dic['dw'], dic['TD']) + dic['AQ'] = dic['t1'][-1] + dic['o1'] = dic['o1p'] * dic['SFO1'] + + return dic + +def sim_1D(File, pv=False): + """ + Simulates a 1D NMR spectrum from the instructions written in File. + ------- + Parameters + - File: str + Path to the input file location + - pv: bool + True for pseudo-Voigt model, False for Voigt model. + ------- + Returns + - fid: 1darray + FID of the simulated spectrum. 
+ """ + if isinstance(File, str): + in_file = load_sim_1D(File) + elif isinstance(File, dict): + in_file = File + else: + raise ValueError('Unknown file type, aborting...') + + TD = in_file['TD'] # Points of the FID + + shifts = np.array(in_file['shifts']) # Chemical shift /ppm + amplitudes = in_file['amplitudes'] # Relative intensity of the signals + fwhm = np.array(in_file['fwhm']) # Full width at half maximum of the signals + x_g = in_file['x_g'] # Fraction of gaussianity of the FID + phases = in_file['phases'] + + freq = misc.ppm2freq(shifts, B0=in_file['SFO1'], o1p=in_file['o1p']) # peaks center frequency + + fid = np.zeros(TD, dtype='complex64') # empty FID + for j, _ in enumerate(freq): + # Account for multiplicity + u_split, A_split = multiplet(freq[j], amplitudes[j], m=in_file['mult'][j], J=in_file['Jconst'][j]) + for u, I in zip(u_split, A_split): + sgn_par = dict(t=in_file['t1'], u=u, fwhm=2*np.pi*fwhm[j], x_g=x_g[j], A=I, phi=phases[j] ) + if pv: # Generate pseudo-voigt signals + fid += sim.t_pvoigt(**sgn_par) + else: # Make Voigt signals + fid += sim.t_voigt(**sgn_par) + return fid + +def load_sim_2D(File, states=True): + """ + Creates a dictionary from the spectral parameters listed in the input file. + ------- + Parameters + - File: str + Path to the input file location + - states: bool + If FnMODE is States or States-TPPI, set it to True to get the correct timescale. + ------- + Returns + - dic: dict + Dictionary of the parameters, ready to be read from the simulation functions. 
+ """ + inp = open(File, 'r').readlines() + keys = [] + vals = [] + for i in range(len(inp)): + if inp[i] == '\n' or inp[i][0] == '#': + continue + line = inp[i].split('\t', 1) + if ' ' in line[0]: + line = line[0].split(' ', 1) + keys.append(line[0]) + + rest = line[1].strip() + rest = rest.split('\t') + rest = rest[0].split('#') + try: + value = eval(rest[0]) + except: + value = str(rest[0]) + vals.append(value) + + dic = {} + for i, key in enumerate(keys): + if 'nuc' in key: # Remove unwanted spaces + vals[i] = vals[i].replace(' ', '') + dic[key] = vals[i] + + for key, value in dic.items(): + if 'TD' in key: + dic[key] = int(value) + dic['SFO1'] = dic['B0'] * sim.gamma[dic['nuc1']] # Larmor frequency /MHz + dic['SFO2'] = dic['B0'] * sim.gamma[dic['nuc2']] # Larmor frequency /MHz + dic['SW1'] = np.abs(dic['SW1p'] * dic['SFO1']) # spectral width + dic['SW2'] = np.abs(dic['SW2p'] * dic['SFO2']) # spectral width + dic['dw1'] = np.abs(1 / dic['SW1']) # dwell time + dic['dw2'] = np.abs(1 / dic['SW2']) # dwell time + dic['o1'] = dic['o1p'] * dic['SFO1'] + dic['o2'] = dic['o2p'] * dic['SFO2'] + if states: + dic['t1'] = np.linspace(0, dic['TD1']//2 * dic['dw1'], dic['TD1']) # acquisition time scale + else: + dic['t1'] = np.linspace(0, dic['TD1'] * dic['dw1'], dic['TD1']) # acquisition time scale + dic['t2'] = np.linspace(0, dic['TD2'] * dic['dw2'], dic['TD2']) # acquisition time scale + dic['AQ1'] = dic['t1'][-1] + dic['AQ2'] = dic['t2'][-1] + + return dic + +def sim_2D(File, states=True, alt=True, pv=False): + """ + Simulates a 2D NMR spectrum from the instructions written in File. + The indirect dimension is sampled with states-TPPI as default. + -------- + Parameters + - File: str + Path to the input file location + - states: bool + Set it to True to allow for correct spectral arrangement in the indirect dimension. + - alt: bool + Set it to True to allow for correct spectral arrangement in the indirect dimension. 
+ - pv: bool + True for pseudo-Voigt model, False for Voigt model. + -------- + Returns + - fid: 2darray + FID of the simulated spectrum. + """ + + # Generates a dictionary of parameters from an input file + if isinstance(File, str): + in_file = sim.load_sim_2D(File, states=states) + elif isinstance(File, dict): + in_file = File + else: + raise ValueError('Unknown file type, aborting...') + + + # recall of timescales from in_file + t1 = in_file['t1'] + t2 = in_file['t2'] + + # recall of peaks parameters from in_file + # reshape is needed to allow for correct indexing + shifts_f1 = np.array(in_file['shifts_f1']).reshape(-1) # chemical shift in F1 + shifts_f2 = np.array(in_file['shifts_f2']).reshape(-1) # chemical shift in F2 + fwhm_f1 = np.array(in_file['fwhm_f1']).reshape(-1) # FWHM of peaks in F1 + fwhm_f2 = np.array(in_file['fwhm_f2']).reshape(-1) # FWHM of peaks in F2 + amplitudes = np.array(in_file['amplitudes']).reshape(-1) # relative intensity + x_g = np.array(in_file['x_g']).reshape(-1) # fraction of gaussianity + + # conversion of FWHM from Hz to radians + fwhm1 = 2 * np.pi * fwhm_f1 + fwhm2 = 2 * np.pi * fwhm_f2 + + # calculation of stdev for gaussian peaks + sigma1 = fwhm1 / 2.355 + sigma2 = fwhm2 / 2.355 + + # conversion of chemical shift from ppm to rad/s + freq1 = misc.ppm2freq(shifts_f1, B0=in_file['SFO1'], o1p=in_file['o1p']) # peaks center frequency + freq2 = misc.ppm2freq(shifts_f2, B0=in_file['SFO2'], o1p=in_file['o2p']) # peaks center frequency + + # creation of empty FID + fid = np.zeros((in_file['TD1'], in_file['TD2']), dtype='complex64') # empty FID + + # The number of NMR signals is retrieved from the length of the amplitudes array. + # If there is only one peak, 'ns' (number of signals) is set to 1. 
+ try: + ns = len(amplitudes) + except: + ns = 1 + # Creates a pseudo-voigt signal looping on the number of peaks + + for p in range(ns): + if pv: # Generate pseudo-Voigt signal + fid += sim.t_2Dpvoigt(t1, t2, freq1[p], freq2[p], fwhm1[p], fwhm2[p], A=amplitudes[p], x_g=x_g[p], states=states, alt=alt) + else: # Generate Voigt signal + fid += sim.t_2Dvoigt(t1, t2, freq1[p], freq2[p], fwhm1[p], fwhm2[p], A=amplitudes[p], x_g=x_g[p], states=states, alt=alt) + return fid + +def noisegen(size, o2, t2, s_n=1): + """ + Simulates additive noise in the time domain. + -------- + Parameters + - size: int or tuple + Dimension of the noise matrix + - o2: float + Carrier frequency, in Hz. + - t2: 1darray + Time scale of the last temporal dimension. + - s_n: float + Standard deviation of the noise. + -------- + Returns + - noise: 2darray + Noise matrix, of dimensions size. + """ + + # correlated part of noise until ADC + white_corr = np.random.normal(0, s_n, size) + # white noise in FID has to be centered on the offset frequency + noise_corr = white_corr * np.exp(1j* 2 * np.pi * o2 * t2) + + # uncorrelated part of noise: quadrature detection + white_re = np.random.normal(0, s_n, size) + white_im = np.random.normal(0, s_n, size) + # cosine-modulated in the real channel and sine-modulated in the imaginary channel + noise_re = white_re * np.cos( 2* np.pi * o2 * t2) + noise_im = white_im * np.sin( 2* np.pi * o2 * t2) + + # final noise is sum of the two parts + noise = noise_corr + (noise_re + 1j*noise_im) + return noise + + +def mult_noise(data_size, mean, s_n): + N = data_size[0] + + white = np.random.lognormal(mean, s_n, N) + #white = np.random.normal(0, s_n, N) + + #noisemat = np.diag(1 - 0.25 * white) + noisemat = np.diag(white) + return noisemat + + + +def water7(N, t2, vW, fwhm=300, A=1, spread=701.125): + """ + Simulates a feature like the water ridge in HSQC spectra, in the time domain. 
+ -------- + Parameters + - N: int + Number of transients + - t2: 1darray + Time scale of the last temporal dimension. + - vW: float + Nominal peak position, in Hz. + - fwhm: float + Nominal full-width at half maximum of the peak. + - A: float + Signal intensity. + - spread: float + Standard deviation of the peak position distribution, in Hz. + -------- + Returns + - ridge: 2darray + Matrix of the ridge. + """ + + + uW = np.random.normal(vW, spread, N) + s = fwhm / 2.355 # conversion from fwhm to sigma + ridge = np.zeros((N, len(t2)), dtype='complex64') + for i in range(N): + # each transient features a gaussian signal with the parameters specified above + # but it is on phase in the even transients and 90 degree dephased in the odd ones + ridge[i] = sim.t_gaussian(t2, uW[i], s, A=A, phi=np.pi/2*np.mod(i,2)) + return ridge + + +def f_gaussian(x, u, s, A=1): + """ + Gaussian function in the frequency domain: + -------- + Parameters + - x: 1darray + Independent variable + - u: float + Peak position + - s: float + Standard deviation + - A: float + Intensity + -------- + Returns + - f: 1darray + Gaussian function. + """ + if s > 0: + f = A/(np.sqrt(2 * np.pi)*s) * np.exp(-1/2*((x-u)/s)**2) + else: + f = np.zeros_like(x) + return f + +def f_lorentzian(x, u, fwhm, A=1): + """ + Lorentzian function in the time domain: + -------- + Parameters + - x: 1darray + Independent variable + - u: float + Peak position + - fwhm: float + Full-width at half-maximum, 2γ + - A: float + Intensity + -------- + Returns + - f: 1darray + Lorentzian function. 
+ """ + + hwhm = fwhm/2 # half width at half maximum + if hwhm > 0: + f = A/(np.pi) * hwhm/((x-u)**2 + hwhm**2 ) + else: + f = np.zeros_like(x) + return f + +def f_pvoigt(x, u, fwhm, A=1, x_g=0): + """ + Pseudo-Voigt function in the frequency domain: + -------- + Parameters + - x: 1darray + Independent variable + - u: float + Peak position + - fwhm: float + Full-width at half-maximum + - A: float + Intensity + - x_g: float + Fraction of gaussianity + -------- + Returns + - S: 1darray + Pseudo-Voigt function. + """ + s = fwhm / 2.355 + S = A* (sim.f_gaussian(x, u, s, A=x_g) + sim.f_lorentzian(x, u, fwhm, A=1-x_g)) + return S + +def t_gaussian(t, u, s, A=1, phi=0): + """ + Gaussian function in the time domain. + -------- + Parameters + - t: 1darray + Independent variable + - u: float + Peak position + - s: float + Standard deviation + - A: float + Intensity + - phi: float + Phase, in radians + -------- + Returns + - S: 1darray + Gaussian function. + """ + s = np.abs(s) # Avoid problems with s<0 + if s >= 0: + S = A * np.exp(1j*phi) * np.exp((1j*2*np.pi*u*t) - (t**2)*(s**2)/2) + return S + +def t_lorentzian(t, u, fwhm, A=1, phi=0): + """ + Lorentzian function in the time domain. + -------- + Parameters + - t: 1darray + Independent variable + - u: float + Peak position + - fwhm: float + Full-width at half-maximum, 2γ + - A: float + Intensity + - phi: float + Phase, in radians + -------- + Returns + - S: 1darray + Lorentzian function. + """ + hwhm = np.abs(fwhm) / 2 + S = A * np.exp(1j*phi) * np.exp((1j *2*np.pi *u * t)-(t*hwhm)) + return S + +def t_pvoigt(t, u, fwhm, A=1, x_g=0, phi=0): + """ + Pseudo-Voigt function in the time domain: + -------- + Parameters + - t: 1darray + Independent variable + - u: float + Peak position + - fwhm: float + Full-width at half-maximum + - A: float + Intensity + - x_g: float + Fraction of gaussianity + - phi: float + Phase, in radians + -------- + Returns + - S: 1darray + Pseudo-Voigt function. 
+ """ + + s = fwhm / 2.355 + S = A * (sim.t_gaussian(t, u, s, A=x_g, phi=phi) + sim.t_lorentzian(t, u, fwhm, A=1-x_g, phi=phi)) + return S + +def t_voigt(t, u, fwhm, A=1, x_g=0, phi=0): + """ + Voigt function in the time domain. The parameter x_g affects the linewidth of the lorentzian and gaussian contributions. + -------- + Parameters + - t: 1darray + Independent variable + - u: float + Peak position + - fwhm: float + Full-width at half-maximum + - A: float + Intensity + - x_g: float + Fraction of gaussianity + - phi: float + Phase, in radians + -------- + Returns + - S: 1darray + Voigt function. + """ + + s = fwhm / 2.355 + S = A * np.exp(1j*phi) * sim.t_gaussian(t, u/2, s*x_g) * sim.t_lorentzian(t, u/2, fwhm*(1-x_g)) + return S + + +def t_2Dgaussian(t1, t2, v1, v2, s1, s2, A=1, states=True, alt=True): + """ + Bidimensional gaussian function. + -------- + Parameters + - t1: 1darray + Indirect evolution timescale + - t2: 1darray + Timescale of the direct dimension + - v1: float + Peak position in the indirect dimension + - v2: float + Peak position in the direct dimension + - s1: float + Standard deviation in the indirect dimension + - s2: float + Standard deviation in the direct dimension + - A: float + Intensity + - states: bool + Set to True for "FnMODE":"States-TPPI + - alt: bool + Set to True for "FnMODE":"States-TPPI + -------- + Returns + - S: 2darray + Gaussian function. 
+ """ + if states: + # States acquires twice the same point of the indirect dimension time domain + t1[1::2] = t1[::2] + if alt: + # TPPI cycles the receiver phase of 90 degrees at each transient acquisition + freq_1 = np.zeros(len(t1), dtype='complex64') + for k in range(4): + t1t = t1[k::4] + freq_1[k::4] = np.cos( (2 * np.pi * v1 * t1t) - (0.5 * np.pi * np.mod(k,4) )) + else: + freq_1 = np.exp(1j * 2 * np.pi * v1 * t1) + # NMR signal in the direct dimension + F2 = np.exp(1j*2*np.pi*v2*t2) * np.exp(-(s2**2 * t2**2)/2) + # NMR signal in the indirect dimension + F1 = freq_1 * np.exp(-(s1**2 * t1**2)/2) + # The full FID is reconstructed by doing the external product between the two vectors + S = A * F1.reshape(-1,1) @ F2.reshape(1,-1) + return S + +def t_2Dlorentzian(t1, t2, v1, v2, fwhm1, fwhm2, A=1, states=True, alt=True): + """ + Bidimensional lorentzian function. + -------- + Parameters + - t1: 1darray + Indirect evolution timescale + - t2: 1darray + Timescale of the direct dimension + - v1: float + Peak position in the indirect dimension + - v2: float + Peak position in the direct dimension45 + - fwhm1: float + Full-width at half maximum in the indirect dimension + - fwhm2: float + Full-width at half maximum in the direct dimension + - A: float + Intensity + - states: bool + Set to True for "FnMODE":"States-TPPI + - alt: bool + Set to True for "FnMODE":"States-TPPI + -------- + Returns + - S: 2darray + Lorentzian function. 
+ """ + hwhm1 = fwhm1 / 2 + hwhm2 = fwhm2 / 2 + if states: + # States acquires twice the same point of the indirect dimension time domain + t1[1::2] = t1[::2] + if alt: + # TPPI cycles the receiver phase of 90 degrees at each transient acquisition + freq_1 = np.zeros(len(t1), dtype='complex64') + for k in range(4): + t1t = t1[k::4] + freq_1[k::4] = np.cos( (2 * np.pi * v1 * t1t) - (0.5 * np.pi * np.mod(k,4) )) + else: + freq_1 = np.exp(1j * 2 * np.pi * v1 * t1) + # NMR signal in the direct dimension + F2 = np.exp(1j*2*np.pi*v2*t2) * np.exp(-(hwhm2 * t2)) + # NMR signal in the indirect dimension + F1 = freq_1 * np.exp(-(hwhm1 * t1)) + # The full FID is reconstructed by doing the external product between the two vectors + S = A * F1.reshape(-1,1) @ F2.reshape(1,-1) + return S + +def t_2Dpvoigt(t1, t2, v1, v2, fwhm1, fwhm2, A=1, x_g=0.5, states=True, alt=True): + """ + Generates a 2D pseudo-voigt signal in the time domain. + x_g states for the fraction of gaussianity, whereas A defines the overall amplitude of the total peak. + Indexes ’1’ and ’2’ on the variables stand for ’F1’ and ’F2’, respectively. + -------- + Parameters + - t1: 1darray + Indirect evolution timescale + - t2: 1darray + Timescale of the direct dimension + - v1: float + Peak position in the indirect dimension + - v2: float + Peak position in the direct dimension + - fwhm1: float + Full-width at half maximum in the indirect dimension + - fwhm2: float + Full-width at half maximum in the direct dimension + - A: float + Intensity + - x_g: float + Fraction of gaussianity + - states: bool + Set to True for "FnMODE":"States-TPPI + - alt: bool + Set to True for "FnMODE":"States-TPPI46 + -------- + Returns + - fid: 2darray + Pseudo-Voigt function. + """ + + # stdev computed for the gaussian part. 
+ s1 = fwhm1 / 2.355 + s2 = fwhm2 / 2.355 + # Passing 's' to 'gaussian' and 'fwhm' to 'lorentzian' makes the two parts of the pseudo-voigt signal to have the same width and allow proper summation + G = sim.t_2Dgaussian(t1, t2, v1, v2, s1, s2, A=x_g, states=states, alt=alt) + L = sim.t_2Dlorentzian(t1, t2, v1, v2, fwhm1, fwhm2, A=(1-x_g), states=states, alt=alt) + fid = A * (G + L) + return fid + +def t_2Dvoigt(t1, t2, v1, v2, fwhm1, fwhm2, A=1, x_g=0.5, states=True, alt=True): + """ + Generates a 2D Voigt signal in the time domain. + x_g states for the fraction of gaussianity, whereas A defines the overall amplitude of the total peak. + Indexes ’1’ and ’2’ on the variables stand for ’F1’ and ’F2’, respectively. + -------- + Parameters + - t1: 1darray + Indirect evolution timescale + - t2: 1darray + Timescale of the direct dimension + - v1: float + Peak position in the indirect dimension + - v2: float + Peak position in the direct dimension + - fwhm1: float + Full-width at half maximum in the indirect dimension + - fwhm2: float + Full-width at half maximum in the direct dimension + - A: float + Intensity + - x_g: float + Fraction of gaussianity + - states: bool + Set to True for "FnMODE":"States-TPPI + - alt: bool + Set to True for "FnMODE":"States-TPPI + -------- + Returns + - S: 2darray + Voigt function. + """ + # stdev computed for the gaussian part. + s1 = fwhm1 / 2.355 + s2 = fwhm2 / 2.355 + # hwhm computed for the lorentzian part. 
+ hwhm1 = fwhm1 / 2 + hwhm2 = fwhm2 / 2 + if states: + # States acquires twice the same point of the indirect dimension time domain + t1[1::2] = t1[::2] + + # direct dimension + # frequency + freq_2 = np.exp(1j * 2 * np.pi * v2 * t2) + + # Add line-broadening, fist lorentzian then gaussian, using: + # hwhm' = (1 - x_g) * hwhm for L + # s' = x_g * s for G + F2 = freq_2 * np.exp(-(1-x_g)*hwhm2 * t2) * np.exp(-((x_g*s2)**2 * t2**2)/2) + + # indirect dimension + if alt: + # Redfield cycles the receiver phase of 90 degrees at each transient acquisition + freq_1 = np.zeros(len(t1), dtype='complex64') + for k in range(4): + t1t = t1[k::4] + freq_1[k::4] = np.cos( (2 * np.pi * v1 * t1t) - (0.5 * np.pi * np.mod(k,4) )) + else: + freq_1 = np.exp(1j * 2 * np.pi * v1 * t1) + # Add line-broadening, fist lorentzian then gaussian, using: + # hwhm' = (1 - x_g) * hwhm for L + # s' = x_g * s for G + F1 = freq_1 * np.exp(-(1-x_g) * hwhm1 * t1) * np.exp(-((x_g*s1)**2 * t1**2)/2) + + # The full FID is reconstructed by doing the external product between the two vectors + S = A * F1.reshape(-1,1) @ F2.reshape(1,-1) + return S + + +def gaussian_filter(ppm, u, s): + """ + Compute a gaussian filter to be used in order to suppress signals in the spectrum. 
+ --------- + Parameters: + - ppm: 1darray + Scale on which to build the filter + - u: float + Position of the filter + - s: float + Width of the filter (standard deviation) + -------- + Returns: + - G: 1darray + Computed gaussian filter + """ + G = sim.f_gaussian(ppm, u, s) + G /= max(G) # Normalize to preserve intensities + G = 1 - G + return G + diff --git a/klassez/test/acqus_1D b/klassez/test/acqus_1D new file mode 100644 index 0000000..cfd5161 --- /dev/null +++ b/klassez/test/acqus_1D @@ -0,0 +1,11 @@ +B0 16.4 # Magnetic field /T +nuc 13C # Observed nucleus +o1p 100 # Centre of the spectrum +SWp 350 # Sweep width /ppm +TD 1024 # Number of sampled complex points + +shifts 180, 40, 25 # Chemical shift /ppm +fwhm 200, 200, 150 # FWHM /Hz +amplitudes 10, 15, 50 # Intensity /a.u., only relative values actually matter +x_g 0.2, 0.2, 0.2 # Fraction of gaussianity +phases -5, 0, 5 # Phase angles /degrees diff --git a/klassez/test/acqus_2D b/klassez/test/acqus_2D new file mode 100644 index 0000000..6e46f68 --- /dev/null +++ b/klassez/test/acqus_2D @@ -0,0 +1,17 @@ +B0 16.4 # Magnetic field /T +nuc1 1H # Observed nucleus, indirect dimension +nuc2 13C # Observed nucleus, direct dimension +o1p 5 # Centre of the indirect dimension /ppm +o2p 100 # Centre of the direct dimension /ppm +SW1p 30 # Sweep width, indirect dimension /ppm +SW2p 350 # Sweep width, direct dimension /ppm +TD1 512 # Number of sampled points, indirect dimension +TD2 1024 # Number of sampled complex points, direct dimension + +shifts_f1 10, 5, 10, -5, -5 # Chemical shift /ppm, indirect dimension +shifts_f2 180, 120, 90, 90, 25 # Chemical shift /ppm, direct dimension +fwhm_f1 [200 for j in range(5)] # FWHM /Hz, indirect dimension +fwhm_f2 [300 for j in range(5)] # FHWM /Hz, direct dimension +amplitudes 100, 100, 100, 100, 100 # Intensity /a.u., only relative values matter +x_g [0.2 for j in range(5)] # Fraction of gaussianity + diff --git a/klassez/test/test.py b/klassez/test/test.py new file mode 100644 
index 0000000..ad875b4
--- /dev/null
+++ b/klassez/test/test.py
@@ -0,0 +1,14 @@
+#! /usr/bin/env python3
+
+from klassez import *
+
+# Test: import 1D spectrum
+s1 = Spectrum_1D('acqus_1D', isexp=False)
+s1.process()
+s1.F.iguess()
+s1.F.plot('iguess')
+s1.F.dofit()
+s1.F.plot('fit')
+
+s2 = Spectrum_2D('acqus_2D', isexp=False)
+s2.process()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..f1bfca4
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ['setuptools>=42']
+build-backend = 'setuptools.build_meta'
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..8bfd5a1
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..6e0545c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,30 @@
+#! /usr/bin/env python3
+
+from setuptools import setup, find_packages
+
+# Fall back to a placeholder when README.md is missing or unreadable
+# (was a bare "except:", which would also have swallowed KeyboardInterrupt)
+try:
+    with open('README.md', 'r', encoding='utf-8') as fh:
+        long_description = fh.read()
+except (OSError, UnicodeDecodeError):
+    long_description = 'LONG DESCRIPTION'
+
+setup(
+    name='klassez',
+    version='0.1a1',
+    author='Francesco Bruno',
+    author_email='bruno@cerm.unifi.it',
+    description='A collection of functions for NMR data handling. Documentation: klassez.pdf in "docs" subfolder of your install dir.',
+    url='https://test.pypi.org/legacy/klassez',
+    long_description=long_description,
+    long_description_content_type = 'text/markdown',
+    classifiers = [
+        'Programming Language :: Python :: 3',
+        'Operating System :: OS Independent',
+        'License :: OSI Approved :: MIT License'
+        ],
+    license='LICENSE.txt',
+    install_requires = ['numpy', 'scipy', 'lmfit', 'seaborn', 'nmrglue', 'matplotlib', 'csaps'],
+    packages=['klassez'],
+    include_package_data = True,
+    python_requires = '>=3.8',
+    )