diff --git a/src/icesat2waves/local_modules/jonswap_gamma.py b/src/icesat2waves/local_modules/jonswap_gamma.py
index e66bf77..07d6861 100644
--- a/src/icesat2waves/local_modules/jonswap_gamma.py
+++ b/src/icesat2waves/local_modules/jonswap_gamma.py
@@ -1,4 +1,4 @@
-
+import logging
 from scipy.constants import g
 from lmfit import minimize, Parameters
 
@@ -6,6 +6,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 
+_logger = logging.getLogger(__name__)
 
 
 # def normalize_time(time):
@@ -96,7 +97,7 @@ def JONSWAP_bulk(f, floc=0.04, famp=1e-2, gamma=3.3, peak_std=1e-1):
     delta = np.exp(-((w - wp) ** 2) / (2 * peak_std**2 * wp**2))
     peak_factor = gamma**delta
-
+    # units of m^2 / Hz
     return alpha * w ** (-5) * np.exp(-stretch * (w / wp) ** -4) * peak_factor
 
 
@@ -127,7 +128,7 @@ def pierson_moskowitz_fetch_limit(f, X, U):
     alpha = 0.076 * (g * X / U**2) ** (-0.22)
     wp = 7.0 * np.pi * (g / U) * (g * X / U**2) ** (-0.33)
-    print("wp=" + str(wp))
+    _logger.debug("wp=%s", wp)
 
     sigma_p = 0.07
     sigma_pp = 0.09
@@ -170,7 +171,7 @@ def JONSWAP_default(f, X, U, gamma=3.3):
     return (
        alpha * g**2.0 * w ** (-5.0) * np.exp(-5.0 / 4.0 * (w / wp) ** -4) * peak_factor
-    ) # Hz**-5 m**2 /s**4 = m**2 sec
+    )  # Hz**-5 m**2 /s**4 = m**2 sec
 
 
""" add function for X_tilde(X, U10), alpha(f_max, U10) and f_max(U10, X_tilde) or f_max(U, X)"""
@@ -730,7 +731,7 @@ def reg_func(p0, pi, p_err_unit):
         vd[k] = I * np.random.rand()
 
     Jm = Jm_regulizer(vd, priors)
-    print(Jm)
+    _logger.debug("Jm from regulizer: %s", Jm)
 
diff --git a/src/icesat2waves/local_modules/m_colormanager_ph3.py b/src/icesat2waves/local_modules/m_colormanager_ph3.py
index fc8d55a..9bb0326 100644
--- a/src/icesat2waves/local_modules/m_colormanager_ph3.py
+++ b/src/icesat2waves/local_modules/m_colormanager_ph3.py
@@ -1,5 +1,4 @@
 from __future__ import unicode_literals
-from __future__ import print_function
 from __future__ import division
 
 from matplotlib.colors import LinearSegmentedColormap
@@ -54,7 +53,7 @@ class color:
    def __init__(self, path=None, name=None):
        self.white=(1,1,1)
        if (path is not None) & (name is not None):
-            _logger.debug("color theme: %s", name)
+            _logger.debug('color theme: %s', name)
            try:
                theme=json_load(name, path)
                for k, v in theme.items():
@@ -173,7 +172,7 @@ def colormaps(self,n, gamma=None):
    def show(self):
        for key in self.__dict__.keys():
-            _logger.debug(key)
+            _logger.debug("key: %s", key)
 
        _logger.debug(' rels dict keys:')
        for key in self.rels.keys():
diff --git a/src/icesat2waves/local_modules/m_general_ph3.py b/src/icesat2waves/local_modules/m_general_ph3.py
index 6cbd59e..ce6381f 100644
--- a/src/icesat2waves/local_modules/m_general_ph3.py
+++ b/src/icesat2waves/local_modules/m_general_ph3.py
@@ -53,8 +53,8 @@ def colormaps(self, n, gamma=None):
         )
 
     def show(self):
-        for key in self.__dict__.keys():
-            print(key)
+        for key, value in self.__dict__.items():
+            _logger.debug("Key: %s, Value: %s", key, value)
 
 
 class FigureAxisXY:
@@ -296,7 +296,7 @@ def linear(self):
             self.F.ax.set_yscale("log", nonposy="clip")
         tt = self.time.astype(DT.datetime)
         self.cs = plt.contourf(tt[:-2], self.fs[:], dd, self.clevs, cmap=self.cmap)
-        print(self.clevs)
+        _logger.debug("clevels: %s", self.clevs)
         plt.ylabel(f"Power db({self.data_unit}^2/{self.sample_unit})")
         plt.xlabel(f"f ({self.sample_unit})")
         self.cbar = plt.colorbar(self.cs, pad=0.01)
@@ -341,10 +341,11 @@ def power(self, anomalie=False):
             self.F.ax.set_yscale("log", nonposy="clip")
 
         tt = self.time.astype(DT.datetime)
-        print(tt[:-1].shape, self.fs[:].shape, dd.T.shape)
+        _logger.debug("Shape of tt: %s, Shape of fs: %s, Shape of dd.T: %s",
+                      tt[:-1].shape, self.fs[:].shape, dd.T.shape)
         self.cs = plt.contourf(tt[:-1], self.fs[:], dd.T, self.clevs, cmap=self.cmap)
         self.x = np.arange(0, tt[:-1].size)
-        print(self.clevs)
+        _logger.debug("clevels: %s", self.clevs)
         plt.xlabel("Time")
         plt.ylabel(f"f ({self.sample_unit})")
         self.cbar = plt.colorbar(self.cs, pad=0.01)
@@ -477,14 +478,14 @@ def power_imshow(
             fn = self.fs
 
         if isinstance(tt[0], np.datetime64):
-            print("time axis is numpy.datetime64, converted to number for plotting")
+            _logger.debug("time axis is numpy.datetime64, converted to number for plotting")
             ttt = dates.date2num(tt.astype(DT.datetime))
         elif isinstance(tt[0], np.timedelta64):
-            print("time axis is numpy.timedelta64, converted to number for plotting")
+            _logger.debug("time axis is numpy.timedelta64, converted to number for plotting")
             ttt = tt
         else:
-            print("time axis is not converted")
+            _logger.debug("time axis is not converted")
             ttt = tt
 
         MT.stats_format(dd2)
@@ -622,14 +623,14 @@ def linear_imshow(
             fn = self.fs
 
         if isinstance(tt[0], np.datetime64):
-            print("numpy.datetime64")
+            _logger.debug("numpy.datetime64")
             ttt = dates.date2num(tt.astype(DT.datetime))
         elif isinstance(tt[0], np.timedelta64):
-            print("numpy.timedelta64")
+            _logger.debug("numpy.timedelta64")
             ttt = tt
         else:
-            print("something else")
+            _logger.debug("something else")
             ttt = tt
 
         self.cs = plt.pcolormesh(
@@ -702,7 +703,7 @@ def __init__(
         self.min = np.nanmin(data[freq_sel_bool, :])
         self.max = np.nanmax(data[freq_sel_bool, :])
         if verbose:
-            print(str(self.min), str(self.max))
+            _logger.debug("min: %s, max: %s", self.min, self.max)
 
         self.ylabels = np.arange(10, 100, 20)
         self.data_type = data_type
@@ -835,7 +836,7 @@ def echo_dt(a, as_string=False):
     if as_string:
         return string
     else:
-        print(string)
+        _logger.debug(string)
 
 
 def easy_dtstr(a):
@@ -880,27 +881,27 @@ def save_anyfig(fig, name=None, path=None):
     extension = ".png"
     full_name = (os.path.join(savepath, name)) + extension
     fig.savefig(full_name, bbox_inches="tight", format="png", dpi=180)
-    print("save at: ", full_name)
+    _logger.debug("save at: %s", full_name)
 
 
 def read_cdo(file):
     cdo = Cdo()
     G = cdo.readCdf(file).variables
-    print(G.keys())
+    _logger.debug("keys in G: %s", list(G.keys()))
     return G
 
 
 def build_timestamp(time, unit, start, verbose=True):
     timestamp = np.datetime64(start) + time[:].astype("m8[" + unit + "]")
     if verbose:
-        print(timestamp)
+        _logger.debug("timestamp: %s", timestamp)
     return timestamp
 
 
 def build_timestamp_v2(time, unit, start, verbose=True):
     timestamp = np.datetime64(start) + time[:].astype("datetime64[s]")
     if verbose:
-        print(timestamp)
+        _logger.debug("timestamp: %s", timestamp)
     return timestamp
 
 
@@ -908,29 +909,29 @@ def cut_nparray(var, low, high, verbose=False):
     if low < high:
         if low < var[0]:
             if verbose:
-                print("out of lower limit!")
+                _logger.debug("out of lower limit!")
         if high > var[-1]:
             if verbose:
-                print("out of upper limit!")
-                print(high, ">", var[-1])
+                _logger.debug("out of upper limit!")
+                _logger.debug("high: %s, last var: %s", high, var[-1])
         return (var >= low) & (var <= high)
 
     elif high < low:
         if high < var[0]:
-            print("limits flipped, out of lower limit!")
+            _logger.debug("limits flipped, out of lower limit!")
         if low > var[-1]:
-            print("limits flipped, out of lower limit!")
+            _logger.debug("limits flipped, out of upper limit!")
         return (var >= high) & (var <= low)
 
     elif high == low:
         if verbose:
-            print("find nearest")
+            _logger.debug("find nearest")
         a = var - low
         return np.unravel_index(np.abs(a).argmin(), np.transpose(a.shape))
 
     else:
-        print("error")
+        _logger.warning("error in cut_nparray()")
     return
 
@@ -964,10 +965,10 @@ def boxmean(data, lon, lat, xlim, ylim):
         case (2, 1):
             datan = data[:, :, xbool][:, ybool, :]
         case _:
-            print("arrays have not the same shape")
+            _logger.debug("arrays do not have the same shape")
 
-    print("new shape", datan.shape)
+    _logger.debug("new shape: %s", datan.shape)
 
     return np.nanmean(np.nanmean(datan, axis=xp), axis=yp).squeeze()
 
@@ -986,7 +987,7 @@ def detrend(data, od=None, x=None, plot=False, verbose=False):
 
     elif od > 0:
         if verbose:
-            print("assume data is equal dist. You can define option x= if not.")
+            _logger.debug("assuming equally spaced data; pass x= if it is not")
         d_org = data - np.nanmean(data)
         x = np.arange(0, d_org.size, 1) if x is None else x
 
@@ -1014,7 +1015,7 @@
         stats["polynom order"] = od
         stats["polyvals"] = px
     if verbose:
-        print(stats)
+        _logger.debug("stats: %s", stats)
 
     return d_detrend / np.nanstd(d_detrend), stats
 
@@ -1030,7 +1031,7 @@ def runningvar(var, m, tailcopy=False):
     m = int(m)
     s = var.shape
     if s[0] <= 2 * m:
-        print("0 Dimension is smaller then averaging length")
+        _logger.debug("dimension 0 is smaller than the averaging length")
         return
     rr = np.asarray(var) * np.nan
     var_range = np.arange(m, int(s[0]) - m - 1, 1)
@@ -1052,7 +1053,7 @@ def runningmean_wrap_around(var, m):
     m = int(m)
     s = var.shape
     if s[0] <= 2 * m:
-        print("0 Dimension is smaller then averaging length")
+        _logger.debug("dimension 0 is smaller than the averaging length")
         return
 
     rr = np.asarray(var) * np.nan
@@ -1068,7 +1069,7 @@ def runningmean(var, m, tailcopy=False):
     m = int(m)
     s = var.shape
     if s[0] <= 2 * m:
-        print("0 Dimension is smaller then averaging length")
+        _logger.debug("dimension 0 is smaller than the averaging length")
         return
     rr = np.asarray(var) * np.nan
     var_range = np.arange(m, int(s[0]) - m - 1, 1)
@@ -1155,7 +1156,7 @@ def find_max_ts(
 
     if jump is None:
         if verbose:
-            print("index, data, edit ts (index)")
+            _logger.debug("index, data, edit ts (index)")
         return index, data, data[index]
     else:
         c = np.diff(index)
@@ -1175,7 +1176,7 @@
             b = np.append(b, index[i]).astype(int)
             i = i + 1
         if verbose:
-            print("index, edited ts, edit ts (index), org_index")
+            _logger.debug("index, edited ts, edit ts (index), org_index")
         return b, data, data[b], index
 
@@ -1297,7 +1298,7 @@ def __init__(self, index, time=None, weigthing=False, span=None):
         self.weigthing = weigthing if weigthing is not False else None
         self.comp = dict()
         if time is None:
-            print(
+            _logger.debug(
                "timeaxis is not defined. Make sure that both timeseries have the same timestamp"
            )
            self.time_index = None
@@ -1325,31 +1326,31 @@ def corse_iter(self, dt, unit=None):
            .astype("float")
        )
        span_new = np.array(self.span) * dt_format / dt
-        print("old span=", self.span)
-        print("new span=", span_new)
+        _logger.debug("old span: %s", self.span)
+        _logger.debug("new span: %s", span_new)
        for s in span_new:
            span.append(int(np.floor(s)))
-        print(span)
+        _logger.debug("span: %s", span)
 
        self.iter2 = CompIter(span, dt, unit=unit)
 
    def iter_info(self):
        self.iter_operate.__dict__
-        print("available iters")
+        _logger.debug("available iters")
        if self.iter is not None:
-            print("self.iter")
+            _logger.debug("self.iter: %s", self.iter)
            self.iter.__dict__
        if self.iter2 is not None:
-            print("self.iter2")
+            _logger.debug("self.iter2: %s", self.iter2)
            self.iter2.__dict__
 
    def info(self):
-        print("index", self.index)
-        print("span", self.span)
-        print("weight", self.weigthing)
-        print("comp", self.comp.keys())
+        _logger.debug("index: %s", self.index)
+        _logger.debug("span: %s", self.span)
+        _logger.debug("weight: %s", self.weigthing)
+        _logger.debug("comp keys: %s", self.comp.keys())
 
    def transform_index_time(self, time_index, time_composite):
        """find nearest time index of compostite time compared to index times"""
@@ -1363,7 +1364,7 @@ def transform_index_time(self, time_index, time_composite):
    def compose_ts(self, ts, name, time=None):
        if time is not None:
            if self.time_index is None:
-                print("timeaxis of index TS is not defined!")
+                _logger.debug("timeaxis of index TS is not defined!")
                return
            else:
                iindex = self.transform_index_time(self.time_index, time)
@@ -1371,30 +1372,29 @@
            iindex = self.index
 
        span = self.iter_operate.span
-        print(iindex)
+        _logger.debug("index: %s", iindex)
        if self.span != [0, 0]:
            comp = np.empty((-span[0] + span[1]))
            self.length = comp.size
            for i in iindex:
                if i + span[0] < 0:
-                    print("i", i, "span:", span[0], span[1])
-                    print("left postion:", i + span[0])
+                    _logger.debug("i: %s span: %s %s", i, span[0], span[1])
+                    _logger.debug("left position: %s", i + span[0])
                    raise ValueError("composite span exceeds ts limits")
                    return -1
                elif i + span[1] > ts.size:
                    return -1
-                    print(i, span[0], span[1])
-                    print("i", i, "span:", span[0], span[1])
-                    print("right postion:", i + span[1])
+                    _logger.debug("i: %s span: %s %s", i, span[0], span[1])
+                    _logger.debug("right position: %s", i + span[1])
                    raise ValueError("composite span exceeds ts limits")
                    return -1
-                print("comp", comp.shape)
-                print("ts", ts[i + span[0] : i + span[1]].shape)
-                print(i, span[0], span[1])
-                comp = np.vstack((comp, ts[i + span[0] : i + span[1]]))
+                _logger.debug("comp %s", comp.shape)
+                _logger.debug("ts %s", ts[i + span[0]:i + span[1]].shape)
+                _logger.debug("i: %s span: %s %s", i, span[0], span[1])
+                comp = np.vstack((comp, ts[i + span[0]:i + span[1]]))
            comp = np.delete(comp, 0, 0)
            comp1 = CompositeData(comp, self.weigthing)
 
@@ -1406,7 +1406,7 @@ def compose_2d(self, field, name, time=None):
        if time is not None:
            if self.time_index is None:
-                print("timeaxis of index TS is not defined!")
+                _logger.debug("timeaxis of index TS is not defined!")
                return
            else:
                iindex = self.transform_index_time(self.time_index, time)
 
@@ -1415,7 +1415,7 @@
        span = self.iter_operate.span
        if span != [0, 0]:
-            print(-span[0] + span[1], field.shape[1])
+            _logger.debug("span range: %s, field shape: %s", -span[0] + span[1], field.shape[1])
            comp = np.empty((-span[0] + span[1], field.shape[1])) * np.NaN
            self.length = -span[0] + span[1]
            for i in iindex:
@@ -1437,7 +1437,7 @@
            comp1 = CompositeData(comp, self.weigthing)
            self.comp[name] = comp1
        else:
-            print("no span defined")
+            _logger.debug("no span defined")
            comp = field[iindex, :]
 
            comp1 = CompositeData(comp, self.weigthing)
@@ -1446,7 +1446,7 @@ def compose_field(self, field, name, time=None):
        if time is not None:
            if self.time_index is None:
-                print("timeaxis of index TS is not defined!")
+                _logger.debug("timeaxis of index TS is not defined!")
                return
            else:
                iindex = self.transform_index_time(self.time_index, time)
@@ -1471,7 +1471,7 @@
            comp1 = CompositeData(comp, self.weigthing)
            self.comp[name] = comp1
        else:
-            print("no span defined")
+            _logger.debug("no span defined")
            comp = field[iindex, :, :]
 
            comp1 = CompositeData(comp, self.weigthing)
@@ -1525,12 +1525,12 @@ def gen_log_space(limit, n):
 
 def linefit2Points(time_lin, f, data, f1, f2, f_delta=None, plot=False):
 
     if isinstance(time_lin[0], np.datetime64):
-        print("type is numpy.datetime64", time_lin.shape)
+        _logger.debug("type is numpy.datetime64, shape %s", time_lin.shape)
         time_lin = time_lin.astype("m8[s]").astype(int)
 
     if f.shape[0] != data.shape[0]:
-        print("ERROR: shapes are not correct")
-        print(f.shape, time_lin.shape, data.shape)
+        _logger.error("shapes are not correct")
+        _logger.error("f: %s; time_lin: %s; data: %s", f.shape, time_lin.shape, data.shape)
         return
 
     # find neerest discrete frequency
@@ -1592,19 +1592,21 @@ def find_max_along_line(
     timestamp = time_lin
     if isinstance(time_lin[0], np.datetime64):
-        print("time is numpy.datetime64")
+        _logger.debug("time is numpy.datetime64")
         time_lin = time_lin.astype("m8[s]").astype(int)
 
     if mode is None:
         mode = "free_limits"
+
+    _logger.debug("find_max_along_line with %s", mode)
+
     if mode in ["free_limits", "upper_limit"]:
+        _logger.debug("line_left[0]: %s, time_lin[0]: %s", line_left[0], time_lin[0])
         if line_left[0] > time_lin[0]:
+            _logger.debug(" left line > time0")
             f_start = 0
-            print(" left line > time0")
-            print(line_left[0], time_lin[0])
         else:
-            print(" left line < time")
-            print(line_left[0], time_lin[0])
+            _logger.debug(" left line < time0")
             a = line_left - time_lin[0]
             f_start = np.unravel_index(np.abs(a).argmin(), np.transpose(a.shape))[0] + 1
     else:
         a = f - f2
         f_start = np.unravel_index(np.abs(a).argmin(), np.transpose(a.shape))[0]
 
@@ -1613,14 +1615,13 @@ def find_max_along_line(
     if mode == "free_limits" or mode == "lower_limit":
+        _logger.debug("line_right[-1]: %s, time_lin[-1]: %s", line_right[-1], time_lin[-1])
         if line_right[-1] > time_lin[-1]:
-            print(" right line > time window")
-            print(line_right[-1], time_lin[-1])
+            _logger.debug(" right line > time window")
             a = line_right - time_lin[-1]
             f_end = np.unravel_index(np.abs(a).argmin(), np.transpose(a.shape))[0] - 1
         else:
-            print(" right line < time window")
-            print(line_right[-1], time_lin[-1])
+            _logger.debug(" right line < time window")
             f_end = time_lin.size - 2
     else:
         a = f - f2
@@ -1740,14 +1741,14 @@ def RAMSAC_regression_bootstrap(time, freq, time_lin_arg=None, plot=False, **kwa
 
     if time_lin_arg is not None:
         time_lin = time_lin_arg
-        print("time lin is set")
+        _logger.debug("time lin is set")
     else:
-        print("create linear time axis")
+        _logger.debug("create linear time axis")
         time_lin = np.linspace(time.min(), time.max(), freq.size)
 
     RAMS_predicted_line = time_lin * RAMS_slope + RAMS_intercept
-    print(RAMS_slope, RAMS_intercept)
+    _logger.debug("RAMS_slope: %s RAMS_intercept: %s", RAMS_slope, RAMS_intercept)
 
     RAMS_out = boot.ci(
         (time, freq), simple_RAMSAC_regression_estimator, method="bca", **kwargs
     )
diff --git a/src/icesat2waves/local_modules/m_spectrum_ph3.py b/src/icesat2waves/local_modules/m_spectrum_ph3.py
index e13b7bd..36099aa 100644
--- a/src/icesat2waves/local_modules/m_spectrum_ph3.py
+++ b/src/icesat2waves/local_modules/m_spectrum_ph3.py
@@ -1,3 +1,5 @@
+import logging
+
 import numpy as np
 from scipy.special import gammainc
 from scipy import signal
@@ -18,6 +20,8 @@
 except ImportError:
     pass
 
+_logger = logging.getLogger(__name__)
+
 
 def calc_freq(self):
     """calculate array of spectral variable (frequency or
@@ -59,7 +63,7 @@ def create_timeaxis_collection(time_as_datetime64):
     datetime = time_as_datetime64.astype(DT.datetime)
     float_plot = dates.date2num(datetime)
     dt64 = time_as_datetime64
-    
+
     T = {
         "sec": sec,
         "day": day,
@@ -89,21 +93,22 @@ def spicke_remover(data, nstd=20.0, spreed=500.0, max_loops=10.0, verbose=False)
     while peak_remove:
         data_std = nstd * data.std()
         max_abs_data2 = np.max(np.abs(data2))
-        
+
         if data_std < max_abs_data2:
             act_flag = True
             data2 = M.spickes_to_mean(data2, nloop=0, spreed=spreed, gaussian=False)
             looper_count += 1
         else:
-            peak_remove = False
-
-        if verbose:
-            print(f"{'True' if act_flag else 'False'}: {data_std} {'<' if act_flag else '>'} {max_abs_data2}")
-
+            peak_remove = False
+
+        _logger.debug("%s: %s %s %s",
+                      act_flag, data_std,
+                      "<" if act_flag else ">", max_abs_data2)
+
         if looper_count > max_loops:
             peak_remove = False
             if verbose:
-                print("Stopped by max#")
+                _logger.debug("Stopped by max#")
 
     if verbose:
         plt.plot(data, "r")
@@ -129,14 +134,14 @@ def __init__(self, data, dt, win_flag=1, pwelch=False, verbose=False):
 
         if win_flag:
             if verbose:
-                print("window")
+                _logger.debug("window")
                 MT.write_log(self.hist, "window")
             self.win_flag = win_flag
             self.phi = np.copy(self.data[:])
             self.phi *= win * np.sqrt(factor)
         else:
             if verbose:
-                print("no window")
+                _logger.debug("no window")
                 MT.write_log(self.hist, "no window")
             self.win_flag = 0
             self.phi = np.copy(self.data[:])
@@ -157,14 +162,13 @@
     def parceval(self):
-        print("Parcevals Theorem:")
-        print("variance of unweighted timeseries: ", self.data.var())
-        print(
-            "variance of weighted timeseries: ",
-            self.phi.var() if self.win_flag is 1 else "data not windowed",
-        )
-        print("variance of weighted timeseries: ", self.phi.var())
-        print("variance of the Spectrum: ", self.var)
+        _logger.debug("Parcevals Theorem:")
+        _logger.debug("variance of unweighted timeseries: %s", self.data.var())
+        _logger.debug(
+            "variance of weighted timeseries: %s",
+            self.phi.var() if self.win_flag == 1 else "data not windowed")
+        _logger.debug("variance of weighted timeseries: %s", self.phi.var())
+        _logger.debug("variance of the Spectrum: %s", self.var)
 
 
 class Moments:
@@ -193,7 +197,7 @@ def __init__(
         if prewhite is None:
             data = np.array(data_org)  # field to be analyzed
         elif prewhite == 1:
-            print("prewhite =1")
+            _logger.debug("prewhite = 1")
             data = np.gradient(np.array(data_org), axis=1)
         elif prewhite == 2:
             data = np.gradient(np.gradient(np.array(data_org), axis=1), axis=1)
@@ -267,11 +271,11 @@
                 else:
                     if plot_chunks:
-                        print("end of TS is reached")
-                        print("last spec No: " + str(last_k))
-                        print("spec container: " + str(specs.shape))
-                        print("last used Timestep: " + str(last_used_TS))
-                        print("length of TS " + str(data.size))
+                        _logger.debug("end of TS is reached")
+                        _logger.debug("last spec No: %s", last_k)
+                        _logger.debug("spec container: %s", specs.shape)
+                        _logger.debug("last used Timestep: %s", last_used_TS)
+                        _logger.debug("length of TS %s", data.size)
 
                 k += 1
 
@@ -284,7 +288,7 @@
             stack = np.empty([nbin, self.f.size])
             for i, mom in enumerate(self.mom_list):
                 stack[i, :] = mom[k]  # stack them
-            
+
             # mean them and decide prewhitening
             if prewhite is None:
                 factor = 1
@@ -292,10 +296,10 @@
                 factor = 2 * np.pi * self.f
             elif prewhite == 2:
                 factor = (2 * np.pi * self.f) ** 2
-            
+
             self.moments_stack[k] = stack * factor
             self.moments_est[k] = np.nanmean(stack, axis=0) * factor
-            
+
         self.moments_unit = "[data]^2"
         self.n_spec = len(self.mom_list)
 
@@ -398,7 +402,7 @@ def __init__(
         if prewhite is None:
             self.data = data  # field to be analyzed
         elif prewhite == 1:
-            print("prewhite =1")
+            _logger.debug("prewhite = 1")
             self.data = np.gradient(data)
         elif prewhite == 2:
             self.data = np.gradient(np.gradient(data))
@@ -465,11 +469,11 @@
                     del self.spec
                 else:
                     if plot_chunks:
-                        print("end of TS is reached")
-                        print("last spec No: " + str(last_k))
-                        print("spec container: " + str(specs.shape))
-                        print("last used Timestep: " + str(last_used_TS))
-                        print("length of TS " + str(dsize) + "ms")
+                        _logger.debug("end of TS is reached")
+                        _logger.debug("last spec No: %s", last_k)
+                        _logger.debug("spec container: %s", specs.shape)
+                        _logger.debug("last used Timestep: %s", last_used_TS)
+                        _logger.debug("length of TS %s ms", dsize)
 
                 k += 1
 
@@ -493,18 +497,16 @@
     def error(self, ci=0.95):
         self.El, self.Eu = spec_error(self.spec_est, self.n_spec, ci=ci)
 
     def parceval(self):
-        print("Parcevals Theorem:")
-        print("variance of unweighted timeseries: ", self.data.var())
-        print(
-            "mean variance of timeseries chunks: ",
+        _logger.debug("Parcevals Theorem:")
+        _logger.debug("variance of unweighted timeseries: %s", self.data.var())
+        _logger.debug("mean variance of timeseries chunks: %s",
             (
                 self.chunks.var(axis=1).mean()
                 if self.save_chunks is True
                 else "data not saved"
             ),
         )
-
-        print("variance of the pwelch Spectrum: ", self.var)
+        _logger.debug("variance of the pwelch Spectrum: %s", self.var)
 
     def calc_var(self):
         """Compute total variance from spectrum"""
@@ -536,7 +538,7 @@ def __init__(
             self.write_log("Length = " + M.echo_dt(L, as_string=True))
             L = int(L.item().total_seconds())
         else:
-            print("unknown L type")
+            _logger.debug("unknown L type")
             self.write_log("Length = " + "unknown L type")
 
         subL = int(np.round(L / 10)) if subL is None else subL
@@ -582,9 +584,9 @@
         n_specs = []
 
         k = 0
-        print("subL", subL)
-        print("L", L)
-        print(data_size_adjust)
+        _logger.debug("subL: %s", subL)
+        _logger.debug("L: %s", L)
+        _logger.debug("data_size_adjust: %s", data_size_adjust)
 
         for i in np.arange(0, data_size_adjust - int(L - ov) + 1, int(L - ov)):
 
@@ -701,9 +703,7 @@ def write_log(self, s, verbose=False):
         self.hist = MT.write_log(self.hist, s, verbose=verbose)
 
     def log(self):
-        print(".hist variable")
-        print(self.hist)
-
+        _logger.debug(".hist variable: %s", self.hist)
 
     def power_anomalie(self, clim=None):
         dd = 10 * np.log10(self.data[:, :])
@@ -711,8 +711,8 @@ def power_anomalie(self, clim=None):
             np.nanmedian(dd, axis=0) if clim is None else 10 * np.log10(clim)
         )
         dd_tmp = self.data_power_mean.repeat(self.time.size)
-        print(self.data_power_mean.shape)
-        print(self.f.size, self.time.size)
+        _logger.debug("data power mean shape: %s", self.data_power_mean.shape)
+        _logger.debug("f size: %s, time size: %s", self.f.size, self.time.size)
         self.data_power_ano = dd - dd_tmp.reshape(self.f.size, self.time.size).T
 
     def anomalie(self, clim=None):
@@ -746,10 +746,10 @@ def __init__(
         dt_unit = "s"
         dt_timedelta = np.timedelta64(dt, dt_unit)
-        print("sample resolution:")
+        _logger.debug("sample resolution:")
         M.echo_dt(dt_timedelta)
         timeres = np.timedelta64(int(self.dt_periodogram), dt_unit)
-        print("time resolution:")
+        _logger.debug("time resolution:")
         M.echo_dt(timeres)
 
         if timestamp is None:
@@ -759,9 +759,9 @@
             start_time = timestamp[0]
             end_time = timestamp[-1]
 
-        print("Periodogram starttime and endtime:")
-        print(start_time)
-        print(end_time)
+        _logger.debug("Periodogram starttime and endtime:")
+        _logger.debug("%s", start_time)
+        _logger.debug("%s", end_time)
 
         time = np.arange(start_time + L / 2, end_time + dt_timedelta, timeres)
         if time.shape[0] != self.data.shape[0]:
@@ -772,8 +772,8 @@
 
     def save_data(self, path=None, S=None):
         P = SaveDataPeriodogram(self, S=S)
-        print(P.meta)
-        print("constructed class for saving")
+        _logger.debug("P.meta: %s", P.meta)
+        _logger.debug("constructed class for saving")
         save_file(P, path)
 
@@ -829,7 +829,7 @@ def MEM_cal(moments_est, freq, theta=None, flim=None):
     d1 = N_sel["Q12"] / np.sqrt(N_sel["P11"] * (N_sel["P22"] + N_sel["P33"]))
     # Lygre and Krongstad 1986 have here sqrt(N_sel['P11'] *(N_sel['P22'] + N_sel['P33']). I guess its a typo.
     d2 = N_sel["Q13"] / np.sqrt(N_sel["P11"] * (N_sel["P22"] + N_sel["P33"]))
-    
+
     d3 = (N_sel["P22"] - N_sel["P33"]) / (N_sel["P22"] + N_sel["P33"])
     d4 = 2 * N_sel["P23"] / (N_sel["P22"] + N_sel["P33"])
 
@@ -939,7 +939,7 @@ def save_file(data, path):
     outfile = path
     with open(outfile, "wb") as f:
         pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
-    print("saved to:", outfile)
+    _logger.debug("saved to: %s", outfile)
 
diff --git a/src/icesat2waves/local_modules/m_tools_ph3.py b/src/icesat2waves/local_modules/m_tools_ph3.py
index cfff700..23b911f 100644
--- a/src/icesat2waves/local_modules/m_tools_ph3.py
+++ b/src/icesat2waves/local_modules/m_tools_ph3.py
@@ -1,3 +1,5 @@
+import logging
+
 import os
 import matplotlib.pyplot as plt
 import numpy as np
@@ -12,6 +14,8 @@
 from pandas.io.pytables import PerformanceWarning
 import glob
 
+_logger = logging.getLogger(__name__)
+
 
 def dt_form_timestamp(timestamp, unit="h"):
     return (timestamp[1]-timestamp[0]).astype(f"m8[{unit}]")
@@ -103,9 +107,9 @@ def fake_2d_data(verbose=True, timeaxis=False):
     z3 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((YY - mu) ** 2) / (2 * sigma**2))
     z3 = z3 / z3.max()
     if verbose:
-        print("x", x.shape)
-        print("y", y.shape)
-        print("z", z3.shape)
+        _logger.debug("x %s", x.shape)
+        _logger.debug("y %s", y.shape)
+        _logger.debug("z %s", z3.shape)
 
         plt.contourf(x, y, z2 / 2 + z3 / 2)
         plt.colorbar()
@@ -123,8 +127,7 @@ def pickle_save(name, path, data, verbose=True):
     with open(full_name, "wb") as f2:
         pickle.dump(data, f2)
     if verbose:
-        print("save at: ", full_name)
-
+        _logger.debug("save at: %s", full_name)
 
 def pickle_load(name, path, verbose=True):
     full_name = os.path.join(path, name + ".npy")
@@ -133,7 +136,7 @@
         data = pickle.load(f)
 
     if verbose:
-        print("load from: ", full_name)
+        _logger.debug("load from: %s", full_name)
     return data
 
@@ -146,7 +149,7 @@ def json_save(name, path, data, verbose=False, return_name=False):
     with open(full_name, "w") as outfile:
         json.dump(data, outfile, indent=2)
     if verbose:
-        print("save at: ", full_name)
+        _logger.debug("save at: %s", full_name)
     if return_name:
         return full_name_root
@@ -164,7 +167,7 @@ def default(self, obj):
     with open(full_name, "w") as outfile:
         json.dump(data, outfile, cls=CustomJSONizer, indent=2)
     if verbose:
-        print("save at: ", full_name)
+        _logger.debug("save at: %s", full_name)
     if return_name:
         return full_name_root
 
@@ -175,7 +178,7 @@ def json_load(name, path, verbose=False):
     with open(full_name, "r") as ifile:
         data = json.load(ifile)
     if verbose:
-        print("loaded from: ", full_name)
+        _logger.debug("loaded from: %s", full_name)
     return data
 
@@ -190,7 +193,7 @@ def h5_load_v2(name, path, verbose=False):
     with h5py.File(path + name + '.h5','r') as h5f:
         if verbose:
-            print(h5f.keys())
+            _logger.debug("%s h5f keys: %s", name, h5f.keys())
 
         data_dict = {k: v[:] for k, v in h5f.items()}
 
@@ -207,7 +210,7 @@ def h5_save(name, path, data_dict, verbose=False, mode="w"):
             store[k] = I
 
     if verbose:
-        print("saved at: " + full_name)
+        _logger.debug("saved at: %s", full_name)
 
@@ -243,8 +246,8 @@ def write_log(hist, string, verbose=False, short=True, date=True):
     message = f"\n{now} {string}" if date else f"\n {string}"
 
     if verbose in [True, 'all']:
-        print(hist + message if verbose == 'all' else message)
-
+        _logger.debug("hist message: %s", hist + message if verbose == 'all' else message)
+
     return hist + message
 
@@ -262,7 +265,7 @@ def write_variables_log(hist, var_list, locals, verbose=False, date=False):
     message = f"\n{now} {stringg}" if date else f"\n{' '.ljust(5)} {stringg}"
 
     if verbose in [True, 'all']:
-        print(hist + message if verbose == 'all' else message)
+        _logger.debug("write_variables_log: %s", hist + message if verbose == 'all' else message)
 
@@ -272,7 +275,7 @@ def save_log_txt(name, path, hist, verbose=False):
     with open(full_name, "w") as ifile:
         ifile.write(str(hist))
     if verbose:
-        print("saved at: ", full_name)
+        _logger.debug("saved at: %s", full_name)
 
@@ -285,7 +288,7 @@ def load_log_txt(hist_file, path):
 
 def shape(a):
     for i in a:
-        print(i.shape)
+        _logger.debug("shape: %s", i.shape)
 
 def find_O(a, case="round"):
@@ -310,19 +313,24 @@
 
 def stats(a):
-    print(
-        f"shape: {a.shape}\n"
-        f"Nans: {np.sum(np.isnan(a))}\n"
-        f"max: {np.nanmax(a)}\n"
-        f"min: {np.nanmin(a)}\n"
-        f"mean: {np.nanmean(a)}"
+    _logger.debug(
+        "shape: %s\nNans: %s\nmax: %s\nmin: %s\nmean: %s",
+        a.shape,
+        np.sum(np.isnan(a)),
+        np.nanmax(a),
+        np.nanmin(a),
+        np.nanmean(a)
     )
 
+
 def stats_format(a, name=None):
-    print(f"Name: {name}"
-          f" Shape: {a.shape}"
-          f" NaNs: {np.sum(np.isnan(a))}"
-          f" max: {np.nanmax(a)}"
-          f" min: {np.nanmin(a)}"
-          f" mean: {np.nanmean(a)}")
+    _logger.debug(
+        "Name: %s\n"
+        " Shape: %s\n"
+        " NaNs: %s\n"
+        " max: %s\n"
+        " min: %s\n"
+        " mean: %s",
+        name, a.shape, np.sum(np.isnan(a)), np.nanmax(a), np.nanmin(a), np.nanmean(a)
+    )
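
Usage note (not part of the patch): the new `_logger.debug(...)` calls are silent until the consuming application configures the logging system, since each module only creates a logger via `logging.getLogger(__name__)`. A minimal sketch of how a caller might surface this output — the `basicConfig` setup below is an illustrative assumption, not something this patch prescribes:

    import logging

    # Route every module logger to the console at DEBUG level.
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(name)s - %(levelname)s - %(message)s",
    )

    # Or raise verbosity for a single module only; logger names follow
    # the package path because each file uses logging.getLogger(__name__).
    logging.getLogger("icesat2waves.local_modules.m_general_ph3").setLevel(logging.DEBUG)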