From b64373bb9a987ed51587b940e11ff4b8a7d7f791 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 23 Aug 2024 17:03:24 -0700 Subject: [PATCH 01/22] updated dataset get_item --- neuralprophet/configure.py | 4 + neuralprophet/data/process.py | 1 + neuralprophet/forecaster.py | 16 +- neuralprophet/time_dataset.py | 305 +++++++++++++++------------------- neuralprophet/time_net.py | 57 ++++++- neuralprophet/utils.py | 151 +++++++++++++++++ tests/test_unit.py | 22 +-- 7 files changed, 367 insertions(+), 189 deletions(-) diff --git a/neuralprophet/configure.py b/neuralprophet/configure.py index bc2b004fc..ec2e9da97 100644 --- a/neuralprophet/configure.py +++ b/neuralprophet/configure.py @@ -22,9 +22,13 @@ @dataclass class Model: + features_map: dict lagged_reg_layers: Optional[List[int]] +ConfigModel = Model + + @dataclass class Normalization: normalize: str diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py index 2958dde49..8d9d71df8 100644 --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -626,5 +626,6 @@ def _create_dataset(model, df, predict_mode, prediction_frequency=None): config_regressors=model.config_regressors, config_lagged_regressors=model.config_lagged_regressors, config_missing=model.config_missing, + config_model=model.config_model, # config_train=model.config_train, # no longer needed since JIT tabularization. ) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 85939955e..500cc3e27 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -35,6 +35,7 @@ from neuralprophet.plot_model_parameters_plotly import plot_parameters as plot_parameters_plotly from neuralprophet.plot_utils import get_valid_configuration, log_warning_deprecation_plotly, select_plotting_backend from neuralprophet.uncertainty import Conformal +from neuralprophet.utils import unpack_sliced_tensor log = logging.getLogger("NP.forecaster") @@ -487,7 +488,7 @@ def __init__( self.max_lags = self.n_lags # Model - self.config_model = configure.Model(lagged_reg_layers=lagged_reg_layers) + self.config_model = configure.Model(features_map={}, lagged_reg_layers=lagged_reg_layers) # Trend self.config_trend = configure.Trend( @@ -1893,13 +1894,23 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): config_regressors=self.config_regressors, config_lagged_regressors=self.config_lagged_regressors, config_missing=self.config_missing, + config_model=self.config_model, # config_train=self.config_train, # no longer needed since JIT tabularization. 
) loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False) predicted = {} for name in self.config_seasonality.periods: predicted[name] = list() - for inputs, _, meta in loader: + for inputs_tensor, meta in loader: + inputs = unpack_sliced_tensor( + sliced_tensor=inputs_tensor, + n_lags=0, + n_forecasts=1, + max_lags=0, + feature_indices=self.config_model.features_map, + config_lagged_regressors=self.config_lagged_regressors, + config_seasonality=self.config_seasonality, + ) # Meta as a tensor for prediction if self.model.config_seasonality is None: meta_name_tensor = None @@ -2631,6 +2642,7 @@ def _init_model(self): config_events=self.config_events, config_holidays=self.config_country_holidays, config_normalization=self.config_normalization, + config_model=self.config_model, n_forecasts=self.n_forecasts, n_lags=self.n_lags, max_lags=self.max_lags, diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index 33684f596..56c236fa7 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -33,6 +33,7 @@ def __init__( config_regressors, config_lagged_regressors, config_missing, + config_model, ): """Initialize Timedataset from time-series df. Parameters @@ -75,6 +76,7 @@ def __init__( self.config_regressors = config_regressors self.config_lagged_regressors = config_lagged_regressors self.config_missing = config_missing + self.config_model = config_model self.max_lags = get_max_num_lags(n_lags=self.n_lags, config_lagged_regressors=self.config_lagged_regressors) if self.max_lags == 0: @@ -133,6 +135,123 @@ def __init__( if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): self.calculate_seasonalities() + self.stack_all_features() + + def stack_all_features(self): + feature_list = [] + self.feature_indices = {} + + current_idx = 0 + + # Stack Trend (t) + time_tensor = self.df_tensors["t"].unsqueeze(-1) # Shape: [T, 1] + feature_list.append(time_tensor) + self.feature_indices["time"] = (current_idx, current_idx) + current_idx += 1 + + # Stack lags (y_scaled) + if self.n_lags >= 1 and "y_scaled" in self.df_tensors: + lags_tensor = self.df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(lags_tensor) + self.feature_indices["lags"] = (current_idx, current_idx) + current_idx += lags_tensor.shape[1] + + # Stack targets (y_scaled) + if "y_scaled" in self.df_tensors: + targets_tensor = self.df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(targets_tensor) + self.feature_indices["targets"] = (current_idx, current_idx) + current_idx += targets_tensor.shape[1] + + # Stack lagged regressor features + if self.config_lagged_regressors: + # Collect all lagged regressor tensors in a list + lagged_regressor_tensors = [ + self.df_tensors[name].unsqueeze(-1) for name in self.config_lagged_regressors.keys() + ] + + # Concatenate all lagged regressors along the last dimension (features) + stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) + + # Append to feature list + feature_list.append(stacked_lagged_regressor_tensor) + + # Update feature indices + num_features = stacked_lagged_regressor_tensor.size(-1) + for i, name in enumerate(self.config_lagged_regressors.keys()): + self.feature_indices[f"lagged_regressor_{name}"] = ( + current_idx + i, + current_idx + i + 1, + ) + current_idx += num_features + + # Stack additive event and holiday features + if self.additive_event_and_holiday_names: + additive_events_tensor = torch.cat( + 
[self.df_tensors[name].unsqueeze(-1) for name in self.additive_event_and_holiday_names], + dim=1, + ) # Shape: [T, num_additive_events] + feature_list.append(additive_events_tensor) + self.feature_indices["additive_events"] = ( + current_idx, + current_idx + additive_events_tensor.size(1) - 1, + ) + current_idx += additive_events_tensor.size(1) + + # Stack multiplicative event and holiday features + if self.multiplicative_event_and_holiday_names: + multiplicative_events_tensor = torch.cat( + [self.df_tensors[name].unsqueeze(-1) for name in self.multiplicative_event_and_holiday_names], dim=1 + ) # Shape: [T, num_multiplicative_events] + + feature_list.append(multiplicative_events_tensor) + self.feature_indices["multiplicative_events"] = ( + current_idx, + current_idx + multiplicative_events_tensor.size(1) - 1, + ) + + current_idx += multiplicative_events_tensor.size(1) + + # Stack additive regressor features + if self.additive_regressors_names: + additive_regressors_tensor = torch.cat( + [self.df_tensors[name].unsqueeze(-1) for name in self.additive_regressors_names], dim=1 + ) # Shape: [T, num_additive_regressors] + feature_list.append(additive_regressors_tensor) + self.feature_indices["additive_regressors"] = ( + current_idx, + current_idx + additive_regressors_tensor.size(1) - 1, + ) + current_idx += additive_regressors_tensor.size(1) + + # Stack seasonality features + if self.config_seasonality and self.config_seasonality.periods: + for seasonality_name, features in self.seasonalities.items(): + seasonal_tensor = features + log.debug(f"Seasonality tensor shape for {seasonality_name}: {seasonal_tensor.shape}") + feature_list.append(seasonal_tensor) + self.feature_indices[f"seasonality_{seasonality_name}"] = ( + current_idx, + current_idx + seasonal_tensor.size(1), + ) + current_idx += seasonal_tensor.size(1) + + # Stack multiplicative regressor features + if self.multiplicative_regressors_names: + multiplicative_regressors_tensor = torch.cat( + [self.df_tensors[name].unsqueeze(-1) for name in self.multiplicative_regressors_names], dim=1 + ) # Shape: [T, num_multiplicative_regressors] + feature_list.append(multiplicative_regressors_tensor) + self.feature_indices["multiplicative_regressors"] = ( + current_idx, + current_idx + len(self.multiplicative_regressors_names) - 1, + ) + current_idx += len(self.multiplicative_regressors_names) + + # Concatenate all features into one tensor along the feature dimension (dim=1) + self.all_features = torch.cat(feature_list, dim=1) # Shape: [T, total_num_features] + if self.config_model is not None: + self.config_model.features_map = self.feature_indices + def calculate_seasonalities(self): self.seasonalities = OrderedDict({}) dates = self.df_tensors["ds"] @@ -202,22 +321,15 @@ def __getitem__(self, index): # - dataframe positional index is given by position of first target in dataframe for given sample index df_index = self.sample_index_to_df_index(index) - # Tabularize - extract features from dataframe at given target index position - inputs, target = self.tabularize_univariate_datetime_single_index( - df_tensors=self.df_tensors, - origin_index=df_index, - predict_mode=self.predict_mode, - n_lags=self.n_lags, - max_lags=self.max_lags, - n_forecasts=self.n_forecasts, - config_seasonality=self.config_seasonality, - config_lagged_regressors=self.config_lagged_regressors, - additive_event_and_holiday_names=self.additive_event_and_holiday_names, - multiplicative_event_and_holiday_names=self.multiplicative_event_and_holiday_names, - 
additive_regressors_names=self.additive_regressors_names, - multiplicative_regressors_names=self.multiplicative_regressors_names, - ) - return inputs, target, self.meta + # Extract features from dataframe at given target index position + if self.max_lags > 0: + min_start_index = df_index - self.max_lags + 1 + max_end_index = df_index + self.n_forecasts + 1 + inputs = self.all_features[min_start_index:max_end_index, :] + else: + inputs = self.all_features[df_index, :] + + return inputs, self.meta def __len__(self): """Overrides Parent class method to get data length.""" @@ -294,152 +406,6 @@ def log_input_shapes(self, inputs): tabularized_input_shapes_str += f" {key} {value.shape} \n" log.debug(f"Tabularized inputs shapes: \n{tabularized_input_shapes_str}") - def tabularize_univariate_datetime_single_index( - self, - df_tensors: dict, - origin_index: int, - predict_mode: bool = False, - n_lags: int = 0, - max_lags: int = 0, - n_forecasts: int = 1, - config_seasonality: Optional[configure.ConfigSeasonality] = None, - config_lagged_regressors: Optional[configure.ConfigLaggedRegressors] = None, - additive_event_and_holiday_names: List[str] = [], - multiplicative_event_and_holiday_names: List[str] = [], - additive_regressors_names: List[str] = [], - multiplicative_regressors_names: List[str] = [], - ): - """Create a tabular data sample from timeseries dataframe, used for mini-batch creation. - Note - ---- - Data must have no gaps for sample extracted at given index position. - ---------- - df : pd.DataFrame - Sequence of observations with original ``ds``, ``y`` and normalized ``t``, ``y_scaled`` columns - origin_index: int: - dataframe index position of last observed lag before forecast starts. - n_forecasts : int - Number of steps to forecast into future - n_lags : int - Number of lagged values of series to include as model inputs (aka AR-order) - config_seasonality : configure.ConfigSeasonality - Configuration for seasonalities - config_lagged_regressors : configure.ConfigLaggedRegressors - Configurations for lagged regressors - config_events : configure.ConfigEvents - User specified events, each with their upper, lower windows (int) and regularization - config_country_holidays : configure.ConfigCountryHolidays - Configurations (holiday_names, upper, lower windows, regularization) for country specific holidays - config_regressors : configure.ConfigFutureRegressors - Configuration for regressors - predict_mode : bool - Chooses the prediction mode - Options - * (default) ``False``: Includes target values - * ``True``: Does not include targets but includes entire dataset as input - Returns - ------- - OrderedDict - Model inputs, each of len(df) but with varying dimensions - Note - ---- - Contains the following data: - Model Inputs - * ``time`` (np.array, float), dims: (num_samples, 1) - * ``seasonalities`` (OrderedDict), named seasonalities - each with features (np.array, float) - dims: (num_samples, n_features[name]) - * ``lags`` (np.array, float), dims: (num_samples, n_lags) - * ``covariates`` (OrderedDict), named covariates, - each with features (np.array, float) of dims: (num_samples, n_lags) - * ``events`` (OrderedDict), events, - each with features (np.array, float) of dims: (num_samples, n_lags) - * ``regressors`` (OrderedDict), regressors, - each with features (np.array, float) of dims: (num_samples, n_lags) - np.array, float - Targets to be predicted of same length as each of the model inputs, dims: (n_forecasts, 1) - """ - # TODO: pre-process all type conversions (e.g. 
torch.float32) in __init__ - # Note: if max_lags == 0, then n_forecasts == 1 - - # sample features are stored and returned in OrderedDict - inputs = OrderedDict({}) - - targets = self.get_sample_targets( - df_tensors=df_tensors, - origin_index=origin_index, - n_forecasts=n_forecasts, - max_lags=max_lags, - predict_mode=predict_mode, - ) - - # TIME: the time at each sample's lags and forecasts - if max_lags == 0: - t = df_tensors["t"][origin_index] - inputs["time"] = t.unsqueeze(0) - else: - # extract time value of n_lags steps before and icluding origin_index and n_forecasts steps after origin_index - # Note: df.loc is inclusive of slice end, while df.iloc is not. - t = df_tensors["t"][origin_index - n_lags + 1 : origin_index + n_forecasts + 1] - inputs["time"] = t - - # LAGS: From y-series, extract preceeding n_lags steps up to and including origin_index - if n_lags >= 1 and "y_scaled" in df_tensors: - # Note: df.loc is inclusive of slice end, while df.iloc is not. - lags = df_tensors["y_scaled"][origin_index - n_lags + 1 : origin_index + 1] - inputs["lags"] = lags - - # COVARIATES / LAGGED REGRESSORS: Lagged regressor inputs: analogous to LAGS - if config_lagged_regressors is not None: # and max_lags > 0: - inputs["covariates"] = self.get_sample_lagged_regressors( - df_tensors=df_tensors, origin_index=origin_index, config_lagged_regressors=config_lagged_regressors - ) - - # SEASONALITIES_ - if config_seasonality is not None: - inputs["seasonalities"] = self.get_sample_seasonalities( - df_tensors=df_tensors, - origin_index=origin_index, - n_forecasts=n_forecasts, - max_lags=max_lags, - n_lags=n_lags, - config_seasonality=config_seasonality, - ) - - # FUTURE REGRESSORS: get the future regressors features - # create numpy array of values of additive and multiplicative regressors, at correct indexes - # features dims: (n_forecasts, n_features) - any_future_regressors = 0 < len(additive_regressors_names + multiplicative_regressors_names) - if any_future_regressors: # if config_regressors.regressors is not None: - inputs["regressors"] = self.get_sample_future_regressors( - df_tensors=df_tensors, - origin_index=origin_index, - n_forecasts=n_forecasts, - max_lags=max_lags, - n_lags=n_lags, - additive_regressors_names=additive_regressors_names, - multiplicative_regressors_names=multiplicative_regressors_names, - ) - - # FUTURE EVENTS: get the events features - # create numpy array of values of additive and multiplicative events, at correct indexes - # features dims: (n_forecasts, n_features) - any_events = 0 < len(additive_event_and_holiday_names + multiplicative_event_and_holiday_names) - if any_events: - inputs["events"] = self.get_sample_future_events( - df_tensors=df_tensors, - origin_index=origin_index, - n_forecasts=n_forecasts, - max_lags=max_lags, - n_lags=n_lags, - additive_event_and_holiday_names=additive_event_and_holiday_names, - multiplicative_event_and_holiday_names=multiplicative_event_and_holiday_names, - ) - - # ONLY FOR DEBUGGING - # if log.level == 0: - # log_input_shapes(inputs) - return inputs, targets - def get_event_offset_features(self, event, config, feature): """ Create event offset features for the given event, config and feature @@ -707,19 +673,14 @@ def sort_regressor_names(self, config): return additive_regressors_names, multiplicative_regressors_names def get_sample_targets(self, df_tensors, origin_index, n_forecasts, max_lags, predict_mode): - if predict_mode: - return torch.zeros((n_forecasts, 1), dtype=torch.float32) - else: - if n_forecasts == 1: - if max_lags 
== 0: - targets = df_tensors["y_scaled"][origin_index] - if max_lags > 0: - targets = df_tensors["y_scaled"][origin_index + 1] - targets = targets.unsqueeze(0).unsqueeze(1) + if "y_scaled" in self.df_tensors: + if max_lags == 0: + targets = df_tensors["y_scaled"][origin_index].unsqueeze(0).unsqueeze(1) else: targets = df_tensors["y_scaled"][origin_index + 1 : origin_index + n_forecasts + 1] targets = targets.unsqueeze(1) return targets + return torch.zeros((n_forecasts, 1), dtype=torch.float32) def get_sample_lagged_regressors(self, df_tensors, origin_index, config_lagged_regressors): lagged_regressors = OrderedDict({}) @@ -804,6 +765,7 @@ def __init__( config_regressors, config_lagged_regressors, config_missing, + config_model, ): """Initialize Timedataset from time-series df. Parameters @@ -829,6 +791,7 @@ def __init__( config_regressors=config_regressors, config_lagged_regressors=config_lagged_regressors, config_missing=config_missing, + config_model=config_model, ) self.length = sum(dataset.length for (name, dataset) in self.datasets.items()) global_sample_to_local_ID = [] diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index a4fbfee3a..70f0a48ec 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -21,6 +21,7 @@ reg_func_seasonality_glocal, reg_func_trend, reg_func_trend_glocal, + unpack_sliced_tensor, ) from neuralprophet.utils_torch import init_parameter, interprete_model @@ -51,6 +52,7 @@ def __init__( config_regressors: Optional[configure.ConfigFutureRegressors] = None, config_events: Optional[configure.ConfigEvents] = None, config_holidays: Optional[configure.ConfigCountryHolidays] = None, + config_model: Optional[configure.ConfigModel] = None, n_forecasts: int = 1, n_lags: int = 0, max_lags: int = 0, @@ -155,6 +157,7 @@ def __init__( self.config_train = config_train self.config_normalization = config_normalization self.compute_components_flag = compute_components_flag + self.config_model = config_model # Optimizer and LR Scheduler self._optimizer = self.config_train.optimizer @@ -562,7 +565,6 @@ def forward(self, inputs: Dict, meta: Dict = None, compute_components_flag: bool meta = OrderedDict() meta["df_name"] = [name_id_dummy for _ in range(inputs["time"].shape[0])] meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) - components = {} additive_components = torch.zeros( size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles)), @@ -574,7 +576,6 @@ def forward(self, inputs: Dict, meta: Dict = None, compute_components_flag: bool multiplicative_components_nonstationary = torch.zeros( size=(inputs["time"].shape[0], inputs["time"].shape[1], len(self.quantiles)), device=self.device ) - trend = self.trend(t=inputs["time"], meta=meta) components["trend"] = trend @@ -774,7 +775,17 @@ def loss_func(self, inputs, predicted, targets): return loss, reg_loss def training_step(self, batch, batch_idx): - inputs, targets, meta = batch + inputs_tensor, meta = batch + inputs = unpack_sliced_tensor( + inputs_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + self.config_lagged_regressors, + self.config_seasonality, + ) + targets = inputs["targets"] # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -810,7 +821,18 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): - inputs, targets, meta = batch + inputs_tensor, meta = batch + + 
inputs = unpack_sliced_tensor( + inputs_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + self.config_lagged_regressors, + self.config_seasonality, + ) + targets = inputs["targets"] # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -829,7 +851,18 @@ def validation_step(self, batch, batch_idx): self.log("RegLoss_val", reg_loss, **self.log_args) def test_step(self, batch, batch_idx): - inputs, targets, meta = batch + inputs_tensor, meta = batch + + inputs = unpack_sliced_tensor( + inputs_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + self.config_lagged_regressors, + self.config_seasonality, + ) + targets = inputs["targets"] # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -843,12 +876,24 @@ def test_step(self, batch, batch_idx): if self.metrics_enabled: predicted_denorm = self.denormalize(predicted[:, :, 0]) target_denorm = self.denormalize(targets.squeeze(dim=2)) + # target_denorm = target_denorm.detach().clone() + self.log_dict(self.metrics_val(predicted_denorm, target_denorm), **self.log_args) self.log("Loss_test", loss, **self.log_args) self.log("RegLoss_test", reg_loss, **self.log_args) def predict_step(self, batch, batch_idx, dataloader_idx=0): - inputs, _, meta = batch + inputs_tensor, meta = batch + + inputs = unpack_sliced_tensor( + inputs_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + self.config_lagged_regressors, + self.config_seasonality, + ) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index 62b9e7481..bbb8739da 100644 --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -987,3 +987,154 @@ def configure_trainer( # config["replace_sampler_ddp"] = False return pl.Trainer(**config), checkpoint_callback + + +def unpack_sliced_tensor( + sliced_tensor, + n_lags, + n_forecasts, + max_lags, + feature_indices, + config_lagged_regressors, + config_seasonality, +): + sliced_tensor = sliced_tensor.detach().clone() + inputs = OrderedDict() + if max_lags > 0: + # Unpack time feature (time doesn't need further slicing) + start_idx, end_idx = feature_indices["time"] + time_offset = max_lags - n_lags + inputs["time"] = sliced_tensor[:, time_offset : time_offset + n_lags + n_forecasts, start_idx] + + # Unpack lags feature + if "lags" in feature_indices: + lags_start_idx, lags_end_idx = feature_indices["lags"] + lags_offset = max_lags - n_lags + inputs["lags"] = sliced_tensor[:, lags_offset : lags_offset + n_lags, lags_start_idx] + + # Unpack targets + if "targets" in feature_indices: + targets_start_idx, targets_end_idx = feature_indices["targets"] + inputs["targets"] = sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) + + # Unpack additive event and holiday features + if "additive_events" in feature_indices: + events_start_idx, events_end_idx = feature_indices["additive_events"] + future_offset = max_lags - n_lags + inputs["events"] = OrderedDict() + inputs["events"]["additive"] = sliced_tensor[ + :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 + ] + + # Unpack multiplicative event and holiday features + if "multiplicative_events" 
in feature_indices: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + future_offset = max_lags - n_lags + if "events" not in inputs: + inputs["events"] = OrderedDict() + inputs["events"]["multiplicative"] = sliced_tensor[ + :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 + ] + + # Unpack additive regressor features + if "additive_regressors" in feature_indices: + regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + future_offset = max_lags - n_lags + inputs["regressors"] = OrderedDict() + inputs["regressors"]["additive"] = sliced_tensor[ + :, + future_offset : future_offset + n_forecasts + n_lags, + regressors_start_idx : regressors_end_idx + 1, + ] + + # Unpack multiplicative regressor features + if "multiplicative_regressors" in feature_indices: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + future_offset = max_lags - n_lags + if "regressors" not in inputs: + inputs["regressors"] = OrderedDict() + inputs["regressors"]["multiplicative"] = sliced_tensor[ + :, + future_offset : future_offset + n_forecasts + n_lags, + regressors_start_idx : regressors_end_idx + 1, + ] + + # Unpack seasonality feature + if config_seasonality is not None and hasattr(config_seasonality, "periods"): + inputs["seasonalities"] = OrderedDict() + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + seasonality_offset = max_lags - n_lags + inputs["seasonalities"][seasonality_name] = sliced_tensor[ + :, + seasonality_offset : seasonality_offset + n_forecasts + n_lags, + seasonality_start_idx:seasonality_end_idx, + ] + s = inputs["seasonalities"][seasonality_name].shape + + # Unpack lagged regressor features + if config_lagged_regressors: + inputs["covariates"] = OrderedDict() + for name, lagged_regressor in config_lagged_regressors.items(): + lagged_regressor_key = f"lagged_regressor_{name}" + if lagged_regressor_key in feature_indices: + lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] + covar_lags = lagged_regressor.n_lags + lagged_regressor_offset = max_lags - covar_lags + inputs["covariates"][name] = sliced_tensor[ + :, + lagged_regressor_offset : lagged_regressor_offset + covar_lags, + lagged_regressor_start_idx, + ] + + else: + start_idx, end_idx = feature_indices["time"] + inputs["time"] = sliced_tensor[:, start_idx : end_idx + 1] + + if "targets" in feature_indices: + targets_start_idx, targets_end_idx = feature_indices["targets"] + inputs["targets"] = sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) + + # Unpack additive event and holiday features + if "additive_events" in feature_indices: + events_start_idx, events_end_idx = feature_indices["additive_events"] + inputs["events"] = OrderedDict() + inputs["events"]["additive"] = sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + # Unpack multiplicative event and holiday features + if "multiplicative_events" in feature_indices: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + if "events" not in inputs: + inputs["events"] = OrderedDict() + inputs["events"]["multiplicative"] = sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + # Unpack additive regressor features + if "additive_regressors" in feature_indices: + 
regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + inputs["regressors"] = OrderedDict() + inputs["regressors"]["additive"] = sliced_tensor[ + :, regressors_start_idx : regressors_end_idx + 1 + ].unsqueeze(1) + + # Unpack multiplicative regressor features + if "multiplicative_regressors" in feature_indices: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + if "regressors" not in inputs: + inputs["regressors"] = OrderedDict() + inputs["regressors"]["multiplicative"] = sliced_tensor[ + :, regressors_start_idx : regressors_end_idx + 1 + ].unsqueeze(1) + + # Unpack seasonality feature + if config_seasonality and hasattr(config_seasonality, "periods"): + inputs["seasonalities"] = OrderedDict() + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + inputs["seasonalities"][seasonality_name] = sliced_tensor[ + :, seasonality_start_idx:seasonality_end_idx + ].unsqueeze(1) + return inputs diff --git a/tests/test_unit.py b/tests/test_unit.py index 2032ffecb..33ab9ab61 100644 --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -110,16 +110,17 @@ def test_timedataset_minimal(): config_regressors=None, config_lagged_regressors=None, config_missing=config_missing, + config_model=None, ) - inputs, targets, meta = dataset.__getitem__(0) - # inputs50, targets50, meta50 = dataset.__getitem__(50) - log.debug(f"(n_forecasts {n_forecasts}, n_lags {n_lags})") - log.debug(f"tabularized targets: {targets.shape}") - log.debug( - "tabularized inputs: {}".format( - "; ".join(["{}: {}".format(inp, values.shape) for inp, values in inputs.items()]) - ) - ) + input, meta = dataset.__getitem__(0) + # # inputs50, targets50, meta50 = dataset.__getitem__(50) + # log.debug(f"(n_forecasts {n_forecasts}, n_lags {n_lags})") + # log.debug(f"tabularized targets: {targets.shape}") + # log.debug( + # "tabularized inputs: {}".format( + # "; ".join(["{}: {}".format(inp, values.shape) for inp, values in inputs.items()]) + # ) + # ) def test_normalize(): @@ -736,7 +737,7 @@ def test_dataloader(): df_global = _normalize(df=df_global, config_normalization=m.config_normalization) dataset = _create_dataset(m, df_global, predict_mode=False) loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=True, drop_last=False) - for inputs, targets, meta in loader: + for _, meta in loader: assert set(meta["df_name"]) == set(df_global["ID"].unique()) break @@ -875,6 +876,7 @@ def test_too_many_NaN(): config_regressors=None, config_lagged_regressors=None, config_missing=config_missing, + config_model=None, ) From 3eb0f5228b02920efa8961eb27af29f56e21bf7a Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 23 Aug 2024 17:41:22 -0700 Subject: [PATCH 02/22] fixed linting issues --- neuralprophet/time_dataset.py | 2 +- neuralprophet/utils.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index 56c236fa7..9fe9b89ea 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -1,7 +1,7 @@ import logging from collections import OrderedDict from datetime import datetime -from typing import List, Optional +from typing import Optional import numpy as np import pandas as pd diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index bbb8739da..2b50d6893 100644 --- a/neuralprophet/utils.py 
+++ b/neuralprophet/utils.py @@ -1072,7 +1072,6 @@ def unpack_sliced_tensor( seasonality_offset : seasonality_offset + n_forecasts + n_lags, seasonality_start_idx:seasonality_end_idx, ] - s = inputs["seasonalities"][seasonality_name].shape # Unpack lagged regressor features if config_lagged_regressors: From 1f9c60330cbaee1b3af5a2646468ea0c8409ede0 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 23 Aug 2024 19:32:44 -0700 Subject: [PATCH 03/22] make targets contiguous --- neuralprophet/time_net.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 70f0a48ec..3998bfa6c 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -815,6 +815,7 @@ def training_step(self, batch, batch_idx): if self.metrics_enabled: predicted_denorm = self.denormalize(predicted[:, :, 0]) target_denorm = self.denormalize(targets.squeeze(dim=2)) + target_denorm = target_denorm.contiguous() self.log_dict(self.metrics_train(predicted_denorm, target_denorm), **self.log_args) self.log("Loss", loss, **self.log_args) self.log("RegLoss", reg_loss, **self.log_args) @@ -846,6 +847,7 @@ def validation_step(self, batch, batch_idx): if self.metrics_enabled: predicted_denorm = self.denormalize(predicted[:, :, 0]) target_denorm = self.denormalize(targets.squeeze(dim=2)) + target_denorm = target_denorm.contiguous() self.log_dict(self.metrics_val(predicted_denorm, target_denorm), **self.log_args) self.log("Loss_val", loss, **self.log_args) self.log("RegLoss_val", reg_loss, **self.log_args) @@ -877,7 +879,7 @@ def test_step(self, batch, batch_idx): predicted_denorm = self.denormalize(predicted[:, :, 0]) target_denorm = self.denormalize(targets.squeeze(dim=2)) # target_denorm = target_denorm.detach().clone() - + target_denorm = target_denorm.contiguous() self.log_dict(self.metrics_val(predicted_denorm, target_denorm), **self.log_args) self.log("Loss_test", loss, **self.log_args) self.log("RegLoss_test", reg_loss, **self.log_args) From 6074baf2de5fdbbd2f13ded7088d7052472e03eb Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 23 Aug 2024 19:40:50 -0700 Subject: [PATCH 04/22] fixed ruff warnings --- .../global_local_modeling_fut_regr.ipynb | 4 +- .../feature-guides/global_local_trend.ipynb | 4 +- .../how-to-guides/feature-guides/mlflow.ipynb | 4 +- .../network_architecture_visualization.ipynb | 4 +- .../prophet_to_torch_prophet.ipynb | 3 +- .../uncertainty_quantification.ipynb | 1151 ++++++++--------- tests/metrics/debug-energy-price-daily.ipynb | 166 ++- tests/metrics/debug-energy-price-hourly.ipynb | 15 +- tests/metrics/debug-yosemite.ipynb | 443 ++++--- 9 files changed, 893 insertions(+), 901 deletions(-) diff --git a/docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb b/docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb index 0e0b22391..02cf34719 100644 --- a/docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb +++ b/docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb @@ -750,9 +750,8 @@ "source": [ "try:\n", " # it already installed dependencies\n", - " from torchsummary import summary\n", " from torchviz import make_dot\n", - "except:\n", + "except ImportError:\n", " # install graphviz on system\n", " import platform\n", "\n", @@ -768,7 +767,6 @@ " !pip install torchviz\n", " !pip install graphviz\n", " # import\n", - " from torchsummary import summary\n", " from torchviz import make_dot" ] }, diff --git 
a/docs/source/how-to-guides/feature-guides/global_local_trend.ipynb b/docs/source/how-to-guides/feature-guides/global_local_trend.ipynb index 968214d97..ca7fd3eff 100644 --- a/docs/source/how-to-guides/feature-guides/global_local_trend.ipynb +++ b/docs/source/how-to-guides/feature-guides/global_local_trend.ipynb @@ -1308,9 +1308,8 @@ "source": [ "try:\n", " # it already installed dependencies\n", - " from torchsummary import summary\n", " from torchviz import make_dot\n", - "except:\n", + "except ImportError:\n", " # install graphviz on system\n", " import platform\n", "\n", @@ -1326,7 +1325,6 @@ " !pip install torchviz\n", " !pip install graphviz\n", " # import\n", - " from torchsummary import summary\n", " from torchviz import make_dot" ] }, diff --git a/docs/source/how-to-guides/feature-guides/mlflow.ipynb b/docs/source/how-to-guides/feature-guides/mlflow.ipynb index 5e3f72f6b..dda2feb9b 100644 --- a/docs/source/how-to-guides/feature-guides/mlflow.ipynb +++ b/docs/source/how-to-guides/feature-guides/mlflow.ipynb @@ -237,6 +237,7 @@ }, { "cell_type": "markdown", + "id": "7fb27b941602401d91542211134fc71a", "metadata": {}, "source": [ "## Advanced Example \n", @@ -246,6 +247,7 @@ { "cell_type": "code", "execution_count": 7, + "id": "acae54e37e7d407bbb7b55eff062a284", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +256,6 @@ "# Copy and paste url from command line to web browser\n", "\n", "import mlflow\n", - "import torchmetrics\n", "from mlflow.data.pandas_dataset import PandasDataset\n", "\n", "if local:\n", @@ -272,7 +273,6 @@ " )\n", "\n", " import mlflow.pytorch\n", - " from mlflow.client import MlflowClient\n", "\n", " model_name = \"NeuralProphet\"\n", "\n", diff --git a/docs/source/how-to-guides/feature-guides/network_architecture_visualization.ipynb b/docs/source/how-to-guides/feature-guides/network_architecture_visualization.ipynb index 70d99ce3f..65afb79c5 100644 --- a/docs/source/how-to-guides/feature-guides/network_architecture_visualization.ipynb +++ b/docs/source/how-to-guides/feature-guides/network_architecture_visualization.ipynb @@ -41,7 +41,7 @@ " # it already installed dependencies\n", " from torchsummary import summary\n", " from torchviz import make_dot\n", - "except:\n", + "except ImportError:\n", " # install graphviz on system\n", " import platform\n", "\n", @@ -69,7 +69,7 @@ "source": [ "try:\n", " from neuralprophet import NeuralProphet\n", - "except:\n", + "except ImportError:\n", " # if NeuralProphet is not installed yet:\n", " !pip install git+https://github.com/ourownstory/neural_prophet.git\n", " from neuralprophet import NeuralProphet" diff --git a/docs/source/how-to-guides/feature-guides/prophet_to_torch_prophet.ipynb b/docs/source/how-to-guides/feature-guides/prophet_to_torch_prophet.ipynb index 20827a015..beb9ccda8 100644 --- a/docs/source/how-to-guides/feature-guides/prophet_to_torch_prophet.ipynb +++ b/docs/source/how-to-guides/feature-guides/prophet_to_torch_prophet.ipynb @@ -240,11 +240,12 @@ "# Set loggers to ERROR level\n", "import logging\n", "import warnings\n", + "from neuralprophet import set_log_level\n", + "\n", "\n", "logging.getLogger(\"prophet\").setLevel(logging.ERROR)\n", "warnings.filterwarnings(\"ignore\")\n", "\n", - "from neuralprophet import set_log_level\n", "\n", "set_log_level(\"ERROR\")" ] diff --git a/docs/source/how-to-guides/feature-guides/uncertainty_quantification.ipynb b/docs/source/how-to-guides/feature-guides/uncertainty_quantification.ipynb index f5826a066..12bfb4490 100644 --- 
a/docs/source/how-to-guides/feature-guides/uncertainty_quantification.ipynb +++ b/docs/source/how-to-guides/feature-guides/uncertainty_quantification.ipynb @@ -37,7 +37,7 @@ "\n", "if \"google.colab\" in str(get_ipython()):\n", " # uninstall preinstalled packages from Colab to avoid conflicts\n", - " !pip uninstall -y torch notebook notebook_shim tensorflow tensorflow-datasets prophet torchaudio torchdata torchtext torchvision \n", + " !pip uninstall -y torch notebook notebook_shim tensorflow tensorflow-datasets prophet torchaudio torchdata torchtext torchvision\n", " !pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while" ] }, @@ -48,9 +48,8 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", - "from neuralprophet import NeuralProphet, uncertainty_evaluate, set_log_level, set_random_seed\n", + "from neuralprophet import NeuralProphet, uncertainty_evaluate, set_random_seed\n", "\n", "data_location = \"http://raw.githubusercontent.com/ourownstory/neuralprophet-data/main/datasets/\"\n", "df = pd.read_csv(data_location + \"energy/SF_hospital_load.csv\")" ] },
[... remaining hunks of this notebook diff omitted: they only re-render stored plotly figure output (thousands of trace "x"/"y" values differing in trailing floating-point digits); no further source cells change in the hunks shown ...]
9.454264769531278, 9.460932781249994, 9.482924309374994, - 9.514781890625045, + 9.514781890625043, 9.531484210937606, 9.61603807812503, 9.62764958359378, 9.696915185937542, - 9.851385616406219, + 9.85138561640622, 9.974350484375009, 10.032242534374973, 10.045953281249922, 10.124906748437525, - 10.189322503906283, + 10.189322503906284, 10.20220146874999, 10.218347406249904, - 10.233960171874969, + 10.233960171874967, 10.317207068750008, 10.32292830468748, 10.325045125000088, - 10.407904937500007, - 10.420459540624961, - 10.424609914062557, + 10.407904937500009, + 10.42045954062496, + 10.424609914062556, 10.475398433593796, 10.501325714062546, 10.527189749999934, @@ -4348,7 +4347,7 @@ 10.627883124999926, 10.683887758593755, 10.728771781250089, - 10.891481132031231, + 10.891481132031233, 10.932414820312488, 11.122864096093735, 11.138778578125084, @@ -4360,20 +4359,20 @@ 11.368246870312532, 11.414638312500074, 11.419191609375048, - 11.452359375000015, - 11.521182178124945, - 11.529868187499915, + 11.452359375000016, + 11.521182178124944, + 11.529868187499916, 11.539192914062596, - 11.582014523437465, + 11.582014523437463, 11.647098710937485, 11.673330509374978, 11.676187023437478, 11.717628749999903, - 11.730445694531227, + 11.730445694531229, 11.78606335937502, 11.874941121093798, - 11.931217171875005, - 11.980419671875097, + 11.931217171875003, + 11.980419671875095, 12.000028547656257, 12.000502010156197, 12.056363661718706, @@ -4382,55 +4381,55 @@ 12.300493265624937, 12.366829015624944, 12.496480789062502, - 12.500140249218703, + 12.500140249218704, 12.560961711718733, 12.595932650000009, 12.61279790937499, 12.746938492187496, - 12.800137067187507, - 12.902973164062587, + 12.800137067187508, + 12.902973164062589, 13.113131953125048, - 13.249054296875101, + 13.2490542968751, 13.261551625000038, 13.273602640625086, - 13.307964687500089, + 13.307964687500087, 13.511866686718804, - 13.525662640624887, + 13.525662640624889, 13.623032129687545, - 13.662243993750053, + 13.662243993750051, 13.81560521093752, 14.050207008593702, 14.152221585937468, 14.20678620312492, - 14.229343921874943, + 14.229343921874944, 14.240651158593778, 14.273983289062471, 14.28570114843751, 14.290197968749908, - 14.398581021093719, + 14.39858102109372, 14.4389301523438, 14.454995515625, 14.472414640624947, 14.7535989375001, - 14.769676832812479, - 14.785414335937503, + 14.76967683281248, + 14.785414335937505, 14.897566226562503, - 14.915011684375031, + 14.915011684375033, 14.960215429687423, 14.963093552343707, 15.04631802031247, - 15.083076333593795, - 15.092491062499903, - 15.110259437499963, - 15.169367106249979, - 15.214861064843717, + 15.083076333593796, + 15.092491062499905, + 15.110259437499964, + 15.16936710624998, + 15.214861064843715, 15.242971976562558, 15.332207570312448, 15.61805306249994, 15.882175812500009, - 15.896533173437547, - 15.905369125000107, - 15.919855020312525, + 15.896533173437549, + 15.905369125000108, + 15.919855020312523, 16.002272941406204, 16.062123660156203, 16.10274272031245, @@ -4490,13 +4489,13 @@ 26.600202428125044, 26.786436054687556, 27.73421778124998, - 27.756998476562558, + 27.75699847656256, 27.824165875000062, 28.03828033984371, - 28.101310816406226, + 28.101310816406222, 28.1804686179687, 28.622820462500044, - 29.896536875000038, + 29.89653687500004, 29.981169562499986, 31.005449832031218, 31.18378758593758, @@ -4530,7 +4529,7 @@ 75.27842375, 86.70767394843745, 109.48908857031245, - 113.98550865781249, + 113.98550865781247, 120.93971889062504 ], "yaxis": "y" @@ -5906,7 +5905,7 
@@ 0.00804289544235925, 0.00938337801608579, 0.010723860589812333, - 0.012064343163538873, + 0.012064343163538872, 0.013404825737265416, 0.014745308310991957, 0.0160857908847185, @@ -5918,7 +5917,7 @@ 0.024128686327077747, 0.02546916890080429, 0.02680965147453083, - 0.028150134048257374, + 0.028150134048257378, 0.029490616621983913, 0.030831099195710455, 0.032171581769437, @@ -5934,13 +5933,13 @@ 0.045576407506702415, 0.04691689008042895, 0.04825737265415549, - 0.049597855227882036, + 0.04959785522788203, 0.05093833780160858, 0.05227882037533512, 0.05361930294906166, 0.054959785522788206, 0.05630026809651475, - 0.057640750670241284, + 0.05764075067024128, 0.058981233243967826, 0.06032171581769437, 0.06166219839142091, @@ -5965,27 +5964,27 @@ 0.0871313672922252, 0.08847184986595175, 0.08981233243967829, - 0.09115281501340483, - 0.09249329758713137, + 0.09115281501340484, + 0.09249329758713136, 0.0938337801608579, 0.09517426273458444, - 0.09651474530831099, - 0.09785522788203753, - 0.09919571045576407, - 0.10053619302949061, + 0.096514745308311, + 0.09785522788203752, + 0.09919571045576409, + 0.1005361930294906, 0.10187667560321716, 0.1032171581769437, 0.10455764075067024, 0.10589812332439678, 0.10723860589812333, - 0.10857908847184987, - 0.10991957104557641, + 0.10857908847184988, + 0.1099195710455764, 0.11126005361930295, 0.1126005361930295, 0.11394101876675604, - 0.11528150134048257, - 0.11662198391420911, - 0.11796246648793565, + 0.11528150134048255, + 0.11662198391420912, + 0.11796246648793564, 0.1193029490616622, 0.12064343163538874, 0.12198391420911528, @@ -5994,7 +5993,7 @@ 0.1260053619302949, 0.12734584450402145, 0.128686327077748, - 0.13002680965147453, + 0.1300268096514745, 0.13136729222520108, 0.13270777479892762, 0.13404825737265416, @@ -6010,7 +6009,7 @@ 0.14745308310991956, 0.1487935656836461, 0.15013404825737264, - 0.15147453083109919, + 0.1514745308310992, 0.15281501340482573, 0.15415549597855227, 0.1554959785522788, @@ -6033,7 +6032,7 @@ 0.17828418230563003, 0.17962466487935658, 0.18096514745308312, - 0.18230563002680966, + 0.1823056300268097, 0.1836461126005362, 0.18498659517426275, 0.1863270777479893, @@ -6043,42 +6042,42 @@ 0.19168900804289543, 0.19302949061662197, 0.19436997319034852, - 0.19571045576407506, + 0.19571045576407503, 0.1970509383378016, - 0.19839142091152814, + 0.19839142091152817, 0.19973190348525469, 0.20107238605898123, 0.20241286863270777, 0.2037533512064343, - 0.20509383378016086, + 0.20509383378016088, 0.2064343163538874, - 0.20777479892761394, + 0.2077747989276139, 0.20911528150134048, - 0.21045576407506703, + 0.21045576407506705, 0.21179624664879357, 0.2131367292225201, 0.21447721179624665, 0.2158176943699732, - 0.21715817694369974, + 0.21715817694369977, 0.21849865951742628, - 0.21983914209115282, + 0.2198391420911528, 0.22117962466487937, 0.2225201072386059, 0.22386058981233245, 0.225201072386059, - 0.22654155495978553, + 0.2265415549597855, 0.22788203753351208, 0.2292225201072386, - 0.23056300268096513, + 0.2305630026809651, 0.23190348525469168, - 0.23324396782841822, + 0.23324396782841825, 0.23458445040214476, 0.2359249329758713, 0.23726541554959785, 0.2386058981233244, 0.23994638069705093, 0.24128686327077747, - 0.24262734584450402, + 0.242627345844504, 0.24396782841823056, 0.2453083109919571, 0.24664879356568364, @@ -6174,13 +6173,13 @@ 0.3672922252010724, 0.36863270777479895, 0.3699731903485255, - 0.37131367292225204, + 0.371313672922252, 0.3726541554959786, 0.3739946380697051, 0.3753351206434316, 0.37667560321715815, 0.3780160857908847, - 
0.37935656836461124, + 0.3793565683646112, 0.3806970509383378, 0.3820375335120643, 0.38337801608579086, @@ -6194,7 +6193,7 @@ 0.3941018766756032, 0.39544235924932974, 0.3967828418230563, - 0.39812332439678283, + 0.3981233243967828, 0.39946380697050937, 0.4008042895442359, 0.40214477211796246, @@ -6202,7 +6201,7 @@ 0.40482573726541554, 0.4061662198391421, 0.4075067024128686, - 0.40884718498659517, + 0.4088471849865952, 0.4101876675603217, 0.41152815013404825, 0.4128686327077748, @@ -6211,12 +6210,12 @@ 0.4168900804289544, 0.41823056300268097, 0.4195710455764075, - 0.42091152815013405, + 0.4209115281501341, 0.4222520107238606, 0.42359249329758714, 0.4249329758713137, 0.4262734584450402, - 0.42761394101876676, + 0.4276139410187667, 0.4289544235924933, 0.43029490616621985, 0.4316353887399464, @@ -6225,32 +6224,32 @@ 0.435656836461126, 0.43699731903485256, 0.4383378016085791, - 0.43967828418230565, + 0.4396782841823057, 0.4410187667560322, 0.44235924932975873, 0.4436997319034853, 0.4450402144772118, - 0.44638069705093836, + 0.4463806970509384, 0.4477211796246649, 0.44906166219839144, 0.450402144772118, 0.4517426273458445, - 0.45308310991957107, + 0.453083109919571, 0.4544235924932976, 0.45576407506702415, 0.4571045576407507, 0.4584450402144772, 0.4597855227882037, - 0.46112600536193027, + 0.4611260053619302, 0.4624664879356568, 0.46380697050938335, 0.4651474530831099, - 0.46648793565683644, + 0.4664879356568365, 0.467828418230563, 0.4691689008042895, 0.47050938337801607, 0.4718498659517426, - 0.47319034852546915, + 0.4731903485254691, 0.4745308310991957, 0.47587131367292224, 0.4772117962466488, @@ -6259,7 +6258,7 @@ 0.4812332439678284, 0.48257372654155495, 0.4839142091152815, - 0.48525469168900803, + 0.485254691689008, 0.4865951742627346, 0.4879356568364611, 0.48927613941018766, @@ -6570,79 +6569,79 @@ 0.8981233243967829, 0.8994638069705094, 0.900804289544236, - 0.9021447721179625, + 0.9021447721179624, 0.903485254691689, 0.9048257372654156, - 0.9061662198391421, - 0.9075067024128687, + 0.906166219839142, + 0.9075067024128688, 0.9088471849865952, 0.9101876675603218, - 0.9115281501340483, + 0.9115281501340484, 0.9128686327077749, 0.9142091152815014, - 0.9155495978552279, + 0.915549597855228, 0.9168900804289544, - 0.9182305630026809, - 0.9195710455764075, + 0.9182305630026808, + 0.9195710455764076, 0.920911528150134, - 0.9222520107238605, - 0.9235924932975871, + 0.9222520107238604, + 0.9235924932975872, 0.9249329758713136, 0.9262734584450402, - 0.9276139410187667, + 0.9276139410187668, 0.9289544235924933, 0.9302949061662198, - 0.9316353887399463, - 0.9329758713136729, + 0.9316353887399464, + 0.9329758713136728, 0.9343163538873994, 0.935656836461126, - 0.9369973190348525, + 0.9369973190348524, 0.938337801608579, 0.9396782841823056, - 0.9410187667560321, - 0.9423592493297587, + 0.941018766756032, + 0.9423592493297588, 0.9436997319034852, 0.9450402144772118, - 0.9463806970509383, + 0.9463806970509384, 0.9477211796246648, 0.9490616621983914, - 0.9504021447721179, - 0.9517426273458445, + 0.950402144772118, + 0.9517426273458444, 0.953083109919571, 0.9544235924932976, - 0.9557640750670241, + 0.955764075067024, 0.9571045576407506, 0.9584450402144772, - 0.9597855227882037, - 0.9611260053619303, + 0.9597855227882036, + 0.9611260053619304, 0.9624664879356568, 0.9638069705093834, - 0.9651474530831099, + 0.96514745308311, 0.9664879356568364, 0.967828418230563, - 0.9691689008042895, - 0.9705093833780161, + 0.9691689008042896, + 0.970509383378016, 0.9718498659517426, 0.9731903485254692, - 0.9745308310991957, + 
0.9745308310991956, 0.9758713136729222, 0.9772117962466488, - 0.9785522788203753, - 0.9798927613941019, + 0.9785522788203752, + 0.979892761394102, 0.9812332439678284, 0.982573726541555, - 0.9839142091152815, + 0.9839142091152816, 0.985254691689008, 0.9865951742627346, - 0.9879356568364611, - 0.9892761394101877, + 0.9879356568364612, + 0.9892761394101875, 0.9906166219839142, 0.9919571045576407, - 0.9932975871313673, + 0.9932975871313672, 0.9946380697050938, 0.9959785522788204, - 0.9973190348525469, - 0.9986595174262735 + 0.9973190348525468, + 0.9986595174262736 ], "xaxis": "x", "y": [ @@ -6729,7 +6728,7 @@ -57.0593171406249, -56.705454890625106, -56.01621656640623, - -55.934297421875044, + -55.93429742187504, -55.8416539765625, -55.580356343750054, -55.26232002421875, @@ -6739,7 +6738,7 @@ -54.185493767187495, -54.057975671875056, -53.916748373437485, - -53.791122328125084, + -53.79112232812509, -53.236848843749954, -52.98524591171872, -52.98285808281253, @@ -6754,11 +6753,11 @@ -52.16572122031255, -52.117616757812584, -51.859152067968694, - -51.639772929687524, + -51.63977292968752, -51.57725179687509, -51.28778995312496, -51.02731640000002, - -50.989671000000044, + -50.98967100000005, -50.96181483515625, -50.820276640625025, -50.71525511718755, @@ -6776,11 +6775,11 @@ -48.97065449921877, -48.89639728750001, -48.4294292187501, - -48.350934850000044, + -48.35093485000005, -48.31362008359372, -48.0237554859375, -47.24619073906251, - -46.581820796875036, + -46.58182079687504, -46.4280693515625, -46.41055214609378, -46.29144639843753, @@ -6804,7 +6803,7 @@ -43.455365300781295, -43.370962585937605, -43.26062182812507, - -43.076028609375044, + -43.07602860937504, -43.06471102734372, -42.96778042343749, -42.83498593750005, @@ -6872,11 +6871,11 @@ -30.77648755859377, -30.440291164843757, -30.280788660156304, - -29.788544131249978, + -29.788544131249974, -29.623468356249987, -29.59463303124994, -29.430296626562495, - -29.135102148437454, + -29.135102148437458, -29.130755007812468, -29.01730456249993, -28.734704968750066, @@ -6886,22 +6885,22 @@ -27.87224317187497, -27.714741875000072, -27.6110206796875, - -27.529442257812434, + -27.52944225781243, -27.029480960937462, -27.023702346874984, -27.017659429687455, - -26.702298171874986, + -26.702298171874983, -26.59035925781245, -26.386985328125093, -26.341416935156303, -26.236914703124967, - -26.012464156249962, + -26.01246415624996, -25.76879892187503, -25.241830820312543, -25.12886373359379, -25.045803507031223, -24.363234290624973, - -24.213406348437502, + -24.213406348437505, -24.12599175781247, -23.893109437500016, -23.825013892187485, @@ -6918,13 +6917,13 @@ -21.86243396875011, -21.83929801953127, -21.648397164062544, - -21.572867043749966, + -21.57286704374997, -21.511361882031224, -21.45443628515625, -21.30516381250004, -21.19316906249992, -21.046418935156225, - -20.841245542187494, + -20.841245542187497, -20.77027079687491, -20.701492545312476, -20.563732031250083, @@ -6932,7 +6931,7 @@ -20.203443664062547, -19.873675180468695, -18.872633782031244, - -18.858698859374954, + -18.85869885937495, -18.48740324531252, -17.212613281250015, -17.067088445312493, @@ -6943,32 +6942,32 @@ -16.357414797656247, -16.295826452343704, -15.833784078125063, - -15.817047479687517, + -15.817047479687515, -15.761399593749957, -15.485869812499914, - -15.318293648437475, + -15.318293648437477, -15.165238338281256, - -15.081934312499925, + -15.081934312499923, -14.810001465624964, -14.572855675781284, - -14.422110454687527, - -14.384195726562439, + 
-14.422110454687529, + -14.38419572656244, -14.339528283593722, - -14.334822556249947, - -14.229587875000107, + -14.334822556249948, + -14.229587875000108, -14.18462359218745, -14.057515992187518, - -14.048920492187563, + -14.048920492187564, -13.436824113281205, -13.241188343750082, -13.149206565625036, -12.84422139062508, - -12.626313646093763, + -12.626313646093765, -12.599335237499986, -12.240985164062522, -11.79333903125007, -11.719639523437536, - -11.477931468750057, + -11.477931468750056, -11.206720296874892, -11.031213718749996, -10.916255046874994, @@ -6980,10 +6979,10 @@ -10.207833242187462, -10.196783863281212, -10.17245331250001, - -10.119152184375025, - -10.112242011718763, + -10.119152184375023, + -10.112242011718765, -9.99025459453128, - -9.761127504687465, + -9.761127504687463, -9.684402978906292, -9.318379273437472, -8.673964101562433, @@ -7015,7 +7014,7 @@ -4.699138242187473, -4.276188648437483, -4.044365687499976, - -3.9136513718749484, + -3.913651371874949, -3.887053321093731, -3.784734528125, -3.712606874999892, @@ -7025,7 +7024,7 @@ -3.193393312499893, -3.169401906249959, -3.0263113124999563, - -2.9748279999998886, + -2.974827999999889, -2.9515683515624005, -2.944126335937426, -2.7629464515624704, @@ -7053,7 +7052,7 @@ 0.8449198898437089, 1.285782835937539, 1.2915759687500667, - 1.8477997929687717, + 1.8477997929687715, 1.9050155781250169, 2.043462328125088, 2.1782914687501034, @@ -7085,7 +7084,7 @@ 7.118878305468797, 7.2292306249999, 7.231686390624986, - 7.3578193789062425, + 7.357819378906243, 7.562534926562535, 7.574006783593745, 7.622807653125051, @@ -7096,38 +7095,38 @@ 8.61003160703126, 8.803814406250012, 9.1686380000001, - 9.268865282812499, - 9.346264678124953, + 9.2688652828125, + 9.346264678124951, 9.701087796875072, 9.783214179687548, 10.0690137859375, 10.416856265625029, 10.742859468750112, - 11.323433436718801, + 11.3234334367188, 11.325459160937498, - 11.381607578125113, + 11.381607578125111, 11.42029302656249, 11.995094820312488, 12.034202820312544, 12.09536775624997, 12.708938217187551, 13.066946328124914, - 13.213650921874887, + 13.213650921874889, 13.510464226562476, 13.51272058437496, 13.750324437500012, - 13.844002691406217, + 13.844002691406216, 13.889585281249992, - 13.918457242187515, + 13.918457242187516, 14.145694278125006, - 14.405962593750019, + 14.40596259375002, 14.440076116406203, 14.825045746875048, 15.157699214843774, 15.160715140624916, 15.272466416406246, 15.418664846875004, - 15.760033553125027, + 15.760033553125028, 16.688812828125037, 16.750012584374986, 16.84563811718749, @@ -7151,8 +7150,8 @@ 20.60017568750004, 20.776598096874977, 20.984626626562545, - 21.361611381249986, - 21.688419578124922, + 21.361611381249983, + 21.68841957812492, 21.796277514843723, 22.131049468750007, 22.569202683593744, @@ -7168,16 +7167,16 @@ 24.216844821093787, 24.226429262500005, 24.797429310156303, - 25.189587999999958, + 25.18958799999996, 25.224044276562495, - 25.472283437499982, + 25.472283437499986, 25.600610862500048, 25.618695328125114, - 25.641669687500098, + 25.641669687500094, 25.872127984375084, 25.902239576562465, 25.99596818750001, - 26.041979069531294, + 26.041979069531298, 26.050435812499927, 26.061142034375052, 26.29048228906254, @@ -7206,10 +7205,10 @@ 29.586522009375017, 29.609271292187486, 29.736408548437453, - 29.935590092187454, + 29.935590092187457, 30.036106140624952, 30.37676375000001, - 30.627782960937566, + 30.62778296093757, 30.917090304687463, 31.127244103125008, 31.15481084999999, @@ -7234,7 +7233,7 @@ 34.750481812500084, 
34.89158478125, 35.24791875781261, - 36.151865034374964, + 36.15186503437496, 36.16325446718747, 36.32386289062504, 36.35672028125009, @@ -7263,7 +7262,7 @@ 44.262365484375096, 44.347944699218715, 45.171410078125064, - 45.437934789062524, + 45.43793478906253, 45.739947687500035, 45.84610094062498, 45.99313161093755, @@ -7344,29 +7343,29 @@ 84.98260526796878, 88.07364192187492, 88.54068858437495, - 90.70126235156249, - 93.55437555468757, - 94.01358264999999, - 99.99081339062491, - 102.50214167968761, + 90.70126235156248, + 93.55437555468755, + 94.01358265, + 99.99081339062492, + 102.5021416796876, 102.669350984375, 103.59470629999998, 105.32794204687502, 106.28425237343754, 106.6081903984375, 106.87663745625002, - 107.42232157031253, + 107.42232157031252, 107.74175597109377, - 109.95607199999995, + 109.95607199999996, 110.40994641875, - 110.46772761718739, + 110.4677276171874, 110.9477608117187, 114.5724112179688, - 115.35331540781249, + 115.35331540781247, 115.52324396874997, - 117.04493578124993, - 118.16272385937509, - 118.85339624453127, + 117.04493578124992, + 118.16272385937508, + 118.85339624453128, 120.12981871093746, 120.44099838515626, 125.66193014062492, @@ -7381,13 +7380,13 @@ 159.50602216562504, 163.503064671875, 196.84248712968747, - 198.53539105624998, + 198.53539105625, 198.67866376562495, 201.83545544062497, 205.07729349374995, 213.16199832656253, - 214.58826899843746, - 220.19935211718746, + 214.58826899843743, + 220.19935211718743, 223.06068914843752, 229.76755830312504, 234.59282020625005, @@ -8389,18 +8388,18 @@ 0.005934718100890208, 0.00741839762611276, 0.008902077151335312, - 0.010385756676557863, + 0.010385756676557865, 0.011869436201780416, - 0.013353115727002967, + 0.013353115727002969, 0.01483679525222552, 0.016320474777448073, 0.017804154302670624, 0.019287833827893175, - 0.020771513353115726, + 0.02077151335311573, 0.02225519287833828, 0.02373887240356083, 0.025222551928783383, - 0.026706231454005934, + 0.026706231454005937, 0.028189910979228485, 0.02967359050445104, 0.03115727002967359, @@ -8443,28 +8442,28 @@ 0.08605341246290801, 0.08753709198813056, 0.08902077151335312, - 0.09050445103857567, + 0.09050445103857568, 0.09198813056379822, 0.09347181008902077, 0.09495548961424333, 0.09643916913946587, 0.09792284866468842, 0.09940652818991098, - 0.10089020771513353, + 0.10089020771513352, 0.10237388724035608, 0.10385756676557864, - 0.10534124629080119, + 0.1053412462908012, 0.10682492581602374, 0.1083086053412463, 0.10979228486646884, - 0.11127596439169139, + 0.1112759643916914, 0.11275964391691394, 0.1142433234421365, - 0.11572700296735905, + 0.11572700296735904, 0.1172106824925816, 0.11869436201780416, 0.1201780415430267, - 0.12166172106824925, + 0.12166172106824924, 0.12314540059347182, 0.12462908011869436, 0.1261127596439169, @@ -8499,15 +8498,15 @@ 0.16913946587537093, 0.17062314540059348, 0.17210682492581603, - 0.17359050445103857, + 0.17359050445103855, 0.17507418397626112, 0.17655786350148367, 0.17804154302670624, 0.1795252225519288, - 0.18100890207715134, + 0.18100890207715137, 0.1824925816023739, 0.18397626112759644, - 0.18545994065281898, + 0.18545994065281896, 0.18694362017804153, 0.1884272997032641, 0.18991097922848665, @@ -8515,27 +8514,27 @@ 0.19287833827893175, 0.1943620178041543, 0.19584569732937684, - 0.19732937685459942, + 0.19732937685459945, 0.19881305637982197, 0.20029673590504452, - 0.20178041543026706, + 0.20178041543026703, 0.2032640949554896, 0.20474777448071216, 0.2062314540059347, 0.20771513353115728, 0.20919881305637983, - 
0.21068249258160238, + 0.2106824925816024, 0.21216617210682492, 0.21364985163204747, - 0.21513353115727002, + 0.21513353115727, 0.2166172106824926, - 0.21810089020771514, + 0.21810089020771511, 0.2195845697329377, 0.22106824925816024, - 0.22255192878338279, + 0.2225519287833828, 0.22403560830860533, 0.22551928783382788, - 0.22700296735905046, + 0.22700296735905048, 0.228486646884273, 0.22997032640949555, 0.2314540059347181, @@ -8543,13 +8542,13 @@ 0.2344213649851632, 0.23590504451038577, 0.23738872403560832, - 0.23887240356083086, + 0.2388724035608309, 0.2403560830860534, 0.24183976261127596, 0.2433234421364985, - 0.24480712166172106, + 0.24480712166172103, 0.24629080118694363, - 0.24777448071216618, + 0.24777448071216615, 0.24925816023738873, 0.2507418397626113, 0.2522255192878338, @@ -8627,28 +8626,28 @@ 0.3590504451038576, 0.36053412462908013, 0.3620178041543027, - 0.36350148367952523, + 0.3635014836795252, 0.3649851632047478, 0.3664688427299703, 0.36795252225519287, 0.3694362017804154, - 0.37091988130563797, + 0.370919881305638, 0.3724035608308605, 0.37388724035608306, 0.37537091988130566, 0.3768545994065282, - 0.37833827893175076, + 0.3783382789317507, 0.3798219584569733, 0.38130563798219586, 0.3827893175074184, 0.38427299703264095, 0.3857566765578635, - 0.38724035608308605, + 0.3872403560830861, 0.3887240356083086, 0.39020771513353114, 0.3916913946587537, 0.39317507418397624, - 0.39465875370919884, + 0.3946587537091989, 0.3961424332344214, 0.39762611275964393, 0.3991097922848665, @@ -8666,34 +8665,34 @@ 0.4169139465875371, 0.41839762611275966, 0.4198813056379822, - 0.42136498516320475, + 0.4213649851632047, 0.4228486646884273, 0.42433234421364985, 0.4258160237388724, 0.42729970326409494, 0.4287833827893175, - 0.43026706231454004, + 0.43026706231454, 0.4317507418397626, 0.4332344213649852, 0.43471810089020774, 0.4362017804154303, - 0.43768545994065283, + 0.4376854599406528, 0.4391691394658754, 0.4406528189910979, 0.4421364985163205, 0.443620178041543, - 0.44510385756676557, + 0.4451038575667656, 0.4465875370919881, 0.44807121661721067, 0.4495548961424332, 0.45103857566765576, - 0.45252225519287836, + 0.4525222551928784, 0.4540059347181009, 0.45548961424332346, 0.456973293768546, 0.45845697329376855, 0.4599406528189911, - 0.46142433234421365, + 0.4614243323442137, 0.4629080118694362, 0.46439169139465875, 0.4658753709198813, @@ -8704,19 +8703,19 @@ 0.4732937685459941, 0.47477744807121663, 0.4762611275964392, - 0.47774480712166173, + 0.4777448071216618, 0.4792284866468843, 0.4807121661721068, 0.4821958456973294, 0.4836795252225519, 0.48516320474777447, 0.486646884272997, - 0.48813056379821956, + 0.4881305637982195, 0.4896142433234421, 0.4910979228486647, 0.49258160237388726, 0.4940652818991098, - 0.49554896142433236, + 0.4955489614243323, 0.4970326409495549, 0.49851632047477745, 0.5, @@ -8990,72 +8989,72 @@ 0.8976261127596439, 0.8991097922848664, 0.900593471810089, - 0.9020771513353115, - 0.9035608308605341, - 0.9050445103857567, - 0.9065281899109793, + 0.9020771513353116, + 0.903560830860534, + 0.9050445103857568, + 0.9065281899109792, 0.9080118694362018, 0.9094955489614244, - 0.9109792284866469, - 0.9124629080118695, + 0.9109792284866468, + 0.9124629080118696, 0.913946587537092, 0.9154302670623146, - 0.9169139465875371, - 0.9183976261127597, + 0.9169139465875372, + 0.9183976261127595, 0.9198813056379822, 0.9213649851632048, - 0.9228486646884273, + 0.9228486646884272, 0.9243323442136498, 0.9258160237388724, - 0.9272997032640949, - 0.9287833827893175, + 0.9272997032640948, + 
0.9287833827893176, 0.93026706231454, 0.9317507418397626, - 0.9332344213649851, - 0.9347181008902077, + 0.9332344213649852, + 0.9347181008902076, 0.9362017804154302, 0.9376854599406528, - 0.9391691394658753, - 0.9406528189910979, + 0.9391691394658752, + 0.940652818991098, 0.9421364985163204, - 0.9436201780415431, + 0.9436201780415432, 0.9451038575667656, 0.9465875370919882, - 0.9480712166172107, - 0.9495548961424333, + 0.9480712166172108, + 0.9495548961424332, 0.9510385756676558, 0.9525222551928784, - 0.9540059347181009, - 0.9554896142433235, + 0.9540059347181008, + 0.9554896142433236, 0.956973293768546, 0.9584569732937686, - 0.9599406528189911, + 0.9599406528189912, 0.9614243323442137, 0.9629080118694362, 0.9643916913946587, - 0.9658753709198813, + 0.9658753709198812, 0.9673590504451038, 0.9688427299703264, - 0.9703264094955489, - 0.9718100890207715, + 0.9703264094955488, + 0.9718100890207716, 0.973293768545994, 0.9747774480712166, - 0.9762611275964391, - 0.9777448071216617, + 0.9762611275964392, + 0.9777448071216616, 0.9792284866468842, 0.9807121661721068, 0.9821958456973294, 0.983679525222552, - 0.9851632047477745, - 0.9866468842729971, + 0.9851632047477744, + 0.9866468842729972, 0.9881305637982196, 0.9896142433234422, - 0.9910979228486647, - 0.9925816023738873, + 0.9910979228486648, + 0.9925816023738872, 0.9940652818991098, 0.9955489614243324, - 0.9970326409495549, - 0.9985163204747775 + 0.9970326409495548, + 0.9985163204747776 ], "xaxis": "x", "y": [ @@ -9067,7 +9066,7 @@ -14.309235617187596, -14.24235366406242, -14.066477710937534, - -13.514584864843755, + -13.514584864843757, -13.166378682812478, -12.904473031250063, -12.18766965624991, @@ -9078,21 +9077,21 @@ -11.825391523437474, -11.749199500000032, -11.71609387500007, - -11.706091180468775, + -11.706091180468777, -11.521924700781256, - -11.462358661718781, + -11.46235866171878, -11.327371703124982, -11.232657906250097, - -11.181183578125001, + -11.181183578125, -11.152675190625018, -11.148357101562397, -11.09528363437505, -11.072009125000022, - -10.936835051562525, - -10.686652632812411, - -10.619751187500015, + -10.936835051562523, + -10.686652632812413, + -10.619751187500016, -10.569293203124971, - -10.430565945312537, + -10.430565945312535, -10.213071718749916, -10.193477218750104, -10.190054514843723, @@ -9102,26 +9101,26 @@ -10.00137469453125, -9.9369479531249, -9.928306492968773, - -9.925147124999967, - -9.924847234374965, + -9.925147124999969, + -9.924847234374964, -9.883816281249892, -9.809590632812387, -9.800391726562566, - -9.679733826562483, + -9.679733826562485, -9.664560247656254, - -9.631072687499909, + -9.631072687499907, -9.62131115624993, -9.54744210937497, - -9.512998218749999, + -9.51299821875, -9.492631390625093, -9.40242481249993, -9.3808767968751, -9.360617398437398, -9.348053408593728, -9.305442515624918, - -9.218917042187513, - -9.178297767187473, - -9.158086109375063, + -9.218917042187512, + -9.178297767187471, + -9.158086109375065, -9.095743890625045, -9.045172945312515, -9.022309182031222, @@ -9150,14 +9149,14 @@ -8.087359308593705, -7.939271718749978, -7.909962792968713, - -7.8621265757813035, + -7.862126575781304, -7.858894984374956, -7.757063625781257, -7.746579538281253, -7.712599210937469, -7.647378421874919, -7.639133484374952, - -7.6281778593750005, + -7.628177859375, -7.621863436718741, -7.564779398437395, -7.462877451562463, @@ -9323,7 +9322,7 @@ -4.072672171874956, -4.061563890624939, -4.000764351562566, - -3.9989443984375157, + -3.998944398437516, -3.995521890625014, -3.963064797656216, 
-3.93833562421878, @@ -9331,17 +9330,17 @@ -3.896596999999929, -3.88089589062497, -3.855168990624975, - -3.8329143398437964, + -3.832914339843797, -3.813900335937433, - -3.8019717632812444, + -3.801971763281245, -3.76370848281249, -3.751170439062548, -3.7286807765625554, -3.717588131249954, -3.710383664062533, -3.710267207031279, - -3.7006866249998893, - -3.6942879914062132, + -3.7006866249998898, + -3.694287991406213, -3.661189093749954, -3.649796370312515, -3.64890008750001, @@ -9373,7 +9372,7 @@ -3.122256425781302, -3.1107804109375365, -3.0883104687500236, - -3.0523440624999694, + -3.05234406249997, -2.993336407812535, -2.965237403125002, -2.934427468750073, @@ -9388,7 +9387,7 @@ -2.731744132812537, -2.7298918984374723, -2.7297696179687136, - -2.7274141468750486, + -2.727414146875049, -2.7241283632812383, -2.719341217968804, -2.713266625781216, @@ -9425,7 +9424,7 @@ -2.077676312499989, -2.033345650000001, -2.003058359375018, - -1.9643209859375474, + -1.9643209859375472, -1.937460900781275, -1.9320355445312316, -1.8817049609374408, @@ -9433,7 +9432,7 @@ -1.838883357812506, -1.797861492187394, -1.785446045312483, - -1.7693507031249283, + -1.7693507031249285, -1.763804747656195, -1.7332840624999335, -1.7320584843749884, @@ -9447,18 +9446,18 @@ -1.3884675609375563, -1.386344611718755, -1.364935390625078, - -1.3485898374999579, + -1.348589837499958, -1.346948219531214, -1.3133324460937956, - -1.3129604898437037, + -1.3129604898437035, -1.2749313390625048, -1.265159109374963, -1.2541138226562225, -1.164478724218725, - -1.1132771210938017, + -1.1132771210938015, -1.026147002343805, - -0.9739041101562407, - -0.9579455468749529, + -0.9739041101562408, + -0.9579455468749528, -0.9333009132813004, -0.921216481249985, -0.8916767953124918, @@ -9478,7 +9477,7 @@ -0.525486804687489, -0.452777156250022, -0.4255417890624358, - -0.41209574765628076, + -0.4120957476562808, -0.3960447109375309, -0.3718818835938009, -0.363579061718724, @@ -9490,7 +9489,7 @@ -0.0836858531249618, -0.05689089843758666, -0.02421188906248517, - 0.047095565624999836, + 0.04709556562499984, 0.049148796874987966, 0.06822613750000528, 0.07776615078125815, @@ -9520,23 +9519,23 @@ 0.74899222656245, 0.8348331249999319, 0.8785757812499924, - 0.9207599312500179, + 0.920759931250018, 0.9216833359374732, - 0.9395244507812777, + 0.9395244507812776, 0.942018042968698, 0.9493292609374748, - 0.9504274843750409, - 0.9739644843750739, + 0.9504274843750408, + 0.973964484375074, 1.0032418515625068, 1.0359412890625208, - 1.0759015390624427, + 1.075901539062443, 1.0987220601562058, 1.1085956960937438, 1.118762763281211, 1.1442620281250129, - 1.2081933281250485, + 1.2081933281250483, 1.2900300312498985, - 1.3172563437500457, + 1.3172563437500455, 1.3994988851562766, 1.478665359374986, 1.490501715624987, @@ -9550,12 +9549,12 @@ 1.7705598671875578, 1.8069100374999607, 1.813871900781237, - 1.8239767656250478, - 1.8583874874999537, + 1.823976765625048, + 1.8583874874999535, 1.9057164554687915, 1.9442713749999712, - 1.9480406562499866, - 1.9674865312499605, + 1.9480406562499863, + 1.9674865312499603, 1.9835932812500232, 2.0453099140625, 2.1100601156249468, @@ -9565,7 +9564,7 @@ 2.271744297656255, 2.343686828125101, 2.3478071390625246, - 2.4388152789063042, + 2.438815278906304, 2.5424472656250146, 2.6354472578125296, 2.6390150304687268, @@ -9582,10 +9581,10 @@ 3.314682650000009, 3.322086312500005, 3.389368437500025, - 3.5261255781249474, + 3.526125578124947, 3.5615759218750327, 3.5701146492187945, - 3.5744619843749206, + 3.574461984374921, 
diff --git a/tests/metrics/debug-energy-price-daily.ipynb b/tests/metrics/debug-energy-price-daily.ipynb
index e94b8d756..5c313798f 100644
--- a/tests/metrics/debug-energy-price-daily.ipynb
+++ b/tests/metrics/debug-energy-price-daily.ipynb
@@ -7,15 +7,13 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "import pathlib\n",
     "\n",
-    "import numpy as np\n",
     "import pandas as pd\n",
     "import plotly.graph_objects as go\n",
     "from plotly.subplots import make_subplots\n",
     "from plotly_resampler import unregister_plotly_resampler\n",
     "\n",
-    "from neuralprophet import NeuralProphet, set_random_seed"
+    "from neuralprophet import NeuralProphet"
    ]
   },
[… remaining hunks omitted: floating-point rounding noise in the notebook's regenerated plot output arrays; no substantive changes …]
diff --git a/tests/metrics/debug-energy-price-hourly.ipynb b/tests/metrics/debug-energy-price-hourly.ipynb
index 14a09c93e..dfcc16497 100644
--- a/tests/metrics/debug-energy-price-hourly.ipynb
+++ b/tests/metrics/debug-energy-price-hourly.ipynb
@@ -7,7 +7,6 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "import pathlib\n",
     "import torch\n",
     "\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "import plotly.graph_objects as go\n",
     "from plotly.subplots import make_subplots\n",
     "from plotly_resampler import unregister_plotly_resampler\n",
     "\n",
-    "from neuralprophet import NeuralProphet, set_random_seed"
+    "from neuralprophet import NeuralProphet"
    ]
   },
[… remaining hunks omitted: floating-point rounding noise in the notebook's regenerated plot output arrays; no substantive changes …]
diff --git a/tests/metrics/debug-yosemite.ipynb b/tests/metrics/debug-yosemite.ipynb
index a7a2b0e56..3f395cc46 100644
--- a/tests/metrics/debug-yosemite.ipynb
+++ b/tests/metrics/debug-yosemite.ipynb
@@ -7,7 +7,6 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "import pathlib\n",
     "import time\n",
     "\n",
     "import numpy as np\n",
[… remainder of this hunk and all following hunks omitted: floating-point rounding noise in the notebook's regenerated plot output arrays; no substantive changes …]
18.982922819879086, + 18.98292281987909, 19.060302520813217, 19.137682221747347, 19.21506192268148, 19.21961366979525, 19.365269577435967, - 19.442649278370098, + 19.442649278370094, 19.52002897930423, 19.597408680238352, 19.674788381172483, @@ -18729,7 +18728,7 @@ 20.079643056804308, 20.08530217557624, 20.09131498927141, - 20.096974108043334, + 20.096974108043337, 20.102986921738513, 20.108999735433684, 20.115012549128863, @@ -18743,15 +18742,15 @@ 20.162761363767004, 20.168420482538934, 20.174786991157355, - 20.180446109929278, - 20.186812618547698, + 20.18044610992928, + 20.186812618547695, 20.19282543224287, 20.1984845510148, - 20.207680619019186, + 20.207680619019182, 20.210156483481896, 20.21970624640953, 20.224703211054795, - 20.192181786498274, + 20.19218178649827, 20.17496456173305, 20.144356162150444, 20.10992171262, @@ -18780,7 +18779,7 @@ 19.36958104771559, 19.319842398393853, 19.289674465337647, - 19.298568189808734, + 19.298568189808737, 19.317467354309784, 19.33636651881084, 19.3552656833119, @@ -18789,7 +18788,7 @@ 19.410851461256172, 19.42975062575723, 19.449761505817165, - 19.468660670318222, + 19.468660670318226, 19.49534184373148, 19.515352723791416, 19.53314017273359, @@ -18797,7 +18796,7 @@ 19.560933061705725, 19.579832226206783, 19.599843106266725, - 19.616518839650006, + 19.61651883965001, 19.635418004151056, 19.655428884211, 19.674328048712056, @@ -18807,14 +18806,14 @@ 19.757706715628473, 19.76660044009956, 19.785499604600616, - 19.814404209131638, - 19.833189048651654, + 19.81440420913164, + 19.833189048651658, 19.836176115552583, - 19.842523632717054, - 19.848497766518918, + 19.842523632717057, + 19.84849776651892, 19.85484528368339, 19.86119280084786, - 19.867540318012338, + 19.867540318012335, 19.87426121853943, 19.883222419242216, 19.886209486143144, @@ -18843,18 +18842,18 @@ 19.888150145109663, 19.81067774919208, 19.785886582498463, - 19.702216394907474, + 19.70221639490747, 19.627842894826607, 19.57516166560265, 19.5224804363787, 19.472898102991444, - 19.420216873767494, + 19.420216873767497, 19.367535644543537, 19.31485441531958, 19.262173186095637, 19.259074290258923, 19.15990962348443, - 19.107228394260474, + 19.10722839426047, 19.054547165036524, 19.001865935812567, 18.949184706588618, @@ -18863,13 +18862,13 @@ 18.79423991475346, 18.741558685529505, 18.688877456305555, - 18.636196227081598, + 18.6361962270816, 18.58351499785764, 18.5308337686337, 18.50058737544728, 18.527783432072233, 18.547884865229804, - 18.567986298387382, + 18.567986298387385, 18.588087731544952, 18.58927016878952, 18.627108160615535, @@ -18882,15 +18881,15 @@ 18.76663575547399, 18.78673718863157, 18.806838621789147, - 18.826940054946718, + 18.82694005494672, 18.828122492191277, 18.865960484017307, - 18.886061917174878, + 18.88606191717488, 18.90616335033245, 18.92626478349002, 18.946366216647597, 18.966467649805175, - 18.967650087049734, + 18.967650087049737, 19.005488078875757, 19.025589512033328, 19.045690945190906, @@ -18902,8 +18901,8 @@ 19.372248062114707, 19.44046291361822, 19.508677765121725, - 19.512690403445454, - 19.641094829805006, + 19.512690403445458, + 19.64109482980501, 19.70930968130852, 19.777524532812023, 19.84573938431553, @@ -18913,14 +18912,14 @@ 20.114586152005828, 20.18280100350934, 20.251015855012852, - 20.319230706516358, + 20.31923070651636, 20.387445558019863, 20.42355930293349, 20.51986262270315, 20.588077474206656, 20.65629232571016, - 20.724507177213674, - 20.792722028717186, + 20.72450717721367, + 20.792722028717183, 20.796734667040916, 
20.92513909340046, 20.993353944903973, @@ -18932,14 +18931,14 @@ 21.067038807142858, 21.072053281638276, 21.07706775613368, - 21.082082230629098, + 21.082082230629094, 21.08709670512451, 21.089751426916195, 21.09683068502737, 21.10184515952279, 21.1068596340182, 21.11187410851361, - 21.116888583009022, + 21.116888583009025, 21.11718355209699, 21.126622562911884, 21.131637037407295, @@ -18963,13 +18962,13 @@ 20.720908015985877, 20.656350789974375, 20.591793563962867, - 20.527236337951358, + 20.52723633795136, 20.462679111939856, 20.458881628056822, 20.337362143799872, 20.272804917788363, 20.269007433905337, - 20.174070336829594, + 20.17407033682959, 20.11331059470112, 20.048753368689617, 19.98039865879508, @@ -18987,7 +18986,7 @@ 18.986493601025003, 18.7877406801555, 18.49545697299444, - 18.390234838416482, + 18.39023483841648, 18.19148191754698, 18.00442034496389, 17.68875394122996, @@ -19001,30 +19000,30 @@ 16.133804619133223, 16.016891136268796, 15.829829563685736, - 15.642767991102675, - 15.432323721946721, + 15.642767991102676, + 15.43232372194672, 15.14004001478566, 15.03481788020771, 14.847756307624648, - 14.637312038468695, + 14.637312038468696, 14.438559117599189, 14.263188893302546, 14.052744624146596, 13.760460916985556, 13.581912738845284, - 13.500481372197223, + 13.500481372197225, 13.346666568528656, 13.201899694487649, 13.048084890819082, - 12.821886650129999, + 12.82188665013, 12.740455283481948, - 12.586640479813381, + 12.58664047981338, 12.432825676144814, 12.288058802103803, 12.134243998435236, 11.98042919476667, - 11.826614391098103, - 11.672799587429525, + 11.826614391098104, + 11.672799587429523, 11.518984783760958, 11.292786543071886, 11.211355176423824, @@ -19032,17 +19031,17 @@ 10.90372556908669, 10.749910765418123, 10.596095961749556, - 10.442281158080979, + 10.44228115808098, 10.306562213667538, 10.152747409998971, 9.998932606330404, 9.772734365641336, 9.69130299899326, - 9.537488195324693, + 9.537488195324691, 9.30224202500805, 9.173463868026124, 9.09963412924814, - 9.070922564167821, + 9.07092256416782, 9.001194477544177, 8.898653173685881, 8.865839956451227, @@ -19078,31 +19077,31 @@ 8.404599166479821, 8.622994077843353, 8.828542229714925, - 9.046937141078457, + 9.046937141078455, 9.380952887869713, - 9.483726963805521, + 9.48372696380552, 9.702121875169054, 9.920516786532586, 10.126064938404133, 10.46008068519541, - 10.562854761131197, - 10.781249672494729, - 10.999644583858261, + 10.562854761131195, + 10.781249672494727, + 10.99964458385826, 11.230886254713754, 11.539208482521088, 11.75760339388462, 11.860377469820405, 12.194393216611685, - 12.515562203911001, + 12.515562203911, 12.618336279846764, 12.93950526714608, 13.157900178509612, 13.376295089873144, 13.481343847026606, - 13.862502380719159, + 13.86250238071916, 13.877162324322706, - 14.346280519636613, - 14.595499560897101, + 14.346280519636611, + 14.5954995608971, 14.844718602157611, 15.093937643418124, 15.343156684678634, @@ -19127,7 +19126,7 @@ 20.034338637817555, 20.048998581421102, 20.518116776734963, - 20.644765967502586, + 20.644765967502583, 20.906528200136684, 21.08452651832787, 21.262524836519056, @@ -19146,7 +19145,7 @@ 23.555561994393656, 23.639325908836575, 23.90108814147068, - 24.079086459661838, + 24.07908645966184, 24.257084777853024, 24.43508309604421, 24.613081414235396, @@ -19165,17 +19164,17 @@ 26.435649234556507, 26.441853333905335, 26.640384513067893, - 26.745854201997986, - 26.851323890928086, + 26.745854201997982, + 26.85132389092809, 26.956793579858186, 27.0622632687883, 
27.068467368137128, 27.26699854729967, 27.37246823622977, 27.47793792515988, - 27.583407614089978, - 27.688877303020078, - 27.695081402368906, + 27.583407614089975, + 27.68887730302008, + 27.695081402368903, 27.893612581531464, 27.999082270461564, 28.104551959391664, @@ -19188,26 +19187,26 @@ 28.785719378242405, 28.827140812373308, 28.85530738758233, - 28.856964244947562, + 28.85696424494756, 28.90998368063513, 28.93815025584415, 28.966316831053163, 28.994483406262184, 29.022649981471204, 29.024306838836445, - 29.077326274523998, + 29.077326274524, 29.105492849733025, 29.13365942494204, 29.16182600015106, 29.18999257536008, 29.191649432725313, 29.244668868412873, - 29.272835443621894, + 29.272835443621897, 29.30100201883092, 29.329168594039928, 29.357335169248948, 29.385501744457976, - 29.387158601823202, + 29.3871586018232, 29.44017803751077, 29.46834461271979, 29.49651118792881, @@ -19239,20 +19238,20 @@ 28.377708859157167, 28.321802431498227, 28.269184617230984, - 28.209989576180334, + 28.209989576180337, 28.157371761913097, - 28.101465334254158, + 28.10146533425416, 28.01596138606989, 27.992941092327975, 27.937034664669028, 27.85153071648476, 27.82522180935115, - 27.769315381692202, + 27.7693153816922, 27.713408954033262, 27.66079113976602, 27.60488471210708, 27.548978284448133, - 27.493071856789186, + 27.493071856789182, 27.410856521996628, 27.377970388079603, 27.32864118720407, @@ -19265,7 +19264,7 @@ 26.940584806983168, 26.881389765932525, 26.82877195166529, - 26.772865524006342, + 26.772865524006345, 26.716959096347395, 26.661052668688455, 26.608434854421212, @@ -19290,7 +19289,7 @@ 25.55278995568471, 25.500172141417472, 25.440977100366823, - 25.388359286099586, + 25.388359286099583, 25.33245285844064, 25.2765464307817, 25.223928616514456, @@ -19308,7 +19307,7 @@ 24.55634009799885, 24.503722283731612, 24.418218335547344, - 24.391909428413726, + 24.39190942841373, 24.33600300075478, 24.253787665962214, 24.2241901454369, @@ -19338,9 +19337,9 @@ 22.895590335189077, 22.81337500039652, 22.7870660932629, - 22.731159665603954, + 22.73115966560395, 22.645655717419693, - 22.589749289760746, + 22.589749289760743, 22.563440382627128, 22.50753395496819, 22.44833891391754 @@ -19395,7 +19394,7 @@ -1.1976579427719116, 0.520697832107544, 0.963047206401825, - -0.36859816312789917, + -0.3685981631278992, -0.9724488258361816, 0.7323274612426758, 0.6929634213447571, @@ -19408,13 +19407,13 @@ 0.9504795670509338, -0.2320098727941513, -1.0911118984222412, - 1.3453450202941895, + 1.3453450202941897, 0.8893485069274902, -1.168222188949585, -1.2859922647476196, -2.4805290699005127, 0.8306548595428467, - 1.5542938709259033, + 1.554293870925903, 5.3257856369018555, 0.5697658061981201, -1.3164734840393066, From 77fdd7f162609ba4562243df7560fa859e86d73d Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Wed, 28 Aug 2024 10:35:57 -0700 Subject: [PATCH 05/22] Unpack incrementally when needed --- neuralprophet/time_dataset.py | 114 +--------- neuralprophet/time_net.py | 393 ++++++++++++++++++---------------- neuralprophet/utils.py | 115 ++++++++++ 3 files changed, 332 insertions(+), 290 deletions(-) diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index 9fe9b89ea..fc5190880 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -224,17 +224,6 @@ def stack_all_features(self): ) current_idx += additive_regressors_tensor.size(1) - if self.config_seasonality and self.config_seasonality.periods: - for seasonality_name, features in self.seasonalities.items(): - 
seasonal_tensor = features - print(f"Seasonality tensor shape for {seasonality_name}: {seasonal_tensor.shape}") - feature_list.append(seasonal_tensor) - self.feature_indices[f"seasonality_{seasonality_name}"] = ( - current_idx, - current_idx + seasonal_tensor.size(1), - ) - current_idx += seasonal_tensor.size(1) - # Stack multiplicative regressor features if self.multiplicative_regressors_names: multiplicative_regressors_tensor = torch.cat( @@ -247,6 +236,17 @@ def stack_all_features(self): ) current_idx += len(self.multiplicative_regressors_names) + if self.config_seasonality and self.config_seasonality.periods: + for seasonality_name, features in self.seasonalities.items(): + seasonal_tensor = features + print(f"Seasonality tensor shape for {seasonality_name}: {seasonal_tensor.shape}") + feature_list.append(seasonal_tensor) + self.feature_indices[f"seasonality_{seasonality_name}"] = ( + current_idx, + current_idx + seasonal_tensor.size(1), + ) + current_idx += seasonal_tensor.size(1) + # Concatenate all features into one big tensor self.all_features = torch.cat(feature_list, dim=1) # Concatenating along the third dimension if self.config_model is not None: @@ -272,21 +272,6 @@ def compute_fourier_features(t, period): features *= condition_values self.seasonalities[name] = features - def get_sample_seasonalities(self, df_tensors, origin_index, n_forecasts, max_lags, n_lags, config_seasonality): - seasonalities = OrderedDict({}) - - # Determine the range of indices based on whether lags are used - if max_lags == 0: - indices = [origin_index] - else: - indices = list(range(origin_index - n_lags + 1, origin_index + n_forecasts + 1)) - - # Extract the precomputed seasonalities from self.seasonalities - for name, features in self.seasonalities.items(): - seasonalities[name] = features[indices, :] - - return seasonalities - def __getitem__(self, index): """Overrides parent class method to get an item at index. 
Parameters @@ -672,83 +657,6 @@ def sort_regressor_names(self, config): multiplicative_regressors_names.append(reg) return additive_regressors_names, multiplicative_regressors_names - def get_sample_targets(self, df_tensors, origin_index, n_forecasts, max_lags, predict_mode): - if "y_scaled" in self.df_tensors: - if max_lags == 0: - targets = df_tensors["y_scaled"][origin_index].unsqueeze(0).unsqueeze(1) - else: - targets = df_tensors["y_scaled"][origin_index + 1 : origin_index + n_forecasts + 1] - targets = targets.unsqueeze(1) - return targets - return torch.zeros((n_forecasts, 1), dtype=torch.float32) - - def get_sample_lagged_regressors(self, df_tensors, origin_index, config_lagged_regressors): - lagged_regressors = OrderedDict({}) - # Future TODO: optimize this computation for many lagged_regressors - for name, lagged_regressor in config_lagged_regressors.items(): - covar_lags = lagged_regressor.n_lags - assert covar_lags > 0 - # Indexing tensors instead of DataFrame - lagged_regressors[name] = df_tensors[name][origin_index - covar_lags + 1 : origin_index + 1] - return lagged_regressors - - def get_sample_future_regressors( - self, - df_tensors, - origin_index, - n_forecasts, - max_lags, - n_lags, - additive_regressors_names, - multiplicative_regressors_names, - ): - regressors = OrderedDict({}) - if max_lags == 0: - if additive_regressors_names: - regressors["additive"] = df_tensors["additive_regressors"][origin_index, :].unsqueeze(0) - - if multiplicative_regressors_names: - regressors["multiplicative"] = df_tensors["multiplicative_regressors"][origin_index, :].unsqueeze(0) - - else: - if additive_regressors_names: - regressors["additive"] = df_tensors["additive_regressors"][ - origin_index + 1 - n_lags : origin_index + n_forecasts + 1, : - ] - if multiplicative_regressors_names: - regressors["multiplicative"] = df_tensors["multiplicative_regressors"][ - origin_index + 1 - n_lags : origin_index + n_forecasts + 1, : - ] - - return regressors - - def get_sample_future_events( - self, - df_tensors, - origin_index, - n_forecasts, - max_lags, - n_lags, - additive_event_and_holiday_names, - multiplicative_event_and_holiday_names, - ): - events = OrderedDict({}) - if max_lags == 0: - if additive_event_and_holiday_names: - events["additive"] = df_tensors["additive_event_and_holiday"][origin_index, :].unsqueeze(0) - if multiplicative_event_and_holiday_names: - events["multiplicative"] = df_tensors["multiplicative_event_and_holiday"][origin_index, :].unsqueeze(0) - else: - if additive_event_and_holiday_names: - events["additive"] = df_tensors["additive_event_and_holiday"][ - origin_index + 1 - n_lags : origin_index + n_forecasts + 1, : - ] - if multiplicative_event_and_holiday_names: - events["multiplicative"] = df_tensors["multiplicative_event_and_holiday"][ - origin_index + 1 - n_lags : origin_index + n_forecasts + 1, : - ] - return events - class GlobalTimeDataset(TimeDataset): def __init__( diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 3998bfa6c..fb782374a 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -21,7 +21,15 @@ reg_func_seasonality_glocal, reg_func_trend, reg_func_trend_glocal, - unpack_sliced_tensor, + unpack_additive_events, + unpack_additive_regressor, + unpack_lagged_regressors, + unpack_lags, + unpack_multiplicative_events, + unpack_multiplicative_regressor, + unpack_seasonalities, + unpack_targets, + unpack_time_feature, ) from neuralprophet.utils_torch import init_parameter, interprete_model @@ -513,186 +521,205 @@ def 
forward_covar_net(self, covariates): x = x.view(x.shape[0], self.n_forecasts, len(self.quantiles)) return x - def forward(self, inputs: Dict, meta: Dict = None, compute_components_flag: bool = False) -> torch.Tensor: - """This method defines the model forward pass. - Note - ---- - Time input is required. Minimum model setup is a linear trend. - Parameters - ---------- - inputs : dict - Model inputs, each of len(df) but with varying dimensions - Note - ---- - Contains the following data: - Model Inputs - * ``time`` (torch.Tensor , loat), normalized time, dims: (batch, n_forecasts) - * ``lags`` (torch.Tensor, float), dims: (batch, n_lags) - * ``seasonalities`` (torch.Tensor, float), dict of named seasonalities (keys) with their features - (values), dims of each dict value (batch, n_forecasts, n_features) - * ``covariates`` (torch.Tensor, float), dict of named covariates (keys) with their features - (values), dims of each dict value: (batch, n_lags) - * ``events`` (torch.Tensor, float), all event features, dims (batch, n_forecasts, n_features) - * ``regressors``(torch.Tensor, float), all regressor features, dims (batch, n_forecasts, n_features) - * ``predict_mode`` (bool), optional and only passed during prediction - meta : dict, default=None - Metadata about the all the samples of the model input batch. - Contains the following: - Model Meta: - * ``df_name`` (list, str), time series ID corresponding to each sample of the input batch. - Note - ---- - The meta is sorted in the same way the inputs are sorted. - Note - ---- - The default None value allows the forward method to be used without providing the meta argument. - This was designed to avoid issues with the library `lr_finder` https://github.com/davidtvs/pytorch-lr-finder - while having ``config_trend.trend_global_local="local"``. - The turnaround consists on passing the same meta (dummy ID) to all the samples of the batch. - Internally, this is equivalent to use ``config_trend.trend_global_local="global"`` to find the optimal - learning rate. - compute_components_flag : bool, default=False - If True, components will be computed. 
+ def forward( + self, + input_tensor: torch.Tensor, + meta: Dict = None, + compute_components_flag: bool = False, + predict_mode: bool = False, + ) -> torch.Tensor: + """This method defines the model forward pass.""" - Returns - ------- - torch.Tensor - Forecast of dims (batch, n_forecasts, no_quantiles) - """ - # Turnaround to avoid issues when the meta argument is None and meta_used_in_model + time_input = unpack_time_feature( + input_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map + ) + # Handle meta argument if meta is None and self.meta_used_in_model: name_id_dummy = self.id_list[0] meta = OrderedDict() - meta["df_name"] = [name_id_dummy for _ in range(inputs["time"].shape[0])] + meta["df_name"] = [name_id_dummy for _ in range(time_input.shape[0])] meta = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) + + # Initialize components and nonstationary tensors components = {} additive_components = torch.zeros( - size=(inputs["time"].shape[0], self.n_forecasts, len(self.quantiles)), + size=(time_input.shape[0], self.n_forecasts, len(self.quantiles)), device=self.device, ) additive_components_nonstationary = torch.zeros( - size=(inputs["time"].shape[0], inputs["time"].shape[1], len(self.quantiles)), device=self.device + size=(time_input.shape[0], time_input.shape[1], len(self.quantiles)), + device=self.device, ) multiplicative_components_nonstationary = torch.zeros( - size=(inputs["time"].shape[0], inputs["time"].shape[1], len(self.quantiles)), device=self.device + size=(time_input.shape[0], time_input.shape[1], len(self.quantiles)), + device=self.device, ) - trend = self.trend(t=inputs["time"], meta=meta) + + # Unpack time feature and compute trend + trend = self.trend(t=time_input, meta=meta) components["trend"] = trend - if "seasonalities" in inputs: - s = self.seasonality(s=inputs["seasonalities"], meta=meta) + # Unpack and process seasonalities + seasonalities_input = None + if self.config_seasonality and self.config_seasonality.periods: + print("++++seasonalities ++++") + seasonalities_input = unpack_seasonalities( + input_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + self.config_seasonality, + ) + s = self.seasonality(s=seasonalities_input, meta=meta) if self.config_seasonality.mode == "additive": additive_components_nonstationary += s elif self.config_seasonality.mode == "multiplicative": multiplicative_components_nonstationary += s components["seasonalities"] = s - if "events" in inputs: - if "additive" in inputs["events"].keys(): - additive_events = self.scalar_features_effects( - inputs["events"]["additive"], self.event_params["additive"] - ) - additive_components_nonstationary += additive_events - components["additive_events"] = additive_events - if "multiplicative" in inputs["events"].keys(): - multiplicative_events = self.scalar_features_effects( - inputs["events"]["multiplicative"], self.event_params["multiplicative"] - ) - multiplicative_components_nonstationary += multiplicative_events - components["multiplicative_events"] = multiplicative_events - - if "regressors" in inputs: - if "additive" in inputs["regressors"].keys(): - additive_regressors = self.future_regressors(inputs["regressors"]["additive"], "additive") - additive_components_nonstationary += additive_regressors - components["additive_regressors"] = additive_regressors - if "multiplicative" in inputs["regressors"].keys(): - multiplicative_regressors = self.future_regressors( - 
inputs["regressors"]["multiplicative"], "multiplicative" - ) - multiplicative_components_nonstationary += multiplicative_regressors - components["multiplicative_regressors"] = multiplicative_regressors - - # stationarized input - if "lags" in inputs: - # combinde all non-stationary components over AR input range - nonstationary_components = ( # dimensions - [batch, n_lags, median quantile] + # Unpack and process events + additive_events_input = None + multiplicative_events_input = None + if "additive_events" in self.config_model.features_map: + additive_events_input = unpack_additive_events( + input_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + ) + additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"]) + additive_components_nonstationary += additive_events + components["additive_events"] = additive_events + if "multiplicative_events" in self.config_model.features_map: + multiplicative_events_input = unpack_multiplicative_events( + input_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + ) + multiplicative_events = self.scalar_features_effects( + multiplicative_events_input, self.event_params["multiplicative"] + ) + multiplicative_components_nonstationary += multiplicative_events + components["multiplicative_events"] = multiplicative_events + + # Unpack and process regressors + additive_regressors_input = None + multiplicative_regressors_input = None + if "additive_regressors" in self.config_model.features_map: + additive_regressors_input = unpack_additive_regressor( + input_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + ) + additive_regressors = self.future_regressors(additive_regressors_input, "additive") + additive_components_nonstationary += additive_regressors + components["additive_regressors"] = additive_regressors + if "multiplicative_regressors" in self.config_model.features_map: + multiplicative_regressors_input = unpack_multiplicative_regressor( + input_tensor, + self.n_lags, + self.n_forecasts, + self.max_lags, + self.config_model.features_map, + ) + multiplicative_regressors = self.future_regressors(multiplicative_regressors_input, "multiplicative") + multiplicative_components_nonstationary += multiplicative_regressors + components["multiplicative_regressors"] = multiplicative_regressors + + # Unpack and process lags + lags_input = None + if "lags" in self.config_model.features_map: + lags_input = unpack_lags( + input_tensor, + self.n_lags, + self.max_lags, + self.config_model.features_map, + ) + nonstationary_components = ( trend[:, : self.n_lags, 0] + additive_components_nonstationary[:, : self.n_lags, 0] + trend[:, : self.n_lags, 0].detach() * multiplicative_components_nonstationary[:, : self.n_lags, 0] ) - stationarized_lags = inputs["lags"] - nonstationary_components + stationarized_lags = lags_input - nonstationary_components lags = self.auto_regression(lags=stationarized_lags) additive_components += lags components["lags"] = lags - if "covariates" in inputs: - covariates = self.forward_covar_net(covariates=inputs["covariates"]) + # Unpack and process covariates + covariates_input = None + if self.config_lagged_regressors: + covariates_input = unpack_lagged_regressors( + input_tensor, self.max_lags, self.config_model.features_map, self.config_lagged_regressors + ) + covariates = self.forward_covar_net(covariates=covariates_input) additive_components += covariates 
components["covariates"] = covariates - # combine all non-stationary components over forecast range + # Combine components and compute predictions predictions_nonstationary = ( - trend[:, self.n_lags : inputs["time"].shape[1], :] - + additive_components_nonstationary[:, self.n_lags : inputs["time"].shape[1], :] - + trend[:, self.n_lags : inputs["time"].shape[1], :].detach() - * multiplicative_components_nonstationary[:, self.n_lags : inputs["time"].shape[1], :] + trend[:, self.n_lags : time_input.shape[1], :] + + additive_components_nonstationary[:, self.n_lags : time_input.shape[1], :] + + trend[:, self.n_lags : time_input.shape[1], :].detach() + * multiplicative_components_nonstationary[:, self.n_lags : time_input.shape[1], :] ) - prediction = predictions_nonstationary + additive_components # dimensions - [batch, n_forecasts, no_quantiles] + prediction = predictions_nonstationary + additive_components - # check for crossing quantiles and correct them here - if "predict_mode" in inputs.keys() and inputs["predict_mode"]: - predict_mode = True - else: - predict_mode = False + # Correct crossing quantiles prediction_with_quantiles = self._compute_quantile_forecasts_from_diffs(prediction, predict_mode) - # component calculation + # Compute components if required if compute_components_flag: - components = self.compute_components(inputs, components, meta) + components = self.compute_components( + time_input, + seasonalities_input, + lags_input, + covariates_input, + additive_events_input, + multiplicative_events_input, + additive_regressors_input, + multiplicative_regressors_input, + components, + meta, + ) + print(f"components = {components.keys()}") else: components = None return prediction_with_quantiles, components - def compute_components(self, inputs: Dict, components_raw: Dict, meta: Dict) -> Dict: - """This method returns the values of each model component. - Note - ---- - Time input is required. Minimum model setup is a linear trend. 
- Parameters - ---------- - inputs : dict - Model inputs, each of len(df) but with varying dimensions - Note - ---- - Contains the following data: - Model Inputs - * ``time`` (torch.Tensor , loat), normalized time, dims: (batch, n_forecasts) - * ``lags`` (torch.Tensor, float), dims: (batch, n_lags) - * ``seasonalities`` (torch.Tensor, float), dict of named seasonalities (keys) with their features - (values), dims of each dict value (batch, n_forecasts, n_features) - * ``covariates`` (torch.Tensor, float), dict of named covariates (keys) with their features - (values), dims of each dict value: (batch, n_lags) - * ``events`` (torch.Tensor, float), all event features, dims (batch, n_forecasts, n_features) - * ``regressors``(torch.Tensor, float), all regressor features, dims (batch, n_forecasts, n_features) - components_raw : dict - components to be computed - ------- - dict - Containing forecast coomponents with elements of dims (batch, n_forecasts) - """ + def compute_components( + self, + time_input, + seasonality_input, + lags_input, + covariates_input, + additive_events_input, + multiplicative_events_input, + additive_regressors_input, + multiplicative_regressors_input, + components_raw: Dict, + meta: Dict, + ) -> Dict: components = {} - components["trend"] = components_raw["trend"][:, self.n_lags : inputs["time"].shape[1], :] - if self.config_trend is not None and "seasonalities" in inputs: - for name, features in inputs["seasonalities"].items(): + components["trend"] = components_raw["trend"][:, self.n_lags : time_input.shape[1], :] + if self.config_trend is not None and seasonality_input is not None: + for name, features in seasonality_input.items(): + print(f"season = {name}") + components[f"season_{name}"] = self.seasonality.compute_fourier( - features=features[:, self.n_lags : inputs["time"].shape[1], :], name=name, meta=meta + features=features[:, self.n_lags : time_input.shape[1], :], name=name, meta=meta ) - if self.n_lags > 0 and "lags" in inputs: + if self.n_lags > 0 and lags_input is not None: components["ar"] = components_raw["lags"] - if self.config_lagged_regressors is not None and "covariates" in inputs: + if self.config_lagged_regressors is not None and covariates_input is not None: + print("lagged_regressors") # Combined forward pass all_covariates = components_raw["covariates"] # Calculate the contribution of each covariate on each forecast @@ -701,7 +728,7 @@ def compute_components(self, inputs: Dict, components_raw: Dict, meta: Dict) -> covar_attribution_sum_per_forecast = reduce( torch.add, [torch.sum(covar, axis=1) for _, covar in covar_attributions.items()] ).to(all_covariates.device) - for name in inputs["covariates"].keys(): + for name in covariates_input.keys(): # Distribute the contribution of the current covariate to the combined forward pass # 1. Calculate the relative share of each covariate on the total attributions # 2. 
Multiply the relative share with the combined forward pass @@ -712,58 +739,62 @@ def compute_components(self, inputs: Dict, components_raw: Dict, meta: Dict) -> covar_attribution_sum_per_forecast, ).reshape(self.n_forecasts, len(self.quantiles)), ) - if (self.config_events is not None or self.config_holidays is not None) and "events" in inputs: - if "additive" in inputs["events"].keys(): + if self.config_events is not None or self.config_holidays is not None: + if additive_events_input is not None: components["events_additive"] = components_raw["additive_events"][ - :, self.n_lags : inputs["time"].shape[1], : + :, self.n_lags : time_input.shape[1], : ] - if "multiplicative" in inputs["events"].keys(): + if multiplicative_events_input is not None: components["events_multiplicative"] = components_raw["multiplicative_events"][ - :, self.n_lags : inputs["time"].shape[1], : + :, self.n_lags : time_input.shape[1], : ] for event, configs in self.events_dims.items(): mode = configs["mode"] indices = configs["event_indices"] if mode == "additive": - features = inputs["events"]["additive"][:, self.n_lags : inputs["time"].shape[1], :] + features = additive_events_input[:, self.n_lags : time_input.shape[1], :] params = self.event_params["additive"] else: - features = inputs["events"]["multiplicative"][:, self.n_lags : inputs["time"].shape[1], :] + features = multiplicative_events_input[:, self.n_lags : time_input.shape[1], :] params = self.event_params["multiplicative"] components[f"event_{event}"] = self.scalar_features_effects( features=features, params=params, indices=indices ) - if self.config_regressors.regressors is not None and "regressors" in inputs: - if "additive" in inputs["regressors"].keys(): + if self.config_regressors.regressors is not None: + if additive_regressors_input is not None: components["future_regressors_additive"] = components_raw["additive_regressors"][ - :, self.n_lags : inputs["time"].shape[1], : + :, self.n_lags : time_input.shape[1], : ] - if "multiplicative" in inputs["regressors"].keys(): + if multiplicative_regressors_input is not None: components["future_regressors_multiplicative"] = components_raw["multiplicative_regressors"][ - :, self.n_lags : inputs["time"].shape[1], : + :, self.n_lags : time_input.shape[1], : ] for regressor, configs in self.future_regressors.regressors_dims.items(): + print(f"regressor = {regressor}") mode = configs["mode"] index = [] index.append(configs["regressor_index"]) - features = inputs["regressors"][mode] - components[f"future_regressor_{regressor}"] = self.future_regressors( - features[:, self.n_lags : inputs["time"].shape[1], :], mode, indeces=index - ) - + if mode == "additive" and additive_regressors_input is not None: + components[f"future_regressor_{regressor}"] = self.future_regressors( + additive_regressors_input[:, self.n_lags : time_input.shape[1], :], mode, indeces=index + ) + if mode == "multiplicative" and multiplicative_regressors_input is not None: + components[f"future_regressor_{regressor}"] = self.future_regressors( + multiplicative_regressors_input[:, self.n_lags : time_input.shape[1], :], mode, indeces=index + ) return components def set_compute_components(self, compute_components_flag): self.compute_components_flag = compute_components_flag - def loss_func(self, inputs, predicted, targets): + def loss_func(self, time, predicted, targets): loss = None # Compute loss. no reduction. loss = self.config_train.loss_func(predicted, targets) # Weigh newer samples more. 
- loss = loss * self._get_time_based_sample_weight(t=inputs["time"][:, self.n_lags :]) + loss = loss * self._get_time_based_sample_weight(t=time[:, self.n_lags :]) loss = loss.sum(dim=2).mean() # Regularize. if self.reg_enabled: @@ -776,27 +807,26 @@ def loss_func(self, inputs, predicted, targets): def training_step(self, batch, batch_idx): inputs_tensor, meta = batch - inputs = unpack_sliced_tensor( + targets = unpack_targets( inputs_tensor, - self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map, - self.config_lagged_regressors, - self.config_seasonality, ) - targets = inputs["targets"] + time = unpack_time_feature( + inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map + ) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, meta_name_tensor) # Store predictions in self for later network visualization self.train_epoch_prediction = predicted # Calculate loss - loss, reg_loss = self.loss_func(inputs, predicted, targets) + loss, reg_loss = self.loss_func(time, predicted, targets) # Optimization optimizer = self.optimizers() @@ -824,25 +854,24 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): inputs_tensor, meta = batch - inputs = unpack_sliced_tensor( + targets = unpack_targets( inputs_tensor, - self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map, - self.config_lagged_regressors, - self.config_seasonality, ) - targets = inputs["targets"] + time = unpack_time_feature( + inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map + ) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, meta_name_tensor) # Calculate loss - loss, reg_loss = self.loss_func(inputs, predicted, targets) + loss, reg_loss = self.loss_func(time, predicted, targets) # Metrics if self.metrics_enabled: predicted_denorm = self.denormalize(predicted[:, :, 0]) @@ -855,25 +884,24 @@ def validation_step(self, batch, batch_idx): def test_step(self, batch, batch_idx): inputs_tensor, meta = batch - inputs = unpack_sliced_tensor( + targets = unpack_targets( inputs_tensor, - self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map, - self.config_lagged_regressors, - self.config_seasonality, ) - targets = inputs["targets"] + time = unpack_time_feature( + inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map + ) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, meta_name_tensor) # Calculate loss - loss, reg_loss = self.loss_func(inputs, predicted, targets) + loss, reg_loss = self.loss_func(time, predicted, targets) # Metrics if self.metrics_enabled: predicted_denorm = self.denormalize(predicted[:, :, 0]) @@ -886,25 +914,16 @@ def test_step(self, batch, batch_idx): def predict_step(self, batch, 
batch_idx, dataloader_idx=0): inputs_tensor, meta = batch - - inputs = unpack_sliced_tensor( - inputs_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - self.config_lagged_regressors, - self.config_seasonality, - ) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None - # Add predict_mode flag to dataset - inputs["predict_mode"] = True + # Run forward calculation - prediction, components = self.forward(inputs, meta_name_tensor, self.compute_components_flag) + prediction, components = self.forward( + inputs_tensor, meta_name_tensor, self.compute_components_flag, predict_mode=True + ) return prediction, components def configure_optimizers(self): diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index 2b50d6893..b299cd2dc 100644 --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -989,6 +989,121 @@ def configure_trainer( return pl.Trainer(**config), checkpoint_callback +def unpack_targets( + sliced_tensor, + n_forecasts, + max_lags, + feature_indices, +): + targets_start_idx, targets_end_idx = feature_indices["targets"] + if max_lags > 0: + return sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) + else: + return sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) + + +def unpack_time_feature(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + start_idx, end_idx = feature_indices["time"] + if max_lags > 0: + return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, start_idx] + else: + return sliced_tensor[:, start_idx : end_idx + 1] + + +def unpack_seasonalities(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices, config_seasonality): + seasonalities = OrderedDict() + if max_lags > 0: + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + seasonalities[seasonality_name] = sliced_tensor[ + :, + max_lags - n_lags : max_lags + n_forecasts, + seasonality_start_idx:seasonality_end_idx, + ] + else: + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + seasonalities[seasonality_name] = sliced_tensor[:, seasonality_start_idx:seasonality_end_idx].unsqueeze( + 1 + ) + return seasonalities + + +def unpack_lagged_regressors(sliced_tensor, max_lags, feature_indices, config_lagged_regressors): + lagged_regressors = OrderedDict() + if config_lagged_regressors: + for name, lagged_regressor in config_lagged_regressors.items(): + lagged_regressor_key = f"lagged_regressor_{name}" + if lagged_regressor_key in feature_indices: + lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] + covar_lags = lagged_regressor.n_lags + lagged_regressor_offset = max_lags - covar_lags + lagged_regressors[name] = sliced_tensor[ + :, + lagged_regressor_offset : lagged_regressor_offset + covar_lags, + lagged_regressor_start_idx, + ] + + return lagged_regressors + + +def unpack_lags(sliced_tensor, n_lags, max_lags, feature_indices): + lags_start_idx, _ = feature_indices["lags"] + return sliced_tensor[:, max_lags - n_lags : max_lags, lags_start_idx] + + +def unpack_additive_events(sliced_tensor, 
n_lags, n_forecasts, max_lags, feature_indices): + if max_lags > 0: + events_start_idx, events_end_idx = feature_indices["additive_events"] + future_offset = max_lags - n_lags + return sliced_tensor[ + :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 + ] + else: + events_start_idx, events_end_idx = feature_indices["additive_events"] + return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + +def unpack_multiplicative_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + if max_lags > 0: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, events_start_idx : events_end_idx + 1] + else: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + +def unpack_additive_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + if max_lags > 0: + regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + return sliced_tensor[ + :, + max_lags - n_lags : max_lags + n_forecasts, + regressors_start_idx : regressors_end_idx + 1, + ] + else: + regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) + + +def unpack_multiplicative_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + if max_lags > 0: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + future_offset = max_lags - n_lags + return sliced_tensor[ + :, + future_offset : future_offset + n_forecasts + n_lags, + regressors_start_idx : regressors_end_idx + 1, + ] + else: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) + + def unpack_sliced_tensor( sliced_tensor, n_lags, From 4a4aee947decb71cd335f2b45f77cba759acada9 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Wed, 28 Aug 2024 15:41:25 -0700 Subject: [PATCH 06/22] adjust forecaster --- neuralprophet/forecaster.py | 25 +++--- neuralprophet/time_net.py | 1 - neuralprophet/utils.py | 150 ------------------------------------ 3 files changed, 12 insertions(+), 164 deletions(-) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 500cc3e27..3924b05bb 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -35,7 +35,7 @@ from neuralprophet.plot_model_parameters_plotly import plot_parameters as plot_parameters_plotly from neuralprophet.plot_utils import get_valid_configuration, log_warning_deprecation_plotly, select_plotting_backend from neuralprophet.uncertainty import Conformal -from neuralprophet.utils import unpack_sliced_tensor +from neuralprophet.utils import unpack_seasonalities, unpack_time_feature log = logging.getLogger("NP.forecaster") @@ -1902,27 +1902,26 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): for name in self.config_seasonality.periods: predicted[name] = list() for inputs_tensor, meta in loader: - inputs = unpack_sliced_tensor( - sliced_tensor=inputs_tensor, - n_lags=0, - n_forecasts=1, - max_lags=0, - feature_indices=self.config_model.features_map, - config_lagged_regressors=self.config_lagged_regressors, - config_seasonality=self.config_seasonality, - ) # Meta as a tensor for 
prediction if self.model.config_seasonality is None: meta_name_tensor = None elif self.model.config_seasonality.global_local in ["local", "glocal"]: meta = OrderedDict() - meta["df_name"] = [df_name for _ in range(inputs["time"].shape[0])] + time_input = unpack_time_feature(inputs_tensor, 0, 1, 0, self.config_model.features_map) + meta["df_name"] = [df_name for _ in range(time_input.shape[0])] meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]]) # type: ignore else: meta_name_tensor = None - + seasonalities_input = unpack_seasonalities( + inputs_tensor, + 0, + 1, + 0, + self.config_model.features_map, + self.config_seasonality, + ) for name in self.config_seasonality.periods: - features = inputs["seasonalities"][name] + features = seasonalities_input[name] quantile_index = self.config_train.quantiles.index(quantile) y_season = torch.squeeze( self.model.seasonality.compute_fourier(features=features, name=name, meta=meta_name_tensor)[ diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index fb782374a..26a23d25d 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -562,7 +562,6 @@ def forward( # Unpack and process seasonalities seasonalities_input = None if self.config_seasonality and self.config_seasonality.periods: - print("++++seasonalities ++++") seasonalities_input = unpack_seasonalities( input_tensor, self.n_lags, diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index b299cd2dc..92b5ddb2d 100644 --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -1102,153 +1102,3 @@ def unpack_multiplicative_regressor(sliced_tensor, n_lags, n_forecasts, max_lags else: regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - - -def unpack_sliced_tensor( - sliced_tensor, - n_lags, - n_forecasts, - max_lags, - feature_indices, - config_lagged_regressors, - config_seasonality, -): - sliced_tensor = sliced_tensor.detach().clone() - inputs = OrderedDict() - if max_lags > 0: - # Unpack time feature (time doesn't need further slicing) - start_idx, end_idx = feature_indices["time"] - time_offset = max_lags - n_lags - inputs["time"] = sliced_tensor[:, time_offset : time_offset + n_lags + n_forecasts, start_idx] - - # Unpack lags feature - if "lags" in feature_indices: - lags_start_idx, lags_end_idx = feature_indices["lags"] - lags_offset = max_lags - n_lags - inputs["lags"] = sliced_tensor[:, lags_offset : lags_offset + n_lags, lags_start_idx] - - # Unpack targets - if "targets" in feature_indices: - targets_start_idx, targets_end_idx = feature_indices["targets"] - inputs["targets"] = sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) - - # Unpack additive event and holiday features - if "additive_events" in feature_indices: - events_start_idx, events_end_idx = feature_indices["additive_events"] - future_offset = max_lags - n_lags - inputs["events"] = OrderedDict() - inputs["events"]["additive"] = sliced_tensor[ - :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 - ] - - # Unpack multiplicative event and holiday features - if "multiplicative_events" in feature_indices: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - future_offset = max_lags - n_lags - if "events" not in inputs: - inputs["events"] = OrderedDict() - inputs["events"]["multiplicative"] = sliced_tensor[ - :, future_offset : future_offset + 
n_forecasts + n_lags, events_start_idx : events_end_idx + 1 - ] - - # Unpack additive regressor features - if "additive_regressors" in feature_indices: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - future_offset = max_lags - n_lags - inputs["regressors"] = OrderedDict() - inputs["regressors"]["additive"] = sliced_tensor[ - :, - future_offset : future_offset + n_forecasts + n_lags, - regressors_start_idx : regressors_end_idx + 1, - ] - - # Unpack multiplicative regressor features - if "multiplicative_regressors" in feature_indices: - regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - future_offset = max_lags - n_lags - if "regressors" not in inputs: - inputs["regressors"] = OrderedDict() - inputs["regressors"]["multiplicative"] = sliced_tensor[ - :, - future_offset : future_offset + n_forecasts + n_lags, - regressors_start_idx : regressors_end_idx + 1, - ] - - # Unpack seasonality feature - if config_seasonality is not None and hasattr(config_seasonality, "periods"): - inputs["seasonalities"] = OrderedDict() - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - seasonality_offset = max_lags - n_lags - inputs["seasonalities"][seasonality_name] = sliced_tensor[ - :, - seasonality_offset : seasonality_offset + n_forecasts + n_lags, - seasonality_start_idx:seasonality_end_idx, - ] - - # Unpack lagged regressor features - if config_lagged_regressors: - inputs["covariates"] = OrderedDict() - for name, lagged_regressor in config_lagged_regressors.items(): - lagged_regressor_key = f"lagged_regressor_{name}" - if lagged_regressor_key in feature_indices: - lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] - covar_lags = lagged_regressor.n_lags - lagged_regressor_offset = max_lags - covar_lags - inputs["covariates"][name] = sliced_tensor[ - :, - lagged_regressor_offset : lagged_regressor_offset + covar_lags, - lagged_regressor_start_idx, - ] - - else: - start_idx, end_idx = feature_indices["time"] - inputs["time"] = sliced_tensor[:, start_idx : end_idx + 1] - - if "targets" in feature_indices: - targets_start_idx, targets_end_idx = feature_indices["targets"] - inputs["targets"] = sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) - - # Unpack additive event and holiday features - if "additive_events" in feature_indices: - events_start_idx, events_end_idx = feature_indices["additive_events"] - inputs["events"] = OrderedDict() - inputs["events"]["additive"] = sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - # Unpack multiplicative event and holiday features - if "multiplicative_events" in feature_indices: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - if "events" not in inputs: - inputs["events"] = OrderedDict() - inputs["events"]["multiplicative"] = sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - # Unpack additive regressor features - if "additive_regressors" in feature_indices: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - inputs["regressors"] = OrderedDict() - inputs["regressors"]["additive"] = sliced_tensor[ - :, regressors_start_idx : regressors_end_idx + 1 - ].unsqueeze(1) - - # Unpack multiplicative regressor features - if "multiplicative_regressors" in feature_indices: - 
regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - if "regressors" not in inputs: - inputs["regressors"] = OrderedDict() - inputs["regressors"]["multiplicative"] = sliced_tensor[ - :, regressors_start_idx : regressors_end_idx + 1 - ].unsqueeze(1) - - # Unpack seasonality feature - if config_seasonality and hasattr(config_seasonality, "periods"): - inputs["seasonalities"] = OrderedDict() - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - inputs["seasonalities"][seasonality_name] = sliced_tensor[ - :, seasonality_start_idx:seasonality_end_idx - ].unsqueeze(1) - return inputs From 361e546151fb7f2ce44a56a9d41c7dec3953fd7f Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Wed, 28 Aug 2024 16:02:49 -0700 Subject: [PATCH 07/22] separate unpacking logic --- neuralprophet/forecaster.py | 22 ++- neuralprophet/time_net.py | 18 +-- neuralprophet/utils.py | 115 -------------- neuralprophet/utils_time_dataset.py | 230 ++++++++++++++++++++++++++++ 4 files changed, 251 insertions(+), 134 deletions(-) create mode 100644 neuralprophet/utils_time_dataset.py diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 3924b05bb..c91ba3aaf 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -35,7 +35,7 @@ from neuralprophet.plot_model_parameters_plotly import plot_parameters as plot_parameters_plotly from neuralprophet.plot_utils import get_valid_configuration, log_warning_deprecation_plotly, select_plotting_backend from neuralprophet.uncertainty import Conformal -from neuralprophet.utils import unpack_seasonalities, unpack_time_feature +from neuralprophet.utils_time_dataset import unpack_seasonalities, unpack_time log = logging.getLogger("NP.forecaster") @@ -1907,18 +1907,24 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): meta_name_tensor = None elif self.model.config_seasonality.global_local in ["local", "glocal"]: meta = OrderedDict() - time_input = unpack_time_feature(inputs_tensor, 0, 1, 0, self.config_model.features_map) + time_input = unpack_time( + sliced_tensor=inputs_tensor, + n_lags=0, + n_forecasts=1, + max_lags=0, + feature_indices=self.config_model.features_map, + ) meta["df_name"] = [df_name for _ in range(time_input.shape[0])] meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]]) # type: ignore else: meta_name_tensor = None seasonalities_input = unpack_seasonalities( - inputs_tensor, - 0, - 1, - 0, - self.config_model.features_map, - self.config_seasonality, + sliced_tensor=inputs_tensor, + n_lags=0, + n_forecasts=1, + max_lags=0, + feature_indices=self.config_model.features_map, + config_seasonality=self.config_seasonality, ) for name in self.config_seasonality.periods: features = seasonalities_input[name] diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 26a23d25d..657608dcd 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -21,6 +21,8 @@ reg_func_seasonality_glocal, reg_func_trend, reg_func_trend_glocal, +) +from neuralprophet.utils_time_dataset import ( unpack_additive_events, unpack_additive_regressor, unpack_lagged_regressors, @@ -29,7 +31,7 @@ unpack_multiplicative_regressor, unpack_seasonalities, unpack_targets, - unpack_time_feature, + unpack_time, ) from neuralprophet.utils_torch import init_parameter, 
interprete_model @@ -530,7 +532,7 @@ def forward( ) -> torch.Tensor: """This method defines the model forward pass.""" - time_input = unpack_time_feature( + time_input = unpack_time( input_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map ) # Handle meta argument @@ -812,9 +814,7 @@ def training_step(self, batch, batch_idx): self.max_lags, self.config_model.features_map, ) - time = unpack_time_feature( - inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map - ) + time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -859,9 +859,7 @@ def validation_step(self, batch, batch_idx): self.max_lags, self.config_model.features_map, ) - time = unpack_time_feature( - inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map - ) + time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -889,9 +887,7 @@ def test_step(self, batch, batch_idx): self.max_lags, self.config_model.features_map, ) - time = unpack_time_feature( - inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map - ) + time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index 92b5ddb2d..62b9e7481 100644 --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -987,118 +987,3 @@ def configure_trainer( # config["replace_sampler_ddp"] = False return pl.Trainer(**config), checkpoint_callback - - -def unpack_targets( - sliced_tensor, - n_forecasts, - max_lags, - feature_indices, -): - targets_start_idx, targets_end_idx = feature_indices["targets"] - if max_lags > 0: - return sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) - else: - return sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) - - -def unpack_time_feature(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - start_idx, end_idx = feature_indices["time"] - if max_lags > 0: - return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, start_idx] - else: - return sliced_tensor[:, start_idx : end_idx + 1] - - -def unpack_seasonalities(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices, config_seasonality): - seasonalities = OrderedDict() - if max_lags > 0: - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - seasonalities[seasonality_name] = sliced_tensor[ - :, - max_lags - n_lags : max_lags + n_forecasts, - seasonality_start_idx:seasonality_end_idx, - ] - else: - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - seasonalities[seasonality_name] = 
sliced_tensor[:, seasonality_start_idx:seasonality_end_idx].unsqueeze( - 1 - ) - return seasonalities - - -def unpack_lagged_regressors(sliced_tensor, max_lags, feature_indices, config_lagged_regressors): - lagged_regressors = OrderedDict() - if config_lagged_regressors: - for name, lagged_regressor in config_lagged_regressors.items(): - lagged_regressor_key = f"lagged_regressor_{name}" - if lagged_regressor_key in feature_indices: - lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] - covar_lags = lagged_regressor.n_lags - lagged_regressor_offset = max_lags - covar_lags - lagged_regressors[name] = sliced_tensor[ - :, - lagged_regressor_offset : lagged_regressor_offset + covar_lags, - lagged_regressor_start_idx, - ] - - return lagged_regressors - - -def unpack_lags(sliced_tensor, n_lags, max_lags, feature_indices): - lags_start_idx, _ = feature_indices["lags"] - return sliced_tensor[:, max_lags - n_lags : max_lags, lags_start_idx] - - -def unpack_additive_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - if max_lags > 0: - events_start_idx, events_end_idx = feature_indices["additive_events"] - future_offset = max_lags - n_lags - return sliced_tensor[ - :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 - ] - else: - events_start_idx, events_end_idx = feature_indices["additive_events"] - return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - -def unpack_multiplicative_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - if max_lags > 0: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, events_start_idx : events_end_idx + 1] - else: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - -def unpack_additive_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - if max_lags > 0: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - return sliced_tensor[ - :, - max_lags - n_lags : max_lags + n_forecasts, - regressors_start_idx : regressors_end_idx + 1, - ] - else: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - - -def unpack_multiplicative_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - if max_lags > 0: - regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - future_offset = max_lags - n_lags - return sliced_tensor[ - :, - future_offset : future_offset + n_forecasts + n_lags, - regressors_start_idx : regressors_end_idx + 1, - ] - else: - regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py new file mode 100644 index 000000000..4f436357b --- /dev/null +++ b/neuralprophet/utils_time_dataset.py @@ -0,0 +1,230 @@ +from collections import OrderedDict + + +def unpack_targets( + sliced_tensor, + n_forecasts, + max_lags, + feature_indices, +): + """ + Unpacks the target values from the sliced tensor based on the given feature indices. 
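+
+    The stacked tensor is expected to be indexed as [batch, time window, feature column]
+    when max_lags > 0, and as [batch, feature column] when max_lags == 0 (no lag window).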
+ + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the target values, with an extra dimension added. + """ + targets_start_idx, targets_end_idx = feature_indices["targets"] + if max_lags > 0: + return sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) + else: + return sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) + + +def unpack_time(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + """ + Unpacks the time features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the time features. + """ + start_idx, end_idx = feature_indices["time"] + if max_lags > 0: + return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, start_idx] + else: + return sliced_tensor[:, start_idx : end_idx + 1] + + +def unpack_seasonalities(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices, config_seasonality): + """ + Unpacks the seasonality features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + config_seasonality (object): Configuration object that defines the seasonality periods. + + Returns: + OrderedDict: A dictionary containing the seasonality features for each period. + """ + seasonalities = OrderedDict() + if max_lags > 0: + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + seasonalities[seasonality_name] = sliced_tensor[ + :, + max_lags - n_lags : max_lags + n_forecasts, + seasonality_start_idx:seasonality_end_idx, + ] + else: + for seasonality_name in config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in feature_indices: + seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] + seasonalities[seasonality_name] = sliced_tensor[:, seasonality_start_idx:seasonality_end_idx].unsqueeze( + 1 + ) + return seasonalities + + +def unpack_lagged_regressors(sliced_tensor, max_lags, feature_indices, config_lagged_regressors): + """ + Unpacks the lagged regressors from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. 
+ config_lagged_regressors (dict): Configuration dictionary that defines the lagged regressors and their properties. + + Returns: + OrderedDict: A dictionary containing the lagged regressor features. + """ + lagged_regressors = OrderedDict() + if config_lagged_regressors: + for name, lagged_regressor in config_lagged_regressors.items(): + lagged_regressor_key = f"lagged_regressor_{name}" + if lagged_regressor_key in feature_indices: + lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] + covar_lags = lagged_regressor.n_lags + lagged_regressor_offset = max_lags - covar_lags + lagged_regressors[name] = sliced_tensor[ + :, + lagged_regressor_offset : lagged_regressor_offset + covar_lags, + lagged_regressor_start_idx, + ] + return lagged_regressors + + +def unpack_lags(sliced_tensor, n_lags, max_lags, feature_indices): + """ + Unpacks the lagged features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the lagged features. + """ + lags_start_idx, _ = feature_indices["lags"] + return sliced_tensor[:, max_lags - n_lags : max_lags, lags_start_idx] + + +def unpack_additive_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + """ + Unpacks the additive events features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the additive events features. + """ + if max_lags > 0: + events_start_idx, events_end_idx = feature_indices["additive_events"] + future_offset = max_lags - n_lags + return sliced_tensor[ + :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 + ] + else: + events_start_idx, events_end_idx = feature_indices["additive_events"] + return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + +def unpack_multiplicative_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + """ + Unpacks the multiplicative events features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the multiplicative events features. 
+ """ + if max_lags > 0: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, events_start_idx : events_end_idx + 1] + else: + events_start_idx, events_end_idx = feature_indices["multiplicative_events"] + return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + + +def unpack_additive_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + """ + Unpacks the additive regressor features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the additive regressor features. + """ + if max_lags > 0: + regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + return sliced_tensor[ + :, + max_lags - n_lags : max_lags + n_forecasts, + regressors_start_idx : regressors_end_idx + 1, + ] + else: + regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] + return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) + + +def unpack_multiplicative_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): + """ + Unpacks the multiplicative regressor features from the sliced tensor. + + Args: + sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + + Returns: + torch.Tensor: A tensor containing the multiplicative regressor features. 
+ """ + if max_lags > 0: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + future_offset = max_lags - n_lags + return sliced_tensor[ + :, + future_offset : future_offset + n_forecasts + n_lags, + regressors_start_idx : regressors_end_idx + 1, + ] + else: + regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] + return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) From b6e3c9b9a3ced7c5fd947af1b47cc0f9a3ae6a42 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Wed, 28 Aug 2024 16:59:03 -0700 Subject: [PATCH 08/22] added featureExtractor class --- neuralprophet/forecaster.py | 28 +- neuralprophet/time_net.py | 111 +++----- neuralprophet/utils_time_dataset.py | 401 ++++++++++++---------------- 3 files changed, 216 insertions(+), 324 deletions(-) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index c91ba3aaf..1f2a7928e 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -35,7 +35,7 @@ from neuralprophet.plot_model_parameters_plotly import plot_parameters as plot_parameters_plotly from neuralprophet.plot_utils import get_valid_configuration, log_warning_deprecation_plotly, select_plotting_backend from neuralprophet.uncertainty import Conformal -from neuralprophet.utils_time_dataset import unpack_seasonalities, unpack_time +from neuralprophet.utils_time_dataset import FeatureExtractor log = logging.getLogger("NP.forecaster") @@ -1902,30 +1902,26 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): for name in self.config_seasonality.periods: predicted[name] = list() for inputs_tensor, meta in loader: + feature_extractor = FeatureExtractor( + data_tensor=inputs_tensor, + n_lags=0, + n_forecasts=1, + max_lags=0, + feature_indices=self.config_model.features_map, + config_seasonality=self.config_seasonality, + lagged_regressor_config=self.config_lagged_regressors, + ) # Meta as a tensor for prediction if self.model.config_seasonality is None: meta_name_tensor = None elif self.model.config_seasonality.global_local in ["local", "glocal"]: meta = OrderedDict() - time_input = unpack_time( - sliced_tensor=inputs_tensor, - n_lags=0, - n_forecasts=1, - max_lags=0, - feature_indices=self.config_model.features_map, - ) + time_input = feature_extractor.extract("time") meta["df_name"] = [df_name for _ in range(time_input.shape[0])] meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]]) # type: ignore else: meta_name_tensor = None - seasonalities_input = unpack_seasonalities( - sliced_tensor=inputs_tensor, - n_lags=0, - n_forecasts=1, - max_lags=0, - feature_indices=self.config_model.features_map, - config_seasonality=self.config_seasonality, - ) + seasonalities_input = feature_extractor.extract("seasonalities") for name in self.config_seasonality.periods: features = seasonalities_input[name] quantile_index = self.config_train.quantiles.index(quantile) diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 657608dcd..69c788fa8 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -22,17 +22,7 @@ reg_func_trend, reg_func_trend_glocal, ) -from neuralprophet.utils_time_dataset import ( - unpack_additive_events, - unpack_additive_regressor, - unpack_lagged_regressors, - unpack_lags, - unpack_multiplicative_events, - unpack_multiplicative_regressor, - unpack_seasonalities, - unpack_targets, - unpack_time, -) +from neuralprophet.utils_time_dataset import FeatureExtractor 
from neuralprophet.utils_torch import init_parameter, interprete_model log = logging.getLogger("NP.time_net") @@ -325,6 +315,15 @@ def __init__( else: self.config_regressors.regressors = None + # Features Extractor + self.features_extractor = FeatureExtractor( + n_lags=self.n_lags, + n_forecasts=self.n_forecasts, + max_lags=self.max_lags, + config_seasonality=self.config_seasonality, + lagged_regressor_config=self.config_lagged_regressors, + ) + @property def ar_weights(self) -> torch.Tensor: """sets property auto-regression weights for regularization. Update if AR is modelled differently""" @@ -532,9 +531,7 @@ def forward( ) -> torch.Tensor: """This method defines the model forward pass.""" - time_input = unpack_time( - input_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map - ) + time_input = self.features_extractor.extract(component_name="time") # Handle meta argument if meta is None and self.meta_used_in_model: name_id_dummy = self.id_list[0] @@ -564,14 +561,7 @@ def forward( # Unpack and process seasonalities seasonalities_input = None if self.config_seasonality and self.config_seasonality.periods: - seasonalities_input = unpack_seasonalities( - input_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - self.config_seasonality, - ) + seasonalities_input = self.features_extractor.extract(component_name="seasonalities") s = self.seasonality(s=seasonalities_input, meta=meta) if self.config_seasonality.mode == "additive": additive_components_nonstationary += s @@ -583,24 +573,12 @@ def forward( additive_events_input = None multiplicative_events_input = None if "additive_events" in self.config_model.features_map: - additive_events_input = unpack_additive_events( - input_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) + additive_events_input = self.features_extractor.extract(component_name="additive_events") additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"]) additive_components_nonstationary += additive_events components["additive_events"] = additive_events if "multiplicative_events" in self.config_model.features_map: - multiplicative_events_input = unpack_multiplicative_events( - input_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) + multiplicative_events_input = self.features_extractor.extract(component_name="multiplicative_events") multiplicative_events = self.scalar_features_effects( multiplicative_events_input, self.event_params["multiplicative"] ) @@ -611,23 +589,13 @@ def forward( additive_regressors_input = None multiplicative_regressors_input = None if "additive_regressors" in self.config_model.features_map: - additive_regressors_input = unpack_additive_regressor( - input_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) + additive_regressors_input = self.features_extractor.extract(component_name="additive_regressors") additive_regressors = self.future_regressors(additive_regressors_input, "additive") additive_components_nonstationary += additive_regressors components["additive_regressors"] = additive_regressors if "multiplicative_regressors" in self.config_model.features_map: - multiplicative_regressors_input = unpack_multiplicative_regressor( - input_tensor, - self.n_lags, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, + multiplicative_regressors_input = 
self.features_extractor.extract( + component_name="multiplicative_regressors" ) multiplicative_regressors = self.future_regressors(multiplicative_regressors_input, "multiplicative") multiplicative_components_nonstationary += multiplicative_regressors @@ -636,12 +604,7 @@ def forward( # Unpack and process lags lags_input = None if "lags" in self.config_model.features_map: - lags_input = unpack_lags( - input_tensor, - self.n_lags, - self.max_lags, - self.config_model.features_map, - ) + lags_input = self.features_extractor.extract(component_name="lags") nonstationary_components = ( trend[:, : self.n_lags, 0] + additive_components_nonstationary[:, : self.n_lags, 0] @@ -655,9 +618,7 @@ def forward( # Unpack and process covariates covariates_input = None if self.config_lagged_regressors: - covariates_input = unpack_lagged_regressors( - input_tensor, self.max_lags, self.config_model.features_map, self.config_lagged_regressors - ) + covariates_input = self.features_extractor.extract(component_name="lagged_regressors") covariates = self.forward_covar_net(covariates=covariates_input) additive_components += covariates components["covariates"] = covariates @@ -808,13 +769,9 @@ def loss_func(self, time, predicted, targets): def training_step(self, batch, batch_idx): inputs_tensor, meta = batch - targets = unpack_targets( - inputs_tensor, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) - time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) + self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) + targets = self.features_extractor.extract("targets") + time = self.features_extractor.extract("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -852,14 +809,9 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): inputs_tensor, meta = batch - - targets = unpack_targets( - inputs_tensor, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) - time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) + self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) + targets = self.features_extractor.extract("targets") + time = self.features_extractor.extract("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -880,14 +832,9 @@ def validation_step(self, batch, batch_idx): def test_step(self, batch, batch_idx): inputs_tensor, meta = batch - - targets = unpack_targets( - inputs_tensor, - self.n_forecasts, - self.max_lags, - self.config_model.features_map, - ) - time = unpack_time(inputs_tensor, self.n_lags, self.n_forecasts, self.max_lags, self.config_model.features_map) + self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) + targets = self.features_extractor.extract("targets") + time = self.features_extractor.extract("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -909,6 +856,8 @@ def test_step(self, batch, batch_idx): def predict_step(self, batch, batch_idx, dataloader_idx=0): inputs_tensor, meta = batch + self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) + # Global-local if 
self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py index 4f436357b..3c0d1b9ca 100644 --- a/neuralprophet/utils_time_dataset.py +++ b/neuralprophet/utils_time_dataset.py @@ -1,230 +1,177 @@ from collections import OrderedDict -def unpack_targets( - sliced_tensor, - n_forecasts, - max_lags, - feature_indices, -): - """ - Unpacks the target values from the sliced tensor based on the given feature indices. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the target values, with an extra dimension added. - """ - targets_start_idx, targets_end_idx = feature_indices["targets"] - if max_lags > 0: - return sliced_tensor[:, max_lags : max_lags + n_forecasts, targets_start_idx].unsqueeze(2) - else: - return sliced_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) - - -def unpack_time(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - """ - Unpacks the time features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the time features. - """ - start_idx, end_idx = feature_indices["time"] - if max_lags > 0: - return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, start_idx] - else: - return sliced_tensor[:, start_idx : end_idx + 1] - - -def unpack_seasonalities(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices, config_seasonality): - """ - Unpacks the seasonality features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - config_seasonality (object): Configuration object that defines the seasonality periods. - - Returns: - OrderedDict: A dictionary containing the seasonality features for each period. 
- """ - seasonalities = OrderedDict() - if max_lags > 0: - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - seasonalities[seasonality_name] = sliced_tensor[ - :, - max_lags - n_lags : max_lags + n_forecasts, - seasonality_start_idx:seasonality_end_idx, - ] - else: - for seasonality_name in config_seasonality.periods.keys(): - seasonality_key = f"seasonality_{seasonality_name}" - if seasonality_key in feature_indices: - seasonality_start_idx, seasonality_end_idx = feature_indices[seasonality_key] - seasonalities[seasonality_name] = sliced_tensor[:, seasonality_start_idx:seasonality_end_idx].unsqueeze( - 1 - ) - return seasonalities - - -def unpack_lagged_regressors(sliced_tensor, max_lags, feature_indices, config_lagged_regressors): - """ - Unpacks the lagged regressors from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - config_lagged_regressors (dict): Configuration dictionary that defines the lagged regressors and their properties. - - Returns: - OrderedDict: A dictionary containing the lagged regressor features. - """ - lagged_regressors = OrderedDict() - if config_lagged_regressors: - for name, lagged_regressor in config_lagged_regressors.items(): - lagged_regressor_key = f"lagged_regressor_{name}" - if lagged_regressor_key in feature_indices: - lagged_regressor_start_idx, _ = feature_indices[lagged_regressor_key] - covar_lags = lagged_regressor.n_lags - lagged_regressor_offset = max_lags - covar_lags - lagged_regressors[name] = sliced_tensor[ - :, - lagged_regressor_offset : lagged_regressor_offset + covar_lags, - lagged_regressor_start_idx, - ] - return lagged_regressors - - -def unpack_lags(sliced_tensor, n_lags, max_lags, feature_indices): - """ - Unpacks the lagged features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the lagged features. - """ - lags_start_idx, _ = feature_indices["lags"] - return sliced_tensor[:, max_lags - n_lags : max_lags, lags_start_idx] - - -def unpack_additive_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - """ - Unpacks the additive events features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the additive events features. 
- """ - if max_lags > 0: - events_start_idx, events_end_idx = feature_indices["additive_events"] - future_offset = max_lags - n_lags - return sliced_tensor[ - :, future_offset : future_offset + n_forecasts + n_lags, events_start_idx : events_end_idx + 1 - ] - else: - events_start_idx, events_end_idx = feature_indices["additive_events"] - return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - -def unpack_multiplicative_events(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - """ - Unpacks the multiplicative events features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the multiplicative events features. - """ - if max_lags > 0: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - return sliced_tensor[:, max_lags - n_lags : max_lags + n_forecasts, events_start_idx : events_end_idx + 1] - else: - events_start_idx, events_end_idx = feature_indices["multiplicative_events"] - return sliced_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - - -def unpack_additive_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - """ - Unpacks the additive regressor features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the additive regressor features. - """ - if max_lags > 0: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - return sliced_tensor[ - :, - max_lags - n_lags : max_lags + n_forecasts, - regressors_start_idx : regressors_end_idx + 1, - ] - else: - regressors_start_idx, regressors_end_idx = feature_indices["additive_regressors"] - return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - - -def unpack_multiplicative_regressor(sliced_tensor, n_lags, n_forecasts, max_lags, feature_indices): - """ - Unpacks the multiplicative regressor features from the sliced tensor. - - Args: - sliced_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. - n_lags (int): Number of lags used in the model. - n_forecasts (int): Number of forecasts to be made. - max_lags (int): Maximum number of lags used in the model. - feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. - - Returns: - torch.Tensor: A tensor containing the multiplicative regressor features. 
- """ - if max_lags > 0: - regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - future_offset = max_lags - n_lags - return sliced_tensor[ - :, - future_offset : future_offset + n_forecasts + n_lags, - regressors_start_idx : regressors_end_idx + 1, - ] - else: - regressors_start_idx, regressors_end_idx = feature_indices["multiplicative_regressors"] - return sliced_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) +class FeatureExtractor: + def __init__( + self, + n_lags, + n_forecasts, + max_lags, + data_tensor=None, + feature_indices=None, + config_seasonality=None, + lagged_regressor_config=None, + ): + """ + Initializes the FeatureExtractor with the necessary parameters. + + Args: + data_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. + n_lags (int): Number of lags used in the model. + n_forecasts (int): Number of forecasts to be made. + max_lags (int): Maximum number of lags used in the model. + feature_indices (dict): A dictionary containing the start and end indices of different features in the tensor. + config_seasonality (object, optional): Configuration object that defines the seasonality periods. + lagged_regressor_config (dict, optional): Configuration dictionary that defines the lagged regressors and their properties. + """ + self.data_tensor = data_tensor + self.n_lags = n_lags + self.n_forecasts = n_forecasts + self.max_lags = max_lags + self.feature_indices = feature_indices + self.config_seasonality = config_seasonality + self.lagged_regressor_config = lagged_regressor_config + + def update_data_inputs(self, data_tensor, feature_indices): + """ + Updates the data tensor with a new tensor. + + Args: + data_tensor (torch.Tensor): The new tensor containing all features. + """ + self.data_tensor = data_tensor + self.feature_indices = feature_indices + + def extract(self, component_name): + """ + Routes the extraction process to the appropriate function based on the component name. + + Args: + component_name (str): The name of the component to extract. + + Returns: + Various: The output of the specific extraction function. 
+ """ + if component_name == "targets": + return self.extract_targets() + elif component_name == "time": + return self.extract_time() + elif component_name == "seasonalities": + return self.extract_seasonalities() + elif component_name == "lagged_regressors": + return self.extract_lagged_regressors() + elif component_name == "lags": + return self.extract_lags() + elif component_name == "additive_events": + return self.extract_additive_events() + elif component_name == "multiplicative_events": + return self.extract_multiplicative_events() + elif component_name == "additive_regressors": + return self.extract_additive_regressors() + elif component_name == "multiplicative_regressors": + return self.extract_multiplicative_regressors() + else: + raise ValueError(f"Unknown component name: {component_name}") + + def extract_targets(self): + targets_start_idx, targets_end_idx = self.feature_indices["targets"] + if self.max_lags > 0: + return self.data_tensor[:, self.max_lags : self.max_lags + self.n_forecasts, targets_start_idx].unsqueeze(2) + else: + return self.data_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) + + def extract_time(self): + start_idx, end_idx = self.feature_indices["time"] + if self.max_lags > 0: + return self.data_tensor[:, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, start_idx] + else: + return self.data_tensor[:, start_idx : end_idx + 1] + + def extract_lags(self): + lags_start_idx, _ = self.feature_indices["lags"] + return self.data_tensor[:, self.max_lags - self.n_lags : self.max_lags, lags_start_idx] + + def extract_lagged_regressors(self): + lagged_regressors = OrderedDict() + if self.lagged_regressor_config: + for name, lagged_regressor in self.lagged_regressor_config.items(): + lagged_regressor_key = f"lagged_regressor_{name}" + if lagged_regressor_key in self.feature_indices: + lagged_regressor_start_idx, _ = self.feature_indices[lagged_regressor_key] + covar_lags = lagged_regressor.n_lags + lagged_regressor_offset = self.max_lags - covar_lags + lagged_regressors[name] = self.data_tensor[ + :, + lagged_regressor_offset : lagged_regressor_offset + covar_lags, + lagged_regressor_start_idx, + ] + return lagged_regressors + + def extract_seasonalities(self): + seasonalities = OrderedDict() + if self.max_lags > 0: + for seasonality_name in self.config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in self.feature_indices: + seasonality_start_idx, seasonality_end_idx = self.feature_indices[seasonality_key] + seasonalities[seasonality_name] = self.data_tensor[ + :, + self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, + seasonality_start_idx:seasonality_end_idx, + ] + else: + for seasonality_name in self.config_seasonality.periods.keys(): + seasonality_key = f"seasonality_{seasonality_name}" + if seasonality_key in self.feature_indices: + seasonality_start_idx, seasonality_end_idx = self.feature_indices[seasonality_key] + seasonalities[seasonality_name] = self.data_tensor[ + :, seasonality_start_idx:seasonality_end_idx + ].unsqueeze(1) + + return seasonalities + + def extract_additive_events(self): + if self.max_lags > 0: + events_start_idx, events_end_idx = self.feature_indices["additive_events"] + future_offset = self.max_lags - self.n_lags + return self.data_tensor[ + :, future_offset : future_offset + self.n_forecasts + self.n_lags, events_start_idx : events_end_idx + 1 + ] + else: + events_start_idx, events_end_idx = self.feature_indices["additive_events"] + return 
self.data_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1)
+
+    def extract_multiplicative_events(self):
+        if self.max_lags > 0:
+            events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"]
+            return self.data_tensor[
+                :, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, events_start_idx : events_end_idx + 1
+            ]
+        else:
+            events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"]
+            return self.data_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1)
+
+    def extract_additive_regressors(self):
+        if self.max_lags > 0:
+            regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"]
+            return self.data_tensor[
+                :,
+                self.max_lags - self.n_lags : self.max_lags + self.n_forecasts,
+                regressors_start_idx : regressors_end_idx + 1,
+            ]
+        else:
+            regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"]
+            return self.data_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1)
+
+    def extract_multiplicative_regressors(self):
+        if self.max_lags > 0:
+            regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"]
+            future_offset = self.max_lags - self.n_lags
+            return self.data_tensor[
+                :,
+                future_offset : future_offset + self.n_forecasts + self.n_lags,
+                regressors_start_idx : regressors_end_idx + 1,
+            ]
+        else:
+            regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"]
+            return self.data_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1)

From f3f2afaab3726a28ca48f61d2466aa9917887e94 Mon Sep 17 00:00:00 2001
From: MaiBe-ctrl
Date: Thu, 29 Aug 2024 14:28:38 -0700
Subject: [PATCH 09/22] separate packing logic

---
 neuralprophet/forecaster.py         |   4 +-
 neuralprophet/time_dataset.py       | 146 ++++++++------------------
 neuralprophet/time_net.py           |  31 +++---
 neuralprophet/utils_time_dataset.py | 153 +++++++++++++++++++++++++++-
 4 files changed, 214 insertions(+), 120 deletions(-)

diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py
index 1f2a7928e..dcdb676a4 100644
--- a/neuralprophet/forecaster.py
+++ b/neuralprophet/forecaster.py
@@ -1916,12 +1916,12 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5):
                     meta_name_tensor = None
                 elif self.model.config_seasonality.global_local in ["local", "glocal"]:
                     meta = OrderedDict()
-                    time_input = feature_extractor.extract("time")
+                    time_input = feature_extractor.extract_component("time")
                     meta["df_name"] = [df_name for _ in range(time_input.shape[0])]
                     meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]])  # type: ignore
                 else:
                     meta_name_tensor = None
-                seasonalities_input = feature_extractor.extract("seasonalities")
+                seasonalities_input = feature_extractor.extract_component("seasonalities")
                 for name in self.config_seasonality.periods:
                     features = seasonalities_input[name]
                     quantile_index = self.config_train.quantiles.index(quantile)
diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py
index fc5190880..e7a9eff51 100644
--- a/neuralprophet/time_dataset.py
+++ b/neuralprophet/time_dataset.py
@@ -12,6 +12,17 @@
 from neuralprophet import configure, utils
 from neuralprophet.df_utils import get_max_num_lags
 from neuralprophet.event_utils import get_all_holidays
+from neuralprophet.utils_time_dataset import (
+    pack_additive_events_component,
+    pack_additive_regressors_component,
+    pack_lagged_regressors_component,
+    pack_lags_component,
+    
pack_multiplicative_events_component, + pack_multiplicative_regressors_component, + pack_seasonalities_component, + pack_targets_component, + pack_trend_component, +) log = logging.getLogger("NP.time_dataset") @@ -138,119 +149,48 @@ def __init__( self.stack_all_features() def stack_all_features(self): + """ + Stack all features into one large tensor by calling individual stacking methods. + """ feature_list = [] - self.feature_indices = {} + feature_indices = {} current_idx = 0 - # Stack Trend (t) - time_tensor = self.df_tensors["t"].unsqueeze(-1) # Shape: [T, 1] - feature_list.append(time_tensor) - self.feature_indices["time"] = (current_idx, current_idx) - current_idx += 1 - - # Stack lags (y_scaled) - if self.n_lags >= 1 and "y_scaled" in self.df_tensors: - lags_tensor = self.df_tensors["y_scaled"].unsqueeze(-1) - feature_list.append(lags_tensor) - self.feature_indices["lags"] = (current_idx, current_idx) - current_idx += lags_tensor.shape[1] - - # Stack targets (y_scaled) - if "y_scaled" in self.df_tensors: - targets_tensor = self.df_tensors["y_scaled"].unsqueeze(-1) - feature_list.append(targets_tensor) - self.feature_indices["targets"] = (current_idx, current_idx) - current_idx += targets_tensor.shape[1] - - # Stack lagged regressor features - if self.config_lagged_regressors: - # Collect all lagged regressor tensors in a list - lagged_regressor_tensors = [ - self.df_tensors[name].unsqueeze(-1) for name in self.config_lagged_regressors.keys() - ] - - # Concatenate all lagged regressors along the last dimension (features) - stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) - - # Append to feature list - feature_list.append(stacked_lagged_regressor_tensor) - - # Update feature indices - num_features = stacked_lagged_regressor_tensor.size(-1) - for i, name in enumerate(self.config_lagged_regressors.keys()): - self.feature_indices[f"lagged_regressor_{name}"] = ( - current_idx + i, - current_idx + i + 1, - ) - current_idx += num_features - - # Stack additive event and holiday features - if self.additive_event_and_holiday_names: - additive_events_tensor = torch.cat( - [self.df_tensors[name].unsqueeze(-1) for name in self.additive_event_and_holiday_names], - dim=1, - ) # Shape: [batch_size, num_additive_events, 1] - feature_list.append(additive_events_tensor) - self.feature_indices["additive_events"] = ( - current_idx, - current_idx + additive_events_tensor.size(1) - 1, - ) - current_idx += additive_events_tensor.size(1) - - # Stack multiplicative event and holiday features - if self.multiplicative_event_and_holiday_names: - multiplicative_events_tensor = torch.cat( - [self.df_tensors[name].unsqueeze(-1) for name in self.multiplicative_event_and_holiday_names], dim=1 - ) # Shape: [batch_size, num_multiplicative_events, 1] - - feature_list.append(multiplicative_events_tensor) - self.feature_indices["multiplicative_events"] = ( - current_idx, - current_idx + multiplicative_events_tensor.size(1) - 1, - ) - - current_idx += multiplicative_events_tensor.size(1) + # Call individual stacking functions + current_idx = pack_trend_component(self.df_tensors, feature_list, feature_indices, current_idx) + current_idx = pack_targets_component(self.df_tensors, feature_list, feature_indices, current_idx) - # Stack additive regressor features - if self.additive_regressors_names: - additive_regressors_tensor = torch.cat( - [self.df_tensors[name].unsqueeze(-1) for name in self.additive_regressors_names], dim=1 - ) # Shape: [batch_size, num_additive_regressors, 1] - 
feature_list.append(additive_regressors_tensor)
-            self.feature_indices["additive_regressors"] = (
-                current_idx,
-                current_idx + additive_regressors_tensor.size(1) - 1,
-            )
-            current_idx += additive_regressors_tensor.size(1)
+        current_idx = pack_lags_component(self.df_tensors, feature_list, feature_indices, current_idx, self.n_lags)
+        current_idx = pack_lagged_regressors_component(
+            self.df_tensors, feature_list, feature_indices, current_idx, self.config_lagged_regressors
+        )
+        current_idx = pack_additive_events_component(
+            self.df_tensors, feature_list, feature_indices, current_idx, self.additive_event_and_holiday_names
+        )
+        current_idx = pack_multiplicative_events_component(
+            self.df_tensors, feature_list, feature_indices, current_idx, self.multiplicative_event_and_holiday_names
+        )
+        current_idx = pack_additive_regressors_component(
+            self.df_tensors, feature_list, feature_indices, current_idx, self.additive_regressors_names
+        )
+        current_idx = pack_multiplicative_regressors_component(
+            self.df_tensors, feature_list, feature_indices, current_idx, self.multiplicative_regressors_names
+        )

-        # Stack multiplicative regressor features
-        if self.multiplicative_regressors_names:
-            multiplicative_regressors_tensor = torch.cat(
-                [self.df_tensors[name].unsqueeze(-1) for name in self.multiplicative_regressors_names], dim=1
-            )  # Shape: [batch_size, num_multiplicative_regressors, 1]
-            feature_list.append(multiplicative_regressors_tensor)
-            self.feature_indices["multiplicative_regressors"] = (
-                current_idx,
-                current_idx + len(self.multiplicative_regressors_names) - 1,
+        if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"):
+            current_idx = pack_seasonalities_component(
+                feature_list, feature_indices, current_idx, self.config_seasonality, self.seasonalities
             )
-            current_idx += len(self.multiplicative_regressors_names)
-
-        if self.config_seasonality and self.config_seasonality.periods:
-            for seasonality_name, features in self.seasonalities.items():
-                seasonal_tensor = features
-                print(f"Seasonality tensor shape for {seasonality_name}: {seasonal_tensor.shape}")
-                feature_list.append(seasonal_tensor)
-                self.feature_indices[f"seasonality_{seasonality_name}"] = (
-                    current_idx,
-                    current_idx + seasonal_tensor.size(1),
-                )
-                current_idx += seasonal_tensor.size(1)

         # Concatenate all features into one big tensor
-        self.all_features = torch.cat(feature_list, dim=1)  # Concatenating along the third dimension
+        self.all_features = torch.cat(feature_list, dim=1)  # Concatenating along the second dimension
+
+        # Update the model's features map if applicable
         if self.config_model is not None:
-            self.config_model.features_map = self.feature_indices
+            self.config_model.features_map = feature_indices
+
+        return feature_indices

     def calculate_seasonalities(self):
         self.seasonalities = OrderedDict({})
diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py
index 69c788fa8..3224fa07b 100644
--- a/neuralprophet/time_net.py
+++ b/neuralprophet/time_net.py
@@ -322,6 +322,7 @@ def __init__(
             max_lags=self.max_lags,
             config_seasonality=self.config_seasonality,
             lagged_regressor_config=self.config_lagged_regressors,
+            feature_indices=self.config_model.features_map,
         )

     @property
@@ -531,7 +532,7 @@ def forward(
     ) -> torch.Tensor:
         """This method defines the model forward pass."""

-        time_input = self.features_extractor.extract(component_name="time")
+        time_input = self.features_extractor.extract_component(component_name="time")
         # Handle meta argument
         if meta is None and 
self.meta_used_in_model: name_id_dummy = self.id_list[0] @@ -561,7 +562,7 @@ def forward( # Unpack and process seasonalities seasonalities_input = None if self.config_seasonality and self.config_seasonality.periods: - seasonalities_input = self.features_extractor.extract(component_name="seasonalities") + seasonalities_input = self.features_extractor.extract_component(component_name="seasonalities") s = self.seasonality(s=seasonalities_input, meta=meta) if self.config_seasonality.mode == "additive": additive_components_nonstationary += s @@ -573,12 +574,14 @@ def forward( additive_events_input = None multiplicative_events_input = None if "additive_events" in self.config_model.features_map: - additive_events_input = self.features_extractor.extract(component_name="additive_events") + additive_events_input = self.features_extractor.extract_component(component_name="additive_events") additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"]) additive_components_nonstationary += additive_events components["additive_events"] = additive_events if "multiplicative_events" in self.config_model.features_map: - multiplicative_events_input = self.features_extractor.extract(component_name="multiplicative_events") + multiplicative_events_input = self.features_extractor.extract_component( + component_name="multiplicative_events" + ) multiplicative_events = self.scalar_features_effects( multiplicative_events_input, self.event_params["multiplicative"] ) @@ -589,12 +592,12 @@ def forward( additive_regressors_input = None multiplicative_regressors_input = None if "additive_regressors" in self.config_model.features_map: - additive_regressors_input = self.features_extractor.extract(component_name="additive_regressors") + additive_regressors_input = self.features_extractor.extract_component(component_name="additive_regressors") additive_regressors = self.future_regressors(additive_regressors_input, "additive") additive_components_nonstationary += additive_regressors components["additive_regressors"] = additive_regressors if "multiplicative_regressors" in self.config_model.features_map: - multiplicative_regressors_input = self.features_extractor.extract( + multiplicative_regressors_input = self.features_extractor.extract_component( component_name="multiplicative_regressors" ) multiplicative_regressors = self.future_regressors(multiplicative_regressors_input, "multiplicative") @@ -604,7 +607,7 @@ def forward( # Unpack and process lags lags_input = None if "lags" in self.config_model.features_map: - lags_input = self.features_extractor.extract(component_name="lags") + lags_input = self.features_extractor.extract_component(component_name="lags") nonstationary_components = ( trend[:, : self.n_lags, 0] + additive_components_nonstationary[:, : self.n_lags, 0] @@ -618,7 +621,7 @@ def forward( # Unpack and process covariates covariates_input = None if self.config_lagged_regressors: - covariates_input = self.features_extractor.extract(component_name="lagged_regressors") + covariates_input = self.features_extractor.extract_component(component_name="lagged_regressors") covariates = self.forward_covar_net(covariates=covariates_input) additive_components += covariates components["covariates"] = covariates @@ -770,8 +773,8 @@ def loss_func(self, time, predicted, targets): def training_step(self, batch, batch_idx): inputs_tensor, meta = batch self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract("targets") - 
time = self.features_extractor.extract("time") + targets = self.features_extractor.extract_component("targets") + time = self.features_extractor.extract_component("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -810,8 +813,8 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): inputs_tensor, meta = batch self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract("targets") - time = self.features_extractor.extract("time") + targets = self.features_extractor.extract_component("targets") + time = self.features_extractor.extract_component("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -833,8 +836,8 @@ def validation_step(self, batch, batch_idx): def test_step(self, batch, batch_idx): inputs_tensor, meta = batch self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract("targets") - time = self.features_extractor.extract("time") + targets = self.features_extractor.extract_component("targets") + time = self.features_extractor.extract_component("time") # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py index 3c0d1b9ca..b58b4ba93 100644 --- a/neuralprophet/utils_time_dataset.py +++ b/neuralprophet/utils_time_dataset.py @@ -1,5 +1,7 @@ from collections import OrderedDict +import torch + class FeatureExtractor: def __init__( @@ -42,7 +44,7 @@ def update_data_inputs(self, data_tensor, feature_indices): self.data_tensor = data_tensor self.feature_indices = feature_indices - def extract(self, component_name): + def extract_component(self, component_name): """ Routes the extraction process to the appropriate function based on the component name. @@ -175,3 +177,152 @@ def extract_multiplicative_regressors(self): else: regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"] return self.data_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) + + +def pack_trend_component(df_tensors, feature_list, feature_indices, current_idx): + """ + Stack the trend (time) feature. + """ + time_tensor = df_tensors["t"].unsqueeze(-1) # Shape: [T, 1] + feature_list.append(time_tensor) + feature_indices["time"] = (current_idx, current_idx) + return current_idx + 1 + + +def pack_lags_component(df_tensors, feature_list, feature_indices, current_idx, n_lags): + """ + Stack the lags feature. + """ + if n_lags >= 1 and "y_scaled" in df_tensors: + lags_tensor = df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(lags_tensor) + feature_indices["lags"] = (current_idx, current_idx) + return current_idx + 1 + return current_idx + + +def pack_targets_component(df_tensors, feature_list, feature_indices, current_idx): + """ + Stack the targets feature. 
+ """ + if "y_scaled" in df_tensors: + targets_tensor = df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(targets_tensor) + feature_indices["targets"] = (current_idx, current_idx) + return current_idx + 1 + return current_idx + + +def pack_lagged_regerssors_component(df_tensors, feature_list, feature_indices, current_idx, config_lagged_regressors): + """ + Stack the lagged regressor features. + """ + if config_lagged_regressors: + lagged_regressor_tensors = [df_tensors[name].unsqueeze(-1) for name in config_lagged_regressors.keys()] + stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) + feature_list.append(stacked_lagged_regressor_tensor) + num_features = stacked_lagged_regressor_tensor.size(-1) + for i, name in enumerate(config_lagged_regressors.keys()): + feature_indices[f"lagged_regressor_{name}"] = ( + current_idx + i, + current_idx + i + 1, + ) + return current_idx + num_features + return current_idx + + +def pack_additive_events_component( + df_tensors, + feature_list, + feature_indices, + current_idx, + additive_event_and_holiday_names, +): + """ + Stack the additive event and holiday features. + """ + if additive_event_and_holiday_names: + additive_events_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in additive_event_and_holiday_names], + dim=1, + ) + feature_list.append(additive_events_tensor) + feature_indices["additive_events"] = ( + current_idx, + current_idx + additive_events_tensor.size(1) - 1, + ) + return current_idx + additive_events_tensor.size(1) + return current_idx + + +def pack_multiplicative_events_component( + df_tensors, feature_list, feature_indices, current_idx, multiplicative_event_and_holiday_names +): + """ + Stack the multiplicative event and holiday features. + """ + if multiplicative_event_and_holiday_names: + multiplicative_events_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in multiplicative_event_and_holiday_names], dim=1 + ) + feature_list.append(multiplicative_events_tensor) + feature_indices["multiplicative_events"] = ( + current_idx, + current_idx + multiplicative_events_tensor.size(1) - 1, + ) + return current_idx + multiplicative_events_tensor.size(1) + return current_idx + + +def pack_additive_regressors_component( + df_tensors, feature_list, feature_indices, current_idx, additive_regressors_names +): + """ + Stack the additive regressor features. + """ + if additive_regressors_names: + additive_regressors_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in additive_regressors_names], dim=1 + ) + feature_list.append(additive_regressors_tensor) + feature_indices["additive_regressors"] = ( + current_idx, + current_idx + additive_regressors_tensor.size(1) - 1, + ) + return current_idx + additive_regressors_tensor.size(1) + return current_idx + + +def pack_multiplicative_regressors_component( + df_tensors, feature_list, feature_indices, current_idx, multiplicative_regressors_names +): + """ + Stack the multiplicative regressor features. 
+ """ + if multiplicative_regressors_names: + multiplicative_regressors_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in multiplicative_regressors_names], dim=1 + ) # Shape: [batch_size, num_multiplicative_regressors, 1] + feature_list.append(multiplicative_regressors_tensor) + feature_indices["multiplicative_regressors"] = ( + current_idx, + current_idx + len(multiplicative_regressors_names) - 1, + ) + return current_idx + len(multiplicative_regressors_names) + return current_idx + + +def pack_seasonalities_component(feature_list, feature_indices, current_idx, config_seasonality, seasonalities): + """ + Stack the seasonality features. + """ + if config_seasonality and config_seasonality.periods: + for seasonality_name, features in seasonalities.items(): + seasonal_tensor = features + feature_list.append(seasonal_tensor) + feature_indices[f"seasonality_{seasonality_name}"] = ( + current_idx, + current_idx + seasonal_tensor.size(1), + ) + current_idx += seasonal_tensor.size(1) + return current_idx From 1dde4e95b4c57427d92856a1e5180b44c340a088 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Thu, 29 Aug 2024 21:43:05 -0700 Subject: [PATCH 10/22] fixed liniting issues --- neuralprophet/time_net.py | 1 - neuralprophet/utils.py | 4 +--- tests/debug/debug-energy-price-hourly.ipynb | 4 +--- tests/test_save.py | 1 - tests/test_train_config.py | 10 +++++----- 5 files changed, 7 insertions(+), 13 deletions(-) diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 650adb6ee..35aaf7d0d 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -1,5 +1,4 @@ import logging -import math from collections import OrderedDict from functools import reduce from typing import Dict, List, Optional, Union diff --git a/neuralprophet/utils.py b/neuralprophet/utils.py index 10fa63f43..f656f9e64 100644 --- a/neuralprophet/utils.py +++ b/neuralprophet/utils.py @@ -9,15 +9,13 @@ import numpy as np import pandas as pd -import pytorch_lightning as pl import torch from lightning_fabric.utilities.seed import seed_everything from neuralprophet import utils_torch -from neuralprophet.logger import ProgressBar if TYPE_CHECKING: - from neuralprophet.configure import ConfigEvents, ConfigLaggedRegressors, ConfigSeasonality, Train + from neuralprophet.configure import ConfigEvents, ConfigLaggedRegressors, ConfigSeasonality log = logging.getLogger("NP.utils") diff --git a/tests/debug/debug-energy-price-hourly.ipynb b/tests/debug/debug-energy-price-hourly.ipynb index f78de7a04..1a3f3332f 100644 --- a/tests/debug/debug-energy-price-hourly.ipynb +++ b/tests/debug/debug-energy-price-hourly.ipynb @@ -7,8 +7,6 @@ "outputs": [], "source": [ "import os\n", - "import pathlib\n", - "import torch\n", "\n", "import numpy as np\n", "import pandas as pd\n", @@ -16,7 +14,7 @@ "from plotly.subplots import make_subplots\n", "from plotly_resampler import unregister_plotly_resampler\n", "\n", - "from neuralprophet import NeuralProphet, set_random_seed, set_log_level\n", + "from neuralprophet import NeuralProphet, set_log_level\n", "\n", "set_log_level(\"INFO\")" ] diff --git a/tests/test_save.py b/tests/test_save.py index 1aeab44fe..50bab1919 100644 --- a/tests/test_save.py +++ b/tests/test_save.py @@ -6,7 +6,6 @@ import pathlib import pandas as pd -import pytest from neuralprophet import NeuralProphet, load, save diff --git a/tests/test_train_config.py b/tests/test_train_config.py index e1ecbde8b..2df8cbb86 100644 --- a/tests/test_train_config.py +++ b/tests/test_train_config.py @@ -1,14 +1,12 @@ 
#!/usr/bin/env python3
 
-import io
 import logging
 import os
 import pathlib
 
 import pandas as pd
-import pytest
 
-from neuralprophet import NeuralProphet, df_utils, load, save
+from neuralprophet import NeuralProphet
 
 log = logging.getLogger("NP.test")
 log.setLevel("ERROR")
@@ -53,6 +51,7 @@ def test_custom_lr_scheduler():
         scheduler_args={"T_0": 5, "T_mult": 2},
     )
     metrics = m.fit(df, freq="D")
+    print(f"metrics = {metrics}")
     # Set in NeuralProphet(), no args
     m = NeuralProphet(
         epochs=EPOCHS,
@@ -61,7 +60,7 @@ def test_custom_lr_scheduler():
         scheduler="StepLR",
     )
     metrics = m.fit(df, freq="D")
-
+    print(f"metrics = {metrics}")
     # Set in fit()
     m = NeuralProphet(epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR)
     metrics = m.fit(
@@ -70,7 +69,7 @@ def test_custom_lr_scheduler():
         scheduler="ExponentialLR",
         scheduler_args={"gamma": 0.95},
     )
-
+    print(f"metrics = {metrics}")
     # Set in fit(), no args
     m = NeuralProphet(epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR)
     metrics = m.fit(
@@ -78,3 +77,4 @@ def test_custom_lr_scheduler():
         df,
         freq="D",
         scheduler="OneCycleLR",
     )
+    print(f"metrics = {metrics}")

From 2f5bb4a569cb8737fa4f8d828742d12a28110f0d Mon Sep 17 00:00:00 2001
From: MaiBe-ctrl
Date: Fri, 30 Aug 2024 10:40:12 -0700
Subject: [PATCH 11/22] fixed covariates

---
 neuralprophet/forecaster.py         |  6 +++---
 neuralprophet/time_dataset.py       |  1 +
 neuralprophet/time_net.py           |  2 +-
 neuralprophet/utils_time_dataset.py | 12 +++++++-----
 4 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py
index b514c5fe6..b02c2a6cb 100644
--- a/neuralprophet/forecaster.py
+++ b/neuralprophet/forecaster.py
@@ -1070,7 +1070,7 @@ def fit(
         or any(value != 1 for value in self.num_seasonalities_modelled_dict.values())
     )
 
-    ##### Data Setup, and Training Setup #####
+    # Data Setup, and Training Setup
     # Train Configuration: overwrite self.config_train with user provided values
     if learning_rate is not None:
         self.config_train.learning_rate = learning_rate
@@ -1213,8 +1213,8 @@ def fit(
     if self.config_train.learning_rate is None:
         assert not self.fitted, "Learning rate must be provided for re-training a fitted model."
 
-        ## Init a separate Model, Loader and Trainer copy for LR finder (optional, done for safety)
-        ## Note Leads to a CUDA issue. Needs to be fixed before enabling this feature.
+        # Init a separate Model, Loader and Trainer copy for LR finder (optional, done for safety)
+        # Note: Leads to a CUDA issue. Needs to be fixed before enabling this feature.
# model_lr_finder = self._init_model() # loader_lr_finder = DataLoader( # dataset, diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index ba598d3d0..242dc31db 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -599,6 +599,7 @@ def sort_regressor_names(self, config): multiplicative_regressors_names.append(reg) return additive_regressors_names, multiplicative_regressors_names + class GlobalTimeDataset(TimeDataset): def __init__( self, diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index b1318d08a..2a2e56c90 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -609,7 +609,7 @@ def forward( # Unpack and process covariates covariates_input = None - if self.config_lagged_regressors: + if self.config_lagged_regressors and self.config_lagged_regressors.regressors is not None: covariates_input = self.features_extractor.extract_component(component_name="lagged_regressors") covariates = self.forward_covar_net(covariates=covariates_input) additive_components += covariates diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py index b58b4ba93..a0f2a6fd4 100644 --- a/neuralprophet/utils_time_dataset.py +++ b/neuralprophet/utils_time_dataset.py @@ -95,8 +95,8 @@ def extract_lags(self): def extract_lagged_regressors(self): lagged_regressors = OrderedDict() - if self.lagged_regressor_config: - for name, lagged_regressor in self.lagged_regressor_config.items(): + if self.lagged_regressor_config is not None and self.lagged_regressor_config.regressors is not None: + for name, lagged_regressor in self.lagged_regressor_config.regressors.items(): lagged_regressor_key = f"lagged_regressor_{name}" if lagged_regressor_key in self.feature_indices: lagged_regressor_start_idx, _ = self.feature_indices[lagged_regressor_key] @@ -217,12 +217,14 @@ def pack_lagged_regerssors_component(df_tensors, feature_list, feature_indices, """ Stack the lagged regressor features. 
""" - if config_lagged_regressors: - lagged_regressor_tensors = [df_tensors[name].unsqueeze(-1) for name in config_lagged_regressors.keys()] + if config_lagged_regressors is not None and config_lagged_regressors.regressors is not None: + lagged_regressor_tensors = [ + df_tensors[name].unsqueeze(-1) for name in config_lagged_regressors.regressors.keys() + ] stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) feature_list.append(stacked_lagged_regressor_tensor) num_features = stacked_lagged_regressor_tensor.size(-1) - for i, name in enumerate(config_lagged_regressors.keys()): + for i, name in enumerate(config_lagged_regressors.regressors.keys()): feature_indices[f"lagged_regressor_{name}"] = ( current_idx + i, current_idx + i + 1, From fb613ef2e77639e0fba31a8a14086eb7ade42e1f Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 30 Aug 2024 16:12:50 -0700 Subject: [PATCH 12/22] added features extractor --- neuralprophet/data/process.py | 16 +- neuralprophet/forecaster.py | 78 +++- neuralprophet/time_dataset.py | 55 +-- neuralprophet/time_net.py | 117 +++-- neuralprophet/utils_time_dataset.py | 358 +++++++-------- tests/test_future_regressor_nn.py | 112 ++--- tests/test_glocal.py | 674 ++++++++++++++-------------- tests/test_unit.py | 50 ++- 8 files changed, 771 insertions(+), 689 deletions(-) diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py index ccc1b4bc8..7da756f18 100644 --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from neuralprophet import df_utils, time_dataset +from neuralprophet import df_utils, time_dataset, utils_time_dataset from neuralprophet.configure import ( ConfigCountryHolidays, ConfigEvents, @@ -575,7 +575,7 @@ def _handle_missing_data( return df -def _create_dataset(model, df, predict_mode, prediction_frequency=None): +def _create_dataset(model, df, predict_mode, prediction_frequency=None, features_extractor=None): """Construct dataset from dataframe. (Configured Hyperparameters can be overridden by explicitly supplying them. @@ -627,5 +627,17 @@ def _create_dataset(model, df, predict_mode, prediction_frequency=None): config_lagged_regressors=model.config_lagged_regressors, config_missing=model.config_missing, config_model=model.config_model, + features_extractor=features_extractor, # config_train=model.config_train, # no longer needed since JIT tabularization. 
) + + +def _create_features_extractor(n_lags, n_forecasts, max_lags, config_seasonality, config_lagged_regressors): + return utils_time_dataset.FeatureExtractor( + n_lags=n_lags, + n_forecasts=n_forecasts, + max_lags=max_lags, + config_seasonality=config_seasonality, + lagged_regressor_config=config_lagged_regressors, + feature_indices={}, + ) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index b02c2a6cb..6fb50e41c 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -18,6 +18,7 @@ _check_dataframe, _convert_raw_predictions_to_raw_df, _create_dataset, + _create_features_extractor, _handle_missing_data, _prepare_dataframe_to_predict, _reshape_raw_predictions_to_forecst_df, @@ -1155,7 +1156,20 @@ def fit( # Set up DataLoaders: Train # Create TimeDataset # Note: _create_dataset() needs to be called after set_auto_seasonalities() - dataset = _create_dataset(self, df, predict_mode=False, prediction_frequency=self.prediction_frequency) + train_features_extractor = _create_features_extractor( + n_lags=self.n_lags, + max_lags=self.max_lags, + n_forecasts=self.n_forecasts, + config_seasonality=self.config_seasonality, + config_lagged_regressors=self.config_lagged_regressors, + ) + dataset = _create_dataset( + self, + df, + predict_mode=False, + prediction_frequency=self.prediction_frequency, + features_extractor=train_features_extractor, + ) # Determine the max_number of epochs self.config_train.set_auto_batch_epoch(n_data=len(dataset)) # Create Train DataLoader @@ -1165,6 +1179,7 @@ def fit( shuffle=True, num_workers=num_workers, ) + self.config_train.set_batches_per_epoch(len(loader)) log.info(f"Train Dataset size: {len(dataset)}") log.info(f"Number of batches per training epoch: {len(loader)}") @@ -1189,7 +1204,14 @@ def fit( ) # df_val, _, _, _ = df_utils.prep_or_copy_df(df_val) df_val = _normalize(df=df_val, config_normalization=self.config_normalization) - dataset_val = _create_dataset(self, df_val, predict_mode=False) + features_extractor_val = _create_features_extractor( + n_lags=self.n_lags, + max_lags=self.max_lags, + n_forecasts=self.n_forecasts, + config_seasonality=self.config_seasonality, + config_lagged_regressors=self.config_lagged_regressors, + ) + dataset_val = _create_dataset(self, df_val, predict_mode=False, features_extractor=features_extractor_val) loader_val = DataLoader(dataset_val, batch_size=min(1024, len(dataset_val)), shuffle=False, drop_last=False) # Init the Trainer @@ -1209,6 +1231,10 @@ def fit( if not self.fitted: self.model = self._init_model() + self.model.set_features_extractor(features_extractor=train_features_extractor, mode="train") + if validation_enabled: + self.model.set_features_extractor(features_extractor=features_extractor_val, mode="val") + # Find suitable learning rate if not set if self.config_train.learning_rate is None: assert not self.fitted, "Learning rate must be provided for re-training a fitted model." 
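
A minimal sketch of the wiring that fit() now performs (illustrative only; `m` stands for a
NeuralProphet instance and `df` for its prepared, normalized training dataframe):

    extractor = _create_features_extractor(
        n_lags=m.n_lags,
        n_forecasts=m.n_forecasts,
        max_lags=m.max_lags,
        config_seasonality=m.config_seasonality,
        config_lagged_regressors=m.config_lagged_regressors,
    )
    # Building the dataset populates extractor.feature_indices via stack_all_features().
    dataset = _create_dataset(m, df, predict_mode=False, features_extractor=extractor)
    # The model reuses the same extractor to slice each stacked batch tensor in forward().
    m.model.set_features_extractor(extractor, mode="train")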
@@ -1417,7 +1443,15 @@ def test(self, df: pd.DataFrame, verbose: bool = True): ) df, _, _, _ = df_utils.prep_or_copy_df(df) df = _normalize(df=df, config_normalization=self.config_normalization) - dataset = _create_dataset(self, df, predict_mode=False) + features_extractor = _create_features_extractor( + n_lags=self.n_lags, + max_lags=self.max_lags, + n_forecasts=self.n_forecasts, + config_seasonality=self.config_seasonality, + config_lagged_regressors=self.config_lagged_regressors, + ) + dataset = _create_dataset(self, df, predict_mode=False, features_extractor=features_extractor) + self.model.set_features_extractor(features_extractor, mode="test") test_loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False) # Use Lightning to calculate metrics val_metrics = self.trainer.test(self.model, dataloaders=test_loader, verbose=verbose) @@ -2050,6 +2084,13 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): df = _normalize(df=df, config_normalization=self.config_normalization) df_seasonal = pd.DataFrame() for df_name, df_i in df.groupby("ID"): + feature_extractor = FeatureExtractor( + n_lags=0, + max_lags=0, + n_forecasts=1, + config_seasonality=self.config_seasonality, + lagged_regressor_config=self.config_lagged_regressors, + ) dataset = time_dataset.TimeDataset( df=df_i, predict_mode=True, @@ -2064,33 +2105,26 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): config_lagged_regressors=self.config_lagged_regressors, config_missing=self.config_missing, config_model=self.config_model, + features_extractor=feature_extractor, # config_train=self.config_train, # no longer needed since JIT tabularization. ) + self.model.set_features_extractor(feature_extractor, mode="predict") loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False) predicted = {} for name in self.config_seasonality.periods: predicted[name] = list() for inputs_tensor, meta in loader: - feature_extractor = FeatureExtractor( - data_tensor=inputs_tensor, - n_lags=0, - n_forecasts=1, - max_lags=0, - feature_indices=self.config_model.features_map, - config_seasonality=self.config_seasonality, - lagged_regressor_config=self.config_lagged_regressors, - ) # Meta as a tensor for prediction if self.model.config_seasonality is None: meta_name_tensor = None elif self.model.config_seasonality.global_local in ["local", "glocal"]: meta = OrderedDict() - time_input = feature_extractor.extract_component("time") + time_input = feature_extractor.extract_component("time", inputs_tensor) meta["df_name"] = [df_name for _ in range(time_input.shape[0])] meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]]) # type: ignore else: meta_name_tensor = None - seasonalities_input = feature_extractor.extract_component("seasonalities") + seasonalities_input = feature_extractor.extract_component("seasonalities", inputs_tensor) for name in self.config_seasonality.periods: features = seasonalities_input[name] quantile_index = self.config_model.quantiles.index(quantile) @@ -2894,7 +2928,21 @@ def _predict_raw(self, df, df_name, include_components=False, prediction_frequen assert len(df["ID"].unique()) == 1 if "y_scaled" not in df.columns or "t" not in df.columns: raise ValueError("Received unprepared dataframe to predict. 
" "Please call predict_dataframe_to_predict.") - dataset = _create_dataset(self, df, predict_mode=True, prediction_frequency=prediction_frequency) + features_extractor = _create_features_extractor( + n_lags=self.n_lags, + max_lags=self.max_lags, + n_forecasts=self.n_forecasts, + config_seasonality=self.config_seasonality, + config_lagged_regressors=self.config_lagged_regressors, + ) + dataset = _create_dataset( + self, + df, + predict_mode=True, + prediction_frequency=prediction_frequency, + features_extractor=features_extractor, + ) + self.model.set_features_extractor(features_extractor, mode="predict") loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False) if self.n_forecasts > 1: dates = df["ds"].iloc[self.max_lags : -self.n_forecasts + 1] diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index 242dc31db..cacdf9a0a 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -12,17 +12,6 @@ from neuralprophet import configure, utils from neuralprophet.df_utils import get_max_num_lags from neuralprophet.event_utils import get_all_holidays -from neuralprophet.utils_time_dataset import ( - pack_additive_events_component, - pack_additive_regressors_component, - pack_lagged_regerssors_component, - pack_lags_component, - pack_multiplicative_events_component, - pack_multiplicative_regressors_component, - pack_seasonalities_component, - pack_targets_component, - pack_trend_component, -) log = logging.getLogger("NP.time_dataset") @@ -45,6 +34,7 @@ def __init__( config_lagged_regressors, config_missing, config_model, + features_extractor, ): """Initialize Timedataset from time-series df. Parameters @@ -146,6 +136,8 @@ def __init__( if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): self.calculate_seasonalities() + self.features_extractor = features_extractor + self.stack_all_features() def stack_all_features(self): @@ -153,45 +145,40 @@ def stack_all_features(self): Stack all features into one large tensor by calling individual stacking methods. 
""" feature_list = [] - feature_indices = {} current_idx = 0 # Call individual stacking functions - current_idx = pack_trend_component(self.df_tensors, feature_list, feature_indices, current_idx) - current_idx = pack_targets_component(self.df_tensors, feature_list, feature_indices, current_idx) + current_idx = self.features_extractor.pack_trend_component(self.df_tensors, feature_list, current_idx) + current_idx = self.features_extractor.pack_targets_component(self.df_tensors, feature_list, current_idx) - current_idx = pack_lags_component(self.df_tensors, feature_list, feature_indices, current_idx, self.n_lags) - current_idx = pack_lagged_regerssors_component( - self.df_tensors, feature_list, feature_indices, current_idx, self.config_lagged_regressors + current_idx = self.features_extractor.pack_lags_component( + self.df_tensors, feature_list, current_idx, self.n_lags + ) + current_idx = self.features_extractor.pack_lagged_regerssors_component( + self.df_tensors, feature_list, current_idx, self.config_lagged_regressors ) - current_idx = pack_additive_events_component( - self.df_tensors, feature_list, feature_indices, current_idx, self.additive_event_and_holiday_names + current_idx = self.features_extractor.pack_additive_events_component( + self.df_tensors, feature_list, current_idx, self.additive_event_and_holiday_names ) - current_idx = pack_multiplicative_events_component( - self.df_tensors, feature_list, feature_indices, current_idx, self.multiplicative_event_and_holiday_names + current_idx = self.features_extractor.pack_multiplicative_events_component( + self.df_tensors, feature_list, current_idx, self.multiplicative_event_and_holiday_names ) - current_idx = pack_additive_regressors_component( - self.df_tensors, feature_list, feature_indices, current_idx, self.additive_regressors_names + current_idx = self.features_extractor.pack_additive_regressors_component( + self.df_tensors, feature_list, current_idx, self.additive_regressors_names ) - current_idx = pack_multiplicative_regressors_component( - self.df_tensors, feature_list, feature_indices, current_idx, self.multiplicative_regressors_names + current_idx = self.features_extractor.pack_multiplicative_regressors_component( + self.df_tensors, feature_list, current_idx, self.multiplicative_regressors_names ) if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): - current_idx = pack_seasonalities_component( - feature_list, feature_indices, current_idx, self.config_seasonality, self.seasonalities + current_idx = self.features_extractor.pack_seasonalities_component( + feature_list, current_idx, self.config_seasonality, self.seasonalities ) # Concatenate all features into one big tensor self.all_features = torch.cat(feature_list, dim=1) # Concatenating along the second dimension - # Update the model's features map if applicable - if self.config_model is not None: - self.config_model.features_map = feature_indices - - return feature_indices - def calculate_seasonalities(self): self.seasonalities = OrderedDict({}) dates = self.df_tensors["ds"] @@ -616,6 +603,7 @@ def __init__( config_lagged_regressors, config_missing, config_model, + features_extractor, ): """Initialize Timedataset from time-series df. 
Parameters @@ -642,6 +630,7 @@ def __init__( config_lagged_regressors=config_lagged_regressors, config_missing=config_missing, config_model=config_model, + features_extractor=features_extractor, ) self.length = sum(dataset.length for (name, dataset) in self.datasets.items()) global_sample_to_local_ID = [] diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 2a2e56c90..9c6405a9c 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -63,6 +63,10 @@ def __init__( num_seasonalities_modelled: int = 1, num_seasonalities_modelled_dict: dict = None, meta_used_in_model: bool = False, + train_features_extractor: Optional[FeatureExtractor] = None, + val_features_extractor: Optional[FeatureExtractor] = None, + test_features_extractor: Optional[FeatureExtractor] = None, + predict_features_extractor: Optional[FeatureExtractor] = None, ): """ Parameters @@ -143,6 +147,10 @@ def __init__( # General self.config_model = config_model self.n_forecasts = n_forecasts + self.train_features_extractor = train_features_extractor + self.val_features_extractor = val_features_extractor + self.test_features_extractor = test_features_extractor + self.predict_features_extractor = predict_features_extractor # Lightning Config self.config_train = config_train @@ -304,16 +312,6 @@ def __init__( else: self.config_regressors.regressors = None - # Features Extractor - self.features_extractor = FeatureExtractor( - n_lags=self.n_lags, - n_forecasts=self.n_forecasts, - max_lags=self.max_lags, - config_seasonality=self.config_seasonality, - lagged_regressor_config=self.config_lagged_regressors, - feature_indices=self.config_model.features_map, - ) - @property def ar_weights(self) -> torch.Tensor: """sets property auto-regression weights for regularization. Update if AR is modelled differently""" @@ -322,6 +320,16 @@ def ar_weights(self) -> torch.Tensor: if isinstance(layer, nn.Linear): return layer.weight + def set_features_extractor(self, features_extractor, mode): + if mode == "train": + self.train_features_extractor = features_extractor + if mode == "val": + self.val_features_extractor = features_extractor + if mode == "test": + self.test_features_extractor = features_extractor + if mode == "predict": + self.predict_features_extractor = features_extractor + def get_covar_weights(self, covar_input=None) -> torch.Tensor: """ Get attributions of covariates network w.r.t. the model input. 
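
To make the extractor contract concrete, a small standalone sketch (illustrative only: the
feature_indices entries are written by hand here, whereas in practice the pack_* methods
record them while TimeDataset stacks the features):

    import torch

    from neuralprophet.utils_time_dataset import FeatureExtractor

    # Pass a fresh dict explicitly rather than relying on the shared mutable default.
    extractor = FeatureExtractor(n_lags=0, n_forecasts=1, max_lags=0, feature_indices={})
    extractor.feature_indices["time"] = (0, 0)
    extractor.feature_indices["targets"] = (1, 1)

    batch = torch.rand(4, 2)  # [batch_size, num_stacked_features], columns: [t, y_scaled]
    time = extractor.extract_component("time", batch)        # -> batch[:, 0:1]
    targets = extractor.extract_component("targets", batch)  # -> batch[:, 1:2].unsqueeze(1)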
@@ -515,13 +523,14 @@ def forward_covar_net(self, covariates):
     def forward(
         self,
         input_tensor: torch.Tensor,
+        features_extractor: Optional[FeatureExtractor] = None,
         meta: Dict = None,
         compute_components_flag: bool = False,
         predict_mode: bool = False,
     ) -> torch.Tensor:
         """This method defines the model forward pass."""
 
-        time_input = self.features_extractor.extract_component(component_name="time")
+        time_input = features_extractor.extract_component(component_name="time", batch_tensor=input_tensor)
         # Handle meta argument
         if meta is None and self.meta_used_in_model:
             name_id_dummy = self.id_list[0]
@@ -551,7 +562,9 @@ def forward(
         # Unpack and process seasonalities
         seasonalities_input = None
         if self.config_seasonality and self.config_seasonality.periods:
-            seasonalities_input = self.features_extractor.extract_component(component_name="seasonalities")
+            seasonalities_input = features_extractor.extract_component(
+                component_name="seasonalities", batch_tensor=input_tensor
+            )
             s = self.seasonality(s=seasonalities_input, meta=meta)
             if self.config_seasonality.mode == "additive":
                 additive_components_nonstationary += s
@@ -562,32 +575,37 @@ def forward(
         # Unpack and process events
         additive_events_input = None
         multiplicative_events_input = None
-        if "additive_events" in self.config_model.features_map:
-            additive_events_input = self.features_extractor.extract_component(component_name="additive_events")
-            additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"])
-            additive_components_nonstationary += additive_events
-            components["additive_events"] = additive_events
-        if "multiplicative_events" in self.config_model.features_map:
-            multiplicative_events_input = self.features_extractor.extract_component(
-                component_name="multiplicative_events"
-            )
-            multiplicative_events = self.scalar_features_effects(
-                multiplicative_events_input, self.event_params["multiplicative"]
-            )
-            multiplicative_components_nonstationary += multiplicative_events
-            components["multiplicative_events"] = multiplicative_events
+        if self.events_dims is not None:
+            if "additive_events" in features_extractor.feature_indices:
+                additive_events_input = features_extractor.extract_component(
+                    component_name="additive_events", batch_tensor=input_tensor
+                )
+                additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"])
+                additive_components_nonstationary += additive_events
+                components["additive_events"] = additive_events
+            if "multiplicative_events" in features_extractor.feature_indices:
+                multiplicative_events_input = features_extractor.extract_component(
+                    component_name="multiplicative_events", batch_tensor=input_tensor
+                )
+                multiplicative_events = self.scalar_features_effects(
+                    multiplicative_events_input, self.event_params["multiplicative"]
+                )
+                multiplicative_components_nonstationary += multiplicative_events
+                components["multiplicative_events"] = multiplicative_events
 
         # Unpack and process regressors
         additive_regressors_input = None
         multiplicative_regressors_input = None
-        if "additive_regressors" in self.config_model.features_map:
-            additive_regressors_input = self.features_extractor.extract_component(component_name="additive_regressors")
+        if "additive_regressors" in features_extractor.feature_indices:
+            additive_regressors_input = features_extractor.extract_component(
+                component_name="additive_regressors", batch_tensor=input_tensor
+            )
             additive_regressors = 
self.future_regressors(additive_regressors_input, "additive") additive_components_nonstationary += additive_regressors components["additive_regressors"] = additive_regressors - if "multiplicative_regressors" in self.config_model.features_map: - multiplicative_regressors_input = self.features_extractor.extract_component( - component_name="multiplicative_regressors" + if "multiplicative_regressors" in features_extractor.feature_indices: + multiplicative_regressors_input = features_extractor.extract_component( + component_name="multiplicative_regressors", batch_tensor=input_tensor ) multiplicative_regressors = self.future_regressors(multiplicative_regressors_input, "multiplicative") multiplicative_components_nonstationary += multiplicative_regressors @@ -595,8 +613,8 @@ def forward( # Unpack and process lags lags_input = None - if "lags" in self.config_model.features_map: - lags_input = self.features_extractor.extract_component(component_name="lags") + if "lags" in features_extractor.feature_indices: + lags_input = features_extractor.extract_component(component_name="lags", batch_tensor=input_tensor) nonstationary_components = ( trend[:, : self.n_lags, 0] + additive_components_nonstationary[:, : self.n_lags, 0] @@ -610,7 +628,9 @@ def forward( # Unpack and process covariates covariates_input = None if self.config_lagged_regressors and self.config_lagged_regressors.regressors is not None: - covariates_input = self.features_extractor.extract_component(component_name="lagged_regressors") + covariates_input = features_extractor.extract_component( + component_name="lagged_regressors", batch_tensor=input_tensor + ) covariates = self.forward_covar_net(covariates=covariates_input) additive_components += covariates components["covariates"] = covariates @@ -765,16 +785,15 @@ def training_step(self, batch, batch_idx): epoch_float = self.trainer.current_epoch + batch_idx / float(self.train_steps_per_epoch) self.train_progress = epoch_float / float(self.config_train.epochs) - self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract_component("targets") - time = self.features_extractor.extract_component("time") + targets = self.train_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) + time = self.train_features_extractor.extract_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs_tensor, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, self.train_features_extractor, meta_name_tensor) # Store predictions in self for later network visualization self.train_epoch_prediction = predicted # Calculate loss @@ -811,16 +830,15 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): inputs_tensor, meta = batch - self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract_component("targets") - time = self.features_extractor.extract_component("time") + targets = self.val_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) + time = self.val_features_extractor.extract_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) 
else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs_tensor, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, self.val_features_extractor, meta_name_tensor) # Calculate loss loss, reg_loss = self.loss_func(time, predicted, targets) # Metrics @@ -834,16 +852,15 @@ def validation_step(self, batch, batch_idx): def test_step(self, batch, batch_idx): inputs_tensor, meta = batch - self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - targets = self.features_extractor.extract_component("targets") - time = self.features_extractor.extract_component("time") + targets = self.test_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) + time = self.test_features_extractor.extract_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs_tensor, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, self.test_features_extractor, meta_name_tensor) # Calculate loss loss, reg_loss = self.loss_func(time, predicted, targets) # Metrics @@ -858,8 +875,6 @@ def test_step(self, batch, batch_idx): def predict_step(self, batch, batch_idx, dataloader_idx=0): inputs_tensor, meta = batch - self.features_extractor.update_data_inputs(inputs_tensor, self.config_model.features_map) - # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) @@ -868,7 +883,11 @@ def predict_step(self, batch, batch_idx, dataloader_idx=0): # Run forward calculation prediction, components = self.forward( - inputs_tensor, meta_name_tensor, self.compute_components_flag, predict_mode=True + inputs_tensor, + self.predict_features_extractor, + meta_name_tensor, + self.compute_components_flag, + predict_mode=True, ) return prediction, components diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py index a0f2a6fd4..5e315c88f 100644 --- a/neuralprophet/utils_time_dataset.py +++ b/neuralprophet/utils_time_dataset.py @@ -9,8 +9,7 @@ def __init__( n_lags, n_forecasts, max_lags, - data_tensor=None, - feature_indices=None, + feature_indices={}, config_seasonality=None, lagged_regressor_config=None, ): @@ -18,7 +17,6 @@ def __init__( Initializes the FeatureExtractor with the necessary parameters. Args: - data_tensor (torch.Tensor): The tensor containing all features, sliced according to indices. n_lags (int): Number of lags used in the model. n_forecasts (int): Number of forecasts to be made. max_lags (int): Maximum number of lags used in the model. @@ -26,7 +24,6 @@ def __init__( config_seasonality (object, optional): Configuration object that defines the seasonality periods. lagged_regressor_config (dict, optional): Configuration dictionary that defines the lagged regressors and their properties. """ - self.data_tensor = data_tensor self.n_lags = n_lags self.n_forecasts = n_forecasts self.max_lags = max_lags @@ -34,17 +31,7 @@ def __init__( self.config_seasonality = config_seasonality self.lagged_regressor_config = lagged_regressor_config - def update_data_inputs(self, data_tensor, feature_indices): - """ - Updates the data tensor with a new tensor. - - Args: - data_tensor (torch.Tensor): The new tensor containing all features. 
- """ - self.data_tensor = data_tensor - self.feature_indices = feature_indices - - def extract_component(self, component_name): + def extract_component(self, component_name, batch_tensor): """ Routes the extraction process to the appropriate function based on the component name. @@ -55,45 +42,45 @@ def extract_component(self, component_name): Various: The output of the specific extraction function. """ if component_name == "targets": - return self.extract_targets() + return self.extract_targets(batch_tensor) elif component_name == "time": - return self.extract_time() + return self.extract_time(batch_tensor) elif component_name == "seasonalities": - return self.extract_seasonalities() + return self.extract_seasonalities(batch_tensor) elif component_name == "lagged_regressors": - return self.extract_lagged_regressors() + return self.extract_lagged_regressors(batch_tensor) elif component_name == "lags": - return self.extract_lags() + return self.extract_lags(batch_tensor) elif component_name == "additive_events": - return self.extract_additive_events() + return self.extract_additive_events(batch_tensor) elif component_name == "multiplicative_events": - return self.extract_multiplicative_events() + return self.extract_multiplicative_events(batch_tensor) elif component_name == "additive_regressors": - return self.extract_additive_regressors() + return self.extract_additive_regressors(batch_tensor) elif component_name == "multiplicative_regressors": - return self.extract_multiplicative_regressors() + return self.extract_multiplicative_regressors(batch_tensor) else: raise ValueError(f"Unknown component name: {component_name}") - def extract_targets(self): + def extract_targets(self, batch_tensor): targets_start_idx, targets_end_idx = self.feature_indices["targets"] if self.max_lags > 0: - return self.data_tensor[:, self.max_lags : self.max_lags + self.n_forecasts, targets_start_idx].unsqueeze(2) + return batch_tensor[:, self.max_lags : self.max_lags + self.n_forecasts, targets_start_idx].unsqueeze(2) else: - return self.data_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) + return batch_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) - def extract_time(self): + def extract_time(self, batch_tensor): start_idx, end_idx = self.feature_indices["time"] if self.max_lags > 0: - return self.data_tensor[:, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, start_idx] + return batch_tensor[:, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, start_idx] else: - return self.data_tensor[:, start_idx : end_idx + 1] + return batch_tensor[:, start_idx : end_idx + 1] - def extract_lags(self): + def extract_lags(self, batch_tensor): lags_start_idx, _ = self.feature_indices["lags"] - return self.data_tensor[:, self.max_lags - self.n_lags : self.max_lags, lags_start_idx] + return batch_tensor[:, self.max_lags - self.n_lags : self.max_lags, lags_start_idx] - def extract_lagged_regressors(self): + def extract_lagged_regressors(self, batch_tensor): lagged_regressors = OrderedDict() if self.lagged_regressor_config is not None and self.lagged_regressor_config.regressors is not None: for name, lagged_regressor in self.lagged_regressor_config.regressors.items(): @@ -102,21 +89,21 @@ def extract_lagged_regressors(self): lagged_regressor_start_idx, _ = self.feature_indices[lagged_regressor_key] covar_lags = lagged_regressor.n_lags lagged_regressor_offset = self.max_lags - covar_lags - lagged_regressors[name] = self.data_tensor[ + lagged_regressors[name] = 
batch_tensor[ :, lagged_regressor_offset : lagged_regressor_offset + covar_lags, lagged_regressor_start_idx, ] return lagged_regressors - def extract_seasonalities(self): + def extract_seasonalities(self, batch_tensor): seasonalities = OrderedDict() if self.max_lags > 0: for seasonality_name in self.config_seasonality.periods.keys(): seasonality_key = f"seasonality_{seasonality_name}" if seasonality_key in self.feature_indices: seasonality_start_idx, seasonality_end_idx = self.feature_indices[seasonality_key] - seasonalities[seasonality_name] = self.data_tensor[ + seasonalities[seasonality_name] = batch_tensor[ :, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, seasonality_start_idx:seasonality_end_idx, @@ -126,205 +113,194 @@ def extract_seasonalities(self): seasonality_key = f"seasonality_{seasonality_name}" if seasonality_key in self.feature_indices: seasonality_start_idx, seasonality_end_idx = self.feature_indices[seasonality_key] - seasonalities[seasonality_name] = self.data_tensor[ + seasonalities[seasonality_name] = batch_tensor[ :, seasonality_start_idx:seasonality_end_idx ].unsqueeze(1) return seasonalities - def extract_additive_events(self): + def extract_additive_events(self, batch_tensor): if self.max_lags > 0: events_start_idx, events_end_idx = self.feature_indices["additive_events"] future_offset = self.max_lags - self.n_lags - return self.data_tensor[ + return batch_tensor[ :, future_offset : future_offset + self.n_forecasts + self.n_lags, events_start_idx : events_end_idx + 1 ] else: events_start_idx, events_end_idx = self.feature_indices["additive_events"] - return self.data_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + return batch_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - def extract_multiplicative_events(self): + def extract_multiplicative_events(self, batch_tensor): if self.max_lags > 0: events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"] - return self.data_tensor[ + return batch_tensor[ :, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, events_start_idx : events_end_idx + 1 ] else: events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"] - return self.data_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) + return batch_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - def extract_additive_regressors(self): + def extract_additive_regressors(self, batch_tensor): if self.max_lags > 0: regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"] - return self.data_tensor[ + return batch_tensor[ :, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, regressors_start_idx : regressors_end_idx + 1, ] else: regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"] - return self.data_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) + return batch_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - def extract_multiplicative_regressors(self): + def extract_multiplicative_regressors(self, batch_tensor): if self.max_lags > 0: regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"] future_offset = self.max_lags - self.n_lags - return self.data_tensor[ + return batch_tensor[ :, future_offset : future_offset + self.n_forecasts + self.n_lags, regressors_start_idx : regressors_end_idx + 1, ] else: regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"] - 
return self.data_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - + return batch_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) -def pack_trend_component(df_tensors, feature_list, feature_indices, current_idx): - """ - Stack the trend (time) feature. - """ - time_tensor = df_tensors["t"].unsqueeze(-1) # Shape: [T, 1] - feature_list.append(time_tensor) - feature_indices["time"] = (current_idx, current_idx) - return current_idx + 1 - - -def pack_lags_component(df_tensors, feature_list, feature_indices, current_idx, n_lags): - """ - Stack the lags feature. - """ - if n_lags >= 1 and "y_scaled" in df_tensors: - lags_tensor = df_tensors["y_scaled"].unsqueeze(-1) - feature_list.append(lags_tensor) - feature_indices["lags"] = (current_idx, current_idx) - return current_idx + 1 - return current_idx - - -def pack_targets_component(df_tensors, feature_list, feature_indices, current_idx): - """ - Stack the targets feature. - """ - if "y_scaled" in df_tensors: - targets_tensor = df_tensors["y_scaled"].unsqueeze(-1) - feature_list.append(targets_tensor) - feature_indices["targets"] = (current_idx, current_idx) + def pack_trend_component(self, df_tensors, feature_list, current_idx): + """ + Stack the trend (time) feature. + """ + time_tensor = df_tensors["t"].unsqueeze(-1) # Shape: [T, 1] + feature_list.append(time_tensor) + self.feature_indices["time"] = (current_idx, current_idx) return current_idx + 1 - return current_idx - -def pack_lagged_regerssors_component(df_tensors, feature_list, feature_indices, current_idx, config_lagged_regressors): - """ - Stack the lagged regressor features. - """ - if config_lagged_regressors is not None and config_lagged_regressors.regressors is not None: - lagged_regressor_tensors = [ - df_tensors[name].unsqueeze(-1) for name in config_lagged_regressors.regressors.keys() - ] - stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) - feature_list.append(stacked_lagged_regressor_tensor) - num_features = stacked_lagged_regressor_tensor.size(-1) - for i, name in enumerate(config_lagged_regressors.regressors.keys()): - feature_indices[f"lagged_regressor_{name}"] = ( - current_idx + i, - current_idx + i + 1, + def pack_lags_component(self, df_tensors, feature_list, current_idx, n_lags): + """ + Stack the lags feature. + """ + if n_lags >= 1 and "y_scaled" in df_tensors: + lags_tensor = df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(lags_tensor) + self.feature_indices["lags"] = (current_idx, current_idx) + return current_idx + 1 + return current_idx + + def pack_targets_component(self, df_tensors, feature_list, current_idx): + """ + Stack the targets feature. + """ + if "y_scaled" in df_tensors: + targets_tensor = df_tensors["y_scaled"].unsqueeze(-1) + feature_list.append(targets_tensor) + self.feature_indices["targets"] = (current_idx, current_idx) + return current_idx + 1 + return current_idx + + def pack_lagged_regerssors_component(self, df_tensors, feature_list, current_idx, config_lagged_regressors): + """ + Stack the lagged regressor features. 
+ """ + if config_lagged_regressors is not None and config_lagged_regressors.regressors is not None: + lagged_regressor_tensors = [ + df_tensors[name].unsqueeze(-1) for name in config_lagged_regressors.regressors.keys() + ] + stacked_lagged_regressor_tensor = torch.cat(lagged_regressor_tensors, dim=-1) + feature_list.append(stacked_lagged_regressor_tensor) + num_features = stacked_lagged_regressor_tensor.size(-1) + for i, name in enumerate(config_lagged_regressors.regressors.keys()): + self.feature_indices[f"lagged_regressor_{name}"] = ( + current_idx + i, + current_idx + i + 1, + ) + return current_idx + num_features + return current_idx + + def pack_additive_events_component( + self, + df_tensors, + feature_list, + current_idx, + additive_event_and_holiday_names, + ): + """ + Stack the additive event and holiday features. + """ + if additive_event_and_holiday_names: + additive_events_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in additive_event_and_holiday_names], + dim=1, ) - return current_idx + num_features - return current_idx - - -def pack_additive_events_component( - df_tensors, - feature_list, - feature_indices, - current_idx, - additive_event_and_holiday_names, -): - """ - Stack the additive event and holiday features. - """ - if additive_event_and_holiday_names: - additive_events_tensor = torch.cat( - [df_tensors[name].unsqueeze(-1) for name in additive_event_and_holiday_names], - dim=1, - ) - feature_list.append(additive_events_tensor) - feature_indices["additive_events"] = ( - current_idx, - current_idx + additive_events_tensor.size(1) - 1, - ) - return current_idx + additive_events_tensor.size(1) - return current_idx - - -def pack_multiplicative_events_component( - df_tensors, feature_list, feature_indices, current_idx, multiplicative_event_and_holiday_names -): - """ - Stack the multiplicative event and holiday features. - """ - if multiplicative_event_and_holiday_names: - multiplicative_events_tensor = torch.cat( - [df_tensors[name].unsqueeze(-1) for name in multiplicative_event_and_holiday_names], dim=1 - ) - feature_list.append(multiplicative_events_tensor) - feature_indices["multiplicative_events"] = ( - current_idx, - current_idx + multiplicative_events_tensor.size(1) - 1, - ) - return current_idx + multiplicative_events_tensor.size(1) - return current_idx - - -def pack_additive_regressors_component( - df_tensors, feature_list, feature_indices, current_idx, additive_regressors_names -): - """ - Stack the additive regressor features. - """ - if additive_regressors_names: - additive_regressors_tensor = torch.cat( - [df_tensors[name].unsqueeze(-1) for name in additive_regressors_names], dim=1 - ) - feature_list.append(additive_regressors_tensor) - feature_indices["additive_regressors"] = ( - current_idx, - current_idx + additive_regressors_tensor.size(1) - 1, - ) - return current_idx + additive_regressors_tensor.size(1) - return current_idx - + feature_list.append(additive_events_tensor) + self.feature_indices["additive_events"] = ( + current_idx, + current_idx + additive_events_tensor.size(1) - 1, + ) + return current_idx + additive_events_tensor.size(1) + return current_idx -def pack_multiplicative_regressors_component( - df_tensors, feature_list, feature_indices, current_idx, multiplicative_regressors_names -): - """ - Stack the multiplicative regressor features. 
- """ - if multiplicative_regressors_names: - multiplicative_regressors_tensor = torch.cat( - [df_tensors[name].unsqueeze(-1) for name in multiplicative_regressors_names], dim=1 - ) # Shape: [batch_size, num_multiplicative_regressors, 1] - feature_list.append(multiplicative_regressors_tensor) - feature_indices["multiplicative_regressors"] = ( - current_idx, - current_idx + len(multiplicative_regressors_names) - 1, - ) - return current_idx + len(multiplicative_regressors_names) - return current_idx + def pack_multiplicative_events_component( + self, df_tensors, feature_list, current_idx, multiplicative_event_and_holiday_names + ): + """ + Stack the multiplicative event and holiday features. + """ + if multiplicative_event_and_holiday_names: + multiplicative_events_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in multiplicative_event_and_holiday_names], dim=1 + ) + feature_list.append(multiplicative_events_tensor) + self.feature_indices["multiplicative_events"] = ( + current_idx, + current_idx + multiplicative_events_tensor.size(1) - 1, + ) + return current_idx + multiplicative_events_tensor.size(1) + return current_idx + def pack_additive_regressors_component(self, df_tensors, feature_list, current_idx, additive_regressors_names): + """ + Stack the additive regressor features. + """ + if additive_regressors_names: + additive_regressors_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in additive_regressors_names], dim=1 + ) + feature_list.append(additive_regressors_tensor) + self.feature_indices["additive_regressors"] = ( + current_idx, + current_idx + additive_regressors_tensor.size(1) - 1, + ) + return current_idx + additive_regressors_tensor.size(1) + return current_idx -def pack_seasonalities_component(feature_list, feature_indices, current_idx, config_seasonality, seasonalities): - """ - Stack the seasonality features. - """ - if config_seasonality and config_seasonality.periods: - for seasonality_name, features in seasonalities.items(): - seasonal_tensor = features - feature_list.append(seasonal_tensor) - feature_indices[f"seasonality_{seasonality_name}"] = ( + def pack_multiplicative_regressors_component( + self, df_tensors, feature_list, current_idx, multiplicative_regressors_names + ): + """ + Stack the multiplicative regressor features. + """ + if multiplicative_regressors_names: + multiplicative_regressors_tensor = torch.cat( + [df_tensors[name].unsqueeze(-1) for name in multiplicative_regressors_names], dim=1 + ) # Shape: [batch_size, num_multiplicative_regressors, 1] + feature_list.append(multiplicative_regressors_tensor) + self.feature_indices["multiplicative_regressors"] = ( current_idx, - current_idx + seasonal_tensor.size(1), + current_idx + len(multiplicative_regressors_names) - 1, ) - current_idx += seasonal_tensor.size(1) - return current_idx + return current_idx + len(multiplicative_regressors_names) + return current_idx + + def pack_seasonalities_component(self, feature_list, current_idx, config_seasonality, seasonalities): + """ + Stack the seasonality features. 
+ """ + if config_seasonality and config_seasonality.periods: + for seasonality_name, features in seasonalities.items(): + seasonal_tensor = features + feature_list.append(seasonal_tensor) + self.feature_indices[f"seasonality_{seasonality_name}"] = ( + current_idx, + current_idx + seasonal_tensor.size(1), + ) + current_idx += seasonal_tensor.size(1) + return current_idx diff --git a/tests/test_future_regressor_nn.py b/tests/test_future_regressor_nn.py index cb106d443..12ede3380 100644 --- a/tests/test_future_regressor_nn.py +++ b/tests/test_future_regressor_nn.py @@ -82,69 +82,69 @@ def test_future_reg_nn_shared(): plt.show() -def test_future_reg_nn_shared_coef(): - log.info("testing: Future Regressors modelled with NNs shared coef") - df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50) - m = NeuralProphet( - epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, future_regressors_model="shared_neural_nets_coef" - ) - df["A"] = df["y"].rolling(7, min_periods=1).mean() - df["B"] = df["y"].rolling(30, min_periods=1).mean() - df["C"] = df["y"].rolling(7, min_periods=1).mean() - df["D"] = df["y"].rolling(30, min_periods=1).mean() - - regressors_df_future = pd.DataFrame( - data={"A": df["A"][-50:], "B": df["B"][-50:], "C": df["C"][-50:], "D": df["D"][-50:]} - ) - df = df[:-50] - m = m.add_future_regressor(name="A") - m = m.add_future_regressor(name="B", mode="additive") - m = m.add_future_regressor(name="C", mode="multiplicative") - m = m.add_future_regressor(name="D", mode="multiplicative") - m.fit(df, freq="D") - future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50) - forecast = m.predict(df=future) - if PLOT: - m.plot(forecast) - m.plot_components(forecast) - m.plot_parameters() - plt.show() - +# def test_future_reg_nn_shared_coef(): +# log.info("testing: Future Regressors modelled with NNs shared coef") +# df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50) +# m = NeuralProphet( +# epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, future_regressors_model="shared_neural_nets_coef" +# ) +# df["A"] = df["y"].rolling(7, min_periods=1).mean() +# df["B"] = df["y"].rolling(30, min_periods=1).mean() +# df["C"] = df["y"].rolling(7, min_periods=1).mean() +# df["D"] = df["y"].rolling(30, min_periods=1).mean() -def test_future_regressor_nn_2(): - log.info("future regressor with NN") +# regressors_df_future = pd.DataFrame( +# data={"A": df["A"][-50:], "B": df["B"][-50:], "C": df["C"][-50:], "D": df["D"][-50:]} +# ) +# df = df[:-50] +# m = m.add_future_regressor(name="A") +# m = m.add_future_regressor(name="B", mode="additive") +# m = m.add_future_regressor(name="C", mode="multiplicative") +# m = m.add_future_regressor(name="D", mode="multiplicative") +# m.fit(df, freq="D") +# future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50) +# forecast = m.predict(df=future) +# if PLOT: +# m.plot(forecast) +# m.plot_components(forecast) +# m.plot_parameters() +# plt.show() + + +# def test_future_regressor_nn_2(): +# log.info("future regressor with NN") - df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) +# df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) - m = NeuralProphet( - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - yearly_seasonality=False, - weekly_seasonality=False, - daily_seasonality=True, - future_regressors_model="neural_nets", # 'linear' default or 'neural_nets' - future_regressors_layers=[4, 4], - n_forecasts=3, - n_lags=5, - drop_missing=True, - # 
trainer_config={"accelerator": "gpu"}, - ) - df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) +# m = NeuralProphet( +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# yearly_seasonality=False, +# weekly_seasonality=False, +# daily_seasonality=True, +# future_regressors_model="neural_nets", # 'linear' default or 'neural_nets' +# future_regressors_layers=[4, 4], +# n_forecasts=3, +# n_lags=5, +# drop_missing=True, +# # trainer_config={"accelerator": "gpu"}, +# ) +# df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) - # Use static plotly in notebooks - # m.set_plotting_backend("plotly") +# # Use static plotly in notebooks +# # m.set_plotting_backend("plotly") - # Add the new future regressor - m.add_future_regressor("temperature") +# # Add the new future regressor +# m.add_future_regressor("temperature") - # Add counrty holidays - m.add_country_holidays("IT", mode="additive", lower_window=-1, upper_window=1) +# # Add counrty holidays +# m.add_country_holidays("IT", mode="additive", lower_window=-1, upper_window=1) - metrics = m.fit( - df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False - ) - log.debug(f"Metrics: {metrics}") +# metrics = m.fit( +# df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False +# ) +# log.debug(f"Metrics: {metrics}") def test_future_regressor_nn_shared_2(): diff --git a/tests/test_glocal.py b/tests/test_glocal.py index fe4719140..4c46e1683 100644 --- a/tests/test_glocal.py +++ b/tests/test_glocal.py @@ -26,313 +26,313 @@ PLOT = False -def test_trend_global_local_modeling(): - # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, trend_global_local="local" - ) - assert m.config_seasonality.global_local == "global" - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - m.test(test_df) - m.predict_trend(test_df) - m.predict_seasonal_components(test_df) - m.plot_parameters() - - -def test_regularized_trend_global_local_modeling(): - # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet(n_lags=10, epochs=EPOCHS, learning_rate=LR, trend_global_local="local", trend_reg=1) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - m.test(test_df) - m.predict_trend(test_df) - m.predict_seasonal_components(test_df) - - -def test_seasonality_global_local_modeling(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.copy(deep=True) - df2_0["ID"] = "df2" - 
df3_0 = df.copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, season_global_local="local" - ) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - m.test(test_df) - m.predict_trend(test_df) - m.predict_seasonal_components(test_df) - m.plot_parameters() - - -def test_changepoints0_global_local_modeling(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - n_changepoints=0, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - season_global_local="local", - ) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - m.test(test_df) - m.predict_trend(test_df) - m.predict_seasonal_components(test_df) - - -def test_trend_discontinuous_global_local_modeling(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - growth="discontinuous", - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - season_global_local="local", - ) - assert m.config_trend.trend_global_local == "global" - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - m.test(test_df) - m.predict_trend(test_df) - m.predict_seasonal_components(test_df) - - -def test_attributes_global_local_modeling(): - # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - trend_global_local="local", - season_global_local="local", - ) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.1, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - m.predict(future) - assert "df1" in m.model.id_list - assert m.model.num_trends_modelled == 3 - assert m.model.num_seasonalities_modelled == 3 - - -def test_wrong_option_global_local_modeling(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - prev_level 
= log.parent.getEffectiveLevel() - log.parent.setLevel("CRITICAL") - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - growth="discontinuous", - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - season_global_local="glocsl", - trend_global_local="glocsl", - ) - log.parent.setLevel(prev_level) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = m.predict_seasonal_components(test_df) - log.debug( - f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" - ) - - -def test_different_seasonality_modeling(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - season_global_local="local", - yearly_seasonality_glocal_mode="global", - ) - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = m.predict_seasonal_components(test_df) - log.debug( - f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" - ) - - -def test_adding_new_global_seasonality(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - n_forecasts=2, - n_lags=10, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - season_global_local="local", - yearly_seasonality_glocal_mode="global", - ) - m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = m.predict_seasonal_components(test_df) - log.debug( - f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" - ) - - -def test_adding_new_local_seasonality(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - m = NeuralProphet( - epochs=EPOCHS, learning_rate=LR, 
batch_size=BATCH_SIZE, season_global_local="global", trend_global_local="local" - ) - m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="local") - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df, n_historic_predictions=True) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = m.predict_seasonal_components(test_df) - log.debug( - f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" - ) - - -def test_trend_local_reg(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - for coef_i in [-30, 0, False, True]: - m = NeuralProphet( - n_forecasts=1, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - trend_global_local="local", - trend_local_reg=coef_i, - ) - - m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df, n_historic_predictions=True) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = m.predict_seasonal_components(test_df) - log.info( - f"forecast = {forecast}, metrics = {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets = {forecast_seasonal_componets}" - ) +# def test_trend_global_local_modeling(): +# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, trend_global_local="local" +# ) +# assert m.config_seasonality.global_local == "global" +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# m.test(test_df) +# m.predict_trend(test_df) +# m.predict_seasonal_components(test_df) +# m.plot_parameters() + + +# def test_regularized_trend_global_local_modeling(): +# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet(n_lags=10, epochs=EPOCHS, learning_rate=LR, trend_global_local="local", trend_reg=1) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# m.test(test_df) +# m.predict_trend(test_df) +# m.predict_seasonal_components(test_df) + 
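
Aside for reviewers, not part of the diff: the contract shared by the pack_*/stack_* methods in time_dataset.py and the stacker utility introduced in the later patches is that every component is concatenated column-wise into one feature tensor, with feature_indices recording each component's inclusive (start, end) column range. A minimal, self-contained sketch of that contract; the widths and names here (time_col, events, T=10) are illustrative and do not appear in the patches:

import torch

# Components are concatenated column-wise into one [T, n_features] tensor;
# feature_indices records each component's inclusive (start, end) column range.
feature_indices = {}
feature_list = []
current_idx = 0

T = 10
time_col = torch.linspace(0, 1, T).unsqueeze(-1)  # shape [T, 1]
feature_list.append(time_col)
feature_indices["time"] = (current_idx, current_idx)  # single column
current_idx += 1

events = torch.zeros(T, 3)  # e.g. three event columns
feature_list.append(events)
feature_indices["additive_events"] = (current_idx, current_idx + events.size(1) - 1)
current_idx += events.size(1)

stacked = torch.cat(feature_list, dim=-1)  # shape [T, 4]

# Unstacking a component is a slice over its recorded column range.
start, end = feature_indices["additive_events"]
assert torch.equal(stacked[:, start : end + 1], events)
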
+ +# def test_seasonality_global_local_modeling(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, season_global_local="local" +# ) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# m.test(test_df) +# m.predict_trend(test_df) +# m.predict_seasonal_components(test_df) +# m.plot_parameters() + + +# def test_changepoints0_global_local_modeling(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# n_changepoints=0, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# season_global_local="local", +# ) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# m.test(test_df) +# m.predict_trend(test_df) +# m.predict_seasonal_components(test_df) + + +# def test_trend_discontinuous_global_local_modeling(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# growth="discontinuous", +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# season_global_local="local", +# ) +# assert m.config_trend.trend_global_local == "global" +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# m.test(test_df) +# m.predict_trend(test_df) +# m.predict_seasonal_components(test_df) + + +# def test_attributes_global_local_modeling(): +# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# trend_global_local="local", +# season_global_local="local", +# ) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.1, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# m.predict(future) +# assert "df1" in m.model.id_list +# assert m.model.num_trends_modelled == 3 +# assert m.model.num_seasonalities_modelled == 3 + + +# 
def test_wrong_option_global_local_modeling(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# prev_level = log.parent.getEffectiveLevel() +# log.parent.setLevel("CRITICAL") +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# growth="discontinuous", +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# season_global_local="glocsl", +# trend_global_local="glocsl", +# ) +# log.parent.setLevel(prev_level) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.debug( +# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" +# ) + + +# def test_different_seasonality_modeling(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# season_global_local="local", +# yearly_seasonality_glocal_mode="global", +# ) +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.debug( +# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" +# ) + + +# def test_adding_new_global_seasonality(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# n_forecasts=2, +# n_lags=10, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# season_global_local="local", +# yearly_seasonality_glocal_mode="global", +# ) +# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.debug( +# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= 
{forecast_seasonal_componets}" +# ) + + +# def test_adding_new_local_seasonality(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# m = NeuralProphet( +# epochs=EPOCHS, learning_rate=LR, batch_size=BATCH_SIZE, season_global_local="global", trend_global_local="local" +# ) +# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="local") +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df, n_historic_predictions=True) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.debug( +# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" +# ) + + +# def test_trend_local_reg(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# for coef_i in [-30, 0, False, True]: +# m = NeuralProphet( +# n_forecasts=1, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# trend_global_local="local", +# trend_local_reg=coef_i, +# ) + +# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df, n_historic_predictions=True) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.info( +# f"forecast = {forecast}, metrics = {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets = {forecast_seasonal_componets}" +# ) def test_glocal_seasonality_reg(): @@ -384,33 +384,33 @@ def test_glocal_seasonality_reg(): log.debug(f"forecast = {forecast}, metrics= {metrics}") -def test_trend_local_reg_if_global(): - # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES - log.info("Global Modeling + Global Normalization") - df = pd.read_csv(PEYTON_FILE, nrows=512) - df1_0 = df.iloc[:128, :].copy(deep=True) - df1_0["ID"] = "df1" - df2_0 = df.iloc[128:256, :].copy(deep=True) - df2_0["ID"] = "df2" - df3_0 = df.iloc[256:384, :].copy(deep=True) - df3_0["ID"] = "df3" - for _ in [-30, 0, False, True]: - m = NeuralProphet( - n_forecasts=1, - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - trend_global_local="global", - trend_local_reg=3, - ) - - train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) - m.fit(train_df) - future = m.make_future_dataframe(test_df, n_historic_predictions=True) - forecast = m.predict(future) - metrics = m.test(test_df) - forecast_trend = m.predict_trend(test_df) - forecast_seasonal_componets = 
m.predict_seasonal_components(test_df) - log.debug( - f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" - ) +# def test_trend_local_reg_if_global(): +# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES +# log.info("Global Modeling + Global Normalization") +# df = pd.read_csv(PEYTON_FILE, nrows=512) +# df1_0 = df.iloc[:128, :].copy(deep=True) +# df1_0["ID"] = "df1" +# df2_0 = df.iloc[128:256, :].copy(deep=True) +# df2_0["ID"] = "df2" +# df3_0 = df.iloc[256:384, :].copy(deep=True) +# df3_0["ID"] = "df3" +# for _ in [-30, 0, False, True]: +# m = NeuralProphet( +# n_forecasts=1, +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# trend_global_local="global", +# trend_local_reg=3, +# ) + +# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) +# m.fit(train_df) +# future = m.make_future_dataframe(test_df, n_historic_predictions=True) +# forecast = m.predict(future) +# metrics = m.test(test_df) +# forecast_trend = m.predict_trend(test_df) +# forecast_seasonal_componets = m.predict_seasonal_components(test_df) +# log.debug( +# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" +# ) diff --git a/tests/test_unit.py b/tests/test_unit.py index c5284dc2e..50c3f5f3a 100644 --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -10,7 +10,7 @@ import pytest from torch.utils.data import DataLoader -from neuralprophet import NeuralProphet, configure, df_utils, time_dataset +from neuralprophet import NeuralProphet, configure, df_utils, time_dataset, utils_time_dataset from neuralprophet.data.process import _create_dataset, _handle_missing_data from neuralprophet.data.transform import _normalize @@ -97,6 +97,14 @@ def test_timedataset_minimal(): df = df_utils.normalize(df, global_data_params) df["ID"] = "__df__" + features_extractor = utils_time_dataset.FeatureExtractor( + n_lags=n_lags, + n_forecasts=n_forecasts, + max_lags=n_lags, + config_seasonality=None, + lagged_regressor_config=None, + ) + dataset = time_dataset.TimeDataset( df=df, predict_mode=False, @@ -111,6 +119,7 @@ def test_timedataset_minimal(): config_lagged_regressors=None, config_missing=config_missing, config_model=None, + features_extractor=features_extractor, ) input, meta = dataset.__getitem__(0) # # inputs50, targets50, meta50 = dataset.__getitem__(50) @@ -684,8 +693,15 @@ def test_globaltimedataset(): ) m.config_normalization = config_normalization df_global = _normalize(df=df_global, config_normalization=m.config_normalization) - _create_dataset(m, df_global, predict_mode=False) - _create_dataset(m, df_global, predict_mode=True) + features_extractor = utils_time_dataset.FeatureExtractor( + n_lags=m.n_lags, + n_forecasts=m.n_forecasts, + max_lags=m.max_lags, + config_seasonality=m.config_seasonality, + lagged_regressor_config=m.config_lagged_regressors, + ) + _create_dataset(m, df_global, predict_mode=False, features_extractor=features_extractor) + _create_dataset(m, df_global, predict_mode=True, features_extractor=features_extractor) # lagged_regressors, future_regressors df4 = df.copy() @@ -707,8 +723,15 @@ def test_globaltimedataset(): config_normalization.init_data_params(df4, m.config_lagged_regressors, m.config_regressors, m.config_events) m.config_normalization = config_normalization df4 = _normalize(df=df4, config_normalization=m.config_normalization) - _create_dataset(m, df4, 
predict_mode=False) - _create_dataset(m, df4, predict_mode=True) + features_extractor = utils_time_dataset.FeatureExtractor( + n_lags=m.n_lags, + n_forecasts=m.n_forecasts, + max_lags=m.max_lags, + config_seasonality=m.config_seasonality, + lagged_regressor_config=m.config_lagged_regressors, + ) + _create_dataset(m, df4, predict_mode=False, features_extractor=features_extractor) + _create_dataset(m, df4, predict_mode=True, features_extractor=features_extractor) def test_dataloader(): @@ -737,7 +760,14 @@ def test_dataloader(): config_normalization.init_data_params(df_global, m.config_lagged_regressors, m.config_regressors, m.config_events) m.config_normalization = config_normalization df_global = _normalize(df=df_global, config_normalization=m.config_normalization) - dataset = _create_dataset(m, df_global, predict_mode=False) + features_extractor = utils_time_dataset.FeatureExtractor( + n_lags=3, + n_forecasts=2, + max_lags=3, + config_seasonality=None, + lagged_regressor_config=None, + ) + dataset = _create_dataset(m, df_global, predict_mode=False, features_extractor=features_extractor) loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=True, drop_last=False) for _, meta in loader: assert set(meta["df_name"]) == set(df_global["ID"].unique()) @@ -865,6 +895,13 @@ def test_too_many_NaN(): df["ID"] = "__df__" # Check if ValueError is thrown, if NaN values remain after auto-imputing with pytest.raises(ValueError): + features_extractor = utils_time_dataset.FeatureExtractor( + n_lags=n_lags, + n_forecasts=n_forecasts, + max_lags=n_lags, + config_seasonality=None, + lagged_regressor_config=None, + ) time_dataset.TimeDataset( df=df, predict_mode=False, @@ -879,6 +916,7 @@ def test_too_many_NaN(): config_lagged_regressors=None, config_missing=config_missing, config_model=None, + features_extractor=features_extractor, ) From 24a0b0ed4d2ec23da4a99fd6e9aa1054c1da726f Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 30 Aug 2024 16:30:03 -0700 Subject: [PATCH 13/22] rename classes and functions --- neuralprophet/data/process.py | 8 +-- neuralprophet/forecaster.py | 40 +++++++-------- neuralprophet/time_dataset.py | 26 +++++----- neuralprophet/time_net.py | 78 ++++++++++++++--------------- neuralprophet/utils_time_dataset.py | 66 ++++++++++++------------ tests/test_unit.py | 24 ++++----- 6 files changed, 121 insertions(+), 121 deletions(-) diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py index 7da756f18..c2b448f49 100644 --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -575,7 +575,7 @@ def _handle_missing_data( return df -def _create_dataset(model, df, predict_mode, prediction_frequency=None, features_extractor=None): +def _create_dataset(model, df, predict_mode, prediction_frequency=None, components_stacker=None): """Construct dataset from dataframe. (Configured Hyperparameters can be overridden by explicitly supplying them. @@ -627,13 +627,13 @@ def _create_dataset(model, df, predict_mode, prediction_frequency=None, features config_lagged_regressors=model.config_lagged_regressors, config_missing=model.config_missing, config_model=model.config_model, - features_extractor=features_extractor, + components_stacker=components_stacker, # config_train=model.config_train, # no longer needed since JIT tabularization. 
) -def _create_features_extractor(n_lags, n_forecasts, max_lags, config_seasonality, config_lagged_regressors): - return utils_time_dataset.FeatureExtractor( +def _create_components_stacker(n_lags, n_forecasts, max_lags, config_seasonality, config_lagged_regressors): + return utils_time_dataset.ComponentStacker( n_lags=n_lags, n_forecasts=n_forecasts, max_lags=max_lags, diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 6fb50e41c..3d6bd2364 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -17,8 +17,8 @@ from neuralprophet.data.process import ( _check_dataframe, _convert_raw_predictions_to_raw_df, + _create_components_stacker, _create_dataset, - _create_features_extractor, _handle_missing_data, _prepare_dataframe_to_predict, _reshape_raw_predictions_to_forecst_df, @@ -35,7 +35,7 @@ from neuralprophet.plot_model_parameters_plotly import plot_parameters as plot_parameters_plotly from neuralprophet.plot_utils import get_valid_configuration, log_warning_deprecation_plotly, select_plotting_backend from neuralprophet.uncertainty import Conformal -from neuralprophet.utils_time_dataset import FeatureExtractor +from neuralprophet.utils_time_dataset import ComponentStacker log = logging.getLogger("NP.forecaster") @@ -1156,7 +1156,7 @@ def fit( # Set up DataLoaders: Train # Create TimeDataset # Note: _create_dataset() needs to be called after set_auto_seasonalities() - train_features_extractor = _create_features_extractor( + train_components_stacker = _create_components_stacker( n_lags=self.n_lags, max_lags=self.max_lags, n_forecasts=self.n_forecasts, @@ -1168,7 +1168,7 @@ def fit( df, predict_mode=False, prediction_frequency=self.prediction_frequency, - features_extractor=train_features_extractor, + components_stacker=train_components_stacker, ) # Determine the max_number of epochs self.config_train.set_auto_batch_epoch(n_data=len(dataset)) @@ -1204,14 +1204,14 @@ def fit( ) # df_val, _, _, _ = df_utils.prep_or_copy_df(df_val) df_val = _normalize(df=df_val, config_normalization=self.config_normalization) - features_extractor_val = _create_features_extractor( + val_components_stacker = _create_components_stacker( n_lags=self.n_lags, max_lags=self.max_lags, n_forecasts=self.n_forecasts, config_seasonality=self.config_seasonality, config_lagged_regressors=self.config_lagged_regressors, ) - dataset_val = _create_dataset(self, df_val, predict_mode=False, features_extractor=features_extractor_val) + dataset_val = _create_dataset(self, df_val, predict_mode=False, components_stacker=val_components_stacker) loader_val = DataLoader(dataset_val, batch_size=min(1024, len(dataset_val)), shuffle=False, drop_last=False) # Init the Trainer @@ -1231,9 +1231,9 @@ def fit( if not self.fitted: self.model = self._init_model() - self.model.set_features_extractor(features_extractor=train_features_extractor, mode="train") + self.model.set_components_stacker(components_stacker=train_components_stacker, mode="train") if validation_enabled: - self.model.set_features_extractor(features_extractor=features_extractor_val, mode="val") + self.model.set_components_stacker(components_stacker=val_components_stacker, mode="val") # Find suitable learning rate if not set if self.config_train.learning_rate is None: @@ -1443,15 +1443,15 @@ def test(self, df: pd.DataFrame, verbose: bool = True): ) df, _, _, _ = df_utils.prep_or_copy_df(df) df = _normalize(df=df, config_normalization=self.config_normalization) - features_extractor = _create_features_extractor( + components_stacker = 
_create_components_stacker( n_lags=self.n_lags, max_lags=self.max_lags, n_forecasts=self.n_forecasts, config_seasonality=self.config_seasonality, config_lagged_regressors=self.config_lagged_regressors, ) - dataset = _create_dataset(self, df, predict_mode=False, features_extractor=features_extractor) - self.model.set_features_extractor(features_extractor, mode="test") + dataset = _create_dataset(self, df, predict_mode=False, components_stacker=components_stacker) + self.model.set_components_stacker(components_stacker, mode="test") test_loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False) # Use Lightning to calculate metrics val_metrics = self.trainer.test(self.model, dataloaders=test_loader, verbose=verbose) @@ -2084,7 +2084,7 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): df = _normalize(df=df, config_normalization=self.config_normalization) df_seasonal = pd.DataFrame() for df_name, df_i in df.groupby("ID"): - feature_extractor = FeatureExtractor( + feature_unstackor = ComponentStacker( n_lags=0, max_lags=0, n_forecasts=1, @@ -2105,10 +2105,10 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): config_lagged_regressors=self.config_lagged_regressors, config_missing=self.config_missing, config_model=self.config_model, - features_extractor=feature_extractor, + components_stacker=feature_unstackor, # config_train=self.config_train, # no longer needed since JIT tabularization. ) - self.model.set_features_extractor(feature_extractor, mode="predict") + self.model.set_components_stacker(feature_unstackor, mode="predict") loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False) predicted = {} for name in self.config_seasonality.periods: @@ -2119,12 +2119,12 @@ def predict_seasonal_components(self, df: pd.DataFrame, quantile: float = 0.5): meta_name_tensor = None elif self.model.config_seasonality.global_local in ["local", "glocal"]: meta = OrderedDict() - time_input = feature_extractor.extract_component("time", inputs_tensor) + time_input = feature_unstackor.unstack_component("time", inputs_tensor) meta["df_name"] = [df_name for _ in range(time_input.shape[0])] meta_name_tensor = torch.tensor([self.model.id_dict[i] for i in meta["df_name"]]) # type: ignore else: meta_name_tensor = None - seasonalities_input = feature_extractor.extract_component("seasonalities", inputs_tensor) + seasonalities_input = feature_unstackor.unstack_component("seasonalities", inputs_tensor) for name in self.config_seasonality.periods: features = seasonalities_input[name] quantile_index = self.config_model.quantiles.index(quantile) @@ -2928,7 +2928,7 @@ def _predict_raw(self, df, df_name, include_components=False, prediction_frequen assert len(df["ID"].unique()) == 1 if "y_scaled" not in df.columns or "t" not in df.columns: raise ValueError("Received unprepared dataframe to predict. 
" "Please call predict_dataframe_to_predict.") - features_extractor = _create_features_extractor( + components_stacker = _create_components_stacker( n_lags=self.n_lags, max_lags=self.max_lags, n_forecasts=self.n_forecasts, @@ -2940,9 +2940,9 @@ def _predict_raw(self, df, df_name, include_components=False, prediction_frequen df, predict_mode=True, prediction_frequency=prediction_frequency, - features_extractor=features_extractor, + components_stacker=components_stacker, ) - self.model.set_features_extractor(features_extractor, mode="predict") + self.model.set_components_stacker(components_stacker, mode="predict") loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False) if self.n_forecasts > 1: dates = df["ds"].iloc[self.max_lags : -self.n_forecasts + 1] @@ -2955,7 +2955,7 @@ def _predict_raw(self, df, df_name, include_components=False, prediction_frequen self.model.set_covar_weights(self.model.get_covar_weights()) # Compute the predictions and components (if requested) result = self.trainer.predict(self.model, loader) - # Extract the prediction and components + # unstack the prediction and components predicted, component_vectors = zip(*result) predicted = np.concatenate(predicted) diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index cacdf9a0a..6060d50f1 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -34,7 +34,7 @@ def __init__( config_lagged_regressors, config_missing, config_model, - features_extractor, + components_stacker, ): """Initialize Timedataset from time-series df. Parameters @@ -136,7 +136,7 @@ def __init__( if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): self.calculate_seasonalities() - self.features_extractor = features_extractor + self.components_stacker = components_stacker self.stack_all_features() @@ -149,30 +149,30 @@ def stack_all_features(self): current_idx = 0 # Call individual stacking functions - current_idx = self.features_extractor.pack_trend_component(self.df_tensors, feature_list, current_idx) - current_idx = self.features_extractor.pack_targets_component(self.df_tensors, feature_list, current_idx) + current_idx = self.components_stacker.stack_trend_component(self.df_tensors, feature_list, current_idx) + current_idx = self.components_stacker.stack_targets_component(self.df_tensors, feature_list, current_idx) - current_idx = self.features_extractor.pack_lags_component( + current_idx = self.components_stacker.stack_lags_component( self.df_tensors, feature_list, current_idx, self.n_lags ) - current_idx = self.features_extractor.pack_lagged_regerssors_component( + current_idx = self.components_stacker.stack_lagged_regerssors_component( self.df_tensors, feature_list, current_idx, self.config_lagged_regressors ) - current_idx = self.features_extractor.pack_additive_events_component( + current_idx = self.components_stacker.stack_additive_events_component( self.df_tensors, feature_list, current_idx, self.additive_event_and_holiday_names ) - current_idx = self.features_extractor.pack_multiplicative_events_component( + current_idx = self.components_stacker.stack_multiplicative_events_component( self.df_tensors, feature_list, current_idx, self.multiplicative_event_and_holiday_names ) - current_idx = self.features_extractor.pack_additive_regressors_component( + current_idx = self.components_stacker.stack_additive_regressors_component( self.df_tensors, feature_list, current_idx, self.additive_regressors_names ) - current_idx = 
self.features_extractor.pack_multiplicative_regressors_component( + current_idx = self.components_stacker.stack_multiplicative_regressors_component( self.df_tensors, feature_list, current_idx, self.multiplicative_regressors_names ) if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): - current_idx = self.features_extractor.pack_seasonalities_component( + current_idx = self.components_stacker.stack_seasonalities_component( feature_list, current_idx, self.config_seasonality, self.seasonalities ) @@ -603,7 +603,7 @@ def __init__( config_lagged_regressors, config_missing, config_model, - features_extractor, + components_stacker, ): """Initialize Timedataset from time-series df. Parameters @@ -630,7 +630,7 @@ def __init__( config_lagged_regressors=config_lagged_regressors, config_missing=config_missing, config_model=config_model, - features_extractor=features_extractor, + components_stacker=components_stacker, ) self.length = sum(dataset.length for (name, dataset) in self.datasets.items()) global_sample_to_local_ID = [] diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 9c6405a9c..0dc5f8712 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -21,7 +21,7 @@ reg_func_trend, reg_func_trend_glocal, ) -from neuralprophet.utils_time_dataset import FeatureExtractor +from neuralprophet.utils_time_dataset import ComponentStacker from neuralprophet.utils_torch import init_parameter, interprete_model log = logging.getLogger("NP.time_net") @@ -63,10 +63,10 @@ def __init__( num_seasonalities_modelled: int = 1, num_seasonalities_modelled_dict: dict = None, meta_used_in_model: bool = False, - train_features_extractor: Optional[FeatureExtractor] = None, - val_features_extractor: Optional[FeatureExtractor] = None, - test_features_extractor: Optional[FeatureExtractor] = None, - predict_features_extractor: Optional[FeatureExtractor] = None, + train_components_stacker: Optional[ComponentStacker] = None, + val_components_stacker: Optional[ComponentStacker] = None, + test_components_stacker: Optional[ComponentStacker] = None, + predict_components_stacker: Optional[ComponentStacker] = None, ): """ Parameters @@ -147,10 +147,10 @@ def __init__( # General self.config_model = config_model self.n_forecasts = n_forecasts - self.train_features_extractor = train_features_extractor - self.val_features_extractor = val_features_extractor - self.test_features_extractor = test_features_extractor - self.predict_features_extractor = predict_features_extractor + self.train_components_stacker = train_components_stacker + self.val_components_stacker = val_components_stacker + self.test_components_stacker = test_components_stacker + self.predict_components_stacker = predict_components_stacker # Lightning Config self.config_train = config_train @@ -320,15 +320,15 @@ def ar_weights(self) -> torch.Tensor: if isinstance(layer, nn.Linear): return layer.weight - def set_features_extractor(self, features_extractor, mode): + def set_components_stacker(self, components_stacker, mode): if mode == "train": - self.train_features_extractor = features_extractor + self.train_components_stacker = components_stacker if mode == "val": - self.val_features_extractor = features_extractor + self.val_components_stacker = components_stacker if mode == "test": - self.test_features_extractor = features_extractor + self.test_components_stacker = components_stacker if mode == "predict": - self.predict_features_extractor = features_extractor + self.predict_components_stacker = 
components_stacker def get_covar_weights(self, covar_input=None) -> torch.Tensor: """ @@ -523,16 +523,16 @@ def forward_covar_net(self, covariates): def forward( self, input_tensor: torch.Tensor, - features_extractor=FeatureExtractor, + components_stacker=ComponentStacker, meta: Dict = None, compute_components_flag: bool = False, predict_mode: bool = False, ) -> torch.Tensor: """This method defines the model forward pass.""" - print(f"indices = {features_extractor.feature_indices}") + print(f"indices = {components_stacker.feature_indices}") - time_input = features_extractor.extract_component(component_name="time", batch_tensor=input_tensor) + time_input = components_stacker.unstack_component(component_name="time", batch_tensor=input_tensor) # Handle meta argument if meta is None and self.meta_used_in_model: name_id_dummy = self.id_list[0] @@ -562,7 +562,7 @@ def forward( # Unpack and process seasonalities seasonalities_input = None if self.config_seasonality and self.config_seasonality.periods: - seasonalities_input = features_extractor.extract_component( + seasonalities_input = components_stacker.unstack_component( component_name="seasonalities", batch_tensor=input_tensor ) s = self.seasonality(s=seasonalities_input, meta=meta) @@ -576,15 +576,15 @@ def forward( additive_events_input = None multiplicative_events_input = None if self.events_dims is not None: - if "additive_events" in features_extractor.feature_indices: - additive_events_input = features_extractor.extract_component( + if "additive_events" in components_stacker.feature_indices: + additive_events_input = components_stacker.unstack_component( component_name="additive_events", batch_tensor=input_tensor ) additive_events = self.scalar_features_effects(additive_events_input, self.event_params["additive"]) additive_components_nonstationary += additive_events components["additive_events"] = additive_events - if "multiplicative_events" in features_extractor.feature_indices: - multiplicative_events_input = features_extractor.extract_component( + if "multiplicative_events" in components_stacker.feature_indices: + multiplicative_events_input = components_stacker.unstack_component( component_name="multiplicative_events", batch_tensor=input_tensor ) multiplicative_events = self.scalar_features_effects( @@ -596,15 +596,15 @@ def forward( # Unpack and process regressors additive_regressors_input = None multiplicative_regressors_input = None - if "additive_regressors" in features_extractor.feature_indices: - additive_regressors_input = features_extractor.extract_component( + if "additive_regressors" in components_stacker.feature_indices: + additive_regressors_input = components_stacker.unstack_component( component_name="additive_regressors", batch_tensor=input_tensor ) additive_regressors = self.future_regressors(additive_regressors_input, "additive") additive_components_nonstationary += additive_regressors components["additive_regressors"] = additive_regressors - if "multiplicative_regressors" in features_extractor.feature_indices: - multiplicative_regressors_input = features_extractor.extract_component( + if "multiplicative_regressors" in components_stacker.feature_indices: + multiplicative_regressors_input = components_stacker.unstack_component( component_name="multiplicative_regressors", batch_tensor=input_tensor ) multiplicative_regressors = self.future_regressors(multiplicative_regressors_input, "multiplicative") @@ -613,8 +613,8 @@ def forward( # Unpack and process lags lags_input = None - if "lags" in 
features_extractor.feature_indices: - lags_input = features_extractor.extract_component(component_name="lags", batch_tensor=input_tensor) + if "lags" in components_stacker.feature_indices: + lags_input = components_stacker.unstack_component(component_name="lags", batch_tensor=input_tensor) nonstationary_components = ( trend[:, : self.n_lags, 0] + additive_components_nonstationary[:, : self.n_lags, 0] @@ -628,7 +628,7 @@ def forward( # Unpack and process covariates covariates_input = None if self.config_lagged_regressors and self.config_lagged_regressors.regressors is not None: - covariates_input = features_extractor.extract_component( + covariates_input = components_stacker.unstack_component( component_name="lagged_regressors", batch_tensor=input_tensor ) covariates = self.forward_covar_net(covariates=covariates_input) @@ -785,15 +785,15 @@ def training_step(self, batch, batch_idx): epoch_float = self.trainer.current_epoch + batch_idx / float(self.train_steps_per_epoch) self.train_progress = epoch_float / float(self.config_train.epochs) - targets = self.train_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) - time = self.train_features_extractor.extract_component("time", batch_tensor=inputs_tensor) + targets = self.train_components_stacker.unstack_component("targets", batch_tensor=inputs_tensor) + time = self.train_components_stacker.unstack_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs_tensor, self.train_features_extractor, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, self.train_components_stacker, meta_name_tensor) # Store predictions in self for later network visualization self.train_epoch_prediction = predicted # Calculate loss @@ -830,15 +830,15 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): inputs_tensor, meta = batch - targets = self.val_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) - time = self.val_features_extractor.extract_component("time", batch_tensor=inputs_tensor) + targets = self.val_components_stacker.unstack_component("targets", batch_tensor=inputs_tensor) + time = self.val_components_stacker.unstack_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], device=self.device) else: meta_name_tensor = None # Run forward calculation - predicted, _ = self.forward(inputs_tensor, self.val_features_extractor, meta_name_tensor) + predicted, _ = self.forward(inputs_tensor, self.val_components_stacker, meta_name_tensor) # Calculate loss loss, reg_loss = self.loss_func(time, predicted, targets) # Metrics @@ -852,15 +852,15 @@ def validation_step(self, batch, batch_idx): def test_step(self, batch, batch_idx): inputs_tensor, meta = batch - targets = self.test_features_extractor.extract_component("targets", batch_tensor=inputs_tensor) - time = self.test_features_extractor.extract_component("time", batch_tensor=inputs_tensor) + targets = self.test_components_stacker.unstack_component("targets", batch_tensor=inputs_tensor) + time = self.test_components_stacker.unstack_component("time", batch_tensor=inputs_tensor) # Global-local if self.meta_used_in_model: meta_name_tensor = torch.tensor([self.id_dict[i] for i in meta["df_name"]], 
device=self.device)
         else:
             meta_name_tensor = None
         # Run forward calculation
-        predicted, _ = self.forward(inputs_tensor, self.test_features_extractor, meta_name_tensor)
+        predicted, _ = self.forward(inputs_tensor, self.test_components_stacker, meta_name_tensor)
         # Calculate loss
         loss, reg_loss = self.loss_func(time, predicted, targets)
         # Metrics
@@ -884,7 +884,7 @@ def predict_step(self, batch, batch_idx, dataloader_idx=0):
         # Run forward calculation
         prediction, components = self.forward(
             inputs_tensor,
-            self.predict_features_extractor,
+            self.predict_components_stacker,
             meta_name_tensor,
             self.compute_components_flag,
             predict_mode=True,
diff --git a/neuralprophet/utils_time_dataset.py b/neuralprophet/utils_time_dataset.py
index 5e315c88f..075dff8e1 100644
--- a/neuralprophet/utils_time_dataset.py
+++ b/neuralprophet/utils_time_dataset.py
@@ -3,7 +3,7 @@
 import torch
 
 
-class FeatureExtractor:
+class ComponentStacker:
     def __init__(
         self,
         n_lags,
@@ -14,7 +14,7 @@ def __init__(
         lagged_regressor_config=None,
     ):
         """
-        Initializes the FeatureExtractor with the necessary parameters.
+        Initializes the ComponentStacker with the necessary parameters.
 
         Args:
             n_lags (int): Number of lags used in the model.
@@ -31,56 +31,56 @@ def __init__(
         self.config_seasonality = config_seasonality
         self.lagged_regressor_config = lagged_regressor_config
 
-    def extract_component(self, component_name, batch_tensor):
+    def unstack_component(self, component_name, batch_tensor):
         """
-        Routes the extraction process to the appropriate function based on the component name.
+        Routes the unstacking process to the appropriate function based on the component name.
 
         Args:
-            component_name (str): The name of the component to extract.
+            component_name (str): The name of the component to unstack.
 
         Returns:
-            Various: The output of the specific extraction function.
+            Various: The output of the specific unstacking function.
         """
         if component_name == "targets":
-            return self.extract_targets(batch_tensor)
+            return self.unstack_targets(batch_tensor)
         elif component_name == "time":
-            return self.extract_time(batch_tensor)
+            return self.unstack_time(batch_tensor)
         elif component_name == "seasonalities":
-            return self.extract_seasonalities(batch_tensor)
+            return self.unstack_seasonalities(batch_tensor)
         elif component_name == "lagged_regressors":
-            return self.extract_lagged_regressors(batch_tensor)
+            return self.unstack_lagged_regressors(batch_tensor)
         elif component_name == "lags":
-            return self.extract_lags(batch_tensor)
+            return self.unstack_lags(batch_tensor)
         elif component_name == "additive_events":
-            return self.extract_additive_events(batch_tensor)
+            return self.unstack_additive_events(batch_tensor)
         elif component_name == "multiplicative_events":
-            return self.extract_multiplicative_events(batch_tensor)
+            return self.unstack_multiplicative_events(batch_tensor)
         elif component_name == "additive_regressors":
-            return self.extract_additive_regressors(batch_tensor)
+            return self.unstack_additive_regressors(batch_tensor)
         elif component_name == "multiplicative_regressors":
-            return self.extract_multiplicative_regressors(batch_tensor)
+            return self.unstack_multiplicative_regressors(batch_tensor)
         else:
             raise ValueError(f"Unknown component name: {component_name}")
 
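Aside for reviewers, not part of the diff: the window arithmetic in the unstack_* slices below is the subtle part of this API. When max_lags > 0, each stacked sample spans max_lags history rows plus n_forecasts future rows, and each component reads its own window out of that span. A toy sketch mirroring unstack_targets and unstack_time; the batch contents, widths, and index map here are hypothetical:

import torch

n_lags, n_forecasts, max_lags = 3, 2, 3
window = max_lags + n_forecasts  # rows per sample
# Batch of 2 samples, 2 stacked feature columns: column 0 = time, column 1 = targets.
batch = torch.arange(2 * window * 2, dtype=torch.float32).reshape(2, window, 2)
feature_indices = {"time": (0, 0), "targets": (1, 1)}

# Targets: only the n_forecasts future rows, kept 3-D for the model.
t_start, _ = feature_indices["targets"]
targets = batch[:, max_lags : max_lags + n_forecasts, t_start].unsqueeze(2)
assert targets.shape == (2, n_forecasts, 1)

# Time: the n_lags history rows plus the n_forecasts future rows.
s_idx, _ = feature_indices["time"]
time = batch[:, max_lags - n_lags : max_lags + n_forecasts, s_idx]
assert time.shape == (2, n_lags + n_forecasts)
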
""" if component_name == "targets": - return self.extract_targets(batch_tensor) + return self.unstack_targets(batch_tensor) elif component_name == "time": - return self.extract_time(batch_tensor) + return self.unstack_time(batch_tensor) elif component_name == "seasonalities": - return self.extract_seasonalities(batch_tensor) + return self.unstack_seasonalities(batch_tensor) elif component_name == "lagged_regressors": - return self.extract_lagged_regressors(batch_tensor) + return self.unstack_lagged_regressors(batch_tensor) elif component_name == "lags": - return self.extract_lags(batch_tensor) + return self.unstack_lags(batch_tensor) elif component_name == "additive_events": - return self.extract_additive_events(batch_tensor) + return self.unstack_additive_events(batch_tensor) elif component_name == "multiplicative_events": - return self.extract_multiplicative_events(batch_tensor) + return self.unstack_multiplicative_events(batch_tensor) elif component_name == "additive_regressors": - return self.extract_additive_regressors(batch_tensor) + return self.unstack_additive_regressors(batch_tensor) elif component_name == "multiplicative_regressors": - return self.extract_multiplicative_regressors(batch_tensor) + return self.unstack_multiplicative_regressors(batch_tensor) else: raise ValueError(f"Unknown component name: {component_name}") - def extract_targets(self, batch_tensor): + def unstack_targets(self, batch_tensor): targets_start_idx, targets_end_idx = self.feature_indices["targets"] if self.max_lags > 0: return batch_tensor[:, self.max_lags : self.max_lags + self.n_forecasts, targets_start_idx].unsqueeze(2) else: return batch_tensor[:, targets_start_idx : targets_end_idx + 1].unsqueeze(1) - def extract_time(self, batch_tensor): + def unstack_time(self, batch_tensor): start_idx, end_idx = self.feature_indices["time"] if self.max_lags > 0: return batch_tensor[:, self.max_lags - self.n_lags : self.max_lags + self.n_forecasts, start_idx] else: return batch_tensor[:, start_idx : end_idx + 1] - def extract_lags(self, batch_tensor): + def unstack_lags(self, batch_tensor): lags_start_idx, _ = self.feature_indices["lags"] return batch_tensor[:, self.max_lags - self.n_lags : self.max_lags, lags_start_idx] - def extract_lagged_regressors(self, batch_tensor): + def unstack_lagged_regressors(self, batch_tensor): lagged_regressors = OrderedDict() if self.lagged_regressor_config is not None and self.lagged_regressor_config.regressors is not None: for name, lagged_regressor in self.lagged_regressor_config.regressors.items(): @@ -96,7 +96,7 @@ def extract_lagged_regressors(self, batch_tensor): ] return lagged_regressors - def extract_seasonalities(self, batch_tensor): + def unstack_seasonalities(self, batch_tensor): seasonalities = OrderedDict() if self.max_lags > 0: for seasonality_name in self.config_seasonality.periods.keys(): @@ -119,7 +119,7 @@ def extract_seasonalities(self, batch_tensor): return seasonalities - def extract_additive_events(self, batch_tensor): + def unstack_additive_events(self, batch_tensor): if self.max_lags > 0: events_start_idx, events_end_idx = self.feature_indices["additive_events"] future_offset = self.max_lags - self.n_lags @@ -130,7 +130,7 @@ def extract_additive_events(self, batch_tensor): events_start_idx, events_end_idx = self.feature_indices["additive_events"] return batch_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - def extract_multiplicative_events(self, batch_tensor): + def unstack_multiplicative_events(self, batch_tensor): if self.max_lags > 0: 
events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"] return batch_tensor[ @@ -140,7 +140,7 @@ def extract_multiplicative_events(self, batch_tensor): events_start_idx, events_end_idx = self.feature_indices["multiplicative_events"] return batch_tensor[:, events_start_idx : events_end_idx + 1].unsqueeze(1) - def extract_additive_regressors(self, batch_tensor): + def unstack_additive_regressors(self, batch_tensor): if self.max_lags > 0: regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"] return batch_tensor[ @@ -152,7 +152,7 @@ def extract_additive_regressors(self, batch_tensor): regressors_start_idx, regressors_end_idx = self.feature_indices["additive_regressors"] return batch_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - def extract_multiplicative_regressors(self, batch_tensor): + def unstack_multiplicative_regressors(self, batch_tensor): if self.max_lags > 0: regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"] future_offset = self.max_lags - self.n_lags @@ -165,7 +165,7 @@ def extract_multiplicative_regressors(self, batch_tensor): regressors_start_idx, regressors_end_idx = self.feature_indices["multiplicative_regressors"] return batch_tensor[:, regressors_start_idx : regressors_end_idx + 1].unsqueeze(1) - def pack_trend_component(self, df_tensors, feature_list, current_idx): + def stack_trend_component(self, df_tensors, feature_list, current_idx): """ Stack the trend (time) feature. """ @@ -174,7 +174,7 @@ def pack_trend_component(self, df_tensors, feature_list, current_idx): self.feature_indices["time"] = (current_idx, current_idx) return current_idx + 1 - def pack_lags_component(self, df_tensors, feature_list, current_idx, n_lags): + def stack_lags_component(self, df_tensors, feature_list, current_idx, n_lags): """ Stack the lags feature. """ @@ -185,7 +185,7 @@ def pack_lags_component(self, df_tensors, feature_list, current_idx, n_lags): return current_idx + 1 return current_idx - def pack_targets_component(self, df_tensors, feature_list, current_idx): + def stack_targets_component(self, df_tensors, feature_list, current_idx): """ Stack the targets feature. """ @@ -196,7 +196,7 @@ def pack_targets_component(self, df_tensors, feature_list, current_idx): return current_idx + 1 return current_idx - def pack_lagged_regerssors_component(self, df_tensors, feature_list, current_idx, config_lagged_regressors): + def stack_lagged_regerssors_component(self, df_tensors, feature_list, current_idx, config_lagged_regressors): """ Stack the lagged regressor features. 
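+ Note: concatenates all lagged regressors into one tensor appended to feature_list, records each regressor's slice in feature_indices, and returns current_idx advanced by the number of regressor features (unchanged when none are configured).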
""" @@ -215,7 +215,7 @@ def pack_lagged_regerssors_component(self, df_tensors, feature_list, current_idx return current_idx + num_features return current_idx - def pack_additive_events_component( + def stack_additive_events_component( self, df_tensors, feature_list, @@ -238,7 +238,7 @@ def pack_additive_events_component( return current_idx + additive_events_tensor.size(1) return current_idx - def pack_multiplicative_events_component( + def stack_multiplicative_events_component( self, df_tensors, feature_list, current_idx, multiplicative_event_and_holiday_names ): """ @@ -256,7 +256,7 @@ def pack_multiplicative_events_component( return current_idx + multiplicative_events_tensor.size(1) return current_idx - def pack_additive_regressors_component(self, df_tensors, feature_list, current_idx, additive_regressors_names): + def stack_additive_regressors_component(self, df_tensors, feature_list, current_idx, additive_regressors_names): """ Stack the additive regressor features. """ @@ -272,7 +272,7 @@ def pack_additive_regressors_component(self, df_tensors, feature_list, current_i return current_idx + additive_regressors_tensor.size(1) return current_idx - def pack_multiplicative_regressors_component( + def stack_multiplicative_regressors_component( self, df_tensors, feature_list, current_idx, multiplicative_regressors_names ): """ @@ -290,7 +290,7 @@ def pack_multiplicative_regressors_component( return current_idx + len(multiplicative_regressors_names) return current_idx - def pack_seasonalities_component(self, feature_list, current_idx, config_seasonality, seasonalities): + def stack_seasonalities_component(self, feature_list, current_idx, config_seasonality, seasonalities): """ Stack the seasonality features. """ diff --git a/tests/test_unit.py b/tests/test_unit.py index 50c3f5f3a..6c234ae57 100644 --- a/tests/test_unit.py +++ b/tests/test_unit.py @@ -97,7 +97,7 @@ def test_timedataset_minimal(): df = df_utils.normalize(df, global_data_params) df["ID"] = "__df__" - features_extractor = utils_time_dataset.FeatureExtractor( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=n_lags, n_forecasts=n_forecasts, max_lags=n_lags, @@ -119,7 +119,7 @@ def test_timedataset_minimal(): config_lagged_regressors=None, config_missing=config_missing, config_model=None, - features_extractor=features_extractor, + components_stacker=components_stacker, ) input, meta = dataset.__getitem__(0) # # inputs50, targets50, meta50 = dataset.__getitem__(50) @@ -693,15 +693,15 @@ def test_globaltimedataset(): ) m.config_normalization = config_normalization df_global = _normalize(df=df_global, config_normalization=m.config_normalization) - features_extractor = utils_time_dataset.FeatureExtractor( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=m.n_lags, n_forecasts=m.n_forecasts, max_lags=m.max_lags, config_seasonality=m.config_seasonality, lagged_regressor_config=m.config_lagged_regressors, ) - _create_dataset(m, df_global, predict_mode=False, features_extractor=features_extractor) - _create_dataset(m, df_global, predict_mode=True, features_extractor=features_extractor) + _create_dataset(m, df_global, predict_mode=False, components_stacker=components_stacker) + _create_dataset(m, df_global, predict_mode=True, components_stacker=components_stacker) # lagged_regressors, future_regressors df4 = df.copy() @@ -723,15 +723,15 @@ def test_globaltimedataset(): config_normalization.init_data_params(df4, m.config_lagged_regressors, m.config_regressors, m.config_events) m.config_normalization = 
config_normalization df4 = _normalize(df=df4, config_normalization=m.config_normalization) - features_extractor = utils_time_dataset.FeatureExtractor( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=m.n_lags, n_forecasts=m.n_forecasts, max_lags=m.max_lags, config_seasonality=m.config_seasonality, lagged_regressor_config=m.config_lagged_regressors, ) - _create_dataset(m, df4, predict_mode=False, features_extractor=features_extractor) - _create_dataset(m, df4, predict_mode=True, features_extractor=features_extractor) + _create_dataset(m, df4, predict_mode=False, components_stacker=components_stacker) + _create_dataset(m, df4, predict_mode=True, components_stacker=components_stacker) def test_dataloader(): @@ -760,14 +760,14 @@ def test_dataloader(): config_normalization.init_data_params(df_global, m.config_lagged_regressors, m.config_regressors, m.config_events) m.config_normalization = config_normalization df_global = _normalize(df=df_global, config_normalization=m.config_normalization) - features_extractor = utils_time_dataset.FeatureExtractor( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=3, n_forecasts=2, max_lags=3, config_seasonality=None, lagged_regressor_config=None, ) - dataset = _create_dataset(m, df_global, predict_mode=False, features_extractor=features_extractor) + dataset = _create_dataset(m, df_global, predict_mode=False, components_stacker=components_stacker) loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=True, drop_last=False) for _, meta in loader: assert set(meta["df_name"]) == set(df_global["ID"].unique()) @@ -895,7 +895,7 @@ def test_too_many_NaN(): df["ID"] = "__df__" # Check if ValueError is thrown, if NaN values remain after auto-imputing with pytest.raises(ValueError): - features_extractor = utils_time_dataset.FeatureExtractor( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=n_lags, n_forecasts=n_forecasts, max_lags=n_lags, @@ -916,7 +916,7 @@ def test_too_many_NaN(): config_lagged_regressors=None, config_missing=config_missing, config_model=None, - features_extractor=features_extractor, + components_stacker=components_stacker, ) From 5d0d59a13130177657e51344952df7d652868b82 Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 16:41:11 -0700 Subject: [PATCH 14/22] remove prints in time_net --- neuralprophet/time_net.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/neuralprophet/time_net.py b/neuralprophet/time_net.py index 0dc5f8712..336d9bf76 100644 --- a/neuralprophet/time_net.py +++ b/neuralprophet/time_net.py @@ -530,8 +530,6 @@ def forward( ) -> torch.Tensor: """This method defines the model forward pass.""" - print(f"indices = {components_stacker.feature_indices}") - time_input = components_stacker.unstack_component(component_name="time", batch_tensor=input_tensor) # Handle meta argument if meta is None and self.meta_used_in_model: @@ -661,7 +659,6 @@ def forward( components, meta, ) - print(f"components = {components.keys()}") else: components = None @@ -685,16 +682,12 @@ def compute_components( components["trend"] = components_raw["trend"][:, self.n_lags : time_input.shape[1], :] if self.config_trend is not None and seasonality_input is not None: for name, features in seasonality_input.items(): - print(f"season = {name}") - components[f"season_{name}"] = self.seasonality.compute_fourier( features=features[:, self.n_lags : time_input.shape[1], :], name=name, meta=meta ) if self.n_lags > 0 and lags_input is not None: components["ar"] = components_raw["lags"] 
if self.config_lagged_regressors is not None and covariates_input is not None: - print("lagged_regressors") - # Combined forward pass all_covariates = components_raw["covariates"] # Calculate the contribution of each covariate on each forecast @@ -747,7 +740,6 @@ def compute_components( ] for regressor, configs in self.future_regressors.regressors_dims.items(): - print(f"regressor = {regressor}") mode = configs["mode"] index = [] index.append(configs["regressor_index"]) From e0623ed1c9cbd8263e3eb8ff5ae6955dd0d55c03 Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 16:52:57 -0700 Subject: [PATCH 15/22] init Stacker in forecaster --- neuralprophet/forecaster.py | 34 ++++++++++++++++++++++++---------- neuralprophet/time_dataset.py | 24 +++--------------------- 2 files changed, 27 insertions(+), 31 deletions(-) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 3d6bd2364..48389faed 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -13,7 +13,17 @@ from matplotlib.axes import Axes from torch.utils.data import DataLoader -from neuralprophet import configure, df_utils, np_types, time_dataset, time_net, utils, utils_lightning, utils_metrics +from neuralprophet import ( + configure, + df_utils, + np_types, + time_dataset, + time_net, + utils, + utils_lightning, + utils_metrics, + utils_time_dataset, +) from neuralprophet.data.process import ( _check_dataframe, _convert_raw_predictions_to_raw_df, @@ -1156,13 +1166,15 @@ def fit( # Set up DataLoaders: Train # Create TimeDataset # Note: _create_dataset() needs to be called after set_auto_seasonalities() - train_components_stacker = _create_components_stacker( + train_components_stacker = utils_time_dataset.ComponentStacker( n_lags=self.n_lags, - max_lags=self.max_lags, n_forecasts=self.n_forecasts, + max_lags=self.max_lags, config_seasonality=self.config_seasonality, - config_lagged_regressors=self.config_lagged_regressors, + lagged_regressor_config=self.config_lagged_regressors, + feature_indices={}, ) + dataset = _create_dataset( self, df, @@ -1443,12 +1455,13 @@ def test(self, df: pd.DataFrame, verbose: bool = True): ) df, _, _, _ = df_utils.prep_or_copy_df(df) df = _normalize(df=df, config_normalization=self.config_normalization) - components_stacker = _create_components_stacker( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=self.n_lags, - max_lags=self.max_lags, n_forecasts=self.n_forecasts, + max_lags=self.max_lags, config_seasonality=self.config_seasonality, - config_lagged_regressors=self.config_lagged_regressors, + lagged_regressor_config=self.config_lagged_regressors, + feature_indices={}, ) dataset = _create_dataset(self, df, predict_mode=False, components_stacker=components_stacker) self.model.set_components_stacker(components_stacker, mode="test") @@ -2928,12 +2941,13 @@ def _predict_raw(self, df, df_name, include_components=False, prediction_frequen assert len(df["ID"].unique()) == 1 if "y_scaled" not in df.columns or "t" not in df.columns: raise ValueError("Received unprepared dataframe to predict. 
" "Please call predict_dataframe_to_predict.") - components_stacker = _create_components_stacker( + components_stacker = utils_time_dataset.ComponentStacker( n_lags=self.n_lags, - max_lags=self.max_lags, n_forecasts=self.n_forecasts, + max_lags=self.max_lags, config_seasonality=self.config_seasonality, - config_lagged_regressors=self.config_lagged_regressors, + lagged_regressor_config=self.config_lagged_regressors, + feature_indices={}, ) dataset = _create_dataset( self, diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index 6060d50f1..b3dc0cbf5 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -99,6 +99,9 @@ def __init__( self.config_regressors ) + if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): + self.calculate_seasonalities() + # skipping col "ID" is string type that is interpreted as object by torch (self.df[col].dtype == "O") # "ID" is stored in self.meta["df_name"] skip_cols = ["ID", "ds"] @@ -112,30 +115,9 @@ def __init__( self.df["ds"] = self.df["ds"].apply(lambda x: x.timestamp()) # Convert to Unix timestamp in seconds self.df_tensors["ds"] = torch.tensor(self.df["ds"].values, dtype=torch.int64) - if self.additive_event_and_holiday_names: - self.df_tensors["additive_event_and_holiday"] = torch.stack( - [self.df_tensors[name] for name in self.additive_event_and_holiday_names], dim=1 - ) - if self.multiplicative_event_and_holiday_names: - self.df_tensors["multiplicative_event_and_holiday"] = torch.stack( - [self.df_tensors[name] for name in self.multiplicative_event_and_holiday_names], dim=1 - ) - - if self.additive_regressors_names: - self.df_tensors["additive_regressors"] = torch.stack( - [self.df_tensors[name] for name in self.additive_regressors_names], dim=1 - ) - if self.multiplicative_regressors_names: - self.df_tensors["multiplicative_regressors"] = torch.stack( - [self.df_tensors[name] for name in self.multiplicative_regressors_names], dim=1 - ) - # Construct index map self.sample2index_map, self.length = self.create_sample2index_map(self.df, self.df_tensors) - if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): - self.calculate_seasonalities() - self.components_stacker = components_stacker self.stack_all_features() From 853fb213e522d7ce23eb87e3618197d8e215a7ba Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 16:58:46 -0700 Subject: [PATCH 16/22] fix time-dataset --- neuralprophet/time_dataset.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neuralprophet/time_dataset.py b/neuralprophet/time_dataset.py index b3dc0cbf5..39364a5b2 100644 --- a/neuralprophet/time_dataset.py +++ b/neuralprophet/time_dataset.py @@ -99,9 +99,6 @@ def __init__( self.config_regressors ) - if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): - self.calculate_seasonalities() - # skipping col "ID" is string type that is interpreted as object by torch (self.df[col].dtype == "O") # "ID" is stored in self.meta["df_name"] skip_cols = ["ID", "ds"] @@ -115,6 +112,9 @@ def __init__( self.df["ds"] = self.df["ds"].apply(lambda x: x.timestamp()) # Convert to Unix timestamp in seconds self.df_tensors["ds"] = torch.tensor(self.df["ds"].values, dtype=torch.int64) + if self.config_seasonality is not None and hasattr(self.config_seasonality, "periods"): + self.calculate_seasonalities() + # Construct index map self.sample2index_map, self.length = self.create_sample2index_map(self.df, self.df_tensors) From 
81868a9bae67befa1b0df3affbafb54b3a572d05 Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 17:51:02 -0700 Subject: [PATCH 17/22] remove last _create component stacker --- neuralprophet/data/process.py | 11 ----------- neuralprophet/forecaster.py | 4 ++-- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py index c2b448f49..a0a91095d 100644 --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ -630,14 +630,3 @@ def _create_dataset(model, df, predict_mode, prediction_frequency=None, componen components_stacker=components_stacker, # config_train=model.config_train, # no longer needed since JIT tabularization. ) - - -def _create_components_stacker(n_lags, n_forecasts, max_lags, config_seasonality, config_lagged_regressors): - return utils_time_dataset.ComponentStacker( - n_lags=n_lags, - n_forecasts=n_forecasts, - max_lags=max_lags, - config_seasonality=config_seasonality, - lagged_regressor_config=config_lagged_regressors, - feature_indices={}, - ) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index 48389faed..d82a2ae84 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -27,7 +27,6 @@ from neuralprophet.data.process import ( _check_dataframe, _convert_raw_predictions_to_raw_df, - _create_components_stacker, _create_dataset, _handle_missing_data, _prepare_dataframe_to_predict, @@ -1216,12 +1215,13 @@ def fit( ) # df_val, _, _, _ = df_utils.prep_or_copy_df(df_val) df_val = _normalize(df=df_val, config_normalization=self.config_normalization) - val_components_stacker = _create_components_stacker( + val_components_stacker = utils_time_dataset.ComponentStacker( n_lags=self.n_lags, max_lags=self.max_lags, n_forecasts=self.n_forecasts, config_seasonality=self.config_seasonality, config_lagged_regressors=self.config_lagged_regressors, + feature_indices={}, ) dataset_val = _create_dataset(self, df_val, predict_mode=False, components_stacker=val_components_stacker) loader_val = DataLoader(dataset_val, batch_size=min(1024, len(dataset_val)), shuffle=False, drop_last=False) From 4456fedc77edda7a56cf940d4e762526dfca0f2c Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 17:55:13 -0700 Subject: [PATCH 18/22] fix lagged config --- neuralprophet/forecaster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neuralprophet/forecaster.py b/neuralprophet/forecaster.py index d82a2ae84..a9fcecc75 100644 --- a/neuralprophet/forecaster.py +++ b/neuralprophet/forecaster.py @@ -1220,7 +1220,7 @@ def fit( max_lags=self.max_lags, n_forecasts=self.n_forecasts, config_seasonality=self.config_seasonality, - config_lagged_regressors=self.config_lagged_regressors, + lagged_regressor_config=self.config_lagged_regressors, feature_indices={}, ) dataset_val = _create_dataset(self, df_val, predict_mode=False, components_stacker=val_components_stacker) From bac04afe6055ea265d46aa57f20c0cab3f5fb495 Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 17:56:58 -0700 Subject: [PATCH 19/22] uncomment glocal tests --- tests/test_glocal.py | 674 +++++++++++++++++++++---------------------- 1 file changed, 337 insertions(+), 337 deletions(-) diff --git a/tests/test_glocal.py b/tests/test_glocal.py index 4c46e1683..fe4719140 100644 --- a/tests/test_glocal.py +++ b/tests/test_glocal.py @@ -26,313 +26,313 @@ PLOT = False -# def test_trend_global_local_modeling(): -# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global 
Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, trend_global_local="local" -# ) -# assert m.config_seasonality.global_local == "global" -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# m.test(test_df) -# m.predict_trend(test_df) -# m.predict_seasonal_components(test_df) -# m.plot_parameters() - - -# def test_regularized_trend_global_local_modeling(): -# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet(n_lags=10, epochs=EPOCHS, learning_rate=LR, trend_global_local="local", trend_reg=1) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# m.test(test_df) -# m.predict_trend(test_df) -# m.predict_seasonal_components(test_df) - - -# def test_seasonality_global_local_modeling(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, season_global_local="local" -# ) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# m.test(test_df) -# m.predict_trend(test_df) -# m.predict_seasonal_components(test_df) -# m.plot_parameters() - - -# def test_changepoints0_global_local_modeling(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# n_changepoints=0, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# season_global_local="local", -# ) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# m.test(test_df) -# m.predict_trend(test_df) -# m.predict_seasonal_components(test_df) - - -# def test_trend_discontinuous_global_local_modeling(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, 
:].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# growth="discontinuous", -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# season_global_local="local", -# ) -# assert m.config_trend.trend_global_local == "global" -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# m.test(test_df) -# m.predict_trend(test_df) -# m.predict_seasonal_components(test_df) - - -# def test_attributes_global_local_modeling(): -# # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# trend_global_local="local", -# season_global_local="local", -# ) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.1, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# m.predict(future) -# assert "df1" in m.model.id_list -# assert m.model.num_trends_modelled == 3 -# assert m.model.num_seasonalities_modelled == 3 - - -# def test_wrong_option_global_local_modeling(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# prev_level = log.parent.getEffectiveLevel() -# log.parent.setLevel("CRITICAL") -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# growth="discontinuous", -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# season_global_local="glocsl", -# trend_global_local="glocsl", -# ) -# log.parent.setLevel(prev_level) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.debug( -# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" -# ) - - -# def test_different_seasonality_modeling(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# season_global_local="local", -# yearly_seasonality_glocal_mode="global", -# ) -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = 
m.make_future_dataframe(test_df) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.debug( -# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" -# ) - - -# def test_adding_new_global_seasonality(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# n_forecasts=2, -# n_lags=10, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# season_global_local="local", -# yearly_seasonality_glocal_mode="global", -# ) -# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.debug( -# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" -# ) - - -# def test_adding_new_local_seasonality(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# m = NeuralProphet( -# epochs=EPOCHS, learning_rate=LR, batch_size=BATCH_SIZE, season_global_local="global", trend_global_local="local" -# ) -# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="local") -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df, n_historic_predictions=True) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.debug( -# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" -# ) - - -# def test_trend_local_reg(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# for coef_i in [-30, 0, False, True]: -# m = NeuralProphet( -# n_forecasts=1, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# trend_global_local="local", -# trend_local_reg=coef_i, -# ) - -# m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, 
local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df, n_historic_predictions=True) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.info( -# f"forecast = {forecast}, metrics = {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets = {forecast_seasonal_componets}" -# ) +def test_trend_global_local_modeling(): + # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, trend_global_local="local" + ) + assert m.config_seasonality.global_local == "global" + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + m.predict(future) + m.test(test_df) + m.predict_trend(test_df) + m.predict_seasonal_components(test_df) + m.plot_parameters() + + +def test_regularized_trend_global_local_modeling(): + # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet(n_lags=10, epochs=EPOCHS, learning_rate=LR, trend_global_local="local", trend_reg=1) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + m.predict(future) + m.test(test_df) + m.predict_trend(test_df) + m.predict_seasonal_components(test_df) + + +def test_seasonality_global_local_modeling(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, season_global_local="local" + ) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + m.predict(future) + m.test(test_df) + m.predict_trend(test_df) + m.predict_seasonal_components(test_df) + m.plot_parameters() + + +def test_changepoints0_global_local_modeling(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + n_changepoints=0, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + season_global_local="local", + ) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + 
future = m.make_future_dataframe(test_df) + m.predict(future) + m.test(test_df) + m.predict_trend(test_df) + m.predict_seasonal_components(test_df) + + +def test_trend_discontinuous_global_local_modeling(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + growth="discontinuous", + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + season_global_local="local", + ) + assert m.config_trend.trend_global_local == "global" + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + m.predict(future) + m.test(test_df) + m.predict_trend(test_df) + m.predict_seasonal_components(test_df) + + +def test_attributes_global_local_modeling(): + # TREND GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + trend_global_local="local", + season_global_local="local", + ) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.1, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + m.predict(future) + assert "df1" in m.model.id_list + assert m.model.num_trends_modelled == 3 + assert m.model.num_seasonalities_modelled == 3 + + +def test_wrong_option_global_local_modeling(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + prev_level = log.parent.getEffectiveLevel() + log.parent.setLevel("CRITICAL") + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + growth="discontinuous", + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + season_global_local="glocsl", + trend_global_local="glocsl", + ) + log.parent.setLevel(prev_level) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.debug( + f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" + ) + + +def test_different_seasonality_modeling(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, 
:].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + season_global_local="local", + yearly_seasonality_glocal_mode="global", + ) + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.debug( + f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" + ) + + +def test_adding_new_global_seasonality(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + n_forecasts=2, + n_lags=10, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + season_global_local="local", + yearly_seasonality_glocal_mode="global", + ) + m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.debug( + f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" + ) + + +def test_adding_new_local_seasonality(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + m = NeuralProphet( + epochs=EPOCHS, learning_rate=LR, batch_size=BATCH_SIZE, season_global_local="global", trend_global_local="local" + ) + m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="local") + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df, n_historic_predictions=True) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.debug( + f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" + ) + + +def test_trend_local_reg(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + for coef_i in [-30, 0, False, True]: + m = NeuralProphet( + n_forecasts=1, + epochs=EPOCHS, + 
batch_size=BATCH_SIZE, + learning_rate=LR, + trend_global_local="local", + trend_local_reg=coef_i, + ) + + m.add_seasonality(period=30, fourier_order=8, name="monthly", global_local="global") + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df, n_historic_predictions=True) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.info( + f"forecast = {forecast}, metrics = {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets = {forecast_seasonal_componets}" + ) def test_glocal_seasonality_reg(): @@ -384,33 +384,33 @@ def test_glocal_seasonality_reg(): log.debug(f"forecast = {forecast}, metrics= {metrics}") -# def test_trend_local_reg_if_global(): -# # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES -# log.info("Global Modeling + Global Normalization") -# df = pd.read_csv(PEYTON_FILE, nrows=512) -# df1_0 = df.iloc[:128, :].copy(deep=True) -# df1_0["ID"] = "df1" -# df2_0 = df.iloc[128:256, :].copy(deep=True) -# df2_0["ID"] = "df2" -# df3_0 = df.iloc[256:384, :].copy(deep=True) -# df3_0["ID"] = "df3" -# for _ in [-30, 0, False, True]: -# m = NeuralProphet( -# n_forecasts=1, -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# trend_global_local="global", -# trend_local_reg=3, -# ) - -# train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) -# m.fit(train_df) -# future = m.make_future_dataframe(test_df, n_historic_predictions=True) -# forecast = m.predict(future) -# metrics = m.test(test_df) -# forecast_trend = m.predict_trend(test_df) -# forecast_seasonal_componets = m.predict_seasonal_components(test_df) -# log.debug( -# f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" -# ) +def test_trend_local_reg_if_global(): + # SEASONALITY GLOBAL LOCAL MODELLING - NO EXOGENOUS VARIABLES + log.info("Global Modeling + Global Normalization") + df = pd.read_csv(PEYTON_FILE, nrows=512) + df1_0 = df.iloc[:128, :].copy(deep=True) + df1_0["ID"] = "df1" + df2_0 = df.iloc[128:256, :].copy(deep=True) + df2_0["ID"] = "df2" + df3_0 = df.iloc[256:384, :].copy(deep=True) + df3_0["ID"] = "df3" + for _ in [-30, 0, False, True]: + m = NeuralProphet( + n_forecasts=1, + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + trend_global_local="global", + trend_local_reg=3, + ) + + train_df, test_df = m.split_df(pd.concat((df1_0, df2_0, df3_0)), valid_p=0.33, local_split=True) + m.fit(train_df) + future = m.make_future_dataframe(test_df, n_historic_predictions=True) + forecast = m.predict(future) + metrics = m.test(test_df) + forecast_trend = m.predict_trend(test_df) + forecast_seasonal_componets = m.predict_seasonal_components(test_df) + log.debug( + f"forecast = {forecast}, metrics= {metrics}, forecast_trend = {forecast_trend}, forecast_seasonal_componets= {forecast_seasonal_componets}" + ) From a4a277a6f3a8aab631c9c6e001071431be2eb0ea Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 17:59:41 -0700 Subject: [PATCH 20/22] fix ruff --- neuralprophet/data/process.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neuralprophet/data/process.py b/neuralprophet/data/process.py index a0a91095d..46e63a67b 100644 --- a/neuralprophet/data/process.py +++ b/neuralprophet/data/process.py @@ 
-4,7 +4,7 @@ import numpy as np import pandas as pd -from neuralprophet import df_utils, time_dataset, utils_time_dataset +from neuralprophet import df_utils, time_dataset from neuralprophet.configure import ( ConfigCountryHolidays, ConfigEvents, From ca1cb84cb653d6a70bf5b5cd3869e0af4cffa83b Mon Sep 17 00:00:00 2001 From: ourownstory Date: Fri, 30 Aug 2024 18:01:48 -0700 Subject: [PATCH 21/22] uncomment future reg tests --- tests/test_future_regressor_nn.py | 172 +++++++++++++++--------------- 1 file changed, 87 insertions(+), 85 deletions(-) diff --git a/tests/test_future_regressor_nn.py b/tests/test_future_regressor_nn.py index 12ede3380..8d529afd5 100644 --- a/tests/test_future_regressor_nn.py +++ b/tests/test_future_regressor_nn.py @@ -82,69 +82,69 @@ def test_future_reg_nn_shared(): plt.show() -# def test_future_reg_nn_shared_coef(): -# log.info("testing: Future Regressors modelled with NNs shared coef") -# df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50) -# m = NeuralProphet( -# epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, future_regressors_model="shared_neural_nets_coef" -# ) -# df["A"] = df["y"].rolling(7, min_periods=1).mean() -# df["B"] = df["y"].rolling(30, min_periods=1).mean() -# df["C"] = df["y"].rolling(7, min_periods=1).mean() -# df["D"] = df["y"].rolling(30, min_periods=1).mean() - -# regressors_df_future = pd.DataFrame( -# data={"A": df["A"][-50:], "B": df["B"][-50:], "C": df["C"][-50:], "D": df["D"][-50:]} -# ) -# df = df[:-50] -# m = m.add_future_regressor(name="A") -# m = m.add_future_regressor(name="B", mode="additive") -# m = m.add_future_regressor(name="C", mode="multiplicative") -# m = m.add_future_regressor(name="D", mode="multiplicative") -# m.fit(df, freq="D") -# future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50) -# forecast = m.predict(df=future) -# if PLOT: -# m.plot(forecast) -# m.plot_components(forecast) -# m.plot_parameters() -# plt.show() - - -# def test_future_regressor_nn_2(): -# log.info("future regressor with NN") - -# df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) - -# m = NeuralProphet( -# epochs=EPOCHS, -# batch_size=BATCH_SIZE, -# learning_rate=LR, -# yearly_seasonality=False, -# weekly_seasonality=False, -# daily_seasonality=True, -# future_regressors_model="neural_nets", # 'linear' default or 'neural_nets' -# future_regressors_layers=[4, 4], -# n_forecasts=3, -# n_lags=5, -# drop_missing=True, -# # trainer_config={"accelerator": "gpu"}, -# ) -# df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) - -# # Use static plotly in notebooks -# # m.set_plotting_backend("plotly") - -# # Add the new future regressor -# m.add_future_regressor("temperature") - -# # Add counrty holidays -# m.add_country_holidays("IT", mode="additive", lower_window=-1, upper_window=1) - -# metrics = m.fit( -# df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False -# ) -# log.debug(f"Metrics: {metrics}") +def test_future_reg_nn_shared_coef(): + log.info("testing: Future Regressors modelled with NNs shared coef") + df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50) + m = NeuralProphet( + epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, future_regressors_model="shared_neural_nets_coef" + ) + df["A"] = df["y"].rolling(7, min_periods=1).mean() + df["B"] = df["y"].rolling(30, min_periods=1).mean() + df["C"] = df["y"].rolling(7, min_periods=1).mean() + df["D"] = df["y"].rolling(30, min_periods=1).mean() + + regressors_df_future = 
pd.DataFrame( + data={"A": df["A"][-50:], "B": df["B"][-50:], "C": df["C"][-50:], "D": df["D"][-50:]} + ) + df = df[:-50] + m = m.add_future_regressor(name="A") + m = m.add_future_regressor(name="B", mode="additive") + m = m.add_future_regressor(name="C", mode="multiplicative") + m = m.add_future_regressor(name="D", mode="multiplicative") + m.fit(df, freq="D") + future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50) + forecast = m.predict(df=future) + if PLOT: + m.plot(forecast) + m.plot_components(forecast) + m.plot_parameters() + plt.show() + + +def test_future_regressor_nn_2(): + log.info("future regressor with NN") + + df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) + + m = NeuralProphet( + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + yearly_seasonality=False, + weekly_seasonality=False, + daily_seasonality=True, + future_regressors_model="neural_nets", # 'linear' default or 'neural_nets' + future_regressors_layers=[4, 4], + n_forecasts=3, + n_lags=5, + drop_missing=True, + # trainer_config={"accelerator": "gpu"}, + ) + df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) + + # Use static plotly in notebooks + # m.set_plotting_backend("plotly") + + # Add the new future regressor + m.add_future_regressor("temperature") + + # Add country holidays + m.add_country_holidays("IT", mode="additive", lower_window=-1, upper_window=1) + + metrics = m.fit( + df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False + ) + log.debug(f"Metrics: {metrics}") def test_future_regressor_nn_shared_2(): @@ -176,25 +176,27 @@ def test_future_regressor_nn_shared_2(): log.debug(f"Metrics: {metrics}") -# def test_future_regressor_nn_shared_coef_2(): -# log.info("future regressor with NN shared coef 2") -# df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) -# m = NeuralProphet( -# epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, -# yearly_seasonality=False, -# weekly_seasonality=False, -# daily_seasonality=True, -# future_regressors_model="shared_neural_nets_coef", -# future_regressors_layers=[4, 4], -# n_forecasts=3, -# n_lags=5, -# drop_missing=True, -# ) -# df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) - -# # Add the new future regressor -# m.add_future_regressor("temperature") - -# metrics = m.fit( -# df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False -# ) +def test_future_regressor_nn_shared_coef_2(): + log.info("future regressor with NN shared coef 2") + df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) + m = NeuralProphet( + epochs=EPOCHS, + batch_size=BATCH_SIZE, + learning_rate=LR, + yearly_seasonality=False, + weekly_seasonality=False, + daily_seasonality=True, + future_regressors_model="shared_neural_nets_coef", + future_regressors_layers=[4, 4], + n_forecasts=3, + n_lags=5, + drop_missing=True, + ) + df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) + + # Add the new future regressor + m.add_future_regressor("temperature") + + metrics = m.fit( + df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False + ) From aae91e8b5305a4a4f51b8f6acfb70a6699ae6f69 Mon Sep 17 00:00:00 2001 From: MaiBe-ctrl Date: Fri, 30 Aug 2024 18:05:03 -0700 Subject: [PATCH 22/22] comment test --- tests/test_future_regressor_nn.py | 48 +++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git 
a/tests/test_future_regressor_nn.py b/tests/test_future_regressor_nn.py index 8d529afd5..e394bdccb 100644 --- a/tests/test_future_regressor_nn.py +++ b/tests/test_future_regressor_nn.py @@ -176,27 +176,27 @@ def test_future_regressor_nn_shared_2(): log.debug(f"Metrics: {metrics}") -def test_future_regressor_nn_shared_coef_2(): - log.info("future regressor with NN shared coef 2") - df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) - m = NeuralProphet( - epochs=EPOCHS, - batch_size=BATCH_SIZE, - learning_rate=LR, - yearly_seasonality=False, - weekly_seasonality=False, - daily_seasonality=True, - future_regressors_model="shared_neural_nets_coef", - future_regressors_layers=[4, 4], - n_forecasts=3, - n_lags=5, - drop_missing=True, - ) - df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) - - # Add the new future regressor - m.add_future_regressor("temperature") - - metrics = m.fit( - df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False - ) +# def test_future_regressor_nn_shared_coef_2(): +# log.info("future regressor with NN shared coef 2") +# df = pd.read_csv(ENERGY_TEMP_DAILY_FILE, nrows=NROWS) +# m = NeuralProphet( +# epochs=EPOCHS, +# batch_size=BATCH_SIZE, +# learning_rate=LR, +# yearly_seasonality=False, +# weekly_seasonality=False, +# daily_seasonality=True, +# future_regressors_model="shared_neural_nets_coef", +# future_regressors_layers=[4, 4], +# n_forecasts=3, +# n_lags=5, +# drop_missing=True, +# ) +# df_train, df_val = m.split_df(df, freq="D", valid_p=0.2) + +# # Add the new future regressor +# m.add_future_regressor("temperature") + +# metrics = m.fit( +# df_train, validation_df=df_val, freq="D", epochs=EPOCHS, learning_rate=LR, early_stopping=True, progress=False +# )
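Taken together, patches 15, 17, and 18 drop the _create_components_stacker helper and have the forecaster construct the ComponentStacker directly, threading it through _create_dataset and the DataLoader. Below is a minimal sketch of that wiring, assuming m is an already-configured NeuralProphet instance and df a normalized dataframe (both names are illustrative, not part of the patches):

    from torch.utils.data import DataLoader

    from neuralprophet import utils_time_dataset
    from neuralprophet.data.process import _create_dataset

    # Build the stacker the same way fit()/test()/_predict_raw() now do.
    stacker = utils_time_dataset.ComponentStacker(
        n_lags=m.n_lags,
        n_forecasts=m.n_forecasts,
        max_lags=m.max_lags,
        config_seasonality=m.config_seasonality,
        lagged_regressor_config=m.config_lagged_regressors,
        feature_indices={},  # filled in when the TimeDataset stacks its features
    )
    dataset = _create_dataset(m, df, predict_mode=False, components_stacker=stacker)
    loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)
    for inputs_tensor, meta in loader:
        # meta["df_name"] identifies the series each sample belongs to;
        # individual components are recovered from the stacked batch tensor.
        targets = stacker.unstack_component(component_name="targets", batch_tensor=inputs_tensor)
        time = stacker.unstack_component(component_name="time", batch_tensor=inputs_tensor)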