diff --git a/.gitignore b/.gitignore index 3bed848..9242d04 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ # Added by me __init__.py lightning_logs -predictions.csv +checkpoints # some file sizes are too large for git -datasets +dataset # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/base/base_model.py b/base/base_model.py deleted file mode 100644 index ad73507..0000000 --- a/base/base_model.py +++ /dev/null @@ -1,25 +0,0 @@ -import torch.nn as nn -import numpy as np -from abc import abstractmethod - - -class BaseModel(nn.Module): - """ - Base class for all models - """ - @abstractmethod - def forward(self, *inputs): - """ - Forward pass logic - - :return: Model output - """ - raise NotImplementedError - - def __str__(self): - """ - Model prints with number of trainable parameters - """ - model_parameters = filter(lambda p: p.requires_grad, self.parameters()) - params = sum([np.prod(p.size()) for p in model_parameters]) - return super().__str__() + '\nTrainable parameters: {}'.format(params) diff --git a/base/base_trainer.py b/base/base_trainer.py deleted file mode 100644 index e43e33b..0000000 --- a/base/base_trainer.py +++ /dev/null @@ -1,151 +0,0 @@ -import torch -from abc import abstractmethod -from numpy import inf -from logger import TensorboardWriter - - -class BaseTrainer: - """ - Base class for all trainers - """ - def __init__(self, model, criterion, metric_ftns, optimizer, config): - self.config = config - self.logger = config.get_logger('trainer', config['trainer']['verbosity']) - - self.model = model - self.criterion = criterion - self.metric_ftns = metric_ftns - self.optimizer = optimizer - - cfg_trainer = config['trainer'] - self.epochs = cfg_trainer['epochs'] - self.save_period = cfg_trainer['save_period'] - self.monitor = cfg_trainer.get('monitor', 'off') - - # configuration to monitor model performance and save best - if self.monitor == 'off': - self.mnt_mode = 'off' - self.mnt_best = 0 - else: - self.mnt_mode, self.mnt_metric = self.monitor.split() - assert self.mnt_mode in ['min', 'max'] - - self.mnt_best = inf if self.mnt_mode == 'min' else -inf - self.early_stop = cfg_trainer.get('early_stop', inf) - if self.early_stop <= 0: - self.early_stop = inf - - self.start_epoch = 1 - - self.checkpoint_dir = config.save_dir - - # setup visualization writer instance - self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard']) - - if config.resume is not None: - self._resume_checkpoint(config.resume) - - @abstractmethod - def _train_epoch(self, epoch): - """ - Training logic for an epoch - - :param epoch: Current epoch number - """ - raise NotImplementedError - - def train(self): - """ - Full training logic - """ - not_improved_count = 0 - for epoch in range(self.start_epoch, self.epochs + 1): - result = self._train_epoch(epoch) - - # save logged informations into log dict - log = {'epoch': epoch} - log.update(result) - - # print logged informations to the screen - for key, value in log.items(): - self.logger.info(' {:15s}: {}'.format(str(key), value)) - - # evaluate model performance according to configured metric, save best checkpoint as model_best - best = False - if self.mnt_mode != 'off': - try: - # check whether model performance improved or not, according to specified metric(mnt_metric) - improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \ - (self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best) - except KeyError: - self.logger.warning("Warning: Metric '{}' is not found. 
" - "Model performance monitoring is disabled.".format(self.mnt_metric)) - self.mnt_mode = 'off' - improved = False - - if improved: - self.mnt_best = log[self.mnt_metric] - not_improved_count = 0 - best = True - else: - not_improved_count += 1 - - if not_improved_count > self.early_stop: - self.logger.info("Validation performance didn\'t improve for {} epochs. " - "Training stops.".format(self.early_stop)) - break - - if epoch % self.save_period == 0: - self._save_checkpoint(epoch, save_best=best) - - def _save_checkpoint(self, epoch, save_best=False): - """ - Saving checkpoints - - :param epoch: current epoch number - :param log: logging information of the epoch - :param save_best: if True, rename the saved checkpoint to 'model_best.pth' - """ - arch = type(self.model).__name__ - state = { - 'arch': arch, - 'epoch': epoch, - 'state_dict': self.model.state_dict(), - 'optimizer': self.optimizer.state_dict(), - 'monitor_best': self.mnt_best, - 'config': self.config - } - filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch)) - torch.save(state, filename) - self.logger.info("Saving checkpoint: {} ...".format(filename)) - if save_best: - best_path = str(self.checkpoint_dir / 'model_best.pth') - torch.save(state, best_path) - self.logger.info("Saving current best: model_best.pth ...") - - def _resume_checkpoint(self, resume_path): - """ - Resume from saved checkpoints - - :param resume_path: Checkpoint path to be resumed - """ - resume_path = str(resume_path) - self.logger.info("Loading checkpoint: {} ...".format(resume_path)) - checkpoint = torch.load(resume_path) - self.start_epoch = checkpoint['epoch'] + 1 - self.mnt_best = checkpoint['monitor_best'] - - # load architecture params from checkpoint. - if checkpoint['config']['arch'] != self.config['arch']: - self.logger.warning("Warning: Architecture configuration given in config file is different from that of " - "checkpoint. This may yield an exception while state_dict is being loaded.") - self.model.load_state_dict(checkpoint['state_dict']) - - # load optimizer state from checkpoint only when optimizer type is not changed. - if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']: - self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. " - "Optimizer parameters not being resumed.") - else: - self.optimizer.load_state_dict(checkpoint['optimizer']) - - self.logger.info("Checkpoint loaded. 
Resume training from epoch {}".format(self.start_epoch)) diff --git a/classes/Parameters.py b/classes/Parameters.py deleted file mode 100644 index 7889e9d..0000000 --- a/classes/Parameters.py +++ /dev/null @@ -1,143 +0,0 @@ -import pandas as pd -from pandas import to_datetime -from typing import List -import json - -class Preprocess: - def __init__( - self, data, remove_input_outliers, remove_target_outliers, - target_moving_average_by_day, scale_input, scale_target - ): - self.data = data - self.remove_input_outliers = remove_input_outliers - self.remove_target_outliers = remove_target_outliers - self.target_moving_average_by_day = target_moving_average_by_day - self.scale_input = scale_input - self.scale_target = scale_target - - def __str__(self) -> str: - return json.dumps(self.data, indent=4) - -class Split: - def __init__( - self, data, train_start, validation_start, test_start, test_end, - first_date, last_date - ): - self.data = data - - self.location = [0.80, 0.10, 0.10] - self.train_start = to_datetime(train_start) - self.validation_start = to_datetime(validation_start ) - self.test_start = to_datetime(test_start ) - self.test_end = to_datetime(test_end) - self.first_date = to_datetime(first_date) - self.last_date = to_datetime(last_date) - - self.validation_end = self.test_start - pd.to_timedelta(1, unit='D') - self.train_end = self.validation_start - pd.to_timedelta(1, unit='D') - - def __str__(self) -> str: - return json.dumps(self.data, indent=4) - -class DataParameters: - def __init__( - self, data, id, static_features_map, dynamic_features_map, known_futures, - target_map, time_idx, population, population_cut, split - ): - self.data = data - self.id = id - - self.target_map = target_map - self.targets = list(self.target_map.values()) - self.static_features_map = static_features_map - self.static_features = self.get_static_real_features() - - self.dynamic_features_map = dynamic_features_map - self.dynamic_features = self.get_dynamic_features() - - self.time_varying_known_features = known_futures - - # uses past observations to predict future observations - # the tensorflow TFT uses past observations as input by default - # reference https://github.com/google-research/google-research/blob/master/tft/libs/tft_model.py#L735 - self.time_varying_unknown_features = self.dynamic_features + self.targets - - self.time_idx = time_idx - self.population_filepath = population - self.population_cut = population_cut - - self.split = Split(split, **split) - - def get_static_real_features(self) -> List[str]: - """Generates the list of static features - - Returns: - list: feature names - """ - - features_map = self.static_features_map - feature_list = [] - for value in features_map.values(): - if type(value)==list: - feature_list.extend(value) - else: - feature_list.append(value) - - return feature_list - - def get_dynamic_features(self) -> List[str]: - """Generates the list of dynamic features - - Returns: - list: feature names - """ - - features_map = self.dynamic_features_map - feature_list = [] - for value in features_map.values(): - if type(value)==list: - feature_list.extend(value) - else: - feature_list.append(value) - return feature_list - - def __str__(self) -> str: - return json.dumps(self.data, indent=4) - -class ModelParameters: - def __init__( - self, data:dict, hidden_layer_size, dropout_rate, input_sequence_length, target_sequence_length, - epochs, attention_head_size, optimizer, learning_rate, clipnorm, - early_stopping_patience, seed, batch_size - ) : - self.data = data - - 
self.hidden_layer_size = hidden_layer_size - self.dropout_rate = dropout_rate - self.input_sequence_length = input_sequence_length - self.target_sequence_length = target_sequence_length - - self.epochs = epochs - - self.attention_head_size = attention_head_size - self.optimizer = optimizer - self.learning_rate = learning_rate - - self.clipnorm = clipnorm - self.early_stopping_patience = early_stopping_patience - self.seed = seed - self.batch_size = batch_size - - def __str__(self) -> str: - return json.dumps(self.data, indent=4) - -class Parameters: - def __init__(self, config, model_parameters, data, preprocess): - self.config = config - - self.model_parameters = ModelParameters(model_parameters, **model_parameters) - self.data = DataParameters(data, **data) - self.preprocess = Preprocess(preprocess, **preprocess) - - def __str__(self) -> str: - return json.dumps(self.config, indent=4) \ No newline at end of file diff --git a/classes/PlotConfig.py b/classes/PlotConfig.py deleted file mode 100644 index 94660fc..0000000 --- a/classes/PlotConfig.py +++ /dev/null @@ -1,43 +0,0 @@ -import matplotlib.pyplot as plt -from matplotlib.ticker import FuncFormatter -# import seaborn as sns -# Apply the default theme -# sns.set_theme() -# sns.set(font_scale = 2) -# sns.set_style('white') - -# https://matplotlib.org/stable/tutorials/introductory/customizing.html#the-default-matplotlibrc-file -SMALL_SIZE = 24 -MEDIUM_SIZE = 28 -BIGGER_SIZE = 36 -plt.rc('font', size=SMALL_SIZE) # controls default text sizes -plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title -plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels -plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize -plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title -plt.rc('axes', titlepad=15) - -# set tick width -plt.rcParams['xtick.major.size'] = 15 # default 3.5 -plt.rcParams['xtick.major.width'] = 2 # default 0.8 - -plt.rcParams['ytick.major.size'] = 14 # default 3.5 -plt.rcParams['ytick.major.width'] = 2.4 # 0.8 - -plt.rcParams['lines.linewidth'] = 2.5 - -DPI = 200 -FIGSIZE = (12.5, 7) -DATE_TICKS = 5 - -markers = ['s', 'x', '+', 'h', 'D', '.', '^', '>', 'p', '<', '*', 'P', 'v'] - -def get_formatter(scale): - return FuncFormatter(lambda x, pos: '{0:g}'.format(x/scale)) - -prop_cycle = iter(plt.rcParams["axes.prop_cycle"]) -obs_color = next(prop_cycle)["color"] -pred_color = next(prop_cycle)["color"] \ No newline at end of file diff --git a/classes/Plotter.py b/classes/Plotter.py deleted file mode 100644 index e581632..0000000 --- a/classes/Plotter.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Done following -https://pytorch-forecasting.readthedocs.io/en/stable/_modules/pytorch_forecasting/models/base_model.html#BaseModel.plot_prediction -""" - -import os, sys -import numpy as np -from pandas import DataFrame, to_timedelta -from typing import List, Dict -from pytorch_forecasting.models.temporal_fusion_transformer import TemporalFusionTransformer -import matplotlib.pyplot as plt - -sys.path.append('..') -from utils.metric import calculate_result -from classes.PredictionProcessor import * -from classes.PlotConfig import * - -from matplotlib.ticker import StrMethodFormatter, MultipleLocator, ScalarFormatter - -class PlotResults: - def __init__( - self, figPath:str, time_index, - targets:List[str], figsize=FIGSIZE, show=True - ) -> None: - self.figPath = figPath - if 
not os.path.exists(figPath): - print(f'Creating folder {figPath}') - os.makedirs(figPath, exist_ok=True) - - self.figsize = figsize - self.show = show - self.targets = targets - self.time_index = time_index - - def plot( - self, df:DataFrame, target:str, title:str=None, scale=1, - base:int=None, figure_name:str=None, plot_error:bool=False, - legend_loc='best' - ): - fig, ax = plt.subplots(figsize=self.figsize) - if title is not None: plt.title(title) - x_column = self.time_index - - plt.plot(df[x_column], df[target], color='blue', label='Ground Truth') - plt.plot(df[x_column], df[f'Predicted_{target}'], color='green', label='Prediction') - - if plot_error: - plt.plot(df[x_column], abs(df[target] - df[f'Predicted_{target}']), color='red', label='Error') - _, y_max = ax.get_ylim() - ax.set_ylim(0, y_max*1.1) - - if base is None: - x_first_tick = df[x_column].min() - x_last_tick = df[x_column].max() - x_major_ticks = 5 - ax.set_xticks( - [x_first_tick + (x_last_tick - x_first_tick) * i / (x_major_ticks - 1) for i in range(x_major_ticks)] - ) - else: - ax.xaxis.set_major_locator(MultipleLocator(base=base)) - - # plt.xticks(rotation = 15) - plt.xlabel(x_column) - ax.yaxis.set_major_formatter(get_formatter(scale)) - plt.ylabel(f'{target}') - - if plot_error: - plt.legend(framealpha=0.3, edgecolor="black", ncol=3, loc=legend_loc) - else: - plt.legend(framealpha=0.3, edgecolor="black", ncol=2, loc=legend_loc) - - # fig.tight_layout() # might change y axis values - - if figure_name is not None: - plt.savefig(os.path.join(self.figPath, figure_name), dpi=DPI) - if self.show: - plt.show() - return fig - - def summed_plot( - self, merged_df:DataFrame, type:str='', save:bool=True, - base:int=None, plot_error:bool=False, legend_loc='best' - ): - """ - Plots summation of prediction and observation from all counties - - Args: - figure_name: must contain the figure type extension. No need to add target name as - this method will add the target name as prefix to the figure name. 
- """ - summed_df = PredictionProcessor.makeSummed( - merged_df, self.targets, self.time_index - ) - figures = [] - for target in self.targets: - predicted_column = f'Predicted_{target}' - y_true, y_pred = merged_df[target].values, merged_df[predicted_column].values - - mae, rmse, rmsle, smape, r2 = calculate_result(y_true, y_pred) - title = f'MAE {mae:0.3g}, RMSE {rmse:0.4g}, RMSLE {rmsle:0.3g}, SMAPE {smape:0.3g}, R2 {r2:0.3g}' - - if (summed_df[target].max() - summed_df[target].min()) >= 1e3: - scale = 1e3 - else: scale = 1 - - target_figure_name = None - if save: target_figure_name = f'Summed_plot_{target}_{type}.jpg' - - fig = self.plot( - summed_df, target, title, scale, base, target_figure_name, - plot_error, legend_loc - ) - figures.append(fig) - - return figures \ No newline at end of file diff --git a/classes/PredictionProcessor.py b/classes/PredictionProcessor.py deleted file mode 100644 index 7b16e59..0000000 --- a/classes/PredictionProcessor.py +++ /dev/null @@ -1,132 +0,0 @@ -import pandas as pd -import numpy as np - -class PredictionProcessor: - """ - Converts the TFT output into plotable dataframe format - """ - - def __init__( - self, time_idx, group_id, horizon, - targets, window - ) -> None: - #TODO: add support for multiple time index and group id - self.time_idx = time_idx[0] - self.group_id = group_id[0] - - self.horizon = horizon - self.targets = targets - self.window = window - - def convert_prediction_to_dict( - self, predictions, index, target_time_step:int=None, - remove_negative:bool = False - ): - time_index = index[self.time_idx].values - ids = index[self.group_id].values - - if remove_negative: - # set negative predictions to zero - predictions[predictions<0] = 0 - - predictions = predictions.numpy() - results = {} - - # if you want result for only a specific horizon in the future - if target_time_step is not None: - assert 0 < target_time_step <= self.horizon,\ - f"Expects target time step within 1 and {self.horizon}, found {target_time_step}." 
- - # convert target day to index, as it starts from 0 - target_time_step -= 1 - for index in range(len(predictions)): - # given time index is the time index of the first prediction - # https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.models.base_model.BaseModel.html#pytorch_forecasting.models.base_model.BaseModel.predict - current_time_index = time_index[index] - current_id = ids[index] - - item = (current_id, current_time_index + target_time_step) - predicted_value = predictions[index][target_time_step] - results[item] = [predicted_value] - - return results - - # if you haven't specified a particular horizon, this returns all of them, - # so that you can take the average - for index in range(len(predictions)): - current_time_index = time_index[index] - current_id = ids[index] - - for time_step in range(self.horizon): - item = (current_id, current_time_index + time_step) - predicted_value = predictions[index][time_step] - - if item in results: - results[item].append(predicted_value) - else: - results[item] = [predicted_value] - - return results - - def convert_dict_to_dataframe(self, results:dict, feature_name:str): - ids = [] - predictions = [] - time_index = [] - - for key in results.keys(): - item = results[key] - #TODO: more generalized - ids.append(key[0]) - time_index.append(key[1]) - - predictions.append(np.mean(item)) - - result_df = pd.DataFrame({ - self.group_id: ids, self.time_idx: time_index, - f'Predicted_{feature_name}': predictions - }) - return result_df - - def align_result_with_dataset( - self, df, predictions, index, target_time_step:int = None, - remove_negative:bool = False - ): - id_columns = list(index.columns) - - if type(predictions)==list: - result_df = None - for i, prediction in enumerate(predictions): - prediction_df = self.convert_dict_to_dataframe( - self.convert_prediction_to_dict( - prediction, index, target_time_step, remove_negative - ), - self.targets[i] - ) - if result_df is None: - result_df = prediction_df - else: - result_df = result_df.merge(prediction_df, on=id_columns, how='inner') - else: - # when prediction is on a single target, e.g. 
cases - result_df = self.convert_dict_to_dataframe( - self.convert_prediction_to_dict( - predictions, index, target_time_step, remove_negative - ), - self.targets[0] - ) - - merged_data = result_df.merge( - df[self.targets + id_columns], on=id_columns, how='inner' - ).reset_index(drop=True) - merged_data = merged_data.sort_values(by=id_columns).reset_index(drop=True) - - # round the values - predicted_columns = [col for col in merged_data.columns if 'Predicted' in col] - merged_data[predicted_columns] = merged_data[predicted_columns].round() - - return merged_data - - @staticmethod - def makeSummed(df, targets, columns): - predicted_columns = [col for col in df.columns if 'Predicted' in col] - return df.groupby(columns)[predicted_columns + targets].aggregate('sum').reset_index() \ No newline at end of file diff --git a/configurations/config.py b/configurations/config.py deleted file mode 100644 index 9ed0c0e..0000000 --- a/configurations/config.py +++ /dev/null @@ -1,123 +0,0 @@ -import os, enum -from data_formatter.base import * -from data_formatter.electricity import ElectricityFormatter -from data_formatter.traffic import TrafficFormatter -from data_formatter.favorita import FavoritaFormatter -from data_formatter.volatility import VolatilityFormatter -from dataclasses import dataclass -class ExperimentType(str, enum.Enum): - ELECTRICITY = 'electricity' - TRAFFIC = 'traffic' - FAVORITA = 'favorita' - VOLATILITY = 'volatility' - - def __str__(self) -> str: - return super().__str__() - - @staticmethod - def values(): - role_names = [member.value for _, member in ExperimentType.__members__.items()] - return role_names - -class ModelType(enum.auto): - TFT = "tft" - -class ExperimentConfig: - data_formatter_map = { - ExperimentType.ELECTRICITY: ElectricityFormatter, - ExperimentType.TRAFFIC: TrafficFormatter, - ExperimentType.FAVORITA: FavoritaFormatter, - # ExperimentType.VOLATILITY: VolatilityFormatter # volatility dataset unavailable - } - - seed = 7 - - def __init__( - self, experiment:ExperimentType=ExperimentType.ELECTRICITY, - root:str='outputs' - ) -> None: - self.experiment = experiment - self.root = root - - self.experiment_folder = os.path.join(root, experiment) - if not os.path.exists(self.experiment_folder): - os.makedirs(self.experiment_folder, exist_ok=True) - print(f'Model outputs will be saved at {self.experiment_folder}') - - @property - def data_formatter(self): - return self.__class__.data_formatter_map[self.experiment]() - - def model_parameters(self, model:ModelType=None): - model_parameter_map = { - ExperimentType.ELECTRICITY: ElectricModelParameters, - ExperimentType.TRAFFIC: TrafficModelParameters, - ExperimentType.FAVORITA: FavoritaModelParameters, - ExperimentType.VOLATILITY: VolatilityModelParameters - } - parameter = None - try: - parameters = model_parameter_map[self.experiment] - print(f"Experimental config found for {self.experiment}.") - print(f"Fetching parameters from available models {list(parameters.keys())}.") - parameter = parameters[model] - except: - raise ValueError("Experiment or model parameters not found !") - - return parameter - -ElectricModelParameters = { - ModelType.TFT: { - "hidden_layer_size": 16, - "dropout_rate": 0, - "learning_rate": 1e-3, - "batch_size": 64, - "epochs": 100, - 'gradient_clip_val': 1, - "early_stopping_patience": 5, - 'attention_head_size': 4 - } -} - -VolatilityModelParameters = { - ModelType.TFT: { - 'dropout_rate': 0.3, - 'hidden_layer_size': 160, - 'learning_rate': 0.01, - 'batch_size': 64, - 'gradient_clip_val': 1, 
- "early_stopping_patience": 5, - 'attention_head_size': 1, - 'stack_size': 1, - "epochs": 100, - } -} - -TrafficModelParameters = { - ModelType.TFT: { - 'dropout_rate': 0.3, - 'hidden_layer_size': 320, - 'learning_rate': 0.001, - 'batch_size': 128, - 'gradient_clip_val': 100., - "early_stopping_patience": 5, - 'attention_head_size': 4, - 'stack_size': 1, - "epochs": 100, - } -} - -FavoritaModelParameters = { - ModelType.TFT: { - 'dropout_rate': 0.1, - 'hidden_layer_size': 240, - 'learning_rate': 0.001, - 'batch_size': 128, - 'gradient_clip_val': 100., - "early_stopping_patience": 5, - 'attention_head_size': 4, - 'stack_size': 1, - "epochs": 100, - } -} \ No newline at end of file diff --git a/data_formatter/favorita.py b/data_formatter/favorita.py deleted file mode 100644 index 5b06995..0000000 --- a/data_formatter/favorita.py +++ /dev/null @@ -1,266 +0,0 @@ -from data_formatter.base import * -from pandas import DataFrame -import glob, gc - -class FavoritaFormatter(BaseDataFormatter): - def __init__(self) -> None: - super().__init__('favorita') - - @property - def data_path(self): - return os.path.join(self.data_folder, 'favorita_consolidated.csv') - - @property - def column_definition(self) -> dict: - return [ - ('traj_id', DataTypes.INTEGER, InputTypes.ID), - ('date', DataTypes.DATE, InputTypes.TIME), - ('log_sales', DataTypes.FLOAT, InputTypes.TARGET), - ('onpromotion', DataTypes.CATEGORICAL, InputTypes.KNOWN), - ('transactions', DataTypes.INTEGER, InputTypes.OBSERVED), - ('oil', DataTypes.INTEGER, InputTypes.OBSERVED), - ('day_of_week', DataTypes.CATEGORICAL, InputTypes.KNOWN), - ('day_of_month', DataTypes.INTEGER, InputTypes.KNOWN), - ('month', DataTypes.INTEGER, InputTypes.KNOWN), - ('national_hol', DataTypes.CATEGORICAL, InputTypes.KNOWN), - ('regional_hol', DataTypes.CATEGORICAL, InputTypes.KNOWN), - ('local_hol', DataTypes.CATEGORICAL, InputTypes.KNOWN), - ('open', DataTypes.INTEGER, InputTypes.KNOWN), - ('item_nbr', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('store_nbr', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('city', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('state', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('type', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('cluster', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('family', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('class', DataTypes.CATEGORICAL, InputTypes.STATIC), - ('perishable', DataTypes.CATEGORICAL, InputTypes.STATIC) - ] - - @property - def parameters(self) -> dict: - return { - "window": 90, - "horizon": 30 - } - - def split(self, data, train_start, val_start, test_start, test_end): - """Splits data frame into training-validation-test data frames. - - This also calibrates scaling object, and transforms data for each split. - - Args: - df: Source data frame to split. - valid_boundary: Starting year for validation data - test_boundary: Starting year for test data - - Returns: - Tuple of transformed (train, valid, test) data. 
- """ - - print('Formatting train-valid-test splits.') - - if valid_boundary is None: - valid_boundary = pd.datetime(2015, 12, 1) - - fixed_params = self.get_fixed_params() - time_steps = fixed_params['total_time_steps'] - lookback = fixed_params['num_encoder_steps'] - forecast_horizon = time_steps - lookback - - data['date'] = pd.to_datetime(data['date']) - df_lists = {'train': [], 'valid': [], 'test': []} - for _, sliced in data.groupby('traj_id'): - index = sliced['date'] - train = sliced.loc[index < valid_boundary] - train_len = len(train) - valid_len = train_len + forecast_horizon - valid = sliced.iloc[train_len - lookback:valid_len, :] - test = sliced.iloc[valid_len - lookback:valid_len + forecast_horizon, :] - - sliced_map = {'train': train, 'valid': valid, 'test': test} - - for k in sliced_map: - item = sliced_map[k] - - if len(item) >= time_steps: - df_lists[k].append(item) - - dfs = {k: pd.concat(df_lists[k], axis=0) for k in df_lists} - - train = dfs['train'] - # self.set_scalers(train, set_real=True) - - # Use all data for label encoding to handle labels not present in training. - # self.set_scalers(data, set_real=False) - - # Filter out identifiers not present in training (i.e. cold-started items). - def filter_ids(frame): - identifiers = set(self.identifiers) - index = frame['traj_id'] - return frame.loc[index.apply(lambda x: x in identifiers)] - - valid = filter_ids(dfs['valid']) - test = filter_ids(dfs['test']) - - return train, valid, train - - - def download(self, force=False) -> None: - """Processes Favorita dataset. - - Makes use of the raw files should be manually downloaded from Kaggle @ - https://www.kaggle.com/c/favorita-grocery-sales-forecasting/data - - Args: - config: Default experiment config for Favorita - """ - - url = 'https://www.kaggle.com/c/favorita-grocery-sales-forecasting/data' - - data_folder = self.data_folder - - # Save manual download to root folder to avoid deleting when re-processing. 
- zip_file = os.path.join(data_folder, '..', - 'favorita-grocery-sales-forecasting.zip') - - if not os.path.exists(zip_file): - raise ValueError( - f'Favorita zip file not found in {zip_file}!\ - Please manually download data from {url}.') - - # Unpack main zip file - outputs_file = os.path.join(data_folder, 'train.csv.7z') - unzip(zip_file, outputs_file, data_folder) - - # Unpack individually zipped files - for file in glob.glob(os.path.join(data_folder, '*.7z')): - - csv_file = file.replace('.7z', '') - - unzip(file, csv_file, data_folder) - - print('Unzipping complete, commencing data processing...') - - # Extract only a subset of data to save/process for efficiency - start_date = pd.datetime(2015, 1, 1) - end_date = pd.datetime(2016, 6, 1) - - print('Regenerating data...') - - # load temporal data - temporal = pd.read_csv(os.path.join(data_folder, 'train.csv'), index_col=0) - - store_info = pd.read_csv(os.path.join(data_folder, 'stores.csv'), index_col=0) - oil = pd.read_csv( - os.path.join(data_folder, 'oil.csv'), index_col=0).iloc[:, 0] - holidays = pd.read_csv(os.path.join(data_folder, 'holidays_events.csv')) - items = pd.read_csv(os.path.join(data_folder, 'items.csv'), index_col=0) - transactions = pd.read_csv(os.path.join(data_folder, 'transactions.csv')) - - # Take first 6 months of data - temporal['date'] = pd.to_datetime(temporal['date']) - - # Filter dates to reduce storage space requirements - if start_date is not None: - temporal = temporal[(temporal['date'] >= start_date)] - if end_date is not None: - temporal = temporal[(temporal['date'] < end_date)] - - dates = temporal['date'].unique() - - # Add trajectory identifier - temporal['traj_id'] = temporal['store_nbr'].apply( - str) + '_' + temporal['item_nbr'].apply(str) - temporal['unique_id'] = temporal['traj_id'] + '_' + temporal['date'].apply( - str) - - # Remove all IDs with negative returns - print('Removing returns data') - min_returns = temporal['unit_sales'].groupby(temporal['traj_id']).min() - valid_ids = set(min_returns[min_returns >= 0].index) - selector = temporal['traj_id'].apply(lambda traj_id: traj_id in valid_ids) - new_temporal = temporal[selector].copy() - del temporal - gc.collect() - temporal = new_temporal - temporal['open'] = 1 - - # Resampling - print('Resampling to regular grid') - resampled_dfs = [] - for traj_id, raw_sub_df in temporal.groupby('traj_id'): - print('Resampling', traj_id) - sub_df = raw_sub_df.set_index('date', drop=True).copy() - sub_df = sub_df.resample('1d').last() - sub_df['date'] = sub_df.index - sub_df[['store_nbr', 'item_nbr', 'onpromotion']] \ - = sub_df[['store_nbr', 'item_nbr', 'onpromotion']].fillna(method='ffill') - sub_df['open'] = sub_df['open'].fillna( - 0) # flag where sales data is unknown - sub_df['log_sales'] = np.log(sub_df['unit_sales']) - - resampled_dfs.append(sub_df.reset_index(drop=True)) - - new_temporal = pd.concat(resampled_dfs, axis=0) - del temporal - gc.collect() - temporal = new_temporal - - print('Adding oil') - oil.name = 'oil' - oil.index = pd.to_datetime(oil.index) - temporal = temporal.join( - oil.loc[dates].fillna(method='ffill'), on='date', how='left') - temporal['oil'] = temporal['oil'].fillna(-1) - - print('Adding store info') - temporal = temporal.join(store_info, on='store_nbr', how='left') - - print('Adding item info') - temporal = temporal.join(items, on='item_nbr', how='left') - - transactions['date'] = pd.to_datetime(transactions['date']) - temporal = temporal.merge( - transactions, - left_on=['date', 'store_nbr'], - right_on=['date', 
'store_nbr'], - how='left') - temporal['transactions'] = temporal['transactions'].fillna(-1) - - # Additional date info - temporal['day_of_week'] = pd.to_datetime(temporal['date'].values).dayofweek - temporal['day_of_month'] = pd.to_datetime(temporal['date'].values).day - temporal['month'] = pd.to_datetime(temporal['date'].values).month - - # Add holiday info - print('Adding holidays') - holiday_subset = holidays[holidays['transferred'].apply( - lambda x: not x)].copy() - holiday_subset.columns = [ - s if s != 'type' else 'holiday_type' for s in holiday_subset.columns - ] - holiday_subset['date'] = pd.to_datetime(holiday_subset['date']) - local_holidays = holiday_subset[holiday_subset['locale'] == 'Local'] - regional_holidays = holiday_subset[holiday_subset['locale'] == 'Regional'] - national_holidays = holiday_subset[holiday_subset['locale'] == 'National'] - - temporal['national_hol'] = temporal.merge( - national_holidays, left_on=['date'], right_on=['date'], - how='left')['description'].fillna('') - temporal['regional_hol'] = temporal.merge( - regional_holidays, - left_on=['state', 'date'], - right_on=['locale_name', 'date'], - how='left')['description'].fillna('') - temporal['local_hol'] = temporal.merge( - local_holidays, - left_on=['city', 'date'], - right_on=['locale_name', 'date'], - how='left')['description'].fillna('') - - temporal.sort_values('unique_id', inplace=True) - - print('Saving processed file to {}'.format(self.data_path)) - temporal.round(6).to_csv(self.data_path, index=False) - print('Done.') \ No newline at end of file diff --git a/data_formatter/base.py b/data_provider/base.py similarity index 100% rename from data_formatter/base.py rename to data_provider/base.py diff --git a/data_provider/data_factory.py b/data_provider/data_factory.py new file mode 100644 index 0000000..3cdb70b --- /dev/null +++ b/data_provider/data_factory.py @@ -0,0 +1,47 @@ +from data_provider.data_loader import Dataset_Custom, Dataset_Pred +from torch.utils.data import DataLoader + +data_dict = { + 'custom': Dataset_Custom, +} + + +def data_provider(args, flag): + Data = data_dict[args.data] + timeenc = 0 if args.embed != 'timeF' else 1 + + if flag == 'test': + shuffle_flag = False + drop_last = False + batch_size = args.batch_size + freq = args.freq + elif flag == 'pred': + shuffle_flag = False + drop_last = False + batch_size = 1 + freq = args.freq + Data = Dataset_Pred + else: + shuffle_flag = True + drop_last = True + batch_size = args.batch_size + freq = args.freq + + data_set = Data( + root_path=args.root_path, + data_path=args.data_path, + flag=flag, + size=[args.seq_len, args.label_len, args.pred_len], + features=args.features, + target=args.target, + timeenc=timeenc, + freq=freq + ) + print(flag, len(data_set)) + data_loader = DataLoader( + data_set, + batch_size=batch_size, + shuffle=shuffle_flag, + num_workers=args.num_workers, + drop_last=drop_last) + return data_set, data_loader diff --git a/data_provider/data_loader.py b/data_provider/data_loader.py new file mode 100644 index 0000000..645519c --- /dev/null +++ b/data_provider/data_loader.py @@ -0,0 +1,215 @@ +import os +import numpy as np +import pandas as pd +import os +import torch +from torch.utils.data import Dataset, DataLoader +from sklearn.preprocessing import StandardScaler +from utils.timefeatures import time_features +import warnings + +warnings.filterwarnings('ignore') + +class Dataset_Custom(Dataset): + def __init__(self, root_path, flag='train', size=None, + features='S', data_path='ETTh1.csv', + target='OT', 
scale=True, timeenc=0, freq='h'): + # size [seq_len, label_len, pred_len] + # info + if size == None: + self.seq_len = 24 * 4 * 4 + self.label_len = 24 * 4 + self.pred_len = 24 * 4 + else: + self.seq_len = size[0] + self.label_len = size[1] + self.pred_len = size[2] + # init + assert flag in ['train', 'test', 'val'] + type_map = {'train': 0, 'val': 1, 'test': 2} + self.set_type = type_map[flag] + + self.features = features + self.target = target + self.scale = scale + self.timeenc = timeenc + self.freq = freq + + self.root_path = root_path + self.data_path = data_path + self.__read_data__() + + def __read_data__(self): + self.scaler = StandardScaler() + df_raw = pd.read_csv(os.path.join(self.root_path, + self.data_path)) + + ''' + df_raw.columns: ['date', ...(other features), target feature] + ''' + cols = list(df_raw.columns) + cols.remove(self.target) + cols.remove('date') + df_raw = df_raw[['date'] + cols + [self.target]] + # print(cols) + num_train = int(len(df_raw) * 0.7) + num_test = int(len(df_raw) * 0.2) + num_vali = len(df_raw) - num_train - num_test + border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len] + border2s = [num_train, num_train + num_vali, len(df_raw)] + border1 = border1s[self.set_type] + border2 = border2s[self.set_type] + + if self.features == 'M' or self.features == 'MS': + cols_data = df_raw.columns[1:] + df_data = df_raw[cols_data] + elif self.features == 'S': + df_data = df_raw[[self.target]] + + if self.scale: + train_data = df_data[border1s[0]:border2s[0]] + self.scaler.fit(train_data.values) + data = self.scaler.transform(df_data.values) + else: + data = df_data.values + + df_stamp = df_raw[['date']][border1:border2] + df_stamp['date'] = pd.to_datetime(df_stamp.date) + if self.timeenc == 0: + df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1) + df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1) + df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1) + df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1) + data_stamp = df_stamp.drop(['date'], 1).values + elif self.timeenc == 1: + data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq) + data_stamp = data_stamp.transpose(1, 0) + + self.data_x = data[border1:border2] + self.data_y = data[border1:border2] + self.data_stamp = data_stamp + + def __getitem__(self, index): + s_begin = index + s_end = s_begin + self.seq_len + r_begin = s_end - self.label_len + r_end = r_begin + self.label_len + self.pred_len + + seq_x = self.data_x[s_begin:s_end] + seq_y = self.data_y[r_begin:r_end] + seq_x_mark = self.data_stamp[s_begin:s_end] + seq_y_mark = self.data_stamp[r_begin:r_end] + + return seq_x, seq_y, seq_x_mark, seq_y_mark + + def __len__(self): + return len(self.data_x) - self.seq_len - self.pred_len + 1 + + def inverse_transform(self, data): + return self.scaler.inverse_transform(data) + + +class Dataset_Pred(Dataset): + def __init__(self, root_path, flag='pred', size=None, + features='S', data_path='ETTh1.csv', + target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None): + # size [seq_len, label_len, pred_len] + # info + if size == None: + self.seq_len = 24 * 4 * 4 + self.label_len = 24 * 4 + self.pred_len = 24 * 4 + else: + self.seq_len = size[0] + self.label_len = size[1] + self.pred_len = size[2] + # init + assert flag in ['pred'] + + self.features = features + self.target = target + self.scale = scale + self.inverse = inverse + self.timeenc = timeenc + self.freq = freq + self.cols = cols + 
self.root_path = root_path + self.data_path = data_path + self.__read_data__() + + def __read_data__(self): + self.scaler = StandardScaler() + df_raw = pd.read_csv(os.path.join(self.root_path, + self.data_path)) + ''' + df_raw.columns: ['date', ...(other features), target feature] + ''' + if self.cols: + cols = self.cols.copy() + cols.remove(self.target) + else: + cols = list(df_raw.columns) + cols.remove(self.target) + cols.remove('date') + df_raw = df_raw[['date'] + cols + [self.target]] + border1 = len(df_raw) - self.seq_len + border2 = len(df_raw) + + if self.features == 'M' or self.features == 'MS': + cols_data = df_raw.columns[1:] + df_data = df_raw[cols_data] + elif self.features == 'S': + df_data = df_raw[[self.target]] + + if self.scale: + self.scaler.fit(df_data.values) + data = self.scaler.transform(df_data.values) + else: + data = df_data.values + + tmp_stamp = df_raw[['date']][border1:border2] + tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date) + pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq) + + df_stamp = pd.DataFrame(columns=['date']) + df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:]) + if self.timeenc == 0: + df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1) + df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1) + df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1) + df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1) + df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1) + df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15) + data_stamp = df_stamp.drop(['date'], 1).values + elif self.timeenc == 1: + data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq) + data_stamp = data_stamp.transpose(1, 0) + + self.data_x = data[border1:border2] + if self.inverse: + self.data_y = df_data.values[border1:border2] + else: + self.data_y = data[border1:border2] + self.data_stamp = data_stamp + + def __getitem__(self, index): + s_begin = index + s_end = s_begin + self.seq_len + r_begin = s_end - self.label_len + r_end = r_begin + self.label_len + self.pred_len + + seq_x = self.data_x[s_begin:s_end] + if self.inverse: + seq_y = self.data_x[r_begin:r_begin + self.label_len] + else: + seq_y = self.data_y[r_begin:r_begin + self.label_len] + seq_x_mark = self.data_stamp[s_begin:s_end] + seq_y_mark = self.data_stamp[r_begin:r_end] + + return seq_x, seq_y, seq_x_mark, seq_y_mark + + def __len__(self): + return len(self.data_x) - self.seq_len + 1 + + def inverse_transform(self, data): + return self.scaler.inverse_transform(data) diff --git a/data_formatter/electricity.py b/data_provider/electricity.py similarity index 86% rename from data_formatter/electricity.py rename to data_provider/electricity.py index 092064a..22c067c 100644 --- a/data_formatter/electricity.py +++ b/data_provider/electricity.py @@ -1,4 +1,4 @@ -from data_formatter.base import * +from data_provider.base import * from pandas import DataFrame, to_datetime class ElectricityFormatter(BaseDataFormatter): @@ -43,7 +43,9 @@ def split(self, data:DataFrame, val_start=1315, test_start=1339): return train, validation, test - def download(self, force=False) -> None: + def download( + self, force=False, start='2014-01-01', end='2014-09-01' + ) -> None: """Downloads electricity dataset from UCI repository.""" if os.path.exists(self.data_path) and not force: @@ -64,8 +66,14 @@ def download(self, force=False) -> None: df.index = pd.to_datetime(df.index) 
df.sort_index(inplace=True) + # Filter to match range used by other academic papers + start = pd.to_datetime(start) + end = pd.to_datetime(end) + df = df[(df.index >= start) & (df.index <=end)] + print(f'Filtering out data outside {start} and {end}') + # Used to determine the start and end dates of a series - output = df.resample('1h').mean().fillna(0) + output = df.resample('1h').sum().fillna(0) earliest_time = output.index.min() @@ -85,7 +93,6 @@ def download(self, force=False) -> None: tmp['hours_from_start'] = (date - earliest_time).seconds / 60 / 60 + ( date - earliest_time).days * 24 tmp['days_from_start'] = (date - earliest_time).days - tmp['categorical_id'] = label tmp['date'] = date tmp['id'] = label tmp['hour'] = date.hour @@ -97,11 +104,9 @@ def download(self, force=False) -> None: output = pd.concat(df_list, axis=0, join='outer').reset_index(drop=True) - output['categorical_id'] = output['id'].copy() - # Filter to match range used by other academic papers - output = output[(output['days_from_start'] >= 1096) - & (output['days_from_start'] < 1346)].copy() + # output = output[(output['days_from_start'] >= 1096) + # & (output['days_from_start'] < 1346)].copy() output.to_csv(self.data_path, index=False) cleanup(self.data_folder, self.data_path) diff --git a/data_formatter/traffic.py b/data_provider/traffic.py similarity index 99% rename from data_formatter/traffic.py rename to data_provider/traffic.py index fa310c1..153d45a 100644 --- a/data_formatter/traffic.py +++ b/data_provider/traffic.py @@ -1,4 +1,4 @@ -from data_formatter.base import * +from data_provider.base import * from pandas import DataFrame class TrafficFormatter(BaseDataFormatter): diff --git a/examples/sensitivity.py b/examples/sensitivity.py deleted file mode 100644 index e03988c..0000000 --- a/examples/sensitivity.py +++ /dev/null @@ -1,25 +0,0 @@ -from SALib.sample import saltelli -from SALib.analyze import sobol -from SALib.test_functions import Ishigami -import numpy as np - -# Define the model inputs -problem = { - 'num_vars': 3, - 'names': ['x1', 'x2', 'x3'], - 'bounds': [[-3.14159265359, 3.14159265359], - [-3.14159265359, 3.14159265359], - [-3.14159265359, 3.14159265359]] } - -# Generate samples -param_values = saltelli.sample(problem, 1024) - -# Run model (example) -Y = Ishigami.evaluate(param_values) - -# Perform analysis -Si = sobol.analyze(problem, Y, print_to_console=True) - -# Print the first-order sensitivity indices -print(Si['S1']) \ No newline at end of file diff --git a/exp/exp_basic.py b/exp/exp_basic.py new file mode 100644 index 0000000..4115b1b --- /dev/null +++ b/exp/exp_basic.py @@ -0,0 +1,41 @@ +import os +import torch +from models import Transformer, DLinear + + +class Exp_Basic(object): + def __init__(self, args): + self.args = args + self.model_dict = { + 'Transformer': Transformer, + 'DLinear': DLinear + } + self.device = self._acquire_device() + self.model = self._build_model().to(self.device) + + def _build_model(self): + raise NotImplementedError + return None + + def _acquire_device(self): + if self.args.use_gpu: + os.environ["CUDA_VISIBLE_DEVICES"] = str( + self.args.gpu) if not self.args.use_multi_gpu else self.args.devices + device = torch.device('cuda:{}'.format(self.args.gpu)) + print('Use GPU: cuda:{}'.format(self.args.gpu)) + else: + device = torch.device('cpu') + print('Use CPU') + return device + + def _get_data(self): + pass + + def vali(self): + pass + + def train(self): + pass + + def test(self): + pass diff --git a/exp/exp_classification.py b/exp/exp_classification.py 
new file mode 100644 index 0000000..3be2173 --- /dev/null +++ b/exp/exp_classification.py @@ -0,0 +1,191 @@ +from data_provider.data_factory import data_provider +from exp.exp_basic import Exp_Basic +from utils.tools import EarlyStopping, adjust_learning_rate, cal_accuracy +import torch +import torch.nn as nn +from torch import optim +import os +import time +import warnings +import numpy as np +import pdb + +warnings.filterwarnings('ignore') + + +class Exp_Classification(Exp_Basic): + def __init__(self, args): + super(Exp_Classification, self).__init__(args) + + def _build_model(self): + # model input depends on data + train_data, train_loader = self._get_data(flag='TRAIN') + test_data, test_loader = self._get_data(flag='TEST') + self.args.seq_len = max(train_data.max_seq_len, test_data.max_seq_len) + self.args.pred_len = 0 + self.args.enc_in = train_data.feature_df.shape[1] + self.args.num_class = len(train_data.class_names) + # model init + model = self.model_dict[self.args.model].Model(self.args).float() + if self.args.use_multi_gpu and self.args.use_gpu: + model = nn.DataParallel(model, device_ids=self.args.device_ids) + return model + + def _get_data(self, flag): + data_set, data_loader = data_provider(self.args, flag) + return data_set, data_loader + + def _select_optimizer(self): + model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) + return model_optim + + def _select_criterion(self): + criterion = nn.CrossEntropyLoss() + return criterion + + def vali(self, vali_data, vali_loader, criterion): + total_loss = [] + preds = [] + trues = [] + self.model.eval() + with torch.no_grad(): + for i, (batch_x, label, padding_mask) in enumerate(vali_loader): + batch_x = batch_x.float().to(self.device) + padding_mask = padding_mask.float().to(self.device) + label = label.to(self.device) + + outputs = self.model(batch_x, padding_mask, None, None) + + pred = outputs.detach().cpu() + loss = criterion(pred, label.long().squeeze().cpu()) + total_loss.append(loss) + + preds.append(outputs.detach()) + trues.append(label) + + total_loss = np.average(total_loss) + + preds = torch.cat(preds, 0) + trues = torch.cat(trues, 0) + probs = torch.nn.functional.softmax(preds) # (total_samples, num_classes) est. prob. 
for each class and sample + predictions = torch.argmax(probs, dim=1).cpu().numpy() # (total_samples,) int class index for each sample + trues = trues.flatten().cpu().numpy() + accuracy = cal_accuracy(predictions, trues) + + self.model.train() + return total_loss, accuracy + + def train(self, setting): + train_data, train_loader = self._get_data(flag='TRAIN') + vali_data, vali_loader = self._get_data(flag='TEST') + test_data, test_loader = self._get_data(flag='TEST') + + path = os.path.join(self.args.checkpoints, setting) + if not os.path.exists(path): + os.makedirs(path) + + time_now = time.time() + + train_steps = len(train_loader) + early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) + + model_optim = self._select_optimizer() + criterion = self._select_criterion() + + for epoch in range(self.args.train_epochs): + iter_count = 0 + train_loss = [] + + self.model.train() + epoch_time = time.time() + + for i, (batch_x, label, padding_mask) in enumerate(train_loader): + iter_count += 1 + model_optim.zero_grad() + + batch_x = batch_x.float().to(self.device) + padding_mask = padding_mask.float().to(self.device) + label = label.to(self.device) + + outputs = self.model(batch_x, padding_mask, None, None) + loss = criterion(outputs, label.long().squeeze(-1)) + train_loss.append(loss.item()) + + if (i + 1) % 100 == 0: + print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) + speed = (time.time() - time_now) / iter_count + left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i) + print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) + iter_count = 0 + time_now = time.time() + + loss.backward() + nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=4.0) + model_optim.step() + + print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time)) + train_loss = np.average(train_loss) + vali_loss, val_accuracy = self.vali(vali_data, vali_loader, criterion) + test_loss, test_accuracy = self.vali(test_data, test_loader, criterion) + + print( + "Epoch: {0}, Steps: {1} | Train Loss: {2:.3f} Vali Loss: {3:.3f} Vali Acc: {4:.3f} Test Loss: {5:.3f} Test Acc: {6:.3f}" + .format(epoch + 1, train_steps, train_loss, vali_loss, val_accuracy, test_loss, test_accuracy)) + early_stopping(-val_accuracy, self.model, path) + if early_stopping.early_stop: + print("Early stopping") + break + if (epoch + 1) % 5 == 0: + adjust_learning_rate(model_optim, epoch + 1, self.args) + + best_model_path = path + '/' + 'checkpoint.pth' + self.model.load_state_dict(torch.load(best_model_path)) + + return self.model + + def test(self, setting, test=0): + test_data, test_loader = self._get_data(flag='TEST') + if test: + print('loading model') + self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth'))) + + preds = [] + trues = [] + folder_path = './test_results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + self.model.eval() + with torch.no_grad(): + for i, (batch_x, label, padding_mask) in enumerate(test_loader): + batch_x = batch_x.float().to(self.device) + padding_mask = padding_mask.float().to(self.device) + label = label.to(self.device) + + outputs = self.model(batch_x, padding_mask, None, None) + + preds.append(outputs.detach()) + trues.append(label) + + preds = torch.cat(preds, 0) + trues = torch.cat(trues, 0) + print('test shape:', preds.shape, trues.shape) + + probs = torch.nn.functional.softmax(preds) # (total_samples, num_classes) est. prob. 
for each class and sample + predictions = torch.argmax(probs, dim=1).cpu().numpy() # (total_samples,) int class index for each sample + trues = trues.flatten().cpu().numpy() + accuracy = cal_accuracy(predictions, trues) + + # result save + folder_path = './results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + print('accuracy:{}'.format(accuracy)) + f = open("result_classification.txt", 'a') + f.write(setting + " \n") + f.write('accuracy:{}'.format(accuracy)) + f.write('\n') + f.write('\n') + f.close() + return diff --git a/exp/exp_long_term_forecasting.py b/exp/exp_long_term_forecasting.py new file mode 100644 index 0000000..aa9cde4 --- /dev/null +++ b/exp/exp_long_term_forecasting.py @@ -0,0 +1,261 @@ +from data_provider.data_factory import data_provider +from exp.exp_basic import Exp_Basic +from utils.tools import EarlyStopping, adjust_learning_rate, visual +from utils.metrics import metric +import torch +import torch.nn as nn +from torch import optim +import os +import time +import warnings +import numpy as np + +warnings.filterwarnings('ignore') + + +class Exp_Long_Term_Forecast(Exp_Basic): + def __init__(self, args): + super(Exp_Long_Term_Forecast, self).__init__(args) + + def _build_model(self): + model = self.model_dict[self.args.model].Model(self.args).float() + + if self.args.use_multi_gpu and self.args.use_gpu: + model = nn.DataParallel(model, device_ids=self.args.device_ids) + return model + + def _get_data(self, flag): + data_set, data_loader = data_provider(self.args, flag) + return data_set, data_loader + + def _select_optimizer(self): + model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) + return model_optim + + def _select_criterion(self): + criterion = nn.MSELoss() + return criterion + + def vali(self, vali_data, vali_loader, criterion): + total_loss = [] + self.model.eval() + with torch.no_grad(): + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader): + batch_x = batch_x.float().to(self.device) + batch_y = batch_y.float() + + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + + pred = outputs.detach().cpu() + true = batch_y.detach().cpu() + + loss = criterion(pred, true) + + total_loss.append(loss) + total_loss = np.average(total_loss) + self.model.train() + return total_loss + + def train(self, setting): + train_data, train_loader = self._get_data(flag='train') + vali_data, vali_loader = self._get_data(flag='val') + test_data, test_loader = self._get_data(flag='test') + + path = os.path.join(self.args.checkpoints, setting) + if not os.path.exists(path): + os.makedirs(path) + + time_now = time.time() + + train_steps = 
len(train_loader) + early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) + + model_optim = self._select_optimizer() + criterion = self._select_criterion() + + if self.args.use_amp: + scaler = torch.cuda.amp.GradScaler() + + for epoch in range(self.args.train_epochs): + iter_count = 0 + train_loss = [] + + self.model.train() + epoch_time = time.time() + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader): + iter_count += 1 + model_optim.zero_grad() + batch_x = batch_x.float().to(self.device) + + batch_y = batch_y.float().to(self.device) + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + loss = criterion(outputs, batch_y) + train_loss.append(loss.item()) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + loss = criterion(outputs, batch_y) + train_loss.append(loss.item()) + + if (i + 1) % 100 == 0: + print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) + speed = (time.time() - time_now) / iter_count + left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i) + print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) + iter_count = 0 + time_now = time.time() + + if self.args.use_amp: + scaler.scale(loss).backward() + scaler.step(model_optim) + scaler.update() + else: + loss.backward() + model_optim.step() + + print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time)) + train_loss = np.average(train_loss) + vali_loss = self.vali(vali_data, vali_loader, criterion) + test_loss = self.vali(test_data, test_loader, criterion) + + print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format( + epoch + 1, train_steps, train_loss, vali_loss, test_loss)) + early_stopping(vali_loss, self.model, path) + if early_stopping.early_stop: + print("Early stopping") + break + + adjust_learning_rate(model_optim, epoch + 1, self.args) + + best_model_path = path + '/' + 'checkpoint.pth' + self.model.load_state_dict(torch.load(best_model_path)) + + return self.model + + def test(self, setting, test=0): + test_data, test_loader = self._get_data(flag='test') + if test: + print('loading model') + self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth'))) + + preds = [] + trues = [] + folder_path = './test_results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + self.model.eval() + with torch.no_grad(): + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader): 
batch_x = batch_x.float().to(self.device) + batch_y = batch_y.float().to(self.device) + + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + outputs = outputs.detach().cpu().numpy() + batch_y = batch_y.detach().cpu().numpy() + + pred = outputs + true = batch_y + + preds.append(pred) + trues.append(true) + + if i % 20 == 0: + input = batch_x.detach().cpu().numpy() + gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0) + pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0) + visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf')) + + # concatenate along the batch axis; the last batch may be smaller than batch_size + preds = np.concatenate(preds, axis=0) + trues = np.concatenate(trues, axis=0) + print('test shape:', preds.shape, trues.shape) + + preds = preds.reshape((-1, preds.shape[-2], preds.shape[-1])) + trues = trues.reshape((-1, trues.shape[-2], trues.shape[-1])) + print('test shape:', preds.shape, trues.shape) + + # result save + folder_path = './results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + mae, mse, rmse, mape, mspe = metric(preds, trues) + print('mse:{}, mae:{}'.format(mse, mae)) + f = open("result_long_term_forecast.txt", 'a') + f.write(setting + " \n") + f.write('mse:{}, mae:{}'.format(mse, mae)) + f.write('\n') + f.write('\n') + f.close() + + np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) + np.save(folder_path + 'pred.npy', preds) + np.save(folder_path + 'true.npy', trues) + + return diff --git a/exp/exp_main.py b/exp/exp_main.py new file mode 100644 index 0000000..316e818 --- /dev/null +++ b/exp/exp_main.py @@ -0,0 +1,318 @@ +from data_provider.data_factory import data_provider +from exp.exp_basic import Exp_Basic +from models import Transformer +from utils.tools import EarlyStopping, adjust_learning_rate, visual +from utils.metrics import metric + +import numpy as np +import torch +import torch.nn as nn +from torch import optim + +import os +import time + +import warnings +import matplotlib.pyplot as plt + +warnings.filterwarnings('ignore') + + +class Exp_Main(Exp_Basic): + def __init__(self, args): + super(Exp_Main, self).__init__(args) + + def _build_model(self): + model_dict = { + # 'Autoformer': Autoformer, + 'Transformer': Transformer, + # 'Informer': Informer, + # 'Reformer': Reformer, + } + model = model_dict[self.args.model].Model(self.args).float() + + if self.args.use_multi_gpu and self.args.use_gpu: + model = nn.DataParallel(model, device_ids=self.args.device_ids) + return model + + def _get_data(self, flag): + data_set, data_loader = data_provider(self.args, flag) + return data_set,
data_loader + + def _select_optimizer(self): + model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) + return model_optim + + def _select_criterion(self): + criterion = nn.MSELoss() + return criterion + + def vali(self, vali_data, vali_loader, criterion): + total_loss = [] + self.model.eval() + with torch.no_grad(): + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader): + batch_x = batch_x.float().to(self.device) + batch_y = batch_y.float() + + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + + pred = outputs.detach().cpu() + true = batch_y.detach().cpu() + + loss = criterion(pred, true) + + total_loss.append(loss) + total_loss = np.average(total_loss) + self.model.train() + return total_loss + + def train(self, setting): + train_data, train_loader = self._get_data(flag='train') + vali_data, vali_loader = self._get_data(flag='val') + test_data, test_loader = self._get_data(flag='test') + + path = os.path.join(self.args.checkpoints, setting) + if not os.path.exists(path): + os.makedirs(path) + + time_now = time.time() + + train_steps = len(train_loader) + early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) + + model_optim = self._select_optimizer() + criterion = self._select_criterion() + + if self.args.use_amp: + scaler = torch.cuda.amp.GradScaler() + + for epoch in range(self.args.train_epochs): + iter_count = 0 + train_loss = [] + + self.model.train() + epoch_time = time.time() + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader): + iter_count += 1 + model_optim.zero_grad() + batch_x = batch_x.float().to(self.device) + + batch_y = batch_y.float().to(self.device) + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + loss = criterion(outputs, batch_y) + train_loss.append(loss.item()) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, 
dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + loss = criterion(outputs, batch_y) + train_loss.append(loss.item()) + + if (i + 1) % 100 == 0: + print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) + speed = (time.time() - time_now) / iter_count + left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i) + print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) + iter_count = 0 + time_now = time.time() + + if self.args.use_amp: + scaler.scale(loss).backward() + scaler.step(model_optim) + scaler.update() + else: + loss.backward() + model_optim.step() + + print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time)) + train_loss = np.average(train_loss) + vali_loss = self.vali(vali_data, vali_loader, criterion) + test_loss = self.vali(test_data, test_loader, criterion) + + print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format( + epoch + 1, train_steps, train_loss, vali_loss, test_loss)) + early_stopping(vali_loss, self.model, path) + if early_stopping.early_stop: + print("Early stopping") + break + + adjust_learning_rate(model_optim, epoch + 1, self.args) + + best_model_path = path + '/' + 'checkpoint.pth' + self.model.load_state_dict(torch.load(best_model_path)) + + return + + def test(self, setting, test=0): + test_data, test_loader = self._get_data(flag='test') + if test: + print('loading model') + self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth'))) + + preds = [] + trues = [] + folder_path = './test_results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + self.model.eval() + with torch.no_grad(): + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader): + batch_x = batch_x.float().to(self.device) + batch_y = batch_y.float().to(self.device) + + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + + f_dim = -1 if self.args.features == 'MS' else 0 + outputs = outputs[:, -self.args.pred_len:, f_dim:] + batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device) + outputs = outputs.detach().cpu().numpy() + batch_y = batch_y.detach().cpu().numpy() + + pred = outputs # outputs.detach().cpu().numpy() # .squeeze() + true = batch_y # batch_y.detach().cpu().numpy() # .squeeze() + + preds.append(pred) + trues.append(true) + if i % 20 == 0: + input = batch_x.detach().cpu().numpy() + gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0) + pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0) + visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf')) + + preds = np.concatenate(preds, axis=0) + trues = 
np.concatenate(trues, axis=0) + print('test shape:', preds.shape, trues.shape) + preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) + trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) + print('test shape:', preds.shape, trues.shape) + + # result save + folder_path = './results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + mae, mse, rmse, mape, mspe = metric(preds, trues) + print('mse:{}, mae:{}'.format(mse, mae)) + f = open("result.txt", 'a') + f.write(setting + " \n") + f.write('mse:{}, mae:{}'.format(mse, mae)) + f.write('\n') + f.write('\n') + f.close() + + np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) + np.save(folder_path + 'pred.npy', preds) + np.save(folder_path + 'true.npy', trues) + + return + + def predict(self, setting, load=False): + pred_data, pred_loader = self._get_data(flag='pred') + + if load: + path = os.path.join(self.args.checkpoints, setting) + best_model_path = path + '/' + 'checkpoint.pth' + self.model.load_state_dict(torch.load(best_model_path)) + + preds = [] + + self.model.eval() + with torch.no_grad(): + for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader): + batch_x = batch_x.float().to(self.device) + batch_y = batch_y.float() + batch_x_mark = batch_x_mark.float().to(self.device) + batch_y_mark = batch_y_mark.float().to(self.device) + + # decoder input + dec_inp = torch.zeros([batch_y.shape[0], self.args.pred_len, batch_y.shape[2]]).float() + dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device) + # encoder - decoder + if self.args.use_amp: + with torch.cuda.amp.autocast(): + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + else: + if self.args.output_attention: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0] + else: + outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) + pred = outputs.detach().cpu().numpy() # .squeeze() + preds.append(pred) + + preds = np.array(preds) + preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) + + # result save + folder_path = './results/' + setting + '/' + if not os.path.exists(folder_path): + os.makedirs(folder_path) + + np.save(folder_path + 'real_prediction.npy', preds) + + return diff --git a/layers/Autoformer_EncDec.py b/layers/Autoformer_EncDec.py new file mode 100644 index 0000000..6fce4bc --- /dev/null +++ b/layers/Autoformer_EncDec.py @@ -0,0 +1,203 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class my_Layernorm(nn.Module): + """ + Special designed layernorm for the seasonal part + """ + + def __init__(self, channels): + super(my_Layernorm, self).__init__() + self.layernorm = nn.LayerNorm(channels) + + def forward(self, x): + x_hat = self.layernorm(x) + bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1) + return x_hat - bias + + +class moving_avg(nn.Module): + """ + Moving average block to highlight the trend of time series + """ + + def __init__(self, kernel_size, stride): + super(moving_avg, self).__init__() + self.kernel_size = kernel_size + self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0) + + def forward(self, x): + # padding on the both ends of time series + front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1) + end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1) + x = torch.cat([front, 
x, end], dim=1) + x = self.avg(x.permute(0, 2, 1)) + x = x.permute(0, 2, 1) + return x + + +class series_decomp(nn.Module): + """ + Series decomposition block + """ + + def __init__(self, kernel_size): + super(series_decomp, self).__init__() + self.moving_avg = moving_avg(kernel_size, stride=1) + + def forward(self, x): + moving_mean = self.moving_avg(x) + res = x - moving_mean + return res, moving_mean + + +class series_decomp_multi(nn.Module): + """ + Multiple Series decomposition block from FEDformer + """ + + def __init__(self, kernel_size): + super(series_decomp_multi, self).__init__() + self.kernel_size = kernel_size + # wrap in nn.ModuleList so the per-kernel decompositions are registered as submodules + self.series_decomp = nn.ModuleList([series_decomp(kernel) for kernel in kernel_size]) + + def forward(self, x): + moving_mean = [] + res = [] + for func in self.series_decomp: + sea, trend = func(x) + moving_mean.append(trend) + res.append(sea) + + sea = sum(res) / len(res) + moving_mean = sum(moving_mean) / len(moving_mean) + return sea, moving_mean + + +class EncoderLayer(nn.Module): + """ + Autoformer encoder layer with the progressive decomposition architecture + """ + + def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropout=0.1, activation="relu"): + super(EncoderLayer, self).__init__() + d_ff = d_ff or 4 * d_model + self.attention = attention + self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False) + self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False) + self.decomp1 = series_decomp(moving_avg) + self.decomp2 = series_decomp(moving_avg) + self.dropout = nn.Dropout(dropout) + self.activation = F.relu if activation == "relu" else F.gelu + + def forward(self, x, attn_mask=None): + new_x, attn = self.attention( + x, x, x, + attn_mask=attn_mask + ) + x = x + self.dropout(new_x) + x, _ = self.decomp1(x) + y = x + y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) + y = self.dropout(self.conv2(y).transpose(-1, 1)) + res, _ = self.decomp2(x + y) + return res, attn + + +class Encoder(nn.Module): + """ + Autoformer encoder + """ + + def __init__(self, attn_layers, conv_layers=None, norm_layer=None): + super(Encoder, self).__init__() + self.attn_layers = nn.ModuleList(attn_layers) + self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None + self.norm = norm_layer + + def forward(self, x, attn_mask=None): + attns = [] + if self.conv_layers is not None: + for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers): + x, attn = attn_layer(x, attn_mask=attn_mask) + x = conv_layer(x) + attns.append(attn) + x, attn = self.attn_layers[-1](x) + attns.append(attn) + else: + for attn_layer in self.attn_layers: + x, attn = attn_layer(x, attn_mask=attn_mask) + attns.append(attn) + + if self.norm is not None: + x = self.norm(x) + + return x, attns + + +class DecoderLayer(nn.Module): + """ + Autoformer decoder layer with the progressive decomposition architecture + """ + + def __init__(self, self_attention, cross_attention, d_model, c_out, d_ff=None, + moving_avg=25, dropout=0.1, activation="relu"): + super(DecoderLayer, self).__init__() + d_ff = d_ff or 4 * d_model + self.self_attention = self_attention + self.cross_attention = cross_attention + self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False) + self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False) + self.decomp1 = series_decomp(moving_avg) + self.decomp2 = series_decomp(moving_avg) + self.decomp3 = series_decomp(moving_avg) +
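# Editor's note (hedged): the three series_decomp blocks progressively strip the trend +
# after self-attention, cross-attention, and the feed-forward convolutions; their +
# trend residues are summed and projected back to c_out channels by the Conv1d below. +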
self.dropout = nn.Dropout(dropout) + self.projection = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=3, stride=1, padding=1, + padding_mode='circular', bias=False) + self.activation = F.relu if activation == "relu" else F.gelu + + def forward(self, x, cross, x_mask=None, cross_mask=None): + x = x + self.dropout(self.self_attention( + x, x, x, + attn_mask=x_mask + )[0]) + x, trend1 = self.decomp1(x) + x = x + self.dropout(self.cross_attention( + x, cross, cross, + attn_mask=cross_mask + )[0]) + x, trend2 = self.decomp2(x) + y = x + y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) + y = self.dropout(self.conv2(y).transpose(-1, 1)) + x, trend3 = self.decomp3(x + y) + + residual_trend = trend1 + trend2 + trend3 + residual_trend = self.projection(residual_trend.permute(0, 2, 1)).transpose(1, 2) + return x, residual_trend + + +class Decoder(nn.Module): + """ + Autoformer encoder + """ + + def __init__(self, layers, norm_layer=None, projection=None): + super(Decoder, self).__init__() + self.layers = nn.ModuleList(layers) + self.norm = norm_layer + self.projection = projection + + def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None): + for layer in self.layers: + x, residual_trend = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask) + trend = trend + residual_trend + + if self.norm is not None: + x = self.norm(x) + + if self.projection is not None: + x = self.projection(x) + return x, trend diff --git a/layers/Embed.py b/layers/Embed.py new file mode 100644 index 0000000..b9b14ea --- /dev/null +++ b/layers/Embed.py @@ -0,0 +1,173 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.utils import weight_norm +import math + + +class PositionalEmbedding(nn.Module): + def __init__(self, d_model, max_len=5000): + super(PositionalEmbedding, self).__init__() + # Compute the positional encodings once in log space. 
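+ # Editor's sketch (assuming the standard sinusoidal form):
+ # PE[pos, 2i] = sin(pos / 10000^(2i / d_model)); PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model));
+ # div_term below computes 10000^(-2i / d_model) via exp/log for numerical stability.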
+ pe = torch.zeros(max_len, d_model).float() + pe.requires_grad = False + + position = torch.arange(0, max_len).float().unsqueeze(1) + div_term = (torch.arange(0, d_model, 2).float() + * -(math.log(10000.0) / d_model)).exp() + + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + + pe = pe.unsqueeze(0) + self.register_buffer('pe', pe) + + def forward(self, x): + return self.pe[:, :x.size(1)] + + +class TokenEmbedding(nn.Module): + def __init__(self, c_in, d_model): + super(TokenEmbedding, self).__init__() + padding = 1 if torch.__version__ >= '1.5.0' else 2 + self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, + kernel_size=3, padding=padding, padding_mode='circular', bias=False) + for m in self.modules(): + if isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_( + m.weight, mode='fan_in', nonlinearity='leaky_relu') + + def forward(self, x): + x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) + return x + + +class FixedEmbedding(nn.Module): + def __init__(self, c_in, d_model): + super(FixedEmbedding, self).__init__() + + w = torch.zeros(c_in, d_model).float() + w.requires_grad = False + + position = torch.arange(0, c_in).float().unsqueeze(1) + div_term = (torch.arange(0, d_model, 2).float() + * -(math.log(10000.0) / d_model)).exp() + + w[:, 0::2] = torch.sin(position * div_term) + w[:, 1::2] = torch.cos(position * div_term) + + self.emb = nn.Embedding(c_in, d_model) + self.emb.weight = nn.Parameter(w, requires_grad=False) + + def forward(self, x): + return self.emb(x).detach() + + +class TemporalEmbedding(nn.Module): + def __init__(self, d_model, embed_type='fixed', freq='h'): + super(TemporalEmbedding, self).__init__() + + minute_size = 4 + hour_size = 24 + weekday_size = 7 + day_size = 32 + month_size = 13 + + Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding + if freq == 't': + self.minute_embed = Embed(minute_size, d_model) + self.hour_embed = Embed(hour_size, d_model) + self.weekday_embed = Embed(weekday_size, d_model) + self.day_embed = Embed(day_size, d_model) + self.month_embed = Embed(month_size, d_model) + + def forward(self, x): + x = x.long() + minute_x = self.minute_embed(x[:, :, 4]) if hasattr( + self, 'minute_embed') else 0.
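+ # Editor's note (assumed mark layout): x stacks [month, day, weekday, hour(, minute)]
+ # along the last axis, matching the indices used below; minute_size = 4 reflects an
+ # assumed bucketing of minutes into 15-minute slots when freq == 't'.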
+ hour_x = self.hour_embed(x[:, :, 3]) + weekday_x = self.weekday_embed(x[:, :, 2]) + day_x = self.day_embed(x[:, :, 1]) + month_x = self.month_embed(x[:, :, 0]) + + return hour_x + weekday_x + day_x + month_x + minute_x + + +class TimeFeatureEmbedding(nn.Module): + def __init__(self, d_model, embed_type='timeF', freq='h'): + super(TimeFeatureEmbedding, self).__init__() + + freq_map = {'h': 4, 't': 5, 's': 6, + 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3} + d_inp = freq_map[freq] + self.embed = nn.Linear(d_inp, d_model, bias=False) + + def forward(self, x): + return self.embed(x) + + +class DataEmbedding(nn.Module): + def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1): + super(DataEmbedding, self).__init__() + + self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) + self.position_embedding = PositionalEmbedding(d_model=d_model) + self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, + freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding( + d_model=d_model, embed_type=embed_type, freq=freq) + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x, x_mark): + if x_mark is None: + x = self.value_embedding(x) + self.position_embedding(x) + else: + x = self.value_embedding( + x) + self.temporal_embedding(x_mark) + self.position_embedding(x) + return self.dropout(x) + + +class DataEmbedding_wo_pos(nn.Module): + def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1): + super(DataEmbedding_wo_pos, self).__init__() + + self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model) + self.position_embedding = PositionalEmbedding(d_model=d_model) + self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type, + freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding( + d_model=d_model, embed_type=embed_type, freq=freq) + self.dropout = nn.Dropout(p=dropout) + + def forward(self, x, x_mark): + if x_mark is None: + x = self.value_embedding(x) + else: + x = self.value_embedding(x) + self.temporal_embedding(x_mark) + return self.dropout(x) + + +class PatchEmbedding(nn.Module): + def __init__(self, d_model, patch_len, stride, padding, dropout): + super(PatchEmbedding, self).__init__() + # Patching + self.patch_len = patch_len + self.stride = stride + self.padding_patch_layer = nn.ReplicationPad1d((0, padding)) + + # Backbone, Input encoding: projection of feature vectors onto a d-dim vector space + self.value_embedding = nn.Linear(patch_len, d_model, bias=False) + + # Positional embedding + self.position_embedding = PositionalEmbedding(d_model) + + # Residual dropout + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + # do patching + n_vars = x.shape[1] + x = self.padding_patch_layer(x) + x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride) + x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3])) + # Input encoding + x = self.value_embedding(x) + self.position_embedding(x) + return self.dropout(x), n_vars diff --git a/layers/SelfAttention_Family.py b/layers/SelfAttention_Family.py new file mode 100644 index 0000000..6c33dc7 --- /dev/null +++ b/layers/SelfAttention_Family.py @@ -0,0 +1,211 @@ +import torch +import torch.nn as nn +import numpy as np +from math import sqrt +from utils.masking import TriangularCausalMask, ProbMask + + +class DSAttention(nn.Module): + '''De-stationary Attention''' + + def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): + super(DSAttention, 
self).__init__() + self.scale = scale + self.mask_flag = mask_flag + self.output_attention = output_attention + self.dropout = nn.Dropout(attention_dropout) + + def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): + B, L, H, E = queries.shape + _, S, _, D = values.shape + scale = self.scale or 1. / sqrt(E) + + tau = 1.0 if tau is None else tau.unsqueeze( + 1).unsqueeze(1) # B x 1 x 1 x 1 + delta = 0.0 if delta is None else delta.unsqueeze( + 1).unsqueeze(1) # B x 1 x 1 x S + + # De-stationary Attention, rescaling pre-softmax score with learned de-stationary factors + scores = torch.einsum("blhe,bshe->bhls", queries, keys) * tau + delta + + if self.mask_flag: + if attn_mask is None: + attn_mask = TriangularCausalMask(B, L, device=queries.device) + + scores.masked_fill_(attn_mask.mask, -np.inf) + + A = self.dropout(torch.softmax(scale * scores, dim=-1)) + V = torch.einsum("bhls,bshd->blhd", A, values) + + if self.output_attention: + return (V.contiguous(), A) + else: + return (V.contiguous(), None) + + +class FullAttention(nn.Module): + def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): + super(FullAttention, self).__init__() + self.scale = scale + self.mask_flag = mask_flag + self.output_attention = output_attention + self.dropout = nn.Dropout(attention_dropout) + + def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): + B, L, H, E = queries.shape + _, S, _, D = values.shape + scale = self.scale or 1. / sqrt(E) + + scores = torch.einsum("blhe,bshe->bhls", queries, keys) + + if self.mask_flag: + if attn_mask is None: + attn_mask = TriangularCausalMask(B, L, device=queries.device) + + scores.masked_fill_(attn_mask.mask, -np.inf) + + A = self.dropout(torch.softmax(scale * scores, dim=-1)) + V = torch.einsum("bhls,bshd->blhd", A, values) + + if self.output_attention: + return (V.contiguous(), A) + else: + return (V.contiguous(), None) + + +class ProbAttention(nn.Module): + def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): + super(ProbAttention, self).__init__() + self.factor = factor + self.scale = scale + self.mask_flag = mask_flag + self.output_attention = output_attention + self.dropout = nn.Dropout(attention_dropout) + + def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q) + # Q [B, H, L, D] + B, H, L_K, E = K.shape + _, _, L_Q, _ = Q.shape + + # calculate the sampled Q_K + K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E) + # real U = U_part(factor*ln(L_k))*L_q + index_sample = torch.randint(L_K, (L_Q, sample_k)) + K_sample = K_expand[:, :, torch.arange( + L_Q).unsqueeze(1), index_sample, :] + Q_K_sample = torch.matmul( + Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze() + + # find the Top_k query with sparisty measurement + M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K) + M_top = M.topk(n_top, sorted=False)[1] + + # use the reduced Q to calculate Q_K + Q_reduce = Q[torch.arange(B)[:, None, None], + torch.arange(H)[None, :, None], + M_top, :] # factor*ln(L_q) + Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k + + return Q_K, M_top + + def _get_initial_context(self, V, L_Q): + B, H, L_V, D = V.shape + if not self.mask_flag: + # V_sum = V.sum(dim=-2) + V_sum = V.mean(dim=-2) + contex = V_sum.unsqueeze(-2).expand(B, H, + L_Q, V_sum.shape[-1]).clone() + else: # use mask + # requires that L_Q == L_V, i.e. 
for self-attention only + assert (L_Q == L_V) + contex = V.cumsum(dim=-2) + return contex + + def _update_context(self, context_in, V, scores, index, L_Q, attn_mask): + B, H, L_V, D = V.shape + + if self.mask_flag: + attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device) + scores.masked_fill_(attn_mask.mask, -np.inf) + + attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores) + + context_in[torch.arange(B)[:, None, None], + torch.arange(H)[None, :, None], + index, :] = torch.matmul(attn, V).type_as(context_in) + if self.output_attention: + attns = (torch.ones([B, H, L_V, L_V]) / + L_V).type_as(attn).to(attn.device) + attns[torch.arange(B)[:, None, None], torch.arange(H)[ + None, :, None], index, :] = attn + return (context_in, attns) + else: + return (context_in, None) + + def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): + B, L_Q, H, D = queries.shape + _, L_K, _, _ = keys.shape + + queries = queries.transpose(2, 1) + keys = keys.transpose(2, 1) + values = values.transpose(2, 1) + + U_part = self.factor * \ + np.ceil(np.log(L_K)).astype('int').item() # c*ln(L_k) + u = self.factor * \ + np.ceil(np.log(L_Q)).astype('int').item() # c*ln(L_q) + + U_part = U_part if U_part < L_K else L_K + u = u if u < L_Q else L_Q + + scores_top, index = self._prob_QK( + queries, keys, sample_k=U_part, n_top=u) + + # add scale factor + scale = self.scale or 1. / sqrt(D) + if scale is not None: + scores_top = scores_top * scale + # get the context + context = self._get_initial_context(values, L_Q) + # update the context with selected top_k queries + context, attn = self._update_context( + context, values, scores_top, index, L_Q, attn_mask) + + return context.contiguous(), attn + + +class AttentionLayer(nn.Module): + def __init__(self, attention, d_model, n_heads, d_keys=None, + d_values=None): + super(AttentionLayer, self).__init__() + + d_keys = d_keys or (d_model // n_heads) + d_values = d_values or (d_model // n_heads) + + self.inner_attention = attention + self.query_projection = nn.Linear(d_model, d_keys * n_heads) + self.key_projection = nn.Linear(d_model, d_keys * n_heads) + self.value_projection = nn.Linear(d_model, d_values * n_heads) + self.out_projection = nn.Linear(d_values * n_heads, d_model) + self.n_heads = n_heads + + def forward(self, queries, keys, values, attn_mask, tau=None, delta=None): + B, L, _ = queries.shape + _, S, _ = keys.shape + H = self.n_heads + + queries = self.query_projection(queries).view(B, L, H, -1) + keys = self.key_projection(keys).view(B, S, H, -1) + values = self.value_projection(values).view(B, S, H, -1) + + out, attn = self.inner_attention( + queries, + keys, + values, + attn_mask, + tau=tau, + delta=delta + ) + out = out.view(B, L, -1) + + return self.out_projection(out), attn \ No newline at end of file diff --git a/layers/Transformer_EncDec.py b/layers/Transformer_EncDec.py new file mode 100644 index 0000000..dabf4c2 --- /dev/null +++ b/layers/Transformer_EncDec.py @@ -0,0 +1,135 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ConvLayer(nn.Module): + def __init__(self, c_in): + super(ConvLayer, self).__init__() + self.downConv = nn.Conv1d(in_channels=c_in, + out_channels=c_in, + kernel_size=3, + padding=2, + padding_mode='circular') + self.norm = nn.BatchNorm1d(c_in) + self.activation = nn.ELU() + self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1) + + def forward(self, x): + x = self.downConv(x.permute(0, 2, 1)) + x = self.norm(x) + x = self.activation(x) + x = 
self.maxPool(x) + x = x.transpose(1, 2) + return x + + +class EncoderLayer(nn.Module): + def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"): + super(EncoderLayer, self).__init__() + d_ff = d_ff or 4 * d_model + self.attention = attention + self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1) + self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + self.activation = F.relu if activation == "relu" else F.gelu + + def forward(self, x, attn_mask=None, tau=None, delta=None): + new_x, attn = self.attention( + x, x, x, + attn_mask=attn_mask, + tau=tau, delta=delta + ) + x = x + self.dropout(new_x) + + y = x = self.norm1(x) + y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) + y = self.dropout(self.conv2(y).transpose(-1, 1)) + + return self.norm2(x + y), attn + + +class Encoder(nn.Module): + def __init__(self, attn_layers, conv_layers=None, norm_layer=None): + super(Encoder, self).__init__() + self.attn_layers = nn.ModuleList(attn_layers) + self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None + self.norm = norm_layer + + def forward(self, x, attn_mask=None, tau=None, delta=None): + # x [B, L, D] + attns = [] + if self.conv_layers is not None: + for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)): + delta = delta if i == 0 else None + x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta) + x = conv_layer(x) + attns.append(attn) + x, attn = self.attn_layers[-1](x, tau=tau, delta=None) + attns.append(attn) + else: + for attn_layer in self.attn_layers: + x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta) + attns.append(attn) + + if self.norm is not None: + x = self.norm(x) + + return x, attns + + +class DecoderLayer(nn.Module): + def __init__(self, self_attention, cross_attention, d_model, d_ff=None, + dropout=0.1, activation="relu"): + super(DecoderLayer, self).__init__() + d_ff = d_ff or 4 * d_model + self.self_attention = self_attention + self.cross_attention = cross_attention + self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1) + self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + self.activation = F.relu if activation == "relu" else F.gelu + + def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None): + x = x + self.dropout(self.self_attention( + x, x, x, + attn_mask=x_mask, + tau=tau, delta=None + )[0]) + x = self.norm1(x) + + x = x + self.dropout(self.cross_attention( + x, cross, cross, + attn_mask=cross_mask, + tau=tau, delta=delta + )[0]) + + y = x = self.norm2(x) + y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) + y = self.dropout(self.conv2(y).transpose(-1, 1)) + + return self.norm3(x + y) + + +class Decoder(nn.Module): + def __init__(self, layers, norm_layer=None, projection=None): + super(Decoder, self).__init__() + self.layers = nn.ModuleList(layers) + self.norm = norm_layer + self.projection = projection + + def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None): + for layer in self.layers: + x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta) + + if self.norm is not None: + x = self.norm(x) + + if 
self.projection is not None: + x = self.projection(x) + return x diff --git a/models/DLinear.py b/models/DLinear.py new file mode 100644 index 0000000..c826a43 --- /dev/null +++ b/models/DLinear.py @@ -0,0 +1,98 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from layers.Autoformer_EncDec import series_decomp + + +class Model(nn.Module): + """ + Paper link: https://arxiv.org/pdf/2205.13504.pdf + """ + + def __init__(self, configs, individual=False): + """ + individual: Bool, whether to use a separate linear layer per variate (channel) instead of one shared layer. + """ + super(Model, self).__init__() + self.task_name = configs.task_name + self.seq_len = configs.seq_len + if self.task_name == 'classification': + self.pred_len = configs.seq_len + else: + self.pred_len = configs.pred_len + # Series decomposition block from Autoformer + self.decomposition = series_decomp(configs.moving_avg) + self.individual = individual + self.channels = configs.enc_in + + if self.individual: + self.Linear_Seasonal = nn.ModuleList() + self.Linear_Trend = nn.ModuleList() + + for i in range(self.channels): + self.Linear_Seasonal.append( + nn.Linear(self.seq_len, self.pred_len)) + self.Linear_Trend.append( + nn.Linear(self.seq_len, self.pred_len)) + + self.Linear_Seasonal[i].weight = nn.Parameter( + (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len])) + self.Linear_Trend[i].weight = nn.Parameter( + (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len])) + else: + self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len) + self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len) + + self.Linear_Seasonal.weight = nn.Parameter( + (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len])) + self.Linear_Trend.weight = nn.Parameter( + (1 / self.seq_len) * torch.ones([self.pred_len, self.seq_len])) + + if self.task_name == 'classification': + self.act = F.gelu + self.dropout = nn.Dropout(configs.dropout) + self.projection = nn.Linear( + configs.enc_in * configs.seq_len, configs.num_class) + + def encoder(self, x): + seasonal_init, trend_init = self.decomposition(x) + seasonal_init, trend_init = seasonal_init.permute( + 0, 2, 1), trend_init.permute(0, 2, 1) + if self.individual: + seasonal_output = torch.zeros([seasonal_init.size(0), seasonal_init.size(1), self.pred_len], + dtype=seasonal_init.dtype).to(seasonal_init.device) + trend_output = torch.zeros([trend_init.size(0), trend_init.size(1), self.pred_len], + dtype=trend_init.dtype).to(trend_init.device) + for i in range(self.channels): + seasonal_output[:, i, :] = self.Linear_Seasonal[i]( + seasonal_init[:, i, :]) + trend_output[:, i, :] = self.Linear_Trend[i]( + trend_init[:, i, :]) + else: + seasonal_output = self.Linear_Seasonal(seasonal_init) + trend_output = self.Linear_Trend(trend_init) + x = seasonal_output + trend_output + return x.permute(0, 2, 1) + + def forecast(self, x_enc): + # Encoder + return self.encoder(x_enc) + + def classification(self, x_enc): + # Encoder + enc_out = self.encoder(x_enc) + # Output + # (batch_size, enc_in * seq_len) + output = enc_out.reshape(enc_out.shape[0], -1) + # (batch_size, num_classes) + output = self.projection(output) + return output + + def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None): + if self.task_name == 'long_term_forecast': + dec_out = self.forecast(x_enc) + return dec_out[:, -self.pred_len:, :] # [B, L, D] + if self.task_name == 'classification': + dec_out = self.classification(x_enc) + return dec_out # [B, N] + return None diff --git a/models/Transformer.py b/models/Transformer.py
new file mode 100644 index 0000000..c10a908 --- /dev/null +++ b/models/Transformer.py @@ -0,0 +1,100 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer +from layers.SelfAttention_Family import FullAttention, AttentionLayer +from layers.Embed import DataEmbedding +import numpy as np + + +class Model(nn.Module): + """ + Vanilla Transformer + with O(L^2) complexity + Paper link: https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf + """ + + def __init__(self, configs): + super(Model, self).__init__() + self.task_name = configs.task_name + self.pred_len = configs.pred_len + self.output_attention = configs.output_attention + # Embedding + self.enc_embedding = DataEmbedding(configs.enc_in, configs.d_model, configs.embed, configs.freq, + configs.dropout) + # Encoder + self.encoder = Encoder( + [ + EncoderLayer( + AttentionLayer( + FullAttention(False, configs.factor, attention_dropout=configs.dropout, + output_attention=configs.output_attention), configs.d_model, configs.n_heads), + configs.d_model, + configs.d_ff, + dropout=configs.dropout, + activation=configs.activation + ) for l in range(configs.e_layers) + ], + norm_layer=torch.nn.LayerNorm(configs.d_model) + ) + # Decoder + if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast': + self.dec_embedding = DataEmbedding(configs.dec_in, configs.d_model, configs.embed, configs.freq, + configs.dropout) + self.decoder = Decoder( + [ + DecoderLayer( + AttentionLayer( + FullAttention(True, configs.factor, attention_dropout=configs.dropout, + output_attention=False), + configs.d_model, configs.n_heads), + AttentionLayer( + FullAttention(False, configs.factor, attention_dropout=configs.dropout, + output_attention=False), + configs.d_model, configs.n_heads), + configs.d_model, + configs.d_ff, + dropout=configs.dropout, + activation=configs.activation, + ) + for l in range(configs.d_layers) + ], + norm_layer=torch.nn.LayerNorm(configs.d_model), + projection=nn.Linear(configs.d_model, configs.c_out, bias=True) + ) + self.projection = nn.Linear(configs.d_model, configs.c_out, bias=True) + if self.task_name == 'classification': + self.act = F.gelu + self.dropout = nn.Dropout(configs.dropout) + self.projection = nn.Linear(configs.d_model * configs.seq_len, configs.num_class) + + def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec): + # Embedding + enc_out = self.enc_embedding(x_enc, x_mark_enc) + enc_out, attns = self.encoder(enc_out, attn_mask=None) + + dec_out = self.dec_embedding(x_dec, x_mark_dec) + dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None) + return dec_out + + def classification(self, x_enc, x_mark_enc): + # Embedding + enc_out = self.enc_embedding(x_enc, None) + enc_out, attns = self.encoder(enc_out, attn_mask=None) + + # Output + output = self.act(enc_out) # the output transformer encoder/decoder embeddings don't include non-linearity + output = self.dropout(output) + output = output * x_mark_enc.unsqueeze(-1) # zero-out padding embeddings + output = output.reshape(output.shape[0], -1) # (batch_size, seq_length * d_model) + output = self.projection(output) # (batch_size, num_classes) + return output + + def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None): + if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast': + dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec) + return dec_out[:, 
-self.pred_len:, :] # [B, L, D] + if self.task_name == 'classification': + dec_out = self.classification(x_enc, x_mark_enc) + return dec_out # [B, N] + return None diff --git a/models/model.py b/models/model.py deleted file mode 100644 index 7fa23c5..0000000 --- a/models/model.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from base import BaseModel - - -class MnistModel(BaseModel): - def __init__(self, num_classes=10): - super().__init__() - self.conv1 = nn.Conv2d(1, 10, kernel_size=5) - self.conv2 = nn.Conv2d(10, 20, kernel_size=5) - self.conv2_drop = nn.Dropout2d() - self.fc1 = nn.Linear(320, 50) - self.fc2 = nn.Linear(50, num_classes) - - def forward(self, x): - x = F.relu(F.max_pool2d(self.conv1(x), 2)) - x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) - x = x.view(-1, 320) - x = F.relu(self.fc1(x)) - x = F.dropout(x, training=self.training) - x = self.fc2(x) - return F.log_softmax(x, dim=1) diff --git a/outputs/electricity/First-order.jpg b/outputs/electricity/First-order.jpg deleted file mode 100644 index 9179299..0000000 Binary files a/outputs/electricity/First-order.jpg and /dev/null differ diff --git a/outputs/electricity/Second-order.jpg b/outputs/electricity/Second-order.jpg deleted file mode 100644 index 7d54d02..0000000 Binary files a/outputs/electricity/Second-order.jpg and /dev/null differ diff --git a/outputs/electricity/Summed_plot_power_usage_Test.jpg b/outputs/electricity/Summed_plot_power_usage_Test.jpg deleted file mode 100644 index 5d3a6e9..0000000 Binary files a/outputs/electricity/Summed_plot_power_usage_Test.jpg and /dev/null differ diff --git a/outputs/electricity/Summed_plot_power_usage_Validation.jpg b/outputs/electricity/Summed_plot_power_usage_Validation.jpg deleted file mode 100644 index 6a37140..0000000 Binary files a/outputs/electricity/Summed_plot_power_usage_Validation.jpg and /dev/null differ diff --git a/outputs/electricity/Total.jpg b/outputs/electricity/Total.jpg deleted file mode 100644 index 55f5530..0000000 Binary files a/outputs/electricity/Total.jpg and /dev/null differ diff --git a/outputs/electricity/best-epoch=0.ckpt b/outputs/electricity/best-epoch=0.ckpt deleted file mode 100644 index 1220dd4..0000000 Binary files a/outputs/electricity/best-epoch=0.ckpt and /dev/null differ diff --git a/outputs/electricity/first_Si.csv b/outputs/electricity/first_Si.csv deleted file mode 100644 index e21cbac..0000000 --- a/outputs/electricity/first_Si.csv +++ /dev/null @@ -1,3 +0,0 @@ -index,S1,S1_conf -hour,-0.004068,0.010495 -day_of_week,-0.001436,0.010442 diff --git a/outputs/electricity/latest-epoch=0.ckpt b/outputs/electricity/latest-epoch=0.ckpt deleted file mode 100644 index 508c53c..0000000 Binary files a/outputs/electricity/latest-epoch=0.ckpt and /dev/null differ diff --git a/outputs/electricity/second_Si.csv b/outputs/electricity/second_Si.csv deleted file mode 100644 index 111e961..0000000 --- a/outputs/electricity/second_Si.csv +++ /dev/null @@ -1,2 +0,0 @@ -index,S2,S2_conf -"('hour', 'day_of_week')",-0.001391,0.014494 diff --git a/outputs/electricity/total_si.csv b/outputs/electricity/total_si.csv deleted file mode 100644 index aac5784..0000000 --- a/outputs/electricity/total_si.csv +++ /dev/null @@ -1,3 +0,0 @@ -index,ST,ST_conf -hour,0.976974,0.028696 -day_of_week,0.9963,0.027273 diff --git a/plot_comparison.py b/plot_comparison.py deleted file mode 100644 index 71595cc..0000000 --- a/plot_comparison.py +++ /dev/null @@ -1,31 +0,0 @@ -from classes.PlotConfig import * -import pandas as 
pd -import os -from matplotlib.ticker import MultipleLocator - -model = 'tft_pytorch' -output_dir = os.path.join(model, 'comparisons') -metrics = ['MAE', 'RMSE', 'RMSLE', 'SMAPE', 'NNSE'] - -result = pd.read_csv( - os.path.join(output_dir, 'performance_comparison.csv') -) - -for metric in metrics: - fig, ax = plt.subplots(figsize=(9,6)) - for method, selection_results in result.groupby('method'): - ax.plot( - selection_results['num_features'], selection_results[metric], label=method - ) - - ymin, ymax = ax.get_ylim() - ax.set_ylim(ymin, ymax*1.005) - - ax.xaxis.set_major_locator(MultipleLocator(base=1)) - - ax.set_ylabel(metric) - ax.set_xlabel('Number of Features') - ax.legend(ncol=2, edgecolor='black') - - fig.tight_layout() - fig.savefig(f'{output_dir}/{metric}.jpg', dpi=200) \ No newline at end of file diff --git a/requirements_full.txt b/requirements_full.txt deleted file mode 100644 index a893ae7..0000000 --- a/requirements_full.txt +++ /dev/null @@ -1,238 +0,0 @@ -absl-py==1.2.0 -accessible-pygments==0.0.4 -aiohttp==3.8.1 -aiosignal==1.2.0 -alabaster==0.7.13 -alembic==1.8.1 -antlr4-python3-runtime==4.9.3 -argon2-cffi==21.3.0 -argon2-cffi-bindings==21.2.0 -asttokens==2.0.5 -astunparse==1.6.3 -async-timeout==4.0.2 -attrs==22.1.0 -autopage==0.5.1 -Babel==2.12.1 -backcall==0.2.0 -beautifulsoup4==4.11.1 -bleach==5.0.1 -blis==0.7.9 -cachetools==5.2.0 -catalogue==2.0.8 -certifi==2022.6.15 -cffi==1.15.1 -charset-normalizer==2.1.0 -click==8.1.3 -cliff==3.10.1 -cmaes==0.8.2 -cmd2==2.4.2 -cmdstanpy==1.0.8 -colorama==0.4.6 -colorlog==6.6.0 -confection==0.0.4 -convertdate==2.4.0 -cycler==0.11.0 -cymem==2.0.7 -debugpy==1.6.2 -decorator==5.1.1 -defusedxml==0.7.1 -dill==0.3.6 -distlib==0.3.6 -docutils==0.17.1 -EasyProcess==1.1 -entrypoint2==1.1 -entrypoints==0.4 -ephem==4.1.3 -executing==0.9.1 -fastai==2.7.12 -fastcore==1.5.29 -fastdownload==0.0.7 -fastjsonschema==2.16.1 -fastprogress==1.0.3 -filelock==3.12.0 -flatbuffers==1.12 -fonttools==4.34.4 -frozenlist==1.3.0 -fsspec==2022.7.1 -future==0.18.2 -gast==0.4.0 -ghp-import==2.1.0 -google-auth==2.9.1 -google-auth-oauthlib==0.4.6 -google-pasta==0.2.0 -greenlet==1.1.2 -grpcio==1.47.0 -h5py==3.7.0 -hijri-converter==2.2.4 -holidays==0.16 -idna==3.3 -imagesize==1.4.1 -imbalanced-learn==0.10.1 -importlib-metadata==6.6.0 -ipykernel==6.15.1 -ipython==8.4.0 -ipython-genutils==0.2.0 -ipywidgets==7.7.1 -jedi==0.18.1 -Jinja2==3.1.2 -joblib==1.2.0 -jsonschema==4.9.0 -jupyter-book==0.15.1 -jupyter-cache==0.6.1 -jupyter-client==7.3.4 -jupyter-core==4.11.1 -jupyterlab-pygments==0.2.2 -jupyterlab-widgets==1.1.1 -keras==2.9.0 -Keras-Preprocessing==1.1.2 -kiwisolver==1.4.4 -korean-lunar-calendar==0.3.1 -langcodes==3.3.0 -latexcodec==2.0.1 -libclang==14.0.6 -linkify-it-py==2.0.0 -llvmlite==0.40.0 -LunarCalendar==0.0.9 -Mako==1.2.1 -Markdown==3.4.1 -markdown-it-py==2.2.0 -MarkupSafe==2.1.1 -matplotlib==3.5.2 -matplotlib-inline==0.1.3 -mdit-py-plugins==0.3.5 -mdurl==0.1.2 -mistune==0.8.4 -multidict==6.0.2 -multiprocess==0.70.14 -murmurhash==1.0.9 -myst-nb==0.17.2 -myst-parser==0.18.1 -nbclient==0.6.6 -nbconvert==6.5.0 -nbformat==5.4.0 -nest-asyncio==1.5.5 -notebook==6.4.12 -numba==0.57.0 -numpy==1.23.1 -oauthlib==3.2.0 -omegaconf==2.2.3 -onnx==1.12.0 -opt-einsum==3.3.0 -optuna==2.10.1 -packaging==21.3 -pandas==1.4.3 -pandocfilters==1.5.0 -parso==0.8.3 -pathy==0.10.1 -patsy==0.5.2 -pbr==5.9.0 -pickleshare==0.7.5 -Pillow==9.2.0 -platformdirs==3.5.0 -plotly-geo==1.0.0 -preshed==3.0.8 -prettytable==3.3.0 -prometheus-client==0.14.1 -prompt-toolkit==3.0.30 -prophet==1.1.1 
-protobuf==3.19.4 -psutil==5.9.1 -pure-eval==0.2.2 -pyasn1==0.4.8 -pyasn1-modules==0.2.8 -pybtex==0.24.0 -pybtex-docutils==1.0.2 -pycparser==2.21 -pydantic==1.10.7 -pydata-sphinx-theme==0.13.3 -pyDeprecate==0.3.2 -Pygments==2.12.0 -PyMeeus==0.5.11 -pyparsing==3.0.9 -pyperclip==1.8.2 -pyreadline3==3.4.1 -pyrsistent==0.18.1 -python-dateutil==2.8.2 -pytorch-forecasting==0.10.2 -pytorch-lightning==1.6.5 -pyts==0.12.0 -pytz==2022.1 -pyunpack==0.3 -pywin32==304 -pywinpty==2.0.6 -PyYAML==6.0 -pyzmq==23.2.0 -requests==2.28.1 -requests-oauthlib==1.3.1 -rsa==4.9 -SALib==1.4.7 -scikit-learn==1.1.1 -scipy==1.9.0 -seaborn==0.11.2 -Send2Trash==1.8.0 -setuptools-git==1.2 -six==1.16.0 -sklearn==0.0.post1 -smart-open==6.3.0 -snowballstemmer==2.2.0 -soupsieve==2.3.2.post1 -spacy==3.5.2 -spacy-legacy==3.0.12 -spacy-loggers==1.0.4 -Sphinx==5.0.0 -sphinx-book-theme==1.0.1 -sphinx-comments==0.0.3 -sphinx-copybutton==0.5.2 -sphinx-jupyterbook-latex==0.5.2 -sphinx-multitoc-numbering==0.1.3 -sphinx-thebe==0.2.1 -sphinx-togglebutton==0.3.2 -sphinx_design==0.3.0 -sphinx_external_toc==0.3.1 -sphinxcontrib-applehelp==1.0.4 -sphinxcontrib-bibtex==2.5.0 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.1 -sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 -SQLAlchemy==1.4.39 -srsly==2.4.6 -stack-data==0.3.0 -statsmodels==0.13.2 -stevedore==4.0.0 -tabulate==0.9.0 -tensorboard==2.9.1 -tensorboard-data-server==0.6.1 -tensorboard-plugin-wit==1.8.1 -tensorflow-estimator==2.9.0 -tensorflow-gpu==2.9.1 -tensorflow-io-gcs-filesystem==0.26.0 -termcolor==1.1.0 -terminado==0.15.0 -thinc==8.1.10 -threadpoolctl==3.1.0 -tinycss2==1.1.1 -torch==1.13.1+cu116 -torch-tb-profiler==0.4.0 -torchaudio==0.13.1+cu116 -torchmetrics==0.9.3 -torchvision==0.14.1+cu116 -tornado==6.2 -tqdm==4.64.0 -traitlets==5.3.0 -tsai==0.3.6 -typer==0.7.0 -typing_extensions==4.3.0 -uc-micro-py==1.0.1 -urllib3==1.26.11 -virtualenv==20.23.0 -wasabi==1.1.1 -wcwidth==0.2.5 -webencodings==0.5.1 -Werkzeug==2.2.1 -wget==3.2 -widgetsnbextension==3.6.1 -wrapt==1.14.1 -yarl==1.8.1 -zipp==3.15.0 diff --git a/result_long_term_forecast.txt b/result_long_term_forecast.txt new file mode 100644 index 0000000..3ad3d59 --- /dev/null +++ b/result_long_term_forecast.txt @@ -0,0 +1,6 @@ +long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0 +mse:4.545623302459717, mae:1.408710241317749 + +long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0 +mse:4.793543338775635, mae:1.6582401990890503 + diff --git a/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy new file mode 100644 index 0000000..99455cc Binary files /dev/null and b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy differ diff --git a/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy new file mode 100644 index 0000000..80771e7 Binary files /dev/null and 
b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy differ diff --git a/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy new file mode 100644 index 0000000..7347890 Binary files /dev/null and b/results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy differ diff --git a/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy new file mode 100644 index 0000000..cef2b30 Binary files /dev/null and b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/metrics.npy differ diff --git a/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy new file mode 100644 index 0000000..2801257 Binary files /dev/null and b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/pred.npy differ diff --git a/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy new file mode 100644 index 0000000..7347890 Binary files /dev/null and b/results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/true.npy differ diff --git a/run.py b/run.py new file mode 100644 index 0000000..b6f5f2c --- /dev/null +++ b/run.py @@ -0,0 +1,164 @@ +import argparse +import os +import torch +from exp.exp_long_term_forecasting import Exp_Long_Term_Forecast +from exp.exp_classification import Exp_Classification +import random +import numpy as np + +if __name__ == '__main__': + fix_seed = 2021 + random.seed(fix_seed) + torch.manual_seed(fix_seed) + np.random.seed(fix_seed) + + parser = argparse.ArgumentParser(description='TimesNet') + + # basic config + parser.add_argument('--task_name', type=str, required=True, default='long_term_forecast', + help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]') + parser.add_argument('--is_training', type=int, required=True, default=1, help='status') + parser.add_argument('--model_id', type=str, required=True, default='test', help='model id') + parser.add_argument('--model', type=str, required=True, default='Transformer', + choices=['Transformer', 'DLinear'], help='model name') + + # data loader + parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type') + parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file') + parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file') + parser.add_argument('--features', type=str, default='M', + 
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate') + parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task') + parser.add_argument('--freq', type=str, default='h', + help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h') + parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints') + + # forecasting task + parser.add_argument('--seq_len', type=int, default=96, help='input sequence length') + parser.add_argument('--label_len', type=int, default=48, help='start token length') + parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length') + parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4') + + # imputation task + parser.add_argument('--mask_rate', type=float, default=0.25, help='mask ratio') + + # anomaly detection task + parser.add_argument('--anomaly_ratio', type=float, default=0.25, help='prior anomaly ratio (%)') + + # model define + parser.add_argument('--top_k', type=int, default=5, help='for TimesBlock') + parser.add_argument('--num_kernels', type=int, default=6, help='for Inception') + parser.add_argument('--enc_in', type=int, default=7, help='encoder input size') + parser.add_argument('--dec_in', type=int, default=7, help='decoder input size') + parser.add_argument('--c_out', type=int, default=7, help='output size') + parser.add_argument('--d_model', type=int, default=512, help='dimension of model') + parser.add_argument('--n_heads', type=int, default=8, help='num of heads') + parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers') + parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers') + parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn') + parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average') + parser.add_argument('--factor', type=int, default=1, help='attn factor') + parser.add_argument('--distil', action='store_false', + help='whether to use distilling in encoder, using this argument means not using distilling', + default=True) + parser.add_argument('--dropout', type=float, default=0.1, help='dropout') + parser.add_argument('--embed', type=str, default='timeF', + help='time features encoding, options:[timeF, fixed, learned]') + parser.add_argument('--activation', type=str, default='gelu', help='activation') + parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder') + + # optimization + parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers') + parser.add_argument('--itr', type=int, default=1, help='number of experiment repetitions') + parser.add_argument('--train_epochs', type=int, default=10, help='train epochs') + parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data') + parser.add_argument('--patience', type=int, default=3, help='early stopping patience') + parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate') + parser.add_argument('--des', type=str, default='test', help='exp description') + parser.add_argument('--loss', type=str, default='MSE', help='loss function') + parser.add_argument('--lradj', type=str, 
default='type1', help='adjust learning rate') + parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False) + + # GPU + parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu') + parser.add_argument('--gpu', type=int, default=0, help='gpu') + parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False) + parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multiple gpus') + + # de-stationary projector params + parser.add_argument('--p_hidden_dims', type=int, nargs='+', default=[128, 128], + help='hidden layer dimensions of projector (List)') + parser.add_argument('--p_hidden_layers', type=int, default=2, help='number of hidden layers in projector') + + + args = parser.parse_args() + args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False + + if args.use_gpu and args.use_multi_gpu: + args.devices = args.devices.replace(' ', '') + device_ids = args.devices.split(',') + args.device_ids = [int(id_) for id_ in device_ids] + args.gpu = args.device_ids[0] + + print('Args in experiment:') + print(args) + + if args.task_name == 'classification': + Exp = Exp_Classification + else: + Exp = Exp_Long_Term_Forecast + + if args.is_training: + for ii in range(args.itr): + # setting record of experiments + setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format( + args.task_name, + args.model_id, + args.model, + args.data, + args.features, + args.seq_len, + args.label_len, + args.pred_len, + args.d_model, + args.n_heads, + args.e_layers, + args.d_layers, + args.d_ff, + args.factor, + args.embed, + args.distil, + args.des, ii) + + exp = Exp(args) # set experiments + print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting)) + exp.train(setting) + + print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) + exp.test(setting) + torch.cuda.empty_cache() + else: + ii = 0 + setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format( + args.task_name, + args.model_id, + args.model, + args.data, + args.features, + args.seq_len, + args.label_len, + args.pred_len, + args.d_model, + args.n_heads, + args.e_layers, + args.d_layers, + args.d_ff, + args.factor, + args.embed, + args.distil, + args.des, ii) + + exp = Exp(args) # set experiments + print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) + exp.test(setting, test=1) + torch.cuda.empty_cache() diff --git a/scripts/Exchange_script/Transformer.sh b/scripts/Exchange_script/Transformer.sh new file mode 100644 index 0000000..8488afc --- /dev/null +++ b/scripts/Exchange_script/Transformer.sh @@ -0,0 +1,83 @@ +export CUDA_VISIBLE_DEVICES=4 + + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/exchange_rate/ \ + --data_path exchange_rate.csv \ + --model_id Exchange_96_96 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 96 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 8 \ + --dec_in 8 \ + --c_out 8 \ + --des 'Exp' \ + --itr 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/exchange_rate/ \ + --data_path exchange_rate.csv \ + --model_id Exchange_96_192 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 192 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 8 \ + --dec_in 8 \ + --c_out 8 \ + --des 
'Exp' \ + --itr 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/exchange_rate/ \ + --data_path exchange_rate.csv \ + --model_id Exchange_96_336 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 336 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 8 \ + --dec_in 8 \ + --c_out 8 \ + --des 'Exp' \ + --itr 1 \ + --train_epochs 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/exchange_rate/ \ + --data_path exchange_rate.csv \ + --model_id Exchange_96_720 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 720 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 8 \ + --dec_in 8 \ + --c_out 8 \ + --des 'Exp' \ + --itr 1 \ No newline at end of file diff --git a/scripts/ILI_script/Transformer.sh b/scripts/ILI_script/Transformer.sh new file mode 100644 index 0000000..a7d21c2 --- /dev/null +++ b/scripts/ILI_script/Transformer.sh @@ -0,0 +1,81 @@ +export CUDA_VISIBLE_DEVICES=0 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/illness/ \ + --data_path national_illness.csv \ + --model_id ili_36_24 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 36 \ + --label_len 18 \ + --pred_len 24 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 7 \ + --dec_in 7 \ + --c_out 7 \ + --des 'Exp' \ + --itr 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/illness/ \ + --data_path national_illness.csv \ + --model_id ili_36_36 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 36 \ + --label_len 18 \ + --pred_len 36 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 7 \ + --dec_in 7 \ + --c_out 7 \ + --des 'Exp' \ + --itr 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/illness/ \ + --data_path national_illness.csv \ + --model_id ili_36_48 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 36 \ + --label_len 18 \ + --pred_len 48 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 7 \ + --dec_in 7 \ + --c_out 7 \ + --des 'Exp' \ + --itr 1 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/illness/ \ + --data_path national_illness.csv \ + --model_id ili_36_60 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 36 \ + --label_len 18 \ + --pred_len 60 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 7 \ + --dec_in 7 \ + --c_out 7 \ + --des 'Exp' \ + --itr 1 \ No newline at end of file diff --git a/scripts/ILI_script/Transformer_windows.sh b/scripts/ILI_script/Transformer_windows.sh new file mode 100644 index 0000000..346ff17 --- /dev/null +++ b/scripts/ILI_script/Transformer_windows.sh @@ -0,0 +1 @@ +python run.py --is_training 1 --root_path ./dataset/illness/ --data_path national_illness.csv --model_id ili_36_24 --model $model_name --data custom --features M --seq_len 36 --label_len 18 --pred_len 24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 1 --task_name long_term_forecast \ No newline at end of file diff --git a/scripts/Traffic_script/Transformer.sh b/scripts/Traffic_script/Transformer.sh new file mode 100644 index 0000000..916d021 --- /dev/null +++ b/scripts/Traffic_script/Transformer.sh @@ -0,0 +1,85 @@ +export CUDA_VISIBLE_DEVICES=5 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/traffic/ \ + --data_path traffic.csv \ + --model_id traffic_96_96 \ + --model Transformer \ + --data 
custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 96 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 862 \ + --dec_in 862 \ + --c_out 862 \ + --des 'Exp' \ + --itr 1 \ + --train_epochs 3 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/traffic/ \ + --data_path traffic.csv \ + --model_id traffic_96_192 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 192 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 862 \ + --dec_in 862 \ + --c_out 862 \ + --des 'Exp' \ + --itr 1 \ + --train_epochs 3 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/traffic/ \ + --data_path traffic.csv \ + --model_id traffic_96_336 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 336 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 862 \ + --dec_in 862 \ + --c_out 862 \ + --des 'Exp' \ + --itr 1 \ + --train_epochs 3 + +python -u run.py \ + --is_training 1 \ + --root_path ./dataset/traffic/ \ + --data_path traffic.csv \ + --model_id traffic_96_720 \ + --model Transformer \ + --data custom \ + --features M \ + --seq_len 96 \ + --label_len 48 \ + --pred_len 720 \ + --e_layers 2 \ + --d_layers 1 \ + --factor 3 \ + --enc_in 862 \ + --dec_in 862 \ + --c_out 862 \ + --des 'Exp' \ + --itr 1 \ + --train_epochs 3 diff --git a/sensitivity.ipynb b/sensitivity.ipynb index 5908cc3..f6f727c 100644 --- a/sensitivity.ipynb +++ b/sensitivity.ipynb @@ -40,7 +40,7 @@ "from configurations.config import *\n", "\n", "config = ExperimentConfig(experiment=ExperimentType.ELECTRICITY)\n", - "formatter = config.data_formatter" + "formatter = config.data_provider" ] }, { @@ -276,7 +276,7 @@ " \"bounds\": bounds,\n", " # https://salib.readthedocs.io/en/latest/user_guide/advanced.html#generating-alternate-distributions\n", " \"dists\": dists, \n", - " # \"outputs\": config.data_formatter.targets,\n", + " # \"outputs\": config.data_provider.targets,\n", " # 'groups': None, # if input features have groups\n", " 'sample_scaled': True\n", "})" @@ -541,7 +541,7 @@ "metadata": {}, "outputs": [], "source": [ - "from data_formatter.base import BaseDataFormatter\n", + "from data_provider.base import BaseDataFormatter\n", "\n", "def extract_Y(\n", " input_data: pd.DataFrame,\n", diff --git a/slurm-test.sh b/slurm-test.sh deleted file mode 100644 index e0da24c..0000000 --- a/slurm-test.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -#SBATCH --job-name="test" -#SBATCH --output=outputs/test.out -#SBATCH --partition=gpu -#SBATCH --time=1:00:00 -#SBATCH --gres=gpu:1 -#SBATCH --account=ds--6013 -#SBATCH --mem=32GB - -source /etc/profile.d/modules.sh -source ~/.bashrc - -# this is for when you are using singularity -module load cuda cudnn singularity -singularity run --nv tft_pytorch.sif python test_tft.py - -# module load cuda-toolkit cudnn anaconda3 - -# conda deactivate -# conda activate ml - -# python inference.py \ No newline at end of file diff --git a/slurm-train.sh b/slurm-train.sh index abbdcbc..5f7ee1c 100644 --- a/slurm-train.sh +++ b/slurm-train.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash #SBATCH --job-name="total" -#SBATCH --output=outputs/train.out +#SBATCH --output=train.out #SBATCH --partition=gpu -#SBATCH --time=1:00:00 -#SBATCH --account=ds--6013 +#SBATCH --time=24:00:00 +#SBATCH --account=bii_dsc_community #SBATCH --gres=gpu:v100:1 #SBATCH --mem=24GB @@ -12,7 +12,7 @@ source ~/.bashrc # this is for when you are using singularity 
module load cuda cudnn singularity -singularity run --nv tft_pytorch.sif python train_tft.py +singularity run --nv tft_pytorch.sif python run.py # singularity run --nv tft_pytorch.sif python train_tft.py --input-file=2022_May_age_groups/Total.csv --output=tft_pytorch/scratch/total diff --git a/test_results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf b/test_results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf new file mode 100644 index 0000000..c863a94 Binary files /dev/null and b/test_results/long_term_forecast_ili_36_24_DLinear_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf differ diff --git a/test_results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf b/test_results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf new file mode 100644 index 0000000..a2c3f39 Binary files /dev/null and b/test_results/long_term_forecast_ili_36_24_Transformer_custom_ftM_sl36_ll18_pl24_dm512_nh8_el2_dl1_df2048_fc3_ebtimeF_dtTrue_Exp_0/0.pdf differ diff --git a/train_tft.ipynb b/train_tft.ipynb deleted file mode 100644 index 7241dd6..0000000 --- a/train_tft.ipynb +++ /dev/null @@ -1,283 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os, gc, torch\n", - "\n", - "import warnings\n", - "warnings.filterwarnings(\"ignore\")\n", - "import pandas as pd\n", - "\n", - "import pytorch_lightning as pl\n", - "from pytorch_lightning.callbacks import EarlyStopping\n", - "from pytorch_lightning.loggers import TensorBoardLogger\n", - "\n", - "from pytorch_forecasting import TemporalFusionTransformer, TimeSeriesDataSet\n", - "from pytorch_forecasting.data import GroupNormalizer, MultiNormalizer\n", - "from pytorch_forecasting.metrics import RMSE, MultiLoss\n", - "\n", - "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", - "device = torch.device(device)\n", - "\n", - "print(f'Using {device} backend.')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from configurations.config import *\n", - "@dataclass\n", - "class arguments:\n", - " experiment = 'traffic'\n", - " show_progress = True\n", - "\n", - "config = ExperimentConfig(experiment=arguments.experiment)\n", - "formatter = config.data_formatter" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df = formatter.read_file()\n", - "print(f'Total data shape {df.shape}')\n", - "\n", - "from utils.metric import show_result\n", - "from utils.data import create_TimeSeriesDataSet\n", - "from utils.model import seed_torch\n", - "seed_torch(seed=config.seed)\n", - "train, validation, test = formatter.split(df)\n", - "\n", - "parameters = config.model_parameters(ModelType.TFT)\n", - "batch_size = parameters['batch_size']\n", - "_, train_dataloader = create_TimeSeriesDataSet(\n", - " train, formatter, batch_size, train=True\n", - ")\n", - "_, val_dataloader = create_TimeSeriesDataSet(validation, formatter, batch_size)\n", - "test_timeseries, test_dataloader = create_TimeSeriesDataSet(test, formatter, batch_size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - 
"import tensorflow as tf\n", - "# click this and locate the lightning_logs folder path and select that folder. \n", - "# this will load tensorbaord visualization\n", - "import tensorboard as tb\n", - "tf.io.gfile = tb.compat.tensorflow_stub.io.gfile\n", - "\n", - "early_stop_callback = EarlyStopping(\n", - " monitor=\"val_loss\", min_delta=0, \n", - " patience=parameters['early_stopping_patience']\n", - " , verbose=True, mode=\"min\"\n", - ")\n", - "best_checkpoint = pl.callbacks.ModelCheckpoint(\n", - " dirpath=config.experiment_folder, monitor=\"val_loss\", \n", - " filename=\"best-{epoch}\"\n", - ")\n", - "latest_checkpoint = pl.callbacks.ModelCheckpoint(\n", - " dirpath=config.experiment_folder, \n", - " every_n_epochs=1, filename=\"latest-{epoch}\"\n", - ")\n", - "\n", - "logger = TensorBoardLogger(config.experiment_folder) # logging results to a tensorboard\n", - "\n", - "# https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-class-api\n", - "trainer = pl.Trainer(\n", - " max_epochs = parameters['epochs'],\n", - " accelerator = 'auto',\n", - " enable_model_summary=True,\n", - " callbacks = [early_stop_callback, best_checkpoint, latest_checkpoint],\n", - " logger = logger,\n", - " enable_progress_bar = arguments.show_progress,\n", - " check_val_every_n_epoch = 2,\n", - " max_time=pd.to_timedelta(1, unit='minutes')\n", - ")\n", - "\n", - "tft = TemporalFusionTransformer.from_dataset(\n", - " test_timeseries,\n", - " learning_rate= parameters['learning_rate'],\n", - " hidden_size= parameters['hidden_layer_size'],\n", - " attention_head_size=parameters['attention_head_size'],\n", - " dropout=parameters['dropout_rate'],\n", - " loss=MultiLoss([RMSE(reduction='mean') for _ in formatter.targets]), # RMSE(reduction='sqrt-mean')\n", - " optimizer='adam',\n", - " log_interval=1,\n", - " # reduce_on_plateau_patience=2\n", - ")\n", - "\n", - "print(f\"Number of parameters in network: {tft.size()/1e3:.1f}k\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datetime import datetime\n", - "gc.collect()\n", - "\n", - "start = datetime.now()\n", - "print(f'\\n----Training started at {start}----\\n')\n", - "\n", - "trainer.fit(\n", - " tft,\n", - " train_dataloaders=train_dataloader,\n", - " val_dataloaders=val_dataloader,\n", - ")\n", - "end = datetime.now()\n", - "print(f'\\n----Training ended at {end}, elapsed time {end-start}')\n", - "print(f'Best model by validation loss saved at {trainer.checkpoint_callback.best_model_path}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from classes.PredictionProcessor import PredictionProcessor\n", - "\n", - "processor = PredictionProcessor(\n", - " formatter.time_index, formatter.group_id, \n", - " formatter.parameters['horizon'], formatter.targets, \n", - " formatter.parameters['window']\n", - ")\n", - "\n", - "# %%\n", - "from classes.Plotter import *\n", - "\n", - "plotter = PlotResults(\n", - " config.experiment_folder, formatter.time_index, \n", - " formatter.targets, show=arguments.show_progress\n", - ")\n", - "\n", - "best_model_path = trainer.checkpoint_callback.best_model_path\n", - "print(f'Loading best model from {best_model_path}')\n", - "\n", - "# tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# print('\\n---Training 
prediction--\\n')\n", - "# train_predictions, train_index = tft.predict(\n", - "# train_dataloader, return_index=True, \n", - "# show_progress_bar=arguments.show_progress\n", - "# )\n", - "# train_result_merged = processor.align_result_with_dataset(\n", - "# train, train_predictions, train_index\n", - "# )\n", - "\n", - "# show_result(train_result_merged, formatter.targets)\n", - "# plotter.summed_plot(train_result_merged, type='Train_error', plot_error=True)\n", - "# gc.collect()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(f'\\n---Validation results--\\n')\n", - "\n", - "validation_predictions, validation_index = tft.predict(\n", - " val_dataloader, return_index=True, \n", - " show_progress_bar=arguments.show_progress\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "validation_result_merged = processor.align_result_with_dataset(\n", - " validation, validation_predictions, validation_index\n", - ")\n", - "show_result(validation_result_merged, formatter.targets)\n", - "\n", - "plotter.summed_plot(validation_result_merged, type='Validation')\n", - "gc.collect()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(f'\\n---Test results--\\n')\n", - "\n", - "test_predictions, test_index = tft.predict(\n", - " test_dataloader, return_index=True, \n", - " show_progress_bar=arguments.show_progress\n", - ")\n", - "\n", - "test_result_merged = processor.align_result_with_dataset(\n", - " test, test_predictions, test_index\n", - ")\n", - "show_result(test_result_merged, formatter.targets)\n", - "plotter.summed_plot(test_result_merged, 'Test')\n", - "gc.collect()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# train_result_merged['split'] = 'train'\n", - "validation_result_merged['split'] = 'validation'\n", - "test_result_merged['split'] = 'test'\n", - "df = pd.concat([validation_result_merged, test_result_merged])\n", - "df.to_csv(os.path.join(plotter.figPath, 'predictions.csv'), index=False)\n", - "\n", - "print(f'Ended at {datetime.now()}. 
Elapsed time {datetime.now() - start}')" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.11" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/train_tft.py b/train_tft.py deleted file mode 100644 index d956123..0000000 --- a/train_tft.py +++ /dev/null @@ -1,197 +0,0 @@ -import os, gc, torch - -import warnings -warnings.filterwarnings("ignore") -import pandas as pd - -import pytorch_lightning as pl -from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.loggers import TensorBoardLogger - -from pytorch_forecasting import TemporalFusionTransformer -from pytorch_forecasting.metrics import RMSE, MultiLoss - -from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter -from configurations.config import * - -parser = ArgumentParser( - description='Train model', - formatter_class=ArgumentDefaultsHelpFormatter -) - -parser.add_argument( - '--experiment', metavar='-e', - default=ExperimentType.TRAFFIC, - choices=ExperimentType.values(), - help='dataset name of the experiment' -) - -parser.add_argument( - '--disable-progress', - action='store_false', - help='disable the progress bar.' -) - -arguments = parser.parse_args() -show_progress_bar = not arguments.disable_progress -config = ExperimentConfig(experiment=arguments.experiment) -formatter = config.data_formatter - -# Check if running on cpu or gpu -device = "cuda" if torch.cuda.is_available() else "cpu" -device = torch.device(device) - -print(f'Using {device} backend.') - -# Load dataset -df = formatter.load() -print(f'Total data shape {df.shape}') - -from utils.metric import show_result -from utils.data import create_TimeSeriesDataSet -from utils.model import seed_torch -seed_torch(seed=config.seed) -train, validation, test = formatter.split(df) - -parameters = config.model_parameters(ModelType.TFT) -batch_size = parameters['batch_size'] -_, train_dataloader = create_TimeSeriesDataSet( - train, formatter, batch_size, train=True -) -_, val_dataloader = create_TimeSeriesDataSet(validation, formatter, batch_size) -test_timeseries, test_dataloader = create_TimeSeriesDataSet(test, formatter, batch_size) - -import tensorflow as tf -# click this and locate the lightning_logs folder path and select that folder. 
-# this will load tensorbaord visualization -import tensorboard as tb -tf.io.gfile = tb.compat.tensorflow_stub.io.gfile - -early_stop_callback = EarlyStopping( - monitor="val_loss", min_delta=0, - patience=parameters['early_stopping_patience'] - , verbose=True, mode="min" -) -best_checkpoint = pl.callbacks.ModelCheckpoint( - dirpath=config.experiment_folder, monitor="val_loss", - filename="best-{epoch}" -) -latest_checkpoint = pl.callbacks.ModelCheckpoint( - dirpath=config.experiment_folder, - every_n_epochs=1, filename="latest-{epoch}" -) - -logger = TensorBoardLogger(config.experiment_folder) # logging results to a tensorboard - -# https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-class-api -trainer = pl.Trainer( - max_epochs = parameters['epochs'], - accelerator = 'auto', - enable_model_summary=True, - callbacks = [early_stop_callback, best_checkpoint, latest_checkpoint], - logger = logger, - enable_progress_bar = show_progress_bar, - check_val_every_n_epoch = 1, - gradient_clip_val=parameters['gradient_clip_val'], - max_time=pd.to_timedelta(2, unit='minutes') -) - -tft = TemporalFusionTransformer.from_dataset( - test_timeseries, - learning_rate= parameters['learning_rate'], - hidden_size= parameters['hidden_layer_size'], - attention_head_size=parameters['attention_head_size'], - dropout=parameters['dropout_rate'], - loss=MultiLoss([RMSE(reduction='mean') for _ in formatter.targets]), # RMSE(reduction='sqrt-mean') - optimizer='adam', - log_interval=1, - # reduce_on_plateau_patience=2 -) - -print(f"Number of parameters in network: {tft.size()/1e3:.1f}k") - -from datetime import datetime - -gc.collect() - -start = datetime.now() -print(f'\n----Training started at {start}----\n') - -trainer.fit( - tft, - train_dataloaders=train_dataloader, - val_dataloaders=val_dataloader, -) -end = datetime.now() -print(f'\n----Training ended at {end}, elapsed time {end-start}') -print(f'Best model by validation loss saved at {trainer.checkpoint_callback.best_model_path}') - -from classes.PredictionProcessor import PredictionProcessor - -processor = PredictionProcessor( - formatter.time_index, formatter.group_id, - formatter.parameters['horizon'], formatter.targets, - formatter.parameters['window'] -) - -# %% -from classes.Plotter import * - -plotter = PlotResults( - config.experiment_folder, formatter.time_index, - formatter.targets, show=show_progress_bar -) - -best_model_path = trainer.checkpoint_callback.best_model_path -print(f'Loading best model from {best_model_path}') - -# tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path) - -# print('\n---Training prediction--\n') -# train_predictions, train_index = tft.predict( -# train_dataloader, return_index=True, -# show_progress_bar=show_progress_bar -# ) -# train_result_merged = processor.align_result_with_dataset( -# train, train_predictions, train_index -# ) - -# show_result(train_result_merged, formatter.targets) -# plotter.summed_plot(train_result_merged, type='Train_error', plot_error=True) -# gc.collect() - -print(f'\n---Validation results--\n') - -validation_predictions, validation_index = tft.predict( - val_dataloader, return_index=True, - show_progress_bar=show_progress_bar -) - -validation_result_merged = processor.align_result_with_dataset( - validation, validation_predictions, validation_index -) -show_result(validation_result_merged, formatter.targets) -plotter.summed_plot(validation_result_merged, type='Validation') -gc.collect() - -print(f'\n---Test results--\n') - -test_predictions, test_index 
= tft.predict( - test_dataloader, return_index=True, - show_progress_bar=show_progress_bar -) - -test_result_merged = processor.align_result_with_dataset( - test, test_predictions, test_index -) -show_result(test_result_merged, formatter.targets) -plotter.summed_plot(test_result_merged, 'Test') -gc.collect() - -# train_result_merged['split'] = 'train' -validation_result_merged['split'] = 'validation' -test_result_merged['split'] = 'test' -df = pd.concat([validation_result_merged, test_result_merged]) -df.to_csv(os.path.join(plotter.figPath, 'predictions.csv'), index=False) - -print(f'Ended at {datetime.now()}. Elapsed time {datetime.now() - start}') \ No newline at end of file diff --git a/trainer/trainer.py b/trainer/trainer.py deleted file mode 100644 index ae71d4b..0000000 --- a/trainer/trainer.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -import torch -from torchvision.utils import make_grid -from base import BaseTrainer -from utils import inf_loop, MetricTracker - - -class Trainer(BaseTrainer): - """ - Trainer class - """ - def __init__(self, model, criterion, metric_ftns, optimizer, config, device, - data_loader, valid_data_loader=None, lr_scheduler=None, len_epoch=None): - super().__init__(model, criterion, metric_ftns, optimizer, config) - self.config = config - self.device = device - self.data_loader = data_loader - if len_epoch is None: - # epoch-based training - self.len_epoch = len(self.data_loader) - else: - # iteration-based training - self.data_loader = inf_loop(data_loader) - self.len_epoch = len_epoch - self.valid_data_loader = valid_data_loader - self.do_validation = self.valid_data_loader is not None - self.lr_scheduler = lr_scheduler - self.log_step = int(np.sqrt(data_loader.batch_size)) - - self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer) - self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer) - - def _train_epoch(self, epoch): - """ - Training logic for an epoch - - :param epoch: Integer, current training epoch. - :return: A log that contains average loss and metric in this epoch. - """ - self.model.train() - self.train_metrics.reset() - for batch_idx, (data, target) in enumerate(self.data_loader): - data, target = data.to(self.device), target.to(self.device) - - self.optimizer.zero_grad() - output = self.model(data) - loss = self.criterion(output, target) - loss.backward() - self.optimizer.step() - - self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx) - self.train_metrics.update('loss', loss.item()) - for met in self.metric_ftns: - self.train_metrics.update(met.__name__, met(output, target)) - - if batch_idx % self.log_step == 0: - self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format( - epoch, - self._progress(batch_idx), - loss.item())) - self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) - - if batch_idx == self.len_epoch: - break - log = self.train_metrics.result() - - if self.do_validation: - val_log = self._valid_epoch(epoch) - log.update(**{'val_'+k : v for k, v in val_log.items()}) - - if self.lr_scheduler is not None: - self.lr_scheduler.step() - return log - - def _valid_epoch(self, epoch): - """ - Validate after training an epoch - - :param epoch: Integer, current training epoch. 
- :return: A log that contains information about validation - """ - self.model.eval() - self.valid_metrics.reset() - with torch.no_grad(): - for batch_idx, (data, target) in enumerate(self.valid_data_loader): - data, target = data.to(self.device), target.to(self.device) - - output = self.model(data) - loss = self.criterion(output, target) - - self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid') - self.valid_metrics.update('loss', loss.item()) - for met in self.metric_ftns: - self.valid_metrics.update(met.__name__, met(output, target)) - self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True)) - - # add histogram of model parameters to the tensorboard - for name, p in self.model.named_parameters(): - self.writer.add_histogram(name, p, bins='auto') - return self.valid_metrics.result() - - def _progress(self, batch_idx): - base = '[{}/{} ({:.0f}%)]' - if hasattr(self.data_loader, 'n_samples'): - current = batch_idx * self.data_loader.batch_size - total = self.data_loader.n_samples - else: - current = batch_idx - total = self.len_epoch - return base.format(current, total, 100.0 * current / total) diff --git a/tsai.ipynb b/tsai.ipynb new file mode 100644 index 0000000..890fabe --- /dev/null +++ b/tsai.ipynb @@ -0,0 +1,1003 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "os : Windows-10-10.0.19045-SP0\n", + "python : 3.10.11\n", + "tsai : 0.3.7\n", + "fastai : 2.7.12\n", + "fastcore : 1.5.29\n", + "torch : 1.13.1+cu117\n", + "device : 1 gpu (['NVIDIA GeForce RTX 3060 Laptop GPU'])\n", + "cpu cores : 14\n", + "threads per cpu : 1\n", + "RAM : 31.69 GB\n", + "GPU memory : [6.0] GB\n" + ] + } + ], + "source": [ + "from tsai.all import *\n", + "my_setup()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## UCI Electricity " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "electricity : (26304, 322) ██████████| 100.01% [54001664/53995526 00:05<00:00]\n" + ] + } + ], + "source": [ + "# https://forecastingdata.org/\n", + "# https://archive.ics.uci.edu/dataset/321/electricityloaddiagrams20112014\n", + "# dsid = \"electricity\"\n", + "# try:\n", + "# df = get_long_term_forecasting_data(\n", + "# dsid, target_dir='datasets/forecasting/', \n", + "# force_download=False, return_df=True\n", + "# )\n", + "# print(f\"{dsid:15}: {str(df.shape):15}\")\n", + "# remove_dir('./data/forecasting/', False)\n", + "# except Exception as e:\n", + "# print(f\"{dsid:15}: {str(e):15}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from tsai.data.external import get_Monash_forecasting_data\n", + "import pandas as pd" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dataset: electricity_hourly_dataset\n", + "converting data to dataframe...\n", + "...done\n", + "\n", + "freq : hourly\n", + "forecast_horizon : 168\n", + "contain_missing_values : False\n", + "contain_equal_length : True\n", + "\n", + "exploding dataframe...\n", + "...done\n", + "\n", + "\n", + "data.shape: 
(8443584, 3)\n", + "electricity_hourly_dataset: (8443584, 3) \n" + ] + } + ], + "source": [ + "dsid = \"electricity_hourly_dataset\"\n", + "try:\n", + " df = get_Monash_forecasting_data(\n", + " dsid, path='datasets/forecasting/'\n", + " )\n", + " print(f\"{dsid:15}: {str(df.shape):15}\")\n", + " # del df; gc.collect()\n", + " # remove_dir('datasets/forecasting/', False)\n", + "except Exception as e:\n", + " print(f\"{dsid:15}: {str(e):15}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def add_time_encoding(data:pd.DataFrame, time_column:str=\"date\"):\n", + " df = data.copy()\n", + "\n", + " date = pd.to_datetime(df[time_column])\n", + " earliest_date = date.min()\n", + "\n", + " delta = (date - earliest_date).dt\n", + " df['hours_from_start'] = delta.seconds / 60 / 60 + delta.days * 24\n", + " # df['days_from_start'] = delta.days\n", + " df['hour'] = date.dt.hour\n", + " df['day'] = date.dt.day\n", + " df['weekday'] = date.dt.weekday\n", + " df['month'] = date.dt.month\n", + "\n", + " return df" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
series_nametimestampseries_value
0T12012-01-01 00:00:0114.0
1T12012-01-01 01:00:0118.0
2T12012-01-01 02:00:0121.0
3T12012-01-01 03:00:0120.0
4T12012-01-01 04:00:0122.0
\n", + "
" + ], + "text/plain": [ + " series_name timestamp series_value\n", + "0 T1 2012-01-01 00:00:01 14.0\n", + "1 T1 2012-01-01 01:00:01 18.0\n", + "2 T1 2012-01-01 02:00:01 21.0\n", + "3 T1 2012-01-01 03:00:01 20.0\n", + "4 T1 2012-01-01 04:00:01 22.0" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "time_column = 'timestamp'\n", + "id_column = 'series_name'\n", + "target_column = 'series_value'\n", + "\n", + "df = df[df[time_column] >= pd.to_datetime('2012-01-01')].reset_index(drop=True)\n", + "df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "df = add_time_encoding(df, time_column='timestamp')\n", + "df.to_csv('datasets/forecasting/electricity_hourly_dataset.csv', index=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": {}, + "outputs": [], + "source": [ + "def summary(df:pd.DataFrame, time_column, id_column):\n", + " T = df[time_column].nunique()\n", + " n_ids = df[id_column].nunique()\n", + " n_samples = df.shape[0]\n", + "\n", + " output = f\"\\\n", + " The dataset has {T} time steps, {n_ids} ids.\\n\\\n", + " Sample size {n_samples}, per user {n_samples/n_ids}.\\n\\\n", + " Start {df[time_column].min()}, end {df[time_column].max()}.\\n\"\n", + " \n", + " print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The dataset has 26304 time steps, 321 ids.\n", + " Sample size 8443584, per user 26304.0.\n", + " Start 2012-01-01 00:00:01, end 2014-12-31 23:00:01.\n", + "\n" + ] + } + ], + "source": [ + "summary(df, time_column, id_column)" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
series_nametimestampseries_valuehours_from_starthourdayweekdaymonth
0T12014-01-01 00:00:0112.00.00121
1T12014-01-01 01:00:0113.01.01121
2T12014-01-01 02:00:0113.02.02121
\n", + "
" + ], + "text/plain": [ + " series_name timestamp series_value hours_from_start hour day \\\n", + "0 T1 2014-01-01 00:00:01 12.0 0.0 0 1 \n", + "1 T1 2014-01-01 01:00:01 13.0 1.0 1 1 \n", + "2 T1 2014-01-01 02:00:01 13.0 2.0 2 1 \n", + "\n", + " weekday month \n", + "0 2 1 \n", + "1 2 1 \n", + "2 2 1 " + ] + }, + "execution_count": 91, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df.head(3)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "fcst_history = 168\n", + "fcst_horizon = 24\n", + "stride = 1\n", + "valid_size=0.1\n", + "test_size=0.2" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['hours_from_start', 'hour', 'day', 'weekday', 'month'] series_value\n" + ] + } + ], + "source": [ + "x_vars = [col for col in df.columns if col not in [time_column, id_column, target_column]]\n", + "y_vars = target_column\n", + "print(x_vars, y_vars)" + ] + }, + { + "cell_type": "code", + "execution_count": 118, + "metadata": {}, + "outputs": [], + "source": [ + "from tsai.data.preparation import prepare_forecasting_data\n", + "from tsai.data.validation import get_forecasting_splits" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(78912, 8)\n" + ] + } + ], + "source": [ + "temp = df[df[id_column].isin([f'T{num}' for num in range(301, 304)])]\n", + "print(temp.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from fastai.learner import Learner\n", + "from tsai.models import FCN\n", + "from tsai.all import *\n", + "from fastai.metrics import mse, mae\n", + "\n", + "# https://docs.fast.ai/callback.tracker.html\n", + "from fastai.callback.tracker import EarlyStoppingCallback, ReduceLROnPlateau, SaveModelCallback\n", + "from tsai.utils import cat2int" + ] + }, + { + "cell_type": "code", + "execution_count": 328, + "metadata": {}, + "outputs": [], + "source": [ + "cat_names = [] # ['series_name']\n", + "cont_names = ['hours_from_start', 'hour', 'day', 'weekday', 'month', target_column]\n", + "\n", + "for feature in cat_names:\n", + " temp[feature] = cat2int(temp[feature].astype(str).values)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABAgAAABiCAYAAADdueE1AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAAAWDElEQVR4nO3de3RV5ZnH8d+TBAgaREIit2ixYICQGmIEtaWIOFK1QgcRRVCkA9rFOGMtLaK2VYuM48xyjY63esEL3hAv1AvitSJ4WbUmyPECCQkKBSWQEAIigrk888fZsWfSXAiccDic72etrJz97ne/+zl5kr1ynvPu95i7CwAAAAAAJLakWAcAAAAAAABijwIBAAAAAACgQAAAAAAAACgQAAAAAAAAUSAAAAAAAACiQAAAAAAAAESBAAAQx8zsLTObHjyebGav7cdYfc3MzSwl2H7ZzC6JUpw/NrOSiO11ZvZP0Rg7GO9TMxsZrfEAAEBiokAAAIgpMxtuZu+Z2XYzqzKzd81saFvHcffH3X10xLhuZv33NS53P8vd57fWb2/O4+5vu/uAfY2l0fkeNrO5jcYf7O5vRWN8AACQuFJiHQAAIHGZ2RGSFkuaIekpSR0l/VjSnljGFU1mluLutbGOAwAAoDXMIAAAxFK2JLn7Anevc/dv3P01d/9IksxsajCj4M5ghkGxmZ3e1EBB33eCx8uD5pCZ7TSzC5ron2xmt5hZpZl9JumnjfZH3r7Q38yWBTFUmtnC5s5jZiPNbKOZzTazckkPNbQ1CmGoma0ys21m9pCZpTZ+HhGxeBDDZZImS7oqON+Lwf7vblkws05mdpuZfRl83WZmnYJ9DbH92sy2mNkmM/t5q1kCAAAJgQIBACCW1kiqM7P5ZnaWmXVros9JktZKypB0vaRFZpbe0qDuPiJ4mOfuae6+sIlul0o6R1K+pBMlndfCkDdKek1SN0lZku5o5Tw9JaVL+p6ky5oZc7Kkn0jqp3Ch5HctPafgfPdJelzSfwfnG9NEt99KOlnSEEl5koY1GrunpK6S+kiaJumuZn7uAAAgwVAgAADEjLvvkDRckku6X1KFmb1gZj0ium2RdJu71wQvwEvU6N3+fXR+MO4Gd6+S9J8t9K1R+MV+b3ff7e7vtNBXkuolXe/ue9z9m2b63Blx7v+QdGFbn0AzJkua4+5b3L1C0h8kXRyxvybYX+PuSyTtlBSV9REAAEB8o0AAAIgpd1/t7lPdPUtSrqTekm6L6PKFu3vE9vqgz/7qLWlDo3Gbc5Ukk/TX4BMD/qWVsSvcfXcrfRqfOxrPScE4kc+l8dhbG62JsEtSWpTODQAA4hgFAgDAQcPdiyU9rHChoEEfM7OI7WMkfRmF022SdHSjcZuLq9zdL3X33pJ+IenuVj65wFvY16DxuRue09eSDmvYYWY92zj2lwrPdmhqbAAAgGZRIAAAxIyZDQwWzMsKto9WeKr9XyK6HSXpCjPrYGYTJA2StGQvht8s6fst7H8qGDcruAf/6hbinNAQo6RtCr9Ir9/L8zTn8uDc6QqvG9CwfkFI0mAzGxIsXHhDo+NaO98CSb8zs0wzy5B0naTH9iE+AACQYCgQAABi6SuFFyF838y+Vrgw8ImkX0f0eV/ScZIqFb5X/zx337oXY98gab6ZVZvZ+U3sv1/Sqwq/IF8haVELYw0NYtwp6QVJv3T3z/byPM15QuGFDz9TeBHGuZLk7mskzZH0hqRSSY3XO3hAUk5wvueaGHeupEJJH0n6OHhuc9sQFwAASFD2/2/rBADg4GFmUyVNd/fhsY4FAADgUMcMAgAAAAAAQIEAAAAAAABwiwEAAAAAABAzCAAAAAAAgCgQAAAAAAAASSntMahZhkt922NoAAAAtIOCY4tiHUJCWJ16WKxDAPbbrtW7Kt09M9ZxIPrapUAQLg4Uts/QAAAAiLrCuRbrEBJCwcCBsQ4B2G8rClasj3UMaB/cYgAAAAAAACgQAAAAAAAACgQAAAAAAEDttgYBAAAAAAAHr6KioqNSUlLmScpVYrx5Xi/pk9ra2ukFBQVbmupAgQAAAAAAkHBSUlLm9ezZc1BmZua2pKQkj3U87a2+vt4qKipyysvL50ka21SfRKiSAAAAAADQWG5mZuaORCgOSFJSUpJnZmZuV3jGRNN9DmA8AAAAAAAcLJISpTjQIHi+zdYBKBAAAAAAABADZlZw6aWXZjVsX3fddT1mzpzZO1bxtLoGgZk9KOkcSVvcvdmpCAAAAAAAxKu1a9cWRHO8fv36FbXWp2PHjr5kyZJumzZtKu/Vq1dtNM+/L/ZmBsHDks5s5zgAAAAAAEgoycnJPmXKlIqbbrqpR+N9JSUlHU8++eTs7OzsnFNOOSW7tLS0oySNHz++79SpU4/Oz88fmJWV9YOHHnqoW8Mxv//973vk5uYOys7OzvnVr37V5pkIrRYI3H25pKq2DgwAAAAAAFo2a9asLYsWLUrfunVrcmT7jBkzjpk8efLWNWvWrLrgggu2zpgx4+iGfZs3b+5QWFhY/Pzzz5def/31fSRp0aJFR5SVlaV+9NFHq1evXr1q5cqVh7388stpbYklamsQmNllZlZoZoVSRbSGBQAAAADgkJWenl4/YcKErTfffPNRke0ffvjh4ZdddlmVJM2YMaOqqKjouxf7Y8eOrU5OTlZBQcHurVu3dpCkV1555Yjly5cfkZOTkzN48OCctWvXphYXF6e2JZZW1yDYW+5+n6T7JMnsxIRaCRIAAAAAgH11zTXXbD7hhBNyJk6cWLk3/VNTU797ze3u332/8sorN82aNWuvxmgKn2IAAAAAAEAM9ejRo27MmDHbnnjiiYyGtvz8/K/nzZvXTZLuvffe9BNPPHFnS2OcddZZOx599NGM7du3J0nS559/3uGLL75o06QACgQAAAAAAMTYb3/72/Lq6urvXtDfc889f3v00UczsrOzcxYsWND97rvv3tDS8eeee+6OCRMmVA0dOnRgdnZ2zrhx4/pVV1cnt3RMY9YwHaHZDmYLJI2UlCFps6Tr3f2Blo850aXCtsQBAACAGPLHLdYhJISCgSfEOgRgv60oWFHk7ifGOo79FQqF1uXl5e3zdPx4FQqFMvLy8vo2ta/V6QbufmHUIwIAAAAAAAcVbjEAAAAAAAAUCAAAAAAAAAUCAAAAAAAgCgQAAAAAAEAUCAAAAAAAgPbiUwwAAAAAAEB0lZeXJ48cOXKAJFVWVnZISkry9PT0WklauXLl6tTUVG/u2OXLlx/24IMPdn/44Yc3RDMmCgQAAAAAgIRnpoJojueuopb29+zZs664uHiVJM2cObN3Wlpa3Zw5czY37K+pqVGHDh2aPHbEiBG7RowYsSua8UrcYgAAAAAAwEFh/PjxfSdNmnTM8ccfP3DGjBlZS5cuPWzIkCEDBw0alJOfnz8wFAp1kqTFixd3Oe200/pL4eLChAkT+g4bNmxAVlbWD+bOnXvUvp6/nWYQFO2UrKR9xsYBkiGpMtZBYL+Qw0MDeYx/5PDQcMjn0SbHOoJ2d5DkcEWsA4hnB0kOIe
l7sQ7gULZp06aOK1asKE5JSVFVVVXSBx98UNyhQwc999xzXa666qqsV199dW3jY8rKylLfe++9kurq6uRBgwblzpo1q6JTp07N3qLQnPa6xaDE3U9sp7FxAJhZITmMb+Tw0EAe4x85PDSQx/hHDuMfOUSiOPfcc7elpIRfqldVVSVfcMEFx65bty7VzLympsaaOmb06NHVnTt39s6dO9emp6fXbNy4MaVfv341bT03txgAAAAAAHCQSEtLq294PHv27D6nnnrqV6WlpZ+++OKLZd9++22Tr+EjZwskJyertra2yUJCaygQAAAAAABwENqxY0dyVlbWt5J07733ZrT3+dqrQHBfO42LA4ccxj9yeGggj/GPHB4ayGP8I4fxjxwi4cyePbv8hhtuyBo0aFBObW1tu5/P3Nu8bgEAAAAAAHEtFAqty8vLS7iFL0OhUEZeXl7fpvZxiwEAAAAAAIhugcDMzjSzEjMrM7Orozk22s7MHjSzLWb2SURbupm9bmalwfduQbuZ2e1B7j4ysxMijrkk6F9qZpdEtBeY2cfBMbeb2T4thIHmmdnRZrbUzFaZ2adm9sugnTzGETNLNbO/mlkoyOMfgvZjzez94Ge/0Mw6Bu2dgu2yYH/fiLGuCdpLzOwnEe1cfw8AM0s2sw/NbHGwTQ7jjJmtC655K82sMGjjmhpHzOxIM3vGzIrNbLWZnUIO44uZDQj+Bhu+dpjZleQRiL2oFQjMLFnSXZLOkpQj6UIzy4nW+NgnD0s6s1Hb1ZL+7O7HSfpzsC2F83Zc8HWZpD9K4X+aJF0v6SRJwyRd33CxDvpcGnFc43Nh/9VK+rW750g6WdLlwd8VeYwveySNcvc8SUMknWlmJ0v6L0m3unt/SdskTQv6T5O0LWi/NeinIPcTJQ1WOE93By9Yuf4eOL+UtDpimxzGp9PcfUjEx6VxTY0v/yvpFXcfKClP4b9JchhH3L0k+BscIqlA0i5JfxJ5BGIumjMIhkkqc/fP3P1bSU9K+lkUx0cbuftySVWNmn8maX7weL6kf45of8TD/iLpSDPrJeknkl539yp33ybpdYVf3PSSdIS7/8XDC1k8EjEWosTdN7n7iuDxVwr/E9RH5DGuBPnYGWx2CL5c0ihJzwTtjfPYkN9nJJ0evPPxM0lPuvsed/9cUpnC116uvweAmWVJ+qmkecG2iRweKrimxgkz6ypphKQHJMndv3X3apHDeHa6pLXuvl7kEYi5aBYI+kjaELG9MWjDwaWHu28KHpdL6hE8bi5/LbVvbKId7cTCU5TzJb0v8hh3gneJV0raovA/MGslVbt7w3K0kT/77/IV7N8uqbvanl9E122SrpLU8NnE3UUO45FLes3MiszssqCNa2r8OFZShaSHLHy7zzwzO1zkMJ5NlLQgeEwegRhjkcIEFlRU+RiLOGBmaZKelXSlu++I3Ece44O71wVTKbMUfrd4YGwjQluY2TmStrh7UaxjwX4b7u4nKDxl+XIzGxG5k2vqQS9F0gmS/uju+ZK+1t+noUsih/HEwuu2jJX0dON95BGIjWgWCL6QdHTEdlbQhoPL5mDalYLvW4L25vLXUntWE+2IMjProHBx4HF3XxQ0k8c4FUyFXSrpFIWnSKYEuyJ/9t/lK9jfVdJWtT2/iJ4fSRprZusUnv4/SuH7oMlhnHH3L4LvWxS+53mYuKbGk42SNrr7+8H2MwoXDMhhfDpL0gp33xxsk0cklJNOOin72WefPSKybc6cOUdNnjz5mKb6Dxs2bMDy5csPk6RTTz21f2VlZXLjPjNnzux93XXX9fjHo/dONAsEH0g6zsIrOndUeLrQC1EcH9HxgqSGFV4vkfR8RPuUYJXYkyVtD6Z4vSpptJl1CxZ9GS3p1WDfDjM7ObivdkrEWIiS4Gf7gKTV7v4/EbvIYxwxs0wzOzJ43FnSGQqvJ7FU0nlBt8Z5bMjveZLeDN5JeUHSRAuvkH+swosu/VVcf9udu1/j7lnu3lfhn++b7j5Z5DCumNnhZtal4bHC18JPxDU1brh7uaQNZjYgaDpd0iqRw3h1of5+e4FEHhFrT1hBVL9aMWHChKoFCxakR7Y9++yz6RdddFHjdeT+wbJly8oyMjLq9ufpNiWl9S57x91rzezfFP5DTZb0oLt/Gq3x0XZmtkDSSEkZZrZR4VVeb5b0lJlNk7Re0vlB9yWSzlZ4waxdkn4uSe5eZWY3KvzPqyTNcfeGX9h/VfiTEjpLejn4QnT9SNLFkj4O7l+XpGtFHuNNL0nzLbxSfZKkp9x9sZmtkvSkmc2V9KGCRbeC74+aWZnCC41OlCR3/9TMnlL4n+FaSZe7e50kcf2Nmdkih/Gkh6Q/hV8vKEXSE+7+ipl9IK6p8eTfJT0eFNM+UzgvSSKHcSUo0p0h6RcRzfx/g4Ry8cUXb7vpppv67N6921JTU72kpKTjli1bOjz22GPps2bNOnr37t1JY8aM2Xbrrbd+2fjYPn36/KCwsHB1r169amfPnt1z4cKFGd27d6/p3bv3t/n5+bv2NSYLv6EBAAAAAEDiCIVC6/Ly8iq/a9iLd/3bZFLraxeddtpp/adNm1Z50UUXVV977bU9KysrU2688cZNPXr0qKutrdUPf/jDAXfcccffTjrppG+GDRs24JZbbtkwYsSIXQ0FgrKyso7Tpk3rW1RUVFxTU6MhQ4bkTJ06tWLOnDmbmztnKBTKyMvL69vUPhYpBAAAAAAgBs4///yqhQsXdpOkRYsWpV988cVV8+fPT8/JyRmUk5OTU1pamhoKhVKbO37p0qVpZ599dnWXLl3q09PT60ePHl29P/FQIAAAAAAAIAYmTZpU/e677x7xzjvvHLZ79+6kzMzM2jvvvLPHsmXL1qxZs2bVqFGjtu/evfuAvW6nQAAAAAAAQAx07dq1/pRTTvlq+vTpfceNG1e1bdu25M6dO9enp6fXbdiwIeWtt97q2tLxo0aN2rlkyZIjd+7cadu2bUt6/fXXj9yfeKK2SCEAAAAAAGibiRMnVk2ZMqXfggULPsvPz9+dm5u7q1+/frm9evX6tqCgYGdLxw4fPnzXuHHjqnJzcwd379695vjjj/96f2JhkUIAAAAAQML5h0UKEwSLFAIAAAAAgBZRIAAAAAAAABQIAAAAAAAABQIAAAAAACAKBAAAAAAAQBQIAAAAAACApJRYBwAAAAAAQKIpLy9PHjly5ABJqqys7JCUlOTp6em1krRy5crVqamp3tLxixcv7tKpU6f6M8444+toxUSBAAAAAACQ8ApWFBREc7yiE4qKWtrfs2fPuuLi4lWSNHPmzN5paWl1c+bM2by347/55ptd0tLS6qJZIOAWAwAAAAAADgJvv/32YUOHDh0wePDgQcOHDz9u/fr1HSRp7ty5R/Xr129wdnZ2zjnnnPP9kpKSjo888kjmPffc02PgwIE5r7zySlo0zs8MAgAAAAAAYszddcUVVxzz0ksvlfXu3bv2/vvv7/ab3/ymz9NPP73u9ttv77l+/fqPO3fu7JWVlckZGRl1U6ZMqWjrrIPWUCAAAAAAA
CDG9uzZk1RaWtp51KhR2ZJUX1+vzMzMGkkaMGDAN+PGjTt27Nix1ZMnT65urxgoEAAAAAAAEGPurv79+3+zcuXK4sb7li5dWvryyy93ef7557vecsstvUpKSj5tjxhYgwAAAAAAgBjr1KlTfVVVVcobb7xxuCTt2bPHCgsLU+vq6rR27dqOY8aM+equu+76YufOncnbt29P7tKlS91XX32VHM0YKBAAAAAAABBjSUlJevLJJ9deffXVWQMGDMgZPHhwzrJly9Jqa2tt0qRJx2ZnZ+fk5ubmTJ8+fUtGRkbd+PHjq1966aUjo7lIobm3+NGKAAAAAAAcckKh0Lq8vLzKWMdxoIVCoYy8vLy+Te1jBgEAAAAAAKBAAAAAAAAAKBAAAAAAAABRIAAAAAAAJKb6+vp6i3UQB1LwfOub20+BAAAAAACQiD6pqKjomihFgvr6equoqOgq6ZPm+qQcwHgAAAAAADgo1NbWTi8vL59XXl6eq8R487xe0ie1tbXTm+vAxxwCAAAAAICEqJIAAAAAAIBWUCAAAAAAAAAUCAAAAAAAAAUCAAAAAAAgCgQAAAAAAEDS/wH1axAcdPPKjwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "X, y = prepare_forecasting_data(\n", + " temp, fcst_history, fcst_horizon, \n", + " x_vars=x_vars, y_vars=target_column\n", + ")\n", + "splits = get_forecasting_splits(\n", + " temp, fcst_history, fcst_horizon, valid_size=valid_size, \n", + " test_size=test_size, show_plot=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "# We'll use inplace=True to preprocess data at dataset initialization. \n", + "# This will significantly speed up training.\n", + "from tsai.data.core import TSDatasets, TSDataLoaders\n", + "from tsai.data.preprocessing import TSStandardize\n", + "\n", + "tfms = [None, [TSRegression()]]\n", + "batch_tfms = TSStandardize(by_sample=True, by_var=True)\n", + "batch_size = 64\n", + "\n", + "datasets = TSDatasets(X, y, splits=splits, tfms=tfms)\n", + "dataloaders = TSDataLoaders.from_dsets(\n", + " datasets.train, datasets.valid, bs=[batch_size, batch_size*2],\n", + " batch_tfms=batch_tfms, \n", + " # num_workers=0\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "arch: TSTPlus(c_in=5 c_out=1 seq_len=168 arch_config={} kwargs={'custom_head': functools.partial(, d=[1, 24])})\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "SuggestedLRs(valley=0.04786301031708717)" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEVCAYAAAD6u3K7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAAA/8ElEQVR4nO3dd3zU9f3A8df7MsneCYRAmGETICCKAwUBi4p1oWIdtVq7rNaftv46tHaPX61WrbvW1oW4FSeioCgQ9l4hhAQyIIPs+fn9cZd4hEu4kPveXZL38/G4R5LPd70Twr3z2WKMQSmllGrP5usAlFJK+SdNEEoppVzSBKGUUsolTRBKKaVc0gShlFLKJU0QSimlXOp1CUJEnhGRYhHZ6ub5V4rIdhHZJiIvWB2fUkr1FNLb5kGIyNlAFfCcMWbcSc4dASwGzjPGlIlIkjGm2BtxKqWUv+t1NQhjzAqg1LlMRIaJyPsisk5EVorIKMehm4FHjDFljms1OSillEOvSxAdeAL4kTFmCvA/wKOO8pHASBH5QkS+EpF5PotQKaX8TKCvA7CaiEQAZwCviEhrcYjjYyAwApgJDARWiMh4Y0y5l8NUSim/0+sTBPZaUrkxJtPFsXxgtTGmEdgvIruxJ4y1XoxPKaX8Uq9vYjLGHMP+5n8FgNhNdBx+A3vtARFJwN7klOODMJVSyu/0ugQhIi8CXwIZIpIvIjcBi4CbRGQTsA1Y4Dj9A+CoiGwHlgN3GWOO+iJupZTyN71umKtSSinP6HU1CKWUUp6hCUIppZRLvWoUU0JCgklPT/d1GEop1WOsW7fuiDEm0dWxXpUg0tPTyc7O9nUYSinVY4jIgY6OaROTUkoplzRBKKWUckkThFJKKZd6VR+EUkp1pLGxkfz8fOrq6nwdik+EhoYycOBAgoKC3L5GE4RSqk/Iz88nMjKS9PR0nBbu7BOMMRw9epT8/HyGDBni9nXaxKSU6hPq6uqIj4/vc8kBQESIj4/vcu1JE0QvZoxhc345upyKUnZ9MTm0OpXvXRNEL7ZizxEufvgL3thY4OtQlFJdFBERAUBubi7jxnW6e7JlNEH0Yh9vLwLg8c9ytBahVFdtXgwPjIP7YuwfNy/2dURepwmilzLGsHxXMZGhgewsrOTT3SW+DkmpnmPzYnj7Nqg4CBj7x7dv61aS+NnPfsYjjzzS9vV9993Hb3/7W2bNmsXkyZMZP348b775Zqf3aG5u5q677mLq1KlMmDCBxx9/HIDrrruON954o+28RYsWnfRe7rA0QYjIHSKyTUS2isiLIhLa7vgNIlIiIhsdr+84HbteRPY4XtdbGWdvtK+kivyyWu48fyT9o0N5/LN9vg5JqZ5j2f3QWHt8WWOtvfwULVy4kMWLv04wixcv5vrrr+f1119n/fr1LF++nDvvvLPT2v7TTz9NdHQ0a9euZe3atTz55JPs37+fm266iWeffRaAiooKVq1axfz580851laWDXMVkVTgNmCMMaZWRBYDVwHPtjv1ZWPMD9tdGwfcC2QBBlgnIm8ZY8qsire3+WRnMQDnj02hqcXw23d3sPFgOZlpMb4NTKmeoCK/a+VumDRpEsXFxRw6dIiSkhJiY2NJSUnhjjvuYMWKFdhsNgoKCigqKiIlJcXlPT788EM2b97MkiVL7OFUVLBnzx7mzJnD97//fUpKSnj11Ve57LLLCAzs/tu71fMgAoF+ItIIhAGH3LxuLvCRMaYUQEQ+AuYBL1oSZS+0fGcJGcmRpMb046ppg3ho2R6eWLGPRxdN8XVoSvm/6IGO5iUX5d1wxRVXsGTJEgoLC1m4cCHPP/88JSUlrFu3jqCgINLT0zsdimqM4R//+Adz58494dh1113Hf//7X1566SX+9a9/dSvOVpY1MRljCoC/AnnAYaDCGPOhi1MvE5HNIrJERNIcZamA879OvqNMuaGyrpG1uaWcOyoJgIiQQL51+mDe21rI/iPVPo5OqR5g1q8gqN/xZUH97OXdsHDhQl566SWWLFnCFVdcQUVFBUlJSQQFBbF8+XIOHOhwYVUA5s6dyz//+U8aGxsB2L17N9XV9v/TN9xwA3//+98BGDNmTLfibGVZghCRWOx7Pw8BBgDhInJtu9PeBtKNMROAj4B/n8JzbhGRbBHJLinpekdsY3MLf/1gF8t2FHX5Wn/1+Z4jNLUYzs34eon3689IJyjAxk9f3UyuJgmlOjfhSrjoIYhOA8T+8aKH7OXdMHbsWCorK0lNTaV///4sWrSI7Oxsxo8fz3PPPceoUaM6vf473/kOY8aMYfLkyYwbN47vfve7NDU1AZCcnMzo0aO58cYbuxWjM8v2pBaRK4B5xpibHF9fB0w3xny/g/MDgFJjTLSIXA3MNMZ813HsceBTY0ynTUxZWVmmq/tBGGOY8tuPmTMmmT9eNqFL1/qru5ds4r2thaz/5fkEBXz9N8DLa/O4/+3tNDS38O0ZQ/jhecOJDHV/XRalerIdO3YwevRoX4dhmZqaGsaPH8/69euJjo52eY6rn4GIrDPGZLk638pRTHnAdBEJE/sUvlnAjnaB9Xf68mKn4x8Ac0Qk1lETmeMo8zgRYWRyBLuKKq24vdfZh7eWcPbIxOOSA8DCqYNY/j8zuSQzlSdW5jDngRUcq2v0UaRKKU/5+OOPGT16ND/60Y86TA6nwrJOamPMahFZAqwHmoANwBMicj+QbYx5C7hNRC52HC8FbnBcWyoivwHWOm53f2uHtRVGpUTxSvZBWloMNlvPnoq/7dAxSirrOTcjyeXxpKhQ/nLFROaNS+Gmf2ezfGcxCzK1e0epnmz27Nkn7b84FZaOYjLG3It9uKqzXzkdvwe4p4NrnwGesS66r41MjqS6oZmC8lrS4sK88UjLLHcMbz1npMstZtucm5FEYmQIH24r0gShlHJJZ1IDGSmRAOwqPLGZaX1eGfVNzd4O6ZTUNDTx/Oo8pqXHkRgZ0um5Nptw/phkPt1VTF1jz/j+lOquvrzkzKl875oggJHJ9kWx2vdD7C2u4tJHV/Hu5sO+CKvLnliRQ+GxOu6el+HW+XPGJFPd0MyqfUcsjkwp3wsNDeXo0aN9Mkm07gcRGhp68pOd6IZBQGRoEKkx/U6oQazefxSAwxX+vwNVYUUdj3+Ww/zx/clKj3PrmtOHxRMREsiH24o4b1SyxREq5VsDBw4kPz+fUxkO3xu07ijXFZogHEalRJ6QINbut/eLl1Y3+CKkLvnrh7tobjH8dF7n46idhQQGcO6oJD7aXsTvvmkI6OEd9Ep1JigoqEu7qSltYmozMiWSfSVVNDS1tJWtzbUv/VTm5wlia0EFr67P58YZ6QyK71on+5wxyRytbmB9ni5zpZQ6niYIh1EpkTS1mLalKA6V11JQbl/N8agfJwhjDL95ZzuxYcH84LzhXb5+ZkYiwQE2PtxWaEF0SqmeTBOEw8hk+0imnYXHAFiba29eSooM8esmppLKelbvL+U7Zw0h6hRmRUeGBnHG8Hg+2FbUJzvvlFId0wThMCwxgkCbsNsxkmltbikRIYGcPi
zerxNE0bF6AIYnRpzyPeaMSSGvtKbXzCZXSnmGJgiH4EAbQxLC2zqq1+4vY/LgWBIj/LwGUWUfYXWyeQ+dmT0mCRF47kvPz8RUSvVcmiCcZKREsquokoqaRnYVVTJ1cCyx4cHUNjZT2+Cfk8lKKu01iKSoro1vdpYUGcqNZwzhhdV5vL7h1DdEUUr1LpognIxKieRgaS2f7bGPk546JI748GAASmv8sxZR7GhiSogI7tZ97vnGKE4bEsc9r21ha0GFJ0JTSvVwmiCctHZUP//VAYIChMy0GOJaE0SVfyaIkqp6ovsFERIY0K37BAXYeGTRZGLDgvnuf9b5dbOaUso7NEE4GZUSBcDq/aWMT40mNCjg6wThpzWIksr6bvU/OEuICOGxa6dQUlXPj1/aoKOalOrjNEE4GRjbj7Bg+1/iU4fYl6toSxDV9T6LqzMllfUkeShBAExMi+GX80ezcs8R3ukha1AppayhCcKJzSaMcDQzTR1sTxDx4fY336N+2sRU7MEaRKtrThvM2AFR/GHpDmoamjx6b6VUz6EJop0Mx8quWemxAESGBhJgE8r8sInJGGNvYorwbIIIsAn3XTyWQxV1PPbpPo/eWynVc2iCaOfms4by58smEBNmb1qy2YTYsGC/7LStbmimtrGZpCjPJgiAqelxXDxxAI+vyOFgaY3H76+U8n+aINoZkRzJlVPTjiuLDw/2yyam4mPdnyTXmXu+MQqbCL9fuuPkJyuleh1NEG6IDQ/yyyam1klyiRGnPkmuM/2j+/H9mcN4b2sh6w5YtiW4UspPaYJwQ3x4iF+u6FpS5UgQFtUgAL595hACbMKnu/rmJitK9WWaINwQF+6ffRBty2xYmCDCQwLJSI5k48Fyy56hlPJPliYIEblDRLaJyFYReVFEQtsd/4mIbBeRzSKyTEQGOx1rFpGNjtdbVsZ5MrHhwVTUNtLU3HLyk72ouLKeoAAhul/Xl/nuisxBMWzMK6elRSfOKdWXWJYgRCQVuA3IMsaMAwKAq9qdtsFxfAKwBPiz07FaY0ym43WxVXG6Iz48GGOgvLbRl2GcoKSynoSIEGwWbxU6KS2Gyvomco5UWfocpZR/sbqJKRDoJyKBQBhwyPmgMWa5MaZ1DOVXQNd21PaSr2dT+1czkyeX2ejMpEExAGzIK7f8WUop/2FZgjDGFAB/BfKAw0CFMebDTi65CXjP6etQEckWka9E5BKr4nSHPycIK/sfWg1NiCAyNJAN2g+hVJ9iZRNTLLAAGAIMAMJF5NoOzr0WyAL+4lQ82BiTBVwD/F1EhnVw7S2ORJJdUmLNSBurEkRzi+Gj7UXUN53aXhNWLLPhis1mX9l2o9YglOpTrGximg3sN8aUGGMagdeAM9qfJCKzgZ8DFxtj2lbEc9RAMMbkAJ8Ck1w9xBjzhDEmyxiTlZiY6PnvAtr2hPD0UNc/LN3Bzc9l8+G2oi5f29xiKK32/DIbHclMi2FXUaWuzaRUH2JlgsgDpotImIgIMAs4bkquiEwCHseeHIqdymNFJMTxeQIwA9huYaydal12o8yDCeL51Qd46vP9AOw/Ut3l649W19NirJ0D4WzSoBiaWwxb8nUzIaX6Civ7IFZjH5m0HtjieNYTInK/iLSOSvoLEAG80m4462ggW0Q2AcuBPxpjfJYgggNtRIYGeqyJaeWeEn715jbOzUgkOSqE3KNdTxCtO8klRlozi7q9iQNjAHQ+hFJ9SKCVNzfG3Avc2674V07HZ3dw3SpgvIWhdVl8eLBHmpj2Flfy/efXMyIpgn9cM5nv/HsteUe7vhieN2ZRO4uPCGFQXJgmCKX6EJ1J7abY8GCPNDE9tXI/xsDTN0wlIiSQwXHh5J5KgvDCLOr2Jg2K0aGuSvUhmiDc5KkaREF5LcMSw0mN6QfA4IQwjlTVU1Xftc7ftoX6vJggMtNiKDxWR2FFndeeqZTyHU0QbrLvCdH9bUcLK+pIjvq63yA9PhyAA13shyiprCcyNJDQoIBux+SuSYPsmyhtPFjmtWeqjr25sYB739yqS6Aoy2iCcFNcRDBl1Y0Y073/jIXH6kiJ/jpBDI4PA+BAF5uZvDWL2tno/pEEB9i0mckPvLQmjx+/tJF/f3mAl7MP+joc1UtpgnBTfHgwDc0tXW4KclbT0ERlXVO7BGGvQXR1JJMVW42eTEhgAGNTo1ifpzUIb/n3qlwm/vpDfvvOdoocG0S9sDqPn722hXNGJjItPY4/vb/T72b5q95BE4SbYsO6P5u6te0+xamJKSIkkISI4C6PZCqpqicpyjtDXJ1NS49j08EK6hpPbfa3ct+7mw9z39vbiA8P5l+rcjnrT8u5+bls/vf1LZybkcjj35rCb785jqq6Jv703k5fh6t6IU0QboqP8ECCOHZiggB7LaKrNYjiY3Ver0EATB8WT0NzC+sOaC3CSqtzjnLHyxuZPCiWpT8+i+V3zuSyKQP5bFcJs0Yl8di3phAaFMDI5EhuOmsIL2cf1F3/lMdpgnBTXLj9zbg7CaK1iSA5un2CCOtSH0R1fRPVDc1e74MAmJoeR4BN+HLfUa8/u6/YXVTJzc9lkxbXj6euyyI0KIBB8WH84dLxrPvlbJ68LouQwK8HJ9x23gj6R4fy89e3+t2eJapn0wThpriw7q/HVFhhHwXVvgaRHh/O4Yo6t5ttjnh5kpyziJBAxqdG81WOJgir3PHyRkKCAnj2xmnEOtYBaxUZGnTC/h/hIYHce9EYdhZWcsmjX/D+1sK2kU0NTS2s2F3CEyv2abOg6jJLZ1L3JnER3V+PqehYHZEhgYSHHP9jbx3JlFdaw8jkyJPep9gHk+ScnT4snqdW5lDT0ERYsP4KedLe4kq2HTrGfReNIS0uzO3r5o5N4YGFE3lo2V5u/e86MpIjGZ4cwYpdJVQ6BlY0Nht+cO5wq0JXvZDWINwUHhxAcKCt253U7ZuX4Ou5ELluLtrni0lyzqYPjaex2ZCd2zf7IZbvKuaNDQWW3PudzYcRgQvG9+/SdSLCNycN5KM7zubBqzIBWLO/lPkT+vPUdVnMHp3EPz/d11b7VMod+uefm0SEuLDuzaYuPFZ3QvMSHF+DcIevE0TW4FgCbcKXOUc5e6R7S6wbY7Av6tvz/ebt7eQcqWZzfgW/mD/ao1u+vrv5MNPS446bTNkVgQE2FmSmsiAz9bjyIYnhzHlgBQ9+vIffXDLOE6GqPkBrEF0Q18l6THlHa07a/FR0rM7lf/yYsGCi+wW5PZIpr7SG4EBbW7+It4WHBDIxLcbtjuplO4rI+u3HPLRsj8WRWa+suoGcI9Wkx4fxzBf7+dGLGzzWtr+7qJI9xVVcOKFrtQd3DEuM4Jppg3hhTR77SnRvceUeTRBdEB8R7LKKbozhmqe+4orHv+xwIl1zi6G4sp6UaNd/9ad3YSTTugNlTBwY7dG/XLvq9KHxbCmo6HTiYEuL4YGPdnPTv7NpaG7hbx/ttqxpxls25pcD8PtLx/O/3xjFu1sOc90za6htcD9JGGP40/s7+fFLG2h2WibjnU2HsAnMG+f5BAHw4
9kj6BcUwB91zoRykyaILhiZHMnOwkoamo4fSlhQXkt+WS17i6u465VNLpfjOFpVT3OLcdnEBO7PhahtaGZrQQVZ6XGn9k14yPSh8TS3GNbmuh57X1HbyM3PZfPgsj1cNnkgq352HqcNiePuJZt79Hj9DXnl2MS+P8YtZw/jwasyWbO/lD9/cOKbbl1jM3uLK48rM8bwx/d28s9P9/HmxkM8sSKnrfydLYeZPjTesqbDhIgQvjdzGB9tL2LFbmu251W9iyaILpiaHkt9UwtbCo7fVa110tilk1J5b2th2396Z62T5DpqW06PD6OgrPaE5NPepvxymloMWYNjT+Vb8Jgpg2MJChC+ctHMlJ1byjceXMlnu0u4f8FY/nrFBCJDg3js2in0jwnllufWcdDN/hZ/syGvjIyUqLaRaAsyU7nu9MH864vc44b+1jU2c+O/1jL7byv4/vNff7//+GQvj6/I4drpg5g/vj9/+2gXWwsq2FlYSU5JNfMtaF5y9u0ZQ0iN6cd1z6xh0VNf8f7WQp07oTqkndRdMGWw/a/27NxSpji9QWfnlhEeHMCfL59AfVMLf3p/J+NSo5kxPKHtnLZlNlyMYgIYFB9Oi4H8shqGJkZ0GENrMpri4wTRLziASWmxfOn0ptjcYnj4k708uGw3A2PDeOXW09tWgAX7nhpPXz+VSx/9gnP+spygABs2EfoFB/Dct6cxLjXaF9+K21paDBsPlnPRxAHHlf/sglF8uquEu5ds5r0fn0VoUAB3vLyRL3OOcumkVJZuPcyyHcXMzEjkg21FXDo5lfsvHsexukayD5Ry+8sbOWdkor15aWyKpd9Dv+AA3vrhDF5ck8cLq/O49b/rGJ4UwVs/nKFDltUJtAbRBYmRIQxNCGdtu+Gd6w6UkTkohsAAG3+6fALDEiNO6Lws6mCZjVbprau6nuQv6+zcUoYnRbTtk+1L04fGsbWggtte3MA1T37FOX9ZzgMf72ZBZirv3nbmccmh1fCkCF64eTq3njOMG2ak863TB1Pb0MwLa/J88B10Tc6RKirrmpiUFnNceVhwIH+9YiIHy2r443s7+eWbW3lvayG/mD+avy3M5JM7ZzJvXAofbCti3tgU/nzZBGw2ISYsmP+7IpO9xVU8/fl+zhiWQLwXlk+Jjwjhh+eNYMXd5/Lnyyewt7iK19b37L4hZQ1NEF2UlR7LugOlbTNVq+qb2Fl4rK12ERESyN3zRlFa3cAmp+05C4/VEWCTDt8AWld1PdDJXIiWFsO6A2VMTfdt7aHV3HEpxIWHsCm/nPqmFsYOiOLBqzJ5YGEmkaFBHV43LjWau+eN4p4LRvO/3xjNnLHJLN1y+KTNa7623rHMuavEN21IHN+eMYT/fHWAF1bn8b2Zw/jOWUMBGBDTjwevmsTnPz2XRxZNJjDg6/92Z45I4NszhgBY3rzUXmCAjSumDGR8ajTPrsrt9lL2qvfROmUXZaXHsTg7n5wjVQxPimRDXhkthuP6BFqbf9bllXHa0HjAvsxGUmQIAR2MPEqICCY8OKDT7Uf3FFdxrK6pLRn52tgB0WT/wuW24l1ySWYqb248xGe7Szh/TLIHIrPGhrxyokIDGZoQ7vL4XXMz2HiwnPGp0dw9N+OE4wNjXc+M/ukFGWSkRHDJpFSXx60kItw4I52fLN7Eyj1H3J7XovoGrUF00VTH6KE1++3NTNm5ZdjEvl9zq7jwYIYmhrPOqSmqozkQrUTkpCOZsh2jf3zdQe1pZ45IID482OtDYOsam/l8zxG3d2TbkFdG5qDYDocXhwYF8Or3zuC+i8d2aVJgSGAAC6cOOm4BPm+aP6E/CREhPLsq1yfPV/7L0gQhIneIyDYR2SoiL4pIaLvjISLysojsFZHVIpLudOweR/kuEZlrZZxdkR4fRkJEMNmO4Z3rHaNa2jepTBkUy7q8srZqe0ezqJ1lpESy4/CxDo+vyy0jISK4beZ1bxEUYOPCCf35eEcRlXWNXnvukytyuPbp1Vz+2Cp2Fnb8cwd7U+LuosoT+h96g5DAABadNohPdhaz383lXlTfYFmCEJFU4DYgyxgzDggArmp32k1AmTFmOPAA8CfHtWMc544F5gGPiohv/rxqR0TIGhzH2gOlNLcYNuSVM2VwzAnnZaXHUl7TyL4S+3+4ooq6DkcwtRo7IIqiY/UUV9a5PL72QClZg+N6zZIVzhZMSqW+qYX3txZ67ZnLdhaTGtOP3KM1XPjQ5/zhvR0dzorenF9Oizm+ptibLJo+iKAA4d9ai1BOrG5iCgT6iUggEAYcand8AfBvx+dLgFlif/dbALxkjKk3xuwH9gLTLI7VbVnpsRwsrWXF7hKq6pvIctEn0NoPsf5AGdX1TVTWN510fZ3WYZ7bDp3412zxsToOltaS5Scd1J42KS2GwfFhvLHRO81MpdUNbMov54qsgSz7yTlcOjmVxz/L4c4OJjq27sOd2QtrEABJkaFcOGEAr2Qf9GotTvk3yxKEMaYA+CuQBxwGKowxH7Y7LRU46Di/CagA4p3LHfIdZScQkVtEJFtEsktKvDM7dNoQe0J47LN9gOs5CUMTIogJCyL7QOnXO8l1sMxGqzEDogDY1m4iHkC2n8x/sIqIsCAzlVX7jrYNCT5V9U3N1DR0vnf4it0lGAPnZiQRGx7Mny+fyF1zM3h382Fed9EXsiGvjKGJ4X4xvNgqN85Ip7qhuccvh6I8x8ompljsNYEhwAAgXESu9fRzjDFPGGOyjDFZiYneGYExpn8UYcEBrN5fSnJUCANj+51wjs0mTB4US/aBMooqOp9F3SoqNIj0+DC2FpxYg8jOLSMk0MbYAf49maw7LskcgDHw1sb2FU33GWO46dlsTvvdMp5ckdPh0NlPdxUTHx7MeKfJebeeM4yp6bH86s1tx830NsbelDgprXcm51YTBsaQHh/G8l26DIeys7KJaTaw3xhTYoxpBF4Dzmh3TgGQBuBohooGjjqXOwx0lPmFwABbW1t0Z30CUwbHklNSzY5C+3o8J+ukBhibGs3WQ65qEKVkpsUQHNh7B54NTYxg8qAYnliZ07akeVct3VLI53uP0D8mlN8t3cGcBz7j4+1Fx53T3GL4bHeJffay04ikAJvwtyszEeAnizfS3GI4WFrDz9/YytHqBia76Gvqbc4ckcBXOUf9fk6K8g4r323ygOkiEuboV5gF7Gh3zlvA9Y7PLwc+MfYG4LeAqxyjnIYAI4A1FsbaZa39DpM7afJpbQ56b8thoONlNpyNGxBNflkt5TVfLx1eWt3A1oKKtjkVvdnvLx3PsdpG7nh543ErnbqjtqGZ3y/dwej+Ubz347N59sapBAbY+M5z2W3/BmDvcC6raeScjBNrnGlxYdx/yVjW5pZx6T9XMfOvn/JK9kGunpbGZZMHdvv783dnDk+kpqGZDXl9czModTwr+yBWY+94Xg9scTzrCRG5X0Qudpz2NBAvInuBnwA/c1y7DVgMbAfeB35gjPGrDXXPHZVEcKCNs0ckdHjOxIExBNqE7ANlRIUGurXWzbhURz+EU0f1JzuL
aTFw/mj/nUTmKaNSovj1xWP5fO8RHl2+97hj+0qqOu1beHzFPgrKa7nvojEE2ISZGUm89+OzGNM/it+8s73t2uW7SrAJnD3CdZPkJZmpfHNSKrsLK7nhjHRW3n0ef7h0AqFBfjGQzlKnD4vHJvD53iO+DkX5AUtnUhtj7gXubVf8K6fjdcAVHVz7O+B31kXXPZlpMWz/9dzjlk1or19wAGMHRLEpv8Kt2gPYaxAAWwsq2hb7+2h7ISlRoW3Jo7dbODWNL3OO8sDHu5k8OJbS6gaeXZXLugNlLMgcwINXTTrhmoLyWh77bB/zJ/Q/rqYVFGDj/gVjufyxL3n4k73cPW8Un+0qJjMththw1x3OIsL/XTGRP1w6vk8kBWfR/YKYmBbDyj1HuHPOibPBVd/Sexu0vaCz5NCqdVkMd7eQjA0PJjWmH1sdNYi6xmZW7D7C7DFJvXL+gysiwu++OZ70+HAWPbWaH724gSNV9Zw5PIG3Nh1ib/GJO6L9fqm99fJ/vzH6hGNZ6XFcOjmVJ1fmkJ1byqb8CmZmJHUag80mfS45tDpreAKb88upqNHhrn2dJgiLtfZDuNNB3WpcalTbUNdV+45Q29jM+WOsXQba30SEBPLYt6Zw6aRUnr4+i0/unMnfr8okNDDghKanT3YW8e7mw9x6zjBSY04cUQb2JblDAwP4znPZgH14q3LtzBGJtBj4Mkebmfo6TRAWa53Y1t/NJiawNzPlHKmmsq6Rj7YXERESyPSh/rFAnzeNTI7kbwszmTU6mQCbkBARwrXTB/HGxgJyHUtClFY3cPeSLYxKieR7M4d1eK+kyFBuP38k5TWNJEQEM3ZA32iuOxWTBsUQHhzAyj2aIPo6TRAWS44K5eFrJrFo+mC3r3GeUf3xjmLOGZnos4Xc/M3NZw8lKMDGI8v3YozhF29soaK2gb9dmXnSn9H1pw8mMy2GBZmpPt3P298FBdiYPjReO6qVLvftDRdOGHDyk5yMdXRGv7A6j5LKer9eAtvbkiJDuea0QTz35QEGx4exdEshd8/LaJuF3pnAABuvf/+MPtOX0x1njkhg2c5iDpbWkBbXuxaHVO7TGoQfSooMJSkyhLc3H3IM19Q1+p3des4wAmzCXz/czZTBsXz37I6bltrT5OCesxzDt7WZqW/TBOGnxqVGYwxMS4/r1ev/nIrkqFCumz6YyJBA/nblxA43YVKnblhiBClRoXy+V5fd6Ms0QfipcY4mE21ecu3n80fzxT3ntW3VqjxLRDh3VCLLdhSzzcXSL6pv0AThp84emUhCRDDzxvWt4a3uEhGiOtn3WnXfT87PIDYsmO/9d73OieijNEH4qaz0OLJ/cT4DOhjXr5TVEiNDePTayRyuqOX2lze4vTWr6j00QSilOjR5UCy/umgsy3eV8NAne3wdjvIyTRBKqU5de9ogLp2cyoPL9rBeV3ntUzRBKKU6JSL8ZsE4+gUF8Ep2vq/DUV6kCUIpdVLhIYHMHp3M+1sP09ismwn1FZoglFJuuXBCf8pqGvlCl+DoMzRBKKXcck5GIpGhgbyz+fDJT1a9giYIpZRbQgIDmDMmhQ+2FlLf5FcbPCqLaIJQSrntoon9qaxv4rNdugRHX6AJQinlthnDE4gNC9Jmpj7CrQQhIuEiYnN8PlJELhYRXedAqT4mKMDGvHH9+XhHEbUN2szU27lbg1gBhIpIKvAh8C3gWauCUkr5r4sm9qemoZlPdhb7OhRlMXcThBhjaoBLgUeNMVcAYzu9QCRDRDY6vY6JyO3tzrnL6fhWEWkWkTjHsVwR2eI4ln0K35tSygKnDYknMTKEtzcd8nUoymLu7ignInI6sAi4yVHW6f6OxphdQKbj4gCgAHi93Tl/Af7iOOci4A5jTKnTKecaY3TQtVJ+JMAmzBubwpJ1+dQ1NhMapNvh9lbu1iBuB+4BXjfGbBORocDyLjxnFrDPGHOgk3OuBl7swj2VUj4yZ2wytY3NuuNcL+dWgjDGfGaMudgY8ydHZ/URY8xtXXjOVXTy5i8iYcA84FXnxwIfisg6Ebmlk2tvEZFsEckuKdGhd0p5w/Sh8USGBvLBtkJfh6Is5O4ophdEJEpEwoGtwHYRucvNa4OBi4FXOjntIuCLds1LZxpjJgMXAD8QkbNdXWiMecIYk2WMyUpM1L2blfKGoAAbs0YlsWxHEU26NlOv5W4T0xhjzDHgEuA9YAj2kUzuuABYb4wp6uScE2oYxpgCx8di7H0X09x8nlLKC+aOTaGsppG1uboEeG/lboIIcsx7uAR4yxjTiL0JyB2d9i2ISDRwDvCmU1m4iES2fg7MwV5zUUr5iXMyEgkJtGkzUy/mboJ4HMgFwoEVIjIYOHayixxv7ucDrzmV3Soitzqd9k3gQ2NMtVNZMvC5iGwC1gDvGmPedzNWpZQXhAUHctaIBD7aXoQxuh1pb+TWMFdjzEPAQ05FB0TkXDeuqwbi25U91u7rZ2k36c4YkwNMdCc2pZTvzBmbwsc7itl26BjjUqN9HY7yMHc7qaNF5G+to4VE5P+w1yaUUn3YrFFJ2AQ+1GamXsndJqZngErgSsfrGPAvq4JSSvUM8REhTE2P44NtnY1BUT2VuwlimDHmXmNMjuP1a2ColYEppXqGuWNT2FVUydaCCl+HojzM3QRRKyJntn4hIjOAWmtCUkr1JJdNGUhsWBC/X7pDO6t7GXcTxK3AI44F9HKBh4HvWhaVUqrHiO4XxO2zR7Jq31E+3qErvPYm7i61sckYMxGYAEwwxkwCzrM0MqVUj3HNaYMYlhjO75fuoKFJZ1b3Fl3aUc4Yc8wxoxrgJxbEo5TqgYICbPx8/mj2H6nmv191tian6km6s+WoeCwKpVSPd25GEmcOT+DBZXsor2nwdTjKA7qTILQ3SinVRkT4xYWjqaxr5MmVOb4OR3lApwlCRCodO8G1f1UCA7wUo1KqhxiVEsXZIxN5c+MhHdHUC3SaIIwxkcaYKBevSGOMu7vRKaX6kPnj+5NfVsvmfJ0X0dN1p4lJKaVOMGdMCkEBwrtbDvs6FNVNmiCUUh4VHRbEmcMTeHfzYW1m6uE0QSilPG7+hAEUlNeySZuZejRNEEopjzt/TLK9mWnzIV+HorpBE4RSyuOi+wVx1ohElm4p1GamHkwThFLKEvPH96egvJaNB8t9HYo6RZoglFKWmN3WzKSjmXoqTRBKKUtE9wvi7BGJLN2io5l6Kk0QSinLzB6TzKGKOnKOVPs6FHUKNEEopSwzfWg8AF/lHPVxJOpUWJYgRCRDRDY6vY6JyO3tzpkpIhVO5/zK6dg8EdklIntF5GdWxamUsk56fBjJUSF8uU8TRE9k2XpKxphdQCaAiAQABcDrLk5daYy50LnAcf4jwPlAPrBWRN4yxmy3Kl6llOeJCNOHxvPF3qMYYxDxv10C6puaKaqoZ1B8mK9D8TveamKaBewzxri7k8g0YK8xJscY0wC8BCywLDqllGVOHxrPkap69pX4Zz/EUyv3M/Ovy/li7xFfh+J3vJUgrgJe7ODY6SKySUTeE5GxjrJU4KD
TOfmOshOIyC0iki0i2SUlJZ6LWCnlEf7eD/FVzlFaDPzwhfUcLK3xdTh+xfIEISLBwMXAKy4OrwcGO/a7/gfwRlfvb4x5whiTZYzJSkxM7FasSinPGxwfRkpUqF8miKbmFtYfKOOckYk0tRhu+c86ahuafR2W3/BGDeICYL0xpqj9Acce11WOz5cCQSKSgL2/Is3p1IGOMqVUD2Pvh4jjq5xSv5sPsbOwkuqGZi6dnMpDV09iZ+Ex7n51s9/F6SveSBBX00HzkoikiKPXSkSmOeI5CqwFRojIEEcN5CrgLS/EqpSywPS2fogqX4dynHUHygCYMjiWczOS+J85Gby96RCvrde/R8HiBCEi4dhHIr3mVHariNzq+PJyYKuIbAIeAq4ydk3AD4EPgB3AYmPMNitjVUpZp7Uf4sucUh9HcrzsA2WkRIWSGtMPgO/PHMbQhHBeXZ/v48j8g6XbhhpjqoH4dmWPOX3+MPBwB9cuBZZaGZ9Syjuc+yG+NX2wr8Npsy63lCnpsW3Db0WECyf05+HleymprCcxMsTHEfqWzqRWSllORDh9WDyrc476Tfv+ofJaDlXUkTU49rjy+RMG0GLg/a26yKAmCKWUV0wfGseRqga/6YfIdvQ/ZA2OO648IyWSEUkRvK2r0GqCUEp5R2s/xIrd/jEhbV1uKWHBAYzuH3nCsfkT+rM2t5SiY3U+iMx/aIJQSnnF4PhwRqVE8u4W//jLPPtAGZlpMQQGnPg2eOGE/hgDS/0kVl/RBKGU8pqLJg5g3YEy8st8O2O5qr6JHYePndD/0Gp4UqQ9mfXxZiZNEEopr7lowgAA3vHyG291fROPLN/L4YpaADbmldNiICs9rsNr5o/vT/aBsrZr+iJNEEoprxkUH8bEtBje2njIq8/9aHsRf/lgF7P/7zOe+Xw/a/YfxSYwaVBMh9fMn9AfoE/XIjRBKKW86uKJA9h++Bh7i703mqmg3F4LmDw4lvvf2c4/lu8lIyWKyNCgDq8ZmhjBmP5RfXo0kyYIpZRXXTihPyLw9ibv1SLyy2qJDw/muW9P4+FrJtE/KpS5Y5NPet03J6Wy6WA5uworvRCl/9EEoZTyquSoUE4bEsfbmw95bdLcofJaBsT0c8yUHsCqe2Zx++yRJ73usikDCQ6w8eKaPC9E6X80QSilvO6iiQPIKalm26FjXnneofLatvWWuiIuPJi541J4fUMBdY19bxlwTRBKKa+7YFx/Am3C25utb2YyxlDgqEGciqunpVFR28h7fXDpDU0QSimviwsP5swRCby7+bDlzUwVtY3UNDSTGntqCeL0ofGkx4fx4uqDJz+5l9EEoZTyifPHJJNfVsvuImtHM+WX2UcwpcaEntL1IsJV0waxJreUvcV9q7NaE4RSyidmjbKPIlq284TNJj3qUHlrggg75XtcPmUgQQHCi2v6Vi1CE4RSyidSokMZlxrFsh3Flj6ndQ7EgFOsQQAkRIQwZ0wKr63P71Od1ZoglFI+M2tUMuvzyjhaVW/ZMw6V1xIaZCMuPLhb97l62iDKahp5fUPf2Y5UE4RSymdmj07GGFi+q8SyZxQ4zYHojhnD48lMi+GhZXv6TC1CE4RSymfGpUaRHBXCsh3W9UMUlNed0hyI9kSEu+dmcLiijudX942Jc5oglFI+IyKcNyqJFbtLqG+y5q/ygrJTmyTnyhnDE5gxPJ5Hlu+lqr7JI/f0Z5YlCBHJEJGNTq9jInJ7u3MWichmEdkiIqtEZKLTsVxH+UYRybYqTqWUb80alUx1QzNr9pd6/N51jc0cqao/5Ulyrtw1dxSl1Q088/l+j93TX1mWIIwxu4wxmcaYTGAKUAO83u60/cA5xpjxwG+AJ9odP9dxjyyr4lRK+daM4QmEBNosGc10uMK+ZainahAAmWkxzBmTzJMrciirbvDYff2Rt5qYZgH7jDEHnAuNMauMMWWOL78CBnopHqWUn+gXHMCZwxP4eEeRx2dVH2ob4uq5BAFw55wMqhqaeHj5Xo/e1994K0FcBbx4knNuAt5z+toAH4rIOhG5xbLIlFI+N2u0fVb1loIKj963wDGLeuApLrPRkYyUSK6eNohnvtjPit3WjcDyNcsThIgEAxcDr3RyzrnYE8RPnYrPNMZMBi4AfiAiZ3dw7S0iki0i2SUlvfcfSqne7BvjU4gJC+K37+7waC2ioLwWEfsS4572y/ljGJkUye0vb6TQ0ZTV23ijBnEBsN4Y43Icm4hMAJ4CFhhjjraWG2MKHB+LsfddTHN1vTHmCWNMljEmKzEx0ePBK6WsFxMWzE/njWLN/lLe2Oi5iWiHymtJjgwlONDzb3X9ggN4ZNFk6hqbue3FDTQ1t3j8Gb7mjQRxNR00L4nIIOA14FvGmN1O5eEiEtn6OTAH2OqFWJVSPrIwK42JaTH87t2dHKtr9Mg97ZPkPF97aDU8KYLff3M8a3JLeeDj3Se/oIexNEE43tzPx54EWstuFZFbHV/+CogHHm03nDUZ+FxENgFrgHeNMe9bGatSyrdsNuG3C8ZxtLqev33omTfbQ93YB8Jdl0xK5aqpaTyyfF+vW+3V0gRhjKk2xsQbYyqcyh4zxjzm+Pw7xpjY1uGwrcNZjTE5xpiJjtdYY8zvrIxTKeUfxg+MZtFpg3juy1y2Hepeh3VLi+FQed0p7wPRFf8zN4NAm/Dy2t612qvOpFZK+ZW75owiNiyYPyzd2a37HKmup6G5xaNzIDqSEBHCrNFJvLa+gIam3tMXoQlCKeVXosOC+O45Q/l87xE2Hiw/5fsUtG0UZH2CAFg4NY2j1Q18YvH+Ft6kCUIp5XeuOW0w0f2CeLQbE9EOlduHnlrdB9Hq7BGJJEeF9KpmJk0QSim/ExESyPVnpPPh9iL2FJ1ax29BeQ2AV/ogAAIDbFwxJY3Pdpf0mnkRmiCUUn7pxjPSCQsO4J+f7jul6w+V1xEZEkhUaJCHI+vYlVlptBhYsq531CI0QSil/FJseDBXTxvEm5sOcbC0psvX55fVeq320GpQfBinD41ncXY+LS2eXVfKFzRBKKX81s1nDcUm8MSKnC5fu6+kivT4cAui6tzCqWnkldbw1f6jJz/Zz2mCUEr5rZToUC6fMpCXsw9S2oWltWsbmsk9Wk1GSqSF0bk2b1wK0f2C+NcXuV5/tqdpglBK+bVvTU+noamFdzcfcvuaPcWVGAOjfJAgQoMCuHFGOh9tL2L7oWNef74naYJQSvm1MQOiGJUSyWsb3F/Eb2ehfeSTL2oQADfOGEJkSCAPLdvjk+d7iiYIpZTfu2RSKhvyysk9Uu3W+bsKKwkNsjHYB30QANH9grhxRjrvbytkZ2HPrUVoglBK+b0FmQMQgdfdrEXsKqxkRFIkATaxOLKOffvMIUSEBPKPZV9P9jPGsLPwGM09ZISTJgillN/rH92P04fG88bGArc2FNpZWOmz5qVWMWHB3HBGOku3HmZ3USXZuaVc+fiXzPv7Sv73tS0e317VCpoglFI9wjcnpXLgaA3r88o7Pe9oVT1Hqup90kHd3k1nDiEsKIBrnlzN5Y
99Se7RGuaMSebl7IO8sCbP1+GdlCYIpVSPMG9cCqFBNt44STPTLh93UDuLDQ/m1nOGUd/UzF1zM/jsrpn889opzMxI5L63trHuQJmvQ+yUJgilVI8QGRrE+WNSeHvzoU6X1Pb1CKb2fnjecDbfO4cfnDucsOBAAmzCgwsnMSCmH9/77zqKj/nvuk2aIJRSPcalk1Ipr2nk013FHZ6zq7CSuPBgEiNCvBhZx0QEkeM7y6PDgnjiW1lU1Tdx5eNfsnjtQeqbmn0UYcc0QSileowzRySQEBHCknX5HZ6zs6iSjOTIE96U/U1GSiRPXZdFWHAgd7+6mbP+tJzHPttHU7P/bDikCUIp1WMEBdi4bHIqn+wsprjyxKaZlhbDniLfj2By1xnDE3j3tjP5z03TGJkcyR/f28nfP/afyXWaIJRSPcqVU9NoajG8tv7EzuqDZTXUNDT7xQgmd4kIZ41I5L/fOY0rswbyyKd7+WLvEV+HBWiCUEr1MMMSI5iWHsfLaw+eMJfA3zqou+q+i8cyLDGC21/eyJGqel+HY12CEJEMEdno9DomIre3O0dE5CER2Ssim0VkstOx60Vkj+N1vVVxKqV6noVT09h/pJo1+0uPK28d4joyuWcmiLDgQB6+ZhIVtY3cuXiTz/eUsCxBGGN2GWMyjTGZwBSgBni93WkXACMcr1uAfwKISBxwL3AaMA24V0RirYpVKdWzfGN8fyJDAo/f/3nzYhZ9OZ+c0EWEP5oJmxf7LL7uGJUSxa8uHMNnu0t4dlWuT2PxVhPTLGCfMeZAu/IFwHPG7isgRkT6A3OBj4wxpcaYMuAjYJ6XYlVK+bl+wQEsmDSAd7ccpqK20Z4M3r6N+KYibBioOAhv39Zjk8Si0wZx+tB4nlyZ49N1m7yVIK4CXnRRngo4b96a7yjrqFwppQBYmDWI+qYW3tp0iJaPfw2Ntcef0FgLy+73TXDdJCJ86/TBHK6oY+WeEp/FYXmCEJFg4GLgFYvuf4uIZItIdkmJ736QSinvGpcaxZj+Ufzm7e1Q0cG8iI7Ke4DZo5OJCw8+vhnNy7xRg7gAWG+MKXJxrABIc/p6oKOso/ITGGOeMMZkGWOyEhMTPRSyUsrfiQg/u2AU88alUBWa4vqk6IHeDcqDggNtXDoplY93FPlsRJM3EsTVuG5eAngLuM4xmmk6UGGMOQx8AMwRkVhH5/QcR5lSSrU5e2QiD109iaj5v4GgfscfDOoHs37lm8A8ZOHUNBqbDa+7mPPhDZYmCBEJB84HXnMqu1VEbnV8uRTIAfYCTwLfBzDGlAK/AdY6Xvc7ypRS6kQTroSLHoLoNEDsHy96yF7eg41IjmTyoBhezj5xzoc3SE/YtMJdWVlZJjs729dhKKWUx7y8No+fvrqFV793OlMGx3n8/iKyzhiT5eqYzqRWSik/duGEAYQHB/DSGu93VmuCUEopPxYeEshFEwfwzubDlFSe2FltjLFsroQmCKWU8nM3nz2U5hbDr9/edsKxp1bu5/pn1lDT0OTx52qCUEopPzcsMYLbZg3nnc2H+Xj71zMGlu8q5g/v7SC6XxD9ggI8/lxNEEop1QPccvYwRqVE8os3tnKsrpG9xVXc9sIGRqVE8ZcrJliyQZImCKWU6gGCA2388bIJFFfWcd+b27jluWyCA208eb19VzorWHNXpZRSHpeZFsONM4bw9Of7CQoQXrh5Oqkx/U5+4SnSBKGUUj3InXNGknukmosmDmBquufnRTjTBKGUUj1IWHAgT98w1SvP0j4IpZRSLmmCUEop5ZImCKWUUi5pglBKKeWSJgillFIuaYJQSinlkiYIpZRSLmmCUEop5VKv2lFOREqAA0A0UOEoPtnnrR8TgCNdfKTz/dw91r7c3ficy7oaa2dxdnS8szhPFqtVP9PuxtoT//17Uqz6u+q5WL357z/YGJPo8ogxpte9gCfc/dzpY3Z3nuPusfbl7sbXnVg7i7Oj453F6cbP0pKfaXdj7Yn//j0pVv1d7Zm/q529emsT09td+Ny5rDvPcfdY+/KuxHeqsZ7sOlfHO4uz/dftY7XqZ9rRcXdj7Yn//s6f+3us+rt68uP++LvaoV7VxNQdIpJtOti429/0lFh7SpygsVqlp8TaU+IE78baW2sQp+IJXwfQBT0l1p4SJ2isVukpsfaUOMGLsWoNQimllEtag1BKKeWSJgillFIuaYJQSinlkiYIN4jIWSLymIg8JSKrfB1PR0TEJiK/E5F/iMj1vo6nMyIyU0RWOn6uM30dz8mISLiIZIvIhb6OpSMiMtrx81wiIt/zdTydEZFLRORJEXlZROb4Op7OiMhQEXlaRJb4OhZXHL+b/3b8PBd58t69PkGIyDMiUiwiW9uVzxORXSKyV0R+1tk9jDErjTG3Au8A//bXOIEFwECgEci3Ik4PxmqAKiC0B8QK8FNgsTVReuz3dIfj9/RKYIafx/qGMeZm4FZgoZ/HmmOMucmqGF3pYtyXAkscP8+LPRrIqcyu60kv4GxgMrDVqSwA2AcMBYKBTcAYYDz2JOD8SnK6bjEQ6a9xAj8Dvuu4dok//0wBm+O6ZOB5P4/1fOAq4AbgQn+N03HNxcB7wDX+/DN1uu7/gMk9JFbL/k91M+57gEzHOS94Mo5AejljzAoRSW9XPA3Ya4zJARCRl4AFxpg/AC6bEERkEFBhjKn01zhFJB9ocHzZbEWcnorVSRkQYkmgeOznOhMIx/6fsVZElhpjWvwtTsd93gLeEpF3gRc8GaMnYxURAf4IvGeMWW9FnJ6K1Re6Ejf2GvhAYCMebhXq9QmiA6nAQaev84HTTnLNTcC/LIvIta7G+RrwDxE5C1hhZWAudClWEbkUmAvEAA9bGtmJuhSrMebnACJyA3DE08mhE139mc7E3twQAiy1MjAXuvq7+iNgNhAtIsONMY9ZGVw7Xf25xgO/AyaJyD2OROILHcX9EPCwiMyne8txnKCvJoguM8bc6+sYTsYYU4M9kfk9Y8xr2BNaj2GMedbXMXTGGPMp8KmPw3CLMeYh7G9sfs8YcxR7X4lfMsZUAzdace9e30ndgQIgzenrgY4yf9NT4gSN1Qo9JU7QWL3B63H31QSxFhghIkNEJBh7B+RbPo7JlZ4SJ2isVugpcYLG6g3ej9tbvfK+egEvAof5eujnTY7ybwC7sY8K+LnGqbFqnBqrv7z8JW5drE8ppZRLfbWJSSml1EloglBKKeWSJgillFIuaYJQSinlkiYIpZRSLmmCUEop5ZImCNWriUiVl5/nkf1CxL5fRoWIbBSRnSLyVzeuuURExnji+UqBJgilukREOl2/zBhzhgcft9IYkwlMAi4UkZPt8XAJ9hVnlfIITRCqzxGRYSLyvoisE/uudqMc5ReJyGoR2SAiH4tIsqP8PhH5j4h8AfzH8fUzIvKpiOSIyG1O965yfJzpOL7EUQN43rHENSLyDUfZOhF5SETe6SxeY0wt9qWcUx3X3ywia0Vkk4i8KiJhInIG9r0g/uKodQzr6PtUyl2aIFRf9ATwI2PMFOB/gEcd5Z8D040xk
4CXgLudrhkDzDbGXO34ehT25cqnAfeKSJCL50wCbndcOxSYISKhwOPABY7nJ54sWBGJBUbw9RLurxljphpjJgI7sC/DsAr7ujx3GWMyjTH7Ovk+lXKLLvet+hQRiQDOAF5x/EEPX29YNBB4WUT6Y9+xa7/TpW85/pJv9a4xph6oF5Fi7Dvjtd86dY0xJt/x3I1AOvZtVnOMMa33fhG4pYNwzxKRTdiTw9+NMYWO8nEi8lvse2lEAB908ftUyi2aIFRfYwPKHW377f0D+Jsx5i3H5jv3OR2rbnduvdPnzbj+v+TOOZ1ZaYy5UESGAF+JyGJjzEbgWeASY8wmxyZGM11c29n3qZRbtIlJ9SnGmGPAfhG5AuxbX4rIRMfhaL5eX/96i0LYBQx12k5y4ckucNQ2/gj81FEUCRx2NGstcjq10nHsZN+nUm7RBKF6uzARyXd6/QT7m+pNjuabbdj39QV7jeEVEVkHHLEiGEcz1feB9x3PqQQq3Lj0MeBsR2L5JbAa+ALY6XTOS8Bdjk72YXT8fSrlFl3uWykvE5EIY0yVY1TTI8AeY8wDvo5Lqfa0BqGU993s6LTehr1Z63HfhqOUa1qDUEop5ZLWIJRSSrmkCUIppZRLmiCUUkq5pAlCKaWUS5oglFJKuaQJQimllEv/D2y1xXQI40BGAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "callbacks = [\n", + " ReduceLROnPlateau(factor=0.5, min_lr=1e-6),\n", + " EarlyStoppingCallback(patience=5),\n", + " # SaveModelCallback()\n", + "]\n", + "archs = {\n", + " 'LSTMPlus': {'n_layers':3, 'bidirectional': True}\n", + "}\n", + "model = create_model(TSTPlus, dls=dataloaders, verbose=True)\n", + "learner = Learner(\n", + " dataloaders, model, metrics=[mse, mae]\n", + ")\n", + "learner.lr_find()" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
epoch   train_loss      valid_loss       mse              mae          time
0       2893242.750000  33424628.000000  33424628.000000  4935.940918  01:31
1       2916043.750000  35351892.000000  35351892.000000  5150.010742  03:49
2       2981291.500000  33067046.000000  33067046.000000  4932.229004  03:51
3       2908789.750000  33899084.000000  33899084.000000  5003.193359  04:06
4       3033225.000000  34537444.000000  34537444.000000  5049.706055  04:20
5       2969528.750000  33934708.000000  33934708.000000  4960.640137  04:21
6       3303880.250000  36272200.000000  36272200.000000  4989.996094  03:13
7       3163850.000000  36075752.000000  36075752.000000  5165.191895  04:09
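
Reading the table: valid_loss tracks the mse metric exactly (the training loss here is MSE), the best validation loss lands at epoch 2 (33067046.0), and EarlyStoppingCallback(patience=5) halts the run after epoch 7, once five epochs pass without improvement; this matches the "No improvement since epoch 2: early stopping" message below. As a minimal sketch of that callback wiring, assuming the dataloaders and model objects defined earlier in this notebook (SaveModelCallback, commented out in the cell above, is the piece that would restore the epoch-2 weights after training; the checkpoint name 'best' is an arbitrary illustration):

    from fastai.callback.tracker import EarlyStoppingCallback, SaveModelCallback

    # Stop once valid_loss has not improved for 5 consecutive epochs, and
    # checkpoint the best epoch so fit() ends with those weights loaded back.
    cbs = [
        EarlyStoppingCallback(monitor='valid_loss', patience=5),
        SaveModelCallback(monitor='valid_loss', fname='best'),  # writes models/best.pth
    ]
    learner = Learner(dataloaders, model, metrics=[mse, mae], cbs=cbs)
    learner.fit(10, lr=1e-3)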
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No improvement since epoch 2: early stopping\n" + ] + }, + { + "data": { + "text/plain": [ + "30076" + ] + }, + "execution_count": 55, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "learner.fit(n_epoch=10, lr=1e-3, cbs=callbacks)\n", + "gc.collect()" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwEAAAIYCAYAAAA1seDyAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAAsTAAALEwEAmpwYAACcZUlEQVR4nOzdeXxU1f3/8dcnewJhCTsBBZRNUcKmuOO+obhrq7UulbpXu1rbn22tdvt2s3UrLlXrgpa6VXGtKFpxAQUEAUHFsihb2ElClvP749yQSUgghJm5dzLv5+Mxj5m5986ddybJ3Pu559xzzTmHiIiIiIikj4ywA4iIiIiISHKpCBARERERSTMqAkRERERE0oyKABERERGRNKMiQEREREQkzagIEBERERFJMyoCpFUws8VmdkzYOUREJLHMbKCZzTSzjWZ2rZndbWb/Lw7r7WNmzsyympiv7Yy0Ko3+oYuIiIhE1A+BKc65krCDiKQytQSIiIhIKtkTmBt2CJFUpyJAWhUzyzWzP5vZ8uD2ZzPLDeZ1NrPnzGydmZWa2ZtmlhHM+5GZLQualxeY2dHB9Awzu8HMPjWzNWb2hJkVBfPyzOzhYPo6M3vfzLqF99OLiLRuZvYacCRwu5ltMrMBZvaAmd0SzB9jZkvN7HtmttLMvjSzi2Nef7KZfWhmG8xsiZn9vIU5tK2RlKciQFqbnwCjgRJgKHAA8NNg3veApUAXoBtwI+DMbCBwNTDKOVcIHA8sDl5zDXAacATQE1gL3BHM+ybQHugNdAIuB8oS9YOJiKQ759xRwJvA1c65ts65TxpZrDv+u7kYuBS4w8w6BvM2AxcCHYCTgSvM7LQWRNG2RlJeqEWAmd0fVOpzmrHsn4ITgWaa2Sdmti4JESX1nA/c7Jxb6ZxbBfwC+EYwrxLoAezpnKt0zr3pnHNANZAL7GNm2c65xc65T4PXXA78xDm31DlXAfwcOCs4cawS/4W8t3Ou2jk3wzm3IWk/qUga0fZCdkElfjtQ6ZybDGwCBgI45153zn3knKtxzs0GHsPveO8qbWsk5YXdEvAAcEJzFnTOXe+cKwlOBPor8GQCc0nq6gl8EfP8i2AawP8Bi4CXzewzM7sBwDm3CLgO/6W70swmmlnta/YEngqaYNcB8/Bf5N2AfwAvAROD5uDfmVl2In84kTT2ANpeSPOscc5VxTzfArQFMLMDzWyKma0ys/X4ne/OLXgPbWsk5YVaBDjnpgKlsdPMbC8ze9HMZgT96AY18tKv4at3kYaW479Ma+0RTMM5t9E59z3nXD/gVOC7tf0xnXOPOucODV7rgN8Gr18CnOic6xBzy3POLQuO8PzCObcPcDAwFt/MLCJxpu2FxMmjwLNAb+dce+BuwFqwHm1rJOWF3RLQmAnANc65EcD3gTtjZ5rZnkBf4LUQskn0PQb81My6mFln4CbgYQAzG2tme5uZAevxR1lqzI85fVRwUlc5vq9lTbC+u4Fbg787gvWOCx4faWb7mVkmsAHfZFuDiCSLtheyqwqBUudcuZkdAHy9hevRtkZSXqSuE2BmbfFV7j/9/w7g+8/FOg+Y5JyrTmY2SRm3AO2A2cHzfwbTAPoDt+NP1loL3Omcm2Jm+wO/AQbjv1zfBsYHr7kNf5To5aDZdiXwOPAM/uSzu4Fe+D6nj+ObbUUkwbS9kBa6EviDmd0OvAE8gT9JeFdpWyMpz/y5KiEGMOsDPOecG2Jm7YAFzrkeO1j+Q+Aq59zbycooIiLh0/ZCRCR+ItUdKDjb/XMzOxvAvKG184P+nh2BaSFFFBGRCND2QkRk94Q9ROhj+C/ogeYv7nEpftitS81sFv6KgONiXnIeMNGF3XwhIiJJpe2FiEh8hd4dSEREREREkitS3YFERERERCTxVASIiIiIiKSZ0IYI7dSpk+vbt29Yb7/LqquryczMDDtGsyhr4qRS3lTKCqmVN1lZZ8yYsdo51yXhbxRxUdxeRO3vNWp5IHqZopYHlKk5opYHopmpJduL0IqA3r17M3369LDefpeVlpZSVFQUdoxmUdbESaW8qZQVUitvsrKa2RcJf5MUEMXtRdT+XqOWB6KXKWp5QJmaI2p5IJqZWrK9UHcgEREREZE0oyJARERERCTNqAgQEREREUkzoZ0TICKyI5WVlSxdupTy8vKwo2ynpqaGFStWxG19eXl59OrVi+zs7LitU0REZEdUBIhIJC1dupTCwkL69OmDmYUdp56qqiqysuLz9emcY82aNSxdupSojYAjIiKtV1y7A5lZBzObZGbzzWyemR0Uz/WLSPooLy+nU6dOkSsA4s3M6NSpUyRbPEREpPWKd0vAbcCLzrmzzCwHKGhqQaveGue3FpHWprUXALXS5ecUkRg11VjFBqhuB5nqmCHJF7e/OjNrDxwOXATgnNsKNLmnn7n2U1jzKXTaK14RRETiZt26dTz66KNceeWVu/S6k046iUcffZQOHTokJpiIpDbnYMFkePmndCz9zE/LzIXctpDTBnIKYx639bcmnxf6+4bPs/NBBxdkJ+JZevYFVgF/N7OhwAzgO865zU2+4pGz4NJXoE3nOMYQEdl969at484779yuCKiqqtrh6yZPnpzIWJFmZnnAVCAXv32Z5Jz7WYNlLgL+D1gWTLrdOXdvMnOKhOarOfDSj+HzqdB5IFsO+REF2ZmwdVNw2wwVwePy9bBhed3zrZugZsffP9tYZlAwtNm1giK3EGs3CIjWhbAkMeJZBGQBw4FrnHPvmtltwA3A/6tdwMzGA+MB+hZ3xa1fRvVDZ7DhjEchu8meQ5FQVlZGaWlp2DGaRVkTJ5XyplJW2D5vTU3NTne4E+lHP/oRn376KUOHDiU7O5u8vDw6duzIggULmDNnDuPGjWPJkiVUVFRw9dVXc9lllwGw9957884777Bp0yZOOeUUDj74YN555x169uzJk08+SX5+fqPvV1NTk1K/ryZUAEc55zaZWTbwlpm
94Jx7p8Fyjzvnrm7uSq2mEmpqIEOjWkuK2rQKptwKHzwIee3hxP+DkRdTvn4jBc298qxzUL21flGwdTNUbPT3jT7fFCwfPN+wtP7zyi3bvU2HzFzY93QYfiHsebBaFFqxeBYBS4Glzrl3g+eT8EXANs65CcAEgJKSEmdn30rW4xdQ9J/vwbmPRLpPXBQvEd0UZU2cVMqbSllh+7wrVqzYNgLPL/49l4+Xb4jr++3Tsx0/O2XfJuf/9re/Ze7cucyaNYvXX3+dk08+mTlz5tC3b1+qqqr4+9//TlFREWVlZYwaNYpzzjmHTp06AZCVlUVWVhYLFy7kscce47777uOcc87hmWee4YILLmj0/TIyMlLq99UY55wDNgVPs4Ob2931Zq6eD7/uBV0GQJdB0Dm47zIQOvaBjMzdfQuRxKjaCu/9Dd74nd/xPmA8HPEjKGjB/7oZZOX6W5tO8clXUx0UBEFRsGklFTMeJW/BszB7InTa2xcDQ78ObbvE5z1Tndvtr7TIiNtet3PuKzNbYmYDnXMLgKOBj3f4okEnw0m/h+e/62+n3KaKU0Qi6YADDqg3hOdf/vIXnnrqKQCWLFnCwoULtxUBtfr27UtJSQkAI0aMYPHixcmKGxozy8R3B90buCPmwFCsM83scOAT4Hrn3JJG1rOt5bh/z46U73sOmWsWkrloChmzHtu2nMvMobrjXlQX7R3c+lNd1J+a9ntCZuKuuxC1lrbI5KmuJHPNJ2SumktVu70oZUTYibZJ6mfkHNmfv0rBm78ic/1itu45hi2H/YSaor2hHCgvTX6mHcqFjFxo14my0TeSf+iN5CyaTO7ciWS/chPuPzdT2fcYKoacR+Ueh4Elr1UuEp9R9Vayl/yXnIWTyf7sZQrzO7PhyF9S1fvgcHPtpngfer8GeCQYGegz4OKdvmLUpbBhGbz5B2jfC474YZwjiUiq29ER+2Rp06bNtsdvvPEGr776KtOmTaOgoIAxY8Y0OsRnbm7utseZmZmUlZUlJWuYnHPVQImZdQCeMrMhzrk5MYv8G3jMOVdhZt8GHgSOamQ99VqO8077c93M8vWweiGsmo+tmk/WqgVkrZwNn/y7bpmMLCjay7cW1LYadBkInfpDdt5u/5xRa2kLJY9zsO5/sGyGvy2dDl/Ogir/d94WYM9D4aCrYMAJoXfnStpntGIuvHQjfPa6b7U6fxI5/Y8lJ8xMu2Bbpm6XwSGXwaoF2AcPkTPrMXI+fRHa7wHDLvC39sXJy5NsVVvhsykw92lY8Lz/3sltBwOOx33xLu2eOh+GnAXH3wqF3ZOfLw7iWgQ452YCI3f5hUf9P3/yy5RboV1P/4clIhKiwsJCNm7c2Oi89evX07FjRwoKCpg/fz7vvNOwy7s459aZ2RTgBGBOzPQ1MYvdC/xul1ee1x56jfS3WFs3B8XBAlg1H1Z/Ais/hvnPgavxy1iG70LUsFtR5wH+BElpWtk6WP5BsMM/A5ZNh82r/LzMXOgxFEZeDMUjoNsQtsx+loKPHoKJX/MF2egroOTr/iTU1mjzar8fM+MBv7N44u9g5CUJbZFKii4D/Y7u0TfB/Ofhg4fg9V/BG7+BvY+B4d+EAcen/s8JUFUBn06Bj5+G+ZOhYj3ktodBJ8E+p8FeR0JWLutXLqdo7oPw1p/gk5fgqJ/AqMsi3a29MdFIawan/AU2fgXPXgttu0P/Y8JOJSJprFOnThxyyCEMGTKE/Px8unXrtm3e8ccfzz333MPgwYMZOHAgo0ePDjFpdJhZF6AyKADygWOB3zZYpodz7svg6anAvLgFyGkDPUv8LVZVBaxZ5AuDVZ8E9wtg4StQU1m3XPve9VsOOgetB/kd4hYxZVRXwoo59Xf4V39SN79Tf78DWDzCF2Nd94Ws+se6y4d3peDI78K8Z2Ha7TD5+/DaLX7H+IDx0K5Hkn+oBKnaCu9NCPr9b/I7g2NuaFm//yjLyoUhZ/jb2sXw4cP+9vj50LabL/CGXwhF/cJOumuqKuDT14Ij/i/E7PifDPueBv3G+J89VlYeHPlj2P8cmPwDePEG+PAROPkPsMeBIfwQLWMupBMcSkpK3MyZM+tPrNgIfz8R1nwGFz8PPYeFkq0xUWyya4qyJk4q5U2lrLB93nnz5jF48OAQEzWtqqpq20nL8dLYz2tmM5xzu966GhIz2x/fvScTf0X6J5xzN5vZzcB059yzZvZr/M5/FVAKXOGcm7+j9Ta6vYiH6kq/M7Nqfl1hsGq+b02oiune1bZ7XXeioNVgnWtLh14DI3NUe7f+352DdV/47jzLPvA7/F/OqvsMCjr7Hf3ikdBrBPQc3qzCqF4m52DJezDtrzDvOd9da8iZvqtQj/1blnsXxf070Tn45EV46SdQ+qkvio7/lf8bCStTHOxSpuoqWPSKbx345CVw1dDnMBhxEQwaG92ud5Xlfsf/46eDHf8NvoVx0Fh/xL/fmO2K2iYzOecL3RdugI3LYdg34JhfxO/k7WZqyfYiGi0BtXIL4fxJcO+x8Mg58K1XfLOtiIhEnnNuNrDd0Rvn3E0xj38M/DiZuZqUmQ2d+/vb4FPqptdU+77usd2KVs2HmY/6I71Ah9plcwp9f+DC7v5o6LbH3etPzy2MzsAXZevq+vHX9uXfstrPy8qDHiUw8lK/w188EjrssfvZzfwR0j0OhNLP4N2/wQf/8CPQ9D0cDroa9j429PMGmm3Fx0G//ynb+v3T/9iwUyVfZhYMPNHfNnwJMx/xBcG/LoX8jrD/eTDim9A1Agd0anf85z7ld/y3boS8DjD4VH/Ev+8RO9zxb5IZ7DMO9joa3vgtvHOn74J49M98V6kI/01HqwgA/4V5wSS47zh4+Ey45OWkV1MiIpLGMjKhqK+/DTyhbrpzfiCL1QvZ9NUi2tZshE0rfFfWjV/5I+gbV2w7Mbae7DZQ2G374qCwR/3pee3jWyxUbY3p1jPd369ZWDe/80Dof1zdDn+3fRPft7uoH5z4WxjzYz9u/rt/g0fP8V2MDrrS7zjmRPTaQZvX+P7w0+/3hd0Jv/UDnLSG/vC7q10POPz7cOh3YfFUmPEgTL8P3r0Leo3yO8T7np7cc28qy+HT/9R19and8d93HOxzui9AW7Lj35jctnDcL323qOe/B89d57tLnfyH7bsoRkT0igDwTWlffxwePBUeOw8ufCa6XwgiIpIezPwodu17sbXjUGisi4JzfhSR2OJg01e+ONj4pZ/+5SzfdaJy8/avz8prpEUhKBa2Te/hj7I2LBac892btu3wT4cvZ0N1hZ/fpqvv1jP0XL/DXzzcFx1hye8Ah3wHRl8JHz8Db/8Vnrse/vNLv2M96jL/s0dB1VZ4/15/MmzFJhj1LV/EtLZ+//GQkeG70/Qb44um2RN9QfDs1fDij2G/M/25Az2HJ6Z1rLIMFv0n6Orzot/xz+/oj/bXHvFPZNHWdTBc9DzMfhxe/incc6T/eznyJ5E7vyiaRQDAHqPhzHvhiQvhycvgnId0QRgREYk2M7+hz++w877hFRt9cbDpqwYFQ3BbOc+PVF
LRyIXyMnPqioK23WhbvhlWzoYtweBLWfn+6OMBl9WdvNu+d3S6JMXKzIb9zvLnCPxvGky7A6b+Hv57G+x3ti8Sug8JJ5tzsPBl3/VnzSLY6yjf7z8K3VtSQZtO/ryP0Vf6c0I+eBBmPe5HUOq2n+8qtN/Zu79zXFkGi171R/w/edF328svgiGn+z7+fQ9PbmuNGQw9zw+N+9otvoCc+7QfZWm/syPzfxjdIgBgn1P9EFsv/ABe+KG/sFhEPjgREZHdklvob5333vFyW7c0KBQatDKs+ZSMmhoYcGLQrWcEdN0n9bqomMGeB/vbmk/h3bt9d4qZj/ijygddA3sfnbz9gJXz/M7/p6/5K+d+/QnfdUr7Ibsu9pyQE34NH03yBcHk7/uj5fuc5lsH9jy4+Z9vZZkf4evjp33L2rYd/zP9Ef8+h4X/P5DfAU7+PQw733cRevIyf87ESb+HroPCzUbUiwCAA8fDhqX+iEC7Yjjsu2EnEhERSZ6cAt+PfgdDL26I4Cgzu6XTXnDS//kuNzMe8ENwPnKmH7519JV+aMbs/MS895ZSmFLb778tnPAb350j7B3K1iKvfdDd61JYPtPvFH/0T99tqNPevhgY+nVo22X7127d4kcjmvt0XZe6gk6+JWmf04Id/wju2vYcBpe+6gufV38Odx/iW0gO/2Go1yeJ7inLsY7+Oex3DvznFzBrYthpRES207at/yJfvnw5Z511VqPLjBkzhunTpyczlkhqKyjyB/++MxtOn+C7Qf37WvjTEJjya9i0Kn7vVV0J79wFfynxBcDIS+CaD/1FzlQAJEbPEhj7R/jefDjtLj8c7Ss3wR8HwePf8F18tm72O/3/vAj+by/fTfzzqb4QvPAZ+N4ncMpt/kJeUSwAamVk+IvpXTPDn/z+39vgjgPh42d9t7MQRPjTipGRAePu8M2ez1wFbbv6fnkiIhHTs2dPJk2aFHYMkdYlK8ef0Lz/ObD4LX/ewBu/8Vds3f8cf1R1d/rpf1Lb738h9DvSd1lRv//kyWnjR9Up+bofmveDh/yQvPOepSMGOGjTxfez3+c02POQaO/w70ibznDaHTD8G76L0BPf8MPjnvS7pF9oLXU+wawcOPdh+PtJ8PiFcPHkpF1gRETSzw033EDv3r256qqrAPj5z39OVlYWU6ZMobS0lKqqKm655RbGjRtX73WLFy9m7NixzJkzh7KyMi6++GJmzZrFoEGDKCtrZOhIEWk+M+h7mL+tXuiP3M98FD78hx+n/aCr/EHC5vYrXzkfXv6JP+LcaW/42uMw4Hj1+w9Tl4H+BNqjb4L5z1P++bvkDznZ7/i3pgFi9hgN49/wXd2m3Ap3jPatXodcF5eLrDVH6hQB4PuRnf/P4GJiZ8G3XvUXMRGR1u2FG+Crj+K7zu77wYm/aXL2ueeey3XXXbetCHjiiSd46aWXuPbaaykoKGDdunWMHj2aU089FWtih+Guu+6ioKCAefPmMXv2bIYPHx7fn0EknXXu77uSHPVT333nvQnw8Bn+pOjRV/pRWJramdpSCq//xo/aktPWj/gz6rL4jRkvuy8rF4acQVnPMeS3pvNdYmVm+Wtj7Hu6L0Zf/7Xv9n7S76H/MQl/+9Q4JyBWu57+YmJV5f5iYmXrwk4kIq3QsGHDWLlyJcuXL2fWrFl07NiR7t27c+ONNzJs2DCOOeYYli1bxooVK5pcx9SpU7ngggsA2H///dl/f7VeisRdQZG/SNV1H8Fpd4Nl+DHp/zwE3vgdbF5dt2x1pb842V+Gwfv3+D7a137oWxBUAEhY2vWAs+735zhkZPmT4B//BqxfmtC3Ta2WgFpdB8N5j8GDp/ihpcbdHnYiEUmkHRyxT6Szzz6bSZMm8dVXX3HuuefyyCOPsGrVKt577z3y8/Pp06cP5eXloWQTkQaycqHka77f+OdTYdrtvpvFm3+AoeeR07kEZtwFqxf4IUeP/zV02yfs1CJ1+o2BK/7rL5w39ff+omdjfuRbthJwcnrqtQTU6hMMr/ThP2Dxf8NOIyKt0LnnnsvEiROZNGkSZ599NuvXr6dr165kZ2czZcoUvvjiix2+/vDDD+fRRx8FYM6cOcyePTsZsUXSmxn0O8J3H77qPV8UzJpI25eug5oq+NpE+MbTKgAkmrJyfcvWVe/6v+NXboK7D/UnxMdZ6hYBAGNu8OcE/Ps7UFURdhoRaWX23XdfNm7cSHFxMT169OD8889n+vTplJSU8NBDDzFo0I4v9nLFFVewadMmBg8ezE033cSIESOSlFxEAH+S6Sm3wfVz2XjqA3DlOzDwRJ34K9HXcU/42mO+aK3cAg+cDE9+GzatjNtbpGZ3oFo5beDkP/m+U2/9yRcFIiJx9NFHdSckd+7cmWnTplFVVUVWVv2vz02bNgHQp08f5syZA0B+fj4TJ+raJiKha9OZyj5HqN+/pJ6BJ0LfI3y3tv/eBgte8CfDj7p0t0dLSu2WAPBnTw850384qz4JO42IiIiISPzkFMDR/w+unAbFw+CFH8A9R8LS3bv4ZOoXAeAv6Z2dD89dH9pV10REREREEqZzf38+y1l/992C7j3Gd4nfUtqi1bWOIqBtVzj2ZvjiLfjw4bDTiIiIiIjEnxkMOQOuft8PkPPBP+D2kS1aVesoAgCGXQh7HOSHDN20Kuw0IhIHLk1a9tLl5xQRkTjJLfRXVv72VH+BvBZoPUVARoYfAWDrZnjpxrDTiMhuysvLY82aNa1+B9k5x5o1a8jLS85l4kVEpBXpPgQueq5FL03t0YEa6jIQDr0epv7OXzBkr6PCTiQiLdSrVy+WLl3KqlXRa9mrqakhIyN+x1Dy8vLo1atX3NYnIiKyM62rCAA47Hsw90l/kvAV0/wZ1SKScrKzs+nbt2/YMRpVWlpKUVFR2DFERERarPV0B6qVnQdj/wRrF/sWARERERERqaf1FQEAfQ+HkvPh7b/CirlhpxERSQtmlmdm75nZLDOba2a/aGSZXDN73MwWmdm7ZtYnhKgiImkvrkWAmS02s4/MbKaZ7d4VDHbXcbdAXns/fmpNTahRRETSRAVwlHNuKFACnGBmoxsscymw1jm3N/An4LfJjSgiIpCYloAjnXMlzrmWDVoaLwVFcPyvYOn7MP2+UKOIiKQD520KnmYHt4bDO40DHgweTwKONjNLUkQREQm0vhODY+1/Lsx8FP5zMwwaC+16hJ1IRKRVM7NMYAawN3CHc+7dBosUA0sAnHNVZrYe6ASsbrCe8cB4gOLiYkpLW3ZFzEQpKyuLVKao5YHoZYpaHlCm5ohaHohmppaIdxHggJfNzAF/c85NiPP6d42ZP0n4roPhhR/Cuf8INY6ISGvnnKsGSsysA/CUmQ1xzs1pwXomABMASkpKXNRGY4raCFFRywPRyxS1PKBMzRG1PBDNTC0R7yLgUOfcMjPrCrxiZvOdc1NrZ4ZyZMc6kjfqagqm/Z6N05+gst8xLVpNKlV9ypo4qZQ3lbJCauVNpaxhcc6tM7MpwAlAbBGwDOgNLDWzLKA9sCaEiCIiaS2uRYBzbllwv
9LMngIOAKbGzA/nyM7RP4JFz1M49Rew30mQ23aXV5FKVZ+yJk4q5U2lrJBaeVMpazKZWRegMigA8oFj2f7E32eBbwLTgLOA11xrvyy0iEgExe3EYDNrY2aFtY+B46h/9Cc8WTlwym2wYSlMuTXsNCIirVUPYIqZzQbeB15xzj1nZjeb2anBMvcBncxsEfBd4IaQsoqIpLV4tgR0w/f/rF3vo865F+O4/t2zx4Ew8hJ4927Y/xzoOSzsRCIirYpzbjaw3Zerc+6mmMflwNnJzCUiItuLW0uAc+4z59zQ4Lavcy56h9yP/hm06QLPXgvVVWGnEREREREJReu8YnBT8jvAib+Fr2b7FgERERERkTSUXkUAwD6nQf/j/bkB6/4XdhoRERERkaRLvyLADE7+vX/8/PdBg1KIiIiISJpJvyIAoMMecORPYOFL8PHTYacREREREUmq9CwCAA68HHoMhRd+BGXrwk4jIiIiIpI06VsEZGb5awdsXgWTf6BuQSIiIiKSNtK3CAB/rYAxP4aPnoDXbgk7jYiIiIhIUsTzYmGp6fAfwPql8ObvoV0PGPWtsBOJiIiIiCSUigAzOPmPsGmlHy2obTcYfErYqUREREREEia9uwPVysyCs+6H4hEw6VL4YlrYiUREREREEkZFQK2cAvj6E9ChNzx2HqycH3YiEREREZGEUBEQq00nuOBfkJULD58JG5aHnUhEREREJO5UBDTUsQ+c/08oXw8Pn+XvRURERERaERUBjekxFM79B6xeABPPh6qKsBOJiIiIiMSNioCm7HUknHYXLH4Tnvo2uJqwE4mIiIiIxIWGCN2R/c+BjV/CKzdRkN0Rxv3RDykqIiIiIpLCVATszMHXwoYvyXv3LujaDw6+JuxEIiIiIiK7RUXAzpjB8b+iovR/5L78U2jbHfY/O+xUIiIiIiItpiKgOTIy2HzsH8jduh6evgLadoF+Y8JOJSIiIiLSIjoxuLmycuG8R6DzAJh4AXw5O+xEIiIiIiItoiJgV+R3gAsmQV57eOQsWPtF2IlERERERHaZioBd1a6nv6pwVYW/qvCW0rATiYhEgpn1NrMpZvaxmc01s+80sswYM1tvZjOD201hZBURSXcqAlqi6yD42kRY9z949BzYuiXsRCIiUVAFfM85tw8wGrjKzPZpZLk3nXMlwe3m5EYUERFQEdByex4EZ94LS6fDvy6F6qqwE4mIhMo596Vz7oPg8UZgHlAcbioREWmMRgfaHfucCif9H0z+Pjz5LdjnNOjcH4r6QXZ+2OlEREJjZn2AYcC7jcw+yMxmAcuB7zvn5jby+vHAeIDi4mJKS6PV9bKsrCxSmaKWB6KXKWp5QJmaI2p5IJqZWkJFwO464DLYsgZe/w3MfSqYaNC+N3TeGzr1h0571z1uVwwZaoARkdbLzNoC/wKuc85taDD7A2BP59wmMzsJeBro33AdzrkJwASAkpISV1RUlNjQu6i0tJQoZYpaHohepqjlAWVqjqjlgWhmagkVAfEw5gY46Goo/RRWL4Q1i/xt9UJY8ghs3VS3bFY+dNrLFwad9vYtB7WP8zs0/R7OQXUl1FRBTSXUVPvH26ZVbXuesaUCCjIht52/2JmISJKYWTa+AHjEOfdkw/mxRYFzbrKZ3WlmnZ1zq5OZU0Qk3cW1CDCzTGA6sMw5Nzae64683LbQY6i/xXIONq3Yvjj46iOY929w1XXL5hdBZk6wk1/lzzOo3el3Nc2O0qH2QWYOFHSGNp2hTZf69wW1j2und4acNrv7KYhIGjMzA+4D5jnn/tjEMt2BFc45Z2YH4M9NW5PEmCIiQvxbAr6DPxGsXZzXm7rMoLC7v/U9rP68qq2wdnFQHCyE0s/9zn5mNmRk1b9lZkNGJmRkN/48Iwsy/f2mtatoa+WweRVsXg1bVvvHaxbC5jVQubnxrNkFdUVDp73h6P8HHfZI+EckIq3GIcA3gI/MbGYw7UZgDwDn3N3AWcAVZlYFlAHnOedcCFlFRNJa3IoAM+sFnAzcCnw3Xutt1bJyoMsAf4ujraWlsKO+als3++IgtkCoLRg2B88XTIZPXoST/wD7na1uRSKyU865t4Adflk4524Hbk9OIhERaUo8WwL+DPwQKGxqgaiP9rAjqXQmePOyFkJBIRT0hc7bz81Y/z/avPw9sp+8jIqPnmHLkbfg8tqHlDU6UilvKmWF1MqbSllFREQaE5ciwMzGAiudczPMbExTy0V9tIcdSaUzweOStagILnsZ3voTua//mtyvPoDT74J+Y+KSsVYqfa6QWnlTKSukVt5UyioiItKYeI1VeQhwqpktBiYCR5nZw3Fat4QlIxMO/z5861V/0vBD4+DFG6GyPOxkIiIiIrIb4lIEOOd+7Jzr5ZzrA5wHvOacuyAe65YI6DkMvj0VRl0G79wB9xwJX80JO5WIiIiItJCuWiXNk1MAJ/8ezp/kL452z5Hw379ATfOHLhURERGRaIh7EeCcez3trhGQTvofC1dMg/7HwSv/Dx46FdYtCTuViIiIiOwCtQTIrmvTCc59GMbdAcs/hLsOgdn/DDuViIiIiDSTigBpGTMYdgFc/hZ0HQRPfgsmXQpla8NOJiIiIiI7oSJAdk9RX7hoMhz1U/j4ad8q8NkbYaeq4xxUbIKydWEnEREREYmMeF4sTNJVZhYc/gPY62h4crw/T+Cgq+HomyArN37vU1MD5et8a8OWNbClFMpKY+5rp62tP616K2Aw6GQ46CrY4yBdAVlERETSmooAiZ/i4X4o0Vf+H0y7HT59DfocBjVV4Kr9fU11cKuCmiraVpRDVkbMvODe1S3D1i1+Z758HbgmRiPKyIL8jlDQCfKLfAtF/nAoKPLTtpTCBw/B/Of8kKcHXQ37jIPM7KR+RCIiIiJRoCJA4iunAE7+A/Q/Hl74Icye6HfQa2+W6S9CFjzPcA6ycxuZXzstE7Lz63buC4rq7mMf57bb+dH9I37k80y7E/51KbxyExz4bRj+TcjvkJSPR0RERCQKVARIYgw4zt92YkNpKUVFRUkIhC9QRl4Cwy+CRa/41opXboLXfwvDvwEHXu5bEERERERaORUBkn4yMmDA8f725Wx45054/z54928weKzvKtT7QJ03ICIiEnGVlZUsXbqU8vLypL1nTU0NK1asSNr7xcrLy6NXr15kZ+9+d2YVAZLeeuwPp98NR/8M3r/HFwPz/g3FI2D0lTpvIFmcCzuBiIikoKVLl1JYWEifPn2wJB28q6qqIisr+bvQzjnWrFnD0qVL6dt393suqAgQAWjXw49mdNj3YNZjMecN/Cw4b+BCnTeQCNVV8Naf4M3f09E5yGsf3Nr58zy23bdv8Dxmel47yA3uVbCJiKSV8vLypBYAYTIzOnXqxKpVq+KyPhUBIrFy2sCob8GIS2DhSzDtDj/a0Ru/hWHfIKv4cCg4xO98yu5ZvQie+jYsmw6DxlKe35182wrlG6Big7/fsKzueeWWna+z1wFw6PUw4ATf7UtERFq9dCgAasXzZ1URINKYjAwYeKK/LZ8ZnDdwD+3evcvP
L+oHPYbG3Er8KEWpYkspLHkPug6Gjnsm971rauD9e/1J2Vm5cNb9MORMykpLyd/RSeLVlVCxEcrX1xUJsfebV/vRnyZ+DbruA4dcB0PO9NexSJbKcpj3LHw6xV9Ru88hyXtvERGRXaAiQGRnepbAGRPg+F+zccFUCjd9Cl/OgmUzYO5Tdcu138OfY9CjxBcGPUugbdeQQjdQvgG+eBs+n+pvKz7y0zOy/YhJh/8A2nZJfI71S+GZq+Cz12HvY+HUv/quWM2RmV03NGxTjvgRzPmX72L01HiYcgscfK3fIc/Oj8uP0KiV8+GDB31XsrK1kJnjC5LDf+g/22QWIiIiklTr1q3j0Ucf5corr9yl15100kk8+uijdOjQITHBdkJbJpHmatOJyj5HQNHpddO2lMJXs31RsHymv5//XN38wh71Wwt6DIV2PRM/8tDWLWT970344EP4/E1Y/qG/AFtmLuxxIBz1U+g1yhcx798LMx+Bg6/xV1TOLYx/Hufgo3/C89+HmkoY+ycYcXH8P4fMLBh6Lux3tu/O9eYfYfL3fXeu0VfCqEvj15Wrsgw+fhZm/B3+N80XVINPgREX+QvSTf4BvPEbX3SdeQ+07xWf9xURkUhZt24dd95553ZFwM5OIJ48eXKio+2QigCR3VFQBP3G+Fut8g3w1Ufw5UxfFHw5Cxa+XHe149z20KG33yls3wvaB4877OHv23bf9f7sVRWwdDosftPvdC55j3Y1lf6Ca8Uj/QnPfQ/3O/7ZeXWv6zfGD4n62i/h9V/De/fAET/0O+hZObv32dTavAaevx4+fsYPvXraXdBpr/isuym13bkGnABf/NcXA//5hW8hGHWpLwha2kqzch4Fb/8N5j/lr2Jd1A+OvRlKzoc2neuWO+NvsNeR8Nx34a5D4LQ7YdDJcfnxREQkOm644QY+/fRTSkpKyM7OJi8vj44dOzJ//nw++eQTTjvtNJYsWUJ5eTnf+c53GD9+PAB9+vRh+vTpbNq0iRNPPJFDDz2Ut99+m+LiYp555hny8xPYgo2KAJH4y2vn+4LH9gffuhlWzPWtBas/8d1i1i/1R5DL19d/fUa2by1o3zumWIgpFNoV++4mX86Cz9/wO/3/eweqygDzrQ2jr2Bj5+EU7nss5Lbdcd7O/eGch2DpDHj1Z/5Kz9Pu8K0FQ87avRNsF7wIz17ju8gc83PfNScjs+Xr21Vm0OdQf1s+0xcBb/0Z3rnLdxE6+NrmnRNRWQZzn4YZD8CSd8jNyIZ9TvVH/fsc1nSLxtDzfOE16WKY+HUYdRkcd0v9QkxEROLiF/+ey8fLN8R1nfv0bMfPTtl3h8v85je/Yc6cOcycOZPXX3+dk08+mTlz5mwbxvP++++nqKiIsrIyRo0axZlnnkmnTp3qrWPhwoU89thj3HPPPZxzzjn861//4oILLojrz9KQigCRZMhpA70P8LeGakfBWbcE1tfegiLh8zdh4/K6VoRamTlQvdU/7roPjPimP9K/58GQ3xGAytLSnRcAsXqNgG/+Gz79D7z6c3jyMvjvX+CYn8Hex+xa152KjfDSjfDBQ9BtCHzjKeg+pPmvT4SeJXDOg35Uov/+GWY8CNP/Dvud5U8i7rbP9q9Z8XFdX//y9dBpbzjuFtbtcQIde/Vv3vt22gsufQX+c7O/SvX/pvmTobsMjOMPFw1m1ht4COgGOGCCc+62BssYcBtwErAFuMg590Gys4qIJMoBBxxQbxz/v/zlLzz1lD+HcMmSJSxcuHC7IqBv376UlJQAMGLECBYvXpzwnCoCRMKWF4x733Vw4/Orq3whUFsYrPufP7JePNwfhY7nycdmfoe/31Ew90nfTeiRs2DPQ+HYX0CvkTtfx+L/wtOX+6yHXg9jfuxHAYqKznvDuNt9rml3+KP7sx+HASfCYd/1RcvHTwdH/d/1Bdfg2qP+h4IZrrR0194zKxeOvxX6HuE/mwlj4EQ/7GwruzJ1FfA959wHZlYIzDCzV5xzH8cscyLQP7gdCNwV3IuI7JadHbFPljZt2mx7/Prrr/Pqq68ybdo0CgoKGDNmTKNXN87NrdtOZmZmUlZWlvCcKgJEoi4zy3cD6rBH8t4zI8MfIR98qj8S/sZv4d6j/YmvR90EXQZs/5rKcj8az9u3+y42F78Ae4xOXuZd1b4YTvgVHP59eG8CvHs33HcsZOX7rlWd+sNxt8LQr0GbTjtfX3MMOA4u/68fuejZa/xQoqf8udVcd8I59yXwZfB4o5nNA4qB2CJgHPCQc84B75hZBzPrEbxWRCTlFBYWsnHjxkbnrV+/no4dO1JQUMD8+fN55513kpyuaSoCRKRpWTlwwGV+R3jaHfD2X2D+874//Zgf+3MXwJ+f8OS3YdU8f1LxcbfsWlekMBUUwZgb/AnSHzwIqxf6AmjPQxJzlL5dD/jG075L0mu3+oulnXk/9B4V//cKkZn1AYYB7zaYVQwsiXm+NJhWrwgws/HAeIDi4mJKd7X1JcHKysoilSlqeSB6maKWB5SpOXaWp6amhqqqqiQm2v4927dvz8EHH8yQIUPIy8ujW7du2+Yfc8wx3HXXXQwePJgBAwZw4IEHUl1dvW1+VVVVvce169/Rz1VTUxOX35GKABHZudy2MOZHfmSdqb/3w4rOfgIO/DbkFPqWgoJOcP4k6H9s2GlbJretHyI1GTIy/YhNfQ6DSZfC30+AI3/iz01oBVc6NrO2wL+A65xzLTpLzzk3AZgAUFJS4op2dCG5EJSWlhKlTFHLA9HLFLU8oEzNsbM8K1as2OEwnInQ2NCfjz32WKPLZmVl8eKLLzY6L7bf/5w5c7Y9/uEPf7jD98/IyIjL7yj1tzYikjxtOsOJv4FrZsA+p/kTh6fc4rsJXTktdQuAsPQ+AC5/EwaN9UOYPnw6bFwRdqrdYmbZ+ALgEefck40ssgzoHfO8VzBNRESSSEWAiOy6jnv6cfCveBsu+Bec/fcdX8lXmpbfAc5+AE65Df73Ltx1MCx8NexULRKM/HMfMM8598cmFnsWuNC80cB6nQ8gIpJ86g4kIi3XbZ/Gh9aUXWPmRx/qPdpfU+CRM/05CqnnEOAbwEdmNjOYdiOwB4Bz7m5gMn540EX4IUIvTn5MERFRESAiEhVdB8Flr8FLP/HXFEgxzrm3gB2eTR2MCpSkky9ERKQp6g4kIhIl2fkw9o9wzj/CTiIiIq1Y3IoAM8szs/fMbJaZzTWzX8Rr3SIiaWefU8NOICIirVg8WwIqgKOcc0OBEuCE4KQvEREREREB2rb119FZvnw5Z511VqPLjBkzhunTpyc0R9zOCQj6eW4KnmYHNxev9YuIiIiItBY9e/Zk0qRJob1/XE8MNrNMYAawN3CHc+7dBvMjfQXIHYnaFfR2RFkTJ5XyplJWSK28qZRVREQS64YbbqB3795cdZUf8+DnP/85WVlZTJkyhbVr11JZWcktt9zCuHHj6r1u8eLFjB07ljlz5lBWVsbFF1/MrFmzGDRoEGVlZQnPHdciwDlXDZSYWQfgKTMb4pybEzM/0leA3JGoXUFvR5Q
1cVIpbyplhdTKm0pZRUTSxgs3wFcfxXed3ffzF8ncgXPPPZfrrrtuWxHwxBNP8NJLL3HttdfSrl07Vq9ezejRozn11FPxl1PZ3l133UVBQQHz5s1j9uzZDB8+PL4/RyMSMkSoc26dmU0BTgDm7Gx5EREREZFUNGzYMFauXMny5ctZtWoVHTt2pHv37lx//fVMnTqVjIwMli1bxooVK+jevXuj65g6dSrXXnstAPvvvz/7779/wnPHrQgwsy5AZVAA5APHAr+N1/pFRERERJq0kyP2iXT22WczadIkvvrqK84991weeeQRVq1axYwZM8jOzqZPnz6Ul5eHlq8x8RwdqAcwxcxmA+8Drzjnnovj+kVEREREIufcc89l4sSJTJo0ibPPPpv169fTtWtXsrOzmTJlCl988cUOX3/44Yfz6KOPAjBnzhxmz56d8MzxHB1oNjAsXusTEREREUkF++67Lxs3bqS4uJgePXpw/vnnc8opp7DffvsxcuRIBg0atMPXX3HFFVx88cUMHjyYwYMHM2LEiIRnTsg5ASIiIiIi6eSjj+pOSu7cuTPTpk1rdLlNm/yI+n369GHOHH/qbH5+PhMnTkx8yBjx7A4kIiIiIiIpQEWAiIiIiEiaUREgIiIiIinLORd2hKSJ58+qIkBEREREUlJeXh5r1qxJi0LAOceaNWvIy8uLy/p0YrCIiIiIpKRevXqxdOlSVq1albT3rKmpISMjnOPoeXl59OrVKy7rUhEgIiIiIikpOzubvn37JvU9S0tLKSoqSup7JoK6A4mIiIiIpBkVASIiIiIiaUZFgIiIiIhImlERICIiIiKSZlQEiIiIiIikGRUBIiISF2Z2v5mtNLM5TcwfY2brzWxmcLsp2RlFRMTTEKEiIhIvDwC3Aw/tYJk3nXNjkxNHRESaopYAERGJC+fcVKA07BwiIrJzagkQEZFkOsjMZgHLge875+Y2tpCZjQfGAxQXF1NaGq3aoqysLFKZopYHopcpanlAmZojankgmplaQkWAiIgkywfAns65TWZ2EvA00L+xBZ1zE4AJACUlJS5qV+eM2hVDo5YHopcpanlAmZojankgmplaQt2BREQkKZxzG5xzm4LHk4FsM+scciwRkbSkIkBERJLCzLqbmQWPD8Bvg9aEm0pEJD2pO5CIiMSFmT0GjAE6m9lS4GdANoBz7m7gLOAKM6sCyoDznHMupLgiImlNRYCIiMSFc+5rO5l/O34IURERCZm6A4mIiIiIpBkVASIiIiIiaUZFgIiIiIhImlERICIiIiKSZlQEiIiIiIikmbgVAWbW28ymmNnHZjbXzL4Tr3WLiIiIiEj8xHOI0Crge865D8ysEJhhZq845z6O43uIiIiIiMhuiltLgHPuS+fcB8HjjcA8oDhe6xcRERERkfhIyDkBZtYHGAa8m4j1i4iIiIhIy8X9isFm1hb4F3Cdc25Dg3njgfEAxcXFlJaWxvvtE6asrCxl8ipr4qRS3lTKCqmVN5WyioiINCauRYCZZeMLgEecc082nO+cmwBMACgpKXFFRUXxfPuEKi0tJVXyKmvipFLeVMoKqZU3lbKKiIg0Jp6jAxlwHzDPOffHeK1XRERERETiK57nBBwCfAM4ysxmBreT4rh+ERERERGJg7h1B3LOvQVYvNYnIiIiIiKJoSsGi4iIiIikGRUBIiIiIiJpRkWAiIiIiEiaUREgIiIiIpJmVASIiIiIiKQZFQEiIiIiImlGRYCIiIiISJpRESAiIiIikmZUBIiISFyY2f1mttLM5jQx38zsL2a2yMxmm9nwZGcUERFPRYCIiMTLA8AJO5h/ItA/uI0H7kpCJhERaYSKABERiQvn3FSgdAeLjAMect47QAcz65GcdCIiEisr7AAiIpI2ioElMc+XBtO+bLigmY3HtxZQXFxMaemOaovkKysri1SmqOWB6GWKWh5QpuaIWh6IZqaWUBEgIiKR45ybAEwAKCkpcUVFRSEnqq+0tJQoZYpaHohepqjlAWVqjqjlgWhmagl1BxIRkWRZBvSOed4rmCYiIkmmIkBERJLlWeDCYJSg0cB659x2XYFERCTx1B1IRETiwsweA8YAnc1sKfAzIBvAOXc3MBk4CVgEbAEuDiepiIioCBARkbhwzn1tJ/MdcFWS4oiIyA6oO5CIiIiISJpRESAiIiIikmZUBIiIiIiIpBkVASIiIiIiaUZFgIiIiIhImlERICIiIiKSZlQEiIiIiIikGRUBIiIiIiJpRkWAiIiIiEiaUREgIiIiIpJm4lYEmNn9ZrbSzObEa50iIiIiIhJ/8WwJeAA4IY7rExERERGRBIhbEeCcmwqUxmt9IiIiIiKSGFnJfDMzGw+MByguLqa0NHVqhrKyspTJq6yJk0p5UykrpFbeVMoqIiLSmKQWAc65CcAEgJKSEldUVJTMt98tpaWlpEpeZU2cVMqbSlkhtfKmUlYREZHGaHQgEREREZE0oyJARERERCTNxHOI0MeAacBAM1tqZpfGa90iIiIiIhI/cTsnwDn3tXitS0REREREEkfdgURERERE0oyKABERERGRNBNaEbBy01be+WwNldU1YUUQEREREUlLSb1OQKzSzZWcN+EdCnOzOLR/Z8YM7MKYgV3p1i4vrEgiIrKbzOwE4DYgE7jXOfebBvMvAv4PWBZMut05d29SQ4qISHhFwICubfjdBSN445OVTJm/ihfmfAXA4B7tfEEwoAvD9+xIdqZ6LImIpAIzywTuAI4FlgLvm9mzzrmPGyz6uHPu6qQHFBGRbUIrAjIMThjSnROGdMc5x4IVG3l9wSqmzF/JPVM/467XP6UwL4vD+ndmzICuHDGwi1oJRESi7QBgkXPuMwAzmwiMAxoWASIiErLQioBYZsag7u0Y1L0dlx+xFxvKK/nvwtW8vmAVr3+ykskf+VaCfYJWgiMHdWVY7w5kqZVARCRKioElMc+XAgc2styZZnY48AlwvXNuScMFzGw8MB6guLiY0tLSBMRtubKyskhliloeiF6mqOUBZWqOqOWBaGZqiUgUAQ21y8vmxP16cOJ+PXDOMe/Ljbz+yUpen7+Kv039jDtf/5R2eVkc1r8Lo/sVMapvEQO6FpKRYWFHFxGRHfs38JhzrsLMvg08CBzVcCHn3ARgAkBJSYkrKipKbsqdKC0tJUqZopYHopcpanlAmZojankgmplaIpJFQCwzY5+e7dinZzuuHLM368sq+e+i1UyZv5KpC1fx/EdfAtA+P5uRe3ZkVN8iRvUpYr/i9uRkqaVARCSJlgG9Y573ou4EYACcc2tint4L/C4JuUREpIHIFwENtc/P5qT9enBS0Erwv9ItvL94Le9/Xsr7i0v5z/yVAORlZ1DSuwMH9PEtBcP36Eib3JT7cUVEUsn7QH8z64vf+T8P+HrsAmbWwzn3ZfD0VGBeciOKiAikYBEQy8zYs1Mb9uzUhrNG9AJg1cYKpi8u5b3Fvii4fcoial6DzAxj357tGNWnKLh1pFPb3JB/AhGR1sM5V2VmVwMv4YcIvd85N9fMbgamO+eeBa41s1
OBKqAUuCi0wCIiaSyli4DGdCnM3XY+AcDG8ko++N863v/cFwb/eOcL7nvrcwD26tKGA/oWsVeXthS1yaFjQQ4d2+TQsSCbDgU5tMvLwkznGYiINJdzbjIwucG0m2Ie/xj4cbJziYhIfa2uCGioMC+bIwZ04YgBXQCoqKrmo6XrfUvB56U8N/tLNpZXNfrarAyjQ0E2HQtyaJtjdGlXQFGbHDoU5FDUxhcKHYPH7fKyycnKIDvT33IyM8jOMrIzM8jKMBUTIiIiIhIZrb4IaCg3K5ORfYoY2acIxoBzjg3lVazdvJW1W4Lb5sptj0s3V7Juy1ZWrd/CF2u2MHPJOtZu2UpltWv2e5pRVxhkWl2hkFX3vLaAqLdM1o5fU2/5mNdv3ryJNm3KcDicA+egxjkcgGPb9JqYx36e89OcIyszg+IO+fQuyqe4QwH5OZmJ+YWIiIiISNKlXRHQkJnRPj+b9vnZ9KFNk8vFDgflnGPz1upthUPp5q1sLK+isrqGyuoatlbVsLXa+edVwbTa59uWcWytN9+/rqyymg3l/rFf1tVbb+3zqprmFyHx0LltDr06FtCrYz69i/x9r44F9O6YT88O+eRlq0gQERERSRVpXwS0hJnRNjeLtrlZ9C4qCCVDTY2jsiYoChoUEuvWradDh/aYGQZkmFHbG8nM588wMPx0C36m2McVVdUsX1fGktIylq7dwtK1ZSxZu4WPlq3npblfbdcS0rUwN6Y4yKd3xwJ6dSyga7tcOgVdqDJ1HQcRERGRSFARkKIyMozcjExys4AGgxyVZm6lqKjtbr9Hj/b5jNhz++nVNY4VG8pZutYXCLGFwowv1vLc7C+pbtBSkWFQ1CaHzm1z6dQ2h05t/H1BRjW9u2zaNr32viBHf5qSfM45qmqczuMRwP891Dj/nVdd46iqqaGmBqqdY+2WSlxOhV+u3msarIPtJjT2cKev3X5efRs2VmC5W8nNziA3K1MHXSQpqmsc5ZXVVFTVUF5ZHdxqKK+qpmLbvZ9WURXMa2SZ8srgcey6guUrtlaRnZ1JphmZGUZWptU9zsggIwOyMjKC50ZGcJ8Zc8uq9ziDDAvWkxG7rvqvrXtNxnbzyrZspl1hBRnBazNi1lM7LTODuvnW9PTaXNsem5GRwXavywgO4saT9rRkl2VmGD07+G5AB/Td/op5VdU1fBUUCas2VrBmUwVrNm9l9aatrNlUwepNFcxau441m7ayqaIKWLLdOvKzM+lc6IuFwrwscrP8+RA5wXkR/nEm2VlGbr1pGeRkZdZ7nhucL1GrqQ2r2zbNbTet9sm6DRvIL9hKVU2N3ymo9jsGVcFOQmW1ozp47ufVf167M9FYlqbyNJwOvkUnLyuT/JwM8rMzycvOJD8nk/xsf8vLyaSqbDNdN2dsmx67TGM7CFVB97Ot1XXd0WrvK6rqd0nbWl3N1irfNa3GueDclJhzW7Js2+8ndl5Og3NdmvpC85+lb92qCrrAba3yn2Ndt7oGj6v8xqRsazVlldVs2Vr3uGxr8Lyyatvj8mCZ2MdlldXbMmQYZAUn9mdlWL3HhiM3OyvYIAXTM+s2ENmZwUYmZsOxbQNi9TdO225mZAYbt6xgwyPeVxu3cuNTH1Fd7ah2df9HdTvn/v+s2uHvG8yvdjH/f428vnYdNbXrctv/r6aa7EwjNyuT3KwM8rL9fW7tfcy0unkZ5GVlbisi8hrcN7aebfNils3JzGj0/7q2wK79f62qrt+9NfZ/uf68mMfB91Pt927td0JV0OW2KnYdNTVUVvkW89rvkPKKCvLzcsnKqB3Ew//f1p5jl5VhMdP985ysDLIyMsjK9N9l/v/cf8fVnx67fMw6M43sDD9QiH/fHR9gqP3uq92+VAZ/z5UxP3ft9qT2s9s2PdjmVFXXUBncx86v3T75zzh4XFP3edcWuhs2l+EysrbtsDfcwa+oqvE79lXVu3R+ZENZGVb/7zDmbzAvK5PCvCzysjNx1ZVkZufU+/+v+3/1n8/WqqqY74Lt//dr3Pbzaov6qpoaUuVfPcNopGCwFhf9KgIk7rIyM4LzB3beVWr5ilW43Las3ljBms0VQaFQVyys3uTPt1hTVdfdaWsjj6Ms9qhCVmbGdtV8w3/d+tsHa3S6c46Kyhq2VFa3aEclJ9NvwB1s29EP40swO2YDWrvhqqyu2a7o2R152RkU5GT5Aqm2UMrJpENBDj07ZNabXpCTSVZmxrYNRFWNo7p2wxqzkd1SVk5GVva2jU7sxra6xrGpqqpuh7Jmxzuf1c5t27jF7ohKnQ1lVbw8dwWZwRG/2iN/GVZ3BLDhLSsjg7xs21aMNVV41T8amEFmBvXvGzlimGFQVlZGmzZ133H1/23r/1fv6H/cGsxtuH9oO5xXN2H9xk1k5+ZRXlVDRcxR14qqup242Pt1ZZVUBI/9Dl3d/e4UP2YEhUZmUJCxbQc00bIzG+6Y19/Bd64GrLLJAqT2/zvRag8aZGdk4HBU1wQ7/zXx/e5rjtjPrHYblZ0BBbnZ9QrBDvnZ9YrAvOz6hWRe7X3DnfrYZYLlcoP7rMzmHeiIPSczUVxMcRH7vVxVE1NA1H5PV9dQum4dhYXtqXF1y9fUxD5mu2n+PbafXrPd632X7xoXO50mlq2b9mELfm4VARKqvOxMijrkU9whv8XrcC726LWLKQ78Bq52ev0Nb8zjRna6m1p286aNFHXoUPeFmZFBZqaRHbPjkbltnp+W6G4lldX+hPLy2qPewZHvlWvWkZ1fQNnWmm3Ty2OOkpdXVmPGttaV7Ab3OQ1HroppiakdiSonywDbdlSu4UnvW+udLB87zdWbVlldQ1XlVgrbFNQb/aq5j7OC+9oWj4Icv2Ofl5VJRgK6RSRjo1RT48j8bULfImUM6FrA9J8eE3aMepLxN7Ar4pmnKmj9qy0YGisiYguHhtNrn2/dWkHbgvzgiLj/TtnREfZt/88Z2/9vN3W0vnZec7rwNeczcs7VtSTsoMWi9kh83bJ1R9i3LbNdy0dNvXVXVtdQXl5O24L8mJbG2s/CF6XbdtJrWxszM7Ztb2pbGrY9Du7989id+7oWidrtU+1yLf2cWhuz2s+3ecuXZldSVNQusaF20a0teI2KAEl5ZrXN3okfoai01EXuH79249guL7ve9NK2LqW+yNNxw7MjiSheRJojK9hpb5O7e7sIqfg/bWa+OyPJ6Y6Xip+RtB7qdCoiIiIikmZUBIiIiIiIpBkVASIiIiIiaUZFgIiIiIhImlERICIiIiKSZlQEiIiIiIikGRUBIiIiIiJpRkWAiIiIiEiaMZfsa1TXvrHZRmBBKG/eMp2B1WGHaCZlTZxUyptKWSG18iYr657OuS5JeJ9Ii+j2Imp/r1HLA9HLFLU8oEzNEbU8EM1MA51zhbvygjCvGLzAOTcyxPffJWY2PVXyKmvipFLeVMoKqZU3lbK2EpHbXkTtbyBqeSB6maKWB5SpOaKWB6KbaVdfo+5AIiIiIiJpRkWAiIiIiEiaCbMImBDie7dEKuVV1sRJpbyplBVSK28qZW0Novh5R
y1T1PJA9DJFLQ8oU3NELQ+0kkyhnRgsIiIiIiLhUHcgEREREZE0oyJARERERCTNJLwIMLMTzGyBmS0ysxsamZ9rZo8H8981sz6JztQYM+ttZlPM7GMzm2tm32lkmTFmtt7MZga3m8LIGpNnsZl9FGTZbmgo8/4SfLazzWx4SDkHxnxmM81sg5ld12CZUD9bM7vfzFaa2ZyYaUVm9oqZLQzuOzbx2m8Gyyw0s2+GlPX/zGx+8Ht+ysw6NPHaHf7NJDHvz81sWczv+6QmXrvD748kZX08JudiM5vZxGuT/tm2Rrvzv5jEPGcH24kaM0v6MIG78x2Q5Ey/DPLMNLOXzaxnmHli5n3PzJyZdU5WnqYyNfe7MFl5gunXBH9Lc83sd8nK01Sm5n4HJzlTiZm9U/t9b2YHhJxnqJlNC7ZB/zazds1amXMuYTcgE/gU6AfkALOAfRoscyVwd/D4PODxRGbaQdYewPDgcSHwSSNZxwDPhZGvicyLgc47mH8S8AJgwGjg3QhkzgS+wl8EKTKfLXA4MByYEzPtd8ANweMbgN828roi4LPgvmPwuGMIWY8DsoLHv20sa3P+ZpKY9+fA95vxt7LD749kZG0w/w/ATVH5bFvjraX/i0nOMxgYCLwOjIzIZ9Ss74AkZ2oX8/ja2m19WHmC6b2Bl4AvUuW7MMl5jgReBXKD513DztRgfpPfwUn+nF4GTgwenwS8HnKe94EjgseXAL9szroS3RJwALDIOfeZc24rMBEY12CZccCDweNJwNFmZgnOtR3n3JfOuQ+CxxuBeUBxsnPE2TjgIee9A3Qwsx4hZzoa+NQ590XIOepxzk0FShtMjv3bfBA4rZGXHg+84pwrdc6tBV4BTkhUTmg8q3PuZedcVfD0HaBXIjPsiiY+2+ZozvdHXO0oa/C9dA7wWCIzpLvd+F9MWh7n3DznXGhXMI7id0ATmTbEPG0DJG0kkh38L/8J+GEys9Taje/ChGgizxXAb5xzFcEyKyOQCQjvO7iJTA6oPdreHlgecp4BwNTg8SvAmc1ZV6KLgGJgSczzpWy/Y71tmeALbD3QKcG5dsh8l6RhwLuNzD7IzGaZ2Qtmtm9yk23HAS+b2QwzG9/I/OZ8/sl2Hk3/A0fpswXo5pz7Mnj8FdCtkWWi+Blfgm8BaszO/maS6eqgq8D9TXTviNpnexiwwjm3sIn5UfpsW5vm/C9KnR19BySVmd1qZkuA84Gwu9COA5Y552aFmaMRO/suTKYBwGHmu2e/YWajQs4Ta2ffwcl0HfB/wd/274EfhxuHudQdJDsb3+K1UzoxuAEzawv8C7iuwVEMgA/w3ViGAn8Fnk5yvIYOdc4NB04ErjKzw0POs0NmlgOcCvyzkdlR+2zrcb6NLfLj6ZrZT4Aq4JEmFonK38xdwF5ACfAlvok36r7Gjo9AReWzbdVS5X8xLM34Dkgq59xPnHO98XmuDiuHmRUANxJyIdKIqH0XZuG7t44GfgA8EUbvjCbs7Ds4ma4Arg/+tq8H7gs5zyXAlWY2A9+lfWtzXpToImAZ9auRXsG0Rpcxsyx8s8qaBOdqlJll4wuAR5xzTzac75zb4JzbFDyeDGQn+8SiBnmWBfcrgafw3SdiNefzT6YTgQ+ccysazojaZxtYUdt9KrhvrFk0Mp+xmV0EjAXOD3aUttOMv5mkcM6tcM5VO+dqgHuayBGlzzYLOAN4vKllovLZtlLN+V9Me835DgjRIzSzi0KC7AX0BWaZ2WL898kHZtY9xEzN/S5MpqXAk0E34veAGiDsbXGzvoOT7JtA7X7iPwn59+acm++cO845NwJfKH3anNclugh4H+hvZn2Do8DnAc82WOZZ/IcJcBbwWhhfXkGlex8wzzn3xyaW6V5bEQdngmcQXsHSxswKax/jTwprOArCs8CF5o0G1sc0qYehySo+Sp9tjNi/zW8CzzSyzEvAcWbWMWjGPS6YllRmdgK+n+upzrktTSzTnL+ZpGhwbsrpTeRozvdHshwDzHfOLW1sZpQ+21aqOf+Laa053wHJZmb9Y56OA+aHlcU595Fzrqtzro9zrg9+Z3e4c+6rsDJBs78Lk+lp/MnBmNkA/KAMq8MMFNjhd3AIlgNHBI+PAkLtomRmXYP7DOCnwN3NeuGunJHckhv+rOlP8FXJT4JpN+O/qADy8FXUIuA9oF+iMzWR81B8E/NsYGZwOwm4HLg8WOZqfL+rWfgTrw4OI2uQpV+QY1aQqfazjc1rwB3BZ/8RIYxiEZO3DX6nvn3MtMh8tvji5EugEr9xuBR/bsp/8P/crwJFwbIjgXtjXntJ8Pe7CLg4pKyL8P3na/92a0fc6glM3tHfTEh5/xH8Tc7G7+D1aJg3eL7d90eyswbTH6j9W41ZNvTPtjXeduV/McQ8pwePK4AVwEsR+Iwa/Q4IOdO/8Du1s4F/A8Vh5mkwfzHJHx2o2d+FIebJAR4Ofm8fAEeF/RkF07f7Dg75czoUmBF8578LjAg5z3eCbeUnwG8Aa866LFihiIiIiIikCZ0YLCIiIiKSZlQEiIiIiIikGRUBIiIiIiJpRkWAiIiIiEiaUREgIiIiIpJmVASIiIiIiKQZFQEiIiIiImlGRYCIiIiISJpRESAiIiIikmZUBIiIiIiIpBkVASIiIiIiaUZFgIiIiIhImlERICIiIiKSZlQEiIiIiIikGRUBIiIiIiJpRkWAtFpmttjMfmBms81ss5ndZ2bdzOwFM9toZq+aWUczyzOzh81sjZmtM7P3zaxbsI72weu+NLNlZnaLmWWG/bOJiEj8NHd7ESz7TzP7yszWm9lUM9s3Zj25ZvZ7M/ufma0ws7vNLD+8n0ykaSoCpLU7EzgWGACcArwA3Ah0wf/9Xwt8E2gP9AY6AZcDZcHrHwCqgL2BYcBxwLeSll5ERJKlOdsLgun9ga7AB8AjMev4TfD6Evx2oxi4KfHRRXZdqEWAmd1vZivNbE4zlv2Tmc0Mbp+Y2bokRJTU91fn3Arn3DLgTeBd59yHzrly4Cn8jn0lfud/b+dctXNuhnNuQ9AacBJwnXNus3NuJfAn4LyQfhYREUmc5mwvcM7d75zb6JyrAH4ODA1ajQ0YD1zvnCt1zm0EfoW2GRJRWSG//wPA7cBDO1vQOXd97WMzu4bgn1FkJ1bEPC5r5Hlb4B/4VoCJZtYBeBj4CbAnkA186b/bAV84L0lsZBERCcFOtxdBd9BbgbPxLQQ1wfzOQC5QAMyI2WYYoC6kEkmhtgQ456YCpbHTzGwvM3vRzGaY2ZtmNqiRl34NeCwpIaXVc85VOud+4ZzbBzgYGAtciN/ZrwA6O+c6BLd2zrl9d7Q+ERFptb4OjAOOwXcj7RNMN2A1vljYN2ab0d451zaUpCI7EcVzAiYA1zjnRgDfB+6MnWlmewJ9gddCyCatkJkdaWb7BUd4NuC7B9U4574EXgb+YGbtzCwjKFKPCDWwiIiEpRB/cGgN/qj/r2pnOOdqgHuAP5lZVwAzKzaz48MIKrIzkSoCzKwt/kjsP81sJvA3oEeDxc4DJjnnqpMcT1qv7sAk
fAEwD3gD30UIfItADvAxsDZYruHfpIiIpIeHgC+AZfjtwjsN5v8IWAS8Y2YbgFeBgUlNKNJM5pwLN4BZH+A559wQM2sHLHDONbmTZWYfAlc5595OVkYRERERkdYkUi0BzrkNwOdmdjaAeUNr5wfnB3QEpoUUUUREREQk5YU9ROhj+B36gWa21MwuBc4HLjWzWcBc/Ak4tc4DJrqwmy9ERERERFJY6N2BREREREQkuSLVHUhERERERBIvtIuFderUyfXt2zest99l1dXVZGamxvU+lDVxUilvKmWF1MqbrKwzZsxY7ZzrkvA3irhU2l6k0t8xpFZeZU2cVMqbSlkh2tuL0IqA3r17M3369LDefpeVlpZSVFQUdoxmUdbESaW8qZQVUitvsrKa2RcJf5MUkErbi1T6O4bUyqusiZNKeVMpK0R7e6HuQCIiIiIiaUZFgIiIiIhImlERICIiIiKSZkI7J0BEZEcqKytZunQp5eXlYUfZTk1NDStWrIjb+vLy8ujVqxfZ2dlxW6eISDqI8rYCor29UBEgIpG0dOlSCgsL6dOnD2YWdpx6qqqqyMqKz9enc441a9awdOlSUmUEHBGRqIjytgKivb1QdyARiaTy8nI6deoUyS/1eDIzOnXqFNmjWCIiUZYu2wqI//ZCRYCIRFY6fKlD+vycIiKJkE7fofH8WZtVBJhZBzObZGbzzWyemR3UYL6Z2V/MbJGZzTaz4TtdZ+XmlmYWEZE0YtUVYUcQEWl1mtsScBvwonNuEDAUmNdg/olA/+A2HrhrZyvMXPcFlK1rflIRkQhr27YtAMuXL+ess85qdJkxY8akzEWvoiRjU/xOqhMRCVtUthc7LQLMrD1wOHAfgHNuq3NuXYPFxgEPOe8doIOZ9djhil01fPBgi0KLiERVz549mTRpUtgxWhWr2AAbvgw7hohIXIW9vWjO6cp9gVXA381sKDAD+I5zLrY/TzGwJOb50mBavW9tMxuPbylgv575VE+7i/UDzoPM6A+LV1ZWRmlpadgxmkVZEyeV8qZSVtg+b01NDVVVVaHlufHGG+nVqxdXXnklADfffDNZWVm8/vrrrF27lsrKSm6++WZOPfXUba+pqqpi8eLFnHbaacycOZOysjK+9a1vMXv2bAYOHMiWLVuoqqpq9OeqqalJqd9Xcjn44CEY86Owg4iIbOeGG26gd+/eXHXVVQD8/Oc/JysriylTplBaWkpVVRW33HIL48aNq/e6xYsXM3bsWObMmUNZWRkXX3wxs2bNYtCgQZSVlSU8d3OKgCxgOHCNc+5dM7sNuAH4f7v6Zs65CcAEgOH77OUyN31J0ZdTYf+zd3VVSVdaWkpRUVHYMZpFWRMnlfKmUlbYPu+KFSvqhlV74Qb46qP4vmH3/eDE3zQ5+2tf+xrXXXcd1157LQCTJk3ipZde4rrrrqOgoIB169YxevRoTj/99G0namVlZW3LnJWVxT333EObNm2YN28es2fPZvjw4fWWiZWRkZFSv69kcjmFMOMBOOx7kKmRrUWkCSFsKwDOPfdcrrvuum1FwBNPPMFLL73EtddeW297ceqppzZ5Yu9dd91FQUFBve1FojXnnIClwFLn3LvB80n4oiDWMqB3zPNewbQm1eQUQucBMO2v4Fxz84qIJMWwYcNYuXIly5cvZ9asWXTs2JHu3btz4403MmzYMI455hiWLVu2w4vATJ06lQsuuACA/fffn/333z9Z8VsVl18EG5fDJy+EHUVEZDupur3Y6SEV59xXZrbEzAY65xYARwMfN1jsWeBqM5sIHAisd87tvAPn6Cvhuevgi/9Cn0N3Pb2IpIedHIVJlLPPPptJkybx1Vdfce655/LII4+watUq3nvvPfLz8+nTp4/G90+Cmtx20C4P3r8PBp8SdhwRiaqQthWQmtuL5o4OdA3wiJnNBkqAX5nZ5WZ2eTB/MvAZsAi4B7iyWWsdeh4UdIK3b9+l0CIiyXDuuecyceJEJk2axNlnn8369evp2rUr2dnZTJkyhS+++GKHrz/88MN59NFHAZgzZw6zZ89ORuzWacRF8NkUWPNp2ElERLaTituLZnWudM7NBEY2mHx3zHwHXLXL756dD6O+BW/8FlYvhM79d3kVIiKJsu+++7Jx40aKi4vp0aMH559/PqeccgolJSWMGjWKQYMG7fD1V1xxBRdffDGDBw9m8ODBjBgxIknJW6HhF/ptxfT74fhbw04jIlJPKm4vwj/DatRl8Naf4Z07Yeyfwk4jIlLPRx/VnWTWuXNnpk2bRlVV1XYn927atAmAPn36MGfOHADy8/OZOHFi8sK2ZoXdYdDJMPMROOqn/iCSiEiEpNr2orndgRKnbRcYei7MfBQ2rwk7jYiIRNXIS6FsLcx9OuwkIiIpL/wiAGD0VVBV7pt5RUREGtP3cOjUH6bfF3YSEZGUF40ioOsg2PtYeG8CVEbrzGkRCY9Lk+GD0+Xn3G1mMPISWPo+fKmTrEXES6fv0Hj+rNEoAgAOugo2r4SP/hl2EhGJgLy8PNasWdPqv9ydc6xZs4a8vLywo6SGkq9BVr5aA0QESJ9tBcR/exH+icG1+o2BbkNg2h0w7AJ/xEdE0lavXr1YunQpq1atCjvKdmpqasjIiN8xlLy8PHr16hW39bVq+R1hyJkw+wk49mbIax92IhEJUZS3FRDt7UV0igAzOOhqePpy+PQ/sPcxYScSkRBlZ2fTt2/fsGM0qrS0lKKiorBjpK9Rl8DMh2HW43Dg+LDTiEiIorytgGhvL6LTHQj80Z223X1rgIiISGOKR0DPYb5LUBp0ARARSYRoFQFZOf6ozqevwYq5YacREZGoGnkprJoPX7wddhIRkZQUrSIAYMTFkF2g1gARkQgxsw5mNsnM5pvZPDM7yMx+bmbLzGxmcDspZvkfm9kiM1tgZsfHTD8hmLbIzG5ocaAhZ/rzAXSCsIhIi0SvCCgogpLz/UlfG78KO42IiHi3AS865wYBQ4F5wfQ/OedKgttkADPbBzgP2Bc4AbjTzDLNLBO4AzgR2Af4WrDsrsspgKFfh4+fhU0rd+sHExFJR9ErAgBGXwE1VfDePWEnERFJe2bWHjgcuA/AObfVObduBy8ZB0x0zlU45z4HFgEHBLdFzrnPnHNbgYnBsi0z8hKoqYQP/9HiVYiIpKvojA4Uq9NeMOhk38x72Pf8ER8REQlLX2AV8HczGwrMAL4TzLvazC4EpgPfc86tBYqBd2JevzSYBrCkwfQDG3tDMxsPjAcoLi6mtLR0+4UyO1PY6yAy3ruP9YMvhIzMFv548VNWVtZ41ohKpbzKmjiplDeVskK080azCAA/XOj852DWozDqW2GnERFJZ1nAcOAa59y7ZnYbcANwO/BLwAX3fwAuiccbOucmABMASkpKXJND7B30bfjnRRSVfgADjm98mSSK8nCAjUmlvMqaOKmUN5WyQrTzRrM7EMAeo6HncJh2J9TUhJ1GRCSdLQWWOufeDZ5PAoY751Y456qdczXAPfjuPgDLgN4xr+8VTGtqessNGgttu8H79+7WakRE0k1
0iwAzOPhqKP0UPnkx7DQiImnLOfcVsMTMBgaTjgY+NrMeMYudDswJHj8LnGdmuWbWF+gPvAe8D/Q3s75mloM/efjZ3QqXmQ3DL4SFr8Daxbu1KhGRdBLdIgBg8Dho3xum3R52EhGRdHcN8IiZzQZKgF8BvzOzj4JpRwLXAzjn5gJPAB8DLwJXBS0GVcDVwEv40YWeCJbdPSMu8geOZjyw26sSEUkX0T0nACAzCw68HF7+CSz7AIqHh51IRCQtOedmAiMbTP7GDpa/Fbi1kemTgclxDde+Fww4ET74B4z5MWTlxnX1IiKtUbRbAsA38+YU6uJhIiLStFGXwJbVMO/fYScREUkJ0S8C8trBiG/C3Kdg3ZKdLy8iIumn31HQsS+8rysIi4g0R/SLAPBdggDe+1u4OUREJJoyMmDkxfC/t2HFx2GnERGJvNQoAjr0hn1PgxkPQvmGsNOIiEgUlVwAmbkw/f6wk4iIRF5qFAEAB10FFRvgw4fDTiIiIlHUppM/YDRrIlRsCjuNiEikpU4RUDwC9jgY3rkLqqvCTiMiIlE08lLYuhE+eiLsJCIikZY6RQD41oD1/4N5u3dtGRERaaV6HwDdhsD794NzYacREYms1CoCBp4IRf38xcP05S4iIg2ZwchLYMVHsPT9sNOIiERWs4oAM1scXBVypplNb2T+GDNbH8yfaWY3xT8qkJEJo6+EZTNgybsJeQsREUlx+5/jry+j4UJFRJq0Ky0BRzrnSpxzDa8YWevNYH6Jc+7meIRrVMnXIa+Dbw0QERFpKLcQhp7rry+zpTTsNCIikZRa3YEActrAqEth3nNQ+lnYaUREJIpGXgrVFRpRTkSkCVnNXM4BL5uZA/7mnJvQyDIHmdksYDnwfefc3IYLmNl4YDxAcXExpaUtO0Jj/c+mw3//QsXrf2LLmF+0aB27qqysrMV5k01ZEyeV8qZSVkitvKmUNW112wf2OMhfM+Cgq/3FxEREZJvmFgGHOueWmVlX4BUzm++cmxoz/wNgT+fcJjM7CXga6N9wJUHxMAGgpKTEFRUVtSx1URHsdzZ5H08i74RfQEEL17MLSktLaXHeJFPWxEmlvKmUFVIrbyplTWsjL4UnvwWfTYG9jw47jYhIpDTr0IhzbllwvxJ4CjigwfwNzrlNwePJQLaZdY5z1vpGXw6VW2Dukwl9GxERSVH7nAoFnXUFYRGRRuy0CDCzNmZWWPsYOA6Y02CZ7mZmweMDgvWuiX/cGN33h6K9/LkBIiIiDWXlwrALYMFkWL8s7DQiIpHSnJaAbsBbQX//94DnnXMvmtnlZnZ5sMxZwJxgmb8A5zmX4IH8zWDwWFj8JpStTehbiYhIihp5sb+uzIwHwk4iIhIpOy0CnHOfOeeGBrd9nXO3BtPvds7dHTy+PZg31Dk32jn3dqKDAzDoFKipgk9eTsrbiYhIiunYB/Y+Bj54CGpqwk4jIhIZqT1cQvEIaNsd5v877CQiIhJV+50Fm77yVxEWEREg1YuAjAzfJWjhq7B1S9hpREQkivoe4e8/ez3UGCIiUZLaRQDAoLFQVQafvhZ2EhERiaJ2PaDLIBUBIiIxUr8I6HMo5HWA+RolSEREmtDvSPjibagsDzuJiEgkpH4RkJkNA06ABS9AdWXYaUREJIr6jYGqcljybthJREQiIfWLAIDBp0D5Olj8VthJREQkivocApapLkEiIoHWUQTsdRRk5atLkIiINC63EHqNUhEgIhJoHUVATgHsfTTMf17jQIuISOP2OhKWfwhbSsNOIiISutZRBIDvErTxS1j+QdhJREQkivqNAZy/0ryISJprPUXAgOMhIwvmPRt2EhERiaLiEZBTqC5BIiK0piIgvyP0OQzmPQfOhZ1GRESiJjPbDyutIkBEpBUVAeCvHlz6KayaH3YSERGJon5joPQzWPtF2ElERELVuoqAgSf7+3kaJUhERBrRb4y/V2uAiKS51lUEtOsBvQ7QeQEiItK4LgOhsIeKABFJe62rCADfJeir2WrqFRGR7Zn51oDP39CQ0iKS1lpfETBorL+f/3y4OUREJJr6jYEta2DFR2EnEREJTesrAjrtBV330dWDRUSkcX2P8PfqEiQiaaz1FQHgLxz2xduwaVXYSUREJGra9YAug1QEiEhaa51FwKCxgIMFk8NOIiIiUdTvSH+wqLI87CQiIqFonUVA9/2gwx7qEiQiIo3rNwaqymHJu2EnEREJRessAsxg0Cm+qbd8Q9hpREQkavocApapLkEikrZaZxEA/ryA6q2w8OWwk4iISNTkFkKvUSoCRCRttd4ioPcB0KaLugSJiEjj9joSln8IW0rDTiIiknSttwjIyISBJ8HCV3Til4jIbjKzDmY2yczmm9k8MzvIzIrM7BUzWxjcdwyWNTP7i5ktMrPZZjY8Zj3fDJZfaGbfDO8nwp8XgIPFb4YaQ0QkDK23CADfJWjrJn9lSBER2R23AS865wYBQ4F5wA3Af5xz/YH/BM8BTgT6B7fxwF0AZlYE/Aw4EDgA+Flt4RCK4hGQU6guQSKSllp3EdD3cMhtB/OeDTuJiEjKMrP2wOHAfQDOua3OuXXAOODBYLEHgdOCx+OAh5z3DtDBzHoAxwOvOOdKnXNrgVeAE5L2gzSUmQ19DlURICJpKas5C5nZYmAjUA1UOedGNphv+KNEJwFbgIuccx/EN2oLZOVC/+NgwQtQXQWZzfpxRUSkvr7AKuDvZjYUmAF8B+jmnPsyWOYroFvwuBhYEvP6pcG0pqZvx8zG41sRKC4uprQ0Mf32c7uPos0nL7Bu8Wxq2vXa7fWVlZUlLGsipFJeZU2cVMqbSlkh2nl3Za/4SOfc6ibmxTb9Hohv+j1wN7PFx+CxMGcSLHnHH/EREZFdlQUMB65xzr1rZrdR1/UHAOecMzMXrzd0zk0AJgCUlJS4oqKieK26viEnw9Sb6bDmQ+iz/26vrrS0lIRlTYBUyqusiZNKeVMpK0Q7b7y6AzXV9Bu+vY+FzFyYp1GCRERaaCmw1DlXe2WtSfiiYEXtd31wvzKYvwzoHfP6XsG0pqaHp8tAKOyhLkEiknaa2xLggJeDozx/C47QxGqqiffL2IWS1bzbUNs9DiXz42dYf8AP/IXEWiDKzTkNKWvipFLeVMoKqZU3lbLGg3PuKzNbYmYDnXMLgKOBj4PbN4HfBPfPBC95FrjazCbiW4XXO+e+NLOXgF/FnAx8HPDjZP4s2zHzowQtfBlqaiCjdZ8qJyJSq7lFwKHOuWVm1hV4xczmO+em7uqbJa15t6H9z4BnrqKo4n/Qc1iLVhHl5pyGlDVxUilvKmWF1MqbSlnj6BrgETPLAT4DLsa3Jj9hZpcCXwDnBMtOxp8jtgh/ntjFAM65UjP7JfB+sNzNzrnwq6l+Y2DWY7DiI+gxNOw0IiJJ0awiwDm3LLhfaWZP4Yd2iy0CotfEG2vAiWAZvktQC4sAEZF05pybCYxsZNbRjSzrgKuaWM/9wP1xDbe7+h7h7z97XUWAiKSNnbZ7mlkbMyusfYxvvp3TYLFngQuDC8SMJmj6jX
valmrTCfY8RFcPFhGR7bXrAV0G6bwAEUkrzen82A14y8xmAe8BzzvnXjSzy83s8mCZyfjm4UXAPcCVCUm7OwafAqvmw+qFYScREZGo6XckfPG2rjAvImljp0WAc+4z59zQ4Lavc+7WYPrdzrm7g8fOOXeVc24v59x+zrnpiQ6+ywad7O/n/TvcHCIiEj39xkBVOSx5d6eLioi0BukzDEL7Xv58AHUJEhGRhvocAhlZ6hIkImkjfYoAgEFjYdkM2LA87CQiIhIluYXQa5SKABFJG+lVBAw+1d/Pfz7cHCIiEj39xsDyD2FL+KOWiogkWnoVAV0GQOcBMO/ZsJOIiEjU9BsDOFj8ZthJREQSLr2KAPBdghb/V0d6RESkvuIRkFOoLkEikhbSrwgYPBZcNXzyYthJREQkSjKzoc+hKgJEJC2kXxHQczi0K/ZXDxYREYnVbwyUfgZrvwg7iYhIQqVfEWDmuwR9+h/YujnsNCIiEiX9xvh7tQaISCuXfkUA+C5BVeWw6NWwk4iISJR0GQiFPVQEiEirl55FwB4HQ36RugSJiEh9Zr414PM3oKYm7DQiIgmTnkVAZhYMPAk+eQmqtoadRkREoqTfGNiyBlZ8FHYSEZGESc8iAHyXoIr1sHhq2ElERCRK+h7h79UlSERasfQtAvodCdlt1CVIRETqa9cDugxWESAirVr6FgHZedD/GFgwWf0+RUSkvn5j4Iu3obI87CQiIgmRvkUAwOBTYdMKWPp+2ElERCRK+o3xo8gteTfsJCIiCZHeRUD/YyEjGz5+JuwkIiISJX0OgYwsdQkSkVYrvYuAvPYw8ASY+YguHCYiInVyC6HXKBUBItJqpXcRAHDQNVC+Dj58JOwkIiISJf3GwPIPYUtp2ElEROJORcAeB/qjPe/cATXVYacREZGo6DcGcLD4zbCTiIjEnYoAgIOvgbWLYb6GCxURkUDxCMgpVJcgEWmVVAQADBoLHfvA27eHnURERKIiMxv6HAqfTgk7iYhI3KkIAMjIhNFXwdL34H8aDk5ERAL9xsDaz31rsYhIK6IioNaw8yGvA0z7a9hJREQkKvqN8fefvRFqDBGReFMRUCunDYy6FOY9B6WfhZ1GRESioMtAKOyh8wJEpNVRERDrgPG+D+i0O8NOIiIiUWDmWwM+fwNqasJOIyISNyoCYhV2h/3O8RcP07jQIiICvgjYsgZWfBR2EhGRuGl2EWBmmWb2oZltN46mmV1kZqvMbGZw+1Z8YybRQVdB5RaYfl/YSUREJAr6HuHv1SVIRFqRXWkJ+A4wbwfzH3fOlQS3e3czV3i67QN7HQ3vToCqirDTiIhI2Nr1gC6DVQSISKvSrCLAzHoBJwOpu3O/Kw6+BjavhNlPhJ1ERESioN8Y+OJtqCwPO4mISFxkNXO5PwM/BAp3sMyZZnY48AlwvXNuScMFzGw8MB6guLiY0tKI9rvvsD/tOg+Ct25jw54ngRllZWXRzduAsiZOKuVNpayQWnlTKavESb8x8O5d8L9psNeRYacREdltOy0CzGwssNI5N8PMxjSx2L+Bx5xzFWb2beBB4KiGCznnJgATAEpKSlxRUVFLcyfeYdfDU9+mqPQD6H8spaWlRDpvDGVNnFTKm0pZIbXyplJWiZO+h0F2Acx7VkWAiLQKzekOdAhwqpktBiYCR5nZw7ELOOfWOOdqO9DfC4yIa8ow7HuGHxv6bV08TEQk7eW0gQEnwMfPQHVV2GlERHbbTosA59yPnXO9nHN9gPOA15xzF8QuY2Y9Yp6eyo5PIE4NWTlw4OV+bOgvZ4edRkREwjbkDD9U6Oe6erCIpL4WXyfAzG42s1ODp9ea2VwzmwVcC1wUj3ChG3ER5LSFabeHnURERMK297GQUwhznww7iYjIbtulIsA597pzbmzw+Cbn3LPB4x875/Z1zg11zh3pnJufiLBJl98Bhl8Ic/6Fbfwy7DQiIhKm7DwYPBbm/RuqtoadRkRkt+iKwTtz4OXgasib9UDYSUREJGz7ngHl6+HT18JOIiKyW1QE7EzHPWGf08id8xiUbwg7jYiIhKnfGMjvCHP+FXYSEZHdoiKgOQ6+moytG+HDf4SdREQkFGa22Mw+MrOZZjY9mPZzM1sWTJtpZifFLP9jM1tkZgvM7PiY6ScE0xaZ2Q1h/Cy7JSsHBp8CCyZDZVnYaUREWkxFQHMUj6Cy5wHwzl0aGk5E0tmRzrkS59zImGl/CqaVOOcmA5jZPvjR5PYFTgDuNLNMM8sE7gBOBPYBvhYsm1qGnAlbN8HCl8NOIiLSYioCmql8+Ldg/RL4+Omwo4iIRN04YKJzrsI59zmwCDgguC1yzn3mnNuKv/bMuBBztsyeh0KbLuoSJCIpTUVAM1X2PRo67e2HC3Uu7DgiIsnmgJfNbIaZjY+ZfrWZzTaz+82sYzCtGFgSs8zSYFpT01NLZhbscxp88jJUbAw7jYhIi2SFHSBlWAYcdBU8dz188Tb0OSTsRCIiyXSoc26ZmXUFXjGz+cBdwC/xBcIvgT8Al8TjzYJCYzxAcXExpaWl8Vht3GTteSzt3r+HTR9MYuvAusaMsrKyyGXdkVTKq6yJk0p5UykrRDuvioBdMfRr8Not8PZfVQSISFpxzi0L7lea2VPAAc65qbXzzewe4Lng6TKgd8zLewXT2MH0hu83AZgAUFJS4oqKiuLxY8RPh2OhsCdtP38JDrp42+TS0lIil3UHUimvsiZOKuVNpawQ7bzqDrQrsvNh1GXwyQuwemHYaUREksLM2phZYe1j4Dhgjpn1iFnsdGBO8PhZ4DwzyzWzvkB/4D3gfaC/mfU1sxz8ycPPJuvniKuMDBhyBix6FcrWhp1GRGSXqQjYVaO+BZm5/twAEZH00A14y8xm4Xfmn3fOvQj8Lhg2dDZwJHA9gHNuLvAE8DHwInCVc67aOVcFXA28BMwDngiWTU37ngE1lTD/+bCTiIjsMnUH2lVtu0DJ12DWRDjyp/65iEgr5pz7DBjayPRv7OA1twK3NjJ9MjA5rgHDUjwcOuwJc56EYReEnUZEZJeoJaAlRl8FVeXw/r1hJxERkbCY+WsGfPY6bF4ddhoRkV2iIqAlugyAASfC+/foipEiIulsyBngqmFeap7aICLpS0VASx18NWxZA7MeCzuJiIiEpdsQ6DzAdwkSEUkhKgJaas9DoOcwmHYH1NSEnUZERMJg5k8QXvwWbPgy7DQiIs2mIqClzOCgq2HNIvjkxbDTiIhIWIacATj4+Jmwk4iINJuKgN2xz2nQvreGCxURSWddBkK3/WDOv8JOIiLSbCoCdkdmFoy+Ar74LyybEXYaEREJy5DTYel7ZGxYGnYSEZFmURGwu4Z9A3LbwZt/hKqtYacREZEw7HsGADkLdeEwEUkNKgJ2V147OOAymP8c/K4fPHEhzHxMY0aLiKSTor7Qc7iKABFJGbpicDwc+RPoNcqfIPzJS8HJYQa9D4ABJ/hb18H+ZGIREWmdhpxJ1ss/gTWfQqe9wk4jIrJDagmIh4xMGHginHIbfHcejH8dxtwAVRXwn1/AX
[base64-encoded PNG truncated — image/png output of the learner.plot_metrics() cell that follows]", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "learner.plot_metrics()" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "X_train, y_train, X_val, y_val, X_test, y_test = split_Xy(X, y, splits)" + ] + }, + { + "cell_type": "code", + "execution_count": 299, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " 0.00% [0/573 00:00<?]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "preds = learner.get_preds(0, with_decoded=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 300, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[ 24., 22., 22., ..., 56., 66., 52.]],\n", + "\n", + " [[ 22., 22., 20., ..., 66., 52., 20.]],\n", + "\n", + " [[ 22., 20., 22., ..., 52., 20., 23.]],\n", + "\n", + " ...,\n", + "\n", + " [[132., 122., 133., ..., 99., 142., 147.]],\n", + "\n", + " [[122., 133., 129., ..., 142., 147., 123.]],\n", + "\n", + " [[133., 129., 123., ..., 147., 123., 123.]]], dtype=float32)" + ] + }, + "execution_count": 300, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_train, preds[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "_, _, preds = learner.get_X_preds(X_test)" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [], + "source": [ + "y_preds = np.reshape(preds, y_test.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([[2586.70288086, 2968.31591797, 3197.52416992, 3346.09301758,\n", + " 3476.81274414, 3597.73413086, 3561.25561523, 3394.43505859,\n", + " 3087.15185547, 2682.01269531, 2252.69238281, 1835.6607666 ,\n", + " 1400.54528809, 1097.96679688, 926.84667969, 816.6517334 ,\n", + " 701.20263672, 601.66210938, 547.01208496, 628.43530273,\n", + " 918.94256592, 1302.46643066, 1699.14001465, 2150.11206055]]),\n", + " array([[7366., 7730., 8074., 8118., 8041., 8313., 8051., 8610., 8490.,\n", + " 9038., 9291., 9104., 9117., 5682., 2629., 2349., 2255., 2184.,\n", + " 2180., 2162., 2423., 2718., 2761., 3229.]], dtype=float32))" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_preds[0], y_test[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "15735" + ] + }, + "execution_count": 49, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(preds)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/utils/data.py b/utils/data.py deleted file mode 100644 index 2af768b..0000000 --- a/utils/data.py +++ /dev/null @@ -1,46 +0,0 @@ -from pytorch_forecasting import TimeSeriesDataSet -from pytorch_forecasting.data import GroupNormalizer, MultiNormalizer -from data_formatter.base import * -import pandas as pd - -def create_TimeSeriesDataSet( - data: pd.DataFrame, - formatter: BaseDataFormatter, - batch_size:int, train=False, -): - data_timeseries = TimeSeriesDataSet( - data, - time_idx=formatter.time_index[0], # can't handle multiple time 
index - target=formatter.targets, - group_ids = formatter.group_id, - max_encoder_length=formatter.parameters['window'], - max_prediction_length=formatter.parameters['horizon'], - static_reals=formatter.extract_columns([DataTypes.INTEGER, DataTypes.FLOAT], InputTypes.STATIC), - static_categoricals=formatter.extract_columns(DataTypes.CATEGORICAL, InputTypes.STATIC), - time_varying_known_reals = formatter.extract_columns([DataTypes.INTEGER, DataTypes.FLOAT], InputTypes.KNOWN), - time_varying_unknown_reals = formatter.extract_columns([DataTypes.INTEGER, DataTypes.FLOAT], InputTypes.OBSERVED), - target_normalizer = MultiNormalizer( - [GroupNormalizer(groups=formatter.group_id) for _ in range(len(formatter.targets))] - ) - ) - - if train: - dataloader = data_timeseries.to_dataloader( - train=True, batch_size=batch_size - ) - else: - dataloader = data_timeseries.to_dataloader( - train=False, batch_size=batch_size*4 - ) - - return data_timeseries, dataloader - -def valid_date(date): - try: - pd.to_datetime(date) - return True - except: - return False - -def missing_percentage(df:pd.DataFrame): - return df.isnull().mean().round(4).mul(100).sort_values(ascending=False) \ No newline at end of file diff --git a/utils/losses.py b/utils/losses.py new file mode 100644 index 0000000..21438e7 --- /dev/null +++ b/utils/losses.py @@ -0,0 +1,89 @@ +# This source code is provided for the purposes of scientific reproducibility +# under the following limited license from Element AI Inc. The code is an +# implementation of the N-BEATS model (Oreshkin et al., N-BEATS: Neural basis +# expansion analysis for interpretable time series forecasting, +# https://arxiv.org/abs/1905.10437). The copyright to the source code is +# licensed under the Creative Commons - Attribution-NonCommercial 4.0 +# International license (CC BY-NC 4.0): +# https://creativecommons.org/licenses/by-nc/4.0/. Any commercial use (whether +# for the benefit of third parties or internally in production) requires an +# explicit license. The subject-matter of the N-BEATS model and associated +# materials are the property of Element AI Inc. and may be subject to patent +# protection. No license to patents is granted hereunder (whether express or +# implied). Copyright © 2020 Element AI Inc. All rights reserved. + +""" +Loss functions for PyTorch. +""" + +import torch as t +import torch.nn as nn +import numpy as np +import pdb + + +def divide_no_nan(a, b): + """ + a/b where the resulted NaN or Inf are replaced by 0. + """ + result = a / b + result[result != result] = .0 + result[result == np.inf] = .0 + return result + + +class mape_loss(nn.Module): + def __init__(self): + super(mape_loss, self).__init__() + + def forward(self, insample: t.Tensor, freq: int, + forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float: + """ + MAPE loss as defined in: https://en.wikipedia.org/wiki/Mean_absolute_percentage_error + + :param forecast: Forecast values. Shape: batch, time + :param target: Target values. Shape: batch, time + :param mask: 0/1 mask. Shape: batch, time + :return: Loss value + """ + weights = divide_no_nan(mask, target) + return t.mean(t.abs((forecast - target) * weights)) + + +class smape_loss(nn.Module): + def __init__(self): + super(smape_loss, self).__init__() + + def forward(self, insample: t.Tensor, freq: int, + forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float: + """ + sMAPE loss as defined in https://robjhyndman.com/hyndsight/smape/ (Makridakis 1993) + + :param forecast: Forecast values. 
Shape: batch, time + :param target: Target values. Shape: batch, time + :param mask: 0/1 mask. Shape: batch, time + :return: Loss value + """ + return 200 * t.mean(divide_no_nan(t.abs(forecast - target), + t.abs(forecast.data) + t.abs(target.data)) * mask) + + +class mase_loss(nn.Module): + def __init__(self): + super(mase_loss, self).__init__() + + def forward(self, insample: t.Tensor, freq: int, + forecast: t.Tensor, target: t.Tensor, mask: t.Tensor) -> t.float: + """ + MASE loss as defined in "Scaled Errors" https://robjhyndman.com/papers/mase.pdf + + :param insample: Insample values. Shape: batch, time_i + :param freq: Frequency value + :param forecast: Forecast values. Shape: batch, time_o + :param target: Target values. Shape: batch, time_o + :param mask: 0/1 mask. Shape: batch, time_o + :return: Loss value + """ + masep = t.mean(t.abs(insample[:, freq:] - insample[:, :-freq]), dim=1) + masked_masep_inv = divide_no_nan(mask, masep[:, None]) + return t.mean(t.abs(target - forecast) * masked_masep_inv) diff --git a/utils/masking.py b/utils/masking.py new file mode 100644 index 0000000..a19cbf6 --- /dev/null +++ b/utils/masking.py @@ -0,0 +1,26 @@ +import torch + + +class TriangularCausalMask(): + def __init__(self, B, L, device="cpu"): + mask_shape = [B, 1, L, L] + with torch.no_grad(): + self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device) + + @property + def mask(self): + return self._mask + + +class ProbMask(): + def __init__(self, B, H, L, index, scores, device="cpu"): + _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1) + _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1]) + indicator = _mask_ex[torch.arange(B)[:, None, None], + torch.arange(H)[None, :, None], + index, :].to(device) + self._mask = indicator.view(scores.shape).to(device) + + @property + def mask(self): + return self._mask diff --git a/utils/metric.py b/utils/metric.py deleted file mode 100644 index 8a14443..0000000 --- a/utils/metric.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch -import pandas as pd -from pandas import DataFrame, to_datetime, to_timedelta -import numpy as np -from typing import List -from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score - -def calculate_result(y_true, y_pred): - mae = mean_absolute_error(y_true, y_pred) - rmse = np.sqrt(mean_squared_error(y_true, y_pred)) - rmsle = np.sqrt(mean_squared_log_error(y_true, y_pred)) - smape = symmetric_mean_absolute_percentage(y_true, y_pred) - r2 = r2_score(y_true, y_pred) - - return mae, rmse, rmsle, smape, r2 - -# https://pytorch-forecasting.readthedocs.io/en/stable/api/pytorch_forecasting.metrics.point.SMAPE.html?highlight=smape -def symmetric_mean_absolute_percentage(y_true, y_pred): - value = 2*abs(y_true - y_pred) / (abs(y_true) + abs(y_pred)) - # for cases when both ground truth and predicted value are zero - value = np.where(np.isnan(value), 0, value) - - return np.mean(value) - -def show_result(df: pd.DataFrame, targets:List[str]): - for target in targets: - predicted_column = f'Predicted_{target}' - y_true, y_pred = df[target].values, df[predicted_column].values - - mae, rmse, rmsle, smape, r2 = calculate_result(y_true, y_pred) - print(f'Target {target}, MAE {mae:.5g}, RMSE {rmse:.5g}, RMSLE {rmsle:0.5g}, SMAPE {smape:0.5g}. 
R2 {r2:0.5g}.') - print() - -def accuracy(output, target): - with torch.no_grad(): - pred = torch.argmax(output, dim=1) - assert pred.shape[0] == len(target) - correct = 0 - correct += torch.sum(pred == target).item() - return correct / len(target) - - -def top_k_acc(output, target, k=3): - with torch.no_grad(): - pred = torch.topk(output, k, dim=1)[1] - assert pred.shape[0] == len(target) - correct = 0 - for i in range(k): - correct += torch.sum(pred[:, i] == target).item() - return correct / len(target) diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..b4f5a76 --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,41 @@ +import numpy as np + + +def RSE(pred, true): + return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2)) + + +def CORR(pred, true): + u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0) + d = np.sqrt(((true - true.mean(0)) ** 2 * (pred - pred.mean(0)) ** 2).sum(0)) + return (u / d).mean(-1) + + +def MAE(pred, true): + return np.mean(np.abs(pred - true)) + + +def MSE(pred, true): + return np.mean((pred - true) ** 2) + + +def RMSE(pred, true): + return np.sqrt(MSE(pred, true)) + + +def MAPE(pred, true): + return np.mean(np.abs((pred - true) / true)) + + +def MSPE(pred, true): + return np.mean(np.square((pred - true) / true)) + + +def metric(pred, true): + mae = MAE(pred, true) + mse = MSE(pred, true) + rmse = RMSE(pred, true) + mape = MAPE(pred, true) + mspe = MSPE(pred, true) + + return mae, mse, rmse, mape, mspe diff --git a/utils/model.py b/utils/model.py deleted file mode 100644 index 25c1c09..0000000 --- a/utils/model.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -from typing import List -import pandas as pd -import pytorch_lightning as pl -import random - -def seed_torch(seed=7): - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - pl.seed_everything(seed) - -def get_best_model_path(checkpoint_folder, prefix='best-epoch='): - for item in os.listdir(checkpoint_folder): - if item.startswith(prefix): - print(f'Found saved model {item}.') - return os.path.join(checkpoint_folder, item) - - raise FileNotFoundError(f"Couldn't find the best model in {checkpoint_folder}") - -def upscale_prediction(targets:List[str], predictions, target_scaler, target_sequence_length:int): - """ - if target was scaled, this inverse transforms the target. Also reduces the shape from - (time, target_sequence_length, 1) to ((time, target_sequence_length) - """ - if target_scaler is None: - return [predictions[i].reshape((-1, target_sequence_length)) for i in range(len(targets))] - - df = pd.DataFrame({targets[i]: predictions[i].flatten() for i in range(len(targets))}) - df[targets] = target_scaler.inverse_transform(df[targets]) - - return [df[target].values.reshape((-1, target_sequence_length)) for target in targets] \ No newline at end of file diff --git a/utils/timefeatures.py b/utils/timefeatures.py new file mode 100644 index 0000000..7c12972 --- /dev/null +++ b/utils/timefeatures.py @@ -0,0 +1,148 @@ +# From: gluonts/src/gluonts/time_feature/_base.py +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. 
This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +from typing import List + +import numpy as np +import pandas as pd +from pandas.tseries import offsets +from pandas.tseries.frequencies import to_offset + + +class TimeFeature: + def __init__(self): + pass + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + pass + + def __repr__(self): + return self.__class__.__name__ + "()" + + +class SecondOfMinute(TimeFeature): + """Minute of hour encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.second / 59.0 - 0.5 + + +class MinuteOfHour(TimeFeature): + """Minute of hour encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.minute / 59.0 - 0.5 + + +class HourOfDay(TimeFeature): + """Hour of day encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.hour / 23.0 - 0.5 + + +class DayOfWeek(TimeFeature): + """Hour of day encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.dayofweek / 6.0 - 0.5 + + +class DayOfMonth(TimeFeature): + """Day of month encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.day - 1) / 30.0 - 0.5 + + +class DayOfYear(TimeFeature): + """Day of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.dayofyear - 1) / 365.0 - 0.5 + + +class MonthOfYear(TimeFeature): + """Month of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.month - 1) / 11.0 - 0.5 + + +class WeekOfYear(TimeFeature): + """Week of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.isocalendar().week - 1) / 52.0 - 0.5 + + +def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: + """ + Returns a list of time features that will be appropriate for the given frequency string. + Parameters + ---------- + freq_str + Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. 
+ """ + + features_by_offsets = { + offsets.YearEnd: [], + offsets.QuarterEnd: [MonthOfYear], + offsets.MonthEnd: [MonthOfYear], + offsets.Week: [DayOfMonth, WeekOfYear], + offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], + offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], + offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], + offsets.Minute: [ + MinuteOfHour, + HourOfDay, + DayOfWeek, + DayOfMonth, + DayOfYear, + ], + offsets.Second: [ + SecondOfMinute, + MinuteOfHour, + HourOfDay, + DayOfWeek, + DayOfMonth, + DayOfYear, + ], + } + + offset = to_offset(freq_str) + + for offset_type, feature_classes in features_by_offsets.items(): + if isinstance(offset, offset_type): + return [cls() for cls in feature_classes] + + supported_freq_msg = f""" + Unsupported frequency {freq_str} + The following frequencies are supported: + Y - yearly + alias: A + M - monthly + W - weekly + D - daily + B - business days + H - hourly + T - minutely + alias: min + S - secondly + """ + raise RuntimeError(supported_freq_msg) + + +def time_features(dates, freq='h'): + return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)]) diff --git a/utils/tools.py b/utils/tools.py new file mode 100644 index 0000000..a357cc2 --- /dev/null +++ b/utils/tools.py @@ -0,0 +1,115 @@ +import os + +import numpy as np +import torch +import matplotlib.pyplot as plt +import pandas as pd + +plt.switch_backend('agg') + + +def adjust_learning_rate(optimizer, epoch, args): + # lr = args.learning_rate * (0.2 ** (epoch // 2)) + if args.lradj == 'type1': + lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))} + elif args.lradj == 'type2': + lr_adjust = { + 2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6, + 10: 5e-7, 15: 1e-7, 20: 5e-8 + } + if epoch in lr_adjust.keys(): + lr = lr_adjust[epoch] + for param_group in optimizer.param_groups: + param_group['lr'] = lr + print('Updating learning rate to {}'.format(lr)) + + +class EarlyStopping: + def __init__(self, patience=7, verbose=False, delta=0): + self.patience = patience + self.verbose = verbose + self.counter = 0 + self.best_score = None + self.early_stop = False + self.val_loss_min = np.Inf + self.delta = delta + + def __call__(self, val_loss, model, path): + score = -val_loss + if self.best_score is None: + self.best_score = score + self.save_checkpoint(val_loss, model, path) + elif score < self.best_score + self.delta: + self.counter += 1 + print(f'EarlyStopping counter: {self.counter} out of {self.patience}') + if self.counter >= self.patience: + self.early_stop = True + else: + self.best_score = score + self.save_checkpoint(val_loss, model, path) + self.counter = 0 + + def save_checkpoint(self, val_loss, model, path): + if self.verbose: + print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). 
Saving model ...') + torch.save(model.state_dict(), path + '/' + 'checkpoint.pth') + self.val_loss_min = val_loss + + +class dotdict(dict): + """dot.notation access to dictionary attributes""" + __getattr__ = dict.get + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + +class StandardScaler(): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def transform(self, data): + return (data - self.mean) / self.std + + def inverse_transform(self, data): + return (data * self.std) + self.mean + + +def visual(true, preds=None, name='./pic/test.pdf'): + """ + Results visualization + """ + plt.figure() + plt.plot(true, label='GroundTruth', linewidth=2) + if preds is not None: + plt.plot(preds, label='Prediction', linewidth=2) + plt.legend() + plt.savefig(name, bbox_inches='tight') + + +def adjustment(gt, pred): + anomaly_state = False + for i in range(len(gt)): + if gt[i] == 1 and pred[i] == 1 and not anomaly_state: + anomaly_state = True + for j in range(i, 0, -1): + if gt[j] == 0: + break + else: + if pred[j] == 0: + pred[j] = 1 + for j in range(i, len(gt)): + if gt[j] == 0: + break + else: + if pred[j] == 0: + pred[j] = 1 + elif gt[i] == 0: + anomaly_state = False + if anomaly_state: + pred[i] = 1 + return gt, pred + + +def cal_accuracy(y_pred, y_true): + return np.mean(y_pred == y_true) \ No newline at end of file
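
Usage sketch for the N-BEATS losses added in utils/losses.py: they all share the signature (insample, freq, forecast, target, mask). The tensor shapes and freq value below are illustrative assumptions, not taken from the patch.

    import torch as t
    from utils.losses import mase_loss

    batch, history, horizon = 32, 168, 24                    # illustrative sizes
    insample = t.randn(batch, history)                       # historical window, shape (batch, time_i)
    forecast = t.randn(batch, horizon, requires_grad=True)   # model output, shape (batch, time_o)
    target = t.randn(batch, horizon)                         # ground truth, shape (batch, time_o)
    mask = t.ones(batch, horizon)                            # 1 = observed, 0 = padded

    # freq=24 compares each in-sample point to the value one day earlier,
    # i.e. the seasonal-naive baseline that MASE scales the error against
    # for hourly data.
    loss = mase_loss()(insample, 24, forecast, target, mask)
    loss.backward()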
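Usage sketch for utils/timefeatures.py: time_features stacks the normalized calendar encodings that time_features_from_frequency_str selects for a frequency string. The date range here is made up for illustration.

    import pandas as pd
    from utils.timefeatures import time_features

    dates = pd.date_range('2021-01-01', periods=96, freq='h')
    # An hourly offset maps to [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
    # so the result has shape (4, 96) with every value in [-0.5, 0.5].
    feats = time_features(dates, freq='h')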
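Usage sketch for EarlyStopping and adjust_learning_rate in utils/tools.py, which are meant to wrap an epoch loop. train_one_epoch, evaluate, checkpoint_dir, and args are placeholders for project code not shown in this patch.

    from utils.tools import EarlyStopping, adjust_learning_rate

    early_stopping = EarlyStopping(patience=7, verbose=True)
    for epoch in range(args.train_epochs):
        train_one_epoch(model, train_loader, optimizer)    # hypothetical helper
        val_loss = evaluate(model, val_loader)             # hypothetical helper
        early_stopping(val_loss, model, checkpoint_dir)    # writes checkpoint.pth on improvement
        if early_stopping.early_stop:
            print('Early stopping')
            break
        # args.lradj selects the schedule: 'type1' halves the lr every epoch,
        # 'type2' reads the lr from a fixed epoch-indexed table.
        adjust_learning_rate(optimizer, epoch + 1, args)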