Merge branch 'main' into train-continue
ourownstory authored Aug 23, 2024
2 parents e043201 + 8330bab commit 30c106f
Showing 13 changed files with 1,182 additions and 1,134 deletions.
25 changes: 23 additions & 2 deletions .github/workflows/metrics.yml
@@ -11,6 +11,7 @@ on:
- main
- develop
workflow_dispatch:

jobs:
metrics:
runs-on: ubuntu-latest # container: docker://ghcr.io/iterative/cml:0-dvc2-base1
@@ -19,24 +20,32 @@ jobs:
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}

- name: Install Python 3.12
uses: actions/setup-python@v5
with:
python-version: "3.12"

- name: Setup NodeJS (for CML)
uses: actions/setup-node@v3 # For CML
with:
node-version: '16'

- name: Setup CML
uses: iterative/setup-cml@v1

- name: Install Poetry
uses: snok/install-poetry@v1

- name: Install Dependencies
run: poetry install --no-interaction --no-root --with=pytest,metrics --without=dev,docs,linters

- name: Install Project
run: poetry install --no-interaction --with=pytest,metrics --without=dev,docs,linters

- name: Train model
run: poetry run pytest tests/test_model_performance.py -n 1 --durations=0

- name: Download metrics from main
uses: dawidd6/action-download-artifact@v2
with:
@@ -45,28 +54,40 @@ jobs:
name: metrics
path: tests/metrics-main/
if_no_artifact_found: warn

- name: Open Benchmark Report
run: echo "## Model Benchmark" >> report.md

- name: Write Benchmark Report
run: poetry run python tests/metrics/compareMetrics.py >> report.md

- name: Publish Report with CML
env:
REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "<details>\n<summary>Model training plots</summary>\n" >> report.md
echo "<details><summary>Model training plots</summary>" >> report.md
echo "" >> report.md
echo "## Model Training" >> report.md
echo "" >> report.md
echo "### PeytonManning" >> report.md
cml asset publish tests/metrics/PeytonManning.svg --md >> report.md
echo "" >> report.md
echo "### YosemiteTemps" >> report.md
cml asset publish tests/metrics/YosemiteTemps.svg --md >> report.md
echo "" >> report.md
echo "### AirPassengers" >> report.md
cml asset publish tests/metrics/AirPassengers.svg --md >> report.md
echo "" >> report.md
echo "### EnergyPriceDaily" >> report.md
cml asset publish tests/metrics/EnergyPriceDaily.svg --md >> report.md
echo "\n</details>" >> report.md
echo "" >> report.md
echo "</details>" >> report.md
echo "" >> report.md
cml comment update --target=pr report.md # Post reports as comments in GitHub PRs
cml check create --title=ModelReport report.md # update status of check in PR
- name: Upload metrics if on main
if: github.ref == 'refs/heads/main'
uses: actions/upload-artifact@v3
with:
name: metrics
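The report rewrite fixes two Markdown pitfalls: `echo` without `-e` prints `\n` literally in the runner's shell, and GitHub only renders Markdown inside `<details>` when a blank line separates the tag from the content. A minimal Python sketch of the intended report layout (a hypothetical stand-in for the shell pipeline; plain image links replace the `cml asset publish` output):

```python
# Sketch only: mirrors the report structure the workflow builds with echo/cml.
sections = ["PeytonManning", "YosemiteTemps", "AirPassengers", "EnergyPriceDaily"]

lines = ["## Model Benchmark", ""]
lines += ["<details><summary>Model training plots</summary>", ""]  # blank line needed for Markdown inside HTML
lines += ["## Model Training", ""]
for name in sections:
    # cml asset publish would insert a hosted image link here instead
    lines += [f"### {name}", f"![{name}](tests/metrics/{name}.svg)", ""]
lines += ["</details>", ""]

with open("report.md", "w") as f:
    f.write("\n".join(lines) + "\n")
```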
12 changes: 5 additions & 7 deletions neuralprophet/components/future_regressors/neural_nets.py
@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
if self.regressors_dims is not None:
# Regressors params
self.regressor_nets = nn.ModuleDict({})
- # TO DO: if no hidden layers, then just a as legacy
- self.d_hidden_regressors = config.d_hidden
- self.num_hidden_layers_regressors = config.num_hidden_layers
+ self.regressors_layers = config.regressors_layers
# one net per regressor. to be adapted to combined network
for regressor in self.regressors_dims.keys():
# Nets for both additive and multiplicative regressors
regressor_net = nn.ModuleList()
# This will be later 1 + static covariates
d_inputs = 1
- for i in range(self.num_hidden_layers_regressors):
-     regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-     d_inputs = self.d_hidden_regressors
+ for d_hidden_i in self.regressors_layers:
+     regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+     d_inputs = d_hidden_i
# final layer has input size d_inputs and output size equal to no. of quantiles
regressor_net.append(nn.Linear(d_inputs, len(self.quantiles), bias=False))
for lay in regressor_net:
@@ -79,7 +77,7 @@ def regressor(self, regressor_input, name):
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
x = regressor_input
- for i in range(self.num_hidden_layers_regressors + 1):
+ for i in range(len(self.regressors_layers) + 1):
if i > 0:
x = nn.functional.relu(x)
x = self.regressor_nets[name][i](x)
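The net construction is easiest to see with concrete sizes. A self-contained sketch (illustrative only, not the library's class) of what the new loop builds for `regressors_layers=[16, 8]` and three quantiles, including the forward pass from `regressor()`:

```python
import torch
import torch.nn as nn

regressors_layers = [16, 8]   # hidden layer sizes (future_regressors_layers)
quantiles = [0.1, 0.5, 0.9]

regressor_net = nn.ModuleList()
d_inputs = 1                  # one input per regressor (later: 1 + static covariates)
for d_hidden_i in regressors_layers:
    regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
    d_inputs = d_hidden_i
# final layer maps to one output per quantile
regressor_net.append(nn.Linear(d_inputs, len(quantiles), bias=False))

# Forward pass as in regressor(): ReLU before every layer except the first.
x = torch.randn(32, 1)        # (batch, 1)
for i in range(len(regressors_layers) + 1):
    if i > 0:
        x = nn.functional.relu(x)
    x = regressor_net[i](x)
print(x.shape)                # torch.Size([32, 3]) -> one output per quantile
```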
12 changes: 5 additions & 7 deletions neuralprophet/components/future_regressors/shared_neural_nets.py
@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
if self.regressors_dims is not None:
# Regressors params
self.regressor_nets = nn.ModuleDict({})
- # TO DO: if no hidden layers, then just a as legacy
- self.d_hidden_regressors = config.d_hidden
- self.num_hidden_layers_regressors = config.num_hidden_layers
+ self.regressors_layers = config.regressors_layers
# Combined network
for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
# Nets for both additive and multiplicative regressors
regressor_net = nn.ModuleList()
# This will be later size_i(1 + static covariates)
d_inputs = size_i
- for i in range(self.num_hidden_layers_regressors):
-     regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-     d_inputs = self.d_hidden_regressors
+ for d_hidden_i in self.regressors_layers:
+     regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+     d_inputs = d_hidden_i
# final layer has input size d_inputs and output size equal to no. of quantiles
regressor_net.append(nn.Linear(d_inputs, len(self.quantiles), bias=False))
for lay in regressor_net:
@@ -81,7 +79,7 @@ def regressors(self, regressor_inputs, mode):
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
x = regressor_inputs
- for i in range(self.num_hidden_layers_regressors + 1):
+ for i in range(len(self.regressors_layers) + 1):
if i > 0:
x = nn.functional.relu(x)
x = self.regressor_nets[mode][i](x)
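In the shared variants, by contrast, one net serves all regressors of a mode, so the `Counter` expression determines each net's input width. A small sketch with a hypothetical `regressors_dims` config:

```python
from collections import Counter

# Hypothetical regressor config, for illustration only.
regressors_dims = {
    "temperature": {"mode": "additive"},
    "humidity": {"mode": "additive"},
    "promo": {"mode": "multiplicative"},
}

# As in the diff: one shared net per mode, sized by the number of regressors.
for net_i, size_i in Counter([x["mode"] for x in regressors_dims.values()]).items():
    print(net_i, size_i)
# additive 2        -> shared net with d_inputs = 2
# multiplicative 1  -> shared net with d_inputs = 1
```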
neuralprophet/components/future_regressors/shared_neural_nets_coef.py
@@ -21,18 +21,16 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend
if self.regressors_dims is not None:
# Regressors params
self.regressor_nets = nn.ModuleDict({})
- # TO DO: if no hidden layers, then just a as legacy
- self.d_hidden_regressors = config.d_hidden
- self.num_hidden_layers_regressors = config.num_hidden_layers
+ self.regressors_layers = config.regressors_layers
# Combined network
for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
# Nets for both additive and multiplicative regressors
regressor_net = nn.ModuleList()
# This will be later size_i(1 + static covariates)
d_inputs = size_i
- for i in range(self.num_hidden_layers_regressors):
-     regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
-     d_inputs = self.d_hidden_regressors
+ for d_hidden_i in self.regressors_layers:
+     regressor_net.append(nn.Linear(d_inputs, d_hidden_i, bias=True))
+     d_inputs = d_hidden_i
# final layer has input size d_inputs and output size equal to no. of quantiles
regressor_net.append(nn.Linear(d_inputs, size_i * len(self.quantiles), bias=False))
for lay in regressor_net:
@@ -82,7 +80,7 @@ def regressors(self, regressor_inputs, mode):
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
x = regressor_inputs
- for i in range(self.num_hidden_layers_regressors + 1):
+ for i in range(len(self.regressors_layers) + 1):
if i > 0:
x = nn.functional.relu(x)
x = self.regressor_nets[mode][i](x)
8 changes: 4 additions & 4 deletions neuralprophet/configure.py
@@ -243,7 +243,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
Set the lr_finder_args.
This is the range of learning rates to test.
"""
- num_training = 150 + int(np.log10(100 + dataset_size) * 25)
+ num_training = 100 + int(np.log10(dataset_size) * 20)
if num_batches < num_training:
log.warning(
f"Learning rate finder: The number of batches ({num_batches}) is too small than the required number \
@@ -252,7 +252,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
# num_training = num_batches
self.lr_finder_args.update(
{
"min_lr": 1e-6,
"min_lr": 1e-7,
"max_lr": 10,
"num_training": num_training,
"early_stop_threshold": None,
@@ -516,8 +516,8 @@ class Regressor:
@dataclass
class ConfigFutureRegressors:
model: str
- d_hidden: int
- num_hidden_layers: int
+ regressors_layers: Optional[List[int]]

regressors: OrderedDict = field(init=False) # contains RegressorConfig objects

def __post_init__(self):
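The new LR-finder budget above starts lower and grows more slowly with dataset size; for `dataset_size=10_000`, for example, the old formula gives `150 + int(log10(10_100) * 25) = 250` while the new one gives `100 + int(log10(10_000) * 20) = 180`. A quick comparison, reproducing just the two formulas from the diff:

```python
import numpy as np

def num_training_old(dataset_size: int) -> int:
    # heuristic removed in this diff
    return 150 + int(np.log10(100 + dataset_size) * 25)

def num_training_new(dataset_size: int) -> int:
    # heuristic added in this diff
    return 100 + int(np.log10(dataset_size) * 20)

for n in (1_000, 10_000, 100_000):
    print(n, num_training_old(n), num_training_new(n))
# 1000 226 160
# 10000 250 180
# 100000 275 200
```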
23 changes: 8 additions & 15 deletions neuralprophet/forecaster.py
@@ -199,13 +199,10 @@ class NeuralProphet:
* ``shared_neural_nets``
* ``shared_neural_nets_coef``
- future_regressors_d_hidden: int
-     Number of hidden layers in the neural network model for future regressors.
-     Ignored if ``future_regressors_model`` is ``linear``.
+ future_regressors_layers: list of int
+     List of hidden layer dimensions for the future regressor nets. The number of entries sets the number of
+     hidden layers, and each entry sets that layer's dimension. Default: [] (no hidden layers).
- future_regressors_num_hidden_layers: int
-     Dimension of hidden layers in the neural network model for future regressors.
-     Ignored if ``future_regressors_model`` is ``linear``.
COMMENT
AR Config
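A hedged usage sketch of the renamed parameter (hypothetical column names; `df` would need `ds`, `y`, and the regressor column):

```python
from neuralprophet import NeuralProphet

# Sketch: the list-based parameter replaces the old
# future_regressors_d_hidden / future_regressors_num_hidden_layers pair.
m = NeuralProphet(
    future_regressors_model="neural_nets",
    future_regressors_layers=[32, 16],  # two hidden layers: 32 then 16 units
)
m.add_future_regressor("temperature")  # hypothetical regressor column
# metrics = m.fit(df)  # df: columns ds, y, temperature
```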
@@ -438,8 +435,7 @@ def __init__(
season_global_local: np_types.SeasonGlobalLocalMode = "global",
seasonality_local_reg: Optional[Union[bool, float]] = False,
future_regressors_model: np_types.FutureRegressorsModel = "linear",
- future_regressors_d_hidden: int = 4,
- future_regressors_num_hidden_layers: int = 2,
+ future_regressors_layers: Optional[list] = [],
n_forecasts: int = 1,
n_lags: int = 0,
ar_layers: Optional[list] = [],
@@ -557,8 +553,7 @@ def __init__(
self.config_lagged_regressors: Optional[configure.ConfigLaggedRegressors] = None
self.config_regressors = configure.ConfigFutureRegressors(
model=future_regressors_model,
- d_hidden=future_regressors_d_hidden,
- num_hidden_layers=future_regressors_num_hidden_layers,
+ regressors_layers=future_regressors_layers,
) # Optional[configure.ConfigFutureRegressors] = None

# set during fit()
@@ -2864,13 +2859,12 @@ def _train(
lr_finder = tuner.lr_find(
model=self.model,
train_dataloaders=train_loader,
- val_dataloaders=val_loader,
+ # val_dataloaders=val_loader,  # not used, but may lead to a Lightning bug if not provided
**self.config_train.lr_finder_args,
)
# Estimate the optimal learning rate from the loss curve
assert lr_finder is not None
- _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
- self.model.learning_rate = lr_suggestion
+ _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
start = time.time()
self.trainer.fit(
self.model,
@@ -2891,8 +2885,7 @@ def _train(
)
assert lr_finder is not None
# Estimate the optimal learning rate from the loss curve
- _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
- self.model.learning_rate = lr_suggestion
+ _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
start = time.time()
self.trainer.fit(
self.model,
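`smooth_loss_and_suggest` now receives the whole `LRFinder` object rather than its `.results`, and the suggested rate is assigned directly from the returned tuple. Its internals are not shown in this diff; a hedged, self-contained sketch of what such a utility typically does (smooth the loss curve, then pick the learning rate at the steepest descent):

```python
import numpy as np

def suggest_lr(lrs, losses, window=10):
    # moving-average smoothing, then the LR where the smoothed loss drops fastest
    smoothed = np.convolve(losses, np.ones(window) / window, mode="valid")
    lrs = np.asarray(lrs)[: len(smoothed)]
    return smoothed, lrs, lrs[np.argmin(np.gradient(smoothed))]

# synthetic loss curve: flat, then a drop around lr ~ 1e-4, plus noise
rng = np.random.default_rng(0)
lrs = np.logspace(-7, 1, 200)
losses = 1 / (1 + np.exp(3 * (np.log10(lrs) + 4))) + 0.01 * rng.random(200)
_, _, lr = suggest_lr(lrs, losses)
print(f"suggested lr: {lr:.1e}")  # ~1e-4 for this synthetic curve
```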
(Diffs for the remaining 7 changed files are not shown.)
