
Commit 8f14ab9 (merged)
MaiBe-ctrl committed Aug 15, 2024
2 parents 96b2ca2 + a19a20e
Showing 5 changed files with 583 additions and 570 deletions.
19 changes: 8 additions & 11 deletions docs/source/tutorials/tutorial11.ipynb
@@ -46,9 +46,7 @@
 "from pytorch_lightning.profilers import SimpleProfiler\n",
 "\n",
 "# Configure Simple Profiler\n",
-"trainer_config = {\n",
-"    \"profiler\": SimpleProfiler(dirpath=\"./pl_profiling\", filename=\"simple\")\n",
-"}"
+"trainer_config = {\"profiler\": SimpleProfiler(dirpath=\"./pl_profiling\", filename=\"simple\")}"
 ]
 },
 {
@@ -71,12 +69,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
 "from pytorch_lightning.profilers import AdvancedProfiler\n",
 "\n",
 "# Configure Advanced Profiler\n",
-"trainer_config = {\n",
-"    \"profiler\": AdvancedProfiler(dirpath=\"./pl_profiling\", filename=\"advanced\")\n",
-"}"
+"trainer_config = {\"profiler\": AdvancedProfiler(dirpath=\"./pl_profiling\", filename=\"advanced\")}"
 ]
 },
 {
@@ -126,8 +123,7 @@
 "\n",
 "# Model and prediction\n",
 "m = NeuralProphet(trainer_config=trainer_config)\n",
-"df_train, df_val = m.split_df(df, valid_p=0.2)\n",
-"metrics = m.fit(df_train, validation_df=df_val, progress=None)"
+"m.fit(df, learning_rate=0.1, epochs=10, batch_size=128, progress=False, minimal=True)"
 ]
 },
 {
@@ -162,9 +158,7 @@
 "from pytorch_lightning.loggers import TensorBoardLogger\n",
 "\n",
 "# Configure TensorBoard logger\n",
-"trainer_config = {\n",
-"    \"logger\": TensorBoardLogger(\"tb_logs\", name=\"NeuralProphet\")\n",
-"}"
+"trainer_config = {\"logger\": TensorBoardLogger(\"tb_logs\", name=\"NeuralProphet\")}"
 ]
 },
 {
@@ -267,6 +261,9 @@
 "source": [
 "import pandas as pd\n",
 "from neuralprophet import NeuralProphet\n",
+"from neuralprophet import set_random_seed\n",
+"\n",
+"set_random_seed(42)\n",
 "\n",
 "# Load the dataset from the CSV file using pandas\n",
 "df = pd.read_csv(\"https://github.com/ourownstory/neuralprophet-data/raw/main/kaggle-energy/datasets/tutorial01.csv\")\n",
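
Taken together, the notebook cells after this change read roughly as follows. This is a consolidated sketch assembled from the hunks above, not an excerpt from the notebook itself; every import, value, and URL is taken verbatim from the diff, and the AdvancedProfiler and TensorBoardLogger cells differ only in what they put in the trainer_config dict.

    import pandas as pd
    from neuralprophet import NeuralProphet
    from neuralprophet import set_random_seed
    from pytorch_lightning.profilers import SimpleProfiler

    set_random_seed(42)

    # Load the dataset from the CSV file using pandas
    df = pd.read_csv(
        "https://github.com/ourownstory/neuralprophet-data/raw/main/kaggle-energy/datasets/tutorial01.csv"
    )

    # Configure Simple Profiler; the other cells swap in
    # AdvancedProfiler (key "profiler") or TensorBoardLogger (key "logger")
    trainer_config = {"profiler": SimpleProfiler(dirpath="./pl_profiling", filename="simple")}

    # Model and prediction
    m = NeuralProphet(trainer_config=trainer_config)
    m.fit(df, learning_rate=0.1, epochs=10, batch_size=128, progress=False, minimal=True)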
4 changes: 2 additions & 2 deletions neuralprophet/configure.py
@@ -208,7 +208,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
         Set the lr_finder_args.
         This is the range of learning rates to test.
         """
-        num_training = 150 + int(np.log10(100 + dataset_size) * 25)
+        num_training = 100 + int(np.log10(dataset_size) * 20)
         if num_batches < num_training:
             log.warning(
                 f"Learning rate finder: The number of batches ({num_batches}) is too small than the required number \
@@ -217,7 +217,7 @@ def set_lr_finder_args(self, dataset_size, num_batches):
             # num_training = num_batches
         self.lr_finder_args.update(
             {
-                "min_lr": 1e-6,
+                "min_lr": 1e-7,
                 "max_lr": 10,
                 "num_training": num_training,
                 "early_stop_threshold": None,
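
For a sense of scale, reading the second line of each pair above as the post-merge state (per the diff rendering), the learning-rate finder now sweeps from min_lr=1e-7 up to max_lr=10, and the number of finder steps grows logarithmically with dataset size. A quick sketch of the heuristic at a few hypothetical dataset sizes, with the formula taken verbatim from the hunk:

    import numpy as np

    def lr_finder_steps(dataset_size: int) -> int:
        # Formula from the hunk above: 100 steps plus 20 per decade of data
        return 100 + int(np.log10(dataset_size) * 20)

    for n in (1_000, 10_000, 100_000):
        print(n, lr_finder_steps(n))  # -> 160, 180, 200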
8 changes: 3 additions & 5 deletions neuralprophet/forecaster.py
@@ -2805,13 +2805,12 @@ def _train(
             lr_finder = tuner.lr_find(
                 model=self.model,
                 train_dataloaders=train_loader,
-                val_dataloaders=val_loader,
+                # val_dataloaders=val_loader, # not be used, but may lead to Lightning bug if not provided
                 **self.config_train.lr_finder_args,
             )
             # Estimate the optimal learning rate from the loss curve
             assert lr_finder is not None
-            _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
-            self.model.learning_rate = lr_suggestion
+            _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
             start = time.time()
             self.trainer.fit(
                 self.model,
@@ -2832,8 +2831,7 @@
             )
             assert lr_finder is not None
             # Estimate the optimal learning rate from the loss curve
-            _, _, lr_suggestion = utils.smooth_loss_and_suggest(lr_finder.results)
-            self.model.learning_rate = lr_suggestion
+            _, _, self.model.learning_rate = utils.smooth_loss_and_suggest(lr_finder)
             start = time.time()
             self.trainer.fit(
                 self.model,
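
For context, both hunks change the same call contract: utils.smooth_loss_and_suggest now receives the whole lr_finder object rather than lr_finder.results, and its third return value is unpacked straight into self.model.learning_rate. A minimal sketch of triggering this path from user code, assuming, as in current NeuralProphet releases, that omitting learning_rate is what enables the finder:

    import pandas as pd
    from neuralprophet import NeuralProphet

    df = pd.read_csv(
        "https://github.com/ourownstory/neuralprophet-data/raw/main/kaggle-energy/datasets/tutorial01.csv"
    )

    # No learning_rate given, so _train() runs tuner.lr_find with the
    # lr_finder_args configured in configure.py and adopts the suggested rate.
    m = NeuralProphet(epochs=10)
    metrics = m.fit(df)  # freq is auto-detected from the ds column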
