From 79b597650885899d6f9b5b7888e0f0cfa83e08ed Mon Sep 17 00:00:00 2001 From: Sarah Krebs Date: Thu, 20 Jul 2023 09:03:55 +0200 Subject: [PATCH 1/5] Handle configspace as dictionary in mlp example --- CHANGELOG.md | 1 + examples/2_multi_fidelity/1_mlp_epochs.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9493c38c7..fffd53788c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ ## Bugfixes - Fix bug in the incumbent selection in the case that multi-fidelity is combined with multi-objective (#1019). - Fix callback order (#1040). +- Handle configspace as dictionary in mlp example. # 2.0.1 diff --git a/examples/2_multi_fidelity/1_mlp_epochs.py b/examples/2_multi_fidelity/1_mlp_epochs.py index 9fd256c5d6..7159fdc8b1 100644 --- a/examples/2_multi_fidelity/1_mlp_epochs.py +++ b/examples/2_multi_fidelity/1_mlp_epochs.py @@ -84,18 +84,18 @@ def train(self, config: Configuration, seed: int = 0, budget: int = 25) -> float # For deactivated parameters (by virtue of the conditions), # the configuration stores None-values. # This is not accepted by the MLP, so we replace them with placeholder values. - lr = config["learning_rate"] if config["learning_rate"] else "constant" - lr_init = config["learning_rate_init"] if config["learning_rate_init"] else 0.001 - batch_size = config["batch_size"] if config["batch_size"] else 200 + lr = config.get("learning_rate") if config.get("learning_rate") else "constant" + lr_init = config.get("learning_rate_init") if config.get("learning_rate_init") else 0.001 + batch_size = config.get("batch_size") if config.get("batch_size") else 200 with warnings.catch_warnings(): warnings.filterwarnings("ignore") classifier = MLPClassifier( - hidden_layer_sizes=[config["n_neurons"]] * config["n_layer"], - solver=config["solver"], + hidden_layer_sizes=[config.get("n_neurons")] * config.get("n_layer"), + solver=config.get("solver"), batch_size=batch_size, - activation=config["activation"], + activation=config.get("activation"), learning_rate=lr, learning_rate_init=lr_init, max_iter=int(np.ceil(budget)), From 684afe3c7b3c32af60379df628904beb3b20208a Mon Sep 17 00:00:00 2001 From: Sarah Krebs Date: Thu, 20 Jul 2023 09:29:16 +0200 Subject: [PATCH 2/5] Adapt sgd loss to newest scikit-learn version --- examples/2_multi_fidelity/2_sgd_datasets.py | 2 +- tests/fixtures/models.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/2_multi_fidelity/2_sgd_datasets.py b/examples/2_multi_fidelity/2_sgd_datasets.py index 384d1c2246..09864a963a 100644 --- a/examples/2_multi_fidelity/2_sgd_datasets.py +++ b/examples/2_multi_fidelity/2_sgd_datasets.py @@ -89,7 +89,7 @@ def train(self, config: Configuration, instance: str, seed: int = 0) -> float: # SGD classifier using given configuration clf = SGDClassifier( - loss="log", + loss="log_loss", penalty="elasticnet", alpha=config["alpha"], l1_ratio=config["l1_ratio"], diff --git a/tests/fixtures/models.py b/tests/fixtures/models.py index 1321176f27..29f9bf152e 100644 --- a/tests/fixtures/models.py +++ b/tests/fixtures/models.py @@ -71,7 +71,7 @@ def train(self, config: Configuration, instance: str = "0-1", budget: float = 1, # SGD classifier using given configuration clf = SGDClassifier( - loss="log", + loss="log_loss", penalty="elasticnet", alpha=config["alpha"], l1_ratio=config["l1_ratio"], From bab8ba073dd8e65ccc995087bece33aaf64149c6 Mon Sep 17 00:00:00 2001 From: Sarah Krebs Date: Thu, 20 Jul 2023 09:46:03 +0200 Subject: [PATCH 3/5] Add change to 
changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fffd53788c..be22f94d8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Fix bug in the incumbent selection in the case that multi-fidelity is combined with multi-objective (#1019). - Fix callback order (#1040). - Handle configspace as dictionary in mlp example. +- Adapt sgd loss to newest scikit-learn version. # 2.0.1 From b6cdd9baf225583dd3804f41485cb383de019d9e Mon Sep 17 00:00:00 2001 From: Sarah Krebs Date: Thu, 20 Jul 2023 10:18:26 +0200 Subject: [PATCH 4/5] Handle configspace as dictionary in parego example --- CHANGELOG.md | 2 +- examples/3_multi_objective/2_parego.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be22f94d8d..dbcac80e4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ ## Bugfixes - Fix bug in the incumbent selection in the case that multi-fidelity is combined with multi-objective (#1019). - Fix callback order (#1040). -- Handle configspace as dictionary in mlp example. +- Handle configspace as dictionary in mlp and parego example. - Adapt sgd loss to newest scikit-learn version. # 2.0.1 diff --git a/examples/3_multi_objective/2_parego.py b/examples/3_multi_objective/2_parego.py index d8fdc5ff8d..bcc1feba08 100644 --- a/examples/3_multi_objective/2_parego.py +++ b/examples/3_multi_objective/2_parego.py @@ -66,9 +66,9 @@ def configspace(self) -> ConfigurationSpace: return cs def train(self, config: Configuration, seed: int = 0, budget: int = 10) -> dict[str, float]: - lr = config["learning_rate"] if config["learning_rate"] else "constant" - lr_init = config["learning_rate_init"] if config["learning_rate_init"] else 0.001 - batch_size = config["batch_size"] if config["batch_size"] else 200 + lr = config.get("learning_rate") if config.get("learning_rate") else "constant" + lr_init = config.get("learning_rate_init") if config.get("learning_rate_init") else 0.001 + batch_size = config.get("batch_size") if config.get("batch_size") else 200 start_time = time.time() @@ -76,10 +76,10 @@ def train(self, config: Configuration, seed: int = 0, budget: int = 10) -> dict[ warnings.filterwarnings("ignore") classifier = MLPClassifier( - hidden_layer_sizes=[config["n_neurons"]] * config["n_layer"], - solver=config["solver"], + hidden_layer_sizes=[config.get("n_neurons")] * config.get("n_layer"), + solver=config.get("solver"), batch_size=batch_size, - activation=config["activation"], + activation=config.get("activation"), learning_rate=lr, learning_rate_init=lr_init, max_iter=int(np.ceil(budget)), From c6ec7a46f934def23707dbd4139ded918ecba76f Mon Sep 17 00:00:00 2001 From: Sarah Krebs Date: Thu, 20 Jul 2023 11:10:08 +0200 Subject: [PATCH 5/5] Correct get usage --- examples/2_multi_fidelity/1_mlp_epochs.py | 12 ++++++------ examples/3_multi_objective/2_parego.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/2_multi_fidelity/1_mlp_epochs.py b/examples/2_multi_fidelity/1_mlp_epochs.py index 7159fdc8b1..48c027d3bd 100644 --- a/examples/2_multi_fidelity/1_mlp_epochs.py +++ b/examples/2_multi_fidelity/1_mlp_epochs.py @@ -84,18 +84,18 @@ def train(self, config: Configuration, seed: int = 0, budget: int = 25) -> float # For deactivated parameters (by virtue of the conditions), # the configuration stores None-values. # This is not accepted by the MLP, so we replace them with placeholder values. 
- lr = config.get("learning_rate") if config.get("learning_rate") else "constant" - lr_init = config.get("learning_rate_init") if config.get("learning_rate_init") else 0.001 - batch_size = config.get("batch_size") if config.get("batch_size") else 200 + lr = config.get("learning_rate", "constant") + lr_init = config.get("learning_rate_init", 0.001) + batch_size = config.get("batch_size", 200) with warnings.catch_warnings(): warnings.filterwarnings("ignore") classifier = MLPClassifier( - hidden_layer_sizes=[config.get("n_neurons")] * config.get("n_layer"), - solver=config.get("solver"), + hidden_layer_sizes=[config["n_neurons"]] * config["n_layer"], + solver=config["solver"], batch_size=batch_size, - activation=config.get("activation"), + activation=config["activation"], learning_rate=lr, learning_rate_init=lr_init, max_iter=int(np.ceil(budget)), diff --git a/examples/3_multi_objective/2_parego.py b/examples/3_multi_objective/2_parego.py index bcc1feba08..856c2e857f 100644 --- a/examples/3_multi_objective/2_parego.py +++ b/examples/3_multi_objective/2_parego.py @@ -66,9 +66,9 @@ def configspace(self) -> ConfigurationSpace: return cs def train(self, config: Configuration, seed: int = 0, budget: int = 10) -> dict[str, float]: - lr = config.get("learning_rate") if config.get("learning_rate") else "constant" - lr_init = config.get("learning_rate_init") if config.get("learning_rate_init") else 0.001 - batch_size = config.get("batch_size") if config.get("batch_size") else 200 + lr = config.get("learning_rate", "constant") + lr_init = config.get("learning_rate_init", 0.001) + batch_size = config.get("batch_size", 200) start_time = time.time() @@ -76,10 +76,10 @@ def train(self, config: Configuration, seed: int = 0, budget: int = 10) -> dict[ warnings.filterwarnings("ignore") classifier = MLPClassifier( - hidden_layer_sizes=[config.get("n_neurons")] * config.get("n_layer"), - solver=config.get("solver"), + hidden_layer_sizes=[config["n_neurons"]] * config["n_layer"], + solver=config["solver"], batch_size=batch_size, - activation=config.get("activation"), + activation=config["activation"], learning_rate=lr, learning_rate_init=lr_init, max_iter=int(np.ceil(budget)),
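
Note on the pattern this series settles on (illustrative, not part of the patches themselves): PATCH 1 and PATCH 4 switched every hyperparameter access to Configuration.get(), and PATCH 5 narrows that to the cases that need it. Plain indexing is kept for hyperparameters that are always active, while conditionally active ones use .get() with a placeholder default, since newer ConfigSpace releases treat a Configuration like a mapping in which deactivated hyperparameters are simply absent rather than mapped to None. The sketch below shows that pattern in isolation; the hyperparameter names and ranges are invented for the example, and the ConfigSpace calls (Categorical, Float, InCondition, add_hyperparameters, add_conditions, sample_configuration) are assumed to match the ConfigSpace version these examples target.

    # Minimal sketch of the access pattern, assuming a ConfigSpace version in
    # which deactivated hyperparameters are absent from the Configuration.
    from ConfigSpace import Categorical, ConfigurationSpace, Float, InCondition

    cs = ConfigurationSpace(seed=0)
    solver = Categorical("solver", ["lbfgs", "sgd", "adam"], default="adam")
    lr_init = Float("learning_rate_init", (1e-5, 1.0), default=1e-3, log=True)
    cs.add_hyperparameters([solver, lr_init])

    # learning_rate_init is only meaningful for the sgd and adam solvers.
    cs.add_conditions([InCondition(child=lr_init, parent=solver, values=["sgd", "adam"])])

    config = cs.sample_configuration()

    # Always-active hyperparameters keep plain indexing.
    solver_name = config["solver"]

    # Conditionally active hyperparameters use .get() with a placeholder
    # default, so a deactivated parameter never raises a KeyError.
    lr_init_value = config.get("learning_rate_init", 1e-3)

    print(solver_name, lr_init_value)

The loss="log_loss" change in PATCH 2 simply follows scikit-learn's rename of SGDClassifier's logistic loss: the old spelling "log" was deprecated in scikit-learn 1.1 and removed in a later release, so the example and the test fixture now use "log_loss".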