diff --git a/docs/advanced_examples/GLMComparison.ipynb b/docs/advanced_examples/GLMComparison.ipynb
index 160880705..839b6b3dc 100644
--- a/docs/advanced_examples/GLMComparison.ipynb
+++ b/docs/advanced_examples/GLMComparison.ipynb
@@ -97,13 +97,13 @@
     "# Getting the original data-set containing the risk features\n",
     "# Link: https://www.openml.org/d/41214\n",
     "risks_data, _ = fetch_openml(\n",
-    "    data_id=41214, as_frame=True, cache=True, data_home=\"~/.cache/sklean\", return_X_y=True\n",
+    "    data_id=41214, as_frame=True, cache=True, data_home=\"~/.cache/sklearn\", return_X_y=True\n",
     ")\n",
     "\n",
     "# Getting the data set containing claims amount\n",
     "# Link: https://www.openml.org/d/41215\n",
     "claims_data, _ = fetch_openml(\n",
-    "    data_id=41215, as_frame=True, cache=True, data_home=\"~/.cache/sklean\", return_X_y=True\n",
+    "    data_id=41215, as_frame=True, cache=True, data_home=\"~/.cache/sklearn\", return_X_y=True\n",
     ")"
    ]
   },
diff --git a/docs/advanced_examples/PoissonRegression.ipynb b/docs/advanced_examples/PoissonRegression.ipynb
index 0933b1df3..ee153b763 100644
--- a/docs/advanced_examples/PoissonRegression.ipynb
+++ b/docs/advanced_examples/PoissonRegression.ipynb
@@ -86,7 +86,7 @@
    "outputs": [],
    "source": [
     "df, _ = fetch_openml(\n",
-    "    data_id=41214, as_frame=True, cache=True, data_home=\"~/.cache/sklean\", return_X_y=True\n",
+    "    data_id=41214, as_frame=True, cache=True, data_home=\"~/.cache/sklearn\", return_X_y=True\n",
     ")\n",
     "df = df.head(50000)"
    ]
diff --git a/docs/advanced_examples/XGBRegressor.ipynb b/docs/advanced_examples/XGBRegressor.ipynb
index 3a908c5dc..3eafd91a7 100644
--- a/docs/advanced_examples/XGBRegressor.ipynb
+++ b/docs/advanced_examples/XGBRegressor.ipynb
@@ -1135,11 +1135,11 @@
    "source": [
     "# Evaluation\n",
     "\n",
-    "r2_score_sklean = metrics.r2_score(y_test, y_preds_XGBoost)\n",
+    "r2_score_sklearn = metrics.r2_score(y_test, y_preds_XGBoost)\n",
     "r2_score_clear_concrete = metrics.r2_score(y_test, y_preds_clear)\n",
     "r2_score_fhe_concrete = metrics.r2_score(y_test, y_preds_fhe)\n",
     "\n",
-    "print(f\"R2_score with XGBoost: {r2_score_sklean:.4f}\")\n",
+    "print(f\"R2_score with XGBoost: {r2_score_sklearn:.4f}\")\n",
     "print(f\"R2_score without FHE : {r2_score_clear_concrete:.4f}\")\n",
     "print(f\"R2_score with FHE : {r2_score_fhe_concrete:.4f}\")"
    ]
   }
diff --git a/use_case_examples/federated_learning/federated_utils.py b/use_case_examples/federated_learning/federated_utils.py
index cc91601c0..54580efed 100644
--- a/use_case_examples/federated_learning/federated_utils.py
+++ b/use_case_examples/federated_learning/federated_utils.py
@@ -228,7 +228,7 @@ def get_model_parameters(model: LogisticRegression) -> LogRegParams:
 
 
 def set_model_params(model: LogisticRegression, params: LogRegParams) -> LogisticRegression:
-    """Sets the parameters of a sklean LogisticRegression model."""
+    """Sets the parameters of a sklearn LogisticRegression model."""
     model.coef_ = params[0]
     if model.fit_intercept:
         model.intercept_ = params[1]
diff --git a/use_case_examples/federated_learning/load_to_cml.py b/use_case_examples/federated_learning/load_to_cml.py
index 373e92e2a..149f6c468 100644
--- a/use_case_examples/federated_learning/load_to_cml.py
+++ b/use_case_examples/federated_learning/load_to_cml.py
@@ -17,7 +17,7 @@
     with path_to_model.open("rb") as file:
         sklearn_model = pickle.load(file)
 
-    # Compile model without data since the server doesn't have acces to it
+    # Compile model without data since the server doesn't have access to it
     # Indeed in this scenario the users have the data but the server doesn't.
     # We then have to compile the model using random input sampled with the same
     # low and high bounds as the real data, in this context [0, 255].
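Note (not part of the diff): a minimal sketch of the "compile on random inputs" idea described by the comment touched in load_to_cml.py, assuming only NumPy and pickle. The model path, the sample count, and the commented-out compile call are illustrative placeholders, not the script's actual code.

# Hypothetical sketch, not taken from load_to_cml.py: build a random calibration
# set with the same low and high bounds as the real data, here [0, 255].
import pickle
from pathlib import Path

import numpy

path_to_model = Path("server/model.pkl")  # assumed path, for illustration only

with path_to_model.open("rb") as file:
    sklearn_model = pickle.load(file)

# The input dimension can be read off the trained scikit-learn model itself.
n_features = sklearn_model.coef_.shape[1]

# Random inputs in [0, 255] stand in for the client data the server never sees.
random_input = numpy.random.randint(0, 256, size=(100, n_features))

# The model would then be compiled on this random input set, e.g. something like:
# compiled_model.compile(random_input)  # exact Concrete ML compilation call assumed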