Commit

Merge pull request #3 from anh9895/main
Add MhaMlpTuner and MhaMlpComparator classes.
thieu1995 authored Nov 2, 2024
2 parents 862bfbb + 3e36b88 commit 78aeeae
Showing 28 changed files with 1,367 additions and 359 deletions.
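The commit message also mentions a new MhaMlpTuner class, which none of the example files shown in this diff exercises. The sketch below is only a rough illustration of how such a tuner might be used; the constructor arguments, the param_dict keys, and the best_params_ attribute are assumptions patterned after scikit-learn-style hyperparameter searches and are not confirmed by this commit.

# Hypothetical MhaMlpTuner usage -- argument names and attributes are assumptions,
# modeled on scikit-learn search objects, not taken from this diff.
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from metaperceptron import MhaMlpTuner

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Candidate hyperparameters to search over (these keys are assumed, not documented here)
param_dict = {
    "hidden_layers": [(10,), (20, 10)],
    "act_names": ["ReLU", "ELU"],
}

tuner = MhaMlpTuner(
    task="classification",       # assumed to mirror MhaMlpComparator's task argument
    param_dict=param_dict,
    search_method="gridsearch",  # assumed option
    scoring="F1_macro",
    cv=3,
    seed=42,
)
tuner.fit(X_train, y_train)
print(tuner.best_params_)        # assumed attribute, mirroring sklearn searches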
3 changes: 2 additions & 1 deletion .gitignore
@@ -1,4 +1,5 @@

examples/comparator/history/
examples/core/history/
examples/helpers/history/
metaperceptron/data/
# Pycharm
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# Created by "Thieu" at 22:28, 14/08/2023 ----------%
# Created by "Thieu" at 23:30, 17/08/2024 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
54 changes: 54 additions & 0 deletions examples/comparator/exam_mha_mlp_binary_classifier_comparator.py
@@ -0,0 +1,54 @@
#!/usr/bin/env python
# Created by "Thieu" at 23:30, 17/08/2024 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_breast_cancer
from metaperceptron import MhaMlpComparator


## Load data object
X, y = load_breast_cancer(return_X_y=True)

# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
print(X_train.shape, X_test.shape)

# Here is the list of optimizers you want to compare
optim_dict = {
'BaseGA': {"epoch": 10, "pop_size": 20},
"OriginalPSO": {"epoch": 10, "pop_size": 20},
}

# Initialize the comparator
comparator = MhaMlpComparator(
optim_dict=optim_dict,
task="classification",
hidden_layers=(10, ),
act_names="ReLU",
dropout_rates=None,
act_output=None,
obj_name="F1S",
verbose=True,
seed=42,
)

### Perform comparison
# results = comparator.compare_cross_val_score(X_train, y_train, metric="AS", cv=4, n_trials=2, to_csv=True)
# print(results)

# results = comparator.compare_cross_validate(X_train, y_train, metrics=["AS", "PS", "F1S", "NPV"],
#                                             cv=4, return_train_score=True, n_trials=2, to_csv=True)
# print(results)

results = comparator.compare_train_test(X_train, y_train, X_test, y_test,
                                        metrics=["AS", "PS", "F1S", "NPV"], n_trials=2, to_csv=True)
print(results)
@@ -0,0 +1,54 @@
#!/usr/bin/env python
# Created by "Thieu" at 19:55, 19/08/2024 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from metaperceptron import MhaMlpComparator


## Load data object
X, y = load_iris(return_X_y=True)

# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
print(X_train.shape, X_test.shape)

# Here is the list of optimizers you want to compare
optim_dict = {
'BaseGA': {"epoch": 10, "pop_size": 20},
"OriginalPSO": {"epoch": 10, "pop_size": 20},
}

# Initialize the comparator
comparator = MhaMlpComparator(
optim_dict=optim_dict,
task="classification",
hidden_layers=(10, ),
act_names="ReLU",
dropout_rates=None,
act_output=None,
obj_name="F1S",
verbose=True,
seed=42,
)

## Perform comparison
# results = comparator.compare_cross_val_score(X_train, y_train, metric="AS", cv=4, n_trials=2, to_csv=True)
# print(results)

# results = comparator.compare_cross_validate(X_train, y_train, metrics=["AS", "PS", "F1S", "NPV"],
#                                             cv=4, return_train_score=True, n_trials=2, to_csv=True)
# print(results)

results = comparator.compare_train_test(X_train, y_train, X_test, y_test,
                                        metrics=["AS", "PS", "F1S", "NPV"], n_trials=2, to_csv=True)
print(results)
60 changes: 60 additions & 0 deletions examples/comparator/exam_mha_mlp_regressor_comparator.py
@@ -0,0 +1,60 @@
#!/usr/bin/env python
# Created by "Thieu" at 19:59, 19/08/2024 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.datasets import load_diabetes
from metaperceptron import MhaMlpComparator


## Load data object
X, y = load_diabetes(return_X_y=True)

# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
print(X_train.shape, X_test.shape)

# Min-max scale the output
y_scaler = MinMaxScaler(feature_range=(0, 1))
y_train = y_scaler.fit_transform(y_train.reshape(-1, 1))
y_test = y_scaler.transform(y_test.reshape(-1, 1))


# Here is the list of optimizers you want to compare
optim_dict = {
'BaseGA': {"epoch": 10, "pop_size": 20},
"OriginalPSO": {"epoch": 10, "pop_size": 20},
}

# Initialize the comparator
comparator = MhaMlpComparator(
optim_dict=optim_dict,
task="regression",
hidden_layers=(10, ),
act_names="ELU",
dropout_rates=None,
act_output=None,
obj_name="R2",
verbose=True,
seed=42,
)

### Perform comparison
# results = comparator.compare_cross_val_score(X_train, y_train, metric="RMSE", cv=4, n_trials=2, to_csv=True)
# print(results)

# results = comparator.compare_cross_validate(X_train, y_train, metrics=["MSE", "MAPE", "R2", "KGE", "NSE"],
#                                             cv=4, return_train_score=True, n_trials=2, to_csv=True)
# print(results)

results = comparator.compare_train_test(X_train, y_train, X_test, y_test,
                                        metrics=["MSE", "MAPE", "R2", "KGE", "NSE"], n_trials=2, to_csv=True)
print(results)
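Because the regression target is min-max scaled before the comparison, the reported metrics are computed in the scaled space. To express predictions back in the original target units, the fitted y_scaler can be inverted; the snippet below is a minimal sketch that assumes y_pred_scaled is an (n_samples, 1) array of scaled predictions produced elsewhere (a hypothetical placeholder, not part of this example).

# Minimal sketch: map min-max-scaled predictions back to the original target units
import numpy as np

y_pred_scaled = np.array([[0.25], [0.60], [0.85]])   # hypothetical scaled predictions
y_pred_original = y_scaler.inverse_transform(y_pred_scaled)
print(y_pred_original)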
21 changes: 0 additions & 21 deletions examples/helpers/exam_encoder.py

This file was deleted.

35 changes: 0 additions & 35 deletions examples/helpers/exam_get_weights.py

This file was deleted.

35 changes: 0 additions & 35 deletions examples/helpers/exam_lb_ub.py

This file was deleted.

39 changes: 0 additions & 39 deletions examples/helpers/exam_save_load_model.py

This file was deleted.

33 changes: 0 additions & 33 deletions examples/helpers/exam_save_results.py

This file was deleted.

32 changes: 0 additions & 32 deletions examples/helpers/exam_save_y_predicted.py

This file was deleted.
