# params.py
import torch
from uniform_instance_gen import DatasetConfig, datasetConfigs


class ProgressConfig:
    path_to_save_progress: str
    save_training: bool


class Config:
    num_of_envs: int
    size: str
    dataset_config: DatasetConfig
    n_j: int
    n_m: int
    num_of_operations_ub_per_job: int
    num_of_operations_lb_per_job: int
    num_of_training_operations: int
    num_of_alternatives_lb: int
    num_of_alternatives_ub: int
    torch_seed: int
    learning_rate: float
    gamma: float  # discount factor
    k_epochs: int  # optimisation epochs per PPO update
    epsilon_clip: float  # PPO clipping parameter
    num_of_layers: int
    input_dim: int
    hidden_dim: int
    num_of_mlp_layers_feature_extract: int
    num_of_mlp_layers_actor: int
    num_of_hidden_dim_actor: int
    num_of_mlp_layers_critic: int
    num_of_hidden_dim_critic: int
    max_updates: int
    duration_low: int
    duration_high: int
    device: str
    progress_config: ProgressConfig
    stochastic: bool
    machine_utilisation: float
    has_arrival_time: bool
    arrival_time_multiplier: int


config = Config()
# Change only this value to switch datasets (e.g. 'MK01', 'MK02').
config.size = 'MK01'
datasetConfig = datasetConfigs[config.size]
config.dataset_config = datasetConfig
config.num_of_envs = 4
config.n_j = datasetConfig.num_of_jobs
config.n_m = datasetConfig.num_of_machines
config.num_of_operations_ub_per_job = datasetConfig.highest_num_of_operations_per_job
config.num_of_operations_lb_per_job = datasetConfig.lowest_num_of_operations_per_job
# total number of operations, used for graph pooling
config.num_of_training_operations = datasetConfig.get_total_num_of_operations()
config.num_of_alternatives_lb = datasetConfig.num_of_alternative_bounds[0]
config.num_of_alternatives_ub = datasetConfig.num_of_alternative_bounds[1]
config.torch_seed = 600
config.learning_rate = 2e-5
config.gamma = 1
config.k_epochs = 1
config.epsilon_clip = 0.2
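# Background (standard PPO, not specific to this repo): epsilon_clip bounds the
# policy ratio r_t = pi_new(a_t|s_t) / pi_old(a_t|s_t) in the clipped objective
#   L = E[min(r_t * A_t, clip(r_t, 1 - epsilon, 1 + epsilon) * A_t)]
# so a single update cannot move the policy too far from the old one.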
config.num_of_layers = 3
config.input_dim = config.n_m + 1
config.hidden_dim = 64
config.num_of_mlp_layers_feature_extract = 2
config.num_of_mlp_layers_actor = 2
config.num_of_hidden_dim_actor = 32
config.num_of_mlp_layers_critic = 2
config.num_of_hidden_dim_critic = 32
config.max_updates = 10_000
config.duration_low = datasetConfig.duration_bounds[0]
config.duration_high = datasetConfig.duration_bounds[1]
config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
config.stochastic = False
config.machine_utilisation = 0.95
device = torch.device(config.device)
config.progress_config = ProgressConfig()
config.progress_config.save_training = False
config.progress_config.path_to_save_progress = f'./records/{config.size}/ID_5'
if config.stochastic:
    config.progress_config.path_to_save_progress = f'./stochastic_records/{config.size}/ID_3'
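# Hedged note: nothing in this file creates the directory on disk; a writer
# would typically call os.makedirs(path, exist_ok=True) first (see the sketch
# at the bottom of this file).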
'''
Notes:
The top-level folder is named after the dataset size (MK01, MK02, ...).
You can choose the subfolder name yourself, but keep a fixed format.
Suggested format: <Experiment_ID>
Things to save in the folder (see the sketch below this docstring):
1. a serialized config object
2. the training log
3. the validation log
4. best_weight
5. last_weight
6. last_optimizer_weights
'''
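
# --- Hedged sketch (not part of the original file) ---
# One possible way to persist items 1, 5 and 6 from the notes above. The
# `policy` and `optimizer` arguments are placeholders for whatever objects the
# actual training loop uses; only `config` comes from this module.
import os
import pickle


def save_progress(config, policy, optimizer):
    folder = config.progress_config.path_to_save_progress
    os.makedirs(folder, exist_ok=True)
    # 1. serialized config object
    with open(os.path.join(folder, 'config.pkl'), 'wb') as f:
        pickle.dump(config, f)
    # 5. last_weight
    torch.save(policy.state_dict(), os.path.join(folder, 'last_weight.pth'))
    # 6. last_optimizer_weights
    torch.save(optimizer.state_dict(),
               os.path.join(folder, 'last_optimizer_weights.pth'))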