Hyperparams_used.txt
________________________________________________
EfficientNetB0
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (224, 224, 1)
self.num_classes = 21
self.num_strings = 6
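
A minimal sketch, assuming a standard Keras setup, of how the values above could fit together. The per-string softmax heads (6 heads of 21 classes each) and the use of tf.keras.applications.EfficientNetB0 trained from scratch are assumptions; only the numeric values come from this file.

import tensorflow as tf

input_shape = (224, 224, 1)   # values from this file
num_classes = 21
num_strings = 6

# Stock EfficientNetB0 backbone; weights=None permits the 1-channel input.
base = tf.keras.applications.EfficientNetB0(
    include_top=False, weights=None, input_shape=input_shape)
x = tf.keras.layers.GlobalAveragePooling2D()(base.output)
# Assumption: one softmax head per guitar string, num_classes classes each.
outputs = [tf.keras.layers.Dense(num_classes, activation="softmax",
                                 name=f"string_{i}")(x)
           for i in range(num_strings)]
model = tf.keras.Model(base.input, outputs)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss="categorical_crossentropy")
early_stop = tf.keras.callbacks.EarlyStopping(patience=5,
                                              restore_best_weights=True)
# model.fit(train_data, validation_data=val_data, epochs=300,
#           batch_size=128, callbacks=[early_stop])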
________________________________________________
InT
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
transformer_layers = 6,
patch_size = 4,
hidden_size = 64,
num_heads = 4,
mlp_dim = 128,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (192, self.con_win_size, 1)
self.num_classes = 21
self.num_strings = 6
________________________________________________
Swin-B
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
patch_size = (2, 2), # 2-by-2 sized patches
dropout_rate = 0.03, # Dropout rate
num_heads = 8, # Attention heads
embed_dim = 64, # Embedding dimension
num_mlp = 256, # MLP layer size
qkv_bias = True, # Learnable bias in the query/key/value projections
window_size = 2, # Size of attention window
shift_size = 1, # Window shift for shifted-window attention
image_dimension = 224, # Initial image size
weight_decay = 0.0001,
label_smoothing = 0.1,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (224, 224, 1)
self.num_classes = 21
self.num_strings = 6
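
A minimal sketch of how the Swin-specific training options above (weight decay, label smoothing, early stopping) could be applied in Keras; pairing weight_decay with AdamW is an assumption, only the values come from this file.

import tensorflow as tf

# AdamW is tf.keras.optimizers.AdamW in TF >= 2.11 (older releases shipped
# it in tensorflow_addons instead).
optimizer = tf.keras.optimizers.AdamW(learning_rate=1e-4, weight_decay=0.0001)
loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
early_stop = tf.keras.callbacks.EarlyStopping(patience=5,
                                              restore_best_weights=True)
# model.compile(optimizer=optimizer, loss=loss)
# model.fit(..., epochs=300, batch_size=128, callbacks=[early_stop])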
________________________________________________
TabInception
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
transformer_layers = 6,
patch_size = 4,
hidden_size = 64,
num_heads = 4,
mlp_dim = 128,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (192, self.con_win_size, 1)
self.num_classes = 21
self.num_strings = 6
________________________________________________
ViT original
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
transformer_layers = 6,
patch_size = 4,
hidden_size = 64,
num_heads = 4,
mlp_dim = 128,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (192, self.con_win_size, 1)
self.num_classes = 21
self.num_strings = 6
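
InT, TabInception, and ViT original above list the same transformer hyperparameters. A minimal ViT-style encoder sketch using those values follows; the layer wiring (strided-convolution patch embedding, pre-norm attention/MLP blocks, per-string heads) is an assumption, only the numbers come from this file.

import tensorflow as tf

con_win_size = 9
input_shape = (192, con_win_size, 1)
transformer_layers = 6
patch_size = 4
hidden_size = 64
num_heads = 4
mlp_dim = 128
num_classes = 21
num_strings = 6

inputs = tf.keras.Input(shape=input_shape)
# Non-overlapping patch embedding via a strided convolution (assumption).
x = tf.keras.layers.Conv2D(hidden_size, patch_size, strides=patch_size)(inputs)
x = tf.keras.layers.Reshape((-1, hidden_size))(x)  # (patches, hidden_size)
for _ in range(transformer_layers):
    # Pre-norm multi-head self-attention block with a residual connection.
    a = tf.keras.layers.LayerNormalization()(x)
    a = tf.keras.layers.MultiHeadAttention(
        num_heads=num_heads, key_dim=hidden_size // num_heads)(a, a)
    x = x + a
    # Pre-norm MLP block with a residual connection.
    m = tf.keras.layers.LayerNormalization()(x)
    m = tf.keras.layers.Dense(mlp_dim, activation="gelu")(m)
    m = tf.keras.layers.Dense(hidden_size)(m)
    x = x + m
x = tf.keras.layers.GlobalAveragePooling1D()(x)
outputs = [tf.keras.layers.Dense(num_classes, activation="softmax")(x)
           for _ in range(num_strings)]
model = tf.keras.Model(inputs, outputs)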
________________________________________________
TabCNN
-------------------------------------------------
batch_size=128,
epochs=300,
con_win_size = 9,
patience = 5,
learning_rate = 1e-4,
# Parameters
self.input_shape = (192, self.con_win_size, 1)
self.num_classes = 21
self.num_strings = 6
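
A minimal sketch of how con_win_size = 9 could frame a spectrogram into the (192, 9, 1) inputs listed above; the 192-bin feature matrix and zero-padding at the edges are assumptions.

import numpy as np

def frame_with_context(spec, con_win_size=9):
    """spec: (num_frames, 192) features -> (num_frames, 192, con_win_size, 1)."""
    half = con_win_size // 2
    padded = np.pad(spec, ((half, half), (0, 0)))  # zero-pad the time axis
    # Each example is one frame plus `half` frames of context on each side.
    windows = [padded[i:i + con_win_size].T       # (192, con_win_size)
               for i in range(spec.shape[0])]
    return np.stack(windows)[..., np.newaxis]

x = frame_with_context(np.random.rand(100, 192))
print(x.shape)  # (100, 192, 9, 1)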
________________________________________________