log = "log/" # Directory for log output
[Debug]
debug = "False" # Debug Mode - True or False: when True, radically reduces the dataset size for faster training
debugDilution = 200 # Debug Dilution - only 1/nth of the dataset is used for training
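# Illustrative example (not from the original config): with debugDilution = 200,
# every 200th sample is kept, so a 100,000-tile dataset trains on ~500 tiles.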
wandb = "True" # Wandb Logging
[Stain Normalization]
normalization = "reinhard" # Normalization methods available: macenko, reinhard, None
targetImagePath = "Dataset/MonuSegData/Training/TissueImages/TCGA-A7-A13F-01Z-00-DX1.png" # Target Image for Normalization
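# Background note: Reinhard normalization matches the per-channel mean and
# standard deviation of each image to the target image in a LAB-like colour
# space; Macenko instead estimates stain vectors from optical density via SVD.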
[Image Augmentation - CoNIC]
imagePath = "Dataset/Otherdata/CoNICChallengeTrain/images.npy"
labelPath = "Dataset/Otherdata/CoNICChallengeTrain/labels.npy"
augmentPerImage = 10 # Number of augmentations to perform per image generated by the sliding window
finalTileHeight = 256 # Final height of the augmented image
finalTileWidth = 256 # Final width of the augmented image
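# Rough sizing (illustrative): with augmentPerImage = 10, each sliding-window
# tile yields ten augmented 256x256 tiles, so the prepared dataset is roughly
# 10x the original tile count.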
[Train-Val split]
splitRatio = 0.7 # 0.7 -> 70% training, 30% validation
[Training]
trainDataset = "Dataset/Otherdata/prepared_CoNIC/trainNormal/" # Images to be used during training
valDataset = "Dataset/Otherdata/prepared_CoNIC/valNormal/" # Images to be used during validation
testDataset = "Dataset/Otherdata/prepared_CoNIC/testNormal/" # Images to be used during testing
resumeModel = "model/best_model.pth" # Model to be used when resuming training
torchsummary = "False" # Torch Summary
sampleImages = "False" # Sample Images during Training
dinoModelType = "giga" # DINO Model Type - small, large, giga
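# Assumption (not stated here): small/large/giga likely map to DINOv2 ViT-S,
# ViT-L and ViT-g backbones; verify against the feature-extraction code.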
reUseFeatures = "True" # Reuse existing extracted features - re-extract (set to "False") whenever the dataset changes
batchVisualization = "True" # Visualize sampled training batches
trainingPhase = "high-density" # Training Phase - high-density, low-density
[Class Config]
class1 = "0, 0, 0" # black
class2 = "255, 255, 255" # white
[Model]
model_type = "UNet" # Models available: UNet, UNet_3Plus, EluNet, UNet_3PlusShort
[Parameters]
input_img_type = "rgb" # Input Image Type: rgb, gray
kernel_size = 3
use_maxblurpool = "False" # Use MaxBlurPool
epochs = 50
batch_size = 16
learning_rate = 0.0000001 # Set to "auto" to search for the best learning rate, or to a fixed value such as 0.00001
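# Note: 0.0000001 equals 1e-7, which is very conservative; typical starting
# points for Adam-style optimizers lie in the 1e-3 to 1e-5 range.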
lr_decay = "False" # Learning Rate Decay - True or False
num_classes = 2
weight_path = "None" # Path to pretrained weights, or "None"
activation = "relu" # Activations available: relu, GLU
resume = "False" # Resume Training
resume_epoch = 0 # Resume Training from epoch
channel = 32 # Base channel width - default: 16; can be set to 32, 64, or other values
attention = "False" # Attention (supported by Model 1 only)
loss = "bce" # Losses available: bce, jaccard, focal, pwcel, dice, weighteddice, unet3+loss, improvedLoss, ClassRatioLoss, RBAF, focalDiceLoss, wassersteinLoss, focalDiceHDLoss
dropout = 0.3 # Dropout rate
dropoutLOC = "std" # Dropout Location: std, after (after 20 iterations)
dilation = 1 # For Dilated convolution.
l1_regularization = "False" # L1 Regularization
[experiment features]
multiScaleAttention = "False" # Spatial, Channel and Edge Attention used in tandem
eigen_decomposition = "False" # Eigen Decomposition
top_k_features = 0 # Top-K Features - 0 = disabled, n = keep the top n feature values
dropBlock = "True" # DropBlock - when True, dropout is automatically set to 0 and DropBlock is used instead
dropBlockProb = 0.8 # DropBlock Probability
dropBlockSize = 5 # DropBlock Size
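# Background note: DropBlock (Ghiasi et al., 2018) zeroes contiguous
# dropBlockSize x dropBlockSize regions of the feature map rather than
# independent units; whether dropBlockProb is a keep- or drop-probability
# depends on the implementation, so verify against the training code.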
lookahead = "False" # Lookahead
lookahead_k = 5 # Lookahead k
lookahead_alpha = 0.8 # Lookahead alpha
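# Background note: Lookahead (Zhang et al., 2019) updates "slow" weights toward
# the "fast" optimizer weights by a factor of lookahead_alpha every lookahead_k
# steps; the paper's suggested alpha is 0.5, so 0.8 is more aggressive.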
guidedFilter = "True" # Guided Filter