# nanodet_custom_xml_dataset.yml (from a fork of RangiLyu/nanodet)
#Config File example
save_dir: workspace/nanodet_m
model:
  weight_averager:
    name: ExpMovingAverager
    decay: 0.9998
  arch:
    name: NanoDetPlus
    detach_epoch: 10
    backbone:
      name: ShuffleNetV2
      model_size: 1.0x
      out_stages: [2,3,4]
      activation: LeakyReLU
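    # Note: out_stages [2,3,4] select the three ShuffleNetV2 feature stages fed to the FPN;
    # for model_size 1.0x their channel counts are 116/232/464, which is why fpn.in_channels
    # below is [116, 232, 464]. If you change the backbone or model_size, adjust in_channels to match.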
    fpn:
      name: GhostPAN
      in_channels: [116, 232, 464]
      out_channels: 96
      kernel_size: 5
      num_extra_level: 1
      use_depthwise: True
      activation: LeakyReLU
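    # num_extra_level: 1 adds one extra downsampled output on top of the three FPN levels,
    # giving four feature maps that line up with the head strides [8, 16, 32, 64] below.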
    head:
      name: NanoDetPlusHead
      num_classes: 80
      input_channel: 96
      feat_channels: 96
      stacked_convs: 2
      kernel_size: 5
      strides: [8, 16, 32, 64]
      activation: LeakyReLU
      reg_max: 7
      norm_cfg:
        type: BN
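      # input_channel should match fpn.out_channels above (96 here).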
      loss:
        loss_qfl:
          name: QualityFocalLoss
          use_sigmoid: True
          beta: 2.0
          loss_weight: 1.0
        loss_dfl:
          name: DistributionFocalLoss
          loss_weight: 0.25
        loss_bbox:
          name: GIoULoss
          loss_weight: 2.0
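      # Standard GFL-style losses: QFL for classification/quality, DFL for the discretized
      # box regression distribution (controlled by reg_max), GIoU for box overlap.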
    # Auxiliary head, only used at training time.
    aux_head:
      name: SimpleConvHead
      num_classes: 80
      input_channel: 192
      feat_channels: 192
      stacked_convs: 4
      strides: [8, 16, 32, 64]
      activation: LeakyReLU
      reg_max: 7
class_names: &class_names ['NAME1', 'NAME2', 'NAME3', 'NAME4', '...']  # Please fill in the category names (do not include a background category)
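# Note: num_classes in both model.arch.head and model.arch.aux_head above should be changed
# to match the number of entries in class_names (80 is the COCO default).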
data:
  train:
    name: XMLDataset
    class_names: *class_names
    img_path: TRAIN_IMAGE_FOLDER  # Please fill in train image path
    ann_path: TRAIN_XML_FOLDER  # Please fill in train xml path
    input_size: [320,320]  # [w,h]
    keep_ratio: True
    pipeline:
      perspective: 0.0
      scale: [0.6, 1.4]
      stretch: [[1, 1], [1, 1]]
      rotation: 0
      shear: 0
      translate: 0.2
      flip: 0.5
      brightness: 0.2
      contrast: [0.8, 1.2]
      saturation: [0.8, 1.2]
      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
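      # normalize is [per-channel mean, per-channel std]; these look like the standard
      # ImageNet statistics on a 0-255 scale. Keep train and val normalization identical.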
  val:
    name: XMLDataset
    class_names: *class_names
    img_path: VAL_IMAGE_FOLDER  # Please fill in val image path
    ann_path: VAL_XML_FOLDER  # Please fill in val xml path
    input_size: [320,320]  # [w,h]
    keep_ratio: True
    pipeline:
      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
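      # The val pipeline intentionally applies no augmentation, only normalization.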
device:
  gpu_ids: [0]  # Set like [0, 1, 2, 3] if you have multiple GPUs
  workers_per_gpu: 8
  batchsize_per_gpu: 96
  precision: 32  # set to 16 to use AMP training
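  # batchsize_per_gpu: 96 assumes a fairly large GPU; if you hit out-of-memory errors, lower it
  # (and consider scaling schedule.optimizer.lr down roughly in proportion).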
schedule:
#  resume:
#  load_model: YOUR_MODEL_PATH
  optimizer:
    name: AdamW
    lr: 0.001
    weight_decay: 0.05
  warmup:
    name: linear
    steps: 500
    ratio: 0.0001
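  # Linear warm-up: the learning rate typically ramps from about lr * ratio up to lr over the
  # first `steps` iterations before the main schedule takes over.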
  total_epochs: 300
  lr_schedule:
    name: CosineAnnealingLR
    T_max: 300
    eta_min: 0.00005
  val_intervals: 10
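  # T_max is kept equal to total_epochs so the cosine schedule decays lr from 0.001 down to
  # eta_min over the whole run; val_intervals: 10 runs validation every 10 epochs.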
grad_clip: 35
evaluator:
  name: CocoDetectionEvaluator
  save_key: mAP
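  # save_key: mAP means checkpoints are ranked by COCO-style mAP on the val set when
  # deciding which model to keep as best.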
log:
  interval: 10
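# A typical way to launch training with this config (per the upstream nanodet README;
# adjust the path to wherever this file actually lives):
#   python tools/train.py config/nanodet_custom_xml_dataset.yml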