import argparse
import os
import random

import numpy as np
import pandas as pd
import torch
from pytorch_metric_learning.losses import NormalizedSoftmaxLoss
from torch.backends import cudnn
from torch.optim import AdamW
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

from model import Model
from utils import DomainDataset, compute_metric

# for reproducibility
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
cudnn.deterministic = True
cudnn.benchmark = False

# train for one epoch
def train(net, data_loader, train_optimizer):
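    # note: loss_criterion, epoch and epochs are globals defined in the __main__ block below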
    net.train()
    total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader, dynamic_ncols=True)
    for img, domain, label, img_name in train_bar:
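        # forward pass: the model returns several outputs; only the projected embedding is fed to the loss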
        _, _, _, proj = net(img.cuda())
        loss = loss_criterion(proj, label.cuda())
        train_optimizer.zero_grad()
        loss.backward()
        train_optimizer.step()
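        # track a batch-size-weighted running average of the loss for the progress bar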
        total_num += img.size(0)
        total_loss += loss.item() * img.size(0)
        train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, total_loss / total_num))
    return total_loss / total_num

# val for one epoch
def val(net, data_loader):
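    # note: results, epoch and epochs are globals defined in the __main__ block below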
    net.eval()
    vectors, domains, labels = [], [], []
    with torch.no_grad():
        for img, domain, label, img_name in tqdm(data_loader, desc='Feature extracting', dynamic_ncols=True):
            _, _, _, proj = net(img.cuda())
            vectors.append(proj.cpu())
            domains.append(domain)
            labels.append(label)
        vectors = torch.cat(vectors, dim=0)
        domains = torch.cat(domains, dim=0)
        labels = torch.cat(labels, dim=0)
        acc = compute_metric(vectors, domains, labels)
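        # record the retrieval metrics (as percentages) in the global results dict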
        results['P@100'].append(acc['P@100'] * 100)
        results['P@200'].append(acc['P@200'] * 100)
        results['mAP@200'].append(acc['mAP@200'] * 100)
        results['mAP@all'].append(acc['mAP@all'] * 100)
        print('Val Epoch: [{}/{}] | P@100:{:.1f}% | P@200:{:.1f}% | mAP@200:{:.1f}% | mAP@all:{:.1f}%'
              .format(epoch, epochs, acc['P@100'] * 100, acc['P@200'] * 100, acc['mAP@200'] * 100,
                      acc['mAP@all'] * 100))
    return acc['precise'], vectors

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train Model')
    # common args
    parser.add_argument('--data_root', default='/home/data', type=str, help='Datasets root path')
    parser.add_argument('--data_name', default='sketchy', type=str, choices=['sketchy', 'tuberlin'],
                        help='Dataset name')
    parser.add_argument('--backbone_type', default='resnet50', type=str, choices=['resnet50', 'vgg16'],
                        help='Backbone type')
    parser.add_argument('--proj_dim', default=512, type=int, help='Projected embedding dim')
    parser.add_argument('--batch_size', default=64, type=int, help='Number of images in each mini-batch')
    parser.add_argument('--epochs', default=10, type=int, help='Number of epochs to train the model')
    parser.add_argument('--warmup', default=1, type=int, help='Number of warm-up epochs (backbone frozen)')
    parser.add_argument('--save_root', default='result', type=str, help='Result saved root path')

    # args parse
    args = parser.parse_args()
    data_root, data_name, backbone_type, proj_dim = args.data_root, args.data_name, args.backbone_type, args.proj_dim
    batch_size, epochs, warmup, save_root = args.batch_size, args.epochs, args.warmup, args.save_root
    # data prepare
    train_data = DomainDataset(data_root, data_name, split='train')
    val_data = DomainDataset(data_root, data_name, split='val')
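    # DomainDataset yields (img, domain, label, img_name) tuples, as unpacked in train() and val()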
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=False, num_workers=8)

    # model and loss setup
    model = Model(backbone_type, proj_dim).cuda()
    loss_criterion = NormalizedSoftmaxLoss(len(train_data.classes), proj_dim).cuda()
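    # NormalizedSoftmaxLoss holds a learnable proxy weight per class, so its parameters
    # are passed to the optimizer as a separate group with a larger learning rate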
    # optimizer config
    optimizer = AdamW([{'params': model.parameters()}, {'params': loss_criterion.parameters(), 'lr': 1e-1}],
                      lr=1e-5, weight_decay=5e-4)

    # training loop
    results = {'train_loss': [], 'val_precise': [], 'P@100': [], 'P@200': [], 'mAP@200': [], 'mAP@all': []}
    save_name_pre = '{}_{}_{}'.format(data_name, backbone_type, proj_dim)
    if not os.path.exists(save_root):
        os.makedirs(save_root)
    best_precise = 0.0
    for epoch in range(1, epochs + 1):
        # during warm-up, do not update the backbone parameters
        for param in model.backbone.parameters():
            param.requires_grad = epoch > warmup
        train_loss = train(model, train_loader, optimizer)
        results['train_loss'].append(train_loss)
        val_precise, features = val(model, val_loader)
        results['val_precise'].append(val_precise * 100)
        # save statistics
        data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))
        data_frame.to_csv('{}/{}_results.csv'.format(save_root, save_name_pre), index_label='epoch')
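        # keep the model weights and extracted feature vectors from the best validation epoch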
        if val_precise > best_precise:
            best_precise = val_precise
            torch.save(model.state_dict(), '{}/{}_model.pth'.format(save_root, save_name_pre))
            torch.save(features, '{}/{}_vectors.pth'.format(save_root, save_name_pre))
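
# example invocation (a sketch, assuming data_root points at the dataset layout expected by DomainDataset):
#   python train.py --data_root /home/data --data_name sketchy --backbone_type resnet50 --epochs 10 --warmup 1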