Parameter_init.py
import torch
import torch.nn as nn
from torch.nn import init


def init_weights(net, init_type='normal', gain=0.2):
    """Initialize the Conv/Linear/BatchNorm2d weights of `net` in place."""
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm weights are scale factors, so they are drawn around 1.0 rather than 0.0.
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)


def init_net(net, init_type='normal', gpu_ids=[]):
    """Move `net` to the first listed GPU (if any), then apply weight initialization."""
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()
        net.cuda(gpu_ids[0])
        # net = torch.nn.DataParallel(net, gpu_ids)
    if init_type is None:
        print("no initialization")
        return net
    init_weights(net, init_type)
    return net
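

# A minimal usage sketch (not part of the original file): `TinyNet` below is a
# hypothetical model defined only to demonstrate calling init_net; swap in your
# own network. Pass gpu_ids=[0] to move the model to the first GPU before
# initialization, or init_type=None to skip initialization entirely.
if __name__ == '__main__':
    class TinyNet(nn.Module):
        def __init__(self):
            super(TinyNet, self).__init__()
            self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
            self.bn = nn.BatchNorm2d(8)
            self.fc = nn.Linear(8, 2)

    # Conv/Linear weights get Kaiming initialization; BatchNorm weights are
    # drawn around 1.0 as implemented in init_func above.
    model = init_net(TinyNet(), init_type='kaiming', gpu_ids=[])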