-
Notifications
You must be signed in to change notification settings - Fork 2
/
discriminator.py
99 lines (86 loc) · 2.79 KB
/
discriminator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# importing libraries
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchaudio
import torchvision
import os
from torchvision import transforms, datasets
import numpy as np
import shutil
from torch.utils.data import DataLoader, Subset
import torch.optim as optim
import time
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import StepLR
# Convolution-InstanceNorm-LeakyReLU building block
class ConvInstanceNormLeakyReLUBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int):
"""
Class object initialization for Convolution-InstanceNorm-LeakyReLU layer
We use leaky ReLUs with a slope of 0.2.
"""
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(
in_channels,
out_channels,
kernel_size=4,
stride=stride,
padding=1,
bias=True,
padding_mode="reflect",
),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(0.2, inplace=True),
)
def forward(self, x):
return self.conv(x)
class Discriminator(nn.Module):
    def __init__(self, in_channels=3, features=(64, 128, 256, 512)):
        """
        PatchGAN discriminator (CycleGAN-style).

        Let Ck denote a 4x4 Convolution-InstanceNorm-LeakyReLU layer with
        k filters. The architecture is C64-C128-C256-C512, where the first
        layer (C64) omits InstanceNorm and the last Ck uses stride 1; all
        leaky ReLUs have slope 0.2. A final 4x4 convolution maps the features
        to a 1-channel patch map of real/fake scores.

        Args:
            in_channels: channels of the input image (default 3 for RGB).
            features: filter counts for the successive Ck layers. A tuple
                default is used instead of a mutable list (classic Python
                pitfall); any sequence of ints is accepted.
        """
        super().__init__()
        # C64: the first layer applies no normalization.
        self.initial_layer = nn.Sequential(
            nn.Conv2d(
                in_channels,
                features[0],
                kernel_size=4,
                stride=2,
                padding=1,
                padding_mode="reflect",
            ),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # Remaining Ck blocks; only the final Ck uses stride 1.
        layers = []
        channels = features[0]
        for width in features[1:]:
            step = 1 if width == features[-1] else 2
            layers.append(
                ConvInstanceNormLeakyReLUBlock(channels, width, stride=step)
            )
            channels = width
        # After the last Ck, a convolution produces a 1-channel output map.
        layers.append(
            nn.Conv2d(
                channels,
                1,
                kernel_size=4,
                stride=1,
                padding=1,
                padding_mode="reflect",
            )
        )
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Return per-patch real/fake probabilities in [0, 1] for image ``x``."""
        out = self.initial_layer(x)
        # Sigmoid squashes the patch logits into probabilities.
        return torch.sigmoid(self.model(out))