layers.py (forked from iwtw/pytorch-TP-GAN)
# Convenience wrappers around common PyTorch layers (conv / deconv / residual blocks).
import torch.nn as nn
# In-place init functions; the un-suffixed xavier_normal / kaiming_normal aliases are deprecated.
from torch.nn.init import xavier_normal_, kaiming_normal_


def sequential(*layers):
    """nn.Sequential that also exposes the out_channels of its last conv/linear-like layer."""
    seq = nn.Sequential(*layers)
    for layer in reversed(layers):
        if hasattr(layer, 'out_channels'):
            seq.out_channels = layer.out_channels
            break
        if hasattr(layer, 'out_features'):
            seq.out_channels = layer.out_features
            break
    return seq
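
# Usage sketch (names and sizes below are illustrative, not from the original repo):
# sequential() behaves like nn.Sequential but also exposes `out_channels`, taken from the
# last layer that defines out_channels / out_features, so blocks can be chained without
# tracking widths by hand, e.g.
#
#     trunk = sequential(nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(),
#                        nn.Conv2d(64, 128, 3, padding=1))
#     trunk.out_channels  # -> 128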


def weight_initialization(weight, init, activation):
    """Initialize `weight` in place according to `init` ("kaiming", "xavier" or None)."""
    if init is None:
        return
    if init == "kaiming":
        assert activation is not None
        # He init takes the negative slope of the rectifier that follows:
        # LeakyReLU exposes `negative_slope`, plain ReLU corresponds to a = 0.
        if hasattr(activation, "negative_slope"):
            kaiming_normal_(weight, a=activation.negative_slope)
        else:
            kaiming_normal_(weight, a=0)
    elif init == "xavier":
        xavier_normal_(weight)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0,
         init="kaiming", activation=nn.ReLU(), use_batchnorm=False):
    """Conv2d with optional reflection padding, weight init, activation and batch norm."""
    convs = []
    if isinstance(padding, list):
        # A 4-element [left, right, top, bottom] list selects reflection padding.
        assert len(padding) != 3
        if len(padding) == 4:
            convs.append(nn.ReflectionPad2d(padding))
            padding = 0
    convs.append(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding))
    weight_initialization(convs[-1].weight, init, activation)
    if activation is not None:
        convs.append(activation)
    if use_batchnorm:
        convs.append(nn.BatchNorm2d(out_channels))
    seq = nn.Sequential(*convs)
    seq.out_channels = out_channels
    return seq
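
# Usage sketch (sizes are illustrative): a 4-element [left, right, top, bottom] padding
# list switches the wrapper to reflection padding, otherwise ordinary zero padding is used:
#
#     head = conv(3, 64, 7, stride=1, padding=[3, 3, 3, 3])             # ReflectionPad2d + Conv2d + ReLU
#     down = conv(64, 128, 3, stride=2, padding=1, use_batchnorm=True)  # Conv2d + ReLU + BatchNorm2d
#
# Both return an nn.Sequential whose `out_channels` attribute is set, for use with
# sequential() and ResidualBlock below.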


def deconv(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0,
           init="kaiming", activation=nn.ReLU(), use_batchnorm=False):
    """ConvTranspose2d with optional weight init, activation and batch norm."""
    convs = [nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride,
                                padding, output_padding)]
    weight_initialization(convs[0].weight, init, activation)
    if activation is not None:
        convs.append(activation)
    if use_batchnorm:
        convs.append(nn.BatchNorm2d(out_channels))
    seq = nn.Sequential(*convs)
    seq.out_channels = out_channels
    return seq
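
# Shape note (standard transposed-convolution arithmetic, dilation = 1): each spatial
# dimension maps as out = (in - 1) * stride - 2 * padding + kernel_size + output_padding,
# so for example deconv(64, 32, 4, stride=2, padding=1) doubles a 16x16 map to 32x32.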


class ResidualBlock(nn.Module):
    """Residual block with an optional bottleneck and a strided 1x1 projection shortcut."""

    def __init__(self, in_channels,
                 out_channels=None,
                 kernel_size=3,
                 stride=1,
                 padding=None,
                 weight_init="kaiming",
                 activation=nn.ReLU(),
                 is_bottleneck=False,
                 use_projection=False,
                 scaling_factor=1.0):
        super(ResidualBlock, self).__init__()
        if out_channels is None:
            out_channels = in_channels // stride
        self.out_channels = out_channels
        self.use_projection = use_projection  # kept for API compatibility, not used below
        self.scaling_factor = scaling_factor
        self.activation = activation
        convs = []
        assert stride in [1, 2]
        if stride == 1:
            self.shortcut = nn.Sequential()  # identity shortcut
        else:
            # 1x1 strided projection so the shortcut matches the main branch's shape.
            self.shortcut = conv(in_channels, out_channels, 1, stride, 0, None, None, False)
        if is_bottleneck:
            convs.append(conv(in_channels, in_channels // 2, 1, 1, 0, weight_init, activation, False))
            convs.append(conv(in_channels // 2, out_channels // 2, kernel_size, stride,
                              (kernel_size - 1) // 2, weight_init, activation, False))
            convs.append(conv(out_channels // 2, out_channels, 1, 1, 0, None, None, False))
        else:
            # The first conv carries the stride so the main branch stays aligned with the shortcut.
            convs.append(conv(in_channels, in_channels, kernel_size, stride,
                              padding if padding is not None else (kernel_size - 1) // 2,
                              weight_init, activation, False))
            convs.append(conv(in_channels, out_channels, kernel_size, 1,
                              padding if padding is not None else (kernel_size - 1) // 2,
                              None, None, False))
        self.layers = nn.Sequential(*convs)

    def forward(self, x):
        return self.activation(self.layers(x) + self.scaling_factor * self.shortcut(x))
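

# Minimal smoke test (a sketch, not part of the original module); the shapes below are
# assumed toy sizes chosen only to exercise each wrapper. Run with `python layers.py`.
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 16, 32, 32)  # N=2, C=16, 32x32 feature map

    # conv: stride-2 downsampling halves the spatial size and doubles the channels.
    down = conv(16, 32, 3, stride=2, padding=1)
    y = down(x)
    assert y.shape == (2, 32, 16, 16)

    # deconv: the mirrored transposed conv maps 16x16 back to 32x32.
    up = deconv(32, 16, 3, stride=2, padding=1, output_padding=1)
    z = up(y)
    assert z.shape == (2, 16, 32, 32)

    # ResidualBlock with stride 1 keeps the shape and uses the identity shortcut.
    block = ResidualBlock(16)
    assert block(x).shape == x.shape

    # sequential() exposes the out_channels of its last conv-like layer.
    trunk = sequential(down, up)
    assert trunk.out_channels == 16

    print("all shape checks passed")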