model.py
304 lines (255 loc) · 16.2 KB
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from utils import *
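# Model definitions (TensorLayer 1.x / TensorFlow 1.x API): a strided-convolution
# discriminator, a batch-normalised U-Net generator, and a VGG16 feature extractor.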
def discriminator(input_images, is_train=True, reuse=False):
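    """Strided-convolution discriminator.

    Downsamples the input with 4x4 stride-2 convolutions (batch norm + leaky
    ReLU), refines the top features with 1x1 convolutions and a small residual
    block, then flattens to a single unit. Returns the output network (with
    sigmoid applied) together with the pre-sigmoid logits.
    """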
w_init = tf.random_normal_initializer(stddev=0.02)
b_init = None
gamma_init = tf.random_normal_initializer(1., 0.02)
df_dim = 64
with tf.variable_scope("discriminator", reuse=reuse):
tl.layers.set_name_reuse(reuse)
net_in = InputLayer(input_images,
name='input')
net_h0 = Conv2d(net_in, df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2),
padding='SAME', W_init=w_init, name='h0/conv2d')
net_h1 = Conv2d(net_h0, df_dim * 2, (4, 4), (2, 2), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h1/conv2d')
net_h1 = BatchNormLayer(net_h1, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h1/batchnorm')
net_h2 = Conv2d(net_h1, df_dim * 4, (4, 4), (2, 2), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h2/conv2d')
net_h2 = BatchNormLayer(net_h2, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h2/batchnorm')
net_h3 = Conv2d(net_h2, df_dim * 8, (4, 4), (2, 2), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h3/conv2d')
net_h3 = BatchNormLayer(net_h3, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h3/batchnorm')
net_h4 = Conv2d(net_h3, df_dim * 16, (4, 4), (2, 2), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h4/conv2d')
net_h4 = BatchNormLayer(net_h4, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h4/batchnorm')
net_h5 = Conv2d(net_h4, df_dim * 32, (4, 4), (2, 2), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h5/conv2d')
net_h5 = BatchNormLayer(net_h5, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h5/batchnorm')
net_h6 = Conv2d(net_h5, df_dim * 16, (1, 1), (1, 1), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h6/conv2d')
net_h6 = BatchNormLayer(net_h6, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h6/batchnorm')
net_h7 = Conv2d(net_h6, df_dim * 8, (1, 1), (1, 1), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h7/conv2d')
net_h7 = BatchNormLayer(net_h7, is_train=is_train, gamma_init=gamma_init, name='h7/batchnorm')
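        # residual branch on top of h7: 1x1 -> 3x3 -> 3x3 convolutions,
        # added back onto net_h7 below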
net = Conv2d(net_h7, df_dim * 2, (1, 1), (1, 1), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h7_res/conv2d')
net = BatchNormLayer(net, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h7_res/batchnorm')
net = Conv2d(net, df_dim * 2, (3, 3), (1, 1), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h7_res/conv2d2')
net = BatchNormLayer(net, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='h7_res/batchnorm2')
net = Conv2d(net, df_dim * 8, (3, 3), (1, 1), act=None,
padding='SAME', W_init=w_init, b_init=b_init, name='h7_res/conv2d3')
net = BatchNormLayer(net, is_train=is_train, gamma_init=gamma_init, name='h7_res/batchnorm3')
net_h8 = ElementwiseLayer(layer=[net_h7, net], combine_fn=tf.add, name='h8/add')
net_h8.outputs = tl.act.lrelu(net_h8.outputs, 0.2)
net_ho = FlattenLayer(net_h8, name='output/flatten')
net_ho = DenseLayer(net_ho, n_units=1, act=tf.identity, W_init=w_init, name='output/dense')
logits = net_ho.outputs
net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)
return net_ho, logits
def u_net_bn(x, is_train=False, reuse=False, is_refine=False):
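    """Batch-normalised U-Net generator.

    Eight 4x4 stride-2 convolutions encode the input down to 1x1 (the fixed
    out_size values assume a 256x256 input), and mirrored transposed
    convolutions with skip connections to the encoder decode back to the input
    resolution. The single-channel output passes through tanh; with
    is_refine=True the input is added back and the result is clamped to [-1, 1].
    """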
w_init = tf.truncated_normal_initializer(stddev=0.01)
b_init = tf.constant_initializer(value=0.0)
gamma_init = tf.random_normal_initializer(1., 0.02)
with tf.variable_scope("u_net", reuse=reuse):
tl.layers.set_name_reuse(reuse)
inputs = InputLayer(x, name='input')
conv1 = Conv2d(inputs, 64, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2), padding='SAME',
W_init=w_init, b_init=b_init, name='conv1')
conv2 = Conv2d(conv1, 128, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv2')
conv2 = BatchNormLayer(conv2, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn2')
conv3 = Conv2d(conv2, 256, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv3')
conv3 = BatchNormLayer(conv3, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn3')
conv4 = Conv2d(conv3, 512, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv4')
conv4 = BatchNormLayer(conv4, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn4')
conv5 = Conv2d(conv4, 512, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv5')
conv5 = BatchNormLayer(conv5, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn5')
conv6 = Conv2d(conv5, 512, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv6')
conv6 = BatchNormLayer(conv6, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn6')
conv7 = Conv2d(conv6, 512, (4, 4), (2, 2), act=None, padding='SAME',
W_init=w_init, b_init=b_init, name='conv7')
conv7 = BatchNormLayer(conv7, act=lambda x: tl.act.lrelu(x, 0.2),
is_train=is_train, gamma_init=gamma_init, name='bn7')
conv8 = Conv2d(conv7, 512, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2),
padding='SAME', W_init=w_init, b_init=b_init, name='conv8')
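        # decoder: transposed convolutions, each concatenated with the
        # matching encoder feature map (U-Net skip connections)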
up7 = DeConv2d(conv8, 512, (4, 4), out_size=(2, 2), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv7')
up7 = BatchNormLayer(up7, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn7')
up6 = ConcatLayer([up7, conv7], concat_dim=3, name='concat6')
up6 = DeConv2d(up6, 1024, (4, 4), out_size=(4, 4), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv6')
up6 = BatchNormLayer(up6, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn6')
up5 = ConcatLayer([up6, conv6], concat_dim=3, name='concat5')
up5 = DeConv2d(up5, 1024, (4, 4), out_size=(8, 8), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv5')
up5 = BatchNormLayer(up5, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn5')
up4 = ConcatLayer([up5, conv5], concat_dim=3, name='concat4')
up4 = DeConv2d(up4, 1024, (4, 4), out_size=(16, 16), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv4')
up4 = BatchNormLayer(up4, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn4')
up3 = ConcatLayer([up4, conv4], concat_dim=3, name='concat3')
up3 = DeConv2d(up3, 256, (4, 4), out_size=(32, 32), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv3')
up3 = BatchNormLayer(up3, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn3')
up2 = ConcatLayer([up3, conv3], concat_dim=3, name='concat2')
up2 = DeConv2d(up2, 128, (4, 4), out_size=(64, 64), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv2')
up2 = BatchNormLayer(up2, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn2')
up1 = ConcatLayer([up2, conv2], concat_dim=3, name='concat1')
up1 = DeConv2d(up1, 64, (4, 4), out_size=(128, 128), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv1')
up1 = BatchNormLayer(up1, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn1')
up0 = ConcatLayer([up1, conv1], concat_dim=3, name='concat0')
up0 = DeConv2d(up0, 64, (4, 4), out_size=(256, 256), strides=(2, 2), padding='SAME',
act=None, W_init=w_init, b_init=b_init, name='deconv0')
up0 = BatchNormLayer(up0, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn0')
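        # refinement mode adds the prediction back onto the input and clamps
        # the result to [-1, 1]; otherwise the tanh output is returned directly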
if is_refine:
out = Conv2d(up0, 1, (1, 1), act=tf.nn.tanh, name='out')
out = ElementwiseLayer([out, inputs], tf.add, 'add_for_refine')
out.outputs = tl.act.ramp(out.outputs, v_min=-1, v_max=1)
else:
out = Conv2d(up0, 1, (1, 1), act=tf.nn.tanh, name='out')
return out
def vgg16_cnn_emb(t_image, reuse=False):
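    """VGG16 convolutional feature extractor.

    Rescales the [-1, 1] input to [0, 255], subtracts the ImageNet channel
    means, and runs the standard VGG16 conv/pool stack. Returns the pool4
    feature map (conv4) together with the flattened pool5 features.
    """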
with tf.variable_scope("vgg16_cnn", reuse=reuse) as vs:
tl.layers.set_name_reuse(reuse)
t_image = (t_image + 1) * 127.5 # convert input of [-1, 1] to [0, 255]
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
net_in = InputLayer(t_image - mean, name='vgg_input_im')
# conv1
network = tl.layers.Conv2dLayer(net_in,
act=tf.nn.relu,
shape=[3, 3, 3, 64],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv1_1')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 64, 64],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv1_2')
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool=tf.nn.max_pool,
name='vgg_pool1')
# conv2
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 64, 128],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv2_1')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 128, 128],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv2_2')
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool=tf.nn.max_pool,
name='vgg_pool2')
# conv3
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 128, 256],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv3_1')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 256, 256],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv3_2')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 256, 256],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv3_3')
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool=tf.nn.max_pool,
name='vgg_pool3')
# conv4
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 256, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv4_1')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 512, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv4_2')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 512, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv4_3')
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool=tf.nn.max_pool,
name='vgg_pool4')
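        # keep the pool4 output; it is returned alongside the flattened pool5 features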
conv4 = network
# conv5
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 512, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv5_1')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 512, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv5_2')
network = tl.layers.Conv2dLayer(network,
act=tf.nn.relu,
shape=[3, 3, 512, 512],
strides=[1, 1, 1, 1],
padding='SAME',
name='vgg_conv5_3')
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool=tf.nn.max_pool,
name='vgg_pool5')
network = FlattenLayer(network, name='vgg_flatten')
return conv4, network
if __name__ == "__main__":
pass