train.py
import argparse
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import datastuff
class Reader(nn.Module):
"""
    Attention-sum style reader.
"""
def __init__(self, word_embs, words_new2old, opt):
super(Reader, self).__init__()
self.wlut = nn.Embedding(opt.wordtypes, opt.emb_size)
if opt.std_feats:
self.flut = nn.Embedding(opt.ftypes, opt.emb_size if opt.add_inp else opt.feat_size)
if opt.speaker_feats:
self.splut = nn.Embedding(opt.sptypes, opt.emb_size if opt.add_inp else opt.sp_size)
self.emb_size, self.rnn_size, self.add_inp = opt.emb_size, opt.rnn_size, opt.add_inp
self.std_feats, self.speaker_feats = opt.std_feats, opt.speaker_feats
insize = opt.emb_size
if opt.std_feats and not opt.add_inp:
insize += 3*opt.feat_size + opt.extra_size
if opt.speaker_feats and not opt.add_inp:
insize += 2*opt.sp_size
self.doc_rnn = nn.GRU(insize, 2*opt.rnn_size, opt.layers, bidirectional=True)
self.drop = nn.Dropout(opt.dropout)
if opt.add_inp:
self.extr_lin = nn.Linear(opt.extra_size, opt.emb_size)
else:
self.extr_mul = nn.Parameter(
torch.Tensor(1, 1, opt.extra_size).uniform_(-opt.initrange, opt.initrange))
self.inp_activ = nn.ReLU() if opt.relu else nn.Tanh()
self.softmax = nn.Softmax(dim=1)
self.initrange = opt.initrange
self.mt_loss, self.mt_step_mode = opt.mt_loss, opt.mt_step_mode
if self.mt_loss == "idx-loss":
mt_in = 2*opt.rnn_size if self.mt_step_mode == "before" else 4*opt.rnn_size
self.doc_mt_lin = nn.Linear(mt_in, opt.max_entities+1) # 0 is an ignore idx
elif self.mt_loss == "ant-loss":
self.transform_for_ants = opt.transform_for_ants
if opt.transform_for_ants:
trans_size = 2*opt.rnn_size if self.mt_step_mode == "before" else 4*opt.rnn_size
self.ant_lin = nn.Linear(2*opt.rnn_size, trans_size, bias=False)
self.topdrop, self.mt_drop = opt.topdrop, opt.mt_drop
        self.use_choices, self.use_test_choices = opt.use_choices, opt.use_test_choices
        self.use_qidx = opt.use_qidx
self.init_weights(word_embs, words_new2old)
def init_weights(self, word_embs, words_new2old):
"""
(re)init weights
"""
initrange = self.initrange
luts = [self.wlut]
if self.std_feats:
luts.append(self.flut)
if self.speaker_feats:
luts.append(self.splut)
for lut in luts:
lut.weight.data.uniform_(-initrange, initrange)
rnns = [self.doc_rnn]
for rnn in rnns:
for thing in rnn.parameters():
thing.data.uniform_(-initrange, initrange)
lins = []
if self.add_inp:
lins.append(self.extr_lin)
if self.mt_loss == "idx-loss":
lins.append(self.doc_mt_lin)
if self.mt_loss == "ant-loss" and self.transform_for_ants:
lins.append(self.ant_lin)
for lin in lins:
lin.weight.data.uniform_(-initrange, initrange)
if lin.bias is not None:
lin.bias.data.zero_()
        # copy pretrained word embeddings into the lookup table (rows reindexed via words_new2old)
for i in xrange(len(words_new2old)):
old_idx = words_new2old[i]
if old_idx < word_embs.size(0):
self.wlut.weight.data[i][:word_embs.size(1)].copy_(word_embs[old_idx])
def forward(self, batch, val=False):
"""
returns bsz x seqlen scores
"""
seqlen, bsz = batch["words"].size()
wembs = self.wlut(batch["words"]) # seqlen x bsz -> seqlen x bsz x emb_size
if self.std_feats:
# seqlen x bsz x 3 -> seqlen x bsz*3 x emb_size -> seqlen x bsz x 3 x emb_size
fembs = self.flut(batch["feats"].view(seqlen, -1)).view(
seqlen, bsz, -1, self.flut.embedding_dim)
if self.speaker_feats:
# seqlen x bsz x 2 -> seqlen x bsz*2 x emb_size -> seqlen x bsz x 2 x emb_size
sembs = self.splut(batch["spee_feats"].view(seqlen, -1)).view(
seqlen, bsz, -1, self.splut.embedding_dim)
inp = wembs
if self.add_inp: # mlp the input
if self.std_feats:
ex_size = batch["extr"].size(2)
inp = (inp + fembs.sum(2)
+ self.extr_lin(batch["extr"].view(-1, ex_size)).view(seqlen, bsz, -1))
if self.speaker_feats:
inp = inp + sembs.sum(2)
if self.std_feats or self.speaker_feats:
inp = self.inp_activ(inp)
else: # concatenate everything
things_to_cat = [inp]
if self.std_feats:
things_to_cat.append(fembs.view(seqlen, bsz, -1))
things_to_cat.append(batch["extr"] * self.extr_mul.expand_as(batch["extr"]))
if self.speaker_feats:
things_to_cat.append(sembs.view(seqlen, bsz, -1))
if len(things_to_cat) > 1:
inp = torch.cat(things_to_cat, 2) # seqlen x bsz x sum (all the stuff)
if self.drop.p > 0:
inp = self.drop(inp)
# view each state as [fwd_q, fwd_d, bwd_d, bwd_q]
states, _ = self.doc_rnn(inp) # seqlen x bsz x 2*2*rnn_size
doc_states = states[:, :, self.rnn_size:3*self.rnn_size]
        if self.use_qidx:
# get states before and after the question idx
b4states = states.view(-1, states.size(2))[batch["qpos"]-bsz][:, :self.rnn_size]
afterstates = states.view(-1, states.size(2))[batch["qpos"]+bsz][:, -self.rnn_size:]
query_rep = torch.cat([b4states, afterstates], 1) # bsz x 2*rnn_size
else:
query_rep = torch.cat([states[seqlen-1, :, :self.rnn_size],
states[0, :, -self.rnn_size:]], 1) # bsz x 2*rnn_size
if self.topdrop and self.drop.p > 0:
doc_states = self.drop(doc_states)
# bsz x seqlen x 2*rnn_size * bsz x 2*rnn_size x 1 -> bsz x seqlen x 1 -> bsz x seqlen
scores = torch.bmm(doc_states.transpose(0, 1), query_rep.unsqueeze(2)).squeeze(2)
if self.use_choices or (val and self.use_test_choices):
scores = batch["choicemask"] * scores
doc_mt_scores = None
if self.mt_loss == "idx-loss":
doc_mt_scores = self.get_step_scores(states)
elif self.mt_loss == "ant-loss":
doc_mt_scores = self.get_ant_scores(states)
return self.softmax(scores), doc_mt_scores
def get_states_for_step(self, states):
"""
gets the states we want for doing multiclass pred @ time t
args:
states - seqlen x bsz x 2*2*rnn_size; view each state as [fwd_q, fwd_d, bwd_d, bwd_q]
returns:
seqlen*bsz x something
"""
seqlen, bsz, drnn_sz = states.size()
if not hasattr(self, "dummy"):
self.dummy = states.data.new(1, drnn_sz/2).zero_()
dummy = self.dummy
if self.mt_step_mode == "exact":
nustates = states.view(-1, drnn_sz) # seqlen*bsz x 2*2*rnn_size
elif self.mt_step_mode == "before-after":
dummyvar = Variable(dummy.expand(bsz, drnn_sz/2))
# prepend zeros to front, giving seqlen*bsz x 2*rnn_size
fwds = torch.cat([dummyvar, states.view(-1, drnn_sz)[:-bsz, :drnn_sz/2]], 0)
# append zeros to back, giving seqlen*bsz x 2*rnn_size
bwds = torch.cat([states.view(-1, drnn_sz)[bsz:, drnn_sz/2:], dummyvar], 0)
nustates = torch.cat([fwds, bwds], 1) # seqlen*bsz x 2*2*rnn_size
elif self.mt_step_mode == "before": # just before
dummyvar = Variable(dummy.expand(bsz, drnn_sz/2))
# prepend zeros to front, giving seqlen*bsz x 2*rnn_size
nustates = torch.cat([dummyvar, states.view(-1, drnn_sz)[:-bsz, :drnn_sz/2]], 0)
else:
            assert False, "unrecognized mt_step_mode: %s" % self.mt_step_mode
return nustates
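    # Note on the shifting above (illustrative): the flattened view orders rows as
    # [t0_b0, ..., t0_b{bsz-1}, t1_b0, ...], so dropping the last/first bsz rows and
    # padding with zero "dummy" rows aligns each token with the state from the
    # previous ("before") or next ("after") time step.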
def get_step_scores(self, states):
"""
states - seqlen x bsz x 2*2*rnn_size
returns:
seqlen*bsz x nclasses
"""
states_for_step = self.get_states_for_step(states)
if self.mt_drop and self.drop.p > 0:
states_for_step = self.drop(states_for_step)
doc_mt_preds = self.doc_mt_lin(states_for_step) # seqlen*bsz x nclasses
return doc_mt_preds
def get_ant_scores(self, states):
"""
states - seqlen x bsz x 2*2*rnn_size
return:
bsz x seqlen x seqlen
"""
seqlen, bsz, drnn_sz = states.size()
states_for_step = self.get_states_for_step(states).view(seqlen, bsz, -1)
# may need to transform first....
# bsz x seqlen x sz * bsz x sz x seqlen -> bsz x seqlen x seqlen
if self.transform_for_ants:
ant_states = states.view(-1, states.size(2))[:, :drnn_sz/2] # seqlen*bsz x 2*rnn_size
ant_states = self.ant_lin(ant_states).view(seqlen, bsz, -1)
else:
ant_states = states[:, :, :drnn_sz/2] # seqlen x bsz x 2*rnn_size
scores = torch.bmm(states_for_step.transpose(0, 1),
ant_states.transpose(0, 1).transpose(1, 2))
return scores
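# Rough usage sketch for Reader (illustrative only; the batch dict is assumed to
# follow the format produced by datastuff's load_data, with the "words", "feats",
# "extr", etc. keys used in forward() above):
#   net = Reader(data.word_embs, data.words_new2old, args)
#   word_scores, doc_mt_scores = net(batch)  # word_scores: bsz x seqlen attention over the doc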
def get_ncorrect(batch, scores):
"""
    brute-force accuracy: sum each word's attention mass and take the argmax word type
scores - bsz x seqlen
answers - bsz
"""
bsz, seqlen = scores.size()
words, answers = batch["words"].data, batch["answers"].data
ncorrect = 0
for b in xrange(bsz):
word2prob = defaultdict(float)
best, best_prob = -1, -float("inf")
for i in xrange(seqlen):
word2prob[words[i][b]] += scores.data[b][i]
if word2prob[words[i][b]] > best_prob:
best = words[i][b]
best_prob = word2prob[words[i][b]]
ncorrect += (best == answers[b])
return ncorrect
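# Note: the predicted answer above is the word type with the largest summed
# attention mass, mirroring the marginalization used in attn_sum_loss below.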
def attn_sum_loss(batch, scores):
"""
scores - bsz x seqlen
answers - bsz
"""
bsz, seqlen = scores.size()
mask = batch["answers"].data.unsqueeze(1).expand(bsz, seqlen).eq(batch["words"].data.t())
marg_log_prob_sum = (scores * Variable(mask.float())).sum(1).log().sum()
return -marg_log_prob_sum
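# Worked example of the attention-sum objective (illustrative values only): if the
# document is ["mary", "saw", "mary", "run"] and the answer is "mary", the loss for
# that example is -log(p_0 + p_2), i.e. the negative log of the attention mass
# summed over every position whose word matches the answer.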
xent = nn.CrossEntropyLoss(ignore_index=0, size_average=False)
def multitask_loss1(batch, doc_mt_scores):
"""
doc_mt_scores - seqlen*bsz x nclasses
"""
mt1_targs = batch["mt1_targs"] # seqlen x bsz, w/ 0 where we want to ignore
loss = xent(doc_mt_scores, mt1_targs.view(-1))
return loss
def multitask_loss2(batch, doc_mt_scores, sm):
"""
doc_mt_scores - bsz x seqlen x seqlen
    N.B. currently this only considers entities that are repeated; it should
    really predict a dummy value for entities that are not repeated
"""
bsz, seqlen, _ = doc_mt_scores.size()
loss = 0
reps = batch["mt2_targs"] # bsz x seqlen; indicators for repeated entities
for b in xrange(bsz):
# get lower triangle (excluding diagonal!) then softmax
pws = sm(torch.tril(doc_mt_scores[b], diagonal=-1)) # seqlen x seqlen
words_b = batch["words"].data[:, b].unsqueeze(1).expand(seqlen, seqlen).t()
#mask = ents[b].data.unsqueeze(1).expand(seqlen, seqlen).eq(words_b)
mask = words_b.t().eq(words_b) # seqlen x seqlen
marg_log_probs = (pws * Variable(torch.tril( # probably not necessary to tril...
mask.float(), diagonal=-1))).sum(1).add_(1e-6).log()
# we need to ignore rows not corresponding to entities
loss = loss - marg_log_probs.dot(reps[b])
return loss
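# The two auxiliary losses above correspond to the -mt_loss options below:
# "idx-loss" classifies each token's entity index with multitask_loss1, while
# "ant-loss" scores each token against its candidate antecedent positions with
# multitask_loss2.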
parser = argparse.ArgumentParser(description='train an attention-sum style reader')
parser.add_argument('-datafile', type=str, default='', help='path to the dataset file')
parser.add_argument('-bsz', type=int, default=64, help='batch size')
parser.add_argument('-maxseqlen', type=int, default=1024, help='maximum sequence length')
parser.add_argument('-save', type=str, default='', help='path to save the final model')
parser.add_argument('-load', type=str, default='', help='path to saved model')
parser.add_argument('-std_feats', action='store_true', help='use standard token features')
parser.add_argument('-speaker_feats', action='store_true', help='use speaker features')
parser.add_argument('-use_choices', action='store_true',
                    help='restrict predictions to the provided answer choices')
parser.add_argument('-use_test_choices', action='store_true',
                    help='restrict predictions to answer choices at evaluation time')
parser.add_argument('-use_qidx', action='store_true',
                    help='use the states around the query token as the query representation')
parser.add_argument('-query_idx', type=int, default=56298, help='query idx in ORIGINAL data')
parser.add_argument('-mt_loss', type=str, default='',
                    choices=["", "idx-loss", "ant-loss"], help='auxiliary multitask loss')
parser.add_argument('-mt_step_mode', type=str, default='before',
                    choices=["exact", "before-after", "before"],
                    help='which rnn states to use for the multitask loss')
parser.add_argument('-max_entities', type=int, default=2,
                    help='number of distinct entities to predict')
parser.add_argument('-max_mentions', type=int, default=2,
                    help='number of entity tokens to predict')
parser.add_argument('-transform_for_ants', action='store_true',
                    help='linearly transform antecedent states')
parser.add_argument('-mt_coeff', type=float, default=1, help='scales mt loss')
parser.add_argument('-emb_size', type=int, default=128, help='size of word embeddings')
parser.add_argument('-rnn_size', type=int, default=128, help='size of rnn hidden state')
parser.add_argument('-feat_size', type=int, default=128, help='size of feature embeddings')
parser.add_argument('-sp_size', type=int, default=80, help='size of speaker feature embeddings')
parser.add_argument('-layers', type=int, default=1, help='num rnn layers')
parser.add_argument('-add_inp', action='store_true', help='mlp features (instead of concat)')
parser.add_argument('-dropout', type=float, default=0, help='dropout')
parser.add_argument('-topdrop', action='store_true', help='dropout on last rnn layer')
parser.add_argument('-mt_drop', action='store_true', help='dropout before mt decoder')
parser.add_argument('-relu', action='store_true', help='relu for input mlp')
parser.add_argument('-optim', type=str, default='adam', help='optimizer (adam or adagrad)')
parser.add_argument('-lr', type=float, default=0.001, help='learning rate')
parser.add_argument('-beta1', type=float, default=0.9, help='beta1 for adam')
parser.add_argument('-epochs', type=int, default=4,
                    help='minimum number of epochs; training continues while val accuracy improves')
parser.add_argument('-clip', type=float, default=5, help='gradient clipping')
parser.add_argument('-initrange', type=float, default=0.1, help='uniform init interval')
parser.add_argument('-seed', type=int, default=3435, help='random seed')
parser.add_argument('-log_interval', type=int, default=200, help='batches between progress logs')
parser.add_argument('-test', action='store_true', help='also evaluate on the test set')
parser.add_argument('-just_eval', action='store_true', help='evaluate without training')
parser.add_argument('-cuda', action='store_true', help='use CUDA')
args = parser.parse_args()
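# Example invocation (illustrative only; the data and model paths are placeholders):
#   python train.py -datafile /path/to/datafile -bsz 64 -std_feats -speaker_feats \
#       -mt_loss idx-loss -cuda -save /path/to/model.pt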
if __name__ == "__main__":
print args
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print "WARNING: You have a CUDA device, so you should probably run with -cuda"
else:
torch.cuda.manual_seed(args.seed)
# make data
data = datastuff.DataStuff(args)
saved_args, saved_state = None, None
if len(args.load) > 0:
saved_stuff = torch.load(args.load)
saved_args, saved_state = saved_stuff["opt"], saved_stuff["state_dict"]
net = Reader(data.word_embs, data.words_new2old, saved_args)
net.load_state_dict(saved_state)
else:
args.wordtypes = len(data.words_new2old)
args.ftypes = data.feat_voc_size
args.sptypes = data.spee_feat_foc_size
args.extra_size = data.extra_size
net = Reader(data.word_embs, data.words_new2old, args)
data.del_word_embs() # just to save memory
if args.cuda:
net = net.cuda()
optalg = None
if args.optim == "adagrad":
optalg = optim.Adagrad(net.parameters(), lr=args.lr)
else:
optalg = optim.Adam(net.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
batch_start_idxs = range(0, data.ntrain, args.bsz)
val_batch_start_idxs = range(0, data.nvalid, args.bsz)
if args.test:
test_batch_start_idxs = range(0, data.ntest, args.bsz)
def train(epoch):
pred_loss, mt_loss, ndocs = 0, 0, 0
net.train()
trainperm = torch.randperm(len(batch_start_idxs))
for batch_idx in xrange(len(batch_start_idxs)):
net.zero_grad()
batch = data.load_data(batch_start_idxs[trainperm[batch_idx]],
args, mode="train") # a dict
bsz = batch["words"].size(1)
for k in batch:
batch[k] = Variable(batch[k].cuda() if args.cuda else batch[k])
word_scores, doc_mt_scores = net(batch)
lossvar = attn_sum_loss(batch, word_scores)
pred_loss += lossvar.data[0]
if args.mt_loss == "idx-loss":
mt_lossvar = multitask_loss1(batch, doc_mt_scores)
mt_loss += mt_lossvar.data[0]
lossvar = lossvar + args.mt_coeff*mt_lossvar
elif args.mt_loss == "ant-loss":
mt_lossvar = multitask_loss2(batch, doc_mt_scores, net.softmax)
mt_loss += mt_lossvar.data[0]
lossvar = lossvar + args.mt_coeff*mt_lossvar
lossvar /= bsz
lossvar.backward()
torch.nn.utils.clip_grad_norm(net.parameters(), args.clip)
optalg.step()
ndocs += bsz
if (batch_idx+1) % args.log_interval == 0:
print "batch %d/%d | loss %g | mt-los %g" % (batch_idx+1, len(batch_start_idxs),
pred_loss/ndocs, mt_loss/ndocs)
print "train epoch %d | loss %g | mt-los %g" % (epoch, pred_loss/ndocs, mt_loss/ndocs)
def evaluate(epoch, test=False):
net.eval()
total, ncorrect = 0, 0
start_idxs = test_batch_start_idxs if test else val_batch_start_idxs
mode = "test" if test else "valid"
for i in xrange(len(start_idxs)):
batch = data.load_data(start_idxs[i], args, mode=mode) # a dict
bsz = batch["words"].size(1)
for k in batch:
batch[k] = Variable(batch[k].cuda() if args.cuda else batch[k], volatile=True)
word_scores, _ = net(batch, val=True)
ncorrect += get_ncorrect(batch, word_scores)
total += bsz
acc = float(ncorrect)/total
print "val epoch %d | acc: %g (%d / %d)" % (epoch, acc, ncorrect, total)
return acc
if args.just_eval:
acc = evaluate(0)
if args.test:
acc = evaluate(0, test=True)
else:
best_acc = 0
epoch = 1
improved = False
while epoch < args.epochs+1 or improved:
train(epoch)
acc = evaluate(epoch)
if acc > best_acc:
best_acc = acc
improved = True
if len(args.save) > 0:
print "saving to", args.save
state = {"opt": args, "state_dict": net.state_dict()}
torch.save(state, args.save)
else:
improved = False
epoch += 1
print