import argparse
import os

import torch
import yaml

from core.dataset import MMDataLoader
from core.losses import MultimodalLoss
from core.metric import MetricsTop
from core.scheduler import get_scheduler
from core.utils import setup_seed, get_best_results
from models.lnln import build_model

# os.environ["CUDA_VISIBLE_DEVICES"] = '1'
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
print(device)

parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str, default='')
parser.add_argument('--seed', type=int, default=-1)
opt = parser.parse_args()
print(opt)
def main():
    best_valid_results, best_test_results = {}, {}

    # Fall back to the SIMS config when no config file is given on the CLI.
    config_file = 'configs/train_sims.yaml' if opt.config_file == '' else opt.config_file
    with open(config_file) as f:
        args = yaml.load(f, Loader=yaml.FullLoader)
    print(args)
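    # The YAML config is expected to provide at least the keys this script
    # reads; a minimal sketch (key names taken from this file, values are
    # illustrative placeholders only):
    #
    #   base:
    #     seed: 1111
    #     lr: 1.0e-4
    #     weight_decay: 1.0e-4
    #     n_epochs: 50
    #     do_validation: true
    #     train_mode: regression
    #   dataset:
    #     datasetName: sims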
    # The CLI seed, if given, overrides the seed from the config file.
    seed = args['base']['seed'] if opt.seed == -1 else opt.seed
    setup_seed(seed)
    print("seed is fixed to {}".format(seed))

    # Checkpoints are grouped by dataset name.
    ckpt_root = os.path.join('ckpt', args['dataset']['datasetName'])
    if not os.path.exists(ckpt_root):
        os.makedirs(ckpt_root)
    print("ckpt root :", ckpt_root)

    model = build_model(args).to(device)
    dataLoader = MMDataLoader(args)
    optimizer = torch.optim.AdamW(model.parameters(),
                                  lr=args['base']['lr'],
                                  weight_decay=args['base']['weight_decay'])
    scheduler_warmup = get_scheduler(optimizer, args)
    loss_fn = MultimodalLoss(args)
    metrics = MetricsTop(train_mode=args['base']['train_mode']).getMetics(args['dataset']['datasetName'])

    for epoch in range(1, args['base']['n_epochs'] + 1):
        train(model, dataLoader['train'], optimizer, loss_fn, epoch, metrics)

        if args['base']['do_validation']:
            valid_results = evaluate(model, dataLoader['valid'], loss_fn, epoch, metrics)
            best_valid_results = get_best_results(valid_results, best_valid_results, epoch, model,
                                                  optimizer, ckpt_root, seed, save_best_model=False)
            print(f'Current Best Valid Results: {best_valid_results}')

        test_results = evaluate(model, dataLoader['test'], loss_fn, epoch, metrics)
        best_test_results = get_best_results(test_results, best_test_results, epoch, model,
                                             optimizer, ckpt_root, seed, save_best_model=True)
        print(f'Current Best Test Results: {best_test_results}\n')

        scheduler_warmup.step()
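# Each batch yielded by MMDataLoader is assumed (inferred from the accesses
# below, not from the dataset code itself) to be a dict with at least:
#   data['vision'], data['audio'], data['text']         complete modality tensors
#   data['vision_m'], data['audio_m'], data['text_m']   modality-missing counterparts
#   data['labels']['M']                                 sentiment labels
#   data['labels']['missing_rate_l']                    per-sample language missing rate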
def train(model, train_loader, optimizer, loss_fn, epoch, metrics):
    y_pred, y_true = [], []
    loss_dict = {}
    model.train()

    for cur_iter, data in enumerate(train_loader):
        # Complete and modality-missing views of the same batch.
        complete_input = (data['vision'].to(device), data['audio'].to(device), data['text'].to(device))
        incomplete_input = (data['vision_m'].to(device), data['audio_m'].to(device), data['text_m'].to(device))

        sentiment_labels = data['labels']['M'].to(device)
        # Completeness is the complement of the language missing rate.
        completeness_labels = 1. - data['labels']['missing_rate_l'].to(device)
        # Binary targets for the effectiveness discriminator: ones for the first
        # group of outputs, zeros for the second (the factor of 8 presumably
        # matches the number of discriminator outputs per sample in the model).
        effectiveness_labels = torch.cat([torch.ones(len(sentiment_labels) * 8),
                                          torch.zeros(len(sentiment_labels) * 8)]).long().to(device)
        label = {'sentiment_labels': sentiment_labels,
                 'completeness_labels': completeness_labels,
                 'effectiveness_labels': effectiveness_labels}

        out = model(complete_input, incomplete_input)
        loss = loss_fn(out, label)

        loss['loss'].backward()
        optimizer.step()
        optimizer.zero_grad()

        y_pred.append(out['sentiment_preds'].cpu())
        y_true.append(label['sentiment_labels'].cpu())

        # Accumulate every loss term so it can be averaged over the epoch.
        if cur_iter == 0:
            for key, value in loss.items():
                loss_dict[key] = value.item()
        else:
            for key, value in loss.items():
                loss_dict[key] += value.item()

    pred, true = torch.cat(y_pred), torch.cat(y_true)
    results = metrics(pred, true)
    loss_dict = {key: value / (cur_iter + 1) for key, value in loss_dict.items()}
    print(f'Train Loss Epoch {epoch}: {loss_dict}')
    print(f'Train Results Epoch {epoch}: {results}')
def evaluate(model, eval_loader, loss_fn, epoch, metrics):
    loss_dict = {}
    y_pred, y_true = [], []
    model.eval()

    for cur_iter, data in enumerate(eval_loader):
        # At evaluation time only the modality-missing view is fed to the model.
        complete_input = (None, None, None)
        incomplete_input = (data['vision_m'].to(device), data['audio_m'].to(device), data['text_m'].to(device))

        sentiment_labels = data['labels']['M'].to(device)
        completeness_labels = 1. - data['labels']['missing_rate_l'].to(device)
        effectiveness_labels = torch.cat([torch.ones(len(sentiment_labels) * 8),
                                          torch.zeros(len(sentiment_labels) * 8)]).long().to(device)
        label = {'sentiment_labels': sentiment_labels,
                 'completeness_labels': completeness_labels,
                 'effectiveness_labels': effectiveness_labels}

        with torch.no_grad():
            out = model(complete_input, incomplete_input)
            loss = loss_fn(out, label)

        y_pred.append(out['sentiment_preds'].cpu())
        y_true.append(label['sentiment_labels'].cpu())

        # Some loss terms may come back as plain floats rather than tensors,
        # so only call .item() on tensors.
        for key, value in loss.items():
            value = value.item() if torch.is_tensor(value) else value
            loss_dict[key] = loss_dict.get(key, 0.) + value

    pred, true = torch.cat(y_pred), torch.cat(y_true)
    results = metrics(pred, true)
    # print(f'Test Loss Epoch {epoch}: {loss_dict}')
    # print(f'Test Results Epoch {epoch}: {results}')
    return results
if __name__ == '__main__':
main()
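# Example invocations (a sketch; configs/train_sims.yaml is the default set in
# main(), and any other config path is assumed to follow the same YAML schema):
#   python train.py
#   python train.py --config_file configs/train_sims.yaml --seed 1111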