main.py
import argparse
from datetime import datetime

import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tensorboardX import SummaryWriter

import config
from model import *    # nielsen() model definition
from utils import *    # assumed source of train(), get_label(), eval_label() and the metric helpers
from loader import *   # assumed source of Firedataset and random_data_generator()

def test(data, label, model):
    """Evaluate the model on one batch and return (FN, FP, accuracy)."""
    label = get_label(label)
    p_label = eval_label(model, data)
    fn = compute_fn(p_label, label)
    fp = compute_fp(p_label, label)
    acc = accuracy(p_label, label)
    return fn, fp, acc
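
# A minimal sketch of what the metric helpers imported from utils are assumed
# to compute. The function below is a hypothetical stand-in, not the actual
# utils API: it treats predictions and labels as 0/1 tensors and returns the
# false-negative rate, false-positive rate, and accuracy in one call.
def _example_metrics(p_label, label):
    fn = ((p_label == 0) & (label == 1)).float().mean().item()   # missed fires
    fp = ((p_label == 1) & (label == 0)).float().mean().item()   # false alarms
    acc = (p_label == label).float().mean().item()
    return fn, fp, acc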

def train_with_random_data(logdir):
    """Train on randomly generated data; logs per-epoch metrics to TensorBoard."""
    now = datetime.now()
    model = nielsen()
    optimizer = optim.Adam(model.parameters(), lr=0.0005)
    data, t_label = random_data_generator(config.data_size)
    summary = SummaryWriter(logdir=logdir + "train_{}-{}-{}-{}".format(now.month, now.day, now.hour, now.minute))
    for epoch in range(config.epoch):
        fn, fp, acc, loss = 0, 0, 0, 0
        for i in range(config.data_size // config.batch_size):
            batch_data = data[i * config.batch_size:(i + 1) * config.batch_size]
            batch_label = t_label[i * config.batch_size:(i + 1) * config.batch_size]
            b_loss = train(batch_data, batch_label, model, optimizer)
            b_fn, b_fp, b_acc = test(batch_data, batch_label, model)
            # Incremental running averages of the per-batch metrics over the epoch.
            fn = fn + (b_fn - fn) / (i + 1)
            fp = fp + (b_fp - fp) / (i + 1)
            acc = acc + (b_acc - acc) / (i + 1)
            loss = loss + (b_loss - loss) / (i + 1)
        print("Epoch : %d | Loss : %.2f | Accuracy : %.2f | FN : %.2f | FP : %.2f" % (epoch, loss, acc, fn, fp))
        # Log each metric under its own tag so the four curves stay separate.
        summary.add_scalar('loss', loss, epoch)
        summary.add_scalar('accuracy', acc, epoch)
        summary.add_scalar('fn', fn, epoch)
        summary.add_scalar('fp', fp, epoch)
        if epoch % 5 == 0:
            torch.save(model.state_dict(), logdir + '/weights.pth')
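
# Hypothetical sketch of the interface random_data_generator() from loader is
# assumed to expose: n random video clips plus binary fire/no-fire labels.
# The tensor shapes below are illustrative guesses, not the loader's real ones.
def _example_random_data_generator(n):
    videos = torch.rand(n, 3, 16, 64, 64)   # N x C x T x H x W clips
    labels = torch.randint(0, 2, (n,))      # 0 = no fire, 1 = fire
    return videos, labels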

def train_with_dataset(dataset_dir, label_dir, logdir):
    """Train on Firedataset; makes a single pass over the dataloader,
    logging per-batch metrics to TensorBoard."""
    now = datetime.now()
    dataset = Firedataset(dataset_dir, label_dir)
    model = nielsen()
    optimizer = optim.Adam(model.parameters(), lr=0.005)
    dataloader = DataLoader(dataset, batch_size=config.batch_size,
                            shuffle=True)
    summary = SummaryWriter(logdir=logdir + "train_{}-{}-{}-{}".format(now.month, now.day, now.hour, now.minute))
    for idx, batch in enumerate(dataloader):
        batch_data = batch['video']
        batch_label = batch['label']
        # Each iteration sees exactly one batch, so the batch metrics are
        # reported directly rather than averaged.
        loss = train(batch_data, batch_label, model, optimizer)
        fn, fp, acc = test(batch_data, batch_label, model)
        print("Batch : %d | Loss : %.2f | Accuracy : %.2f | FN : %.2f | FP : %.2f" % (idx, loss, acc, fn, fp))
        summary.add_scalar('loss', loss, idx)
        summary.add_scalar('accuracy', acc, idx)
        summary.add_scalar('fn', fn, idx)
        summary.add_scalar('fp', fp, idx)
        if idx % 5 == 0:
            torch.save(model.state_dict(), logdir + '/weights.pth')
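
# Hypothetical sketch of the Dataset interface Firedataset (from loader) is
# assumed to implement: each item is a dict with 'video' and 'label' keys,
# matching the batch access in train_with_dataset(). The placeholder tensors
# and their shapes are illustrative only.
class _ExampleFireDataset(Dataset):
    def __init__(self, dataset_dir, label_dir):
        # A real implementation would index video files under dataset_dir
        # and read the matching labels from label_dir.
        self.videos = torch.rand(8, 3, 16, 64, 64)
        self.labels = torch.randint(0, 2, (8,))

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {'video': self.videos[idx], 'label': self.labels[idx]}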

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Predictive Models of Fire via Deep Learning Exploiting Colorific Variation')
    parser.add_argument('--data', default=None, help='Directory of FireDataset')
    parser.add_argument('--label', default=None, help='Directory of labels')
    parser.add_argument('--log', default='logs/', help='Path for logs')
    args = parser.parse_args()
    dataset_dir = args.data
    label_dir = args.label
    logdir = args.log
    if dataset_dir is None:
        print("Train with Random Data")
        train_with_random_data(logdir)
    else:
        print("Train with Dataset")
        train_with_dataset(dataset_dir, label_dir, logdir)
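
# Example invocations:
#   python main.py                                                     # train on random data
#   python main.py --data <dataset_dir> --label <label_dir> --log logs/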