forked from wkentaro/pytorch-fcn
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: learning_curve.py
executable file
·129 lines (106 loc) · 4.05 KB
/
learning_curve.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
#!/usr/bin/env python
from __future__ import division
import argparse
import os.path as osp
import matplotlib
matplotlib.use('Agg') # NOQA
import matplotlib.pyplot as plt
import pandas
import seaborn
def learning_curve(log_file):
    """Plot train/valid learning curves from a CSV log and save them as PNG.

    Reads ``log_file`` (a CSV with ``epoch``, ``iteration`` and
    ``train/*`` / ``valid/*`` metric columns), draws a 2x3 grid of
    loss / log-loss / label-accuracy subplots (top row: train, bottom
    row: valid), and writes the figure next to the log file with a
    ``.png`` extension.

    Parameters
    ----------
    log_file : str
        Path to the CSV training log.

    Raises
    ------
    ValueError
        If iterations-per-epoch cannot be inferred because no rows with
        ``epoch == 1`` survive the rolling-mean smoothing.
    """
    print('==> Plotting log file: %s' % log_file)

    df = pandas.read_csv(log_file)

    colors = seaborn.xkcd_palette(['red', 'green', 'blue', 'purple', 'orange'])

    plt.figure(figsize=(20, 6), dpi=300)

    # Global min/max per column, used to give train/valid subplots a
    # shared y-range so the two rows are visually comparable.
    row_min = df.min()
    row_max = df.max()

    # initialize DataFrame for train
    columns = [
        'epoch',
        'iteration',
        'train/loss',
        'train/acc',
        'train/acc_cls',
        'train/mean_iu',
        'train/fwavacc',
    ]
    # .copy(): df[columns] is a view-like slice of df; assigning new
    # columns to it later would raise SettingWithCopyWarning (or silently
    # not stick) in pandas.
    df_train = df[columns].copy()
    # Smooth the noisy per-iteration train metrics with a rolling mean.
    # pandas.rolling_mean was removed in pandas 0.18, hence the fallback
    # for very old pandas versions.
    if hasattr(df_train, 'rolling'):
        df_train = df_train.rolling(window=10).mean()
    else:
        df_train = pandas.rolling_mean(df_train, window=10)
    df_train = df_train.dropna()

    # Infer iterations-per-epoch from the first logged row of epoch 1
    # so the x axis can be expressed in fractional epochs.
    epoch1_iters = df_train[df_train['epoch'] == 1]['iteration'].values
    if len(epoch1_iters) == 0:
        raise ValueError(
            'Cannot infer iterations per epoch: no rows with epoch == 1 '
            'remain after smoothing in %s' % log_file)
    iter_per_epoch = epoch1_iters[0]
    df_train['epoch_detail'] = df_train['iteration'] / iter_per_epoch

    # initialize DataFrame for val
    columns = [
        'epoch',
        'iteration',
        'valid/loss',
        'valid/acc',
        'valid/acc_cls',
        'valid/mean_iu',
        'valid/fwavacc',
    ]
    df_valid = df[columns].copy()  # .copy() for the same reason as df_train
    # Validation metrics are only logged periodically; drop the NaN rows
    # in between.
    df_valid = df_valid.dropna()
    df_valid['epoch_detail'] = df_valid['iteration'] / iter_per_epoch

    data_frames = {'train': df_train, 'valid': df_valid}

    n_row = 2
    n_col = 3
    for i, split in enumerate(['train', 'valid']):
        df_split = data_frames[split]
        # loss
        plt.subplot(n_row, n_col, i * n_col + 1)
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        plt.plot(df_split['epoch_detail'], df_split['%s/loss' % split], '-',
                 markersize=1, color=colors[0], alpha=.5,
                 label='%s loss' % split)
        plt.xlim((0, row_max['epoch']))
        # Shared y-range across train and valid for easy comparison.
        plt.ylim((min(row_min['train/loss'], row_min['valid/loss']),
                  max(row_max['train/loss'], row_max['valid/loss'])))
        plt.xlabel('epoch')
        plt.ylabel('%s loss' % split)
        # loss (log)
        plt.subplot(n_row, n_col, i * n_col + 2)
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        plt.semilogy(df_split['epoch_detail'], df_split['%s/loss' % split],
                     '-', markersize=1, color=colors[0], alpha=.5,
                     label='%s loss' % split)
        plt.xlim((0, row_max['epoch']))
        plt.ylim((min(row_min['train/loss'], row_min['valid/loss']),
                  max(row_max['train/loss'], row_max['valid/loss'])))
        plt.xlabel('epoch')
        plt.ylabel('%s loss (log)' % split)
        # lbl accuracy
        plt.subplot(n_row, n_col, i * n_col + 3)
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        plt.plot(df_split['epoch_detail'], df_split['%s/acc' % split],
                 '-', markersize=1, color=colors[1], alpha=.5,
                 label='%s accuracy' % split)
        plt.plot(df_split['epoch_detail'], df_split['%s/acc_cls' % split],
                 '-', markersize=1, color=colors[2], alpha=.5,
                 label='%s accuracy class' % split)
        plt.plot(df_split['epoch_detail'], df_split['%s/mean_iu' % split],
                 '-', markersize=1, color=colors[3], alpha=.5,
                 label='%s mean IU' % split)
        plt.plot(df_split['epoch_detail'], df_split['%s/fwavacc' % split],
                 '-', markersize=1, color=colors[4], alpha=.5,
                 label='%s fwav accuracy' % split)
        plt.legend()
        plt.xlim((0, row_max['epoch']))
        plt.ylim((0, 1))  # all accuracy-style metrics live in [0, 1]
        plt.xlabel('epoch')
        plt.ylabel('%s label accuracy' % split)

    out_file = osp.splitext(log_file)[0] + '.png'
    plt.savefig(out_file)
    print('==> Wrote figure to: %s' % out_file)
def main():
    """Command-line entry point: plot the learning curve of one log file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('log_file')
    learning_curve(parser.parse_args().log_file)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()