train.cpp
#include "src/mnist.h"
#include "src/network.h"
#include "src/layer.h"
#include <iomanip>
#include <nvtx3/nvToolsExt.h>
using namespace cudl;

int main(int argc, char* argv[])
{
    /* configure the network */
    int batch_size_train = 256;
    int num_steps_train = 2400;
    int monitoring_step = 200;

    double initial_learning_rate = 0.02;
    double learning_rate = 0.0;
    double lr_decay = 0.0005;

    bool load_pretrain = false;
    bool file_save = false;

    int batch_size_test = 10;
    int num_steps_test = 1000;

    /* Welcome Message */
    std::cout << "== MNIST training with CUDNN ==" << std::endl;

    // phase 1. training
    std::cout << "[TRAIN]" << std::endl;

    // step 1. load the training dataset
    MNIST train_data_loader = MNIST("./dataset");
    train_data_loader.train(batch_size_train, true);

    // step 2. model initialization
    Network model;
    model.add_layer(new Conv2D("conv1", 20, 5));
    model.add_layer(new Pooling("pool1", 2, 0, 2, CUDNN_POOLING_MAX));
    model.add_layer(new Conv2D("conv2", 50, 5));
    model.add_layer(new Pooling("pool2", 2, 0, 2, CUDNN_POOLING_MAX));
    model.add_layer(new Dense("dense1", 500));
    model.add_layer(new Activation("relu", CUDNN_ACTIVATION_RELU));
    model.add_layer(new Dense("dense2", 10));
    model.add_layer(new Softmax("softmax"));
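
    // The stack above is a LeNet-style CNN. Assuming 28x28x1 MNIST inputs,
    // unpadded 5x5 convolutions, and 2x2/stride-2 max pooling (reading the
    // constructors as Conv2D(name, out_channels, kernel_size) and
    // Pooling(name, window, padding, stride, mode)), activations flow as:
    // 28x28x1 -> conv1 -> 24x24x20 -> pool1 -> 12x12x20 -> conv2 -> 8x8x50
    // -> pool2 -> 4x4x50 -> dense1 -> 500 -> relu -> dense2 -> 10 -> softmax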

    model.cuda();

    if (load_pretrain)
        model.load_pretrain();
    model.train();

    // step 3. train
    int step = 0;
    Blob<float> *train_data = train_data_loader.get_data();
    Blob<float> *train_target = train_data_loader.get_target();
    train_data_loader.get_batch();
    int tp_count = 0;

    while (step < num_steps_train)
    {
        // nvtx profiling start
        std::string nvtx_message = std::string("step" + std::to_string(step));
        nvtxRangePushA(nvtx_message.c_str());

        // update shared buffer contents
        train_data->to(cuda);
        train_target->to(cuda);

        // forward
        model.forward(train_data);
        tp_count += model.get_accuracy(train_target);

        // back-propagation
        model.backward(train_target);

        // update parameters, applying inverse time decay to the learning rate
        learning_rate = initial_learning_rate / (1.0 + lr_decay * step);
        model.update(learning_rate);
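
        // With initial_learning_rate = 0.02 and lr_decay = 0.0005, this
        // schedule is lr(t) = 0.02 / (1 + 0.0005 * t): the learning rate
        // halves by step 2000 and decays smoothly rather than in stages.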

        // fetch next batch
        step = train_data_loader.next();

        // nvtx profiling end
        nvtxRangePop();

        // compute the softmax loss and report progress
        if (step % monitoring_step == 0)
        {
            float loss = model.loss(train_target);
            float accuracy = 100.f * tp_count / monitoring_step / batch_size_train;

            std::cout << "step: " << std::right << std::setw(4) << step
                      << ", loss: " << std::left << std::setw(5) << std::fixed
                      << std::setprecision(3) << loss
                      << ", accuracy: " << accuracy << "%" << std::endl;

            tp_count = 0;
        }
    }
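
    // Note: the reported training accuracy is a windowed average: tp_count
    // accumulates correct predictions over the last monitoring_step batches
    // (200 * 256 = 51200 samples) and is reset after every report.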

    // save trained parameters
    if (file_save)
        model.write_file();

    // phase 2. inference
    // step 1. load the test set
    std::cout << "[INFERENCE]" << std::endl;
    MNIST test_data_loader = MNIST("./dataset");
    test_data_loader.test(batch_size_test);

    // step 2. switch the model to inference mode
    model.test();

    // step 3. iterate the testing loop
    Blob<float> *test_data = test_data_loader.get_data();
    Blob<float> *test_target = test_data_loader.get_target();
    test_data_loader.get_batch();

    tp_count = 0;
    step = 0;

    while (step < num_steps_test)
    {
        // nvtx profiling start
        std::string nvtx_message = std::string("step" + std::to_string(step));
        nvtxRangePushA(nvtx_message.c_str());

        // update shared buffer contents
        test_data->to(cuda);
        test_target->to(cuda);

        // forward
        model.forward(test_data);
        tp_count += model.get_accuracy(test_target);

        // fetch next batch
        step = test_data_loader.next();

        // nvtx profiling stop
        nvtxRangePop();
    }
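
    // step here counts whole batches, so num_steps_test * batch_size_test
    // (1000 * 10 = 10000) samples are evaluated, i.e. the full MNIST test set.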

    // step 4. calculate loss and accuracy
    float loss = model.loss(test_target);
    float accuracy = 100.f * tp_count / num_steps_test / batch_size_test;

    std::cout << "loss: " << std::setw(4) << loss
              << ", accuracy: " << accuracy << "%" << std::endl;

    // Good bye
    std::cout << "Done." << std::endl;

    return 0;
}
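
// A plausible build line, assuming the repository's src/ layout and a CUDA
// toolkit with cuDNN installed (the project's own Makefile is authoritative):
//
//   nvcc -o train train.cpp src/*.cpp -lcudnn
//
// NVTX v3 (<nvtx3/nvToolsExt.h>) is header-only, so no separate NVTX link
// flag should be needed.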