dnn_introduction_ex.cpp
// The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
/*
    This is an example illustrating the use of the deep learning tools from the
    dlib C++ Library.  It adapts dlib's introductory MNIST example: instead of
    the venerable LeNet, we train a small inception-style convolutional network
    to recognize hand written letters.  The network takes a 28x28 image as input
    and classifies it as one of 26 letter classes, labeled 0 through 25.

    The LeNet architecture this example grew out of comes from the paper
        LeCun, Yann, et al. "Gradient-based learning applied to document recognition."
        Proceedings of the IEEE 86.11 (1998): 2278-2324.
    except that the sigmoid non-linearities are replaced with rectified linear units.

    These tools will use CUDA and cuDNN to drastically accelerate network
    training and testing.  CMake should automatically find them if they are
    installed and configure things appropriately.  If not, the program will
    still run but will be much slower to execute.
*/
#include <dlib/dnn.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <dlib/data_io.h>

using namespace std;
using namespace dlib;
// The block below (disabled with #if 0) is an alternative, ResNet-style architecture
// built from residual blocks.  It is kept here for reference only.
#if 0
template <
int N,
template <typename> class BN,
int stride,
typename SUBNET
>
using block = BN<con<N,3,3,1,1,relu<BN<con<N,3,3,stride,stride,SUBNET>>>>>;
template <
template <int,template<typename>class,int,typename> class block,
int N,
template<typename>class BN,
typename SUBNET
>
using residual = add_prev1<block<N,BN,1,tag1<SUBNET>>>;
template <
template <int,template<typename>class,int,typename> class block,
int N,
template<typename>class BN,
typename SUBNET
>
using residual_down = add_prev2<avg_pool<2,2,2,2,skip1<tag2<block<N,BN,2,tag1<SUBNET>>>>>>;
template <typename SUBNET> using res = relu<residual<block,8,bn_con,SUBNET>>;
template <typename SUBNET> using ares = relu<residual<block,8,affine,SUBNET>>;
template <typename SUBNET> using res_down = relu<residual_down<block,8,bn_con,SUBNET>>;
template <typename SUBNET> using ares_down = relu<residual_down<block,8,affine,SUBNET>>;
const unsigned long number_of_classes = 26;
using net_type = loss_multiclass_log<fc<number_of_classes,
avg_pool_everything<
res<res<res<res_down<
repeat<9,res, // repeat this layer 9 times
res_down<
res<
input<matrix<unsigned char>>
>>>>>>>>>>;
#endif
#if 1
// An inception layer applies several different convolution (and pooling) blocks to
// the same input in parallel.  Here we define the blocks, convolutions with different
// kernel sizes, that we will use inside the inception layer.
template <typename SUBNET> using block_a1 = relu<con<10,1,1,1,1,SUBNET>>;
template <typename SUBNET> using block_a2 = relu<con<10,3,3,1,1,relu<con<16,1,1,1,1,SUBNET>>>>;
template <typename SUBNET> using block_a3 = relu<con<10,5,5,1,1,relu<con<16,1,1,1,1,SUBNET>>>>;
template <typename SUBNET> using block_a4 = relu<con<10,1,1,1,1,max_pool<3,3,1,1,SUBNET>>>;

// Here is the inception layer definition.  It runs the input through each of the
// blocks and returns their combined output.  Dlib includes a number of these
// inceptionN layer types, which are themselves created using concat layers.
template <typename SUBNET> using incept_a = inception4<block_a1,block_a2,block_a3,block_a4, SUBNET>;

// A network can contain inception layers of different structure.  Everything will
// work properly so long as all the sub-blocks inside a particular inception block
// output tensors with the same number of rows and columns.
template <typename SUBNET> using block_b1 = relu<con<4,1,1,1,1,SUBNET>>;
template <typename SUBNET> using block_b2 = relu<con<4,3,3,1,1,SUBNET>>;
template <typename SUBNET> using block_b3 = relu<con<4,1,1,1,1,max_pool<3,3,1,1,SUBNET>>>;
template <typename SUBNET> using incept_b = inception3<block_b1,block_b2,block_b3,SUBNET>;

// Now we can define a simple network for classifying 28x28 letter images into 26
// classes.  We will train and test this network in the code below.
using net_type = loss_multiclass_log<
        fc<26,
        relu<fc<32,
        max_pool<2,2,2,2,incept_b<
        max_pool<2,2,2,2,incept_a<
        input<matrix<unsigned char>>
        >>>>>>>>;
#endif
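
// As a quick sanity check on the architecture: in incept_a the four sub-blocks output
// 10 channels each, so the concatenation produces 10+10+10+10 = 40 channels, while
// incept_b produces 4+4+4 = 12 channels (this is just a reading of the block
// definitions above, not something the program computes).  If you want dlib to print
// the layer structure for you, one option is to stream a network object to std::cout,
// roughly like this (a hedged sketch, not part of the original example):
//
//     net_type probe;
//     std::cout << probe << std::endl;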
int main(int argc, char** argv) try
{
    // This example expects a letter dataset in CSV form: each line holds a label in
    // the range 0 to 25 followed by the 784 pixel values of a 28x28 image.
    if (argc != 2)
    {
        cout << "This example needs a letter dataset in CSV format to run!" << endl;
        cout << "Each line must contain a label between 0 and 25 followed by the 784" << endl;
        cout << "comma separated pixel values of a 28x28 image.  Give that CSV file as" << endl;
        cout << "the single input argument to this program." << endl;
        return 1;
    }
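
    // For example, assuming a hypothetical file named letters.csv in the current
    // directory, the program would be invoked as (a usage sketch, not output of the
    // program):
    //
    //     ./dnn_introduction_ex letters.csv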
    // The dataset is loaded from the CSV file given on the command line and then split
    // into a training set and a testing set below.  Each line is parsed into a label
    // and a 28x28 image.
    std::vector<matrix<unsigned char>> training_images;
    std::vector<unsigned long>         training_labels;
    std::vector<matrix<unsigned char>> testing_images;
    std::vector<unsigned long>         testing_labels;

    std::ifstream data(argv[1]);
    std::string line;
    while (std::getline(data, line))
    {
        std::stringstream lineStream(line);
        std::string cell;

        // The first cell of a line is the class label, which must be in the range 0-25.
        if (!std::getline(lineStream, cell, ','))
        {
            cerr << "failed to get first cell" << endl;
            return 1;
        }
        int letter = stoi(cell);
        if (letter > 25)
        {
            cerr << "invalid letter " << letter << endl;
            return 1;
        }

        // The remaining 784 cells are the pixel values.  Note that the image is filled
        // as picture(col,row), i.e. transposed relative to the row-major order of the
        // CSV data.
        matrix<unsigned char, 28, 28> picture;
        int col = 0;
        int row = 0;
        while (std::getline(lineStream, cell, ','))
        {
            if (row > 27)
            {
                cerr << "invalid row " << training_images.size() << endl;
                return 1;
            }
            picture(col, row) = stoi(cell);
            col++;
            if (col > 27)
            {
                col = 0;
                row++;
            }
        }
        training_images.push_back(picture);
        training_labels.push_back(letter);
    }
    cout << "loaded " << training_images.size() << " samples" << endl;

    // Shuffle the samples and hold out the last third of them as a testing set.
    randomize_samples(training_images, training_labels);
    for (size_t i = 0; i < training_images.size() / 3; i++)
    {
        testing_images.push_back(training_images[training_images.size() - i - 1]);
        testing_labels.push_back(training_labels[training_images.size() - i - 1]);
    }
    training_images.resize(training_images.size() - testing_images.size());
    training_labels.resize(training_labels.size() - testing_labels.size());
    //load_mnist_dataset(argv[1], training_images, training_labels, testing_images, testing_labels);
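
    // If you want to eyeball a few of the loaded samples, one option is dlib's GUI
    // tools.  A hedged sketch (it additionally requires #include <dlib/gui_widgets.h>
    // and a dlib build with GUI support):
    //
    //     image_window win(training_images[0], "first training sample");
    //     win.wait_until_closed();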
    // Now let's look at the network definition given above.  Broadly speaking, there
    // are 3 parts to a network definition: the loss layer, a bunch of computational
    // layers, and then an input layer.  You can see these components in net_type.
    //
    // The input layer here says the network expects to be given matrix<unsigned char>
    // objects as input.  In general, you can use any dlib image or matrix type here, or
    // even define your own types by creating custom input layers.
    //
    // Then the middle layers define the computation the network will do to transform the
    // input into whatever we want.  Here we run the image through inception blocks,
    // ReLU units, max pooling operations, and then finally fully connected layers that
    // convert the whole thing into just 26 numbers.
    //
    // Finally, the loss layer defines the relationship between the network outputs, our
    // 26 numbers, and the labels in our dataset.  Since we selected loss_multiclass_log
    // it means we want to do multiclass classification with our network.  Moreover, the
    // number of network outputs (i.e. 26) is the number of possible labels.  Whichever
    // network output is largest is the predicted label.  So for example, if the first
    // network output is largest then the predicted label is 0, and if the last network
    // output is largest then the predicted label is 25.
    // An alternative, LeNet-style architecture is kept here for reference but disabled.
#if 0
    using net_type = loss_multiclass_log<
                fc<26,
                relu<fc<218,
                relu<fc<1008,
                //relu<fc<500,
                //relu<fc<100,
                //relu<fc<50,
                //relu<fc<784,
                max_pool<2,2,2,2,relu<con<16,5,5,1,1,
                max_pool<2,2,2,2,relu<con<6,5,5,1,1,
                input<matrix<unsigned char>>
                >>>>>>>>>>
                //>>
                //>>
                //>>
                //>>
                >>;
#endif
    // A net_type defines the entire network architecture.  For example, the block
    // relu<fc<32,SUBNET>> means we take the output from the subnetwork, pass it through
    // a fully connected layer with 32 outputs, then apply ReLU.  Similarly, a block of
    // max_pool<2,2,2,2,incept_a<SUBNET>> means we run the output of a subnetwork through
    // the incept_a inception block defined above and then perform max pooling with a
    // 2x2 window and 2x2 stride.
    // So with that out of the way, we can make a network instance.
    net_type net;
    // And then train it using our training data.  The code below uses mini-batch
    // stochastic gradient descent with an initial learning rate of 0.01 to accomplish
    // this.  The trainer lowers the learning rate whenever it goes 3000 mini-batches
    // without measurable progress on the loss.
    dnn_trainer<net_type> trainer(net);
    trainer.set_iterations_without_progress_threshold(3000);
    trainer.set_learning_rate(0.01);
    trainer.set_min_learning_rate(0.00001);
    trainer.set_mini_batch_size(512);
    trainer.be_verbose();
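
    // If you ever need finer control over the optimization loop, dnn_trainer also
    // exposes train_one_step().  A hedged sketch of how that could look, where
    // mini_batch_images and mini_batch_labels are hypothetical vectors you would fill
    // with one mini-batch at a time:
    //
    //     while (trainer.get_learning_rate() >= trainer.get_min_learning_rate())
    //     {
    //         trainer.train_one_step(mini_batch_images, mini_batch_labels);
    //     }
    //     trainer.get_net();  // blocks until the trainer's threads have synced the net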
    // Since DNN training can take a long time, we can ask the trainer to save its state
    // to a file named "mnist_sync" every 20 seconds.  This way, if we kill this program
    // and start it again it will begin where it left off rather than restarting the
    // training from scratch.  This is because, when the program restarts, this call to
    // set_synchronization_file() will automatically reload the settings from mnist_sync
    // if the file exists.
    trainer.set_synchronization_file("mnist_sync", std::chrono::seconds(20));
    // Finally, this line begins training.  By default, it runs SGD with our specified
    // learning rate until the loss stops decreasing.  Then it reduces the learning rate
    // by a factor of 10 and continues running until the loss stops decreasing again.
    // It will keep doing this until the learning rate has dropped below the min learning
    // rate defined above or the maximum number of epochs has been executed (defaults to
    // 10000).
    trainer.train(training_images, training_labels);
    // At this point our net object should have learned how to classify these letter
    // images.  But before we try it out let's save it to disk.  Note that, since the
    // trainer has been running images through the network, net will have a bunch of
    // state in it related to the last batch of images it processed (e.g. outputs from
    // each layer).  Since we don't care about saving that kind of stuff to disk we can
    // tell the network to forget about that kind of transient data so that our file
    // will be smaller.  We do this by "cleaning" the network before saving it.
    net.clean();
    serialize("mnist_network.dat") << net;
    // Now if we later wanted to recall the network from disk we can simply say:
    // deserialize("mnist_network.dat") >> net;
    // Now let's run the training images through the network.  This statement runs all
    // the images through it and asks the loss layer to convert the network's raw output
    // into labels.  In our case, these labels are the numbers between 0 and 25.
    std::vector<unsigned long> predicted_labels = net(training_images);
    int num_right = 0;
    int num_wrong = 0;
    // And then let's see if it classified them correctly.
    for (size_t i = 0; i < training_images.size(); ++i)
    {
        if (predicted_labels[i] == training_labels[i])
            ++num_right;
        else
            ++num_wrong;
    }
    cout << "training num_right: " << num_right << endl;
    cout << "training num_wrong: " << num_wrong << endl;
    cout << "training accuracy:  " << num_right/(double)(num_right+num_wrong) << endl;
    // Let's also see if the network can correctly classify the testing images.  Since
    // the network never trained on these held-out samples, this accuracy is a better
    // indication of real performance than the training accuracy above.
    predicted_labels = net(testing_images);
    num_right = 0;
    num_wrong = 0;
    for (size_t i = 0; i < testing_images.size(); ++i)
    {
        if (predicted_labels[i] == testing_labels[i])
            ++num_right;
        else
            ++num_wrong;
    }
    cout << "testing num_right: " << num_right << endl;
    cout << "testing num_wrong: " << num_wrong << endl;
    cout << "testing accuracy:  " << num_right/(double)(num_right+num_wrong) << endl;
    // Finally, you can also save network parameters to XML files if you want to do
    // something with the network in another tool.  For example, you could use dlib's
    // tools/convert_dlib_nets_to_caffe to convert the network to a Caffe model.
    net_to_xml(net, "lenet.xml");
}
catch(std::exception& e)
{
    cout << e.what() << endl;
}