@@ -6,9 +6,8 @@
 from chainer import training
 from chainer.training import extensions
 import numpy as np
-import argparse
 
-train, test = chainer.datasets.get_cifar100()
+train, test = chainer.datasets.get_cifar10()
 
 
 class MLP(chainer.Chain):
 
@@ -25,11 +24,6 @@ def __call__(self, x):
         h2 = F.relu(self.l2(h1))
         return self.l3(h2)
 
-parser = argparse.ArgumentParser(description='Chainer example: MNIST')
-parser.add_argument('--gpu', '-g', type=int, default=-1,
-                    help='GPU ID (negative value indicates CPU)')
-args = parser.parse_args()
-
 
 batchsize = 100
 train_iter = chainer.iterators.SerialIterator(train, batchsize)
@@ -38,8 +32,9 @@ def __call__(self, x):
 
 model = L.Classifier(MLP(784, 10))
 
-if args.gpu >= 0:
-    chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
+gpu = -1
+if gpu >= 0:
+    chainer.cuda.get_device(gpu).use()  # Make a specified GPU current
     model.to_gpu()  # Copy the model to the GPU
 
opt = chainer.optimizers.Adam()
@@ -49,11 +44,11 @@ def __call__(self, x):
 epoch = 10
 
 # Set up a trainer
-updater = training.StandardUpdater(train_iter, opt, device=args.gpu)
-trainer = training.Trainer(updater, (epoch, 'epoch'), out='result')
+updater = training.StandardUpdater(train_iter, opt, device=gpu)
+trainer = training.Trainer(updater, (epoch, 'epoch'), out='/tmp/result')
 
 # Evaluate the model with the test dataset for each epoch
-trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
+trainer.extend(extensions.Evaluator(test_iter, model, device=gpu))
 
 # Dump a computational graph from 'loss' variable at the first iteration
 # The "main" refers to the target link of the "main" optimizer.
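The diff cuts off at the comment about dumping the computational graph. For context, the stock Chainer trainer examples that this script follows usually finish by wiring up the reporting extensions and starting the loop; a minimal sketch of that tail, under the assumption that this script continues in the standard pattern (none of these lines are part of the commit):

# Assumed continuation (not in the diff): standard Chainer trainer boilerplate.
trainer.extend(extensions.dump_graph('main/loss'))  # dump the graph rooted at 'main/loss'
trainer.extend(extensions.LogReport())              # accumulate per-epoch statistics
trainer.extend(extensions.PrintReport(              # print selected stats to stdout
    ['epoch', 'main/loss', 'validation/main/loss',
     'main/accuracy', 'validation/main/accuracy']))
trainer.run()                                       # run the training loop

With argparse removed, the script no longer takes a --gpu flag; training runs on the CPU unless the hardcoded gpu variable is edited to a non-negative device ID.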