Skip to content

Commit 61beacd

Browse files
committed Nov 17, 2014
global variable fixes (found via th -g)
1 parent 49bb858 commit 61beacd

File tree

2 files changed

+140
-140
lines changed

2 files changed

+140
-140
lines changed
 

‎FlattenTable.lua

+6-6
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,10 @@ local function checkMapping(output, input, input_map)
3939
end
4040
-- forward DFS order
4141
for i = 1, #input do
42-
ok = checkMapping(output, input[i], input_map[i])
43-
if not ok then
44-
return false
45-
end
42+
local ok = checkMapping(output, input[i], input_map[i])
43+
if not ok then
44+
return false
45+
end
4646
end
4747
return true
4848
else
@@ -77,7 +77,7 @@ function FlattenTable:updateOutput(input)
7777
self.input_map = flatten(self.output, input)
7878
end
7979
return self.output
80-
end
80+
end
8181

8282
function FlattenTable:updateGradInput(input, gradOutput)
8383
assert(type(input) == 'table', 'input must be a table')
@@ -90,7 +90,7 @@ function FlattenTable:updateGradInput(input, gradOutput)
9090
if not checkMapping(gradOutput, self.gradInput, self.input_map) then
9191
self.gradInput = inverseFlatten(gradOutput, self.input_map)
9292
end
93-
93+
9494
return self.gradInput
9595
end
9696

‎test/test.lua

+134-134
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
-- you can easily test specific units like this:
1+
-- you can easily test specific units like this:
22
-- th -lnn -e "nn.test{'LookupTable'}"
33
-- th -lnn -e "nn.test{'LookupTable', 'Add'}"
44

@@ -66,7 +66,7 @@ function nntest.Add()
6666
local ferr,berr = jac.testIO(module,input)
6767
mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
6868
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
69-
end
69+
end
7070
end
7171

7272
function nntest.CMul()
@@ -160,12 +160,12 @@ function nntest.HardTanh()
160160
local inj = math.random(3,5)
161161
local ink = math.random(3,5)
162162
local input = torch.Tensor(ink, inj, ini):zero()
163-
163+
164164
local module = nn.HardTanh()
165-
165+
166166
local err = jac.testJacobian(module, input)
167167
mytester:assertlt(err, precision , 'error on state ')
168-
168+
169169
local ferr, berr = jac.testIO(module, input)
170170
mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
171171
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
@@ -176,12 +176,12 @@ function nntest.Abs()
176176
local inj = math.random(3,5)
177177
local ink = math.random(3,5)
178178
local input = torch.Tensor(ink, inj, ini):zero()
179-
179+
180180
local module = nn.Abs()
181-
181+
182182
local err = jac.testJacobian(module, input)
183183
mytester:assertlt(err, precision , 'error on state ')
184-
184+
185185
local ferr, berr = jac.testIO(module, input)
186186
mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
187187
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
@@ -376,25 +376,25 @@ function nntest.SparseLinear()
376376
local ini = math.random(50,100)
377377
local inj = math.random(5,10)
378378
local numNonzero = math.random(3,5)
379-
379+
380380
local module = nn.SparseLinear(ini,inj)
381381

382382
-- Create a random sparse vector
383-
N = {}
383+
local N = {}
384384
for i = 1, ini do N[i] = i end
385-
for i = 1, numNonzero do
385+
for i = 1, numNonzero do
386386
local j = math.random(i,ini)
387387
N[i], N[j] = N[j], N[i]
388-
end
388+
end
389389
local input = torch.Tensor(numNonzero, 2):zero()
390390
for i = 1, numNonzero do input[{i,1}] = N[i] end
391391
local values = input:select(2,2)
392392
values:copy(torch.rand(values:nElement())):mul(2):add(-1)
393-
393+
394394
-- Check output
395395
local actual = module:forward(input)
396396
local expected = torch.Tensor(inj)
397-
for j = 1, inj do
397+
for j = 1, inj do
398398
expected[j] = 0
399399
for i = 1,numNonzero do
400400
expected[j] = expected[j] + values[i] * module.weight[{j, N[i]}]
@@ -412,13 +412,13 @@ function nntest.SparseLinear()
412412

413413
local err = sjac.testJacobianParameters(module, input, module.bias, module.gradBias)
414414
mytester:assertlt(err,precision, 'error on bias ')
415-
415+
416416
local err = sjac.testJacobianUpdateParameters(module, input, module.weight)
417417
mytester:assertlt(err,precision, 'error on weight [direct update] ')
418418

419419
local err = sjac.testJacobianUpdateParameters(module, input, module.bias)
420420
mytester:assertlt(err,precision, 'error on bias [direct update] ')
421-
421+
422422
for t,err in pairs(sjac.testAllUpdate(module, input, 'weight', 'gradWeight')) do
423423
mytester:assertlt(err, precision, string.format(
424424
'error on weight [%s]', t))
@@ -483,7 +483,7 @@ local function criterionJacobianTest1D(cri, input, target)
483483
local fx1 = cri:forward(input, target)
484484
-- f(xi - h)
485485
input[i] = input[i] - 2*eps
486-
local fx2 = cri:forward(input, target)
486+
local fx2 = cri:forward(input, target)
487487
-- f'(xi) = (f(xi + h) - f(xi - h)) / 2h
488488
local cdfx = (fx1 - fx2) / (2*eps)
489489
-- store f' in appropriate place
@@ -501,14 +501,14 @@ function nntest.MSECriterion()
501501
local input = torch.rand(10)
502502
local target = input:clone():add(torch.rand(10))
503503
local cri = nn.MSECriterion()
504-
criterionJacobianTest1D(cri, input, target)
504+
criterionJacobianTest1D(cri, input, target)
505505
end
506506

507507
function nntest.MarginCriterion()
508508
local input = torch.rand(100)
509509
local target = input:clone():add(torch.rand(100))
510510
local cri = nn.MarginCriterion()
511-
criterionJacobianTest1D(cri, input, target)
511+
criterionJacobianTest1D(cri, input, target)
512512
end
513513

514514
function nntest.WeightedMSECriterion()
@@ -536,9 +536,9 @@ function nntest.DistKLDivCriterion()
536536
end
537537

538538
function nntest.ClassNLLCriterion()
539-
local numLabels = math.random(5,10)
539+
local numLabels = math.random(5,10)
540540
local input = torch.rand(numLabels)
541-
local target = math.random(1,numLabels)
541+
local target = math.random(1,numLabels)
542542

543543
-- default ClassNLLCriterion
544544
local cri = nn.ClassNLLCriterion()
@@ -814,19 +814,19 @@ function nntest.SpatialConvolution()
814814
local input = torch.Tensor(from, inj, ini):zero()
815815

816816
-- stochastic
817-
817+
818818
local err = jac.testJacobian(module, input)
819819
mytester:assertlt(err, precision, 'error on state ')
820-
820+
821821
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
822822
mytester:assertlt(err , precision, 'error on weight ')
823-
823+
824824
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
825825
mytester:assertlt(err , precision, 'error on bias ')
826826

827827
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
828828
mytester:assertlt(err , precision, 'error on weight [direct update] ')
829-
829+
830830
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
831831
mytester:assertlt(err , precision, 'error on bias [direct update] ')
832832

@@ -841,7 +841,7 @@ function nntest.SpatialConvolution()
841841
end
842842

843843
-- batch
844-
844+
845845
--verbose = true
846846
local batch = math.random(2,5)
847847
outi = math.random(4,8)
@@ -857,16 +857,16 @@ function nntest.SpatialConvolution()
857857

858858
local err = jac.testJacobian(module, input)
859859
mytester:assertlt(err, precision, 'batch error on state ')
860-
860+
861861
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
862862
mytester:assertlt(err , precision, 'batch error on weight ')
863-
863+
864864
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
865865
mytester:assertlt(err , precision, 'batch error on bias ')
866866

867867
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
868868
mytester:assertlt(err , precision, 'batch error on weight [direct update] ')
869-
869+
870870
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
871871
mytester:assertlt(err , precision, 'batch error on bias [direct update] ')
872872

@@ -879,7 +879,7 @@ function nntest.SpatialConvolution()
879879
mytester:assertlt(err, precision, string.format(
880880
'batch error on bias [%s]', t))
881881
end
882-
882+
883883
local ferr, berr = jac.testIO(module, input)
884884
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
885885
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -898,19 +898,19 @@ function nntest.SpatialConvolutionMM()
898898
local input = torch.Tensor(from, inj, ini):zero()
899899

900900
-- stochastic
901-
901+
902902
local err = jac.testJacobian(module, input)
903903
mytester:assertlt(err, precision, 'error on state ')
904-
904+
905905
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
906906
mytester:assertlt(err , precision, 'error on weight ')
907-
907+
908908
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
909909
mytester:assertlt(err , precision, 'error on bias ')
910910

911911
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
912912
mytester:assertlt(err , precision, 'error on weight [direct update] ')
913-
913+
914914
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
915915
mytester:assertlt(err , precision, 'error on bias [direct update] ')
916916

@@ -925,7 +925,7 @@ function nntest.SpatialConvolutionMM()
925925
end
926926

927927
-- batch
928-
928+
929929
--verbose = true
930930
local batch = math.random(2,5)
931931
outi = math.random(4,8)
@@ -937,16 +937,16 @@ function nntest.SpatialConvolutionMM()
937937

938938
local err = jac.testJacobian(module, input)
939939
mytester:assertlt(err, precision, 'batch error on state ')
940-
940+
941941
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
942942
mytester:assertlt(err , precision, 'batch error on weight ')
943-
943+
944944
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
945945
mytester:assertlt(err , precision, 'batch error on bias ')
946946

947947
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
948948
mytester:assertlt(err , precision, 'batch error on weight [direct update] ')
949-
949+
950950
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
951951
mytester:assertlt(err , precision, 'batch error on bias [direct update] ')
952952

@@ -959,7 +959,7 @@ function nntest.SpatialConvolutionMM()
959959
mytester:assertlt(err, precision, string.format(
960960
'batch error on bias [%s]', t))
961961
end
962-
962+
963963
local ferr, berr = jac.testIO(module, input)
964964
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
965965
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -980,13 +980,13 @@ function nntest.SpatialConvolutionMap()
980980

981981
local module = nn.SpatialConvolutionMap(nn.tables.random(from, to, fanin), ki, kj, si, sj)
982982
local input = torch.Tensor(from, inj, ini):zero()
983-
983+
984984
local err = jac.testJacobian(module, input)
985985
mytester:assertlt(err, precision, 'error on state ')
986-
986+
987987
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
988988
mytester:assertlt(err , precision, 'error on weight ')
989-
989+
990990
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
991991
mytester:assertlt(err , precision, 'error on bias ')
992992

@@ -999,7 +999,7 @@ function nntest.SpatialConvolutionMap()
999999
mytester:assertlt(err, precision, string.format(
10001000
'error on bias [%s]', t))
10011001
end
1002-
1002+
10031003
local ferr, berr = jac.testIO(module, input)
10041004
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
10051005
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1055,20 +1055,20 @@ function nntest.SpatialFullConvolution()
10551055
local inj = math.random(5,8)
10561056
local module = nn.SpatialFullConvolution(from, to, ki, kj, si, sj)
10571057
local input = torch.Tensor(from, inj, ini):zero()
1058-
1058+
10591059
-- stochastic
10601060
local err = jac.testJacobian(module, input)
10611061
mytester:assertlt(err, precision, 'error on state ')
1062-
1062+
10631063
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
10641064
mytester:assertlt(err , precision, 'error on weight ')
1065-
1065+
10661066
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
10671067
mytester:assertlt(err , precision, 'error on bias ')
10681068

10691069
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
10701070
mytester:assertlt(err , precision, 'error on weight [direct update] ')
1071-
1071+
10721072
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
10731073
mytester:assertlt(err , precision, 'error on bias [direct update] ')
10741074

@@ -1091,16 +1091,16 @@ function nntest.SpatialFullConvolution()
10911091

10921092
local err = jac.testJacobian(module, input)
10931093
mytester:assertlt(err, precision, 'batch error on state ')
1094-
1094+
10951095
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
10961096
mytester:assertlt(err , precision, 'batch error on weight ')
1097-
1097+
10981098
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
10991099
mytester:assertlt(err , precision, 'batch error on bias ')
11001100

11011101
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
11021102
mytester:assertlt(err , precision, 'batch error on weight [direct update] ')
1103-
1103+
11041104
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
11051105
mytester:assertlt(err , precision, 'batch error on bias [direct update] ')
11061106

@@ -1113,7 +1113,7 @@ function nntest.SpatialFullConvolution()
11131113
mytester:assertlt(err, precision, string.format(
11141114
'batch error on bias [%s]', t))
11151115
end
1116-
1116+
11171117
local ferr, berr = jac.testIO(module, input)
11181118
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
11191119
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1132,20 +1132,20 @@ function nntest.SpatialFullConvolutionMap()
11321132
local inj = math.random(5,7)
11331133
local module = nn.SpatialFullConvolutionMap(tt, ki, kj, si, sj)
11341134
local input = torch.Tensor(from, inj, ini):zero()
1135-
1135+
11361136
-- stochastic
11371137
local err = jac.testJacobian(module, input)
11381138
mytester:assertlt(err, precision, 'error on state ')
1139-
1139+
11401140
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
11411141
mytester:assertlt(err , precision, 'error on weight ')
1142-
1142+
11431143
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
11441144
mytester:assertlt(err , precision, 'error on bias ')
11451145

11461146
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
11471147
mytester:assertlt(err , precision, 'error on weight [direct update] ')
1148-
1148+
11491149
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
11501150
mytester:assertlt(err , precision, 'error on bias [direct update] ')
11511151

@@ -1158,7 +1158,7 @@ function nntest.SpatialFullConvolutionMap()
11581158
mytester:assertlt(err, precision, string.format(
11591159
'error on bias [%s]', t))
11601160
end
1161-
1161+
11621162
local ferr, berr = jac.testIO(module, input)
11631163
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
11641164
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1223,7 +1223,7 @@ local function batchcompare(smod, sin, plist)
12231223

12241224
smod:accGradParameters(sin, sgout, 1)
12251225
bmod:accGradParameters(bin, bgout, 1)
1226-
1226+
12271227
mytester:assertTensorEq(sout,bout:select(1,1), 1e-8, 'batchcompare error on output')
12281228
mytester:assertTensorEq(sgin,bgin:select(1,1), 1e-8, 'batchcompare error on gradInput')
12291229

@@ -1265,7 +1265,7 @@ function nntest.SpatialFullConvolutionBatchCompare()
12651265

12661266
batchcompare(module,input, {'weight','bias','gradWeight','gradBias'})
12671267
end
1268-
1268+
12691269

12701270

12711271
function nntest.SpatialSubSamplingBatchCompare()
@@ -1296,19 +1296,19 @@ function nntest.SpatialSubSampling()
12961296
local inj = (outj-1)*sj+kj
12971297
local module = nn.SpatialSubSampling(from, ki, kj, si, sj)
12981298
local input = torch.Tensor(from, inj, ini):zero()
1299-
1299+
13001300
local err = jac.testJacobian(module, input)
13011301
mytester:assertlt(err, precision, 'error on state ')
1302-
1302+
13031303
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
13041304
mytester:assertlt(err , precision, 'error on weight ')
1305-
1305+
13061306
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
13071307
mytester:assertlt(err , precision, 'error on bias ')
13081308

13091309
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
13101310
mytester:assertlt(err , precision, 'error on weight [direct update] ')
1311-
1311+
13121312
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
13131313
mytester:assertlt(err , precision, 'error on bias [direct update] ')
13141314

@@ -1337,16 +1337,16 @@ function nntest.SpatialSubSampling()
13371337

13381338
local err = jac.testJacobian(module, input)
13391339
mytester:assertlt(err, precision, 'batch error on state ')
1340-
1340+
13411341
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
13421342
mytester:assertlt(err , precision, 'batch error on weight ')
1343-
1343+
13441344
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
13451345
mytester:assertlt(err , precision, 'batch error on bias ')
13461346

13471347
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
13481348
mytester:assertlt(err , precision, 'batch error on weight [direct update] ')
1349-
1349+
13501350
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
13511351
mytester:assertlt(err , precision, 'batch error on bias [direct update] ')
13521352

@@ -1359,7 +1359,7 @@ function nntest.SpatialSubSampling()
13591359
mytester:assertlt(err, precision, string.format(
13601360
'batch error on bias [%s]', t))
13611361
end
1362-
1362+
13631363
local ferr, berr = jac.testIO(module, input)
13641364
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
13651365
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1442,12 +1442,12 @@ function nntest.Tanh()
14421442
local inj = math.random(3,5)
14431443
local ink = math.random(3,5)
14441444
local input = torch.Tensor(ink, inj, ini):zero()
1445-
1445+
14461446
local module = nn.Tanh()
1447-
1447+
14481448
local err = jac.testJacobian(module, input)
14491449
mytester:assertlt(err, precision , 'error on state ')
1450-
1450+
14511451
local ferr, berr = jac.testIO(module, input)
14521452
mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
14531453
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
@@ -1463,13 +1463,13 @@ function nntest.TemporalConvolution()
14631463
local ini = (outi-1)*si+ki
14641464
local module = nn.TemporalConvolution(from, to, ki,si)
14651465
local input = torch.Tensor(ini, from):zero()
1466-
1466+
14671467
local err = jac.testJacobian(module, input)
14681468
mytester:assertlt(err, precision, 'error on state ')
1469-
1469+
14701470
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
14711471
mytester:assertlt(err , precision, 'error on weight ')
1472-
1472+
14731473
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
14741474
mytester:assertlt(err , precision, 'error on bias ')
14751475

@@ -1488,17 +1488,17 @@ function nntest.TemporalConvolution()
14881488
mytester:assertlt(err, precision, string.format(
14891489
'error on bias [%s]', t))
14901490
end
1491-
1491+
14921492
-- 2D
14931493
local nBatchFrame = 4
14941494
local input = torch.Tensor(nBatchFrame, ini, from):zero()
1495-
1495+
14961496
local err = jac.testJacobian(module, input)
14971497
mytester:assertlt(err, precision, 'error on state ')
1498-
1498+
14991499
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
15001500
mytester:assertlt(err , precision, 'error on weight ')
1501-
1501+
15021502
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
15031503
mytester:assertlt(err , precision, 'error on bias ')
15041504

@@ -1517,21 +1517,21 @@ function nntest.TemporalConvolution()
15171517
mytester:assertlt(err, precision, string.format(
15181518
'error on bias [%s]', t))
15191519
end
1520-
1520+
15211521
local ferr, berr = jac.testIO(module, input)
15221522
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
15231523
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
1524-
1524+
15251525
-- 2D matches 1D
15261526
local output = module:forward(input):clone()
15271527
local outputGrad = torch.randn(output:size())
15281528
local inputGrad = module:backward(input, outputGrad):clone()
1529-
1529+
15301530
local input1D = input:select(1, 2)
15311531
local output1D = module:forward(input1D)
15321532
local outputGrad1D = outputGrad:select(1, 2)
15331533
local inputGrad1D = module:backward(input1D, outputGrad1D)
1534-
1534+
15351535
mytester:assertTensorEq(output:select(1,2), output1D, 0.000001, 'error on 2D vs 1D forward)')
15361536
mytester:assertTensorEq(inputGrad:select(1,2), inputGrad1D, 0.000001, 'error on 2D vs 1D backward)')
15371537
end
@@ -1544,19 +1544,19 @@ function nntest.TemporalSubSampling()
15441544
local ini = (outi-1)*si+ki
15451545
local module = nn.TemporalSubSampling(from, ki, si)
15461546
local input = torch.Tensor(ini, from):zero()
1547-
1547+
15481548
local err = jac.testJacobian(module, input)
15491549
mytester:assertlt(err, precision, 'error on state ')
1550-
1550+
15511551
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
15521552
mytester:assertlt(err , precision, 'error on weight ')
1553-
1553+
15541554
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
15551555
mytester:assertlt(err , precision, 'error on bias ')
1556-
1556+
15571557
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
15581558
mytester:assertlt(err , precision, 'error on weight [direct update] ')
1559-
1559+
15601560
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
15611561
mytester:assertlt(err , precision, 'error on bias [direct update] ')
15621562

@@ -1601,17 +1601,17 @@ function nntest.TemporalMaxPooling()
16011601
local ferr, berr = jac.testIO(module, input)
16021602
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
16031603
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
1604-
1604+
16051605
-- 2D matches 1D
16061606
local output = module:forward(input):clone()
16071607
local outputGrad = torch.randn(output:size())
16081608
local inputGrad = module:backward(input, outputGrad):clone()
1609-
1609+
16101610
local input1D = input:select(1, 2)
16111611
local output1D = module:forward(input1D)
16121612
local outputGrad1D = outputGrad:select(1, 2)
16131613
local inputGrad1D = module:backward(input1D, outputGrad1D)
1614-
1614+
16151615
mytester:assertTensorEq(output:select(1,2), output1D, 0.000001, 'error on 2D vs 1D forward)')
16161616
mytester:assertTensorEq(inputGrad:select(1,2), inputGrad1D, 0.000001, 'error on 2D vs 1D backward)')
16171617
end
@@ -1633,19 +1633,19 @@ function nntest.VolumetricConvolution()
16331633
local inj = (outj-1)*sj+kj
16341634
local module = nn.VolumetricConvolution(from, to, kt, ki, kj, st, si, sj)
16351635
local input = torch.Tensor(from, int, inj, ini):zero()
1636-
1636+
16371637
local err = jac.testJacobian(module, input)
16381638
mytester:assertlt(err, precision, 'error on state ')
1639-
1639+
16401640
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight)
16411641
mytester:assertlt(err , precision, 'error on weight ')
1642-
1642+
16431643
local err = jac.testJacobianParameters(module, input, module.bias, module.gradBias)
16441644
mytester:assertlt(err , precision, 'error on bias ')
16451645

16461646
local err = jac.testJacobianUpdateParameters(module, input, module.weight)
16471647
mytester:assertlt(err , precision, 'error on weight [direct update] ')
1648-
1648+
16491649
local err = jac.testJacobianUpdateParameters(module, input, module.bias)
16501650
mytester:assertlt(err , precision, 'error on bias [direct update] ')
16511651

@@ -1658,7 +1658,7 @@ function nntest.VolumetricConvolution()
16581658
mytester:assertlt(err, precision, string.format(
16591659
'error on bias [%s]', t))
16601660
end
1661-
1661+
16621662
local ferr, berr = jac.testIO(module, input)
16631663
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
16641664
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1681,10 +1681,10 @@ function nntest.VolumetricMaxPooling()
16811681
local inj = (outj-1)*sj+kj
16821682
local module = nn.VolumetricMaxPooling(kt, ki, kj, st, si, sj)
16831683
local input = torch.Tensor(from, int, inj, ini):zero()
1684-
1684+
16851685
local err = jac.testJacobian(module, input)
16861686
mytester:assertlt(err, precision, 'error on state ')
1687-
1687+
16881688
local ferr, berr = jac.testIO(module, input)
16891689
mytester:asserteq(0, ferr, torch.typename(module) .. ' - i/o forward err ')
16901690
mytester:asserteq(0, berr, torch.typename(module) .. ' - i/o backward err ')
@@ -1833,12 +1833,12 @@ end
18331833
function nntest.Module_getParameters_8()
18341834
local function makeMLP(nin, ns)
18351835
local net = nn.Sequential()
1836-
1837-
for k,v in ipairs(ns) do
1836+
1837+
for k,v in ipairs(ns) do
18381838
net:add(nn.Linear(nin, v))
18391839
nin = v
18401840
end
1841-
_,_ = net:getParameters()
1841+
local _,_ = net:getParameters()
18421842
return net
18431843
end
18441844

@@ -1847,9 +1847,9 @@ function nntest.Module_getParameters_8()
18471847

18481848
local net = nn.Sequential():add(mlp1:get(1))
18491849
:add(mlp2:get(1))
1850-
1850+
18511851
-- clone the second MLP to ensure that the weights before calling getParameters are preserved
1852-
mlp2 = mlp2:clone()
1852+
mlp2 = mlp2:clone()
18531853

18541854
local p, gp = net:getParameters()
18551855

@@ -1858,7 +1858,7 @@ function nntest.Module_getParameters_8()
18581858
-- check that the weights have the same values as before get Parameters was called
18591859
mytester:asserteq((net.modules[1].weight - mlp1.modules[1].weight):norm(), 0, ' error when using partial realloc')
18601860
mytester:asserteq((net.modules[2].weight - mlp2.modules[1].weight):norm(), 0, ' error when using partial realloc')
1861-
1861+
18621862
end
18631863

18641864
function nntest.PairwiseDistance()
@@ -1876,17 +1876,17 @@ function nntest.PairwiseDistance()
18761876

18771877
local err = jac.testJacobian(module,input)
18781878
mytester:assertlt(err,precision, ' error on state ')
1879-
1879+
18801880
local ferr,berr = jac.testIO(module,input)
18811881
mytester:asserteq(ferr, 0, torch.typename(module)..' - i/o forward err ')
18821882
mytester:asserteq(berr, 0, torch.typename(module)..' - i/o backward err ')
18831883

18841884
-- Also check that the forward prop result is correct.
18851885
input = torch.rand(2, ini)
1886-
err = torch.dist(input:select(1,1), input:select(1,2), p) -
1886+
err = torch.dist(input:select(1,1), input:select(1,2), p) -
18871887
module:forward(input)[1]
1888-
mytester:assertlt(err,precision, ' error on non-batch fprop ')
1889-
1888+
mytester:assertlt(err,precision, ' error on non-batch fprop ')
1889+
18901890
-- TEST CASE 2: batch input
18911891
local inj = math.random(3,5)
18921892
input = torch.Tensor(2, inj, ini):zero()
@@ -1905,12 +1905,12 @@ function nntest.PairwiseDistance()
19051905
local inputb = torch.rand(inj,ini)
19061906
local dist_manual = torch.Tensor(inj)
19071907
for i=1, inputa:size(1) do
1908-
dist_manual[i] = torch.dist(inputa:select(1,i), inputb:select(1,i),p)
1908+
dist_manual[i] = torch.dist(inputa:select(1,i), inputb:select(1,i),p)
19091909
end
19101910
-- compare the distances to the module's fprop
19111911
local dist = module:forward(torch.cat(inputa,inputb,1):resize(2,inj,ini))
1912-
err = dist - dist_manual
1913-
mytester:assertlt(err:norm(), precision, torch.typename(module) ..
1912+
err = dist - dist_manual
1913+
mytester:assertlt(err:norm(), precision, torch.typename(module) ..
19141914
' error on batch fprop ')
19151915
end
19161916
end
@@ -1923,7 +1923,7 @@ function nntest.LookupTable()
19231923
local module = nn.LookupTable(totalIndex, entry_size)
19241924
local minval = 1
19251925
local maxval = totalIndex
1926-
1926+
19271927
local output = module:forward(input)
19281928
module:backwardUpdate(input, output, 0.1)
19291929
input:zero()
@@ -1934,7 +1934,7 @@ function nntest.LookupTable()
19341934

19351935
local err = jac.testJacobianUpdateParameters(module, input, module.weight, minval, maxval)
19361936
mytester:assertlt(err,precision, '1D error on weight [direct update] ')
1937-
1937+
19381938
module.gradWeight:zero()
19391939
for t,err in pairs(jac.testAllUpdate(module, input, 'weight', 'gradWeight')) do
19401940
mytester:assertlt(err, precision, string.format(
@@ -1947,7 +1947,7 @@ function nntest.LookupTable()
19471947

19481948
local err = jac.testJacobianParameters(module, input, module.weight, module.gradWeight, minval, maxval)
19491949
mytester:assertlt(err,precision, '2D error on weight ')
1950-
1950+
19511951
local err = jac.testJacobianUpdateParameters(module, input, module.weight, minval, maxval)
19521952
mytester:assertlt(err,precision, '2D error on weight [direct update] ')
19531953

@@ -1962,15 +1962,15 @@ function nntest.LookupTable()
19621962
local ferr,berr = jac.testIO(module,input,minval,maxval)
19631963
mytester:asserteq(ferr, 0, torch.typename(module) .. ' - i/o forward err ')
19641964
mytester:asserteq(berr, 0, torch.typename(module) .. ' - i/o backward err ')
1965-
1965+
19661966
-- accUpdate
19671967
module:accUpdateOnly()
19681968
mytester:assert(not module.gradWeight, 'gradWeight is nil')
19691969
module:float()
19701970
local output = module:forward(input)
19711971
module:backwardUpdate(input, output, 0.1)
19721972
end
1973-
1973+
19741974
function nntest.AddConstant()
19751975
local nbatch = torch.random(3, 5)
19761976
local f = torch.random(3, 5)
@@ -2049,18 +2049,18 @@ end
20492049

20502050
function nntest.SelectTable()
20512051
local input = {
2052-
torch.rand(3,4,5), torch.rand(3,4,5),
2053-
{torch.rand(3,4,5)},
2052+
torch.rand(3,4,5), torch.rand(3,4,5),
2053+
{torch.rand(3,4,5)},
20542054
{torch.rand(3,4,5), {torch.rand(3,4,5)}}
20552055
}
20562056
local gradOutputs = {
2057-
torch.rand(3,4,5), torch.rand(3,4,5),
2058-
{torch.rand(3,4,5)},
2057+
torch.rand(3,4,5), torch.rand(3,4,5),
2058+
{torch.rand(3,4,5)},
20592059
{torch.rand(3,4,5), {torch.rand(3,4,5)}}
20602060
}
20612061
local zeros = {
2062-
torch.Tensor(3,4,5):zero(), torch.Tensor(3,4,5):zero(),
2063-
{torch.Tensor(3,4,5):zero()},
2062+
torch.Tensor(3,4,5):zero(), torch.Tensor(3,4,5):zero(),
2063+
{torch.Tensor(3,4,5):zero()},
20642064
{torch.Tensor(3,4,5):zero(), {torch.Tensor(3,4,5):zero()}}
20652065
}
20662066
local nonIdx = {2,3,4,1}
@@ -2088,7 +2088,7 @@ function nntest.MixtureTable()
20882088
local expertInput = torch.randn(5,3,6)
20892089
local gradOutput = torch.randn(5,6)
20902090
local input = {
2091-
torch.rand(5,3),
2091+
torch.rand(5,3),
20922092
{expertInput:select(2,1), expertInput:select(2,2), expertInput:select(2,3)}
20932093
}
20942094
local module = nn.MixtureTable()
@@ -2111,13 +2111,13 @@ function nntest.MixtureTable()
21112111
local gradInput = module:backward(input, gradOutput)
21122112
mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture2 gater gradInput")
21132113
mytester:assertTensorEq(gradInput[2], expertGradInput2, 0.000001, "mixture2 expert gradInput")
2114-
2114+
21152115
--[[ 3D ]]--
21162116
local expertInput = torch.randn(5,6,3,2)
21172117
local gradOutput = torch.randn(5,6,2)
21182118
-- expertInput is a Table:
21192119
local input = {
2120-
torch.rand(5,3),
2120+
torch.rand(5,3),
21212121
{expertInput:select(3,1), expertInput:select(3,2), expertInput:select(3,3)}
21222122
}
21232123
local module = nn.MixtureTable()
@@ -2140,13 +2140,13 @@ function nntest.MixtureTable()
21402140
local gradInput = module:backward(input, gradOutput)
21412141
mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture4 gater gradInput")
21422142
mytester:assertTensorEq(gradInput[2], expertGradInput2, 0.000001, "mixture4 expert gradInput")
2143-
2143+
21442144
--[[ 1D ]]--
21452145
-- expertInput is a Table:
21462146
local expertInput = torch.randn(3,6)
21472147
local gradOutput = torch.randn(6)
21482148
local input = {
2149-
torch.rand(3),
2149+
torch.rand(3),
21502150
{expertInput:select(1,1), expertInput:select(1,2), expertInput:select(1,3)}
21512151
}
21522152
local module = nn.MixtureTable()
@@ -2164,7 +2164,7 @@ function nntest.MixtureTable()
21642164
-- test type-cast
21652165
module:float()
21662166
local input2 = {
2167-
input[1]:float(),
2167+
input[1]:float(),
21682168
{input[2][1]:float(), input[2][2]:float(), input[2][3]:float()}
21692169
}
21702170
local output = module:forward(input2)
@@ -2190,13 +2190,13 @@ function nntest.MixtureTable()
21902190
local gradInput = module:backward(input2, gradOutput:float())
21912191
mytester:assertTensorEq(gradInput[1], gaterGradInput2:float(), 0.000001, "mixture6B gater gradInput")
21922192
mytester:assertTensorEq(gradInput[2], expertGradInput2:float(), 0.000001, "mixture6B expert gradInput")
2193-
2193+
21942194
--[[ 2D gater, 1D expert]]--
21952195
-- expertInput is a Table:
21962196
local expertInput = torch.randn(5,3)
21972197
local gradOutput = torch.randn(5)
21982198
local input = {
2199-
torch.rand(5,3),
2199+
torch.rand(5,3),
22002200
{expertInput:select(2,1), expertInput:select(2,2), expertInput:select(2,3)}
22012201
}
22022202
local module = nn.MixtureTable()
@@ -2270,7 +2270,7 @@ function nntest.SpatialUpSamplingNearest()
22702270
table.insert(shape, torch.random(2, 2+dim-1))
22712271
end
22722272

2273-
-- Check that the gradient is correct by using finite elements
2273+
-- Check that the gradient is correct by using finite elements
22742274
local input = torch.Tensor(unpack(shape)):zero()
22752275

22762276
local err = jac.testJacobian(m, input)
@@ -2286,10 +2286,10 @@ function nntest.ConcatTable()
22862286
-- Test tensor input
22872287
local input = torch.rand(5, 5, 5)
22882288
local m = nn.Sequential()
2289-
2289+
22902290
local concat = nn.ConcatTable()
22912291
concat:add(nn.Identity())
2292-
2292+
22932293
m:add(concat) -- Output of concat is a table of length 1
22942294
m:add(nn.JoinTable(1)) -- jac needs a tensor tensor output
22952295

@@ -2308,7 +2308,7 @@ function nntest.ConcatTable()
23082308
torch.randn(3,3,4):float(), torch.randn(3,3,4):float(), torch.randn(3,3,4):float()
23092309
}
23102310
local gradOutput = {
2311-
{_gradOutput[1][1], _gradOutput[2][1], {_gradOutput[3][1]}},
2311+
{_gradOutput[1][1], _gradOutput[2][1], {_gradOutput[3][1]}},
23122312
{_gradOutput[1][2], _gradOutput[2][2], {_gradOutput[3][2]}},
23132313
{_gradOutput[1][3], _gradOutput[2][3], {_gradOutput[3][3]}}
23142314
}
@@ -2317,7 +2317,7 @@ function nntest.ConcatTable()
23172317
module:add(nn.Identity())
23182318
module:add(nn.Identity())
23192319
module:float()
2320-
2320+
23212321
local output = module:forward(input)
23222322
local output2 = {input, input, input}
23232323
equal(output2, output, "ConcatTable table output")
@@ -2328,7 +2328,7 @@ end
23282328

23292329
function nntest.FlattenTable()
23302330
-- Create a nested table. Obviously we can't even stochastically test
2331-
-- the space of all possible nested tables (it's infinite), but here is a
2331+
-- the space of all possible nested tables (it's infinite), but here is a
23322332
-- hand-coded one that covers all the cases we need:
23332333
local input = {
23342334
torch.rand(1),
@@ -2380,7 +2380,7 @@ function nntest.FlattenTable()
23802380
input[2][#(input[2])+1] = torch.rand(5)
23812381
m:forward(input)
23822382
mytester:assert(old_input_map ~= m.input_map and old_output ~= m.output)
2383-
2383+
23842384
-- CASE 3: An element is removed from the input table
23852385
old_input_map = m.input_map
23862386
old_output = m.output

0 commit comments

Comments
 (0)
Please sign in to comment.