5 Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
6 Written by Francois Fleuret <francois.fleuret@idiap.ch>
8 This file is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 3 as
10 published by the Free Software Foundation.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this file. If not, see <http://www.gnu.org/licenses/>.
-- Use 64-bit floats everywhere so the finite-difference gradient check
-- below is not drowned in single-precision rounding error.
26 torch.setdefaulttensortype('torch.DoubleTensor')
-- Compare the analytical gradient of `model` w.r.t. its parameters against
-- a central finite-difference estimate, for the given (input, target) pair
-- under `criterion`. Returns (in the full file) the worst relative error
-- seen over all parameters.
-- NOTE(review): this chunk is missing several source lines — the
-- definitions of `epsilon`, `err`, and the per-iteration `x = params[i]`
-- save/restore, plus the final `return err` / `end`, are not visible here;
-- the comments below assume the conventional versions of those lines.
29 function checkGrad(model, criterion, input, target)
30 local params, gradParams = model:getParameters()
-- One forward/backward pass to fill gradParams with the analytical gradient.
34 local output = model:forward(input)
35 local loss = criterion:forward(output, target)
36 local gradOutput = criterion:backward(output, target)
38 model:backward(input, gradOutput)
-- Clone, since subsequent forward passes may clobber gradParams' storage.
39 local analyticalGradParam = gradParams:clone()
-- Perturb each parameter in turn and measure the loss on both sides.
43 for i = 1, params:size(1) do
-- NOTE(review): `x` (the saved original value of params[i]) and `epsilon`
-- are defined on lines missing from this view.
46 params[i] = x - epsilon
47 local output0 = model:forward(input)
48 local loss0 = criterion:forward(output0, target)
50 params[i] = x + epsilon
51 local output1 = model:forward(input)
52 local loss1 = criterion:forward(output1, target)
56 local ana = analyticalGradParam[i]
-- Central difference: (f(x+eps) - f(x-eps)) / (2*eps).
57 local num = (loss1 - loss0) / (2 * epsilon)
-- Track the worst relative error; `err` is initialized on a missing line.
-- NOTE(review): divides by |num| with no guard — blows up if the numerical
-- gradient is exactly zero for some parameter.
60 err = math.max(err, math.abs(num - ana) / math.abs(num))
-- Pretty-print a tensor or a (possibly nested) table of tensors, labelling
-- each table element with its key.
-- NOTE(review): the body is truncated in this view — the recursive call /
-- non-table branch and the closing `end`s are on lines not shown here.
67 function printTensorTable(t)
68 if torch.type(t) == 'table' then
-- Note: the inner `t` deliberately shadows the outer argument.
69 for i, t in pairs(t) do
70 print('-- ELEMENT [' .. i .. '] --')
-- Build and exercise a small DAG model. The ASCII diagram below sketches
-- the graph topology: node b fans out into two branches (through c/d and
-- through e) that are both exposed as outputs.
-- NOTE(review): several construction lines (creation of `dag` and of nodes
-- a, b, c, e, and the other `dag:connect` calls) are missing from this view.
78 -- +-- Linear(10, 10) --> ReLU --> d -->
81 -- --> a --> b -----------> c ---------------+
84 -- +--------------- e -->
95 dag:connect(b, nn.Linear(10, 15), nn.ReLU(), d)
-- The DAG has two output nodes; its output is the table { d, e }.
100 dag:setOutput({ d, e })
102 -- Check the output of the dot file
103 print('Writing /tmp/graph.dot')
104 dag:saveDot('/tmp/graph.dot')
106 -- Let's make a model where the dag is inside another nn.Container.
107 model = nn.Sequential()
-- NOTE(review): the chained :add() calls inserting the dag (and any
-- trailing layers) are on lines missing from this view.
108 :add(nn.Linear(50, 50))
-- Random batch of 30 inputs of dimension 50.
112 local input = torch.Tensor(30, 50):uniform()
-- Use the model's own output as the regression target, so the MSE loss
-- (and hence the gradient check) is well defined without labelled data.
113 local output = model:updateOutput(input):clone()
116 -- Check that DAG:accGradParameters and friends work okay
117 print('Gradient estimate error ' .. checkGrad(model, nn.MSECriterion(), input, output))
119 -- Check that we can save and reload the model
121 torch.save('/tmp/test.t7', model)
122 local otherModel = torch.load('/tmp/test.t7')
-- The reloaded model must produce the same gradients as the original.
123 print('Gradient estimate error ' .. checkGrad(otherModel, nn.MSECriterion(), input, output))