require 'torch'
require 'nn'
+
+-- require 'cunn'
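+-- Uncomment the require above to exercise the CUDA code path further down.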
+
require 'dagnn'
torch.setdefaulttensortype('torch.DoubleTensor')
torch.manualSeed(1)
-function checkGrad(model, criterion, input, target)
+function checkGrad(model, criterion, input, target, epsilon)
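+   -- Compare the analytical gradient from :backward() against a centered
+   -- finite-difference estimate of the loss gradient; returns the largest
+   -- relative error over the parameters.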
local params, gradParams = model:getParameters()
- local epsilon = 1e-5
+ local epsilon = epsilon or 1e-5
local output = model:forward(input)
local loss = criterion:forward(output, target)
local num = (loss1 - loss0) / (2 * epsilon)
if num ~= ana then
- err = math.max(err, torch.abs(num - ana) / torch.abs(num))
+ err = math.max(err, math.abs(num - ana) / math.max(epsilon, math.abs(num)))
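+         -- epsilon in the denominator guards against dividing by a near-zero numerical gradient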
end
end
dag:connect(c, d)
dag:connect(c, e)
+dag:setLabel(a, 'first module')
+
dag:setInput(a)
dag:setOutput({ d, e })
--- We check it works when we put it into a nn.Sequential
+-- Check the dot file written below. A pdf can be generated from it with:
+--
+-- dot ./graph.dot -Lg -T pdf -o ./graph.pdf
+--
+print('Writing ./graph.dot')
+dag:saveDot('./graph.dot')
+
+-- Let's make a model where the dag is inside another nn.Container.
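+-- The dag produces the table of outputs { d, e }, which nn.CAddTable sums into a single tensor.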
model = nn.Sequential()
:add(nn.Linear(50, 50))
:add(dag)
:add(nn.CAddTable())
+criterion = nn.MSECriterion()
+
+if cunn then
+ print("Using CUDA")
+ model:cuda()
+ criterion:cuda()
+ torch.setdefaulttensortype('torch.CudaTensor')
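+   -- CudaTensors are single precision, so the finite-difference check needs a coarser step.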
+ epsilon = 1e-3
+end
+
local input = torch.Tensor(30, 50):uniform()
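+-- A single forward pass gives an output of the right shape; randomize it to serve as a regression target.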
local output = model:updateOutput(input):clone()
output:uniform()
-print('Gradient estimate error ' .. checkGrad(model, nn.MSECriterion(), input, output))
+-- Check that DAG:accGradParameters and friends work okay
+print('Gradient estimate error ' .. checkGrad(model, criterion, input, output, epsilon))
+
+-- Check that we can save and reload the model
+model:clearState()
+torch.save('./test.t7', model)
+local otherModel = torch.load('./test.t7')
+print('Gradient estimate error ' .. checkGrad(otherModel, criterion, input, output, epsilon))
-print('Writing /tmp/graph.dot')
-dag:saveDot('/tmp/graph.dot')
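+-- Also dump the graph structure as text.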
+dag:print()