import torch
from torch.autograd import Variable

# A Variable in torch wraps a tensor and builds a computational graph,
# but the graph is dynamic, unlike the static graphs of Tensorflow or Theano.
# So torch needs no placeholders: you simply pass variables into the computational graph.

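# Because the graph is rebuilt on every forward pass, ordinary Python control
# flow can shape it at runtime. A minimal sketch of that dynamism (the helper
# name and the threshold of 10 are arbitrary choices for illustration):
def double_until_big(v):
    while v.data.sum() < 10:    # the number of graph nodes depends on the data
        v = v * 2
    return v

print(double_until_big(Variable(torch.ones(2, 2), requires_grad=True)))
# each call may build a graph of a different depth; no placeholder was declared
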
tensor = torch.FloatTensor([[1,2],[3,4]])       # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for computing gradients

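# requires_grad marks this leaf for gradient tracking; it defaults to False,
# so without it backward() would leave variable.grad empty:
print(variable.requires_grad)   # True
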
print(tensor)       # [torch.FloatTensor of size 2x2]
print(variable)     # [torch.FloatTensor of size 2x2]

# So far the tensor and the variable look the same.
# However, the variable is part of the graph and of the automatic gradient computation.

t_out = torch.mean(tensor*tensor)       # mean(x^2) on a plain tensor: no graph is recorded
v_out = torch.mean(variable*variable)   # mean(x^2) on a variable: the op joins the graph
print(t_out)    # 7.5
print(v_out)    # 7.5
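
# v_out remembers the operation that produced it; recent PyTorch exposes this
# as .grad_fn (very old releases named the same attribute .creator), so the
# attribute name below is version-dependent:
print(v_out.grad_fn)    # e.g. <MeanBackward ...>, the graph node behind v_out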
21
v_out.backward()    # backpropagate from v_out
# v_out = 1/4 * sum(variable*variable)
# so the gradient w.r.t. the variable is d(v_out)/d(variable) = 1/4 * 2 * variable = variable/2
print(variable.grad)
"""
 0.5000  1.0000
 1.5000  2.0000
"""
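
# Gradients accumulate across backward() calls, which is why training loops
# zero them before each step; a minimal sketch of that behaviour:
v_out2 = torch.mean(variable*variable)  # rebuild the (dynamic) graph
v_out2.backward()
print(variable.grad)            # variable/2 + variable/2, i.e. the variable itself
variable.grad.data.zero_()      # reset the accumulated gradient in place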

print(variable)     # this is data in variable format
"""
Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]
"""

print(variable.data)    # this is data in tensor format
"""
 1  2
 3  4
[torch.FloatTensor of size 2x2]
"""

print(variable.data.numpy())    # numpy format
"""
[[ 1.  2.]
 [ 3.  4.]]
"""