1 | import torch |
1 | a = torch.randn(2,3) |
tensor([[ 1.3270, -2.5966, -0.1547],
[ 0.3614, 1.0595, 0.8459]])
tensor([[ 8.1156, 1.4439, 0.2679],
[ -1.1320, 35.5890, -10.9797]])
a grad is False
b grad is False
a grad is True
b grad is True
a_grad = None
b_grad = tensor([[ 16.2312, 2.8879, 0.5359],
[ -2.2639, 71.1781, -21.9595]])
1 | a = torch.ones(2,3)+1 |
tensor([[ 2., 2., 2.],
[ 2., 2., 2.]])
tensor([[ 4., 4., 4.],
[ 4., 4., 4.]])
a grad is False
a grad is True
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-31-b43c2a9b1b1c> in <module>()
13
14 # c.backward()
---> 15 b.backward()
16 # b = a*2/(a-1)
     17 #db/da = -2/(a-1)**2
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
1 | x = torch.randn(3,requires_grad = True) |
tensor([-571.8924, 188.2365, -904.6218])
tensor([ 204.8000, 2048.0000, 0.2048])
True
True
False
True
1 | import torch |
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
1 | # Inspect the model parameters |
10
torch.Size([16, 6, 5, 5])
1 | # Load the data |
tensor([[-0.0400, 0.0145, -0.0109, -0.0255, 0.0603, 0.0657, -0.0557,
-0.0260, 0.0959, 0.1870]])
1 | net.zero_grad() |
1 | output = net(input) |
tensor(37.9689)
1 | print(loss.grad_fn) # MSELoss |
<MseLossBackward object at 0x00000275FF44FDA0>
<AddmmBackward object at 0x00000275FF44FBA8>
<ExpandBackward object at 0x00000275FF44FDA0>
1 | net.zero_grad() |
tensor([ 0., 0., 0., 0., 0., 0.])
tensor([-0.1013, 0.0209, -0.0838, -0.0878, -0.0151, 0.0398])
1 | learning_rate = 0.01 |
1 | # Use the built-in optimizer to update the weights |