```python
import torch

a = torch.tensor(1.0, requires_grad=True)
y = a ** 2
a_ = a.detach()
print(a_.grad)       # None -- a_ has requires_grad=False and no gradient yet

a_.requires_grad_()  # set a_.requires_grad = True in place
z = a_ * 3
y.backward()
z.backward()
print(a_.grad)       # tensor(3.)
print(a.grad)        # tensor(2.)
```
This shows that even after `a_` has `requires_grad=True` re-enabled, its gradient is completely unrelated to the gradient of `a`: each tensor accumulates its own gradient from its own branch of the graph.
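As a brief illustration of why this independence matters in practice, here is a minimal sketch (the variable names `x` and `w` are our own, not from the example above) that uses `detach()` to cut gradient flow through one input while the other input still receives a gradient:

```python
import torch

x = torch.tensor(2.0, requires_grad=True)
w = torch.tensor(3.0, requires_grad=True)

out = x.detach() * w  # the x branch is cut out of the graph
out.backward()

print(w.grad)  # tensor(2.) -- d(out)/dw = x
print(x.grad)  # None -- x was detached, so no gradient reaches it
```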
```python
import torch

a = torch.tensor(1.0, requires_grad=True)
a_ = a.detach()
a_.add_(torch.tensor(1.0))  # in-place change through a_ also modifies a
print(a)   # tensor(2., requires_grad=True)
print(a_)  # tensor(2.)

# a_.backward()
# print(a.grad)
# RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
```
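To make the shared-storage behavior explicit, a small supplementary check (using only the public `Tensor.data_ptr()` API) can confirm that `detach()` returns a view of the same memory rather than a copy, which is exactly why the in-place `add_()` above changed `a` as well:

```python
import torch

a = torch.tensor(1.0, requires_grad=True)
a_ = a.detach()

# Same underlying memory: detach() creates a new tensor object,
# but it points at the same storage as a.
print(a.data_ptr() == a_.data_ptr())  # True
```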