2 files changed: +30 −2 lines
File 1 of 2 (modified):

  # - x.detach()
  # - wrap in 'with torch.no_grad():'

- # .requires_grad_(...) changes an existing Tensor’s requires_grad flag in-place.
+ # .requires_grad_(...) changes an existing flag in-place.
  a = torch.randn(2, 2)
  print(a.requires_grad)
  b = ((a * 3) / (a - 1))

  ...

  print((x ** 2).requires_grad)

  # -------------
- # with backward() the gradient for this tensor will be accumulated into .grad attribute.
+ # backward() accumulates the gradient for this tensor into .grad attribute.
  # !!! We need to be careful during optimization !!!
  # Use .zero_() to empty the gradients before a new optimization step!
  weights = torch.ones(4, requires_grad=True)
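
The comments touched by this change describe how to stop autograd from tracking a tensor (x.detach(), wrapping code in 'with torch.no_grad():', or flipping the flag with .requires_grad_(...)) and how backward() accumulates gradients into .grad. A minimal standalone sketch of those points (not part of this commit; the tensor names are illustrative):

import torch

a = torch.randn(2, 2, requires_grad=True)

# .requires_grad_(False) changes the flag on an existing tensor in-place
b = torch.randn(2, 2, requires_grad=True)
b.requires_grad_(False)
print(b.requires_grad)   # False

# .detach() returns a new tensor that shares the data but is cut from the graph
c = a.detach()
print(c.requires_grad)   # False

# operations run inside torch.no_grad() are not recorded
with torch.no_grad():
    d = a * 2
print(d.requires_grad)   # False

# backward() accumulates into .grad, which is why .zero_() is needed between steps
weights = torch.ones(4, requires_grad=True)
for _ in range(2):
    (weights * 3).sum().backward()
print(weights.grad)      # tensor([6., 6., 6., 6.]) -- two passes accumulated
weights.grad.zero_()     # reset before the next optimization step
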
File 2 of 2 (new file):

+ import torch
+
+ x = torch.tensor(1.0)
+ y = torch.tensor(2.0)
+
+ # This is the parameter we want to optimize -> requires_grad=True
+ w = torch.tensor(1.0, requires_grad=True)
+
+ # forward pass to compute loss
+ y_predicted = w * x
+ loss = (y_predicted - y)**2
+ print(loss)
+
+ # backward pass to compute gradient dLoss/dw
+ loss.backward()
+ print(w.grad)
+
+ # update weights
+ # next forward and backward pass...
+
+ # continue optimizing:
+ # update weights, this operation should not be part of the computational graph
+ with torch.no_grad():
+     w -= 0.01 * w.grad
+     # don't forget to zero the gradients
+     w.grad.zero_()
+
+ # next forward and backward pass...
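
The new script performs a single manual gradient-descent step and its closing comment points at the "next forward and backward pass". A sketch of how that continuation could look, with the same step wrapped in a small loop (not part of this commit; the 10 iterations and the 0.01 learning rate are illustrative):

import torch

x = torch.tensor(1.0)
y = torch.tensor(2.0)
w = torch.tensor(1.0, requires_grad=True)

for epoch in range(10):
    # forward pass to compute the loss
    loss = (w * x - y) ** 2
    # backward pass: dLoss/dw is accumulated into w.grad
    loss.backward()
    # update the weight outside the computational graph, then clear the gradient
    with torch.no_grad():
        w -= 0.01 * w.grad
        w.grad.zero_()
    print(f"epoch {epoch}: loss = {loss.item():.4f}, w = {w.item():.4f}")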