Commit 8be6978 (parent 5c237c8)

added 04_backpropagation

File tree

2 files changed: +30, -2 lines

03_autograd.py (+2, -2)
@@ -57,7 +57,7 @@
 # - x.detach()
 # - wrap in 'with torch.no_grad():'

-# .requires_grad_(...) changes an existing Tensor’s requires_grad flag in-place.
+# .requires_grad_(...) changes an existing flag in-place.
 a = torch.randn(2, 2)
 print(a.requires_grad)
 b = ((a * 3) / (a - 1))
@@ -80,7 +80,7 @@
 print((x ** 2).requires_grad)

 # -------------
-# with backward() the gradient for this tensor will be accumulated into .grad attribute.
+# backward() accumulates the gradient for this tensor into .grad attribute.
 # !!! We need to be careful during optimization !!!
 # Use .zero_() to empty the gradients before a new optimization step!
 weights = torch.ones(4, requires_grad=True)
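The two reworded comments refer to autograd behaviours worth seeing in isolation: requires_grad_() flips the tracking flag on an existing tensor in-place, and backward() accumulates gradients into .grad until they are zeroed. A minimal sketch illustrating both; the tensors and the loop count here are illustrative and not part of the tutorial files:

import torch

a = torch.randn(2, 2)             # requires_grad is False by default
a.requires_grad_(True)            # flip the flag in-place on an existing tensor
b = a.detach()                    # same values, tracking disabled
print(b.requires_grad)            # False
with torch.no_grad():
    print((a * 2).requires_grad)  # False: ops inside no_grad are not recorded

weights = torch.ones(4, requires_grad=True)
for _ in range(2):
    out = (weights * 3).sum()
    out.backward()                # adds d(out)/d(weights) into weights.grad
    print(weights.grad)           # [3., 3., 3., 3.], then [6., 6., 6., 6.]

weights.grad.zero_()              # reset before a real optimization step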

04_backpropagation.py (new file, +28)

@@ -0,0 +1,28 @@
+import torch
+
+x = torch.tensor(1.0)
+y = torch.tensor(2.0)
+
+# This is the parameter we want to optimize -> requires_grad=True
+w = torch.tensor(1.0, requires_grad=True)
+
+# forward pass to compute loss
+y_predicted = w * x
+loss = (y_predicted - y)**2
+print(loss)
+
+# backward pass to compute gradient dLoss/dw
+loss.backward()
+print(w.grad)
+
+# update weights
+# next forward and backward pass...
+
+# continue optimizing:
+# update weights, this operation should not be part of the computational graph
+with torch.no_grad():
+    w -= 0.01 * w.grad
+# don't forget to zero the gradients
+w.grad.zero_()
+
+# next forward and backward pass...
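For this loss, dLoss/dw = 2 * x * (w*x - y), so the gradient printed by the new file is 2 * 1.0 * (1.0 - 2.0) = -2.0. The closing comment hints at repeating the forward/backward/update cycle; a sketch of how that loop might continue, where the learning rate 0.01 matches the file but the 100 iterations are an illustrative choice:

import torch

x = torch.tensor(1.0)
y = torch.tensor(2.0)
w = torch.tensor(1.0, requires_grad=True)

for epoch in range(100):
    y_predicted = w * x
    loss = (y_predicted - y) ** 2
    loss.backward()                 # accumulate dLoss/dw into w.grad
    with torch.no_grad():
        w -= 0.01 * w.grad          # gradient-descent step, kept out of the graph
    w.grad.zero_()                  # clear the gradient before the next pass

print(w)     # moves toward 2.0, the value that makes w * x equal y
print(loss)  # loss from the last forward pass, shrinking toward 0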
