
Commit f8317c3

switched wrong file names for 06_1 and 06_2
1 parent aa2573f commit f8317c3

2 files changed: +52 -52 lines changed

06_1_loss_and_optimizer.py (+18 -37)
@@ -13,49 +13,31 @@

 # here : f = 2 * x

-# 0) Training samples, watch the shape!
-X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
-Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
-
-n_samples, n_features = X.shape
-print(f'#samples: {n_samples}, #features: {n_features}')
-# 0) create a test sample
-X_test = torch.tensor([5], dtype=torch.float32)
-
-# 1) Design Model, the model has to implement the forward pass!
-# Here we can use a built-in model from PyTorch
-input_size = n_features
-output_size = n_features
-
-# we can call this model with samples X
-model = nn.Linear(input_size, output_size)
-
-'''
-class LinearRegression(nn.Module):
-    def __init__(self, input_dim, output_dim):
-        super(LinearRegression, self).__init__()
-        # define diferent layers
-        self.lin = nn.Linear(input_dim, output_dim)
-
-    def forward(self, x):
-        return self.lin(x)
-
-model = LinearRegression(input_size, output_size)
-'''
-
-print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')
+# 0) Training samples
+X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
+Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
+
+# 1) Design Model: Weights to optimize and forward function
+w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
+
+def forward(x):
+    return w * x
+
+print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

 # 2) Define loss and optimizer
 learning_rate = 0.01
 n_iters = 100

+# callable function
 loss = nn.MSELoss()
-optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
+
+optimizer = torch.optim.SGD([w], lr=learning_rate)

 # 3) Training loop
 for epoch in range(n_iters):
-    # predict = forward pass with our model
-    y_predicted = model(X)
+    # predict = forward pass
+    y_predicted = forward(X)

     # loss
     l = loss(Y, y_predicted)

@@ -70,7 +52,6 @@ def forward(self, x):
     optimizer.zero_grad()

     if epoch % 10 == 0:
-        [w, b] = model.parameters() # unpack parameters
-        print('epoch ', epoch+1, ': w = ', w[0][0].item(), ' loss = ', l)
+        print('epoch ', epoch+1, ': w = ', w, ' loss = ', l)

-print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
+print(f'Prediction after training: f(5) = {forward(5).item():.3f}')
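Taken together, these hunks leave 06_1_loss_and_optimizer.py as the manual-weight version of the example. A minimal end-to-end sketch of the resulting script is shown below for orientation; the import lines and the backward/step lines sit in parts of the file this diff does not touch, so they are assumptions here rather than content of this commit.

import torch            # assumed import, not shown in the diff
import torch.nn as nn   # assumed import, not shown in the diff

# 0) Training samples for f = 2 * x
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

# 1) Design Model: a single weight to optimize and a forward function
w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)

def forward(x):
    return w * x

print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()                                  # callable loss function
optimizer = torch.optim.SGD([w], lr=learning_rate)   # optimize the raw tensor w

# 3) Training loop
for epoch in range(n_iters):
    y_predicted = forward(X)     # forward pass
    l = loss(Y, y_predicted)     # MSE loss

    l.backward()                 # assumed: backward pass (unchanged lines, not in the diff)
    optimizer.step()             # assumed: update w (unchanged lines, not in the diff)
    optimizer.zero_grad()        # reset gradients

    if epoch % 10 == 0:
        print('epoch ', epoch+1, ': w = ', w, ' loss = ', l)

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')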

06_2_model_loss_and_optimizer.py (+34 -15)
@@ -13,31 +13,49 @@

 # here : f = 2 * x

-# 0) Training samples
-X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
-Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
+# 0) Training samples, watch the shape!
+X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
+Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)

-# 1) Design Model: Weights to optimize and forward function
-w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
+n_samples, n_features = X.shape
+print(f'#samples: {n_samples}, #features: {n_features}')
+# 0) create a test sample
+X_test = torch.tensor([5], dtype=torch.float32)

-def forward(x):
-    return w * x
+# 1) Design Model, the model has to implement the forward pass!
+# Here we can use a built-in model from PyTorch
+input_size = n_features
+output_size = n_features

-print(f'Prediction before training: f(5) = {forward(5).item():.3f}')
+# we can call this model with samples X
+model = nn.Linear(input_size, output_size)
+
+'''
+class LinearRegression(nn.Module):
+    def __init__(self, input_dim, output_dim):
+        super(LinearRegression, self).__init__()
+        # define diferent layers
+        self.lin = nn.Linear(input_dim, output_dim)
+
+    def forward(self, x):
+        return self.lin(x)
+
+model = LinearRegression(input_size, output_size)
+'''
+
+print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

 # 2) Define loss and optimizer
 learning_rate = 0.01
 n_iters = 100

-# callable function
 loss = nn.MSELoss()
-
-optimizer = torch.optim.SGD([w], lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

 # 3) Training loop
 for epoch in range(n_iters):
-    # predict = forward pass
-    y_predicted = forward(X)
+    # predict = forward pass with our model
+    y_predicted = model(X)

     # loss
     l = loss(Y, y_predicted)

@@ -52,6 +70,7 @@ def forward(x):
     optimizer.zero_grad()

     if epoch % 10 == 0:
-        print('epoch ', epoch+1, ': w = ', w, ' loss = ', l)
+        [w, b] = model.parameters() # unpack parameters
+        print('epoch ', epoch+1, ': w = ', w[0][0].item(), ' loss = ', l)

-print(f'Prediction after training: f(5) = {forward(5).item():.3f}')
+print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
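Conversely, these hunks leave 06_2_model_loss_and_optimizer.py as the nn.Linear-based version. A minimal sketch of the resulting script follows; as above, the imports and the backward/step lines are not visible in the diff and are assumptions here.

import torch            # assumed import, not shown in the diff
import torch.nn as nn   # assumed import, not shown in the diff

# 0) Training samples for f = 2 * x, watch the shape: (n_samples, n_features)
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)

n_samples, n_features = X.shape
print(f'#samples: {n_samples}, #features: {n_features}')

# 0) create a test sample
X_test = torch.tensor([5], dtype=torch.float32)

# 1) Design Model: a built-in linear layer provides the forward pass
input_size = n_features
output_size = n_features
model = nn.Linear(input_size, output_size)

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# 3) Training loop
for epoch in range(n_iters):
    y_predicted = model(X)       # forward pass with the model
    l = loss(Y, y_predicted)     # MSE loss

    l.backward()                 # assumed: backward pass (unchanged lines, not in the diff)
    optimizer.step()             # assumed: update parameters (unchanged lines, not in the diff)
    optimizer.zero_grad()        # reset gradients

    if epoch % 10 == 0:
        [w, b] = model.parameters()   # unpack weight and bias
        print('epoch ', epoch+1, ': w = ', w[0][0].item(), ' loss = ', l)

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')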
