# here: f = 2 * x

- # 0) Training samples, watch the shape!
- X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
- Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
-
- n_samples, n_features = X.shape
- print(f'#samples: {n_samples}, #features: {n_features}')
- # 0) create a test sample
- X_test = torch.tensor([5], dtype=torch.float32)
-
- # 1) Design Model: the model has to implement the forward pass!
- # Here we can use a built-in model from PyTorch
- input_size = n_features
- output_size = n_features
-
- # we can call this model with samples X
- model = nn.Linear(input_size, output_size)
-
- '''
- class LinearRegression(nn.Module):
-     def __init__(self, input_dim, output_dim):
-         super(LinearRegression, self).__init__()
-         # define different layers
-         self.lin = nn.Linear(input_dim, output_dim)
-
-     def forward(self, x):
-         return self.lin(x)
-
- model = LinearRegression(input_size, output_size)
- '''
-
- print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')
+ # 0) Training samples
+ X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
+ Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
+
+ # 1) Design Model: Weights to optimize and forward function
+ w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
+
+ def forward(x):
+     return w * x
+
+ print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100

+ # callable function
loss = nn.MSELoss()
- optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
+
+ optimizer = torch.optim.SGD([w], lr=learning_rate)

# 3) Training loop
for epoch in range(n_iters):
-     # predict = forward pass with our model
-     y_predicted = model(X)
+     # predict = forward pass
+     y_predicted = forward(X)

    # loss
    l = loss(Y, y_predicted)
@@ -70,7 +52,6 @@ def forward(self, x):
    optimizer.zero_grad()

    if epoch % 10 == 0:
-         [w, b] = model.parameters()  # unpack parameters
-         print('epoch ', epoch + 1, ': w = ', w[0][0].item(), ' loss = ', l)
+         print('epoch ', epoch + 1, ': w = ', w, ' loss = ', l)

- print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
+ print(f'Prediction after training: f(5) = {forward(5).item():.3f}')
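For context, the post-commit script roughly assembles into the minimal sketch below: the nn.Linear model is replaced by a single trainable tensor w and a hand-written forward function, while the optimizer receives the raw parameter list [w]. The imports, the l.backward() call, and optimizer.step() sit outside the hunks shown above, so their exact placement here is an assumption based on the standard PyTorch training loop.

import torch
import torch.nn as nn

# 0) Training samples
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

# 1) Weight to optimize and forward function
w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)

def forward(x):
    return w * x

# 2) Loss (callable) and optimizer acting directly on [w]
loss = nn.MSELoss()
optimizer = torch.optim.SGD([w], lr=0.01)

# 3) Training loop
for epoch in range(100):
    y_predicted = forward(X)   # forward pass
    l = loss(Y, y_predicted)   # MSE loss
    l.backward()               # assumed: compute dl/dw via autograd
    optimizer.step()           # assumed: w -= lr * w.grad
    optimizer.zero_grad()      # clear gradients for the next iteration

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')

Because the optimizer is built from [w] rather than model.parameters(), the logging line in the new version can print w directly instead of unpacking the parameters of an nn.Linear layer.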