|
18 | 18 | [5, 1],
|
19 | 19 | [2, 4.5]][2]
|
20 | 20 |
|
21 |
| -x = np.linspace(-1, 1, 200, dtype=np.float32) |
| 21 | +x = np.linspace(-1, 1, 200, dtype=np.float32) # x data |
| 22 | + |
| 23 | +# Test (1): Visualize a simple linear function with two parameters, |
| 24 | +# you can change LR to 1 to see a different pattern in the gradient descent.
22 | 25 |
|
23 |
| -# test 1 |
24 | 26 | # y_fun = lambda a, b: a * x + b
|
25 | 27 | # tf_y_fun = lambda a, b: a * x + b
|
26 | 28 |
|
27 |
| -# test 2 |
| 29 | + |
| 30 | +# Test (2): Using Tensorflow as a calibration tool for an empirical formula like the following.
| 31 | + |
28 | 32 | # y_fun = lambda a, b: a * x**3 + b * x**2
|
29 | 33 | # tf_y_fun = lambda a, b: a * x**3 + b * x**2
|
30 | 34 |
|
31 |
| -# test 3 |
| 35 | + |
| 36 | +# Test (3): The simplest two-parameter, two-layer neural net, with its local & global minima;
| 37 | +# you can try different INIT_PARAMS sets to visualize the gradient descent.
| 38 | + |
32 | 39 | y_fun = lambda a, b: np.sin(b*np.cos(a*x))
|
33 | 40 | tf_y_fun = lambda a, b: tf.sin(b*tf.cos(a*x))
|
34 | 41 |
|
|
50 | 57 | result, _ = sess.run([pred, train_op]) # training
|
51 | 58 |
|
52 | 59 |
|
| 60 | +# visualization codes: |
53 | 61 | print('a=', a_, 'b=', b_)
|
54 | 62 | plt.figure(1)
|
55 | 63 | plt.scatter(x, y, c='b') # plot data
|
|
0 commit comments