-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_fnn.py
More file actions
98 lines (80 loc) · 3.53 KB
/
test_fnn.py
File metadata and controls
98 lines (80 loc) · 3.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import json
import unittest
import numpy as np
import fnn
import mnist_experiment as me
import check_grad as cg
# A tiny subset from the MNIST dataset, stored as JSON with keys
# 'data' and 'labels'; only the first 10 examples are kept so the
# tests stay fast.
with open('toy_dataset.json', 'r') as f:
    toy_dataset = json.load(f)
toy_data = np.array(toy_dataset['data'][:10])
toy_labels = np.array(toy_dataset['labels'][:10])

# Data and labels to test the simple example we went through
# in the class: two 2-dimensional inputs, one-hot labels over 5 classes.
data = np.array([[1, 2], [3, 1]])
labels = np.array([[0, 0, 1, 0, 0],
                   [1, 0, 0, 0, 0]])
class TestFNN(unittest.TestCase):
    """Unit tests for the feed-forward neural network in ``fnn``.

    Two models are exercised:

    * ``m1`` -- a 784-16-8-10 network with randomly initialized weights
      (fixed seed), checked on the tiny MNIST subset via a known loss
      value and a numerical gradient check.
    * ``m2`` -- a 2-3-2-5 network with hand-set weights, checked against
      activations and gradients pre-computed by hand in class.
    """

    def setUp(self):
        # Fixed seed so m1's random initialization (and hence the
        # expected loss value in test_forwardprop) is reproducible.
        np.random.seed(seed=349)

        # Model to test on the tiny subset of MNIST.
        self.m1 = fnn.FNN(784, 10, [16, 8], [fnn.relu, fnn.relu])
        # Perturb one output-layer bias so forwardprop must actually
        # add the bias term (an all-zero bias would mask that bug).
        self.m1.layers[-1].b[0][0] = 1.0

        # Second model, with hand-chosen weights matching the simple
        # example we went through in class.
        self.m2 = fnn.FNN(2, 5, [3, 2], [fnn.relu, fnn.relu])
        self.m2.layers[0].w = np.array([[1, -3, 4],
                                        [-2, 1, 2]])
        self.m2.layers[0].b = np.array([[2, -1, 1]])
        self.m2.layers[1].w = np.array([[1, -1],
                                        [2, 1],
                                        [-1, 2]])
        self.m2.layers[1].b = np.array([[1, -1]])
        self.m2.layers[2].w = np.array([[1, -2, 2, -2, 1],
                                        [-1, 1, 1, -1, -1]])
        self.m2.layers[2].b = np.array([[1, 0, -1, 1, 2]])

    def test_forwardprop(self):
        """Forward pass: loss on the MNIST subset plus hand-checked activations."""
        # Test on the tiny subset of MNIST. The returned probabilities are
        # not needed here, only the scalar loss.
        _, loss = self.m1.forwardprop(toy_data, toy_labels)
        # assertAlmostEqual with an explicit delta reports both values on
        # failure, unlike the opaque assertTrue(abs(...) < eps) idiom.
        self.assertAlmostEqual(loss, 2.3677889, delta=1e-7)

        # Test on the simple example (no labels: only intermediate
        # activations are checked).
        self.m2.forwardprop(data)
        # Check the activations of the first two layers.
        self.assertTrue(np.allclose(self.m2.layers[0].a,
                                    np.array([[0, 0, 9],
                                              [3, 0, 15]])))
        self.assertTrue(np.allclose(self.m2.layers[1].a,
                                    np.array([[0, 17],
                                              [0, 26]])))
        print('\n' + '=' * 50 + '\n')
        print("Your forward propagation is correct!")
        print('\n' + '=' * 50 + '\n')

    def test_backprop(self):
        """Backward pass: numerical gradient check plus hand-computed gradients."""
        # Use gradient check to test on the tiny MNIST subset.
        self.assertTrue(
            cg.check_backprop(self.m1, toy_data, toy_labels) < 1e-4)

        # Use pre-computed gradients to test on the simple example.
        # forwardprop must run first to populate the layer activations
        # that backprop reads.
        self.m2.forwardprop(data)
        self.m2.backprop(labels)
        # Check the gradients of the loss w.r.t. the first two layers'
        # weights and biases.
        self.assertTrue(np.allclose(self.m2.layers[1].d_w,
                                    np.array([[0, 3],
                                              [0, 0],
                                              [0, 15]])))
        self.assertTrue(np.allclose(self.m2.layers[1].d_b,
                                    np.array([[0, 1]])))
        self.assertTrue(np.allclose(self.m2.layers[0].d_w,
                                    np.array([[-3, 0, 6],
                                              [-1, 0, 2]])))
        self.assertTrue(np.allclose(self.m2.layers[0].d_b,
                                    np.array([[-1, 0, 2]])))
        print('\n' + '=' * 50 + '\n')
        print("Your backpropagation is correct!")
        print('\n' + '=' * 50 + '\n')
if __name__ == '__main__':
    # Discover and run all TestFNN test methods when executed as a script.
    unittest.main()