This repository was archived by the owner on Mar 12, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbackpropagation.py
More file actions
142 lines (121 loc) · 4.33 KB
/
backpropagation.py
File metadata and controls
142 lines (121 loc) · 4.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
'''
Created on 24 Apr 2017
@author: FIRAT
'''
import tensorflow as tf
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def sigma(x):
    """Elementwise logistic sigmoid activation: 1 / (1 + e^(-x))."""
    one = tf.constant(1.0)
    return tf.div(one, tf.add(one, tf.exp(tf.negative(x))))
def sigmaprime(x):
    """Derivative of the sigmoid: sigma(x) * (1 - sigma(x))."""
    s = sigma(x)
    return tf.multiply(s, tf.subtract(tf.constant(1.0), s))
def load_image(infilename):
    """Load an image, binarize it, and return it as a flat int32 vector.

    The image is converted to PIL mode '1' (1-bit black/white), so the
    returned array contains only 0/1 values.

    :param infilename: path to the image file
    :return: 1-D numpy int32 array of pixel values (length = width*height)
    """
    # Context manager closes the underlying file handle deterministically;
    # the original relied on garbage collection. convert() forces a full
    # load, so the explicit img.load() call was redundant.
    with Image.open(infilename) as img:
        bw = img.convert('1')
    return np.asarray(bw, dtype="int32").reshape(-1)
def plot(x, y, xlabel, ylabel):
    """Draw a line plot of y against x with the given axis labels and show it."""
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.plot(x, y)
    plt.show()
# Loading of the training samples.
# Filenames suggest multiple handwritten/variant samples per letter class
# (Y, A, S, I) -- assumption from naming; verify against dl_data contents.
imageY = load_image('dl_data/1.png')
imageA = load_image('dl_data/2d.png')
imageS = load_image('dl_data/3h.png')
imageI = load_image('dl_data/4a.png')
imageYY = load_image('dl_data/1a.png')  # second sample of class Y
# Held-out test samples, one per class plus an extra Y.
testY = load_image('dl_data/1d.png')
testA = load_image('dl_data/2e.png')
testS = load_image('dl_data/3d.png')
testI = load_image('dl_data/4d.png')
testYY = load_image('dl_data/1e.png')
# Since there are 4 classes in total, each class label is a one-hot
# vector with exactly one element of the array set.
classY=[1,0,0,0]
classA=[0,1,0,0]
classS=[0,0,1,0]
classI=[0,0,0,1]
# Network inputs: 900 features per sample (presumably 30x30 binarized
# images flattened by load_image -- TODO confirm image dimensions).
a0 = tf.placeholder(tf.float32, [None, 900])  # input size
y = tf.placeholder(tf.float32, [None, 4])  # one-hot target: y,a,s,i
# layer 1 neuron count
h1Neuron = 5
# layer 2 neuron count
h2Neuron = 7
# layer 3 neuron count (output layer: one neuron per class)
h3Neuron = 4
# hidden layer 1
w1 = tf.Variable(tf.truncated_normal([900,h1Neuron]))
b1 = tf.Variable(tf.truncated_normal([1,h1Neuron]))
# hidden layer 2
w2 = tf.Variable(tf.truncated_normal([h1Neuron,h2Neuron]))
# hidden layer 3
w3 = tf.Variable(tf.truncated_normal([h2Neuron,h3Neuron]))
# pre-activation of layer 1
z1 = tf.add(tf.matmul(a0, w1), b1)
# activation function layer 1
a1 = sigma(z1)
# pre-activation of layer 2
# NOTE(review): layers 2 and 3 have no bias term (a literal 0 is added);
# confirm this is intentional rather than an omission.
z2 = tf.add(tf.matmul(a1,w2),0)
# activation function layer 2
a2 = sigma(z2)
# pre-activation of layer 3
z3 = tf.add(tf.matmul(a2,w3),0)
# activation function layer 3 (network output)
a3 = sigma(z3)
# backpropagation: manual chain rule, from output layer back to input
diff = tf.subtract(a3,y)  # dE/da3 for the squared-error loss (up to a factor)
# mean squared error (reported only; gradients below are hand-derived)
squarredErr = tf.reduce_mean(tf.pow(y - a3, 2))
dz3 = tf.multiply(diff,sigmaprime(z3))   # delta at layer 3
dw3 = tf.matmul(tf.transpose(a2),dz3)    # gradient w.r.t. w3 (summed over batch)
da2 = tf.matmul(dz3,tf.transpose(w3))    # error propagated to a2
dz2 = tf.multiply(da2,sigmaprime(z2))    # delta at layer 2
dw2 = tf.matmul(tf.transpose(a1),dz2)
da1 = tf.matmul(dz2,tf.transpose(w2))
dz1 = tf.multiply(da1,sigmaprime(z1))
dw1 = tf.matmul(tf.transpose(a0),dz1)
da0 = tf.matmul(dz1,tf.transpose(w1))    # unused downstream; kept for symmetry
# learning rate
learningRate = tf.constant(0.5)
# update of the weights (plain gradient descent step)
# NOTE(review): b1 is declared above but never updated here, so the layer-1
# bias stays at its random initial value for the whole training -- confirm
# whether a db1 update was intended.
step=[
tf.assign(w1, tf.subtract(w1,tf.multiply(learningRate,dw1)))
, tf.assign(w2, tf.subtract(w2,tf.multiply(learningRate,dw2)))
, tf.assign(w3, tf.subtract(w3,tf.multiply(learningRate,dw3)))
]
# per-sample correctness: predicted class (argmax of a3) vs true class
acct_mat = tf.equal(tf.argmax(a3,1),tf.argmax(y,1))  # accuracy matrix
acct_res = tf.reduce_sum(tf.cast(acct_mat,tf.float32))  # number of correct predictions
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
epoch=2000
# collected squared errors -- only used by the commented-out code below
cost=[]
# training is performed inside the for loop
for i in range(epoch):
    # one full-batch gradient-descent step on the five training samples
    # (imageYY is a second sample of class Y, hence the repeated classY)
    sess.run(step, feed_dict = {a0: [imageY,imageA,imageS,imageI,imageYY],
    y : [classY,classA,classS,classI,classY]})
    # every 10 epochs, report how many of the 5 test samples are classified correctly
    if i % 10 == 0:
        res = sess.run(acct_res, feed_dict = {a0: [testY,testA,testS,testI,testYY],
        y : [classY,classA,classS,classI,classY]})
        print(res)
#testing
# res = sess.run(acct_res, feed_dict =
#                {a0: [testY,testA,testS,testI,testYY],
#                 y : [classY,classA,classS,classI,classY]})
# mat = sess.run(acct_mat, feed_dict =
#                {a0: [testY,testA,testS,testI,testYY],
#                 y : [classY,classA,classS,classI,classY]})
# print(res) #resolution,correctness
# print(mat) #confusion matrix
# to compute the squared error at every iteration during training,
# the lines below should be placed inside the for loop
# cost = sess.run(squarredErr, feed_dict = {a0: [imageY,imageA,imageS],
#    y : [classY,classA,classS]})
# cost.append(sess.run(squarredErr, feed_dict = {a0: [imageY,imageA,imageS,imageI,imageYY],
#    y : [classY,classA,classS,classI,classY]}))
# print(min(cost))
# print(max(cost))
# plot(list(range(epoch)),cost,'epoch','squarred err')