-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathtransfer_learning_combined_prediction.py
More file actions
131 lines (114 loc) · 4.34 KB
/
transfer_learning_combined_prediction.py
File metadata and controls
131 lines (114 loc) · 4.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Flatten, maximum
from keras.layers import Conv2D, MaxPooling2D, Reshape, Lambda
from keras import backend as K
from scipy import misc
from numpy import array
import numpy as np
import matplotlib.pyplot as plt
from skimage import color
from sklearn import svm
import tools
batch_size = 128  # unused in this script: no model.fit() call; weights are loaded from disk
num_classes = 32  # unused here (to_categorical calls below are commented out); class_num is used instead
epochs = 474  # unused in this script: the network is not trained here
patch_size = 32  # side length of the square patches fed to the CNN
# input image dimensions
img_rows, img_cols = 64, 64  # full on-disk image size; patches are cropped from these
# the data, shuffled and split between train and test sets
# Read the training split: images 1..52 of each of the 32 class directories.
class_num = 32      # classes live in directories selective_32/data/1 .. /32
train_ele_num = 52  # first 52 images per class form the training set
x_train = []
y_train = []
for label in range(class_num):
    for idx in range(train_ele_num):
        # files are named selective_32/data/<c>/<c>_<n>.jpg with 1-based indices
        path = 'selective_32/data/' + str(label + 1) + '/' \
               + str(label + 1) + '_' + str(idx + 1) + '.jpg'
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 --
        # this requires an old SciPy (or a port to imageio); confirm environment.
        image = misc.imread(path)
        # flatten the H x W grayscale image into a 1-D pixel vector
        x_train.append(np.reshape(image, image.shape[0] * image.shape[1]))
        y_train.append(label)
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
# Read the test split: images 53..64 of every class (12 test images per class).
ele_num = 64  # total images per class; indices train_ele_num..ele_num-1 are held out
x_test = []
y_test = []
for label in range(class_num):
    for idx in range(train_ele_num, ele_num):
        path = 'selective_32/data/' + str(label + 1) + '/' \
               + str(label + 1) + '_' + str(idx + 1) + '.jpg'
        image = misc.imread(path)
        # same flattening as the training loader: H x W -> 1-D vector
        x_test.append(np.reshape(image, image.shape[0] * image.shape[1]))
        y_test.append(label)
x_test = np.asarray(x_test)
y_test = np.asarray(y_test)
# Un-flatten the pixel vectors back into 2-D single-channel images, honouring
# the backend's channel ordering.  Note the network's input_shape is sized to
# patch_size, not the full image: patches are cropped from the images below.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, patch_size, patch_size)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (patch_size, patch_size, 1)
# scale 8-bit pixel values into [0, 1]
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
#y_train = keras.utils.to_categorical(y_train, num_classes)
#y_test = keras.utils.to_categorical(y_test, num_classes)
# ---- patch classifier: three maxout-conv stages plus a dense maxout head ----
def _maxout_conv(tensor, filters, kernel, tag):
    # Maxout unit: element-wise max of two parallel ReLU convolutions.
    # Layer names '<tag>_l' / '<tag>_r' must match the checkpoint exactly,
    # because weights are loaded by name below.
    a = Conv2D(filters=filters, kernel_size=kernel, activation='relu',
               padding='same', name=tag + '_l')(tensor)
    b = Conv2D(filters=filters, kernel_size=kernel, activation='relu',
               padding='same', name=tag + '_r')(tensor)
    return maximum([a, b])

inp = Input(shape=input_shape)
# hidden layer 0
net = _maxout_conv(inp, 96, (8, 8), 'h0')
net = MaxPooling2D(pool_size=(4, 4), strides=(2, 2), name='pool0')(net)
# hidden layer 1
net = _maxout_conv(net, 192, (8, 8), 'h1')
net = MaxPooling2D(pool_size=(4, 4), strides=(2, 2), name='pool1')(net)
# hidden layer 2
net = _maxout_conv(net, 192, (5, 5), 'h2')
net = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(net)
# flatten the conv feature map for the dense head
dims = net.get_shape().as_list()
flat = Reshape((dims[1] * dims[2] * dims[3],))(net)
print(flat.shape)
# dense maxout: element-wise max over five parallel 500-unit linear layers
branches = [Dense(500, name='dense%d' % k)(flat) for k in range(1, 6)]
maxout = maximum(branches)
final = Model(inp, maxout)
print(final.summary())
# transfer learning: pull pretrained weights in by matching layer names
final.load_weights('macro_48_48_model.h5', by_name=True)
# Crop one random patch from every training image, embed the patches with the
# pretrained CNN, and fit a linear SVM on the resulting feature vectors.
patches = np.asarray([tools.random_image_crop(img, patch_size) for img in x_train])
train_features = final.predict(patches)
labels = np.ravel(y_train)
clf = svm.LinearSVC()
print(train_features.shape)
print(labels.shape)
clf.fit(train_features, labels)
print('training finished.')
# Classify every test image by majority vote over a grid of its patches.
final_prediction = []
for im in x_test:
    # embed each grid patch with the CNN, then classify every patch embedding
    grids = tools.extract_grid_patches(im, patch_size)
    test_features = final.predict(grids)
    predictions = clf.predict(test_features).tolist()
    # the most frequent per-patch label decides the image label
    final_prediction.append(max(set(predictions), key=predictions.count))
y_test = np.ravel(y_test)
# BUG FIX: under Python 2 (this file imports print_function, so 2.x is a
# target, but NOT division) sum(...) / y_test.shape[0] is integer division,
# printing 0 for any imperfect accuracy.  np.mean always yields a float ratio.
accuracy = np.mean(y_test == np.asarray(final_prediction))
print('accuracy:' + str(accuracy))