forked from miguelalba96/tensorflow-facialexpr-recognition
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_landmarks.py
More file actions
154 lines (124 loc) · 4.73 KB
/
test_landmarks.py
File metadata and controls
154 lines (124 loc) · 4.73 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
import os
import glob
import dlib
import pandas as pd
import numpy as np
import seaborn as sns
import sklearn.metrics
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
import util
from prep.facialexpr import encode_relabels
from lib.config import ConfigReader, TestNetConfig, TrainNetConfig, DataConfig
from lib.CNNS.land_marks import _facenet
from train_landmarks import get_landmarks, create_batch_landmarks
# Path to the pre-trained dlib 68-point facial-landmark model file.
#p = '/Users/miguelangelalbaacosta/Downloads/shape_predictor_68_face_landmarks.dat'
p = '/home/miguel_alba/data/shape_predictor_68_face_landmarks.dat'
# Module-level dlib face detector and landmark predictor, shared by all
# evaluation helpers below.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)
# Fixed detection rectangle passed to get_landmarks(): the evaluation crops
# are already face-aligned, so one static box is used instead of running the
# detector per image.
# NOTE(review): the 1..47 box suggests ~48x48 crops — confirm against prep.
face_rects = [dlib.rectangle(left=1, top=1, right=47, bottom=47)]
def prep_crops(data):
    """Add a trailing channel axis to every example's grayscale crop.

    Args:
        data: iterable of example dicts, each carrying a 2-D 'crop' array.

    Returns:
        List of the same example dicts, with 'crop' replaced by a copy of
        shape (H, W, 1). Note the input dicts are updated in place.
    """
    prepared = []
    for example in data:
        # Copy first so the original array is untouched, then append the
        # singleton channel dimension expected by the network input.
        example['crop'] = example['crop'].copy()[..., None]
        prepared.append(example)
    return prepared
def relab_one_hot(labels):
    """Convert one-hot (or softmax) label vectors to emotion-name strings.

    Args:
        labels: array-like of shape (N, 8); the argmax over the last axis
            selects the class index for each row.

    Returns:
        np.ndarray of N emotion-name strings.
    """
    label_dict = {
        0: 'anger',
        1: 'contempt',
        2: 'disgust',
        3: 'fear',
        4: 'happiness',
        5: 'neutral',
        6: 'sadness',
        7: 'surprise'}
    # Fix: the original enumerated `labels` but ignored the loop variable and
    # re-indexed `labels[i]`; iterate rows directly instead.
    return np.asarray([label_dict[int(np.argmax(row, axis=-1))] for row in labels])
def confusion_matrix(groundtruth, new_pred, out_dir, label_names=None):
    """Plot a confusion-matrix heatmap and dump accuracy/precision/recall.

    Args:
        groundtruth: sequence of true class labels.
        new_pred: sequence of predicted class labels.
        out_dir: directory that receives 'result.json'.
        label_names: optional class names for the plot axes and report;
            defaults to the 8 facial-expression labels.

    Returns:
        None. Side effects: shows the heatmap, prints per-class metrics,
        and writes '{out_dir}/result.json'.
    """
    plt.style.use('ggplot')
    confusion = sklearn.metrics.confusion_matrix(groundtruth, new_pred)
    if label_names is not None:
        labels = label_names
    else:
        labels = ['anger', 'contempt', 'disgust', 'fear', 'happiness', 'neutral', 'sadness', 'surprise']
    fig, ax = plt.subplots(1, figsize=(10, 10))
    ax = sns.heatmap(confusion, ax=ax, cmap=plt.cm.Blues, annot=True)
    ax.set_xticklabels(labels)
    ax.set_yticklabels(labels)
    plt.title('Confusion matrix (Validation set)')
    plt.ylabel('True class')
    plt.xlabel('Predicted class')
    plt.show()
    # Overall accuracy: correctly-classified samples (diagonal) over total.
    model_accuracy = np.trace(confusion) / confusion.sum()
    print("model accuracy: ", model_accuracy)
    # Per-class precision: diagonal normalised by predicted-class counts.
    precision = np.diagonal(confusion) / np.sum(confusion, axis=0)
    print(pd.DataFrame({'label': labels, 'Precision': precision}))
    # Per-class recall: diagonal normalised by true-class counts.
    recall = np.diagonal(confusion) / np.sum(confusion, axis=1)
    print(pd.DataFrame({'label': labels, 'Recall': recall}))
    # BUG FIX: the JSON key was misspelled 'presicion'; consumers of
    # result.json should read the corrected 'precision' key.
    results = dict(model_accuracy=model_accuracy,
                   classes=labels,
                   precision=precision.tolist(),
                   recall=recall.tolist())
    util.spit_json('{}/result.json'.format(out_dir), results)
    return None
def evaluation(conf_path):
    """Evaluate the trained landmark facenet over the eval set.

    Loads the experiment config, restores the checkpointed model, runs a
    softmax prediction for every eval crop (with dlib landmarks as the
    auxiliary input), pickles the raw predictions, and renders the
    confusion matrix / metric report.

    Args:
        conf_path: path to the experiment YAML config.

    Returns:
        List of [softmax_prediction, one_hot_groundtruth] pairs; the same
        list is saved to '{results_dir}/predictions.pz'.
    """
    config_reader = ConfigReader(conf_path)
    train_config = TrainNetConfig(config_reader.get_train_config())
    test_config = TestNetConfig(config_reader.get_test_config())
    data_config = DataConfig(config_reader.get_train_config())
    ckpt_path = '{}/logs/train'.format(os.path.join(test_config.model_path, 'models', train_config.name))
    eval_files = glob.glob('{}/eval_*.pz'.format(data_config.eval_dir))
    eval_data = []
    for fn in tqdm(eval_files):
        _data = util.load1(fn)
        _data = prep_crops(_data)
        eval_data.extend(_data)
    net = _facenet(test_config)
    net.batch_model()
    saver = tf.train.Saver(tf.global_variables())
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    net.load(sess, saver, ckpt_path)
    # BUG FIX: build the softmax op once. The original called
    # tf.nn.softmax(net.logits) inside the loop, adding a new graph node on
    # every example and steadily bloating the graph / slowing sess.run.
    softmax_op = tf.nn.softmax(net.logits)
    results = []
    try:
        for ex in tqdm(eval_data):
            crop = np.array([ex['crop'].copy()]).astype(np.float32)
            # Landmarks are computed on the 2-D grayscale view of the crop.
            landmark = np.array([get_landmarks(ex['crop'][:, :, 0], face_rects)]).astype(np.float32)
            ground_truth = encode_relabels(ex['label'])
            predicted = sess.run(softmax_op, feed_dict={net.x: crop, net.landmark: landmark})
            results.append([predicted, ground_truth])
    except tf.errors.OutOfRangeError:
        print('===INFO====: Test completed, all crops were successfully evaluated')
    finally:
        # BUG FIX: always release the session; the original only closed it
        # on the OutOfRangeError path.
        sess.close()
    results_dir = os.path.join(test_config.model_path, 'results', train_config.name)
    util.mdir(results_dir)
    util.save1('{}/predictions.pz'.format(results_dir), results)
    predictions = [relab_one_hot(p[0]) for p in results]
    labels = [relab_one_hot([p[1]]) for p in results]
    confusion_matrix(labels, predictions, results_dir)
    return results
def _test_201909_():
    """Local (Sept 2019) smoke test: evaluate the experiment_2 config."""
    evaluation('/Volumes/SSD_ML/facialexpr/lib/experiments/experiment_2.yml')
    return None
def cloud_tesing():
    """Cloud smoke test: evaluate the experiment_cloud config.

    NOTE(review): name misspells 'testing', but it is kept as-is because
    main() calls it by this name.
    """
    evaluation('/home/miguel_alba/facialexpr/lib/experiments/experiment_cloud.yml')
    return None
def main():
    """Entry point: run the cloud evaluation (local test kept disabled)."""
    # _test_201909_()
    cloud_tesing()


if __name__ == '__main__':
    main()