-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathvideo_facial_landmarks.py
More file actions
133 lines (107 loc) · 4.01 KB
/
video_facial_landmarks.py
File metadata and controls
133 lines (107 loc) · 4.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
# USAGE
# python video_facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat
import argparse
import time
import cv2
import dlib
import pyautogui
from imutils import face_utils
# import the necessary packages
from imutils.video import VideoStream
import imutils
from scipy.spatial import distance as dist
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for a 6-point eye landmark set.

    The EAR is the mean of the two vertical eyelid distances divided by
    the horizontal corner-to-corner distance; it drops sharply when the
    eye closes, which makes it a simple blink signal.
    """
    # vertical distances between the upper and lower eyelid landmarks
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the two eye corners
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
# Disable the fail-safe (moving the mouse into a screen corner normally
# aborts pyautogui) because this script intentionally drives the cursor.
pyautogui.FAILSAFE = False

# Eye-aspect-ratio below this value counts as a closed eye.
BLINK_AR_THRESH = 0.25
# Number of consecutive closed-eye frames required to register a blink.
EYE_AR_CONSEC_FRAMES = 3
# Frames used to establish the neutral (resting) head pose baseline.
CALIBRATION_FRAMES = 100

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
	help="path to facial landmark predictor")
args = vars(ap.parse_args())

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(left_eye_start, left_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(right_eye_start, right_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = VideoStream().start()
time.sleep(2.0)

frame_count = 0
calib_samples = 0   # frames in which a face was actually detected
vert_zero = 0       # baseline vertical offset at the neutral pose
horiz_zero = 0      # baseline horizontal offset at the neutral pose
vert_acc = 0
horiz_acc = 0
blink_frame_counter = 0
total_blinks = 0

# loop over the frames from the video stream
while True:
	# grab the frame from the threaded video stream; it can be None if
	# the camera disconnects or the stream ends, so bail out cleanly
	# instead of crashing inside imutils.resize
	frame = vs.read()
	if frame is None:
		break
	# resize to a maximum width of 400 pixels and convert to grayscale
	frame = imutils.resize(frame, width=400)
	frame_count += 1
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	# detect faces in the grayscale frame
	rects = detector(gray, 0)
	for rect in rects:
		# determine the facial landmarks for the face region, then
		# convert the facial landmark (x, y)-coordinates to a NumPy
		# array
		shape = predictor(gray, rect)
		shape = face_utils.shape_to_np(shape)

		# landmarks used to estimate head movement:
		# 39 = inner corner of the left eye, 21 = inner end of the
		# left eyebrow, 27 = top of the nose bridge
		leftEyeCorner = shape[39]
		leftEyebrowCorner = shape[21]
		noseTop = shape[27]
		vertical_diff = leftEyeCorner[1] - leftEyebrowCorner[1]
		horizontal_diff = noseTop[0] - leftEyeCorner[0]
		v_diff = vert_zero - vertical_diff
		h_diff = horiz_zero - horizontal_diff

		if frame_count < CALIBRATION_FRAMES:
			# calibration phase: accumulate the neutral-pose offsets
			vert_acc += vertical_diff
			horiz_acc += horizontal_diff
			calib_samples += 1
		else:
			# average only over frames where a face was detected; the
			# original divided by a fixed 100, which skews the baseline
			# whenever detection missed frames during calibration
			if calib_samples > 0:
				horiz_zero = int(horiz_acc / calib_samples)
				vert_zero = int(vert_acc / calib_samples)
			# dead-zone thresholds suppress jitter around the baseline
			h_movement = 0 if abs(h_diff) < 7 else h_diff
			v_movement = 0 if abs(v_diff) < 3 else v_diff
			pyautogui.moveRel(h_movement, v_movement)

		# blink detection: average the eye aspect ratio of both eyes
		left_eye = shape[left_eye_start:left_eye_end]
		right_eye = shape[right_eye_start:right_eye_end]
		ear = (eye_aspect_ratio(left_eye) + eye_aspect_ratio(right_eye)) / 2.0
		if ear < BLINK_AR_THRESH:
			blink_frame_counter += 1
		else:
			if blink_frame_counter > EYE_AR_CONSEC_FRAMES:
				# a sustained blink was detected: treat it as a click
				total_blinks += 1
				pyautogui.click()
			blink_frame_counter = 0

		# loop over the (x, y)-coordinates for the facial landmarks
		# and draw them on the image for visual feedback
		for (x, y) in shape:
			cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

	# show the frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF
	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()