-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdetection.py
More file actions
124 lines (97 loc) · 4.42 KB
/
detection.py
File metadata and controls
124 lines (97 loc) · 4.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
from PyQt5.QtWidgets import QMainWindow
from PyQt5.uic import loadUi
from PyQt5.QtCore import QThread, Qt, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
import cv2
import numpy as np
import time
# Handles the YOLOv4 detection algorithm, saves detected frames and sends alert to the server-side application
class Detection(QThread):
    """QThread that runs YOLOv4 weapon detection on the webcam feed."""

    # NOTE: a pyqtSignal MUST be declared as a class attribute — PyQt only
    # creates bound signals for class-level declarations, so declaring it
    # inside __init__ would leave self.changePixmap.emit() broken.
    changePixmap = pyqtSignal(QImage)

    def __init__(self):
        # Initialise the underlying QThread machinery.
        super(Detection, self).__init__()
# Runs the detection model, evaluates detections and draws boxes around detected objects
def run(self):
# Loads Yolov4
net = cv2.dnn.readNet("weights\yolov4custom.weights", "cfg/yolov4.cfg")
classes = []
# Loads object names
with open("obj.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
font = cv2.FONT_HERSHEY_PLAIN
starting_time = time.time() - 11
self.running = True
# Starts camera
cap = cv2.VideoCapture(0)
# Detection while loop
while self.running:
ret, frame = cap.read()
if ret:
height, width, channels = frame.shape
# Running the detection model
blob = cv2.dnn.blobFromImage(
frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False
)
net.setInput(blob)
outs = net.forward(output_layers)
# Evaluating detections
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# If detection confidance is above 98% a weapon was detected
if confidence > 0.20:
# Calculating coordinates
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.8, 0.3)
# Draw boxes around detected objects
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
confidence = confidences[i]
color = (256, 0, 0)
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(
frame,
label + " {0:.1%}".format(confidence),
(x, y - 20),
font,
3,
color,
3,
)
elapsed_time = starting_time - time.time()
# Save detected frame every 10 seconds
if elapsed_time <= -5:
starting_time = time.time()
self.save_detection(frame)
# Showing final result
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
bytesPerLine = channels * width
convertToQtFormat = QImage(
rgbImage.data, width, height, bytesPerLine, QImage.Format_RGB888
)
p = convertToQtFormat.scaled(874, 530, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
# Saves detected frame as a .jpg within the saved_alert folder
def save_detection(self, frame):
cv2.imwrite("saved_frame/frame.jpg", frame)
print("Frame Saved")
# Sends alert to the server