ML_video.py
import argparse
from time import time

import cv2
import numpy as np
import torch
from colorama import Fore, init

import gen_annotation

# Enable ANSI colors on Windows and reset the color after every print.
init(autoreset=True)

parser = argparse.ArgumentParser()
parser.add_argument('--video_path', type=str, help='Input Video Path', required=True)
parser.add_argument('--last_annotation_path', type=str, help='Last Annotation Path')
args = parser.parse_args()
last_frame = 0
if args.last_annotation_path is None:
    gen_annotation.create_dir_annotation()
else:
    # Resume: load the index of the last frame that was already annotated.
    last_frame = gen_annotation.load_annotation(args.last_annotation_path)
# Model: custom YOLOv5 weights loaded from a local clone of the yolov5 repo.
model = torch.hub.load('yolov5', 'custom', path='AI-CAR2-MODEL.pt', source='local', force_reload=True)

cap = cv2.VideoCapture(args.video_path)
larguraCap, alturaCap = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]  # zipping against this below also caps detections at 10 per frame
quant_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

stime_video = time()
count_frames = 0
porcent_processamento = ''
while True:
    ret, frame = cap.read()
    if not ret:  # check before using the frame: cap.read() returns (False, None) at end of video
        break
    frame = cv2.resize(frame, (640, 480))
    tempo_atual = (time() - stime_video) / 60
    temp_porcent_processamento = '{:.2f}%'.format((count_frames / quant_frames) * 100)
    if temp_porcent_processamento != porcent_processamento:
        print(Fore.BLUE + "Progress: " + temp_porcent_processamento, 'Elapsed: {:.2f}min'.format(tempo_atual))
        porcent_processamento = temp_porcent_processamento
    count_frames += 1
    if count_frames > last_frame:
        start_time = time()
        results = model(frame)
        end_time = time()
        fps = 1 / max(end_time - start_time, 1e-6)  # guard against division by zero on very fast inference
        labels = []
        bboxs = []
        # colors has 10 entries, so zip keeps at most 10 detections per frame; color itself is unused here.
        for color, (_, result) in zip(colors, results.pandas().xyxy[0].iterrows()):
            confidence = result['confidence']
            if confidence > 0.8:
                tl = (int(result['xmin']), int(result['ymin']))
                br = (int(result['xmax']), int(result['ymax']))
                labels.append(result['name'])
                print(Fore.GREEN + f"Detected: {result['name']} C: {confidence}")
                # bbox = [x_min, y_min, width, height]
                bboxs.append([tl[0], tl[1], br[0] - tl[0], br[1] - tl[1]])
        if len(labels) > 0:
            _, result = gen_annotation.add(image=frame, labels=labels, bboxs=bboxs, auto_commit=True, last_frame=quant_frames)
            print(Fore.YELLOW + result + " FPS: {:.2f}".format(fps))
    else:
        print(Fore.YELLOW + "Image has already been annotated")
cap.release()
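
# Usage sketch (assumptions: a local clone of the yolov5 repo sits next to this
# script and the AI-CAR2-MODEL.pt weights are in the working directory, as the
# torch.hub.load call above expects; the annotation path is whatever a previous
# run produced via gen_annotation, so the filename here is illustrative):
#
#   python ML_video.py --video_path input.mp4
#   python ML_video.py --video_path input.mp4 --last_annotation_path <previous annotation>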