-
Notifications
You must be signed in to change notification settings - Fork 21
/
reconocedor_automatico.py
97 lines (91 loc) · 3.94 KB
/
reconocedor_automatico.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import os
# Show only TensorFlow errors (suppress INFO/WARNING logs)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Disable the GPU (run on CPU) — uncomment the next line to force CPU-only
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from alpr.alpr import ALPR
from argparse import ArgumentParser
import yaml
import logging
from timeit import default_timer as timer
import cv2
# Module-level logger configured at INFO so progress messages are visible
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main_demo(cfg, demo=True, benchmark=True, save_vid=False):
    """Run the ALPR pipeline over the video/image source described in *cfg*.

    Args:
        cfg: parsed config dict. Uses ``cfg['modelo']`` (model config) and
            ``cfg['video']`` with keys ``fuente`` (source path/URL/index) and
            ``frecuencia_inferencia`` (run inference every N frames in
            non-demo mode).
        demo: if True, draw predictions and show them in a window instead of
            only running/saving recognitions.
        benchmark: if True, measure inference time (includes pre/post
            processing) and display/print it.
        save_vid: if True, write the annotated frames to ./alpr-result.avi.
    """
    alpr = ALPR(cfg['modelo'])
    video_path = cfg['video']['fuente']
    cap = cv2.VideoCapture(video_path)
    is_img = cv2.haveImageReader(video_path)
    # Still image: wait indefinitely for a key press. Video: poll every 1 ms.
    cv2_wait = 0 if is_img else 1
    logger.info(f'Se va analizar la fuente: {video_path}')
    frame_id = 0
    out = None
    if save_vid:
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
        size = (width, height)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('alpr-result.avi', fourcc, 20.0, size)
    # How many frames between inferences. Guard against a non-positive config
    # value, which would raise ZeroDivisionError in `frame_id % intervalo`.
    intervalo_reconocimiento = max(1, cfg['video']['frecuencia_inferencia'])
    if not is_img:
        logger.info(f'El intervalo del reconocimiento para el video es de: {intervalo_reconocimiento}')
    if demo:
        # Loop-invariant: create the display window once, not every frame.
        cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
    try:
        while True:
            return_value, frame = cap.read()
            if return_value:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            else:
                # For an IP camera stream that drops frames or stalls,
                # uncomment the two lines below to reconnect and retry
                # instead of stopping:
                # cap = cv2.VideoCapture(video_path)
                # continue
                break
            if demo:
                frame_w_pred, total_time = alpr.mostrar_predicts(frame)
                frame_w_pred = cv2.cvtColor(frame_w_pred, cv2.COLOR_RGB2BGR)
                frame_w_pred_r = cv2.resize(frame_w_pred, dsize=(1400, 1000))
                if benchmark:
                    # Guard: timer resolution can report 0 elapsed time.
                    fps = 1 / total_time if total_time > 0 else float('inf')
                    display_bench = f'ms: {total_time:.4f} FPS: {fps:.0f}'
                    fontScale = 1.5
                    cv2.putText(frame_w_pred_r, display_bench, (5, 45), cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale, (10, 140, 10), 4)
                if save_vid:
                    # Write the full-resolution annotated frame (matches the
                    # VideoWriter size), not the resized display copy.
                    out.write(frame_w_pred)
                cv2.imshow("result", frame_w_pred_r)
                if cv2.waitKey(cv2_wait) & 0xFF == ord('q'):
                    break
            else:
                if frame_id % intervalo_reconocimiento == 0:
                    start = timer()
                    alpr.predict(frame)
                    total_time = timer() - start
                    if benchmark:
                        fps = 1 / total_time if total_time > 0 else float('inf')
                        display_bench = f'ms: {total_time:.4f} FPS: {fps:.0f}'
                        print(display_bench, flush=True)
            frame_id += 1
    finally:
        # Release capture/writer/windows even if inference raises mid-loop.
        cap.release()
        if out is not None:
            out.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Top-level boundary: log any unexpected failure instead of a raw traceback.
    try:
        parser = ArgumentParser()
        parser.add_argument("--cfg", dest="cfg_file", help="Path del archivo de config, \
default: ./config.yaml", default='config.yaml')
        parser.add_argument("--demo", dest="demo",
                            action='store_true', help="En vez de guardar las patentes, mostrar las predicciones")
        parser.add_argument("--guardar_video", dest="save_video",
                            action='store_true', help="Guardar video en ./alpr-result.avi")
        parser.add_argument("--benchmark", dest="bench",
                            action='store_true', help="Medir la inferencia (incluye todo el pre/post processing)")
        args = parser.parse_args()
        # Load config first; on a YAML error do NOT fall through to main_demo
        # with `cfg` unbound (the original logged the error and then crashed
        # with a masked NameError).
        with open(args.cfg_file, 'r') as stream:
            cfg = yaml.safe_load(stream)
        main_demo(cfg, args.demo, args.bench, args.save_video)
    except yaml.YAMLError as exc:
        logger.exception(exc)
    except Exception as e:
        logger.exception(e)