diff --git a/live-recognition/live_recognition.py b/live-recognition/live_recognition.py
new file mode 100644
index 0000000..93fdf99
--- /dev/null
+++ b/live-recognition/live_recognition.py
@@ -0,0 +1,53 @@
+import itertools
+import time
+
+import cv2
+import numpy as np
+from darkflow.net.build import TFNet
+
+# Darkflow model configuration: single-class tiny-YOLO-VOC network,
+# 70% confidence threshold, full GPU utilization.
+options = {
+    'model': 'cfg/tiny-yolo-voc-1c.cfg',
+    'load': 'bin/tiny-yolo-voc.weights',
+    'threshold': 0.7,
+    'gpu': 1.0
+}
+
+tfnet = TFNet(options)
+# Palette of random BGR colors for the detection boxes.
+colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]
+
+capture = cv2.VideoCapture(0)
+capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
+capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
+
+try:
+    while True:
+        stime = time.time()
+        ret, frame = capture.read()
+        if not ret:
+            # Camera read failed: stop instead of spinning forever
+            # (previously the loop skipped waitKey entirely on failure,
+            # leaving no way to exit).
+            break
+        results = tfnet.return_predict(frame)
+        # cycle() keeps drawing when there are more detections than
+        # palette entries; a plain zip(colors, results) silently dropped
+        # every detection past the tenth.
+        for color, result in zip(itertools.cycle(colors), results):
+            tl = (result['topleft']['x'], result['topleft']['y'])
+            br = (result['bottomright']['x'], result['bottomright']['y'])
+            label = result['label']
+            confidence = result['confidence']
+            text = '{}: {:.0f}%'.format(label, confidence * 100)
+            frame = cv2.rectangle(frame, tl, br, color, 5)
+            frame = cv2.putText(
+                frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
+        cv2.imshow('frame', frame)
+        # max() guards against ZeroDivisionError on a degenerate
+        # zero-elapsed-time iteration.
+        print('FPS {:.2f}'.format(1 / max(time.time() - stime, 1e-6)))
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+finally:
+    # Always release the camera and close windows, even on an
+    # exception or Ctrl-C mid-loop.
+    capture.release()
+    cv2.destroyAllWindows()