import cv2
import face_recognition
import glob
import os
import numpy as np


class FaceRecognition:
    def __init__(self):
        self.known_faces = []
        self.known_face_encodings = []
        self.known_face_names = []
        self.bootstrap()
    @staticmethod
    def get_known_faces():
        known_faces = dict()
        images_list = glob.glob('./faces/*')
        for image_path in images_list:
            face_name = os.path.basename(os.path.splitext(image_path)[0])
            image = face_recognition.load_image_file(image_path)
            encodings = face_recognition.face_encodings(image)
            if not encodings:
                # Skip images in which no face could be detected
                continue
            known_faces[face_name] = encodings[0]
        return known_faces
    def load_faces_library(self):
        known_faces = self.get_known_faces()
        self.known_face_encodings = list(known_faces.values())
        self.known_face_names = list(known_faces.keys())
        return
    def process_faces_encoded(self, face_encodings):
        """
        Process the face encodings found in the current frame, comparing them with the known faces.

        :param face_encodings: list of face encodings detected in the current frame
        :return: list of names, one per encoding ("Unknown" when no known face matches)
        """
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
            name = "Unknown"
            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = self.known_face_names[first_match_index]
            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = self.known_face_names[best_match_index]
            face_names.append(name)
        return face_names
    def display_names(self, face_locations, face_names, frame):
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        return
    def bootstrap(self):
        video_capture = cv2.VideoCapture(0)
        self.load_faces_library()
        # Initialize some variables
        face_locations = []
        face_names = []
        process_this_frame = True
        while True:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            if not ret:
                # Stop if the webcam did not return a frame
                break
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses);
            # cv2.cvtColor also returns a contiguous array, which dlib expects
            rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                face_names = self.process_faces_encoded(face_encodings)
            process_this_frame = not process_this_frame
            # Display the results
            self.display_names(face_locations, face_names, frame)
            # Display the resulting image
            cv2.imshow('Video', frame)
            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Release handle to the webcam
        video_capture.release()
        cv2.destroyAllWindows()


# Only start the recognition loop when the module is run directly, not when it is imported
if __name__ == "__main__":
    FaceRecognition()