Detect a Face.py
# Face Recognition
import cv2
face_cascade = cv2.CascadeClassifier('/home/shaxpy/Desktop/P23-Module1-Face-Recognition/Module_1_Face_Recognition/haarcascade_frontalface_default.xml') # We load the cascade for the face.
eye_cascade = cv2.CascadeClassifier('/home/shaxpy/Desktop/P23-Module1-Face-Recognition/Module_1_Face_Recognition/haarcascade_eye.xml') # We load the cascade for the eyes.
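# Note (assumption, not from the original script): if the OpenCV build ships its
# bundled data files (cv2.data is available in the opencv-python packages), the
# cascades can be loaded without hard-coded absolute paths, e.g.:
#   face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
#   eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')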
font = cv2.FONT_HERSHEY_SIMPLEX # Font used for the text labels.

def detect(gray, frame): # We create a function that takes as input the image in black and white (gray) and the original image (frame), and that will return the same image with the detector rectangles.
    faces = face_cascade.detectMultiScale(gray, 1.3, 5) # We apply the detectMultiScale method from the face cascade to locate one or several faces in the image.
    for (x, y, w, h) in faces: # For each detected face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2) # We paint a rectangle around the face.
        roi_gray = gray[y:y+h, x:x+w] # We get the region of interest in the black and white image.
        roi_color = frame[y:y+h, x:x+w] # We get the region of interest in the colored image.
        cv2.putText(frame, 'Face', (x+100, y-10), font, 2.5, (255, 0, 0), 4, cv2.LINE_AA) # We write the 'Face' label above the rectangle.
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 24) # We apply the detectMultiScale method to locate one or several eyes in the face region.
        for (ex, ey, ew, eh) in eyes: # For each detected eye:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2) # We paint a rectangle around the eyes, but inside the referential of the face.
            cv2.putText(frame, 'Eyes', (x+ex, y+ey-5), font, 1.5, (0, 200, 0), 2, cv2.LINE_8) # We write the 'Eyes' label; ex and ey are relative to the face region, so we offset them by (x, y) to draw on the full frame.
    return frame # We return the image with the detector rectangles.
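
# Note on the detectMultiScale arguments used above: the second argument is the
# scale factor (how much the image is shrunk at each step of the image pyramid,
# e.g. 1.3 means a 30% reduction per step) and the third is minNeighbors (how many
# overlapping candidate detections are required before a detection is kept).
# A larger minNeighbors reduces false positives at the cost of missing some objects.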
video_capture = cv2.VideoCapture(0) # We turn the webcam on.
while True: # We repeat infinitely (until break):
    _, frame = video_capture.read() # We get the last frame.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # We do some colour transformations.
    canvas = detect(gray, frame) # We get the output of our detect function.
    cv2.imshow('Video', canvas) # We display the outputs.
    if cv2.waitKey(1) & 0xFF == ord('q'): # If we press 'q' on the keyboard:
        break # We stop the loop.
video_capture.release() # We turn the webcam off.
cv2.destroyAllWindows() # We destroy all the windows inside which the images were displayed.
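
# Minimal usage sketch (assumption, not part of the original script): the same
# detect() function can also be applied to a still image instead of the webcam, e.g.:
#   img = cv2.imread('photo.jpg')  # 'photo.jpg' is a hypothetical input file
#   output = detect(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), img)
#   cv2.imshow('Image', output)
#   cv2.waitKey(0)
#   cv2.destroyAllWindows()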