diff --git a/README.md b/README.md
index fd0b8f7..d888325 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ Haar Cascades is a machine learning based approach where a cascade function is t
See: https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_face_detection.html
-The third example is a simple C++ prgoram which reads from the camera and displays to a window on the screen using OpenCV:
+The third example is a simple C++ program which reads from the camera and displays the video in a window on the screen using OpenCV:
```
$ g++ -std=c++11 -Wall -I/usr/lib/opencv simple_camera.cpp -L/usr/lib -lopencv_core -lopencv_highgui -lopencv_videoio -o simple_camera
@@ -80,7 +80,11 @@ flip-method : video flip methods
Release Notes
-Initial Release March, 2019
+V2 Release September, 2019
+* L4T 32.2.1 (JetPack 4.2.2)
+* Tested on Jetson Nano
+
+Initial Release (V1) March, 2019
* L4T 32.1.0 (JetPack 4.2)
* Tested on Jetson Nano
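The Haar cascade detector that the README points to can be exercised on a still image first, independent of the CSI camera pipeline. Below is a minimal sketch, assuming the same cascade XML path that face_detect.py uses and a hypothetical local image named test.jpg:

```python
import cv2

# Same cascade file path as face_detect.py (adjust if your OpenCV install differs)
face_cascade = cv2.CascadeClassifier(
    "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
)

img = cv2.imread("test.jpg")                  # hypothetical test image (BGR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cascades operate on grayscale

# scaleFactor=1.3 shrinks the search image 30% per pyramid level;
# minNeighbors=5 requires several overlapping hits before accepting a face.
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imwrite("faces.jpg", img)
```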
diff --git a/face_detect.py b/face_detect.py
index 788c03b..e749733 100644
--- a/face_detect.py
+++ b/face_detect.py
@@ -9,41 +9,66 @@
import cv2
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
-# Defaults to 1280x720 @ 30fps
+# Defaults to a 3280x2464 @ 21fps capture, displayed at 820x616
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
-def gstreamer_pipeline (capture_width=3280, capture_height=2464, display_width=820, display_height=616, framerate=21, flip_method=0) :
- return ('nvarguscamerasrc ! '
- 'video/x-raw(memory:NVMM), '
- 'width=(int)%d, height=(int)%d, '
- 'format=(string)NV12, framerate=(fraction)%d/1 ! '
- 'nvvidconv flip-method=%d ! '
- 'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
- 'videoconvert ! '
- 'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))
-
-def face_detect() :
- face_cascade = cv2.CascadeClassifier('/usr/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
- eye_cascade = cv2.CascadeClassifier('/usr/share/OpenCV/haarcascades/haarcascade_eye.xml')
+
+def gstreamer_pipeline(
+ capture_width=3280,
+ capture_height=2464,
+ display_width=820,
+ display_height=616,
+ framerate=21,
+ flip_method=0,
+):
+ return (
+ "nvarguscamerasrc ! "
+ "video/x-raw(memory:NVMM), "
+ "width=(int)%d, height=(int)%d, "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ capture_width,
+ capture_height,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
+
+
+def face_detect():
+ face_cascade = cv2.CascadeClassifier(
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
+ )
+ eye_cascade = cv2.CascadeClassifier(
+ "/usr/share/OpenCV/haarcascades/haarcascade_eye.xml"
+ )
cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
if cap.isOpened():
- cv2.namedWindow('Face Detect', cv2.WINDOW_AUTOSIZE)
- while cv2.getWindowProperty('Face Detect',0) >= 0:
+ cv2.namedWindow("Face Detect", cv2.WINDOW_AUTOSIZE)
+ while cv2.getWindowProperty("Face Detect", 0) >= 0:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
- for (x,y,w,h) in faces:
- cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
- roi_gray = gray[y:y+h, x:x+w]
- roi_color = img[y:y+h, x:x+w]
+ for (x, y, w, h) in faces:
+ cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
+ roi_gray = gray[y : y + h, x : x + w]
+ roi_color = img[y : y + h, x : x + w]
eyes = eye_cascade.detectMultiScale(roi_gray)
- for (ex,ey,ew,eh) in eyes:
- cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
+ for (ex, ey, ew, eh) in eyes:
+ cv2.rectangle(
+ roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2
+ )
- cv2.imshow('Face Detect',img)
- keyCode = cv2.waitKey(30) & 0xff
+ cv2.imshow("Face Detect", img)
+ keyCode = cv2.waitKey(30) & 0xFF
# Stop the program on the ESC key
if keyCode == 27:
break
@@ -53,5 +78,6 @@ def face_detect() :
else:
print("Unable to open camera")
-if __name__ == '__main__':
+
+if __name__ == "__main__":
face_detect()
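One thing the reformatted loop above still does not do is check the boolean that cap.read() returns; if the camera ever drops a frame, img is None and cv2.cvtColor raises. A minimal sketch of a more defensive capture loop, assuming the gstreamer_pipeline() helper defined in this file (the window name here is only illustrative):

```python
import cv2

# Assumes gstreamer_pipeline() from face_detect.py is defined above or imported
cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
if not cap.isOpened():
    raise RuntimeError("Unable to open camera")

cv2.namedWindow("Preview", cv2.WINDOW_AUTOSIZE)
while cv2.getWindowProperty("Preview", 0) >= 0:
    ret, img = cap.read()
    if not ret:          # skip dropped frames instead of crashing downstream
        continue
    cv2.imshow("Preview", img)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
```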
diff --git a/simple_camera.py b/simple_camera.py
index ca9d286..e666482 100644
--- a/simple_camera.py
+++ b/simple_camera.py
@@ -1,47 +1,66 @@
# MIT License
# Copyright (c) 2019 JetsonHacks
# See license
-# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
+# Using a CSI camera (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit using OpenCV
# Drivers for the camera and OpenCV are included in the base image
import cv2
# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
-# Defaults to 1280x720 @ 60fps
+# Defaults to 1280x720 @ 60fps
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of the window on the screen
-def gstreamer_pipeline (capture_width=1280, capture_height=720, display_width=1280, display_height=720, framerate=60, flip_method=0) :
- return ('nvarguscamerasrc ! '
- 'video/x-raw(memory:NVMM), '
- 'width=(int)%d, height=(int)%d, '
- 'format=(string)NV12, framerate=(fraction)%d/1 ! '
- 'nvvidconv flip-method=%d ! '
- 'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '
- 'videoconvert ! '
- 'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))
+
+def gstreamer_pipeline(
+ capture_width=1280,
+ capture_height=720,
+ display_width=1280,
+ display_height=720,
+ framerate=60,
+ flip_method=0,
+):
+ return (
+ "nvarguscamerasrc ! "
+ "video/x-raw(memory:NVMM), "
+ "width=(int)%d, height=(int)%d, "
+ "format=(string)NV12, framerate=(fraction)%d/1 ! "
+ "nvvidconv flip-method=%d ! "
+ "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
+ "videoconvert ! "
+ "video/x-raw, format=(string)BGR ! appsink"
+ % (
+ capture_width,
+ capture_height,
+ framerate,
+ flip_method,
+ display_width,
+ display_height,
+ )
+ )
+
def show_camera():
# To flip the image, modify the flip_method parameter (0 and 2 are the most common)
- print gstreamer_pipeline(flip_method=0)
+ print(gstreamer_pipeline(flip_method=0))
cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
if cap.isOpened():
- window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
- # Window
- while cv2.getWindowProperty('CSI Camera',0) >= 0:
- ret_val, img = cap.read();
- cv2.imshow('CSI Camera',img)
- # This also acts as
- keyCode = cv2.waitKey(30) & 0xff
+ window_handle = cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
+        # Run until the user closes the window (getWindowProperty goes negative once it is gone)
+ while cv2.getWindowProperty("CSI Camera", 0) >= 0:
+ ret_val, img = cap.read()
+ cv2.imshow("CSI Camera", img)
+            # This also acts as a frame-rate limiter (waitKey blocks for up to 30 ms)
+ keyCode = cv2.waitKey(30) & 0xFF
# Stop the program on the ESC key
if keyCode == 27:
- break
+ break
cap.release()
cv2.destroyAllWindows()
else:
- print 'Unable to open camera'
+ print("Unable to open camera")
-if __name__ == '__main__':
+if __name__ == "__main__":
show_camera()
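For reference, with the defaults in simple_camera.py (1280x720 @ 60fps, flip_method=0) the %-formatted string above expands to the pipeline below (broken across lines here for readability; the actual string is a single line). nvarguscamerasrc delivers NV12 frames in NVMM (device) memory, nvvidconv applies the flip method and scales to the requested display size while copying the frames out of NVMM as BGRx, and videoconvert drops the padding channel so appsink hands plain BGR frames to OpenCV.

```
nvarguscamerasrc !
  video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)60/1 !
  nvvidconv flip-method=0 !
  video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx !
  videoconvert !
  video/x-raw, format=(string)BGR ! appsink
```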