diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..c1efe8e9
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,28 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines](https://opensource.google.com/conduct/).
diff --git a/LICENSE b/LICENSE
index 261eeb9e..d6456956 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,3 +1,4 @@
+
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
diff --git a/download_models.sh b/download_models.sh
new file mode 100644
index 00000000..7a9dac5b
--- /dev/null
+++ b/download_models.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+mkdir -p all_models
+wget https://dl.google.com/coral/canned_models/all_models.tar.gz
+tar -C all_models -xvzf all_models.tar.gz
+rm -f all_models.tar.gz
diff --git a/gstreamer/README.md b/gstreamer/README.md
new file mode 100644
index 00000000..4e128ea7
--- /dev/null
+++ b/gstreamer/README.md
@@ -0,0 +1,33 @@
+This folder contains two examples that use GStreamer to obtain camera images.
+The examples work on a Linux machine with a webcam, on a Raspberry Pi with the
+Raspicam, and on the Coral Dev Board with the Coral camera. On the first two
+platforms you will also need a Coral USB Accelerator to run the models.
+
+## Installation
+
+Make sure the GStreamer libraries are installed. This isn't necessary on the
+Coral Dev Board, but it is on a Raspberry Pi or a general Linux system:
+
+```
+sh install_requirements.sh
+```
+
+## Classification Demo
+
+```
+python3 classify.py
+```
+
+You can change the model and the labels file using the ```--model``` and
+```--labels``` flags.
+
+## Detection Demo (SSD models)
+
+```
+python3 detect.py
+```
+
+As before, you can change the model and the labels file using the ```--model```
+and ```--labels``` flags.
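+
+The label files in ```all_models``` use a plain layout of an integer index,
+whitespace, and a class name on each line; most of the demos parse them with a
+small regex-based ```load_labels()``` helper. A minimal sketch of that format
+and of the mapping it produces (the file excerpt and the lookup below are
+assumptions based on the canned ```coco_labels.txt```):
+
+```
+# Hypothetical excerpt of a label file: "<index> <name>" per line, e.g.
+#   0  person
+#   1  bicycle
+#   2  car
+import re
+
+def load_labels(path):
+    """Return {index: name}, mirroring the loader used by the demos."""
+    p = re.compile(r'\s*(\d+)(.+)')
+    with open(path, 'r', encoding='utf-8') as f:
+        return {int(num): text.strip() for num, text in
+                (p.match(line).groups() for line in f)}
+
+# labels = load_labels('../all_models/coco_labels.txt')
+# labels[0]  # -> 'person' with the canned COCO labels
+```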
+ + diff --git a/gstreamer/classify.py b/gstreamer/classify.py new file mode 100644 index 00000000..826a6d21 --- /dev/null +++ b/gstreamer/classify.py @@ -0,0 +1,74 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A demo which runs object classification on camera frames.""" +import argparse +import time +import re +import svgwrite +import imp +import os +from edgetpu.classification.engine import ClassificationEngine +import gstreamer + +def load_labels(path): + p = re.compile(r'\s*(\d+)(.+)') + with open(path, 'r', encoding='utf-8') as f: + lines = (p.match(line).groups() for line in f.readlines()) + return {int(num): text.strip() for num, text in lines} + +def generate_svg(dwg, text_lines): + for y, line in enumerate(text_lines): + dwg.add(dwg.text(line, insert=(11, y*20+1), fill='black', font_size='20')) + dwg.add(dwg.text(line, insert=(10, y*20), fill='white', font_size='20')) + +def main(): + default_model_dir = "../all_models" + default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite' + default_labels = 'imagenet_labels.txt' + parser = argparse.ArgumentParser() + parser.add_argument('--model', help='.tflite model path', + default=os.path.join(default_model_dir,default_model)) + parser.add_argument('--labels', help='label file path', + default=os.path.join(default_model_dir, default_labels)) + parser.add_argument('--top_k', type=int, default=3, + help='number of classes with highest score to display') + parser.add_argument('--threshold', type=float, default=0.1, + help='class score threshold') + args = parser.parse_args() + + print("Loading %s with %s labels."%(args.model, args.labels)) + engine = ClassificationEngine(args.model) + labels = load_labels(args.labels) + + last_time = time.monotonic() + def user_callback(image, svg_canvas): + nonlocal last_time + start_time = time.monotonic() + results = engine.ClassifyWithImage(image, threshold=args.threshold, top_k=args.top_k) + end_time = time.monotonic() + text_lines = [ + 'Inference: %.2f ms' %((end_time - start_time) * 1000), + 'FPS: %.2f fps' %(1.0/(end_time - last_time)), + ] + for index, score in results: + text_lines.append('score=%.2f: %s' % (score, labels[index])) + print(' '.join(text_lines)) + last_time = end_time + generate_svg(svg_canvas, text_lines) + + result = gstreamer.run_pipeline(user_callback) + +if __name__ == '__main__': + main() diff --git a/gstreamer/detect.py b/gstreamer/detect.py new file mode 100644 index 00000000..c803ddc2 --- /dev/null +++ b/gstreamer/detect.py @@ -0,0 +1,99 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""A demo which runs object detection on camera frames. + +export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data + +Run face detection model: +python3 -m edgetpuvision.detect \ + --model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite + +Run coco model: +python3 -m edgetpuvision.detect \ + --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \ + --labels ${TEST_DATA}/coco_labels.txt +""" +import argparse +import time +import re +import svgwrite +import imp +import os +from edgetpu.detection.engine import DetectionEngine +import gstreamer + +def load_labels(path): + p = re.compile(r'\s*(\d+)(.+)') + with open(path, 'r', encoding='utf-8') as f: + lines = (p.match(line).groups() for line in f.readlines()) + return {int(num): text.strip() for num, text in lines} + +def shadow_text(dwg, x, y, text, font_size=20): + dwg.add(dwg.text(text, insert=(x+1, y+1), fill='black', font_size=font_size)) + dwg.add(dwg.text(text, insert=(x, y), fill='white', font_size=font_size)) + +def generate_svg(dwg, objs, labels, text_lines): + width, height = dwg.attribs['width'], dwg.attribs['height'] + for y, line in enumerate(text_lines): + shadow_text(dwg, 10, y*20, line) + for obj in objs: + x0, y0, x1, y1 = obj.bounding_box.flatten().tolist() + x, y, w, h = x0, y0, x1 - x0, y1 - y0 + x, y, w, h = int(x * width), int(y * height), int(w * width), int(h * height) + percent = int(100 * obj.score) + label = '%d%% %s' % (percent, labels[obj.label_id]) + shadow_text(dwg, x, y - 5, label) + dwg.add(dwg.rect(insert=(x,y), size=(w, h), + fill='red', fill_opacity=0.3, stroke='white')) + +def main(): + default_model_dir = '../all_models' + default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' + default_labels = 'coco_labels.txt' + parser = argparse.ArgumentParser() + parser.add_argument('--model', help='.tflite model path', + default=os.path.join(default_model_dir,default_model)) + parser.add_argument('--labels', help='label file path', + default=os.path.join(default_model_dir, default_labels)) + parser.add_argument('--top_k', type=int, default=3, + help='number of classes with highest score to display') + parser.add_argument('--threshold', type=float, default=0.1, + help='class score threshold') + args = parser.parse_args() + + print("Loading %s with %s labels."%(args.model, args.labels)) + engine = DetectionEngine(args.model) + labels = load_labels(args.labels) + + last_time = time.monotonic() + def user_callback(image, svg_canvas): + nonlocal last_time + start_time = time.monotonic() + objs = engine.DetectWithImage(image, threshold=args.threshold, + keep_aspect_ratio=True, relative_coord=True, + top_k=args.top_k) + end_time = time.monotonic() + text_lines = [ + 'Inference: %.2f ms' %((end_time - start_time) * 1000), + 'FPS: %.2f fps' %(1.0/(end_time - last_time)), + ] + print(' '.join(text_lines)) + last_time = end_time + generate_svg(svg_canvas, objs, labels, text_lines) + + result = gstreamer.run_pipeline(user_callback) + +if __name__ == '__main__': + main() diff --git a/gstreamer/gstreamer.py b/gstreamer/gstreamer.py new file mode 100644 index 00000000..65f252d0 --- /dev/null +++ b/gstreamer/gstreamer.py @@ -0,0 +1,118 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from functools import partial +import svgwrite + +import gi +gi.require_version('Gst', '1.0') +gi.require_version('GstBase', '1.0') +from gi.repository import GLib, GObject, Gst, GstBase +from PIL import Image + +GObject.threads_init() +Gst.init(None) + +def on_bus_message(bus, message, loop): + t = message.type + if t == Gst.MessageType.EOS: + loop.quit() + elif t == Gst.MessageType.WARNING: + err, debug = message.parse_warning() + sys.stderr.write('Warning: %s: %s\n' % (err, debug)) + elif t == Gst.MessageType.ERROR: + err, debug = message.parse_error() + sys.stderr.write('Error: %s: %s\n' % (err, debug)) + loop.quit() + return True + +def on_new_sample(sink, overlay, screen_size, appsink_size, user_function): + sample = sink.emit('pull-sample') + buf = sample.get_buffer() + result, mapinfo = buf.map(Gst.MapFlags.READ) + if result: + img = Image.frombytes('RGB', (appsink_size[0], appsink_size[1]), mapinfo.data, 'raw') + svg_canvas = svgwrite.Drawing('', size=(screen_size[0], screen_size[1])) + user_function(img, svg_canvas) + overlay.set_property('data', svg_canvas.tostring()) + buf.unmap(mapinfo) + return Gst.FlowReturn.OK + +def detectCoralDevBoard(): + try: + if 'MX8MQ' in open('/sys/firmware/devicetree/base/model').read(): + print('Detected Edge TPU dev board.') + return True + except: pass + return False + +def run_pipeline(user_function, + src_size=(640,480), + appsink_size=(320, 180)): + PIPELINE = 'v4l2src device=/dev/video0 ! {src_caps} ! {leaky_q} ' + if detectCoralDevBoard(): + SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate=30/1' + PIPELINE += """ ! glupload ! tee name=t + t. ! {leaky_q} ! glfilterbin filter=glcolorscale + ! {dl_caps} ! videoconvert ! {sink_caps} ! {sink_element} + t. ! {leaky_q} ! glfilterbin filter=glcolorscale + ! rsvgoverlay name=overlay ! waylandsink + """ + else: + SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1' + PIPELINE += """ ! tee name=t + t. ! {leaky_q} ! videoconvert ! videoscale ! {sink_caps} ! {sink_element} + t. ! {leaky_q} ! videoconvert + ! rsvgoverlay name=overlay ! videoconvert ! 
ximagesink + """ + + SINK_ELEMENT = 'appsink name=appsink sync=false emit-signals=true max-buffers=1 drop=true' + DL_CAPS = 'video/x-raw,format=RGBA,width={width},height={height}' + SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}' + LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream' + + src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1]) + dl_caps = DL_CAPS.format(width=appsink_size[0], height=appsink_size[1]) + sink_caps = SINK_CAPS.format(width=appsink_size[0], height=appsink_size[1]) + pipeline = PIPELINE.format(leaky_q=LEAKY_Q, + src_caps=src_caps, dl_caps=dl_caps, sink_caps=sink_caps, + sink_element=SINK_ELEMENT) + + print('Gstreamer pipeline: ', pipeline) + pipeline = Gst.parse_launch(pipeline) + + overlay = pipeline.get_by_name('overlay') + appsink = pipeline.get_by_name('appsink') + appsink.connect('new-sample', partial(on_new_sample, + overlay=overlay, screen_size = src_size, + appsink_size=appsink_size, user_function=user_function)) + loop = GObject.MainLoop() + + # Set up a pipeline bus watch to catch errors. + bus = pipeline.get_bus() + bus.add_signal_watch() + bus.connect('message', on_bus_message, loop) + + # Run pipeline. + pipeline.set_state(Gst.State.PLAYING) + try: + loop.run() + except: + pass + + # Clean up. + pipeline.set_state(Gst.State.NULL) + while GLib.MainContext.default().iteration(False): + pass diff --git a/gstreamer/install_requirements.sh b/gstreamer/install_requirements.sh new file mode 100644 index 00000000..f90559f9 --- /dev/null +++ b/gstreamer/install_requirements.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if grep -s -q "MX8MQ" /sys/firmware/devicetree/base/model; then + echo "Installing DevBoard specific dependencies" + sudo apt-get install python3-pip + sudo pip3 install svgwrite + sudo pip3 install python-periphery +else + # Install gstreamer + sudo apt-get install -y gstreamer1.0-plugins-bad gstreamer1.0-plugins-good python3-gst-1.0 python3-gi + pip3 install svgwrite + + if grep -s -q "Raspberry Pi" /sys/firmware/devicetree/base/model; then + echo "Installing Raspberry Pi specific dependencies" + sudo apt-get install python3-rpi.gpio + # Add v4l2 video module to kernel + if ! grep -q "bcm2835-v4l2" /etc/modules; then + echo bcm2835-v4l2 | sudo tee -a /etc/modules + fi + sudo modprobe bcm2835-v4l2 + fi +fi diff --git a/opencv/detect.py b/opencv/detect.py new file mode 100644 index 00000000..4dc59129 --- /dev/null +++ b/opencv/detect.py @@ -0,0 +1,103 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A demo which runs object detection on camera frames.
+
+export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
+
+Run the face detection model:
+python3 detect.py \
+    --model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
+
+Run the COCO model:
+python3 detect.py \
+    --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
+    --labels ${TEST_DATA}/coco_labels.txt
+
+Press the Q key to exit.
+"""
+import argparse
+import os
+import re
+
+import cv2
+from PIL import Image
+from edgetpu.detection.engine import DetectionEngine
+
+def load_labels(path):
+    p = re.compile(r'\s*(\d+)(.+)')
+    with open(path, 'r', encoding='utf-8') as f:
+        lines = (p.match(line).groups() for line in f.readlines())
+        return {int(num): text.strip() for num, text in lines}
+
+def main():
+    default_model_dir = '../all_models'
+    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
+    default_labels = 'coco_labels.txt'
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', help='.tflite model path',
+                        default=os.path.join(default_model_dir, default_model))
+    parser.add_argument('--labels', help='label file path',
+                        default=os.path.join(default_model_dir, default_labels))
+    parser.add_argument('--top_k', type=int, default=3,
+                        help='number of classes with highest score to display')
+    parser.add_argument('--threshold', type=float, default=0.1,
+                        help='class score threshold')
+    args = parser.parse_args()
+
+    print('Loading %s with %s labels.' % (args.model, args.labels))
+    engine = DetectionEngine(args.model)
+    labels = load_labels(args.labels)
+
+    cap = cv2.VideoCapture(0)
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        cv2_im = frame
+
+        # OpenCV captures frames in BGR order; convert to RGB before handing
+        # the image to the detection engine.
+        pil_im = Image.fromarray(cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB))
+
+        objs = engine.DetectWithImage(pil_im, threshold=args.threshold,
+                                      keep_aspect_ratio=True, relative_coord=True,
+                                      top_k=args.top_k)
+
+        cv2_im = append_objs_to_img(cv2_im, objs, labels)
+
+        cv2.imshow('frame', cv2_im)
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+def append_objs_to_img(cv2_im, objs, labels):
+    height, width, channels = cv2_im.shape
+    for obj in objs:
+        # Bounding boxes are relative [x0, y0, x1, y1]; scale to pixel coords.
+        x0, y0, x1, y1 = obj.bounding_box.flatten().tolist()
+        x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height)
+        percent = int(100 * obj.score)
+        label = '%d%% %s' % (percent, labels[obj.label_id])
+
+        cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), (0, 255, 0), 2)
+        cv2_im = cv2.putText(cv2_im, label, (x0, y0+30),
+                             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
+    return cv2_im
+
+
+if __name__ == '__main__':
+    main()
diff --git a/opencv/install_requirements.sh b/opencv/install_requirements.sh
new file mode 100644
index 00000000..1890bfc3
--- /dev/null
+++ b/opencv/install_requirements.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if grep -s -q "MX8MQ" /sys/firmware/devicetree/base/model; then
+  echo "OpenCV is not yet supported on the Dev Board"
+else
+  sudo apt install python3-opencv
+fi
diff --git a/pygame/README.md b/pygame/README.md
new file mode 100644
index 00000000..5a41172a
--- /dev/null
+++ b/pygame/README.md
@@ -0,0 +1,25 @@
+This folder contains some simple camera classification and detection examples
+that use pygame.
+
+If you don't have pygame installed, you can install it with:
+
+```
+pip3 install pygame
+```
+
+The demos use the default model ```mobilenet_v2_1.0_224_quant_edgetpu.tflite```
+unless you specify another one.
+
+Run the classifier with:
+
+```
+python3 classify_capture.py
+```
+
+You can change the model and the labels file using flags:
+
+```
+python3 classify_capture.py --model ../all_models/inception_v3_299_quant_edgetpu.tflite
+```
+
+Run the detector with:
+
+```
+python3 detect.py
+```
diff --git a/pygame/classify_capture.py b/pygame/classify_capture.py
new file mode 100644
index 00000000..93f99691
--- /dev/null
+++ b/pygame/classify_capture.py
@@ -0,0 +1,73 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
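+
+# Data-flow note for main() below: each camera frame is scaled to the model's
+# input resolution and the raw surface bytes are passed to
+# ClassifyWithInputTensor() as a flat uint8 array. This assumes the pygame
+# surface holds 24-bit RGB pixels in the layout the quantized model expects;
+# get_input_tensor_shape() supplies the model's 4-D input shape, which is
+# square (224x224) for the default model, so the width/height unpacking order
+# used below is not an issue there.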
+ +"""A demo to classify Pygame camera stream.""" +import argparse +import os +import io +import time +from collections import deque +import numpy as np +import pygame +import pygame.camera +from pygame.locals import * + +import edgetpu.classification.engine + +def main(): + default_model_dir = "../all_models" + default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite' + default_labels = 'imagenet_labels.txt' + parser = argparse.ArgumentParser() + parser.add_argument('--model', help='.tflite model path', + default=os.path.join(default_model_dir,default_model)) + parser.add_argument('--labels', help='label file path', + default=os.path.join(default_model_dir, default_labels)) + args = parser.parse_args() + + with open(args.labels, 'r') as f: + pairs = (l.strip().split(maxsplit=1) for l in f.readlines()) + labels = dict((int(k), v) for k, v in pairs) + + engine = edgetpu.classification.engine.ClassificationEngine(args.model) + + pygame.init() + pygame.camera.init() + camlist = pygame.camera.list_cameras() + + camera = pygame.camera.Camera(camlist[0], (640, 480)) + _, width, height, channels = engine.get_input_tensor_shape() + camera.start() + try: + fps = deque(maxlen=20) + fps.append(time.time()) + while True: + imagen = camera.get_image() + imagen = pygame.transform.scale(imagen, (width, height)) + input = np.frombuffer(imagen.get_buffer(), dtype=np.uint8) + start_ms = time.time() + results = engine.ClassifyWithInputTensor(input, top_k=3) + inference_ms = (time.time() - start_ms)*1000.0 + fps.append(time.time()) + fps_ms = len(fps)/(fps[-1] - fps[0]) + annotate_text = "Inference: %5.2fms FPS: %3.1f" % (inference_ms, fps_ms) + for result in results: + annotate_text += "\n%.0f%% %s" % (100*result[1], labels[result[0]]) + print(annotate_text) + finally: + camera.stop() + + +if __name__ == '__main__': + main() diff --git a/pygame/detect.py b/pygame/detect.py new file mode 100644 index 00000000..53498465 --- /dev/null +++ b/pygame/detect.py @@ -0,0 +1,102 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
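+
+# Geometry note for the drawing loop in main() below: DetectWithInputTensor()
+# returns candidates whose bounding_box holds relative [0, 1] coordinates,
+# flattened to [x0, y0, x1, y1]. Scaling by the camera resolution gives the
+# pixel rectangle drawn on the pygame surface; for example, a box of
+# (0.25, 0.5, 0.75, 1.0) on a 640x480 frame maps to Rect(160, 240, 320, 240).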
+ +"""A demo to run the detector in a Pygame camera stream.""" +import argparse +import os +import io +import time +import re +from collections import deque +import numpy as np +import pygame +import pygame.camera +from pygame.locals import * + +from edgetpu.detection.engine import DetectionEngine + +def load_labels(path): + p = re.compile(r'\s*(\d+)(.+)') + with open(path, 'r', encoding='utf-8') as f: + lines = (p.match(line).groups() for line in f.readlines()) + return {int(num): text.strip() for num, text in lines} + +def main(): + cam_w, cam_h = 640, 480 + default_model_dir = "../all_models" + default_model = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' + default_labels = 'coco_labels.txt' + parser = argparse.ArgumentParser() + parser.add_argument('--model', help='.tflite model path', + default=os.path.join(default_model_dir,default_model)) + parser.add_argument('--labels', help='label file path', + default=os.path.join(default_model_dir, default_labels)) + parser.add_argument('--top_k', type=int, default=5, + help='number of classes with highest score to display') + parser.add_argument('--threshold', type=float, default=0.5, + help='class score threshold') + args = parser.parse_args() + + with open(args.labels, 'r') as f: + pairs = (l.strip().split(maxsplit=1) for l in f.readlines()) + labels = dict((int(k), v) for k, v in pairs) + + print("Loading %s with %s labels."%(args.model, args.labels)) + engine = DetectionEngine(args.model) + labels = load_labels(args.labels) + + pygame.init() + pygame.font.init() + font = pygame.font.SysFont("Arial", 20) + + pygame.camera.init() + camlist = pygame.camera.list_cameras() + + _, w, h, _ = engine.get_input_tensor_shape() + camera = pygame.camera.Camera(camlist[0], (cam_w, cam_h)) + display = pygame.display.set_mode((cam_w, cam_h), 0) + + red = pygame.Color(255, 0, 0) + + camera.start() + try: + last_time = time.monotonic() + while True: + mysurface = camera.get_image() + imagen = pygame.transform.scale(mysurface, (w, h)) + input = np.frombuffer(imagen.get_buffer(), dtype=np.uint8) + start_time = time.monotonic() + results = engine.DetectWithInputTensor(input, threshold=args.threshold, top_k=args.top_k) + stop_time = time.monotonic() + inference_ms = (stop_time - start_time)*1000.0 + fps_ms = 1.0 / (stop_time - last_time) + last_time = stop_time + annotate_text = "Inference: %5.2fms FPS: %3.1f" % (inference_ms, fps_ms) + for result in results: + x0, y0, x1, y1 = result.bounding_box.flatten().tolist() + rect = pygame.Rect(x0 * cam_w, y0 * cam_h, (x1 - x0) * cam_w, (y1 - y0) * cam_h) + pygame.draw.rect(mysurface, red, rect, 1) + label = "%.0f%% %s" % (100*result.score, labels[result.label_id]) + text = font.render(label, True, red) + mysurface.blit(text, (x0 * cam_w , y0 * cam_h)) + text = font.render(annotate_text, True, red) + mysurface.blit(text, (0, 0)) + display.blit(mysurface, (0, 0)) + pygame.display.flip() + finally: + camera.stop() + + +if __name__ == '__main__': + main() diff --git a/pygame/install_requirements.sh b/pygame/install_requirements.sh new file mode 100644 index 00000000..df8ba5be --- /dev/null +++ b/pygame/install_requirements.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if grep -s -q "MX8MQ" /sys/firmware/devicetree/base/model; then
+  echo "Installing DevBoard specific dependencies"
+  sudo apt-get install -y python3-dev libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsdl1.2-dev libsmpeg-dev python-numpy subversion libportmidi-dev ffmpeg libswscale-dev libavformat-dev libavcodec-dev libfreetype6-dev
+  sudo pip3 install pygame
+else
+  sudo pip3 install pygame
+fi
diff --git a/raspicam/README.md b/raspicam/README.md
new file mode 100644
index 00000000..8be71137
--- /dev/null
+++ b/raspicam/README.md
@@ -0,0 +1,29 @@
+This folder contains some simple camera classification examples specific to the
+Raspberry Pi, using the picamera Python module to access the camera.
+
+If you don't have picamera installed, you can install it with:
+
+```
+pip3 install picamera
+```
+
+Don't forget to enable your camera with raspi-config under "Interfacing Options":
+
+```
+sudo raspi-config
+```
+
+To run the demo, execute the following command, which uses the default
+model ```mobilenet_v2_1.0_224_quant_edgetpu.tflite```:
+
+```
+python3 classify_capture.py
+```
+
+You can change the model and the labels file using flags:
+
+```
+python3 classify_capture.py --model ../all_models/inception_v3_299_quant_edgetpu.tflite
+```
diff --git a/raspicam/classify_capture.py b/raspicam/classify_capture.py
new file mode 100644
index 00000000..721c1784
--- /dev/null
+++ b/raspicam/classify_capture.py
@@ -0,0 +1,74 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""A demo to classify Raspberry Pi camera stream.""" +import argparse +import os +import io +import time +from collections import deque +import numpy as np +import picamera + +import edgetpu.classification.engine + +def main(): + default_model_dir = "../all_models" + default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite' + default_labels = 'imagenet_labels.txt' + parser = argparse.ArgumentParser() + parser.add_argument('--model', help='.tflite model path', + default=os.path.join(default_model_dir,default_model)) + parser.add_argument('--labels', help='label file path', + default=os.path.join(default_model_dir, default_labels)) + args = parser.parse_args() + + with open(args.labels, 'r') as f: + pairs = (l.strip().split(maxsplit=1) for l in f.readlines()) + labels = dict((int(k), v) for k, v in pairs) + + engine = edgetpu.classification.engine.ClassificationEngine(args.model) + + with picamera.PiCamera() as camera: + camera.resolution = (640, 480) + camera.framerate = 30 + camera.annotate_text_size = 20 + _, width, height, channels = engine.get_input_tensor_shape() + camera.start_preview() + try: + stream = io.BytesIO() + fps = deque(maxlen=20) + fps.append(time.time()) + for foo in camera.capture_continuous(stream, + format='rgb', + use_video_port=True, + resize=(width, height)): + stream.truncate() + stream.seek(0) + input = np.frombuffer(stream.getvalue(), dtype=np.uint8) + start_ms = time.time() + results = engine.ClassifyWithInputTensor(input, top_k=3) + inference_ms = (time.time() - start_ms)*1000.0 + fps.append(time.time()) + fps_ms = len(fps)/(fps[-1] - fps[0]) + camera.annotate_text = "Inference: %5.2fms FPS: %3.1f" % (inference_ms, fps_ms) + for result in results: + camera.annotate_text += "\n%.0f%% %s" % (100*result[1], labels[result[0]]) + print(camera.annotate_text) + finally: + camera.stop_preview() + + +if __name__ == '__main__': + main() diff --git a/raspicam/install_requirements.sh b/raspicam/install_requirements.sh new file mode 100644 index 00000000..629d4b26 --- /dev/null +++ b/raspicam/install_requirements.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +sudo pip3 install picamera
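
All of the demos above share the same Edge TPU calling pattern: load a compiled
```.tflite``` model into an engine, hand it an image or raw input tensor, and
read back (label, score) results. A minimal single-image sketch of that pattern,
assuming the ```edgetpu``` Python package is installed and the models were
fetched with ```download_models.sh``` (```test.jpg``` is a placeholder path):

```
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image

def classify_one(model_path, image_path, labels=None, top_k=3):
    """Classify a single image and print the top_k (label, score) pairs."""
    engine = ClassificationEngine(model_path)
    image = Image.open(image_path)
    # ClassifyWithImage returns a list of (label_id, score) pairs.
    for label_id, score in engine.ClassifyWithImage(image, top_k=top_k):
        name = labels.get(label_id, label_id) if labels else label_id
        print('%.2f %s' % (score, name))

# Example (model path from download_models.sh; 'test.jpg' is a placeholder):
# classify_one('all_models/mobilenet_v2_1.0_224_quant_edgetpu.tflite', 'test.jpg')
```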