######################################
# Ambianic main configuration file #
######################################
version: '0.10.21'
# path to the data directory
data_dir: &data_dir ./data
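# The &data_dir anchor lets later sections reuse this path by alias,
# e.g. `location: *data_dir` in the commented save_samples examples below.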
# Set logging level to one of DEBUG, INFO, WARNING, ERROR
logging:
  file: ./data/ambianic-log.txt
  level: DEBUG
# Pipeline event timeline configuration
timeline:
  event_log: ./data/timeline-event-log.yaml
# Cameras and other input data sources
# Using Home Assistant conventions to ease upcoming integration
sources:
  # front_door_camera: &src_front_door_cam
  #   uri: *secret_uri_front_door_camera
  #   # type: video, audio, or auto
  #   type: video
  #   live: true
  # entry_area_camera: &src_entry_area_cam
  #   uri: *secret_uri_entry_area_camera
  #   type: video
  # recorded front door cam feed for quick testing
  # live: is this a live source or a recording?
  # When live is True, the AVSource element will try to reconnect
  # if the stream is interrupted due to network disruption or another reason.
  # live: True
  recorded_cam_feed: &src_recorded_cam_feed
    uri: file:///workspace/tests/pipeline/avsource/test2-cam-person1.mkv
    type: video
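  # A hedged sketch of what a live RTSP camera source could look like, based on
  # the commented examples above; the name and URI are hypothetical placeholders,
  # not part of this setup.
  # live_cam_feed: &src_live_cam_feed
  #   uri: rtsp://192.168.1.10:554/stream1 # hypothetical camera address
  #   type: video
  #   live: true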
ai_models:
  # image_classification:
  #   tf_graph:
  #   labels:
  image_detection: &tfm_image_detection
    model:
      tflite: ai_models/mobilenet_ssd_v2_coco_quant_postprocess.tflite
      edgetpu: ai_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
    labels: ai_models/coco_labels.txt
  face_detection: &tfm_face_detection
    model:
      tflite: ai_models/mobilenet_ssd_v2_face_quant_postprocess.tflite
      edgetpu: ai_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
    labels: ai_models/coco_labels.txt
    top_k: 2
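  # Note: pipeline steps below reuse these model definitions through the
  # standard YAML merge key, e.g. `<<: *tfm_image_detection`, which copies the
  # anchored mapping into the step while still allowing per-step overrides
  # such as confidence_threshold.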
# A named pipeline defines an ordered sequence of operations,
# such as reading from a data source, running AI model inference, and saving samples.
pipelines:
  # sequence of piped operations for use in daytime front door watch
  front_door_watch:
    - source: *src_recorded_cam_feed
    - detect_objects: # run ai inference on the input data
        <<: *tfm_image_detection
        confidence_threshold: 0.8
    - save_detections: # save samples from the inference results
        positive_interval: 2 # how often (in seconds) to save samples with ANY results above the confidence threshold
        idle_interval: 6000 # how often (in seconds) to save samples with NO results above the confidence threshold
    - detect_faces: # run ai inference on the samples from the previous element output
        <<: *tfm_face_detection
        confidence_threshold: 0.8
    - save_detections: # save samples from the inference results
        positive_interval: 2
        idle_interval: 600
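  # A minimal illustrative sketch of a leaner pipeline over the same recorded
  # feed, composed only of elements defined above; the name and values are
  # hypothetical, not part of the default setup.
  # face_watch:
  #   - source: *src_recorded_cam_feed
  #   - detect_faces:
  #       <<: *tfm_face_detection
  #       confidence_threshold: 0.6
  #   - save_detections:
  #       positive_interval: 2
  #       idle_interval: 600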
  # sequence of piped operations for use in daytime entry area watch
  # entry_area_watch:
  #   - source: *src_entry_area_cam
  #   - mask: svg # apply a mask to the input data
  #   - detect_objects: # run ai inference on the input data
  #       <<: *tfm_image_detection
  #       confidence_threshold: 0.8
  #   - save_detections: # save samples from the inference results
  #       output_directory: *ea_object_detect_dir
  #       positive_interval: 2 # how often (in seconds) to save samples with ANY results above the confidence threshold
  #       idle_interval: 6000 # how often (in seconds) to save samples with NO results above the confidence threshold
  #   - detect_faces: # run ai inference on the samples from the previous element output
  #       <<: *tfm_face_detection
  #       confidence_threshold: 0.8
  #   - save_detections: # save samples from the inference results
  #       output_directory: *ea_face_detect_dir
  #       positive_interval: 2
  #       idle_interval: 600
  #   - gate: [category: person] # select the labels that will be used as input by the next ai model
  #   - ai: # run the next ai model inference on data from the previous section of the pipeline
  #       <<: *tfm_face_detection
  #       confidence_threshold: 0.6
  #       save_samples:
  #         location: *data_dir
  #         positive_interval: 2 # how often (in seconds) to save samples with positive label detections
  #   - gate: [category: face] # select detected faces and pass on to the face recognition model
  #   - ai: *tfm_face_recognition
  #       confidence_threshold: 0.6
  #   - save_samples:
  #       location: *data_dir
  #       positive_interval: 2 # how often (in seconds) to save samples with positive label detections
  #   - tee: # a tee splits the pipeline into independent streams,
  #     # each with its own thread and a copy of the input data from the previous pipe
  #     - gate: [category: vehicle]
  #     - ai: *detect_license_plate
  # nighttime_front_door_watch:
  #   source: @front_door
  #   -
  #     mask: svg
  #     - uri: ...
  #   inference: @find_person_or_vehicle
  #   -
  #     mask:
  #
  # vacation_front_door_watch:
# Policies group pipelines and are mutually exclusive:
# only one policy is active at a time.
# When a specific policy is activated, all other policies are idle.
# policies:
#   daytime:
#     - daytime_front_door_watch
#     - daytime_backyard_door_watch
#   nighttime:
#   vacation:
# Masks allow filtering out parts of the input signal before an AI model sees it.
# masks:
#   # the svg mask applies an SVG graphic onto an input image
#   svg: ambianic.masks.SvgMask
#   arguments:
#     - uri # of the svg file
#     # scale_to_fit indicates whether to scale the mask to fit the stream source shape
#     # default: True
#     - scale_to_fit
#   # audio_hz_trim is a simple mask that cuts out frequency ranges from an input audio stream
#   audio_hz_trim: ambianic.masks.AudioHzTrim
#   arguments:
#     - hz_band # lower and upper frequency range to cut out
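# A hedged sketch of how a pipeline step might reference the svg mask above,
# mirroring the `- mask: svg` line in the commented entry_area_watch example.
# The argument layout and SVG path are assumptions, since the masks feature is
# still a commented-out draft here.
# - mask: svg
#   uri: file:///workspace/masks/entry-area-zone.svg # hypothetical file
#   scale_to_fit: True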
# print a heartbeat message to the log at a regular interval;
# helps debug threads that hang or go idle for a variety of reasons
# heartbeat:
#   interval: 60 # once a minute