
Commit

chore: checkpoint
ivelin committed Aug 30, 2021
1 parent 0593f83 commit ae52c98
Showing 5 changed files with 140 additions and 273 deletions.
4 changes: 1 addition & 3 deletions .gitpod.yml
@@ -15,15 +15,13 @@ tasks:
pyenv global system
echo "Started ambianic-edge development environment."
echo "Running ambianic-edge testsuite:"
python3 -m pytest --cov=ambianic --cov-report=term tests/
python3 -m pytest --log-cli-level=DEBUG --cov=ambianic --cov-report=term tests/
- command: |
sudo mkdir /opt/ambianic-edge/
sudo ln -s /workspace/ambianic-edge/ai_models/ /opt/ambianic-edge/ai_models
pyenv global system
echo "To run the amgianic-edge server use:"
echo "python3 -m ambianic"
echo "To run tests with coverage report use:"
echo "python3 -m pytest --cov=ambianic --cov-report=term tests/"
vscode:
extensions:
79 changes: 12 additions & 67 deletions src/ambianic/webapp/fastapi_app.py
@@ -56,13 +56,12 @@ def health_check():
@app.get('/api/status')
def get_status():
response_object = {'status': 'OK', 'version': __version__}
resp = jsonify(response_object)
return resp
return response_object

@app.get('/api/auth/premium-notification')
def initialize_premium_notification():
userAuth0Id = request.args.get('userId')
endpoint = request.args.get('notification_endpoint')
def initialize_premium_notification(userId: str, notification_endpoint: str):
userAuth0Id = userId
endpoint = notification_endpoint
auth_file = {
'name': 'AMBIANIC-EDGE-PREMIUM',
'credentials': {
@@ -81,76 +80,22 @@ def initialize_premium_notification():

@app.get('/api/timeline')
@app.get('/api/timeline.json')
def get_timeline():
def get_timeline(page: int=1):
response_object = {'status': 'success'}
req_page = request.args.get('page', default=1, type=int)
log.debug('Requested timeline events page: %d', req_page)
resp = samples.get_timeline(page=req_page, data_dir=app.data_dir)
log.debug('Requested timeline events page: %d', page)
resp = samples.get_timeline(page=page, data_dir=app.data_dir)
response_object['timeline'] = resp
log.debug('Returning %d timeline events', len(resp))
# log.debug('Returning samples: %s ', response_object)
resp = jsonify(response_object)
return resp

@app.get('/api/samples')
def get_samples():
response_object = {'status': 'success'}
req_page = request.args.get('page', default=1, type=int)
resp = samples.get_samples(page=req_page)
response_object['samples'] = resp
log.debug('Returning %d samples', len(resp))
# log.debug('Returning samples: %s ', response_object)
resp = jsonify(response_object)
return resp


@app.post('/api/samples')
def add_samples():
response_object = {'status': 'success'}
post_data = request.get_json()
new_sample = {
'title': post_data.get('title'),
'author': post_data.get('author'),
'read': post_data.get('read')
}
samples.add_sample(new_sample)
response_object['message'] = 'Sample added!'
response_object['sample_id'] = new_sample["id"]
log.debug('Sample added: %s ', new_sample)
# log.debug('Returning samples: %s ', response_object)
resp = jsonify(response_object)
return resp


@app.put('/api/samples/<sample_id>')
def update_sample(sample_id):
response_object = {'status': 'success'}
post_data = request.get_json()
sample = {
'id': sample_id,
'title': post_data.get('title'),
'author': post_data.get('author'),
'read': post_data.get('read')
}
log.debug('update_sample %s', sample)
samples.update_sample(sample)
response_object['message'] = 'Sample updated!'
return jsonify(response_object)

@app.delete('/api/samples/<sample_id>')
def delete_sample(sample_id):
response_object = {'status': 'success'}
samples.delete_sample(sample_id)
response_object['message'] = 'Sample removed!'
return jsonify(response_object)
return response_object

@app.get('/api/config')
def get_config():
return jsonify(config.as_dict())
return config.as_dict()

@app.get('/api/config/source/<source_id>')
def get_config_source(source_id):
return jsonify(config_sources.get(source_id))
@app.get('/api/config/source/{source_id}')
def get_config_source(source_id: str):
return config_sources.get(source_id)

@app.put('/api/config/source')
def update_config_source(source: SensorSource):
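The fastapi_app.py changes above replace Flask idioms with their FastAPI equivalents: query strings read through request.args become typed function parameters, Flask's <source_id> route converter becomes the {source_id} path parameter, and jsonify() calls are dropped because FastAPI serializes returned dicts to JSON automatically. A minimal sketch of that pattern, assuming FastAPI is installed; the routes and payloads below are illustrative, not part of this commit:

from fastapi import FastAPI

app = FastAPI()

@app.get('/api/timeline')
def get_timeline(page: int = 1):
    # FastAPI binds the ?page= query parameter to the typed argument
    # and converts the returned dict to a JSON response.
    return {'status': 'success', 'page': page}

@app.get('/api/config/source/{source_id}')
def get_config_source(source_id: str):
    # {source_id} is a path parameter, replacing Flask's <source_id> converter.
    return {'source_id': source_id}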
137 changes: 3 additions & 134 deletions src/ambianic/webapp/server/samples.py
@@ -1,4 +1,4 @@
"""REST API for timeline events from pipeline samples."""
"""REST API for timeline events fired by pipelines."""
import logging
import uuid
from datetime import datetime
@@ -11,109 +11,8 @@

log = logging.getLogger()

SAMPLES = [
{
'file': '20190913-063945-json.txt',
'id': '2a34987234324324',
"datetime": "2019-09-13T16:32:34.704797",
"image": "20190913-163234-image.jpg",
"inference_result": [
{
"category": "person",
"confidence": 0.98046875,
"box": {
"xmin": 0.5251423732654468,
"ymin": 0.0021262094378471375,
"xmax": 0.9498447340887946,
"ymax": 0.23079824447631836
}
}
]
},
{
'file': '20190913-064945-json.txt',
'id': '3ea34987234345424',
"datetime": "2019-09-15T06:38:30.019847",
"image": "20190915-063830-image.jpg",
"inference_result": []
},
{
'file': '20190913-064945-json.txt',
'id': '2c349bb74234324324',
"datetime": "2019-09-12T15:20:47.550151",
"image": "20190912-152047-image.jpg",
"inference_result": [
{
"category": "person",
"confidence": 0.9921875,
"box": {
"xmin": 0.349978506565094,
"ymin": 0.09689526346356389,
"xmax": 0.5911635756492615,
"ymax": 0.40339951629117893
}
}
]
},
]


def get_samples(before_datetime=None, page=1):
"""Get stored pipeline samples.
Parameters
----------
before_datetime : date time in ISO 8601 compatible format,
YYYY-MM-DDTHH:MM:SS. For example '2002-12-25 00:00:00-06:39'.
It uses python's standard function datetime.fromisoformat().
If not provided, the function will start with the most recent available
sample.
page : positive integer
Paginates samples in batches of 5. Defaults to page=1.

Returns
-------
dictionary
Returns a dictionary of previously saved pipeline samples.
"""
parsed_datetime = None
assert isinstance(page, int)
assert page > 0
page_size = 5
if before_datetime:
try:
parsed_datetime = datetime.fromisoformat(before_datetime)
log.debug('Fetching samples saved before %s',
parsed_datetime)
except ValueError as e:
log.warning('Unable to parse before_datetime parameter: %s. '
' Error: %s', before_datetime, str(e))
page_start_position = (page-1)*page_size
page_end_position = page_start_position + page_size
if not parsed_datetime:
log.debug('Fetching most recent saved samples')
log.debug('Fetching samples page %d. Page size %d. '
'Sample index range [%d:%d]. ',
page, page_size, page_start_position, page_end_position)
p = Path(os.path.join(DEFAULT_DATA_DIR, 'detections/front-door/faces'))
log.debug('Samples path: %s', p.resolve())
files = list(p.glob("*-json.txt"))
log.debug('Fetched %d file names.', len(files))
files = sorted(files, key=os.path.getmtime, reverse=True)
samples = []
for json_file in files[page_start_position:page_end_position]:
with open(json_file) as f:
sample = json.load(f)
sample['id'] = uuid.uuid4().hex
sample['file'] = str(json_file)
samples.append(sample)
# lines = map(str, files)
# log.debug('File names follow:\n %s', "\n".join(lines))
return samples
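The pagination above maps a 1-based page number onto a slice of the file list sorted newest first; the remaining get_timeline endpoint exposes the same page-based contract. A worked instance of the arithmetic, using the page size of 5 stated in the docstring:

page = 3
page_size = 5
page_start_position = (page - 1) * page_size          # 10
page_end_position = page_start_position + page_size   # 15
# files[10:15] -> the 11th through 15th most recently modified sample files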


def remove_timeline(file_path):
def _remove_timeline(file_path):
try:
os.remove(file_path)
except Exception:
@@ -184,7 +83,7 @@ def get_timeline(before_datetime=None, page=1, data_dir=None):
yaml.constructor.ConstructorError
):
log.exception("Detected unreadable timeline, removing %s" % file_path)
remove_timeline(file_path)
_remove_timeline(file_path)
continue

events_queue = []
@@ -221,33 +120,3 @@

# return the remaining queue if there are no more files to process
return events_queue


def add_sample(new_sample=None):
assert new_sample
log.debug('add_sample new_sample 0 %s', new_sample)
new_sample['id'] = uuid.uuid4().hex
log.debug('add_sample 1 %s', new_sample)
SAMPLES.append(new_sample)


def update_sample(edited_sample=None):
assert edited_sample
for i, old_sample in enumerate(SAMPLES):
old_sample = SAMPLES[i]
if old_sample['id'] == edited_sample['id']:
SAMPLES[i] = edited_sample
return True
log.debug('sample not found %s', edited_sample)
return False


def delete_sample(sample_id):
sample = None
for sample in SAMPLES:
if sample['id'] == sample_id:
SAMPLES.remove(sample)
log.debug('sample deleted %s', sample)
return True
log.debug('sample not found %s', sample)
return False
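With the sample CRUD helpers removed, the timeline query is the remaining public entry point of this module. A hedged usage sketch, assuming the ./data directory from the test configuration; in the FastAPI app the directory comes from app.data_dir:

from ambianic.webapp.server import samples

# Fetch the second page of timeline events; the result is a list of event
# dicts that the /api/timeline handler wraps into its response object.
events = samples.get_timeline(page=2, data_dir='./data')
print(len(events))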
98 changes: 98 additions & 0 deletions tests/test-config.yaml
@@ -0,0 +1,98 @@
######################################
# Ambianic main configuration file #
######################################
version: '2021.08.30.test'

# path to the data directory
data_dir: ./data

# Set logging level to one of DEBUG, INFO, WARNING, ERROR
logging:
file: ./data/ambianic-log.txt
level: INFO
# set a less noisy log level for the console output
# console_level: WARNING

# Store notifications provider configuration
# see https://github.com/caronc/apprise#popular-notification-services for syntax examples
# notifications:
# catch_all_email:
# include_attachments: true
# providers:
# - mailto://userid:[email protected]
# alert_fall:
# providers:
# - mailto://userid:[email protected]
# - json://hostname/a/path/to/post/to

# Pipeline event timeline configuration
timeline:
event_log: ./data/timeline-event-log.yaml

# Cameras and other input data sources
# Using Home Assistant conventions to ease upcoming integration
sources:

# # direct support for raspberry picamera
# picamera:
# uri: picamera
# type: video
# live: true
#
# # local video device integration example
# webcam:
# uri: /dev/video0
# type: video
# live: true

recorded_cam_feed:
uri: file:///workspace/tests/pipeline/avsource/test2-cam-person1.mkv
type: video
# live: true

ai_models:
image_detection:
model:
tflite: ai_models/mobilenet_ssd_v2_coco_quant_postprocess.tflite
edgetpu: ai_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
labels: ai_models/coco_labels.txt
face_detection:
model:
tflite: ai_models/mobilenet_ssd_v2_face_quant_postprocess.tflite
edgetpu: ai_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
labels: ai_models/coco_labels.txt
top_k: 2
fall_detection:
model:
tflite: ai_models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite
edgetpu: ai_models/posenet_mobilenet_v1_075_721_1281_quant_decoder_edgetpu.tflite
labels: ai_models/pose_labels.txt

# A named pipeline defines an ordered sequence of operations
# such as reading from a data source, AI model inference, saving samples and others.
pipelines:
# Pipeline names could be descriptive, e.g. front_door_watch or entry_room_watch.
area_watch:
- source: recorded_cam_feed
- detect_objects: # run ai inference on the input data
ai_model: image_detection
confidence_threshold: 0.6
# Watch for any of the labels listed below. The labels must be from the model trained label set.
# If no labels are listed, then watch for all model trained labels.
label_filter:
- person
- car
- save_detections: # save samples from the inference results
positive_interval: 300 # how often (in seconds) to save samples with ANY results above the confidence threshold
idle_interval: 6000 # how often (in seconds) to save samples with NO results above the confidence threshold
- detect_falls: # look for falls
ai_model: fall_detection
confidence_threshold: 0.6
- save_detections: # save samples from the inference results
positive_interval: 10
idle_interval: 600000
# notify: # notify a third party service
# providers:
# - alert_fall
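The new tests/test-config.yaml wires one recorded video source through object-detection, fall-detection, and save-detection steps. A quick, hedged way to sanity-check the file with PyYAML (already used by the server for the timeline event log); the expected output in the comments is an assumption about the parsed mapping, not part of this commit:

import yaml

with open('tests/test-config.yaml') as f:
    cfg = yaml.safe_load(f)

print(cfg['version'])            # '2021.08.30.test'
print(list(cfg['ai_models']))    # ['image_detection', 'face_detection', 'fall_detection']
for step in cfg['pipelines']['area_watch']:
    # each pipeline step is a single-key mapping, e.g. {'source': 'recorded_cam_feed'}
    print(next(iter(step)))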

