Skip to content
This repository has been archived by the owner on Dec 16, 2024. It is now read-only.

Commit

Permalink
Merge pull request #2 from nnshah1/fixes
Browse files Browse the repository at this point in the history
Fixes
  • Loading branch information
QuPengfei authored Apr 30, 2020
2 parents c4590b9 + 3d763dc commit 63fc1ff
Show file tree
Hide file tree
Showing 4 changed files with 78 additions and 45 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,35 @@
"name": "emotion_recognition",
"version": 1,
"type": "FFmpeg",
"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL:nireq=\"{parameters[nireq]}\",classify=model=\"{models[emotion_recognition][1][network]}\":model_proc=\"{models[emotion_recognition][1][proc]}\":device=HDDL,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
"description":"Emotion Recognition",
"template": [
"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi",
" -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" ",
"-vf \"detect=model={models[face_detection_retail][1][network]}",
":model_proc=\"{models[face_detection_retail][1][proc]}\"",
":interval=\"{parameters[inference-interval]}\":device=HDDL:nireq=\"{parameters[nireq]}\",",
"classify=model=\"{models[emotion_recognition][1][network]}\"",
":model_proc=\"{models[emotion_recognition][1][proc]}\":device=HDDL,",
"metaconvert\"",
" -an -y -f metapublish"
],
"description": "Emotion Recognition",
"parameters": {
"type" : "object",
"properties" : {
"type": "object",
"properties": {
"inference-interval": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
"default": 1
},
"nireq": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 1,
"maximum": 64,
"default": 2
}
}
}
}


}
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,34 @@
"name": "face_recognition",
"version": 1,
"type": "FFmpeg",
"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL:nireq=\"{parameters[nireq]}\",classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=HDDL\",identify=gallery=\"/home/gallery/face_gallery_FP16/gallery.json,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
"description":"Face Recognition",
"template": [
"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi",
" -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" ",
"-vf \"detect=model={models[face_detection_retail][1][network]}",
":model_proc=\"{models[face_detection_retail][1][proc]}\"",
":interval=\"{parameters[inference-interval]}\":device=HDDL:nireq=\"{parameters[nireq]}\",",
"classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=HDDL,",
"identify=gallery=\"/home/gallery/face_gallery_FP16/gallery.json\",metaconvert\"",
" -y -an -f metapublish"
],
"description": "Face Recognition",
"parameters": {
"type" : "object",
"properties" : {
"type": "object",
"properties": {
"inference-interval": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
"default": 1
},
"nireq": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 1,
"maximum": 64,
"default": 2
}
}
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,31 @@
"name": "object_detection",
"version": 1,
"type": "FFmpeg",
"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}:device=HDDL:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[every-nth-frame]}:nireq={parameters[nireq]},metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
"template": [
"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128",
" -i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}",
":device=HDDL:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[inference-interval]}",
":nireq={parameters[nireq]},metaconvert\"",
" -an -y -f metapublish"
],
"description": "Object Detection",
"parameters": {
"type" : "object",
"properties" : {
"type": "object",
"properties": {
"inference-interval": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 0,
"maximum": 4294967295,
"default": 6
},
"nireq": {
"element":"detection",
"element": "detection",
"type": "integer",
"minimum": 1,
"maximum": 64,
"default": 2
}
}
}
}


}
54 changes: 33 additions & 21 deletions ad-insertion/analytics/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,39 +11,48 @@
import time
import os
import re
import traceback


# Kafka topic on which segment-analytics jobs are scheduled.
video_analytics_topic = "seg_analytics_sched"

# Prefix used when naming this analytics worker; overridable through the
# VA_PRE environment variable, defaulting to "VA-".
machine_prefix = os.environ.get("VA_PRE", "VA-")

# Video-analytics pipeline runner (project-local helper imported above).
va = RunVA()

# Running totals used to report an average FPS across processed segments.
global_total_fps = 0
global_seg_count = 0


def process_stream(streamstring):
streamjson = ast.literal_eval(streamstring)
pipeline1 = streamjson["pipeline"]+"/1"
pipeline1 = streamjson["pipeline"] + "/1"
stream = streamjson['source']['uri']
user = streamjson["user_info"]["name"]
elapsed_time = time.time()-streamjson["start_time"]
print("VA feeder: stream: "+stream+" "+user+" elapsed-time on kafka queue:" + str(elapsed_time), flush=True)
elapsed_time = time.time() - streamjson["start_time"]
print("VA feeder: stream: " + stream + " " + user +
" elapsed-time on kafka queue:" + str(elapsed_time), flush=True)

zk_path = None
init_stream = None
if 'uri-init' in streamjson['source']:
init_stream = streamjson['source']['uri-init']
init_stream = streamjson['source']['uri-init']

m1 = re.search(r'(dash/.*)/chunk-stream[0-9]*-([0-9]*.m4s)$', stream)
if m1: zk_path="/analytics/"+m1.group(1)+"/"+m1.group(2)+"/"+streamjson["pipeline"]
if m1:
zk_path = "/analytics/" + \
m1.group(1) + "/" + m1.group(2) + "/" + streamjson["pipeline"]

m1 = re.search("(hls/.*)/[0-9]*p_([0-9]*.ts)$", stream)
if m1: zk_path="/analytics/"+m1.group(1)+"/"+m1.group(2)+"/"+streamjson["pipeline"]
print("zk path: "+zk_path, flush=True)
if m1:
zk_path = "/analytics/" + \
m1.group(1) + "/" + m1.group(2) + "/" + streamjson["pipeline"]
print("zk path: " + zk_path, flush=True)

zk=ZKState(zk_path)
zk = ZKState(zk_path)
if zk.processed():
print("VA feeder: " + user + " " + stream + " already complete", flush=True)
print("VA feeder: " + user + " " + stream +
" already complete", flush=True)
zk.close()
return

Expand All @@ -55,15 +64,15 @@ def process_stream(streamstring):
stream = "file://" + merged_segment
print("VA feeder: video-analytics merged segment: " +
stream, flush=True)
fps=va.loop({

fps = va.loop({
"source": {
"uri": stream,
"type":"uri"
"type": "uri"
},
"destination": {
"type": "kafka",
"host": socket.gethostbyname("kafka-service")+":9092",
"host": socket.gethostbyname("kafka-service") + ":9092",
"topic": "seg_analytics_data"
},
"tags": streamjson["tags"],
Expand All @@ -72,7 +81,7 @@ def process_stream(streamstring):
"start_time": streamjson["start_time"],
}, streamjson["pipeline"])

if fps<0:
if fps < 0:
zk.process_abort()
else:
zk.process_end()
Expand All @@ -81,13 +90,15 @@ def process_stream(streamstring):
global global_total_fps, global_seg_count
global_total_fps = global_total_fps + fps
global_seg_count = global_seg_count + 1
avg_fps = global_total_fps/global_seg_count
print("VA statistics : "+ "avg_fps " + str(avg_fps) + " " + str(global_total_fps)+" " + str(global_seg_count),flush=True)

avg_fps = global_total_fps / global_seg_count
print("VA statistics : " + "avg_fps " + str(avg_fps) + " " +
str(global_total_fps) + " " + str(global_seg_count), flush=True)

if merged_segment:
merge.delete_merged_segment(merged_segment)
zk.close()


if __name__ == "__main__":
c = Consumer("analytics")
while True:
Expand All @@ -98,7 +109,8 @@ def process_stream(streamstring):
try:
process_stream(msg)
except Exception as e:
print("VA feeder: "+str(e), flush=True)
print("VA feeder: " + str(e), flush=True)
traceback.print_exc()
except Exception as e:
print("VA feeder: error in main" + str(e), flush=True)
time.sleep(1)
Expand Down

0 comments on commit 63fc1ff

Please sign in to comment.