07_input_frames.py
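"""Populate the VAT database with frame entries parsed from NaoTH log files.

For every log returned by the API, read the cognition log and the motion (sensor)
log, extract FrameInfo (frame number and frame time) from each frame and
bulk-insert the entries via the Vaapi client. Logs whose frame counts already
match the stored log status are skipped.
"""
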
from pathlib import Path
from naoth.log import Reader as LogReader
from naoth.log import Parser
import os
from google.protobuf.json_format import MessageToDict
from vaapi.client import Vaapi
from tqdm import tqdm
import argparse


def is_done(log_id):
    # get the log status - it stores how many entries per representation there should be
    try:
        # we use list() here because we only know the log_id and not the id of the logstatus object
        response = client.log_status.list(log=log_id)
        if len(response) == 0:
            return False
        log_status = response[0]
    except Exception as e:
        print(e)
        return False

    if not log_status.num_cognition_frames or int(log_status.num_cognition_frames) == 0:
        print("\tWARNING: first calculate the number of cognition frames and put it in the db")
        quit()

    response = client.cognitionframe.get_frame_count(log=log_id)
    if int(log_status.num_cognition_frames) == int(response["count"]):
        return True
    else:
        print(log_status.num_cognition_frames, response["count"])
        return False


def is_done_motion(log_id):
    # get the log status - it stores how many entries per representation there should be
    try:
        # we use list() here because we only know the log_id and not the id of the logstatus object
        response = client.log_status.list(log=log_id)
        if len(response) == 0:
            return False
        log_status = response[0]
    except Exception as e:
        print(e)
        return False

    if not log_status.num_motion_frames or int(log_status.num_motion_frames) == 0:
        print("\tWARNING: first calculate the number of motion frames and put it in the db")
        quit()

    response = client.motionframe.get_frame_count(log=log_id)
    if int(log_status.num_motion_frames) == int(response["count"]):
        return True
    else:
        return False


def parse_cognition_log(log_data):
    log_path = Path(log_root_path) / log_data.log_path

    my_parser = Parser()
    game_log = LogReader(str(log_path), my_parser)

    frame_array = list()
    for idx, frame in enumerate(tqdm(game_log)):
        # stop parsing the log if FrameInfo is missing
        try:
            frame_number = frame['FrameInfo'].frameNumber
            frame_time = frame['FrameInfo'].time
        except Exception:
            print("FrameInfo not found in current frame - will not parse any other frames from this log and continue with the next one")
            break

        json_obj = {
            "log": log_data.id,
            "frame_number": frame_number,
            "frame_time": frame_time,
        }
        frame_array.append(json_obj)

        # upload in batches of 100 frames
        if idx % 100 == 0:
            try:
                response = client.cognitionframe.bulk_create(
                    frame_list=frame_array
                )
                frame_array.clear()
            except Exception as e:
                print(f"error inputting the data for {log_path}")
                print(e)
                quit()

    # handle the last frames:
    # just upload whatever is left in the array. There will be old data but that does not matter, it will be filtered out on insertion
    try:
        response = client.cognitionframe.bulk_create(
            frame_list=frame_array
        )
        print(response)
    except Exception as e:
        print(f"error inputting the data for {log_path}")
        print(e)


def parse_motion_log(log_data):
    sensor_log_path = Path(log_root_path) / log_data.sensor_log_path

    my_parser = Parser()
    game_log = LogReader(str(sensor_log_path), my_parser)

    frame_array = list()
    for idx, frame in enumerate(tqdm(game_log)):
        # stop parsing the log if FrameInfo is missing
        try:
            frame_number = frame['FrameInfo'].frameNumber
            frame_time = frame['FrameInfo'].time
        except Exception:
            print("FrameInfo not found in current frame - will not parse any other frames from this log and continue with the next one")
            break

        json_obj = {
            "log": log_data.id,
            "frame_number": frame_number,
            "frame_time": frame_time,
        }
        frame_array.append(json_obj)

        # upload in batches of 100 frames
        if idx % 100 == 0:
            try:
                response = client.motionframe.bulk_create(
                    frame_list=frame_array
                )
                frame_array.clear()
            except Exception as e:
                print(f"error inputting the data for {sensor_log_path}")
                print(e)
                quit()

    # handle the last frames:
    # just upload whatever is left in the array. There will be old data but that does not matter, it will be filtered out on insertion
    try:
        response = client.motionframe.bulk_create(
            frame_list=frame_array
        )
        print(response)
    except Exception as e:
        print(f"error inputting the data for {sensor_log_path}")
        print(e)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--force", action="store_true", default=False)
    args = parser.parse_args()

    log_root_path = os.environ.get("VAT_LOG_ROOT")

    client = Vaapi(
        base_url=os.environ.get("VAT_API_URL"),
        api_key=os.environ.get("VAT_API_TOKEN"),
    )

    existing_data = client.logs.list()

    def sort_key_fn(data):
        return data.log_path

    for log_data in sorted(existing_data, key=sort_key_fn, reverse=True):
        print("log_path: ", log_data.log_path)
        if not is_done(log_data.id):
            parse_cognition_log(log_data)

        print("log_path: ", log_data.sensor_log_path)
        if not is_done_motion(log_data.id):
            parse_motion_log(log_data)