-
Notifications
You must be signed in to change notification settings - Fork 0
/
infer.py
329 lines (287 loc) · 11.7 KB
/
infer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
import argparse
import json
import math
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import sklearn
import sklearn.metrics  # `import sklearn` alone does not import the metrics submodule
import torch
import tqdm
from torch import tensor

from food_data import FoodNumericDataset, FoodNumericDataModule
from food_model import FoodNumericModel
import food_utils
# from rich import print as rprint
def get_list(inputs):
    """Flatten tokenized test-set elements into plain dicts for a DataFrame.

    Each element's 'text_input' is split on ' [SEP] '; the field at index 3
    (the dimension) is dropped to form 'text_input_no_dim', and the five
    remaining fields become the output columns.  The target quantity is also
    normalized via food_utils.unit_2_normalize_factor_dict.

    Note: mutates each input dict in place, adding 'splitted',
    'splitted_no_dim' and 'text_input_no_dim' keys (as the original did).
    """
    rows = []
    for idx, elem in enumerate(inputs):
        parts = elem['text_input'].split(' [SEP] ')
        elem['splitted'] = parts
        # Drop the dimension field (index 3) from the SEP-joined input.
        no_dim = parts[:3] + parts[4:]
        elem['splitted_no_dim'] = no_dim
        elem['text_input_no_dim'] = ' [SEP] '.join(no_dim)
        # Remaining fields: target / other ingredients / title / tags / servings.
        target, other_ings, title, tags, servings = no_dim
        quantity = float(elem['target_quantity'])
        normalizer = food_utils.unit_2_normalize_factor_dict[elem['target_unit_str']]
        rows.append({
            'idx': idx,
            'target_text': target,
            'other_ings': '_'.join(other_ings.split(' [SEP2] ')),
            'title': title,
            'tags': '_'.join(tags.split(' [SEP2] ')),
            'servings': servings,
            'file_name': elem['file_name'],
            'recipe_db_id': elem['recipe_db_id'],
            'target_quantity': quantity,
            'target_unit': elem['target_unit_str'],
            'target_dim': elem['target_dim_str'],
            'converted_quantity': quantity / normalizer,
        })
    return rows
class PredModel:
    """Thin inference wrapper around a FoodNumericModel checkpoint.

    Depending on `ver`, the checkpoint predicts ingredient quantity
    ('ing_q'), dimension ('dim') or unit ('unit'); callers must use the
    matching get_pred_* method.

    Model input field order: target / ings / title / [dimension] / tags / servings,
    joined with ' [SEP] ' (list fields joined internally with ' [SEP2] ').
    """

    def __init__(self, path, ver):
        """Load the checkpoint at `path` and move it to the inference device.

        Raises:
            ValueError: if `ver` is not one of 'ing_q', 'dim', 'unit'.
        """
        if ver not in ['ing_q', 'dim', 'unit']:
            raise ValueError(f'ver [{ver}] cannot be used')
        self.ver = ver
        self.model = FoodNumericModel.load_from_checkpoint(path)
        # NOTE(review): GPU index 1 is hard-coded — confirm this matches the
        # intended device on multi-GPU hosts.
        self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

    @staticmethod
    def _join_fields(target_text, ings, title, dimension, tags, servings):
        """Build the '[SEP]'-joined model input string.

        `dimension` may be None to omit the dimension field entirely (used
        by the dimension-prediction model).  `ings`/`tags` may be None and
        are treated as empty lists.
        """
        ings = [] if ings is None else ings
        tags = [] if tags is None else tags
        fields = [f'{target_text}', ' [SEP2] '.join(ings), title]
        if dimension is not None:
            fields.append(dimension)
        fields += [' [SEP2] '.join(tags), f'{servings}']
        return ' [SEP] '.join(fields)

    def _tokenize(self, text):
        """Tokenize `text` to fixed-length (512) tensors on self.device."""
        elem = self.model.lm_tokenizer(
            text, padding='max_length', return_tensors='pt', max_length=512)
        elem.to(self.device)
        return {
            'input_ids': elem['input_ids'],
            'token_type_ids': elem['token_type_ids'],
            'attention_mask': elem['attention_mask'],
        }

    def parse_q_text(self, target_text='', ings=None, title='', dimension='', tags=None, servings=4):
        """Build the quantity-model input text.

        Raises:
            ValueError: if `dimension` is not 'weight' or 'amount'.
        """
        if dimension not in ['weight', 'amount']:
            raise ValueError
        return self._join_fields(target_text, ings, title, dimension, tags, servings)

    def get_pred_ing_q(self, target_text='', ings=None, title='', dimension='', tags=None, servings=4):
        """Predict the ingredient quantity from structured fields."""
        text = self.parse_q_text(target_text, ings, title, dimension, tags, servings)
        return self.get_pred_ing_q_text(text)

    def get_pred_ing_q_text(self, text):
        """Predict the ingredient quantity from an already-formatted input text.

        Returns a dict with 'mean_pred', 'logvar_pred', 'exp_prob',
        'pred_val' and 'input_txt' keys.  When the model does not predict
        exponents, the first three are 0.0 placeholders.
        """
        batch = self._tokenize(text)
        self.model.eval()
        with torch.no_grad():
            # Backbone output carries the (mean, logvar, ..., exponent) head;
            # the full model output carries the final scalar prediction.
            res = self.model.backbone(batch), self.model(batch)
        if self.model.is_q_predict and self.model.is_e_predict:
            return {
                'mean_pred': res[0][0][0].tolist(),
                'logvar_pred': res[0][1][0].tolist(),
                'exp_prob': torch.exp(res[0][3][0]).tolist(),
                'pred_val': res[1][0].tolist(),
                'input_txt': text,
            }
        if self.model.is_q_predict:
            return {
                'mean_pred': 0.,     # default
                'logvar_pred': 0.,   # default
                'exp_prob': 0.,      # default
                'pred_val': res[1][0].tolist(),
                'input_txt': text,
            }
        # Original code fell through here with `converted_res` unbound,
        # producing an UnboundLocalError; fail with a clear message instead.
        raise RuntimeError('ing_q inference requires a model with is_q_predict set')

    def parse_u_text(self, target_text='', ings=None, title='', dimension='', tags=None, servings=4):
        """Build the unit-model input text (same layout as parse_q_text).

        Raises:
            ValueError: if `dimension` is not 'weight' or 'amount'.
        """
        if dimension not in ['weight', 'amount']:
            raise ValueError
        return self._join_fields(target_text, ings, title, dimension, tags, servings)

    def get_pred_unit(self, target_text='', ings=None, title='', dimension='', tags=None, servings=4):
        """Predict the ingredient unit from structured fields."""
        text = self.parse_u_text(target_text, ings, title, dimension, tags, servings)
        return self.get_pred_unit_text(text)

    def get_pred_unit_text(self, text):
        """Predict the unit from an already-formatted input text.

        Returns a dict with 'pred_unit' (label), 'prob' (class
        probabilities) and 'text' keys.
        """
        batch = self._tokenize(text)
        self.model.eval()
        with torch.no_grad():
            res = self.model(batch)
        return {
            'pred_unit': res[0],
            'prob': res[1][0].tolist(),
            'text': text,
        }

    def parse_d_text(self, target_text='', ings=None, title='', tags=None, servings=4):
        """Build the dimension-model input text (no dimension field)."""
        return self._join_fields(target_text, ings, title, None, tags, servings)

    def get_pred_dimension(self, target_text='', ings=None, title='', tags=None, servings=4):
        """Predict the measurement dimension from structured fields."""
        text = self.parse_d_text(target_text, ings, title, tags, servings)
        return self.get_pred_dimension_text(text)

    def get_pred_dimension_text(self, text):
        """Predict the dimension from an already-formatted input text.

        Returns a dict with 'pred_dimension' (label), 'prob' (class
        probabilities) and 'text' keys.
        """
        batch = self._tokenize(text)
        self.model.eval()
        with torch.no_grad():
            res = self.model(batch)
        return {
            'pred_dimension': res[0],
            'prob': res[1][0].tolist(),
            'text': text,
        }
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, default='./checkpoints')
    parser.add_argument('--food_data_path', type=str, default='./data')
    args = parser.parse_args()

    def _run_preds(df, func, result_key, dim_col=None):
        """Run `func` row-by-row over `df`, collecting `result_key` from each
        prediction dict.  `dim_col` names the df column supplying the
        `dimension` kwarg; None omits it (dimension prediction)."""
        out = []
        for i in tqdm.tqdm(range(len(df))):
            row = df.iloc[i]
            kwargs = dict(
                target_text=row['target_text'],
                ings=row['other_ings'].split('_'),
                title=row['title'],
                tags=row['tags'].split('_'),
                servings=row['servings'],
            )
            if dim_col is not None:
                kwargs['dimension'] = row[dim_col]
            out.append(func(**kwargs)[result_key])
        return out

    # Build a flat test-set DataFrame from the data module.
    dm = FoodNumericDataModule(
        batch_size=1,
        food_data_path=args.food_data_path,
        min_e=-2,
        n_exponent=7,
        size='all',
        exp_ver='ing_q',
        q_ing_phrase_ver='ing_name_q_u_mask',  # ['ing_name', 'ing_name_q_u_mask', 'ing_phrase_q_mask']
        # other_ing_phrase_ver='ing_phrase',
        other_ing_phrase_ver='ing_name',  # ['ing_name', 'ing_phrase', ing_phrase_q_u_mask', 'ing_phrase_q_mask']
        is_include_ing_phrase=True,
        is_include_title=True,
        is_include_tags=True,
        is_include_other_ing=True,
        is_include_dimension=True,
        is_include_serving=True,
    )
    dm.setup('test')
    test_loader = dm.test_dataloader()
    # batch_size=1: unwrap the singleton batch dimension of every field.
    inputs = [{k: v[0] for k, v in elem.items()} for elem in iter(test_loader)]
    df = pd.DataFrame(get_list(inputs))

    # 1. Dimension prediction, scored against the ground-truth dimension.
    print(' ## 1. Pred Dimension ')
    _model = PredModel(f'{args.checkpoint_path}/dim.ckpt', 'dim')
    df['pred_dim'] = _run_preds(df, _model.get_pred_dimension, 'pred_dimension')
    print(f"Accuracy : {sklearn.metrics.accuracy_score(df['target_dim'], df['pred_dim'])}")

    # 2. Unit prediction, conditioned on predicted vs. ground-truth dimension.
    print(' ## 2. Pred Unit ( Pred Dim )')
    _model = PredModel(f'{args.checkpoint_path}/unit.ckpt', 'unit')
    df['pred_u_pd'] = _run_preds(df, _model.get_pred_unit, 'pred_unit', dim_col='pred_dim')
    df['pred_u_td'] = _run_preds(df, _model.get_pred_unit, 'pred_unit', dim_col='target_dim')
    print(f"Unit Accuracy : {sklearn.metrics.accuracy_score(df['target_unit'], df['pred_u_td'])}")

    # 3. Quantity prediction, conditioned on predicted vs. ground-truth dimension.
    # (Header added for consistency with sections 1 and 2.)
    print(' ## 3. Pred Quantity')
    _model = PredModel(f'{args.checkpoint_path}/ing_q.ckpt', 'ing_q')
    df['pred_q_pd'] = _run_preds(df, _model.get_pred_ing_q, 'pred_val', dim_col='pred_dim')
    df['pred_q_td'] = _run_preds(df, _model.get_pred_ing_q, 'pred_val', dim_col='target_dim')
    print(f"Quantity MAE : {sklearn.metrics.mean_absolute_error(df['target_quantity'], df['pred_q_td'])}")