-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathBaseline_preprocess.py
97 lines (80 loc) · 2.59 KB
/
Baseline_preprocess.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
# Silence noisy third-party (transformers/torch) deprecation warnings up-front.
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import pickle
from datetime import datetime
from tqdm import tqdm
from multiprocessing import Pool
import torch
# BUGFIX(style): replaced `from transformers import *` with an explicit
# import of the only two names this file uses — the wildcard pulled
# hundreds of symbols into the module namespace.
from transformers import BertTokenizer, BertModel
import nltk
import sys
# Shared pretrained BERT tokenizer/encoder, used by the Tokenize worker
# function and the embedding loop in __main__ below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert = BertModel.from_pretrained('bert-base-uncased')
def Tokenize(dataset):
    """Tokenize every title in *dataset* with the module-level BERT tokenizer.

    Each element of *dataset* is a dict with a 'date' key and a 'title' key
    holding a list of strings. Returns a new list of dicts of the same shape,
    with each title string replaced by its BERT token-id list (no [CLS]/[SEP]
    special tokens added).
    """
    tokenized = []
    for entry in tqdm(dataset):
        encoded_titles = [
            tokenizer.encode(text, add_special_tokens=False)
            for text in entry['title']
        ]
        tokenized.append({'date': entry['date'], 'title': encoded_titles})
    return tokenized
def padding(arrs, max_len):
    """Truncate each sequence in *arrs* to at most *max_len* items,
    keeping the LAST *max_len* elements of any longer sequence.

    NOTE: despite its name, this function only truncates — it never pads
    short sequences. Sequences already within the limit are returned
    unchanged.
    """
    return [seq[-max_len:] if len(seq) > max_len else seq for seq in arrs]
if __name__ == '__main__':
    # --- Load the pickled news DataFrame (input path from argv[1]). ---
    # NOTE(review): pickle.load on an arbitrary file is unsafe for
    # untrusted input; acceptable only for trusted local data.
    with open(sys.argv[1], 'rb') as f:
        news = pickle.load(f)
    news_dict = news.to_dict('records')

    # --- Group titles by date. Single O(n) pass, replacing the original
    # O(n * distinct_dates) rescan of the full record list per date. ---
    grouped = {}
    for record in news_dict:
        grouped.setdefault(record['date'], []).append(record['title'])
    datas = [{'date': date, 'title': titles} for date, titles in grouped.items()]

    # --- Tokenize in parallel: n_workers contiguous chunks, the last
    # chunk absorbing the division remainder. ---
    n_workers = 4
    results = [None] * n_workers
    with Pool(processes=n_workers) as pool:
        chunk = len(datas) // n_workers
        for i in range(n_workers):
            batch_start = chunk * i
            batch_end = len(datas) if i == n_workers - 1 else chunk * (i + 1)
            results[i] = pool.apply_async(Tokenize, [datas[batch_start:batch_end]])
        pool.close()
        pool.join()
    train_token = []
    for result in results:
        train_token += result.get()

    # --- Embed each title with BERT and average into one vector/day. ---
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        bert.cuda()
    train_vector = []
    for data in tqdm(train_token):
        title_vecs = []
        for token_ids in data['title']:
            with torch.no_grad():
                # BUGFIX: the original used .unsqueeze(-1), feeding BERT a
                # (seq_len, 1) tensor — i.e. seq_len independent length-1
                # "sentences" — so self-attention never saw any context.
                # BertModel expects input_ids of shape (batch, seq_len);
                # encode the whole title as a batch of one.
                ids = torch.tensor(token_ids).unsqueeze(0)
                if use_gpu:
                    ids = ids.cuda()
                hidden = bert(ids)[0]            # (1, seq_len, hidden_dim)
                vec = hidden.squeeze(0).mean(0)  # mean-pool tokens -> (hidden_dim,)
            title_vecs.append(vec)
        train_vector.append({
            'date': data['date'],
            # Mean over all of the day's title vectors -> one vector per date.
            'title': torch.stack(title_vecs).mean(0).cpu(),
        })

    # --- Persist as a {date: embedding} dict (output path from argv[2]). ---
    datas = {x['date']: x['title'] for x in train_vector}
    with open(sys.argv[2], 'wb') as f:
        pickle.dump(datas, f)