w2v.py
'''
Pre-train Word2Vec embeddings on the Web service dataset: initialize the
in-vocabulary words from pre-trained FastText vectors (wiki.en.vec), then
fine-tune them on the service descriptions.
'''
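# Example invocation (assumes the dataset files sit under ./data/ and the
# pre-trained FastText vectors under ./cache/, per the paths configured below):
#   python w2v.py --dataset pw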
import argparse
import json
import os

import joblib
import numpy as np
from gensim.models import Word2Vec
parser = argparse.ArgumentParser(
    description='train word2vec embeddings on the Web service dataset based on the pre-trained FastText word embeddings.'
)
parser.add_argument('--dataset', default='pw', type=str, required=False, help='name of the dataset. Options: [pw, aws]')
args = parser.parse_args()
dataset = args.dataset
assert dataset in ['pw', 'aws'], 'unknown dataset: %s' % dataset
DATA_ROOT = './data/'
CACHE_ROOT = './cache/'
PRE_TRAIN_MODEL_NAME = 'wiki.en.vec'
EMB_DIM = 300
MODEL_NAME = '%s_word2vec.txt' % dataset
# clear any stale .pt cache of the embeddings left by a previous run
if os.path.exists(os.path.join(CACHE_ROOT, MODEL_NAME + '.pt')):
    os.remove(os.path.join(CACHE_ROOT, MODEL_NAME + '.pt'))
index2label = joblib.load(os.path.join(DATA_ROOT, '%s_index2label.pkl' % dataset))
sents = []
counter = {}  # document frequency of each token (computed but not used below)
with open(os.path.join(DATA_ROOT, '%s.json' % dataset), 'r', errors='ignore', encoding='utf8') as fr:
    for line in fr:
        row = json.loads(line)
        sents.append(row['text'])  # row['text'] is a tokenized description
        for word in set(row['text']):
            counter[word] = counter.get(word, 0) + 1
# include the label token sequences so they also enter the vocabulary
for label_tokens in index2label.values():
    sents.append(label_tokens)
# skip-gram model (gensim 3.x API: `size` was renamed `vector_size` in gensim 4)
w2v_model = Word2Vec(size=EMB_DIM, sg=1, min_count=1, window=9)
w2v_model.build_vocab(sents)
# seed in-vocabulary words with the pre-trained FastText vectors;
# lockf=1.0 keeps the seeded vectors trainable during the update below
w2v_model.intersect_word2vec_format(os.path.join(CACHE_ROOT, PRE_TRAIN_MODEL_NAME), binary=False, lockf=1.0)
w2v_model.train(sents, total_examples=w2v_model.corpus_count, epochs=w2v_model.epochs)
w2v_model.wv.save_word2vec_format(os.path.join(CACHE_ROOT, MODEL_NAME))
# generate an embedding vector for each category by averaging the vectors
# of its label tokens
vectors = w2v_model.wv
index2vector = {}
for index, label_tokens in index2label.items():
    vector = np.zeros((EMB_DIM,))
    l = 0
    for token in label_tokens:
        if token in vectors:
            vector += vectors[token]
            l += 1
        else:
            print('label token not in vocabulary: %s' % token)
    if l > 0:
        vector /= l
    index2vector[index] = vector
joblib.dump(index2vector, os.path.join(DATA_ROOT, '%s_index2vector.pkl' % dataset))
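# A minimal sketch (illustrative, not part of this script) of how downstream
# code could reload the two artifacts written above:
#
#   from gensim.models import KeyedVectors
#   word_vectors = KeyedVectors.load_word2vec_format(
#       os.path.join(CACHE_ROOT, MODEL_NAME))  # fine-tuned word embeddings
#   index2vector = joblib.load(
#       os.path.join(DATA_ROOT, '%s_index2vector.pkl' % dataset))  # category vectors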