gensim_word2vec.py
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from gensim.models import Word2Vec


class GensimWord2VecVectorizer(BaseEstimator, TransformerMixin):
    """
    Word vectors are averaged across each document to create the
    document-level vectors/features. gensim's own
    gensim.sklearn_api.W2VTransformer doesn't support out-of-vocabulary
    words, hence we roll our own.

    All the parameters are gensim.models.Word2Vec's parameters.
    https://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec
    """

def __init__(self, vector_size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None,
sample=0.001, seed=1, workers=3, min_alpha=0.0001, sg=0, hs=0, negative=5,
ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=10000, compute_loss=False,
callbacks=(), max_final_vocab=None):
self.vector_size = vector_size
self.alpha = alpha
self.window = window
self.min_count = min_count
self.max_vocab_size = max_vocab_size
self.sample = sample
self.seed = seed
self.workers = workers
self.min_alpha = min_alpha
self.sg = sg
self.hs = hs
self.negative = negative
self.ns_exponent = ns_exponent
self.cbow_mean = cbow_mean
self.hashfxn = hashfxn
self.epochs = epochs
self.null_word = null_word
self.trim_rule = trim_rule
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
self.compute_loss = compute_loss
self.callbacks = callbacks
self.max_final_vocab = max_final_vocab

    def fit(self, X, y=None):
        # Train Word2Vec on the tokenized documents in X; the trailing
        # underscore on model_ follows scikit-learn's fitted-attribute convention.
self.model_ = Word2Vec(
sentences=X, corpus_file=None,
vector_size=self.vector_size, alpha=self.alpha, window=self.window, min_count=self.min_count,
max_vocab_size=self.max_vocab_size, sample=self.sample, seed=self.seed,
workers=self.workers, min_alpha=self.min_alpha, sg=self.sg, hs=self.hs,
negative=self.negative, ns_exponent=self.ns_exponent, cbow_mean=self.cbow_mean,
hashfxn=self.hashfxn, epochs=self.epochs, null_word=self.null_word,
trim_rule=self.trim_rule, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words,
compute_loss=self.compute_loss, callbacks=self.callbacks,
max_final_vocab=self.max_final_vocab)
return self

    def transform(self, X):
        # Map each tokenized document to the mean of its word vectors.
        X_embeddings = np.array([self._get_embedding(words) for words in X])
        return X_embeddings

    def _get_embedding(self, words):
        # Keep only in-vocabulary words, then average their vectors.
        valid_words = [word for word in words if word in self.model_.wv.key_to_index]
        if valid_words:
            embedding = np.zeros((len(valid_words), self.vector_size), dtype=np.float32)
            for idx, word in enumerate(valid_words):
                embedding[idx] = self.model_.wv[word]
            return np.mean(embedding, axis=0)
        else:
            # Every word was out of vocabulary: fall back to an all-zeros
            # document vector, with the same dtype as the trained vectors.
            return np.zeros(self.vector_size, dtype=np.float32)
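

if __name__ == '__main__':
    # Minimal usage sketch: the vectorizer plugs into a scikit-learn
    # Pipeline because it implements fit/transform. The toy corpus,
    # labels, and the min_count=1 / vector_size=50 settings below are
    # illustrative assumptions, not part of the class above.
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline

    # Each document is a pre-tokenized list of words.
    docs = [['cat', 'sat', 'mat'], ['dog', 'bit', 'man'],
            ['cat', 'dog', 'pet'], ['man', 'sat', 'chair']]
    labels = [0, 1, 0, 1]

    # min_count=1 so the tiny vocabulary survives frequency pruning.
    pipe = Pipeline([
        ('w2v', GensimWord2VecVectorizer(vector_size=50, min_count=1, seed=1)),
        ('clf', LogisticRegression()),
    ])
    pipe.fit(docs, labels)
    print(pipe.predict([['cat', 'mat']]))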