
Using gensim word2vec in scikit-learn pipeline

I am trying to use word2vec in a scikit-learn pipeline:

from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import numpy as np

class ItemSelector(BaseEstimator, TransformerMixin):
    def __init__(self, key):
        self.key = key

    def fit(self, x, y=None):
        return self

    def transform(self, data_dict):
        return data_dict[self.key]


from sklearn.pipeline import Pipeline
from gensim.sklearn_api import W2VTransformer
pipeline_word2vec = Pipeline([
                ('selector', ItemSelector(key='X')),
                ('w2v', W2VTransformer()),
            ])

pipeline_word2vec.fit(pd.DataFrame({'X':['hello world','is amazing']}), np.array([1,0]))

This gives me:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-11-9e2dd309d07c> in <module>()
     23                 ('w2v', W2VTransformer()),
     24             ])
---> 25 pipeline_word2vec.fit(pd.DataFrame({'X':['hello world','is amazing']}), np.array([1,0]))

/usr/local/anaconda3/lib/python3.6/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
    248         Xt, fit_params = self._fit(X, y, **fit_params)
    249         if self._final_estimator is not None:
--> 250             self._final_estimator.fit(Xt, y, **fit_params)
    251         return self
    252 

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/sklearn_api/w2vmodel.py in fit(self, X, y)
     62             sg=self.sg, hs=self.hs, negative=self.negative, cbow_mean=self.cbow_mean,
     63             hashfxn=self.hashfxn, iter=self.iter, null_word=self.null_word, trim_rule=self.trim_rule,
---> 64             sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
     65         )
     66         return self

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/word2vec.py in __init__(self, sentences, size, alpha, window, min_count, max_vocab_size, sample, seed, workers, min_alpha, sg, hs, negative, cbow_mean, hashfxn, iter, null_word, trim_rule, sorted_vocab, batch_words, compute_loss, callbacks)
    525             batch_words=batch_words, trim_rule=trim_rule, sg=sg, alpha=alpha, window=window, seed=seed,
    526             hs=hs, negative=negative, cbow_mean=cbow_mean, min_alpha=min_alpha, compute_loss=compute_loss,
--> 527             fast_version=FAST_VERSION)
    528 
    529     def _do_train_job(self, sentences, alpha, inits):

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/base_any2vec.py in __init__(self, sentences, workers, vector_size, epochs, callbacks, batch_words, trim_rule, sg, alpha, window, seed, hs, negative, cbow_mean, min_alpha, compute_loss, fast_version, **kwargs)
    336             self.train(
    337                 sentences, total_examples=self.corpus_count, epochs=self.epochs, start_alpha=self.alpha,
--> 338                 end_alpha=self.min_alpha, compute_loss=compute_loss)
    339         else:
    340             if trim_rule is not None:

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/word2vec.py in train(self, sentences, total_examples, total_words, epochs, start_alpha, end_alpha, word_count, queue_factor, report_delay, compute_loss, callbacks)
    609             sentences, total_examples=total_examples, total_words=total_words,
    610             epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
--> 611             queue_factor=queue_factor, report_delay=report_delay, compute_loss=compute_loss, callbacks=callbacks)
    612 
    613     def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/base_any2vec.py in train(self, sentences, total_examples, total_words, epochs, start_alpha, end_alpha, word_count, queue_factor, report_delay, compute_loss, callbacks)
    567             sentences, total_examples=total_examples, total_words=total_words,
    568             epochs=epochs, start_alpha=start_alpha, end_alpha=end_alpha, word_count=word_count,
--> 569             queue_factor=queue_factor, report_delay=report_delay, compute_loss=compute_loss, callbacks=callbacks)
    570 
    571     def _get_job_params(self, cur_epoch):

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/base_any2vec.py in train(self, data_iterable, epochs, total_examples, total_words, queue_factor, report_delay, callbacks, **kwargs)
    239             epochs=epochs,
    240             total_examples=total_examples,
--> 241             total_words=total_words, **kwargs)
    242 
    243         for callback in self.callbacks:

/usr/local/anaconda3/lib/python3.6/site-packages/gensim/models/base_any2vec.py in _check_training_sanity(self, epochs, total_examples, total_words, **kwargs)
    599 
    600         if not self.wv.vocab:  # should be set by `build_vocab`
--> 601             raise RuntimeError("you must first build vocabulary before training the model")
    602         if not len(self.wv.vectors):
    603             raise RuntimeError("you must initialize vectors before training the model")

RuntimeError: you must first build vocabulary before training the model

in a Jupyter notebook. What I expected instead was a trained model. How can I fix this?

W2VTransformer has a min_count parameter, which defaults to 5. The error occurs simply because you supply only 2 short documents, while a word must occur at least 5 times in the corpus to be kept in the vocabulary, so the vocabulary ends up empty.

Possible solutions:

  • Lower min_count (as in the sketch below)

  • Provide more documents to the model
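
For example, here is a minimal sketch of the first fix, reusing the ItemSelector from the question and assuming the same gensim 3.x sklearn_api. It lowers min_count so the tiny two-sentence corpus yields a non-empty vocabulary; it also pre-tokenizes the sentences, since gensim's Word2Vec expects each sentence to be a list of tokens (a raw string would be iterated character by character):

from sklearn.pipeline import Pipeline
from gensim.sklearn_api import W2VTransformer
import pandas as pd
import numpy as np

# Pre-tokenized sentences: each document is a list of word tokens.
df = pd.DataFrame({'X': [['hello', 'world'], ['is', 'amazing']]})

pipeline_word2vec = Pipeline([
    ('selector', ItemSelector(key='X')),
    # min_count=1 keeps every word; the default of 5 would discard all
    # words in this tiny corpus and leave the vocabulary empty.
    ('w2v', W2VTransformer(min_count=1, size=10)),
])

pipeline_word2vec.fit(df, np.array([1, 0]))

Note that W2VTransformer.transform maps individual words to their vectors, so a downstream pipeline step would receive per-word embeddings rather than one vector per document.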

