
Merge two pandas DataFrames based on partial match

The city names in the two DataFrames are formatted differently. I want to do a left-outer join on all partial string matches between the City fields of the two DataFrames and pull in the Geo field.

import pandas as pd

df1 = pd.DataFrame({
                    'City': ['San Francisco, CA','Oakland, CA'], 
                    'Val': [1,2]
                  })

df2 = pd.DataFrame({
                    'City': ['San Francisco-Oakland, CA','Salinas, CA'], 
                    'Geo': ['geo1','geo2']
                  })

Expected DataFrame after the join:

 City                   Val   Geo

 San Francisco, CA      1     geo1
 Oakland, CA            2     geo1
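
For reference, a plain exact join finds no matches here, since none of the City strings are identical:

>>> df1.merge(df2, on='City', how='left')
                City  Val  Geo
0  San Francisco, CA    1  NaN
1        Oakland, CA    2  NaN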

Update: the fuzzywuzzy project has been renamed to thefuzz and has moved here.

You can use the thefuzz package and its extractOne function:

# Python env: pip install thefuzz
# Anaconda env: pip install thefuzz
# -> thefuzz is not yet available on Anaconda (2021-09-18)
# -> you can use the old package: conda install -c conda-forge fuzzywuzzy

from thefuzz import process

best_city = lambda x: process.extractOne(x, df2["City"])[2]  # See note below
df1['Geo'] = df2.loc[df1["City"].map(best_city).values, 'Geo'].values

Output:

>>> df1
                City  Val   Geo
0  San Francisco, CA    1  geo1
1        Oakland, CA    2  geo1

Note: extractOne returns a 3-tuple for the best match: the city name from df2 [0], the accuracy score [1], and the index [2] (<- the one I use).
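
For example, a quick check of what extractOne returns here (the score comes from the default scorer and may vary slightly by version):

from thefuzz import process

# 3-tuple: (matched value from df2["City"], score, index in df2)
print(process.extractOne("Oakland, CA", df2["City"]))
# e.g. ('San Francisco-Oakland, CA', <score>, 0)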

This should do the job. Strings are matched using the Levenshtein distance.

pip install thefuzz[speedup]
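
As a quick illustration of the underlying scoring (a sketch; fuzz.ratio is the plain Levenshtein-based similarity, while process.extract defaults to the composite WRatio scorer):

from thefuzz import fuzz

# similarity in [0, 100] based on the Levenshtein distance over the full strings
print(fuzz.ratio("Oakland, CA", "San Francisco-Oakland, CA"))
# similarity of the best-matching substring, useful when one string contains the other
print(fuzz.partial_ratio("Oakland, CA", "San Francisco-Oakland, CA"))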

import pandas as pd
import numpy as np

from thefuzz import process

def fuzzy_match(
    a: pd.DataFrame, b: pd.DataFrame, col: str, limit: int = 5, thresh: int = 80
):
    """use fuzzy matching to join on column"""

    s = b[col].tolist()

    matches = a[col].apply(lambda x: process.extract(x, s, limit=limit))
    matches = pd.DataFrame(np.concatenate(matches), columns=["match", "score"])

    # join other columns in b to matches
    to_join = (
        pd.merge(left=b, right=matches, how="right", left_on=col, right_on="match")
        .set_index(  # create an index that represents the matching row in df a, you can drop this when `limit=1`
            np.array(
                list(
                    np.repeat(i, limit if limit < len(b) else len(b))
                    for i in range(len(a))
                )
            ).flatten()
        )
        .drop(columns=["match"])
        .astype({"score": "int16"})
    )
    print(f"\t the index here represents the row in dataframe a on which to join")
    print(to_join)

    res = pd.merge(
        left=a, right=to_join, left_index=True, right_index=True, suffixes=("", "_b")
    )

    # return only the highest match or you can just set the limit to 1
    # and remove this
    df = res.reset_index()
    df = df.iloc[df.groupby(by="index")["score"].idxmax()].reset_index(drop=True)

    return df.drop(columns=["City_b", "score", "index"])

def test(df):

    expected = pd.DataFrame(
        {
            "City": ["San Francisco, CA", "Oakland, CA"],
            "Val": [1, 2],
            "Geo": ["geo1", "geo1"],
        }
    )

    print(f'{"expected":-^70}')
    print(expected)

    print(f'{"res":-^70}')
    print(df)

    assert expected.equals(df)


if __name__ == "__main__":

    a = pd.DataFrame({"City": ["San Francisco, CA", "Oakland, CA"], "Val": [1, 2]})
    b = pd.DataFrame(
        {"City": ["San Francisco-Oakland, CA", "Salinas, CA"], "Geo": ["geo1", "geo2"]}
    )

    print(f'\n\n{"fuzzy match":-^70}')
    res = fuzzy_match(a, b, col="City")
    test(res)

Use cosine similarity with sklearn's text feature extraction.

Computing the cosine similarity can be slow for large datasets. Take a look at: pip install sparse_dot_topn

See: https://www.sun-analytics.nl/posts/2017-07-26-boosting-selection-of-most-similar-entities-in-large-scale-datasets/
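
A minimal sketch of that speedup, assuming sparse_dot_topn's awesome_cossim_topn API (newer releases rename it sp_matmul_topn); because TfidfVectorizer L2-normalizes rows by default, the sparse dot product equals the cosine similarity:

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sparse_dot_topn import awesome_cossim_topn

a = pd.DataFrame({"City": ["San Francisco, CA", "Oakland, CA"]})
b = pd.DataFrame({"City": ["San Francisco-Oakland, CA", "Salinas, CA"]})

# fit the vocabulary on one side, reuse it for the other
vectorizer = TfidfVectorizer(analyzer="char_wb", ngram_range=(3, 3))
tfidf_a = vectorizer.fit_transform(a["City"])
tfidf_b = vectorizer.transform(b["City"])

# sparse product keeping only the single best score per row above 0.3
# (rows of a with no match above the bound simply have no entry)
top = awesome_cossim_topn(tfidf_a, tfidf_b.T.tocsr(), ntop=1, lower_bound=0.3)
print(b["City"].iloc[top.indices].values)  # best match in b for each row of a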

pip install scikit-learn

import numpy as np
import pandas as pd

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity


# https://stackoverflow.com/a/27086669/8615419
# as a preprocessor for TfidfVectorizer
def clean_corpus(s: str):
    """return clean corpus -- replaced any non word chars with space"""
    for ch in ['\\','`','*','_','{','}','[',']','(',')','>','#','+','-','.','!','$','\'',',']:
        if ch in s:
            s = s.replace(ch, " ")
    return s.lower()

# why n-grams?
# this should account for any word misspellings
def fit_vectorizer(corpus: np.array, n: int = 3):

    vectorizer = TfidfVectorizer(analyzer="char_wb", preprocessor=clean_corpus, ngram_range=(n, n))
    tfidf = vectorizer.fit_transform(corpus)

    return tfidf, vectorizer
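
# Illustration (a hedged aside; get_feature_names_out requires sklearn >= 1.0):
#   tfidf, v = fit_vectorizer(np.array(["Oakland, CA"]))
#   print(v.get_feature_names_out())
# prints char_wb 3-grams such as ' oa', 'oak', 'akl', ... -- these overlapping
# fragments are what make the match tolerant of small spelling differences.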


def cosine_similarity_join(a, b, col_name):

    a_len = len(a[col_name])

    # all of the "documents" in a 1D array
    corpus = np.concatenate([a[col_name].to_numpy(), b[col_name].to_numpy()])

    tfidf, vectorizer = fit_vectorizer(corpus, 3)
    # print(vectorizer.get_feature_names_out())  # get_feature_names() was removed in sklearn 1.2

    # in this matrix each row represents the str in a and the col is the str from b, value is the cosine similarity
    res = cosine_similarity(tfidf[:a_len], tfidf[a_len:])
    print('in this matrix each row represents the str in a and the col is the str from b')
    print(res)

    res_series = pd.DataFrame(res).stack().rename("score")
    res_series.index.set_names(['a', 'b'], inplace=True)
    # print(res_series)
    
    # join scores to b
    b_scored = pd.merge(left=b, right=res_series, left_index=True, right_on='b').droplevel('b')
    # print(b_scored.sort_index())
   
    # find the indices on which to match, (highest score in each row)
    # best_match = np.argmax(res, axis=1)
    
    res = pd.merge(left=a, right=b_scored, left_index=True, right_index=True, suffixes=('', '_b'))
    print(res)

    df = res.reset_index()
    df = df.iloc[df.groupby(by="index")["score"].idxmax()].reset_index(drop=True)

    return df.drop(columns=[f"{col_name}_b", "score", "index"])

def test(df):

    expected = pd.DataFrame(
        {
            "City": ["San Francisco, CA", "Oakland, CA"],
            "Val": [1, 2],
            "Geo": ["geo1", "geo1"],
        }
    )

    print(f'{"expected":-^70}')
    print(expected)

    print(f'{"res":-^70}')
    print(df)

    assert expected.equals(df)


if __name__ == "__main__":

    a = pd.DataFrame({"City": ["San Francisco, CA", "Oakland, CA"], "Val": [1, 2]})
    b = pd.DataFrame(
        {"City": ["San Francisco-Oakland, CA", "Salinas, CA"], "Geo": ["geo1", "geo2"]}
    )

    print(f'\n\n{"n-gram cosine similarity":-^70}')
    res = cosine_similarity_join(a, b, col_name="City")
    test(res)
