I'm trying to use multiprocessing in a fuzzy-matching script I've written. I need to perform 1.4 billion comparisons, which takes 30+ hours single-threaded, so I'm attempting to integrate multiprocessing here.
def fuzzyCompare(data1, data2):
    """Fuzzy-match every row of data1 against every row of data2.

    Each row is keyed on a lowercase, space-stripped concatenation of
    company name, street, city and postal code, and compared with
    fuzz.ratio.

    Args:
        data1: DataFrame with columns CompanyName, Address1, City,
            PostalZip, Id1.
        data2: DataFrame with columns Company, Physical Street 1,
            Physical City, Physical Postal Code/ZIP, Id2. May be a chunk
            produced by np.array_split, so its index need not start at 0.

    Returns:
        DataFrame with columns ["Similarity Ratio", "Id1", "Id2"], one
        row per pair whose ratio is > 0.
    """
    print("Performing Fuzzy Matches...\n")

    # Use positional (.iloc) indexing throughout: np.array_split keeps the
    # original index of each chunk, so label lookups like data2["Company"][j]
    # raise KeyError: 0 for every chunk except the first.
    # Pre-normalize all of data2 once instead of inside the O(n*m) loop.
    keys2 = [
        (data2["Company"].iloc[j] + "," + data2["Physical Street 1"].iloc[j]
         + "," + data2["Physical City"].iloc[j]
         + "," + data2["Physical Postal Code/ZIP"].iloc[j]
         ).lower().replace(" ", "")
        for j in range(len(data2))
    ]
    ids2 = data2["Id2"].tolist()

    rows = []  # accumulate tuples; per-cell DataFrame.at writes are very slow
    for i in range(len(data1)):
        str1 = (data1["CompanyName"].iloc[i] + "," + data1["Address1"].iloc[i]
                + "," + data1["City"].iloc[i] + "," + data1["PostalZip"].iloc[i]
                ).lower().replace(" ", "")
        id1 = data1["Id1"].iloc[i]
        for str2, id2 in zip(keys2, ids2):
            ratio = fuzz.ratio(str1, str2)
            if ratio > 0:
                rows.append((str(ratio) + "%", id1, id2))

    similarityDf = pd.DataFrame(rows, columns=["Similarity Ratio", "Id1", "Id2"])
    print("Performed " + str(len(data1) * len(data2)) + " Fuzzy Comparisons.\n")
    return similarityDf
def main():
    """Read both workbooks, split the second, and fuzzy-match in parallel.

    Returns:
        The concatenated similarity DataFrame from all 4 workers.
    """
    data1 = readData(excelFile1)  # read excel file into a DataFrame
    data2 = readData(excelFile2)  # read excel file into a DataFrame

    # Split data2 into 4 chunks for the pool. Each chunk keeps its original
    # index, which is why fuzzyCompare must select rows positionally (.iloc).
    df_split = np.array_split(data2, 4)
    args = [(data1, chunk) for chunk in df_split]

    with mp.Pool(processes=4) as p:
        # ignore_index=True: the per-worker frames each count from 0, so the
        # raw indexes would collide after concatenation.
        outputData = pd.concat(p.starmap(fuzzyCompare, args), ignore_index=True)
    return outputData
if __name__ == "__main__":
    # Required on Windows for frozen executables; a no-op otherwise.
    # (The traceback paths suggest this runs on Windows — confirm.)
    mp.freeze_support()
    main()
I have a print statement at the end of my fuzzyCompare(), and it prints the result for only one worker; then I receive the following error:
multiprocessing.pool.RemoteTraceback
Traceback (most recent call last):
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\pool.py", line 47, in starmapstar
return list(itertools.starmap(args[0], args[1]))
File "C:\Users\...\Documents\Code\Python\fuzzyCompare\multiFuzzyCLI.py", line 47, in fuzzyCompare
str2 = data2["Company"][j] + "," + data2["Physical Street 1"][j] + "," + data2["Physical City"][j] + "," + data2["Physical Postal Code/ZIP"][j]
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pandas\core\series.py", line 1068, in __getitem__
result = self.index.get_value(self, key)
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\site-packages\pandas\core\indexes\base.py", line 4730, in get_value
return self._engine.get_value(s, k, tz=getattr(series.dtype, "tz", None))
File "pandas\_libs\index.pyx", line 80, in pandas._libs.index.IndexEngine.get_value
File "pandas\_libs\index.pyx", line 88, in pandas._libs.index.IndexEngine.get_value
File "pandas\_libs\index.pyx", line 131, in pandas._libs.index.IndexEngine.get_loc
File "pandas\_libs\hashtable_class_helper.pxi", line 992, in pandas._libs.hashtable.Int64HashTable.get_item
File "pandas\_libs\hashtable_class_helper.pxi", line 998, in
pandas._libs.hashtable.Int64HashTable.get_item
KeyError: 0
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "multiFuzzyCLI.py", line 145, in <module>
main()
File "multiFuzzyCLI.py", line 132, in main
outputData = pd.concat(p.starmap(fuzzyCompare, args))
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\pool.py", line 276, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "C:\Users\...\AppData\Local\Programs\Python\Python37-32\lib\multiprocessing\pool.py", line 657, in get
raise self._value
KeyError: 0
I know what a KeyError is; I just don't understand how it's occurring in this case. Thanks.
You get a KeyError because you try to index every DataFrame with an index starting from 0, while np.array_split maintains the original Index of each split. To properly select the i-th row of a DataFrame you should always use DataFrame.iloc, as this works for any Index, not just a RangeIndex that begins at 0. So you need to change all of your selections to be of the form:
data2["Company"].iloc[j] # Not data2["Company"][j]
import pandas as pd
import numpy as np

# Toy frame: 10 one-letter company names with the default RangeIndex 0..9.
df = pd.DataFrame({'CompanyName': list('abcdefghij')})
df_split = np.array_split(df, 4)  # chunk sizes: 3, 3, 2, 2

# The first split only works by luck: its index happens to start at 0.
data2 = df_split[0]
for j in range(len(data2)):
    print(data2['CompanyName'][j])
# a
# b
# c

# Later splits fail: df_split[1].index is RangeIndex(start=3, stop=6, step=1),
# so the label lookup with j == 0 raises. Caught here so the demo keeps going.
data2 = df_split[1]
try:
    for j in range(len(data2)):
        print(data2['CompanyName'][j])
except KeyError as err:
    print('KeyError:', err)  # KeyError: 0

# Instead, select positionally with `.iloc`, which works for any index.
for j in range(len(data2)):
    print(data2['CompanyName'].iloc[j])
# d
# e
# f
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address. For any questions, please contact: yoyou2525@163.com.