I created a Python tool with a Tkinter GUI; the relevant pieces of the script are below. The problem is with the try-except in these lines of code:
try:
pldf_csv[data['sls_data']['add_columns']].write_csv(endpath,sep='\t')
except:
write_eror_status = True
print("CANNOT WRITE FILE")
If I run the Python file from VS Code, the try-except works as shown above.
But after I compile the script to an .exe with PyInstaller,
those lines don't execute at all.
Full code
class IngestGenerator:
    # NOTE(review): this code was pasted with its indentation stripped; the
    # nesting below is reconstructed from the control-flow keywords — confirm
    # branch placement against the original file.
    #
    # Turns a 3PL (third-party logistics) Excel invoice into one or more
    # tab-separated "IngestResult" CSV files:
    #   1. loads a YAML column map ('columns 1a.yaml') that drives per-3PL behavior,
    #   2. exports every non-"summary" worksheet to a temp CSV via win32com
    #      (Excel COM automation, Windows-only),
    #   3. reads each temp CSV with polars, selects/renames columns per the
    #      YAML, adds the mandatory ingest columns, and
    #   4. writes the result(s) into self.filedir (per sheet, or per category
    #      for "type-like" 3PLs).
    # Reads/mutates module-level Tk widgets (progress_label, submit_btn,
    # browse_btn, file_name_appear_label) and the helper get_file_name —
    # assumed to be defined elsewhere in this script.
    def __init__(self, fn,fd,n3pl):
        # fn: full path to the source Excel workbook.
        # fd: output directory for the generated CSVs.
        # n3pl: 3PL provider name; used as a lookup key into the YAML config.
        self.filename = fn
        self.filedir = fd
        self.name3pl = n3pl
    def generate_result_csv(self):
        """Generate the ingest-result CSV file(s) for this workbook.

        To extend 3PL please refer to comment with <(extend this)> note.
        Don't forget to extend the yaml when extending 3PL.

        Runs on a worker thread (started by file_submit_btn_click) and
        reports progress by mutating the module-level Tk widgets.
        """
        start_time = time.time()
        # Config is resolved next to this module. NOTE(review): under a
        # PyInstaller --onefile build, __file__ points into the unpack temp
        # dir; this only works if the yaml is bundled as a data file — confirm
        # how 'columns 1a.yaml' is shipped with the exe.
        # with open("columns 1a.yaml", 'r') as f:
        with open(os.path.join(os.path.dirname(__file__), 'columns 1a.yaml'), 'r') as f:
            data = yaml.load(f, Loader=SafeLoader)
        with tempfile.TemporaryDirectory() as tmpdirname:
            try :
                # create new list if there's new 3pl behavior (extend this)
                list_type_like = [data['behavior']['type_like'][i]['name3pl'] for i in range(0,len(data['behavior']['type_like']))] #collects names of 3pl which have categorical column to be divided based on
                write_eror_status = False  # flips to True when any write_csv fails
                status = False  # True once this 3PL is found in the type-like list
                for i in range(0,len(data['behavior']['type_like'])):
                    if data['behavior']['type_like'][i]['name3pl']==self.name3pl: #get the name of category column and the values of categories (extend this whole if-statement)
                        list_types = data['behavior']['type_like'][i]['cats']
                        cat_column = data['behavior']['type_like'][i]['categorical_col']
                        status = True
                    else : pass
                if status == False: #for logging to check if its in the list of type-like 3pl (extend this whole if else statement)
                    print("3PL cannot be found on type-like 3PL list")
                else: print("3PL can be found on type-like 3PL list")
                try:
                    # One accumulator list per category, stored as a dynamically
                    # named module global. NOTE(review): globals() leaks state
                    # across runs; a plain dict keyed by category would be safer.
                    for cat in list_types: #dynamic list creation for each category (extend this line only)
                        globals()[f"{cat}_final_df"] = []
                except : print("3pl isn't split based on it's categories")  # list_types is unbound for non-type-like 3PLs; the NameError lands here by design
                # --- Excel -> CSV conversion via COM automation ---
                xl = win32com.client.Dispatch("Excel.Application")
                print("Cast to CSV first (win32com)")
                wb = xl.Workbooks.Open(self.filename,ReadOnly=1)
                xl.DisplayAlerts = False
                xl.Visible = False
                xl.ScreenUpdating = False
                xl.EnableEvents = False
                # Every sheet except "summary" (case-insensitive) is processed.
                sheet_names = [sheet.Name for sheet in wb.Sheets if sheet.Name.lower() != 'summary']
                print("Sheet names")
                print(sheet_names)
                for sheet_name in sheet_names:
                    print("Reading sheet "+sheet_name)
                    ws = wb.Worksheets(sheet_name)
                    # 24 is an XlFileFormat CSV constant (presumably xlCSVMSDOS — confirm)
                    ws.SaveAs(tmpdirname+"\\myfile_tmp_{}.csv".format(sheet_name), 24)
                    used_columns = data['sls_data'][f'{self.name3pl.lower()}_used_columns']
                    renamed_columns = data['sls_data'][f'{self.name3pl.lower()}_rename_columns']
                    rowskip = data['behavior']['row_skip'][f'{self.name3pl.lower()}']
                    # Read every used column as string; numeric cleanup happens later.
                    list_dtypes = [str for u in used_columns]
                    print("CP 1")
                    scandf = pl.scan_csv(tmpdirname+"\\myfile_tmp_{}.csv".format(sheet_name),skip_rows= rowskip,n_rows=10) #scan csv to get column name
                    print(scandf.columns)
                    scanned_cols = scandf.columns.copy()
                    used_cols_inDF = [] #collects column names dynamically
                    for i in range(0,len(used_columns)):
                        if type(used_columns[i]) is list: #check for each scanned-columns which contained in yaml used_columns, append if the scanned columns exist in yaml
                            # A yaml entry may be a list of alternative header
                            # names for the same logical column.
                            for sc in scanned_cols:
                                for uc in used_columns[i]:
                                    if sc == uc:
                                        print(f"Column match : {uc}")
                                        used_cols_inDF.append(uc)
                                    else:pass
                        else:
                            for sc in scanned_cols: #purpose is same with the if statement
                                if sc == used_columns[i]:
                                    print(f"Column match : {used_columns[i]}")
                                    used_cols_inDF.append(used_columns[i])
                                else:pass
                    print(used_cols_inDF)
                    """
                    JNT files have everchanging column names. Some files only have Total Ongkir, some only have Total,
                    and some might have Total and Total Ongkir. If both exists then will use column Total Ongkir (extend this if necessary i.e for special cases of 3pl)
                    """
                    if self.name3pl == 'JNT':
                        if "Total" in used_cols_inDF and "Total Ongkir" in used_cols_inDF:
                            used_cols_inDF.remove("Total")
                        else:pass
                    else:pass
                    pldf_csv = pl.read_csv(tmpdirname+"\\myfile_tmp_{}.csv".format(sheet_name),
                                           columns = used_cols_inDF,
                                           new_columns = renamed_columns,
                                           dtypes = list_dtypes,
                                           skip_rows= rowskip
                                           ).filter(~pl.fold(acc=True, f=lambda acc, s: acc & s.is_null(), exprs=pl.all(),)) #filter rows with all null values
                    print(pldf_csv)
                    print(pldf_csv.columns)
                    for v in data['sls_data']['add_columns']: #create dynamic columns
                        if "3pl invoice distance (m) (optional)" in v.lower() or "3pl cod amount (optional)" in v.lower():
                            # numeric optional columns default to 0
                            pldf_csv = pldf_csv.with_column(pl.Series(name="{}".format(v),values= np.zeros(shape=pldf_csv.shape[0])))
                        elif "3pl tn (mandatory)" in v.lower() or "weight (kg) (optional)" in v.lower():
                            pass  # presumably already supplied by the source columns — confirm
                        elif "total fee (3pl) (optional)" in v.lower():
                            # e.g. "1,234.5 " -> 1234.5; unparsable values become 0
                            pldf_csv = pldf_csv.with_column(pl.col(v).str.replace_all(",","").str.strip().cast(pl.Float64,False).fill_null(0))
                        else :
                            pldf_csv = pldf_csv.with_column(pl.lit(None).alias(v))
                    print(pldf_csv)
                    # NOTE(review): "\{" is an invalid escape sequence (Python keeps
                    # the backslash but emits a DeprecationWarning); prefer "\\{}".
                    endpath = self.filedir+"\{}_{}_{}.csv".format(get_file_name(file_name_appear_label["text"]),sheet_name,"IngestResult").replace('/','\\')
                    if self.name3pl not in list_type_like: #(extend this line only)
                        if self.name3pl == 'JNT': #(extend this line and its statement if necessary i.e for special cases of 3pl)
                            # JNT: fold the insurance fee (Biaya Asuransi) into the total fee.
                            pldf_csv = pldf_csv.with_column((pl.col("Total Fee (3PL) (Optional)")+pl.col("Biaya Asuransi").str.replace_all(",","").str.strip().cast(pl.Float64,False).fill_null(0)).alias("Total Fee (3PL) (Optional)"))
                        else: pass
                        print(pldf_csv)
                        try:
                            # Tab-separated on purpose despite the .csv suffix.
                            pldf_csv[data['sls_data']['add_columns']].write_csv(endpath,sep='\t')
                        except:
                            # NOTE(review): bare except swallows the real error —
                            # e.g. polars failing inside a PyInstaller exe because
                            # its dist metadata wasn't bundled. Catch Exception as e
                            # and log it so failures are diagnosable.
                            write_eror_status = True
                            print("CANNOT WRITE FILE")
                    elif self.name3pl in list_type_like: #(extend this line only)
                        # Type-like 3PL: defer writing; bucket rows per category.
                        for cat in list_types:
                            globals()[f"{cat}_final_df"].append(pldf_csv.filter(pl.col(cat_column).str.contains(cat)))
                # After all sheets: type-like 3PLs write one file per category.
                if self.name3pl not in list_type_like:
                    pass
                elif self.name3pl in list_type_like:
                    for cat in list_types:
                        globals()[f"{cat}_final_df"] = pl.concat(globals()[f"{cat}_final_df"])
                        print(globals()[f"{cat}_final_df"])
                        globals()[f"endpath_{cat}"] = self.filedir+"\{}_{}_{}.csv".format(get_file_name(file_name_appear_label["text"]),cat,"IngestResult").replace('/','\\')
                        print("done creating paths")
                        try:
                            globals()[f"{cat}_final_df"][data['sls_data']['add_columns']].write_csv(globals()[f"endpath_{cat}"],sep='\t')
                        except :
                            # NOTE(review): same bare-except concern as above.
                            write_eror_status = True
                            print("CANNOT WRITE FILE")
                if write_eror_status == False:
                    progress_label["text"] = "Successful!"
                else:
                    progress_label["text"] = "Cannot write result into Excel, please close related Excel files and kill Excel processes from Task Manager"
                print("Process finished")
            except Exception as e:
                print("ERROR with message")
                print(e)
                progress_label["text"] = "Failed due to {}".format(e)
            finally :
                # NOTE(review): if Dispatch/Workbooks.Open raised, wb is unbound
                # and this line raises NameError from the finally block. Also
                # xl.Quit() is never called, so the Excel process can linger
                # (consistent with the "kill Excel processes" message above).
                wb.Close(False)
                submit_btn["state"] = "normal"
                browse_btn["state"] = "normal"
                # elapsed wall time in minutes
                print("Total exec time : {}".format((time.time()-start_time)/60))
def file_submit_btn_click():
    """Validate the GUI inputs and launch CSV generation on a worker thread.

    Reads the module-level Tk widgets (file_name_appear_label, option_menu,
    progress_label, submit_btn, browse_btn) and the module-level file_name /
    file_path values — presumably set by the browse handler; confirm against
    the rest of the script. Returns None; all feedback goes through the
    widgets.
    """
    if (file_name_appear_label["text"]==""):
        progress_label["text"] = "Please input your file"
    elif option_menu.get() == '':
        progress_label["text"] = "Please select 3pl name"
    else:
        try :
            # Disable inputs while the background job runs; the worker's
            # finally-block re-enables them when it finishes.
            submit_btn["state"] = "disabled"
            browse_btn["state"] = "disabled"
            progress_label["text"] = "Loading . . ."
            name3pl = option_menu.get()
            ingest = IngestGenerator(file_name,file_path,name3pl)
            print(get_file_name(file_name_appear_label["text"]))
            threading.Thread(target=ingest.generate_result_csv).start()
        except Exception as e:
            # BUG FIX: previously this only printed the error, leaving both
            # buttons permanently disabled and the user staring at
            # "Loading . . ." forever when thread startup failed. Restore the
            # widgets and surface the failure the same way the worker does.
            print(e)
            submit_btn["state"] = "normal"
            browse_btn["state"] = "normal"
            progress_label["text"] = "Failed due to {}".format(e)
Since the error was raised when writing the CSV with polars, and polars ships
its own distribution metadata that its dependencies check at runtime, the exe
build must bundle that metadata with the --recursive-copy-metadata option, as
described in the PyInstaller documentation: pyinstaller --recursive-copy-metadata polars --onefile -w "myfile.py"
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.