
Convert DataFrame to Parquet in Azure Function App using Python

I am downloading two CSV files from my Azure Data Lake Storage (Gen2) account, merging them, and uploading the result in Parquet format to a different folder in the same storage account. I want to upload the summary DataFrame in Parquet format using a Function App developed in VS Code. The code runs perfectly locally, but the deployed Function App returns a '500 - Internal Server Error'. The problem is with the pyarrow engine that the to_parquet method uses; Azure does not seem to support this engine.

import pandas as pd
from azure.storage.filedatalake import DataLakeServiceClient
import azure.functions as func
from io import StringIO

def main(req: func.HttpRequest) -> func.HttpResponse:
    
    STORAGEACCOUNTURL = 'https://storage_acc_name.dfs.core.windows.net/'
    STORAGEACCOUNTKEY = 'Key'
    LOCALFILENAME = ['file1.csv', 'file2.csv']  # blob names in the "raw" container

    file1 = pd.DataFrame()
    file2 = pd.DataFrame()
   

    # Connect to the data lake and open the "raw" container
    service_client = DataLakeServiceClient(account_url=STORAGEACCOUNTURL, credential=STORAGEACCOUNTKEY)
    adl_client_instance = service_client.get_file_system_client(file_system="raw")

    # Download each CSV from the "raw" container and parse it into a DataFrame
    for i in LOCALFILENAME:
        if i == 'file1.csv':
            file_client = adl_client_instance.get_file_client(i)
            adl_data = file_client.download_file()
            s = str(adl_data.readall(), 'utf-8')
            file1 = pd.read_csv(StringIO(s))
        elif i == 'file2.csv':
            file_client = adl_client_instance.get_file_client(i)
            adl_data = file_client.download_file()
            s = str(adl_data.readall(), 'utf-8')
            file2 = pd.read_csv(StringIO(s))
    
    
    # Inner-join the two DataFrames on their shared key column
    summary = pd.merge(left=file1, right=file2, on='key', how='inner')

    # Write the merged DataFrame as Parquet to the "output" container,
    # reusing the existing service client
    file_system_client = service_client.get_file_system_client(file_system="output")
    directory_client = file_system_client.get_directory_client("output")
    file_client = directory_client.create_file("output.parquet")
    file_contents = summary.to_parquet()  # with no path, returns the Parquet bytes
    file_client.append_data(data=file_contents, offset=0, length=len(file_contents))
    file_client.flush_data(len(file_contents))

    return func.HttpResponse("This HTTP triggered function executed successfully.")

if __name__ == '__main__':
    # Local smoke test with a dummy HTTP request
    main(func.HttpRequest(method='GET', url='/api/convert', body=b''))

Maybe you can use PySpark:

df_MF = spark_session.createDataFrame(df)
# now you have a Spark DataFrame, which you can save with Spark's own writers
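A minimal sketch of that approach, assuming a SparkSession is available and Spark is already configured with credentials for the storage account (the abfss:// path below is a placeholder):

from pyspark.sql import SparkSession

# Assumes authentication to the storage account is configured in Spark
spark_session = SparkSession.builder.appName('csv-to-parquet').getOrCreate()

# Convert the pandas DataFrame built earlier into a Spark DataFrame
df_MF = spark_session.createDataFrame(summary)

# Write it out as Parquet; replace the path with your own container/folder
df_MF.write.mode('overwrite').parquet(
    'abfss://output@storage_acc_name.dfs.core.windows.net/output'
)

Note that this avoids pandas' to_parquet entirely, though running Spark inside a Function App is heavyweight compared to simply installing pyarrow.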
