
Convert nested JSON with nested arrays to CSV in Python

If I have a list of such dictionaries/JSON objects in a JSON file, how can I convert it to CSV using a Python script, or any other way besides doing it manually?

I want to flatten it so that each key with a single value becomes a column. For the MatchResponse array, I want each element to become a separate row, repeating all the surrounding single-valued fields as columns. So, for example, if the MatchResponse array below has 3 items, there should be 3 rows with adRefId, addrRefId, etc., each repeating the fields from outside the array, namely creation_date, expiration_date, modification_date, revision, docType, etc. (a sketch of the expected output follows the JSON).

[
  {
    "aggregate_result": [],
    "explain": "",
    "key_with_document": [
      {
        "document": {
          "creation_date": 1643342434,
          "expiration_date": 2053342527,
          "modification_date": 1643342527,
          "revision": 4,
          "struct": {
            "MatchResponse": [
              {
                "adRefId": "e6040-c8dcdb165993",
                "addrRefId": "city_list:0",
                "MatchCode": "REGI_ADDR_BLOCK",
                "maxScore": 0.9968223809704663
              },
              {
                "adRefId": "800-3c7a04dc8d3f",
                "addrRefId": "address_list:0",
                "MatchCode": "_ADDR_BLOCK",
                "maxScore": 0
              },
              {
                "adRefId": "ab39f31d-6b21-4377-9c91-85fdd345c22a",
                "addrRefId": "name_block_list:0",
                "MatchCode": "ADDR_BLOCK",
                "maxScore": 0
              }
            ],
            "MatchStatus": 200,
            "dataRefs": [
              {
                "addressRef": {
                  "addrRefId": "0",
                  "addrType": "REGISTRATION_ADDRESS",
                  "addressLine1": "123 Test Street",
                  "addressLine2": "",
                  "city": "",
                  "country": "Federation",
                  "postalCode": "12345",
                  "province": ""
                },
                "dataId": "0"
              }
            ],
            "docType": "_SCREEN",
            "extRefId1": "b326c63721536765412099",
            "extRefId1Type": "",
            "extRefId2": "",
            "extRefId2Type": "_SETTINGS",
            "ules": [
              "1213395"
            ],
            "Status": [
              "20"
            ]
          }
        },
        "key": {
          "id": [
            {
              "collection": "__ROOT__",
              "string": "3721536765412099_E"
            }
          ],
          "is_partial": false
        }
      }
    ]
  }
]
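
For the 3-element MatchResponse above, the expected flattened output would look roughly like this (columns abbreviated):

creation_date,expiration_date,modification_date,revision,adRefId,addrRefId,MatchCode,maxScore
1643342434,2053342527,1643342527,4,e6040-c8dcdb165993,city_list:0,REGI_ADDR_BLOCK,0.9968223809704663
1643342434,2053342527,1643342527,4,800-3c7a04dc8d3f,address_list:0,_ADDR_BLOCK,0
1643342434,2053342527,1643342527,4,ab39f31d-6b21-4377-9c91-85fdd345c22a,name_block_list:0,ADDR_BLOCK,0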

I tried the following, but I am unable to work out the correct syntax in meta for the columns I want to include.

import json
import pandas as pd

def read_json(filename: str) -> dict:
    with open(filename) as f:
        return json.loads(f.read())

def main():
    data = read_json(filename='Extract1.json')
    # How do I include keys like creation_date, expiration_date, etc. here?
    df2 = pd.json_normalize(data,
                            record_path=['key_with_document', ['document', 'struct', 'MatchResponse']],
                            meta=['key_with_document'])
    print(df2)
    df2.to_csv('out2.csv')

if __name__ == '__main__':
    main()



My output looks like this, where the key_with_document part is all in one column, but I want the keys to be in separate columns.

This seems to do what you want. Note that I am ignoring dataRefs, because that is yet another nested list. You could extend this to pull in element [0] of that as well (a sketch follows the output below).

data="""[
  {
    "aggregate_result": [],
    "explain": "",
    "key_with_document": [
      {
        "document": {
          "creation_date": 1643342434,
          "expiration_date": 2053342527,
          "modification_date": 1643342527,
          "revision": 4,
          "struct": {
            "MatchResponse": [
              {
                "adRefId": "e6040-c8dcdb165993",
                "addrRefId": "city_list:0",
                "MatchCode": "REGI_ADDR_BLOCK",
                "maxScore": 0.9968223809704663
              },
              {
                "adRefId": "800-3c7a04dc8d3f",
                "addrRefId": "address_list:0",
                "MatchCode": "_ADDR_BLOCK",
                "maxScore": 0
              },
              {
                "adRefId": "ab39f31d-6b21-4377-9c91-85fdd345c22a",
                "addrRefId": "name_block_list:0",
                "MatchCode": "ADDR_BLOCK",
                "maxScore": 0
              }
            ],
            "MatchStatus": 200,
            "dataRefs": [
              {
                "addressRef": {
                  "addrRefId": "0",
                  "addrType": "REGISTRATION_ADDRESS",
                  "addressLine1": "123 Test Street",
                  "addressLine2": "",
                  "city": "",
                  "country": "Federation",
                  "postalCode": "12345",
                  "province": ""
                },
                "dataId": "0"
              }
            ],
            "docType": "_SCREEN",
            "extRefId1": "b326c63721536765412099",
            "extRefId1Type": "",
            "extRefId2": "",
            "extRefId2Type": "_SETTINGS",
            "ules": [
              "1213395"
            ],
            "Status": [
              "20"
            ]
          }
        },
        "key": {
          "id": [
            {
              "collection": "__ROOT__",
              "string": "3721536765412099_E"
            }
          ],
          "is_partial": false
        }
      }
    ]
  }
]"""

import json
import csv

data = json.loads(data)

# Scalar fields taken directly from each "document"
fixed = [
    "creation_date",
    "expiration_date",
    "modification_date",
    "revision"
]
# Fields taken from document["struct"]; single-element lists are unwrapped below
fromstruct = [
    "docType",
    "extRefId1",
    "extRefId1Type",
    "extRefId2",
    "extRefId2Type",
    "ules",
    "Status"
]
# Fields taken from each element of struct["MatchResponse"]
fromresponse = [
    "adRefId",
    "addrRefId",
    "MatchCode",
    "maxScore",
]

allfields = fixed + fromstruct + fromresponse
with open('my.csv', 'w', newline='') as f:
    fout = csv.DictWriter(f, fieldnames=allfields)
    fout.writeheader()
    for obj in data:
        for obj2 in obj['key_with_document']:
            row = {}
            odoc = obj2['document']
            ostr = odoc['struct']
            for name in fixed:
                row[name] = odoc[name]
            for name in fromstruct:
                # "ules" and "Status" are single-element lists; take the first item
                if isinstance(ostr[name], list):
                    row[name] = ostr[name][0]
                else:
                    row[name] = ostr[name]
            # one CSV row per MatchResponse element, repeating the fields above
            for obj3 in ostr['MatchResponse']:
                for name in fromresponse:
                    row[name] = obj3[name]
                fout.writerow(row)

Output CSV file:

creation_date,expiration_date,modification_date,revision,docType,extRefId1,extRefId1Type,extRefId2,extRefId2Type,ules,Status,adRefId,addrRefId,MatchCode,maxScore
1643342434,2053342527,1643342527,4,_SCREEN,b326c63721536765412099,,,_SETTINGS,1213395,20,e6040-c8dcdb165993,city_list:0,REGI_ADDR_BLOCK,0.9968223809704663
1643342434,2053342527,1643342527,4,_SCREEN,b326c63721536765412099,,,_SETTINGS,1213395,20,800-3c7a04dc8d3f,address_list:0,_ADDR_BLOCK,0
1643342434,2053342527,1643342527,4,_SCREEN,b326c63721536765412099,,,_SETTINGS,1213395,20,ab39f31d-6b21-4377-9c91-85fdd345c22a,name_block_list:0,ADDR_BLOCK,0
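
As a minimal sketch of the dataRefs extension mentioned above (the address_columns helper and the addressRef_ column prefix are my own, not part of the original answer):

fromaddress = ["addrRefId", "addrType", "addressLine1", "addressLine2",
               "city", "country", "postalCode", "province"]

def address_columns(struct):
    # Flatten element [0] of struct["dataRefs"]; the "addressRef_" prefix is
    # an assumption, chosen to avoid clashing with the addrRefId column that
    # already comes from MatchResponse.
    ref = struct["dataRefs"][0]
    cols = {"addressRef_" + name: ref["addressRef"][name] for name in fromaddress}
    cols["dataId"] = ref["dataId"]
    return cols

To use it, extend fieldnames with ["addressRef_" + n for n in fromaddress] + ["dataId"] and merge address_columns(ostr) into row before each writerow.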

I managed to figure out the answer using pandas. Here is my alternative:


import json
import pandas as pd

def read_json(filename: str) -> dict:
    try:
        with open(filename) as f:
            data = json.loads(f.read())
    except Exception as e:
        raise Exception(f"Reading {filename} file encountered an error") from e
    return data

def main():
    data = read_json(filename='ExtractFile1.json')

    meta_fields = [
        ['key_with_document', 'document', 'creation_date'],
        ['key_with_document', 'document', 'expiration_date'],
        ['key_with_document', 'document', 'modification_date'],
        ['key_with_document', 'document', 'revision'],
        ['key_with_document', 'document', 'struct', 'MatchStatus'],
        ['key_with_document', 'document', 'struct', 'docType'],
        ['key_with_document', 'document', 'struct', 'extRefId1'],
        ['key_with_document', 'document', 'struct', 'extRefId1Type'],
        ['key_with_document', 'document', 'struct', 'extRefId2'],
        ['key_with_document', 'document', 'struct', 'extRefId2Type'],
        ['key_with_document', 'document', 'struct', 'ules'],
        ['key_with_document', 'document', 'struct', 'Status'],
        ['key_with_document', 'document', 'struct', 'dataRefs'],
    ]
    df3 = pd.json_normalize(
        data,
        record_path=['key_with_document', ['document', 'struct', 'MatchResponse']],
        meta=meta_fields,
    )
    df3.to_csv('out3.csv')

if __name__ == '__main__':
    main()
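
One note on the pandas route: with nested meta paths, json_normalize joins each path with its sep argument (default '.'), so the meta columns come out as, e.g., key_with_document.document.creation_date. If plain column names are wanted in the CSV, the prefix can be stripped before writing:

# Optional: keep only the last path component as the column name
df3.columns = [c.split('.')[-1] for c in df3.columns]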

A more generic solution would be like the below:

import pandas as pd
tree=       {
    "aggregate_result": [],
    "explain": "",
    "key_with_document": [
      {
        "document": {
          "creation_date": 1643342434,
          "expiration_date": 2053342527,
          "modification_date": 1643342527,
          "revision": 4,
          "struct": {
            "MatchResponse": [
              {
                "adRefId": "e6040-c8dcdb165993",
                "addrRefId": "city_list:0",
                "MatchCode": "REGI_ADDR_BLOCK",
                "maxScore": 0.9968223809704663
              },
              {
                "adRefId": "800-3c7a04dc8d3f",
                "addrRefId": "address_list:0",
                "MatchCode": "_ADDR_BLOCK",
                "maxScore": 0
              },
              {
                "adRefId": "ab39f31d-6b21-4377-9c91-85fdd345c22a",
                "addrRefId": "name_block_list:0",
                "MatchCode": "ADDR_BLOCK",
                "maxScore": 0
              }
            ],
            "MatchStatus": 200,
            "dataRefs": [
              {
                "addressRef": {
                  "addrRefId": "0",
                  "addrType": "REGISTRATION_ADDRESS",
                  "addressLine1": "123 Test Street",
                  "addressLine2": "",
                  "city": "",
                  "country": "Federation",
                  "postalCode": "12345",
                  "province": ""
                },
                "dataId": "0"
              }
            ],
            "docType": "_SCREEN",
            "extRefId1": "b326c63721536765412099",
            "extRefId1Type": "",
            "extRefId2": "",
            "extRefId2Type": "_SETTINGS",
            "ules": [
              "1213395"
            ],
            "Status": [
              "20"
            ]
          }
        },
        "key": {
          "id": [
            {
              "collection": "__ROOT__",
              "string": "3721536765412099_E"
            }
          ],
          "is_partial": "false"
        }
      }
    ]
  }


def parser(master_tree):
    flatten_tree_node = []

    def _process_leaves(tree: dict, prefix: str = "node", tree_node: dict = dict(), update: bool = True):
        # Walk the tree depth-first. Only string leaves become columns
        # (named prefix_key); dicts extend the prefix; each list element
        # forks a copy of the row collected so far.
        is_nested = False
        if isinstance(tree, dict):
            # pass 1: collect string leaves and descend into nested dicts
            for k in tree.keys():
                if type(tree[k]) == str:
                    colName = prefix + "_" + k
                    tree_node[colName] = tree[k]
                elif type(tree[k]) == dict:
                    # note: prefix accumulates across sibling keys, which is
                    # why the column names in the output below grow so long
                    prefix += "_" + k
                    leave = tree[k]
                    _process_leaves(leave, prefix=prefix, tree_node=tree_node, update=False)
            # pass 2: emit one row per element of each nested list
            for k in tree.keys():
                if type(tree[k]) == list:
                    is_nested = True
                    prefix += "_" + k
                    for leave in tree[k]:
                        _process_leaves(leave, prefix=prefix, tree_node=tree_node.copy())
            # a node with no nested lists completes one flat row
            if not is_nested and update:
                flatten_tree_node.append(tree_node)

    _process_leaves(master_tree)
    df = pd.DataFrame(flatten_tree_node)
    df.columns = df.columns.str.replace("@", "_")
    df.columns = df.columns.str.replace("#", "_")
    return df

print(parser(tree))

  node_explain  ... node_aggregate_result_key_with_document_document_key_id_string
0               ...                                                NaN            
1               ...                                                NaN            
2               ...                                                NaN            
3               ...                                                NaN            
4               ...                                 3721536765412099_E            
5               ...                                                NaN            

[6 rows x 21 columns]
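
To write the generically flattened frame out, the usual pandas call applies (the file name here is illustrative):

# Persist the flattened result; index=False drops the row-number column
parser(tree).to_csv('generic_out.csv', index=False)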
