Python and BeautifulSoup encoding issue from UTF-8

I am new to Python and am currently writing an application that scrapes data from the web. Most of it is done; there is just one small encoding problem left. The site is encoded in ISO-8859-1, but when I try html.decode('iso-8859-1'), it does nothing. If you run the program, use 50000 and 50126 for the PLZ values and you will see in the output what I mean. It would be great if someone could help me.

import urllib.request
import time
import csv
import operator

from bs4 import BeautifulSoup


#Performs an HTTP POST request, passes the result to BeautifulSoup and returns the soup
def doRequest(request):
    requestResult = urllib.request.urlopen(request)
    soup = BeautifulSoup(requestResult)
    return soup


#Returns all the result links from the given search parameters
def getLinksFromSearch(plz_von, plz_bis):
    database = []
    links = []

    #The search parameters
    params = {
    'name_ff': '',
    'strasse_ff': '',
    'plz_ff': plz_von,
    'plz_ff2': plz_bis,
    'ort_ff': '',
    'bundesland_ff': '',
    'land_ff': 'DE',
    'traeger_ff': '',
    'Dachverband_ff': '',
    'submit2' : 'Suchen'
    }

    DATA = urllib.parse.urlencode(params)
    DATA = DATA.encode('utf-8')

    request = urllib.request.Request(
    "http://www.altenheim-adressen.de/schnellsuche/suche1.cfm",
    DATA)

    # adding charset parameter to the Content-Type header.
    request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    request.add_header("User-Agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:33.0) Gecko/20100101 Firefox/33.0")

    #The search request 
    html = doRequest(request)
    h = html.decode('iso-8859-1')
    soup = BeautifulSoup(h)

    for link in soup.find_all('a'):
        database.append(link.get('href'))

    #Remove the first Element ('None') to avoid Attribute Errors
    database.pop(0)

    for item in database:
        if item.startswith("suche"):
            links.append(item)

    return links



#Performs a search on the link results
def searchOnLinks(links):
    adresses = []
    i = 1
    j = len(links)
    print("Found", j, "results, collecting data.")
    for item in links:
        adresses.append(getContactInfoFromPage(item, i, j))
        i = i + 1
        time.sleep(0.1)
    print("All done.")
    return adresses

#A method to scrape the contact info from the search result
def getContactInfoFromPage(page, i, j):
    name = ''
    straße = ''
    plz = ''
    stadt = ''
    telefon = ''
    mail = ''
    url = ''

    data = [
           #'Name',
           #'Straße',
           #'PLZ',
           #'Stadt',
           #'Telefon',
           #'E-Mail',
           #'Homepage'
            ]

    request = urllib.request.Request("http://www.altenheim-adressen.de/schnellsuche/" + page)
    #request.add_header("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
    request.add_header("Content-Type", "text/html;charset=UTF-8")
    request.add_header("User-Agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:33.0) Gecko/20100101 Firefox/33.0")


    print("(" , i , "/" , j , ") Making request...") 
    soup = doRequest(request)
    print("Done.")

    findeName = soup.findAll('b')
    name = findeName[2]
    name = name.string.split('>')

    data.append(name[0])

    straße = getFieldValue(soup, "Straße")
    data.append(straße)

    ort = getFieldValue(soup, "Ort")
    (plz, stadt) = ort.split(' ', 1)
    data.append(plz)
    data.append(stadt)

    telefon = getFieldValue(soup, "Telefon")
    data.append(telefon)

    mail = getFieldValue(soup, "EMail")
    data.append(mail)

    url = getFieldValue(soup, "Internetadresse")
    data.append(url)

    return data

#Strips the text from the given field's sibling
def getFieldValue(soup, field):
    field_label = soup.find('td', text=field + ':')
    return field_label.find_next_sibling('td').get_text(strip=True)

#The main input/output function
def inputOutput():
    #PLZ is German for zip-code and consists of a five-digit number
    #The program passes the numbers to the servers, and the server
    #returns all search results between the two numbers
    plz_von = input("Please enter first PLZ: ")
    plz_bis = input("Please enter second PLZ: ")

    links = getLinksFromSearch(plz_von, plz_bis)

    #Checks if the search yielded any results
    if len(links) > 0:
        data = searchOnLinks(links)
        file_name = input("Save as: ")
        print("Writing to file...")
        with open(file_name + '.csv', 'w', newline='') as fp:
            a = csv.writer(fp, delimiter=',')
            a.writerows(data)

    else:
        print("The search yielded no results.")


inputOutput()

Your doRequest() function returns a BeautifulSoup object; you cannot decode that object. Just use it directly:

soup = doRequest(request)
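
Concretely, that means the decode and re-parse lines in getLinksFromSearch can simply be dropped; a minimal sketch of the corrected section:

#The search request
soup = doRequest(request)  #already a parsed BeautifulSoup object

for link in soup.find_all('a'):
    database.append(link.get('href'))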

You do not need to decode the response at all. BeautifulSoup uses hints in the HTML (the <meta> headers) plus statistical analysis to determine the correct input encoding.

In this case, the HTML document claims to be Latin-1:

<meta name="content-type" content="text/html; charset=iso-8859-1">
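
You can verify which encoding BeautifulSoup actually settled on by inspecting its original_encoding attribute; a small self-contained sketch (the sample bytes here are made up for illustration):

from bs4 import BeautifulSoup

html_bytes = b'<meta charset="utf-8"><p>B\xc3\xa4ckerstra\xc3\x9fe</p>'
soup = BeautifulSoup(html_bytes)
print(soup.original_encoding)  #detected encoding, here 'utf-8' from the <meta> hint
print(soup.p.string)           #decoded correctly as 'Bäckerstraße'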

The response also does not include a charset in the Content-Type header, so this is a case of a misconfigured server. You can force BeautifulSoup to ignore the <meta> header with:

soup = BeautifulSoup(requestResult, from_encoding='utf8')
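
Applied to the code above, one way to wire that in (a sketch, not the only option) is to give doRequest() an optional encoding parameter and pass it through:

def doRequest(request, encoding=None):
    requestResult = urllib.request.urlopen(request)
    #from_encoding=None keeps BeautifulSoup's normal detection; a value overrides the <meta> hint
    soup = BeautifulSoup(requestResult, from_encoding=encoding)
    return soup

#Force UTF-8 for this misconfigured server
soup = doRequest(request, encoding='utf8')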
