繁体   English   中英

如何使用 Beautiful Soup BS4 在雅虎财经上抓取多个页面

[英]How to scrape multiple pages on Yahoo Finance with Beautiful Soup BS4

我是 Python 新手,正在尝试使用 BS4 从 Yahoo Finance 获取一些财务数据。 使用单个页面,脚本可以正常工作。 但是,现在我试图一次抓取多个页面,但不知何故 for url in urls: 循环无法按预期工作。 它只会从最后一个 url 中抓取数据。

任何人都知道如何解决这个问题?

#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
import ssl
import json
import ast
import os
from urllib2 import Request, urlopen
import datetime 

# For ignoring SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Input from the user
urls = ['https://finance.yahoo.com/quote/ALV.DE?p=ALV.DE&.tsrc=fin-srch', 'https://finance.yahoo.com/quote/SAP?p=SAP&.tsrc=fin-srch']

# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()

# Creating a BeautifulSoup object of the HTML page for easy extraction of data.
soup = BeautifulSoup(webpage, 'html.parser')
html = soup.prettify('utf-8')
company_json = {}
other_details = {}

for h1 in soup.findAll('h1'):
    company_json['TICKER'] = h1.text.strip()
for span in soup.findAll('span',attrs={'class': 'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}):
    company_json['PRESENT_VALUE'] = span.text.strip()
for div in soup.findAll('div', attrs={'class': 'D(ib) Va(t)'}):
    for span in div.findAll('span', recursive=False):
        company_json['PRESENT_GROWTH'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'PREV_CLOSE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['PREV_CLOSE'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'OPEN-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['OPEN'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'BID-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['BID'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'ASK-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['ASK'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'DAYS_RANGE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['DAYS_RANGE'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'FIFTY_TWO_WK_RANGE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['FIFTY_TWO_WK_RANGE'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'TD_VOLUME-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['TD_VOLUME'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'AVERAGE_VOLUME_3MONTH-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['AVERAGE_VOLUME_3MONTH'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'MARKET_CAP-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['MARKET_CAP'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'BETA_3Y-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['BETA_3Y'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'PE_RATIO-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['PE_RATIO'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'EPS_RATIO-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['EPS_RATIO'] = span.text.strip()
for td in soup.findAll('td', attrs={'data-test': 'EARNINGS_DATE-value'}):
    other_details['EARNINGS_DATE'] = []
    for span in td.findAll('span', recursive=False):
        other_details['EARNINGS_DATE'].append(span.text.strip())
for td in soup.findAll('td',attrs={'data-test': 'DIVIDEND_AND_YIELD-value'}):
    other_details['DIVIDEND_AND_YIELD'] = td.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'EX_DIVIDEND_DATE-value'}):
    for span in td.findAll('span', recursive=False):
        other_details['EX_DIVIDEND_DATE'] = span.text.strip()
for td in soup.findAll('td',attrs={'data-test': 'ONE_YEAR_TARGET_PRICE-value' }):
    for span in td.findAll('span', recursive=False):
        other_details['ONE_YEAR_TARGET_PRICE'] = span.text.strip()
other_details['DATE'] = str(datetime.datetime.now())
company_json['OTHER_DETAILS'] = other_details
with open('dax30_kpis.json', 'a') as outfile:
    json.dump(company_json, outfile, indent=4)
print company_json

print '----------Extraction of data is complete. Check json file.----------'

你的缩进似乎是错误的:

# Making the website believe that you are accessing it using a Mozilla browser
# BUG: `webpage` is overwritten on every iteration, so after the loop only
# the LAST url's HTML remains and the parsing below sees a single page.
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()
# Creating a BeautifulSoup object of the HTML page for easy extraction of data.
soup = BeautifulSoup(webpage, 'html.parser')
# ... rest of the code

应该

# Making the website believe that you are accessing it using a Mozilla browser
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()
    # Creating a BeautifulSoup object of the HTML page for easy extraction of data.
    # Parsing is now indented into the loop, so a fresh soup is built per URL.
    soup = BeautifulSoup(webpage, 'html.parser')
# ... rest of the code

此更改是必要的,因为您正在调用循环中的所有 url 并将它们保存在单个变量中。 因此,您的实现最终会覆盖所有抓取的网站,并且只处理最后一个 url 返回的结果。

你需要把所有的处理,直到

#... website processing code
# Append mode ('a') keeps earlier results: one JSON object is added per run.
with open('dax30_kpis.json', 'a') as outfile:
    json.dump(company_json, outfile, indent=4)
进入for循环:

###code before###
# Making the website believe that you are accessing it using a Mozilla browser
# Everything below -- fetch, parse, extract and save -- now lives INSIDE the
# loop, so each URL is fully processed and written out before the next fetch.
for url in urls:
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()

    # Creating a BeautifulSoup object of the HTML page for easy extraction of data.
    soup = BeautifulSoup(webpage, 'html.parser')
    html = soup.prettify('utf-8')
    # Fresh dicts per URL so results from different tickers don't mix.
    company_json = {}
    other_details = {}

    for h1 in soup.findAll('h1'):
        company_json['TICKER'] = h1.text.strip()
    for span in soup.findAll('span',attrs={'class': 'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}):
        company_json['PRESENT_VALUE'] = span.text.strip()
    for div in soup.findAll('div', attrs={'class': 'D(ib) Va(t)'}):
        for span in div.findAll('span', recursive=False):
            company_json['PRESENT_GROWTH'] = span.text.strip()
    # Each summary-table value sits in the first <span> of the matching <td>.
    for td in soup.findAll('td', attrs={'data-test': 'PREV_CLOSE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['PREV_CLOSE'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'OPEN-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['OPEN'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'BID-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['BID'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'ASK-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['ASK'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'DAYS_RANGE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['DAYS_RANGE'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'FIFTY_TWO_WK_RANGE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['FIFTY_TWO_WK_RANGE'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'TD_VOLUME-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['TD_VOLUME'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'AVERAGE_VOLUME_3MONTH-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['AVERAGE_VOLUME_3MONTH'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'MARKET_CAP-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['MARKET_CAP'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'BETA_3Y-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['BETA_3Y'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'PE_RATIO-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['PE_RATIO'] = span.text.strip()
    for td in soup.findAll('td', attrs={'data-test': 'EPS_RATIO-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['EPS_RATIO'] = span.text.strip()
    # EARNINGS_DATE may hold a date range, hence the list.
    for td in soup.findAll('td', attrs={'data-test': 'EARNINGS_DATE-value'}):
        other_details['EARNINGS_DATE'] = []
        for span in td.findAll('span', recursive=False):
            other_details['EARNINGS_DATE'].append(span.text.strip())
    # DIVIDEND_AND_YIELD has no <span> wrapper; read the cell text directly.
    for td in soup.findAll('td',attrs={'data-test': 'DIVIDEND_AND_YIELD-value'}):
        other_details['DIVIDEND_AND_YIELD'] = td.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'EX_DIVIDEND_DATE-value'}):
        for span in td.findAll('span', recursive=False):
            other_details['EX_DIVIDEND_DATE'] = span.text.strip()
    for td in soup.findAll('td',attrs={'data-test': 'ONE_YEAR_TARGET_PRICE-value' }):
        for span in td.findAll('span', recursive=False):
            other_details['ONE_YEAR_TARGET_PRICE'] = span.text.strip()
    other_details['DATE'] = str(datetime.datetime.now())
    company_json['OTHER_DETAILS'] = other_details
    # 'a' (append) mode: one JSON object is added to the file per URL.
    with open('dax30_kpis.json', 'a') as outfile:
        json.dump(company_json, outfile, indent=4)
    print company_json
### Code after ###

暂无
暂无

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM