
Scrapy - Recursively scraping to third page

I hope my request is fairly simple and straightforward for experienced Scrapy users.

In essence, the following code works perfectly for scraping a second page based on links from the first page. I would like to extend the code to scrape a third page, using links found on the second page. With the code below, def parse_items handles the landing page (the first level), which contains 50 listings, and the code is set up to recursively follow each of those 50 links. def parse_listing_page specifies which items to scrape from each "listing page". Within each listing page, I want my script to follow a link through to yet another page and scrape one or two items there, before returning to the "listing page" and then back to the landing page.

The code below works perfectly for scraping recursively across two levels. How can I extend this to three, using the code below?

from scrapy import log
from scrapy.log import ScrapyFileLogObserver
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from firstproject.items import exampleItem
from scrapy.http import Request
import urlparse

logfile_info = open('example_INFOlog.txt', 'a')
logfile_error = open('example_ERRlog.txt', 'a')
log_observer_info = log.ScrapyFileLogObserver(logfile_info, level=log.INFO)
log_observer_error = log.ScrapyFileLogObserver(logfile_error, level=log.ERROR)
log_observer_info.start()
log_observer_error.start()

class MySpider(CrawlSpider):
    name = "example"

    allowed_domains = ["example.com.au"]

    rules = (
        Rule(SgmlLinkExtractor(allow=("",), restrict_xpaths=('//li[@class="nextLink"]',)),
             callback="parse_items", follow=True),
    )

    def start_requests(self):
        start_urls = reversed([
            "http://www.example.com.au/1?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=100-to-200",
        ])

        return [Request(url=start_url) for start_url in start_urls]

    def parse_start_url(self, response):
        return self.parse_items(response)

    def parse_items(self, response):
        hxs = HtmlXPathSelector(response)
        listings = hxs.select("//h2")
        items = []
        for listing in listings:
            item = exampleItem()
            item["title"] = listing.select("a/text()").extract()[0]
            item["link"] = listing.select("a/@href").extract()[0]
            items.append(item)

            url = "http://example.com.au%s" % item["link"]
            yield Request(url=url, meta={'item': item}, callback=self.parse_listing_page)


    def parse_listing_page(self, response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["item_1"] = hxs.select('#censored Xpath').extract()
        item["item_2"] = hxs.select('#censored Xpath').extract()
        item["item_3"] = hxs.select('#censored Xpath').extract()
        item["item_4"] = hxs.select('#censored Xpath').extract()

        return item

Many thanks.

This is how the code flow works.

The Rule constructor in the MySpider class is invoked first. The Rule constructor's callback is set to parse_items. At the end of parse_items there is a yield, which makes the function recurse into parse_listing_page. If you want to recurse from parse_listing_page down to a third level, there has to be a Request yielded from parse_listing_page.
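To make that concrete, here is a minimal sketch of what the second-level callback could look like. The XPaths are placeholders, and parse_third_level and item_from_third_page are hypothetical names used only for illustration, not part of the original code:

    def parse_listing_page(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        item["item_1"] = hxs.select('#censored Xpath').extract()

        # placeholder: link found on the listing page that leads to the third-level page
        third_level_link = hxs.select('#censored Xpath').extract()[0]
        url = "http://example.com.au%s" % third_level_link

        # yield a Request instead of returning the item, so the same item
        # is carried through to the third-level callback via meta
        yield Request(url=url, meta={'item': item}, callback=self.parse_third_level)

    def parse_third_level(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        item["item_from_third_page"] = hxs.select('#censored Xpath').extract()
        # only now is the fully populated item returned to the pipeline
        return item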

Here is my updated code. The code below is able to extract counter_link in the correct format (tested), but the else branch seems to be taken, so parse_listing_counter is never reached. If I remove the if and else clauses and force the code to call back parse_listing_counter, no items are yielded at all (not even the ones from parse_items or the listing page).

What am I doing wrong in my code? I have also checked the XPaths - they all seem fine.

from scrapy import log
from scrapy.log import ScrapyFileLogObserver
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from firstproject.items import exampleItem
from scrapy.http import Request
import urlparse

logfile_info = open('example_INFOlog.txt', 'a')
logfile_error = open('example_ERRlog.txt', 'a')
log_observer_info = log.ScrapyFileLogObserver(logfile_info, level=log.INFO)
log_observer_error = log.ScrapyFileLogObserver(logfile_error, level=log.ERROR)
log_observer_info.start()
log_observer_error.start()

class MySpider(CrawlSpider):
    name = "example"

    allowed_domains = ["example.com.au"]

    rules = (
        Rule(SgmlLinkExtractor(allow=("",), restrict_xpaths=('//li[@class="nextLink"]',)),
             callback="parse_items", follow=True),
    )

    def start_requests(self):
        start_urls = reversed([
            "http://www.example.com.au/1?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=100-to-200",
        ])

        return [Request(url=start_url) for start_url in start_urls]

    def parse_start_url(self, response):
        return self.parse_items(response)

    def parse_items(self, response):
        hxs = HtmlXPathSelector(response)
        listings = hxs.select("//h2")
        items = []
        for listing in listings:
            item = exampleItem()
            item["title"] = listing.select("a/text()").extract()[0]
            item["link"] = listing.select("a/@href").extract()[0]
            items.append(item)

            url = "http://example.com.au%s" % item["link"]
            yield Request(url=url, meta={'item': item}, callback=self.parse_listing_page)


    def parse_listing_page(self, response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["item_1"] = hxs.select('#censored Xpath').extract()
        item["item_2"] = hxs.select('#censored Xpath').extract()
        item["item_3"] = hxs.select('#censored Xpath').extract()
        item["item_4"] = hxs.select('#censored Xpath').extract()

        item["counter_link"] = hxs.selext('#censored Xpath').extract()[0]
        counter_link = response.meta.get('counter_link', None)
        if counter_link:
            url2 = "http://example.com.au%s" % item["counter_link"]
            yield Request(url=url2, meta={'item':item},callback=self.parse_listing_counter)
        else:
            yield item

    def parse_listing_counter(self, response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["counter"] = hxs.select('#censored Xpath').extract()

        return item
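For reference, here is a minimal sketch of what the branching in parse_listing_page might look like if the check were driven by the item field rather than by response.meta (which, in the code above, only carries the 'item' key passed in by parse_items). The XPaths and field names are placeholders carried over from the code above; this is an assumption about the intent, not a confirmed fix:

    def parse_listing_page(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']

        item["item_1"] = hxs.select('#censored Xpath').extract()

        # placeholder: extract the link to the counter page directly into the item
        item["counter_link"] = hxs.select('#censored Xpath').extract()[0]

        # check the freshly extracted item field instead of response.meta,
        # which does not contain a 'counter_link' key at this point
        if item["counter_link"]:
            url2 = "http://example.com.au%s" % item["counter_link"]
            yield Request(url=url2, meta={'item': item}, callback=self.parse_listing_counter)
        else:
            yield item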

