I'm running a Scrapy spider inside a script, and I want to assign the scraped data to a variable rather than output it to a file and then read that file back to get the data.
Right now the spider writes its data to a JSON file; I then read this data, arrange it to my needs, and delete the JSON file the spider created (mostly because I couldn't figure out how to overwrite the spider's output). This works and does what I want, but it definitely seems pretty brute force. Is there a more efficient way to access the spider data without having to first output it to a JSON file?
This is my code:
import json
from collections import OrderedDict
from datetime import datetime
from operator import itemgetter
from pathlib import Path

import scrapy
from scrapy.crawler import CrawlerProcess


class SpiderManager:

    def __init__(self):
        self.run_spider()
        self.compile_json_data()

    @staticmethod
    def write_json(data, filename="quote_data.json"):
        """Write data to JSON file"""
        with open(filename, "w") as f:
            json.dump(data, f, indent=4)

    @staticmethod
    def read_json(filename="quote_data.json"):
        """Get data from JSON file"""
        try:
            with open(filename) as json_file:
                data = json.load(json_file)
        except FileNotFoundError:
            data = OrderedDict()
        except ValueError:
            data = []
        return data

    @staticmethod
    def compile_json_data(spider_file="quotes_spider.json"):
        """Read the data from the spider & create an OrderedDict"""
        spider_data = SpiderManager.read_json(spider_file)
        spider_data = sorted(spider_data, key=itemgetter("dob"))
        ordered_data = OrderedDict()
        for author_quote in spider_data:
            ordered_data.update({author_quote["author"]: author_quote["quote"]})
        SpiderManager.write_json(ordered_data, filename="quotes_dict.json")
        # delete the spider's output file once its data has been compiled
        try:
            (Path.cwd() / spider_file).unlink()
        except FileNotFoundError:
            pass

    def run_spider(self):
        """Run the spider"""
        process = CrawlerProcess({"FEED_FORMAT": "json",
                                  "FEED_URI": "quotes_spider.json",
                                  })
        process.crawl(MySpider)
        process.start()


class MySpider(scrapy.Spider):
    name = "quotes"
    temp_data = {}

    def start_requests(self):
        urls = [
            'http://quotes.toscrape.com/',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        quote_blocks = response.css("div.quote")
        for quote_block in quote_blocks:
            url = quote_block.css("a::attr(href)").get()
            quote = quote_block.css("span::text").get().strip()
            yield response.follow(url, self.parse_crossword,
                                  cb_kwargs=dict(quote=quote))

    def parse_crossword(self, response, quote):
        author = response.css("h3::text").get().strip()
        dob = response.css("span.author-born-date::text").get()
        dob = datetime.strptime(dob, "%B %d, %Y")
        yield {
            "author": author,
            "dob": dob,
            "quote": quote
        }


if __name__ == '__main__':
    SpiderManager()
Items & ItemPipelines were what I needed to achieve this. I created a class variable in SpiderManager, and then used a pipeline to append each item to the class variable. Below is my code, I added an Item class and Pipeline class, and specified the pipeline CrawlerProcess
from scrapy import Field, Item


class SaveItemPipeline:
    """Append item to list in SpiderManager"""
    def process_item(self, item, spider):
        SpiderManager.spider_data.append(item)
        return item  # pipelines must return the item for later stages


class MyItem(Item):
    author = Field()
    dob = Field()
    quote = Field()

...

class SpiderManager:
    spider_data = []  # the class variable the pipeline appends to
    ...

    def run_spider(self):
        """Run the spider"""
        process = CrawlerProcess({
            "ITEM_PIPELINES": {SaveItemPipeline: 100},
        })
        process.crawl(MySpider)
        process.start()
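With the pipeline in place, SpiderManager.spider_data is fully populated once process.start() returns (it blocks until the crawl finishes), so the JSON round-trip and the file deletion can be dropped entirely. Below is a minimal sketch of how the compiling step could consume the class variable directly; the method name compile_data is my own (not from the original code), and it assumes the write_json helper from the question:

class SpiderManager:
    spider_data = []

    def __init__(self):
        self.run_spider()    # blocks until the crawl has finished
        self.compile_data()  # spider_data is fully populated by now

    def compile_data(self):
        """Build the author -> quote mapping straight from spider_data"""
        # Scrapy Items support dict-style access, so itemgetter works here
        data = sorted(self.spider_data, key=itemgetter("dob"))
        ordered_data = OrderedDict(
            (item["author"], item["quote"]) for item in data
        )
        SpiderManager.write_json(ordered_data, filename="quotes_dict.json")

Since the items now live in a list rather than a file, there is nothing to delete afterwards, and the try/except around the file removal goes away with it.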