[英]Data save coding problems
這是蜘蛛
#-*-coding:utf-8-*-
import scrapy
from scrapy_test.items import ScrapyTestItem
class QiushiSpider(scrapy.Spider):
    """Scrape joke text and vote counts from qiushibaike.com."""
    name = "qiushibaike"
    start_urls = [
        "http://www.qiushibaike.com",
    ]

    def parse(self, response):
        """Yield one item per joke entry found on the page."""
        for temp in response.xpath(".//div[@id='content']//div[@id='content-left']/div"):
            # Create a FRESH item for every entry. The original built a single
            # ScrapyTestItem before the loop and mutated/re-yielded it, so
            # every buffered item could end up holding the last entry's data.
            item = ScrapyTestItem()
            item['text'] = temp.xpath(".//div[@class='content']/span/text()").extract()
            item['number'] = temp.xpath(".//div[@class='stats']/span[@class='stats-vote']/i/text()").extract()
            yield item
這是保存數據的管道
import json
from scrapy.exceptions import DropItem
class ScrapyTestPipeline(object):
    """Deduplicate items by a hash of their text and dump them as JSON lines."""

    def __init__(self):
        # Hashes of items already written; used for duplicate detection.
        self.ids_seen = set()
        self.file = open("aaa.jl", "w", encoding='utf8')

    def process_item(self, item, spider):
        """Assign a content-hash id, drop duplicates, and write one JSON line."""
        item['id'] = hash(''.join(item['text']))
        if item['id'] in self.ids_seen:
            raise DropItem("Duplicate item found: %s " % item)
        # Record the id so later duplicates are actually detected — the
        # original checked ids_seen but never added anything to it.
        self.ids_seen.add(item['id'])
        # ensure_ascii=False writes CJK text as readable UTF-8 instead of
        # \uXXXX escapes — this is the encoding problem the file was showing.
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Flush and release the output file when the spider finishes
        # (the original leaked the handle).
        self.file.close()
我試圖保存數據,但數據編碼出錯,嘗試以其他方式進行操作,但失敗了。 我該怎么辦?
這是錯誤數據 `"text": ["\n\n\nlz\u7537\uff0c\u4eca\u5929\u8ddf\u670b\u53cb\u53bbktv\u5531\u6b4c\uff01"]`（中文被轉義成 \uXXXX 形式，無法直接閱讀）
您是如何運行刮板的? 您正在使用scrapy的默認導出器將數據保存到JSON / CSV中?
在settings.py
執行此操作
FEED_EXPORT_ENCODING = 'utf-8'
#-*-coding:utf-8-*-
import scrapy
from scrapy_test.items import ScrapyTestItem
import re
class QiushiSpider(scrapy.Spider):
    """Scrape joke text, vote counts and stable ids from qiushibaike.com."""
    name = "qiushibaike"
    start_urls = [
        "http://www.qiushibaike.com",
    ]

    def parse(self, response):
        """Yield one item per joke entry found on the page."""
        for temp in response.xpath(".//div[@id='content']//div[@id='content-left']/div"):
            # Create a FRESH item per entry: the original reused one instance
            # across yields, so later iterations clobbered earlier items.
            item = ScrapyTestItem()
            content = temp.xpath(".//div[@class='content']/span")
            # Trailing digits of the entry's div id serve as a unique key.
            # (renamed from `id`, which shadowed the builtin)
            raw_id = temp.xpath('.//@id').extract_first('')
            item['id'] = re.search('[0-9]*$', raw_id).group(0)
            # string(.) flattens nested markup; encoded here because the
            # pipeline decodes the bytes before writing.
            item['text'] = (''.join(content.xpath("string(.)").extract())).strip().encode('utf-8')
            item['number'] = temp.xpath(".//div[@class='stats']/span[@class='stats-vote']/i/text()").extract_first('')
            yield item
這是管道
# -*- coding: utf-8 -*-
from scrapy.exceptions import DropItem
class ScrapyTestPipeline(object):
    """Append each unique item to liu.jl as plain text, dropping repeated ids."""

    def __init__(self):
        # Ids already written; membership test drops duplicates.
        self.ids_seen = set()
        # Output path; opened per item in append mode so nothing is leaked.
        self.file = "liu.jl"

    def process_item(self, item, spider):
        """Write the item's text/number/id as labelled lines; drop duplicates."""
        item_id = item['id']
        # Guard clause instead of if/else nesting.
        if item_id in self.ids_seen:
            raise DropItem("Duplicate item found: %s " % item)
        self.ids_seen.add(item_id)
        record = [
            'text: ' + item['text'].decode() + '\n',
            'number ' + item['number'] + '\n',
            'id ' + item_id + '\n\n',
        ]
        with open(self.file, 'a', encoding='utf-8') as out:
            out.writelines(record)
        return item
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.