Scraping asp.net site does not work when using a function in selenium in python
I want to scrape a .net website, and I wrote this code:
import time
import scrapy
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager


class BoursakuwaitSpider(scrapy.Spider):
    name = 'boursakuwait'
    custom_settings = {
        'FEED_URI': 'second.json',
        'FEED_FORMAT': 'json',
    }
    start_urls = ['https://casierjudiciaire.justice.gov.ma/verification.aspx']

    def parse(self, no_response):
        browser = webdriver.Chrome(executable_path=ChromeDriverManager().install())
        browser.get('https://casierjudiciaire.justice.gov.ma/verification.aspx')
        time.sleep(10)
        response = Selector(text=browser.page_source)
When I use a function, the code does not work, but if I put everything directly in the class body, like this:
import time
import scrapy
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager


class BoursakuwaitSpider(scrapy.Spider):
    name = 'boursakuwait'
    custom_settings = {
        'FEED_URI': 'second.json',
        'FEED_FORMAT': 'json',
    }
    start_urls = ['https://casierjudiciaire.justice.gov.ma/verification.aspx']

    browser = webdriver.Chrome(executable_path=ChromeDriverManager().install())
    browser.get('https://casierjudiciaire.justice.gov.ma/verification.aspx')
    time.sleep(10)
    response = Selector(text=browser.page_source)
then the code works correctly. But I want to use a function (the first code), and I don't know where the problem is. Any help, please.
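For background, the reason the second snippet runs at all is a Python detail worth noting: statements written directly in a class body execute once, at class-definition time, while a method body runs only when the method is actually called. A minimal sketch (the Demo class here is hypothetical, for illustration only):

class Demo:
    # This line runs as soon as Python defines the class, exactly
    # like the webdriver.Chrome(...) call in the class-level snippet.
    print("runs at class-definition time")

    def method(self):
        # This runs only when someone calls method() on an instance.
        print("runs when method() is called")


d = Demo()   # prints nothing extra; the class-body line already ran
d.method()   # now the method body runs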
First, create a def start_requests(self): method and set up all the Selenium dependencies inside it. You then have to pass the browser/driver from one method to another by attaching it to self. The following code is working:

Example:
import time
import scrapy
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from scrapy.crawler import CrawlerProcess
from selenium.webdriver.chrome.options import Options


class BoursakuwaitSpider(scrapy.Spider):
    name = 'boursakuwait'
    # custom_settings = {
    #     'FEED_URI': 'second.json',
    #     'FEED_FORMAT': 'json',
    # }

    def start_requests(self):
        # All Selenium setup lives here; the driver is stored on
        # self so that parse() can reuse the same browser session.
        options = webdriver.ChromeOptions()
        options.add_argument("start-maximized")
        options.add_experimental_option("detach", True)
        url = 'https://stackoverflow.com'
        self.browser = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()),
            options=options)
        self.browser.get(url)
        time.sleep(5)
        yield scrapy.Request(
            url='https://stackoverflow.com',
            callback=self.parse
        )

    def parse(self, response):
        # The driver created in start_requests() is available
        # here as self.browser.
        self.browser.get(response.url)
        time.sleep(5)
        # response = Selector(text=self.browser.page_source)


if __name__ == "__main__":
    process = CrawlerProcess()
    process.crawl(BoursakuwaitSpider)
    process.start()
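If the goal is then to scrape the rendered page, as in the original question, parse() can hand the Selenium-rendered HTML to a Scrapy Selector and yield items from it. Below is a minimal sketch of that pattern under stated assumptions: the spider name, target URL, and the h1::text selector are hypothetical placeholders, and closed() is Scrapy's spider-shutdown hook, used here to quit Chrome when the crawl ends:

import time

import scrapy
from scrapy import Selector
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager


class RenderedSpider(scrapy.Spider):
    # Hypothetical spider name and URL, for illustration only.
    name = 'rendered'

    def start_requests(self):
        # Create the driver once and share it through self, as above.
        self.browser = webdriver.Chrome(
            service=Service(ChromeDriverManager().install()))
        yield scrapy.Request(url='https://example.com', callback=self.parse)

    def parse(self, response):
        # Let Selenium render the page, then hand the HTML to Scrapy.
        self.browser.get(response.url)
        time.sleep(5)
        sel = Selector(text=self.browser.page_source)
        # 'h1::text' is a placeholder selector; replace it with the
        # fields you actually want from the rendered page.
        for title in sel.css('h1::text').getall():
            yield {'title': title}

    def closed(self, reason):
        # Scrapy calls closed() when the spider finishes; quit Chrome
        # here so the browser does not linger after the crawl.
        self.browser.quit()

Quitting the driver in closed() matters in this design: since the browser is created by the spider rather than by Scrapy, nothing else will shut it down when the crawl ends.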