繁体   English   中英

将 URL 从数据库传递到 webdriver 时出错

[英]Error while passing a URL from Database to webdriver

我正在使用 selenium 和 Python 来抓取一些数据。代码对于单个 URL 工作正常(如果我们对 URL 进行硬编码),在我们的例子中,我们有很多 URL,我计划将 URL 从数据库传递给 webdriver。

但是当我这样做时,它给出了例外,下面是代码和例外。谁能告诉我我做错了什么?

我在此行中遇到异常browser.get(passed_url)但如果我将它作为字符串传递如下,它正在工作 browser.get('https://www.google.com/search?q=vitamin+b12')

from bs4 import BeautifulSoup
from selenium import webdriver 
from selenium.webdriver.common.by import By 
from selenium.webdriver.support.ui import WebDriverWait 
from selenium.webdriver.support import expected_conditions as EC 
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime


option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)

try:
#Database connection string
 DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
 #DWH table to which data is ported
 TABLE_NAME = 'staging.search_url'
 #Connecting DB..
 conn = psycopg2.connect(DSN)
 print("Database connected...")
 conn.set_client_encoding('latin-1')
 cur = conn.cursor()
 cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
 print('database connection failed')
 quit()



search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
serach_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in serach_url_list:
    passed_url=''
    passed_url=str(row)
    passed_url=passed_url.replace(',)','')
    passed_url=passed_url.replace('(','')
    print(passed_url)
    print("\n")
    
    browser.get('https://www.google.com/search?q=vitamin+b12')
    #browser.get(passed_url)
    full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    full_text_title = [x.text for x in full_titles_element]
    # print out all the titles.
    print('Whole names that appear in site:')
    print(full_text_title, '\n')


    product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    #upd_product_name_list=list(filter(None, product_name_list))

    upd_product_name_list=list(filter(None, product_name_list))
    product_name = [x.text for x in product_name_list]
    print('Product names:')
    print(product_name, '\n')
    filtered = [x for x in product_name if len(x.strip()) > 0]
    print(filtered)
    element_length=(len(filtered))
    print(element_length)
    print("\n")

    positions=[]
    for x in range(1, element_length+1):
        positions.append(x)
    print(positions)    
    print("\n")

    company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    company = [x.text for x in company_name_list]
    # print out all the titles.
    print('Company Name:')
    print(company, '\n')

    urls=[]
    find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
    for my_href in find_href:
        url_list=my_href.get_attribute("href")
        urls.append(url_list)
        #print(my_href.get_attribute("href"))
    print(urls)
    print("\n")
    result = zip(positions,filtered, urls, company)

    print(tuple(result))

例外:

Warning (from warnings module):
  File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 16
    browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
DeprecationWarning: use options instead of chrome_options

Database connected...
Fetched DB values
'https://www.google.com/search?q=vitamin+b12'


Traceback (most recent call last):
  File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 49, in <module>
    browser.get(passed_url)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 333, in get
    self.execute(Command.GET, {'url': url})
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 321, in execute
    self.error_handler.check_response(response)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/errorhandler.py", line 242, in check_response
    raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.InvalidArgumentException: Message: invalid argument
  (Session info: chrome=79.0.3945.130)

您正在传递'引号以及字符串的开头和结尾。 见下文,我将它们从字符串中new_url并分配了一个新变量new_url

回答 :

new_url = passed_url[1:len(passed_url)-1]
browser.get(new_url)

例子:

a = "'https://www.google.com/search?q=vitamin+b12'"
b = a[1:len(a)-1]
print(a)
print(b)

您在下面编辑的代码:


from bs4 import BeautifulSoup
from selenium import webdriver 
from selenium.webdriver.common.by import By 
from selenium.webdriver.support.ui import WebDriverWait 
from selenium.webdriver.support import expected_conditions as EC 
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime

option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
try:
#Database connection string
 DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
 #DWH table to which data is ported
 TABLE_NAME = 'staging.search_url'
 #Connecting DB..
 conn = psycopg2.connect(DSN)
 print("Database connected...")
 conn.set_client_encoding('latin-1')
 cur = conn.cursor()
 cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
 print('database connection failed')
 quit()



search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
serach_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in serach_url_list:
    passed_url=''
    passed_url=str(row)
    passed_url=passed_url.replace(',)','')
    passed_url=passed_url.replace('(','')
    new_url = passed_url[1:len(passed_url)-1]
    print(passed_url)
    print("\n")

    #browser.get('https://www.google.com/search?q=vitamin+b12')
    browser.get(new_url)
    full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    full_text_title = [x.text for x in full_titles_element]
    # print out all the titles.
    print('Whole names that appear in site:')
    print(full_text_title, '\n')


    product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    #upd_product_name_list=list(filter(None, product_name_list))

    upd_product_name_list=list(filter(None, product_name_list))
    product_name = [x.text for x in product_name_list]
    print('Product names:')
    print(product_name, '\n')
    filtered = [x for x in product_name if len(x.strip()) > 0]
    print(filtered)
    element_length=(len(filtered))
    print(element_length)
    print("\n")

    positions=[]
    for x in range(1, element_length+1):
        positions.append(x)
    print(positions)    
    print("\n")

    company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
    # use list comprehension to get the actual repo titles and not the selenium objects.
    company = [x.text for x in company_name_list]
    # print out all the titles.
    print('Company Name:')
    print(company, '\n')

    urls=[]
    find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
    for my_href in find_href:
        url_list=my_href.get_attribute("href")
        urls.append(url_list)
        #print(my_href.get_attribute("href"))
    print(urls)
    print("\n")
    result = zip(positions,filtered, urls, company)

    print(tuple(result))

暂无
暂无

声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM