
How do I parallelize a Python loop with webdriver?

In my code below I try to scrape all the odds from 25 different URL pages. I currently use a simple loop, but it takes too much time. How can I parallelize this code to reduce the execution time?

Here is the code:

#!/usr/bin/python3
# -*- coding: utf-8 -*-

from selenium import webdriver
import statistics as stat
import numpy as np

driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

#Store url associated with the soccer games
url_links = []
for i in driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]'):
    url_links.append(i.get_attribute('href'))

driver.close()
print(len(url_links), '\n')

for l in range(len(url_links)):
    driver = webdriver.Firefox()
    driver.get(url_links[l])

    #Store odds into table
    odds = []
    header = []
    for i in driver.find_elements_by_xpath('//button[contains(@class, "btn btn-default btn-xs btncote")]'):
        odds.append(i.text)

    for i in driver.find_elements_by_xpath('//th[contains(@width, "20%")]'):
        header.append(i.text)

    rows = int(len(odds)/3) 
    columns = 3
    odds = [float(i) for i in odds]
    odds = np.array(odds)
    header = np.array(header)
    odds = odds.reshape(rows, columns)

    print(odds, '\n')
    driver.close()

ThreadPoolExecutor would do the job perfectly, if you're using Python > 3.5.

from selenium import webdriver
import statistics as stat
import numpy as np
from concurrent.futures import ThreadPoolExecutor

driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

#Store url associated with the soccer games
url_links = []
for i in driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]'):
    url_links.append(i.get_attribute('href'))

driver.close()
print(len(url_links), '\n')

def scraper(url_link):
    driver = webdriver.Firefox()
    driver.get(url_link)

    #Store odds into table
    odds = []
    header = []
    for i in driver.find_elements_by_xpath('//button[contains(@class, "btn btn-default btn-xs btncote")]'):
        odds.append(i.text)

    for i in driver.find_elements_by_xpath('//th[contains(@width, "20%")]'):
        header.append(i.text)

    rows = int(len(odds)/3) 
    columns = 3
    odds = [float(i) for i in odds]
    odds = np.array(odds)
    header = np.array(header)
    odds = odds.reshape(rows, columns)

    print(odds, '\n')
    driver.close()
    
with ThreadPoolExecutor(max_workers=8) as executor:
    executor.map(scraper, url_links)
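
If you want to collect the odds rather than just print them from inside the threads, you can have the worker return its array and let executor.map gather the results. A minimal sketch, assuming the same page structure and reusing the url_links list built above; the only changes are the return value and a finally block so a failed page still closes its browser:

from concurrent.futures import ThreadPoolExecutor

from selenium import webdriver
import numpy as np

def scrape_odds(url_link):
    driver = webdriver.Firefox()
    try:
        driver.get(url_link)
        # Same XPath as above: grab the odds buttons and reshape into rows of 3
        odds = [float(b.text) for b in driver.find_elements_by_xpath(
            '//button[contains(@class, "btn btn-default btn-xs btncote")]')]
        return np.array(odds).reshape(len(odds) // 3, 3)
    finally:
        # Make sure the Firefox instance is closed even if the page fails
        driver.quit()

with ThreadPoolExecutor(max_workers=8) as executor:
    # executor.map returns results in the same order as url_links
    all_odds = list(executor.map(scrape_odds, url_links))

for table in all_odds:
    print(table, '\n')

Keep max_workers modest: each worker opens its own Firefox instance, so the practical limit is how many browsers your machine can run at once, not how many threads Python can spawn.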
