can you tell me why my while loop isn't working, please? I get no error message, it just runs once.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
import time
PATH = "/Users/csongordoma/Documents/chromedriver"
driver = webdriver.Chrome(PATH)
current_page = 1
driver.get('https://ingatlan.com/lista/elado+lakas+budapest?page=' + str(current_page))
data = {}
df = pd.DataFrame(columns=['Price', 'Address', 'Size', 'Rooms', 'URL', 'Labels'])
listings = driver.find_elements_by_css_selector('div.listing__card')
while current_page < 10:
for listing in listings:
data['Price'] = listing.find_elements_by_css_selector('div.price')[0].text
data['Address'] = listing.find_elements_by_css_selector('div.listing__address')[0].text
data['Size'] = listing.find_elements_by_css_selector('div.listing__parameters')[0].text
data['Labels'] = listing.find_elements_by_css_selector('div.listing__labels')[0].text
data['URL'] = listing.find_elements_by_css_selector('a.listing__link.js-listing-active-area')[0].get_attribute('href')
df = df.append(data, ignore_index=True)
current_page += 1
print(len(listings))
print(df)
# driver.find_element_by_xpath("//a[. = 'Következő oldal']").click()
driver.quit()
the output is a good data frame of 20 items which is one page's worth. on the website I'm trying to scrape. Set the limit at 10 cycles to not overload anyone, but ideally, I want to run through all pages.
Just arrange the code inside your while loop and indent the currentpage to the outer loop. I added a try except in case of any errors and webdriver waits for consistency of getting elements after driver.get.
current_page = 1
data = {}
df = pd.DataFrame(columns=['Price', 'Address', 'Size', 'Rooms', 'URL', 'Labels'])
while current_page < 10:
driver.get('https://ingatlan.com/lista/elado+lakas+budapest?page=' + str(current_page))
try:
listings=WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "div.listing__card")))
for listing in listings:
data['Price'] = listing.find_elements_by_css_selector('div.price')[0].text
data['Address'] = listing.find_elements_by_css_selector('div.listing__address')[0].text
data['Size'] = listing.find_elements_by_css_selector('div.listing__parameters')[0].text
data['Labels'] = listing.find_elements_by_css_selector('div.listing__labels')[0].text
data['URL'] = listing.find_elements_by_css_selector('a.listing__link.js-listing-active-area')[0].get_attribute('href')
df = df.append(data, ignore_index=True)
except:
print('Error')
current_page += 1
print(len(listings))
print(df)
Import
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.