Crawling the next page when the URL remains the same
I need some help crawling data from the following URL: https://onland.kbstar.com/quics?page=C060250&keyword=%EB%8F%99%EC%9E%91%EA%B5%AC
I want to crawl the second page, but when I click "2" the URL stays the same, and I don't know how to get the next page's data. Please help!
This is the Python code I use to crawl the first page:
from selenium import webdriver
import pandas as pd
import numpy as np
import time
from bs4 import BeautifulSoup
import urllib.request as req
import urllib
import re
from datetime import datetime
import requests

dataframe = pd.DataFrame()

def KB_liveON(area_name):
    query = area_name
    area = urllib.parse.quote(query)
    url = 'https://onland.kbstar.com' \
          + '/quics?page=C060250' \
          + '&keyword=' + str(area)
          # + '#CP'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    table = soup.find('table')
    trs = table.tbody.find_all('tr')

    dataframe = pd.DataFrame()
    value_list = []
    for tr in trs[::1]:
        tds = tr.find_all('td')
        #cols = [' '.join(td.text.strip().split()) for td in tds]
        cols = [td.text.strip().split() for td in tds]
        progress = cols[0]
        location = cols[1]
        complex_name = cols[2]
        area = cols[3]
        sale_price = cols[4]
        sale_price2 = cols[5]
        time = cols[6]
        type_of_sale = cols[7]
        construction_company = cols[8]
        value_list.append([progress, location, complex_name, area, sale_price, sale_price2, time, type_of_sale, construction_company])

    cols = ['progress', 'location', 'complex_name', 'area', 'sale_price', 'sale_price2', 'time', 'type_of_sale', 'construction_company']
    df = pd.DataFrame(value_list, columns=cols)
    return df

kb = KB_liveON('동작구')
dataframe = dataframe.append(kb)
dataframe
First, I installed the Selenium WebDriver on Google Colab. Then I wrote a crawler that scrapes the data from multiple pages.
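For reference, the Colab setup I mean is a notebook cell along these lines (a sketch only; the exact package names and chromedriver path depend on the current Colab image, so adjust as needed):

!pip install selenium
!apt-get update
!apt install -y chromium-chromedriver
# chromium-chromedriver installs the driver here; copy it onto PATH so webdriver.Chrome('chromedriver', ...) can find it
!cp /usr/lib/chromium-browser/chromedriver /usr/bin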
Python code:
import time
from selenium import webdriver
import pandas as pd
from bs4 import BeautifulSoup

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

def extract_data(value_list, html_tags):
    soup = BeautifulSoup(html_tags, 'lxml')
    table = soup.find('table')
    trs = table.tbody.find_all('tr')
    for tr in trs[::1]:
        tds = tr.find_all('td')
        cols = [td.text.strip().split() for td in tds]
        progress = cols[0]
        location = cols[1]
        complex_name = cols[2]
        area = cols[3]
        sale_price = cols[4]
        sale_price2 = cols[5]
        time = cols[6]
        type_of_sale = cols[7]
        construction_company = cols[8]
        value_list.append([progress, location, complex_name, area, sale_price, sale_price2, time, type_of_sale, construction_company])
    return value_list

def KB_liveON(area):
    url = 'https://onland.kbstar.com' \
          + '/quics?page=C060250' \
          + '&keyword=' + str(area)
    wd.get(url)

    data_list = []

    # Extract data from first page
    tbl = wd.find_elements_by_class_name("tbl_list")[0]
    html_tags = tbl.get_attribute('outerHTML')
    data_list = extract_data(data_list, html_tags)

    # Find and extract data from other pages except first page
    forms = wd.find_elements_by_xpath("//div[@class='paging']//form")
    for f in forms[1:]:
        f.submit()
        time.sleep(10)
        tbl = wd.find_elements_by_class_name("tbl_list")[0]
        html_tags = tbl.get_attribute('outerHTML')
        data_list = extract_data(data_list, html_tags)
        time.sleep(10)

    cols = ['progress', 'location', 'complex_name', 'area', 'sale_price', 'sale_price2', 'time', 'type_of_sale', 'construction_company']
    df = pd.DataFrame(data_list, columns=cols)
    return df

if __name__ == '__main__':
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    wd = webdriver.Chrome('chromedriver', options=options)

    df = KB_liveON('동작구')
    print(df)
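The key point is that the page numbers on this site are backed by forms inside the div with class "paging" (which is what the XPath above targets), so submitting each form reloads the result table in place. That is why the URL never changes, and why the paging has to be driven through a real browser instead of plain requests.

Note that the snippet uses the Selenium 3 find_elements_by_* helpers, which newer Selenium releases have removed. If you are on Selenium 4 or later, the two lookups would roughly become the following (a sketch, not part of the original code):

from selenium.webdriver.common.by import By

# Selenium 4 style locators (assumed equivalents of the calls used above)
tbl = wd.find_elements(By.CLASS_NAME, "tbl_list")[0]
forms = wd.find_elements(By.XPATH, "//div[@class='paging']//form")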