[英]Dynamic Table Scraping with selenium in python
我正在嘗試訪問此站點的數據: http://surge.srcc.lsu.edu/s1.html 。 到目前為止，我的代碼循環通過兩個下拉菜單，但表是動態命名的，我無法從中獲取數據。 我試圖通過 id 為「output_data_table」的表訪問數據，但遇到了麻煩。
# Scrape the storm-surge table from http://surge.srcc.lsu.edu/s1.html by
# driving the two dropdowns (storm name, year) and parsing the table that
# the page's JavaScript renders after each selection.
from selenium import webdriver
import time
from selenium.webdriver.support.ui import Select
import lxml.html

driver = webdriver.Firefox()
driver.get("http://surge.srcc.lsu.edu/s1.html")


def frame_switch(css_selector):
    """Switch the driver's context into the first frame matching *css_selector*."""
    driver.switch_to.frame(driver.find_element_by_css_selector(css_selector))


# The dropdowns and table live inside an iframe, not the top-level document.
frame_switch("iframe")
html_source = driver.page_source

nameSelect = Select(driver.find_element_by_xpath('//select[@id="storm_name"]'))
stormCount = len(nameSelect.options)
# Start at 1 to skip the placeholder option at index 0.
for i in range(1, stormCount):
    print("starting loop on option storm " + nameSelect.options[i].text)
    nameSelect.select_by_index(i)
    time.sleep(3)  # let the year dropdown repopulate for this storm
    yearSelect = Select(driver.find_element_by_xpath('//select[@id="year"]'))
    yearCount = len(yearSelect.options)
    for j in range(1, yearCount):
        print("starting loop on option year " + yearSelect.options[j].text)
        yearSelect.select_by_index(j)
        # BUG FIX: the table is generated by JavaScript *after* the year is
        # selected; without waiting, page_source does not yet contain it and
        # the XPath below matches nothing.
        time.sleep(10)
        root = lxml.html.fromstring(driver.page_source)
        for row in root.xpath('.//table[@id="output_data_table"]//tr'):
            cells = row.xpath('.//td/text()')
            # Guard: header rows (th-only) and short rows would otherwise
            # raise IndexError on the fixed indices below.
            if len(cells) < 9:
                continue
            # NOTE(review): cells[4] is skipped in the original mapping —
            # presumably an unwanted column; confirm against the page layout.
            dict_value = {'0th': cells[0],
                          '1st': cells[1],
                          '2nd': cells[2],
                          '3rd': cells[3],
                          '4th': cells[5],
                          '5th': cells[6],
                          '6th': cells[7],
                          '7th': cells[8]}
            print(dict_value)
好像你必須在調用“root = lxml.html.fromstring(driver.page_source)”之前等待。
如果你不等,你得到的html源代碼沒有javascript生成的表格。 在它之前放一個“time.sleep(10)”。
這似乎得到了表格。 我使用BeautifulSoup作為一個簡單的例子。
# Answer's variant: same dropdown-driving loop, but waits for the
# JavaScript-rendered table and extracts it with BeautifulSoup.
from selenium import webdriver
import time, re
from selenium.webdriver.support.ui import Select
import lxml.html
from bs4 import BeautifulSoup

driver = webdriver.Firefox()
driver.get("http://surge.srcc.lsu.edu/s1.html")


def frame_switch(css_selector):
    """Switch the driver's context into the first frame matching *css_selector*."""
    driver.switch_to.frame(driver.find_element_by_css_selector(css_selector))


# The dropdowns and table live inside an iframe, not the top-level document.
frame_switch("iframe")
html_source = driver.page_source

nameSelect = Select(driver.find_element_by_xpath('//select[@id="storm_name"]'))
stormCount = len(nameSelect.options)
# Start at 1 to skip the placeholder option at index 0.
for i in range(1, stormCount):
    print("starting loop on option storm " + nameSelect.options[i].text)
    nameSelect.select_by_index(i)
    time.sleep(3)  # let the year dropdown repopulate for this storm
    yearSelect = Select(driver.find_element_by_xpath('//select[@id="year"]'))
    yearCount = len(yearSelect.options)
    for j in range(1, yearCount):
        print("starting loop on option year " + yearSelect.options[j].text)
        yearSelect.select_by_index(j)
        # Wait for the JavaScript-generated table to appear in page_source.
        time.sleep(10)
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # BUG FIX: the original used Python 2 `print soup...` statements,
        # a SyntaxError under Python 3 (the rest of the script already uses
        # print() calls). Also hoist the duplicated find_all() lookup.
        get_table = soup.find_all("tbody", {"class": re.compile(".*")})[1]
        # show the needed table body
        print(get_table.prettify())
        # print out each row's text
        for column in get_table.find_all("tr"):
            print(column.getText())
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.