我试图使用 Selenium 和 Django 从 dextools 工具中抓取市场数据,但到目前为止还无法抓取所有的市场数据。你会注意到 dextools 的市场数据是延迟加载的:只有在向下滚动或点击下一页时才会加载新数据。由于所有数据无法在一个网页中显示,数据被分成了 35 页。目前我只能抓取屏幕上出现的第一页,下面的代码无法抓取所有数据。如何改进代码,才能抓取全部 35 页的数据?
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
def getData(url):
    """Open the dextools pair-explorer page and print the trade rows
    currently rendered in the ngx-datatable.

    NOTE: the table lazy-loads — only the rows of the visible (first)
    page are captured; later pages are never requested.
    """
    driver = webdriver.Chrome(
        executable_path='C:/Users/denni/OneDrive/Desktop/DextoolScrapper/app/chromedriver.exe'
    )
    driver.get('https://www.dextools.io/app/uniswap/pair-explorer/0xa29fe6ef9592b5d408cca961d0fb9b1faf497d6d')

    # Wait until the Angular data table exists in the DOM.
    table = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, 'ngx-datatable'))
    )
    # Bring the table into view, then scroll its body downward so the
    # currently-loaded rows are rendered.
    driver.execute_script("arguments[0].scrollIntoView();", table)
    first_cell = table.find_element_by_tag_name('datatable-body-cell')
    driver.execute_script("arguments[0].scrollTo(0, arguments[0].scrollHeight)", first_cell)

    # One <datatable-row-wrapper> per visible trade row.
    for wrapper in table.find_elements_by_tag_name('datatable-row-wrapper'):
        cells = wrapper.find_elements_by_tag_name('datatable-body-cell')
        date = cells[0].text
        type = cells[1].text
        price_usd = cells[2].text
        price_eth = cells[3].text
        amount_cuminu = cells[4].text
        total_eth = cells[5].text
        # Maker column holds a link; keep its href, not the text.
        maker = cells[6].find_element_by_tag_name('a').get_attribute('href')
        print(date, type, price_usd, price_eth, amount_cuminu, total_eth, maker)
        print('----')

这是上述代码抓取第一页数据后输出的结果。
发布于 2021-07-10 16:37:30
只需将代码放在 while True 循环中,并在循环末尾点击 next(下一页)。当没有更多下一页可点击时,可以用 try/except 捕获点击 next 时产生的错误,从而退出循环。
最终,可能还需要在 click() 之后调用 sleep(),让 JavaScript 有时间替换已有表格 ngx-datatable 中的值。
编辑:现在使用 pandas.DataFrame 将所有数据保存到一个 Excel 文件中。
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
def getData(url):
    """Scrape every page of the dextools pair-explorer trade table.

    The ngx-datatable is paginated, so the scraper repeatedly reads the
    rows currently rendered, clicks the "next page" control, and stops
    when that control can no longer be found or clicked (last page).
    All rows are collected and written to ``results.xlsx``.

    :param url: pair-explorer URL to scrape; when falsy, a default pair
                URL is used so the existing ``getData(None)`` call keeps
                working unchanged.
    """
    # Fix: the original ignored ``url`` entirely; keep the old hard-coded
    # address only as a backward-compatible default.
    if not url:
        url = 'https://www.dextools.io/app/uniswap/pair-explorer/0xa29fe6ef9592b5d408cca961d0fb9b1faf497d6d'

    driver = webdriver.Chrome(
        executable_path='C:/Users/denni/OneDrive/Desktop/DextoolScrapper/app/chromedriver.exe'
    )
    try:
        driver.get(url)

        page = 0
        all_results = []  # one [date, type, ...] list per scraped row

        while True:
            page += 1
            print('--- page:', page, '---')

            # Wait for the (re)rendered table before touching it.
            tableElement = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, 'ngx-datatable'))
            )
            driver.execute_script("arguments[0].scrollIntoView();", tableElement)

            # Scroll the table body to the bottom so lazy rows render.
            tableBodyelement = tableElement.find_element_by_tag_name('datatable-body-cell')
            driver.execute_script(
                "arguments[0].scrollTo(0, arguments[0].scrollHeight)", tableBodyelement
            )

            for row in tableElement.find_elements_by_tag_name('datatable-row-wrapper'):
                cells = row.find_elements_by_tag_name('datatable-body-cell')
                date = cells[0].text
                trade_type = cells[1].text  # renamed: ``type`` shadowed the builtin
                price_usd = cells[2].text
                price_eth = cells[3].text
                amount_cuminu = cells[4].text
                total_eth = cells[5].text
                # Maker column holds a link; keep its href, not the text.
                maker = cells[6].find_element_by_tag_name('a').get_attribute('href')
                print(date, trade_type, price_usd, price_eth, amount_cuminu, total_eth, maker)
                print('----')
                all_results.append(
                    [date, trade_type, price_usd, price_eth, amount_cuminu, total_eth, maker]
                )

            # Advance to the next page; a missing/unclickable control
            # means the page just scraped was the last one.
            try:
                next_page = driver.find_element_by_xpath('//a[@aria-label="go to next page"]')
                next_page.click()
                time.sleep(0.5)  # give the JavaScript time to swap the table contents
            except Exception:
                print("last page???")
                break

        # Local import kept: pandas is only needed for the final export.
        import pandas as pd
        df = pd.DataFrame(
            all_results,
            columns=['date', 'type', 'price_usd', 'price_eth',
                     'amount_cuminu', 'total_eth', 'maker'],
        )
        df.to_excel('results.xlsx')
    finally:
        driver.quit()  # fix: the browser process was never closed before
# ---
getData(None)

# 来源:https://stackoverflow.com/questions/68329390
复制相似问题