当试图通过 ('url': response.request.url) 将 url 保存到字典中时,Scrapy 保存下来的全部是同一个 url (http://localhost:8050/render.html)。
我尝试过添加额外的参数,这些参数将传递真正的URL,但没有效果。
from scrapy import Spider
from scrapy.http import FormRequest
from scrapy.utils.response import open_in_browser
from scrapy import Request
import scrapy
from scrapy_splash import SplashRequest
class QuotesJSSpider(scrapy.Spider):
    """Spider that logs into Facebook and scrapes pages rendered via Splash."""

    name = 'quotesjs'
    start_urls = ('https://www.facebook.com/login',)

    # Per-spider settings wiring up scrapy-splash: the Splash endpoint,
    # its downloader/spider middlewares, and a Splash-aware dupe filter.
    custom_settings = {
        'SPLASH_URL': 'http://localhost:8050',
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        'SPIDER_MIDDLEWARES': {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
    }
def parse(self, response):
token = response.xpath('//*[@id="u_0_a"]').extract_first()
return FormRequest.from_response(response,
formdata={'lgndim' : token,
'pass': 'xxx',
'email': 'xxxx'},
callback=self.load_sites)
def load_sites(self, response):
urls = [
'https://www.facebook.com/page1/about',
'https://www.facebook.com/page2/about',
]
for url in urls:
yield SplashRequest(url=url, callback=self.scrape_pages)
def scrape_pages(self, response):
shops = {
'company_name' : response.css('title::text').extract(),
'url' : response.request.url,
}
yield shops结果应该如下:'url‘:https://www.facebook.com/page1/about’
而不是这个:'url': 'http://localhost:8050/render.html'
发布于 2019-02-02 21:46:33
原始请求的url可以在这里获得:response.request._original_url。
为了避免不得不访问内部属性,您还可以尝试:
def load_sites(self, response):
    """Queue one Splash request per page, carrying the real URL in meta."""
    urls = [
        'https://www.facebook.com/page1/about',
        'https://www.facebook.com/page2/about',
    ]
    for url in urls:
        request = SplashRequest(
            url=url,
            callback=self.scrape_pages,
            meta={'original_url': url},
        )
        yield request
def scrape_pages(self, response)
shops = {
'company_name' : response.css('title::text').extract(),
'url' : response.meta['original_url'],
}
yield shops def scrape_pages(self, response):
shops = {
'company_name' : response.css('title::text').extract(),
'url' : response.url,
}https://stackoverflow.com/questions/54485316
复制相似问题