我试图弄清楚如何只提取列表中文本的一部分。
下面是我目前正在使用的脚本:
import scrapy.selector
import urlparse
from scrapy.spiders import Spider
from scrapy.http import Request
from MediaMarkt.items import MediamarktItem
def complete_url(string):
    """Return the absolute MediaMarkt.be URL for a site-relative path."""
    domain = "http://www.mediamarkt.be"
    return domain + string
def encode(text):
    """Encode *text* as UTF-8, silently dropping unencodable characters.

    The parameter was renamed from ``str``, which shadowed the builtin
    ``str`` type inside the function body. Callers pass it positionally,
    so the rename is backward-compatible.
    """
    return text.encode('utf8', 'ignore')
class MshbeSpider(Spider):
    """Crawl MediaMarkt.be TV category listings and yield one item per product."""
    name = "mshbetv"
    start_urls = ['http://www.mediamarkt.be/mcs/productlist/_TV,98952,452540.html?langId=-17']

    def parse(self, response):
        # Follow every sub-category link found in the filter sidebar.
        for item in response.xpath('//*[@id="filters"]/form/fieldset[2]/div[2]/ul[2]/li/a/@href'):
            yield Request(item.extract(), callback=self.parse_category)

    def parse_category(self, response):
        # One <div> per product card in the listing.
        for item in response.xpath('//ul[@class="products-list"]/li/div'):
            mshtv = MediamarktItem()
            mshtv['item_3_price'] = encode(item.xpath('normalize-space(.//aside/div/div/div/text())').extract()[0]).replace("-", "")
            mshtv['item_2_name'] = encode(item.xpath('normalize-space(.//div/h2/a/text())').extract()[0])
            # .select() is deprecated in Scrapy; .xpath() is the equivalent call.
            mshtv['item_a_link'] = complete_url(item.xpath('.//div/h2/a/@href').extract()[0])
            mshtv['item_4_avai'] = encode(item.xpath('normalize-space(.//aside/div/div/ul/span/text())').extract()[0])
            mshtv['item_1_cat'] = encode(item.xpath('normalize-space(//*[@id="category"]/hgroup/h1/text())').extract()[0])
            yield mshtv
        # Follow pagination. extract() returns [] on the last page (no "next"
        # link), so guard instead of blindly indexing [0] and raising IndexError.
        next_page = response.xpath('//li[@class="pagination-next"]/a/@href').extract()
        if next_page:
            yield Request(complete_url(next_page[0]), callback=self.parse_category)

# [question text, continued] The `mshtv['item_2_name']` field is where I want to
# extract only a specific part of the text. I tried everything I could find,
# without success.
例如,在我当前的脚本中,`mshtv['item_2_name']` 返回的结果是完整的产品名称(包含品牌、描述和型号)。
我手头有一份正确的制造商型号清单,产品名称中包含的正是这些型号。
我想要实现的是,在运行我的脚本时,我只获得制造商的型号作为返回。
这有可能吗?
发布于 2016-05-11 15:23:44
把正确的型号清单放进一个 dict 里,然后将包含型号的字符串按空格拆分成单词,逐个单词到 dict 中查找即可。
# Build a constant-time lookup table of the known model numbers.
d = dict.fromkeys(models, True)

# Split the product name on spaces and print every word that is a known model.
for word in mshtv['item_2_name'].split(" "):
    if word in d:
        print(word)

# Posted 2016-05-11 16:59:49
我尝试了上面回答提出的解决方案。它起作用了,但现在有些地方出了问题:我只得到 13 个结果,而之前我能得到 200 多个结果。
import scrapy.selector
import urlparse
from scrapy.spiders import Spider
from scrapy.http import Request
from MediaMarkt.items import MediamarktItem
# Known-good manufacturer model numbers to look for inside product names.
models = ["ue78js9500lxxn","UE60J6200AWXXN","kdl40w705cbaep","KDL55W755CBAEP","KDL40W705CBAEP"]

# Map every model number to True so membership tests are O(1) dict lookups.
d = dict.fromkeys(models, True)
def complete_url(string):
    """Turn a site-relative href into a fully qualified MediaMarkt.be URL."""
    base = "http://www.mediamarkt.be"
    return base + string
def encode(text):
    """Encode *text* as UTF-8, silently dropping unencodable characters.

    The parameter was renamed from ``str``, which shadowed the builtin
    ``str`` type inside the function body. Callers pass it positionally,
    so the rename is backward-compatible.
    """
    return text.encode('utf8', 'ignore')
class MshbeSpider(Spider):
    """Crawl MediaMarkt.be TV listings, tagging each product with a known model number."""
    name = "mshbetv"
    start_urls = ['http://www.mediamarkt.be/mcs/productlist/_TV,98952,452540.html?langId=-17']

    def parse(self, response):
        # Follow every sub-category link found in the filter sidebar.
        for item in response.xpath('//*[@id="filters"]/form/fieldset[2]/div[2]/ul[2]/li/a/@href'):
            yield Request(item.extract(), callback=self.parse_category)

    def parse_category(self, response):
        # One <div> per product card in the listing.
        for item in response.xpath('//ul[@class="products-list"]/li/div'):
            mshtv = MediamarktItem()
            mshtv['item_3_price'] = encode(item.xpath('normalize-space(.//aside/div/div/div/text())').extract()[0]).replace("-", "")
            mshtv['item_2_name'] = encode(item.xpath('normalize-space(.//div/h2/a/text())').extract()[0])
            # .select() is deprecated in Scrapy; .xpath() is the equivalent call.
            mshtv['item_a_link'] = complete_url(item.xpath('.//div/h2/a/@href').extract()[0])
            mshtv['item_4_avai'] = encode(item.xpath('normalize-space(.//aside/div/div/ul/span/text())').extract()[0])
            mshtv['item_1_cat'] = encode(item.xpath('normalize-space(//*[@id="category"]/hgroup/h1/text())').extract()[0])
            # Tag the item with the first recognised model number, if any.
            for word in mshtv['item_2_name'].split(" "):
                if word in d:
                    mshtv['item_model'] = word
                    break
            # BUG FIX: yield once per PRODUCT, outside the word loop and the
            # `if word in d` test. Yielding inside the `if` is what reduced
            # the output from 200+ items to only the 13 that matched a model.
            yield mshtv
        # extract() returns [] on the last page (no "next" link); guard
        # against IndexError instead of blindly indexing [0].
        next_page = response.xpath('//li[@class="pagination-next"]/a/@href').extract()
        if next_page:
            yield Request(complete_url(next_page[0]), callback=self.parse_category)

# Source: https://stackoverflow.com/questions/37166863
复制相似问题