我是个新手。我正在尝试使用Selenium和scrapy从url列表中获取。 我试过这个代码:
class TechstarSpider(scrapy.Spider):
name = "techstar"
allowed_domains = ["techstar.com"]
start_urls = [l.strip() for l in open('pages.txt').readlines()]
def __init__(self, **kwargs):
super(TechstarSpider, self).__init__(**kwargs)
self.driver = webdriver.Firefox()
def parse(self, response):
self.driver.get(response.url)
time.sleep(10)
resp = TextResponse(url=self.driver.current_url, body=self.driver.page_source, encoding='utf-8')
item = StartupweekendataItem()
for data in resp.xpath('//*[@id="main"]/div[2]'):
item['Title'] = data.xpath('//*[@id="main"]/div[2]/div[1]/div/div/ol/h3/text()')[0].extract(),
item['Date'] = data.xpath('//*[@id="main"]/div[2]/div[1]/div/div/ol/h3/span/text()')[0].extract(),
item['Judge1'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][1]/div[2]/h2/text()')[0].extract(),
item['Judge1FB'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][1]/div[2]/h2/a[@class="fa fa-facebook font-18"]/@href')[0].extract(),
item['Judge1Linked'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][1]/div[2]/h2/a[@class="fa fa-linkedin font-18"]/@href')[0].extract(),
item['Judge2'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][2]/div[2]/h2/text()')[0].extract(),
item['Judge2FB'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][2]/div[2]/h2/a[@class="fa fa-facebook font-18"]/@href')[0].extract(),
item['Judge2Linked'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][1]/div[2]/h2/a[@class="fa fa-linkedin font-18"]/@href')[0].extract(),
item['Coach1'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][1]//h2//text()')[0].extract(),
item['Coach1FB'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][1]//a[@class="fa fa-facebook font-18"]//@href')[0].extract(),
item['Coach1Linked'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][1]//a[@class="fa fa-linkedin font-18"]//@href')[0].extract(),
item['Coach2'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][2]//h2//text()')[0].extract(),
item['Coach2FB'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][2]//a[@class="fa fa-facebook font-18"]//@href')[0].extract(),
item['Coach2Linked'] = data.xpath('//*[@id="event-coaches"]/following-sibling::div[@class="row"][2]//a[@class="fa fa-linkedin font-18"]//@href')[0].extract(),
item['Organizer1'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[1]/h5/text()')[0].extract(),
item['Organizer1FB'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[1]/a["fa fa-facebook font-18"]/@href')[0].extract(),
item['Organizer1Linked'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[1]/a["fa fa-linkedin font-18"]/@href')[0].extract(),
item['Organizer2'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[2]/h5/text()')[0].extract(),
item['Organizer2FB'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[2]/a["fa fa-facebook font-18"]/@href')[0].extract(),
item['Organizer2Linked'] = data.xpath('//*[@id="event-organizers"]/following-sibling::div[2]/div[@class="large-15 columns"]/ul/li[2]/a["fa fa-linkedin font-18"]/@href')[0].extract(),
item['Facilitator'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[1]//h2//text()")[0].extract(),
item['FacilitatorFB'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[1]//a['fa fa-facebook font-18']//@href")[0].extract(),
item['FacilitatorLinked'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[1]//a['fa fa-linkedin font-18']//@href")[0].extract(),
item['Staff1'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[3]//h2//text()")[0].extract(),
item['Staff1FB'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[3]//a[@class'fa fa-facebook font-18']//@href")[0].extract(),
item['Staff1Linked'] = data.xpath("//h2[contains(string(), 'Facilitators')]/../../following-sibling::div[3]//a[@class='fa fa-linkedin font-18']//@href")[0].extract(),
yield item
在页面.txt包含如下URL列表:
^{pr2}$但它抛出了一个错误:
2017-11-12 12:38:26 [scrapy] ERROR: Spider error processing <GET http://communities.techstars.com/bolivia/santacruz/startup-weekend/10815> (referer: None)
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/usr/lib/python2.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/usr/lib/python2.7/site-packages/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/usr/lib/python2.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/usr/lib/python2.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/habenn/Projects/Aleksandr/startupweekendata/startupweekendata/spiders/techstar.py", line 53, in parse
item['Judge1'] = data.xpath('//*[@id="event-judges"]/following-sibling::div[@class="row"][1]/div[2]/h2/text()')[0].extract(),
File "/usr/lib/python2.7/site-packages/parsel/selector.py", line 61, in __getitem__
o = super(SelectorList, self).__getitem__(pos)
IndexError: list index out of range
问题:它抛出了一个错误,我如何修复它?代码太长了,你能帮我简化代码吗? 任何帮助都会很有帮助的。在
目前没有回答
相关问题 更多 >
编程相关推荐