Scrapy spider not including all requested pages
I have a script written in Scrapy whose main purpose is to scrape reviews from Yelp. In short, I can feed it a list of Yelp pages and it should return the reviews from every page. The current script is below:
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
import re

from yelp2.items import YelpReviewItem


RESTAURANTS = ['sixteen-chicago']


def createRestaurantPageLinks(self, response):
    reviewsPerPage = 40
    sel = Selector(response)
    totalReviews = int(sel.xpath('//div[@class="rating-info clearfix"]//span[@itemprop="reviewCount"]/text()').extract()[0].strip().split(' ')[0])
    pages = [Request(url=response.url + '?start=' + str(reviewsPerPage*(n+1)), callback=self.parse) for n in range(totalReviews/reviewsPerPage)]
    return pages


class Yelp2aSpider(Spider):
    name = "yelp2a"
    allowed_domains = ["yelp.com"]
    start_urls = ['http://www.yelp.com/biz/%s' % s for s in RESTAURANTS]

    def parse(self, response):
        requests = []
        sel = Selector(response)
        reviews = sel.xpath('//div[@class="review review-with-no-actions"]')
        items = []

        for review in reviews:
            item = YelpReviewItem()
            item['venueName'] = sel.xpath('//meta[@property="og:title"]/@content').extract()
            item['reviewer'] = review.xpath('.//li[@class="user-name"]/a/text()').extract()
            item['reviewerLoc'] = review.xpath('.//li[@class="user-location"]/b/text()').extract()
            item['rating'] = review.xpath('.//meta[@itemprop="ratingValue"]/@content').extract()
            item['reviewDate'] = review.xpath('.//meta[@itemprop="datePublished"]/@content').extract()
            item['reviewText'] = review.xpath('.//p[@itemprop="description"]/text()').extract()
            item['url'] = response.url
            items.append(item)
        return items

        if response.url.find('?start=') == -1:
            requests += createRestaurantPageLinks(self, response)
        return requests
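(For reference, YelpReviewItem is imported from yelp2/items.py and not shown here; a minimal sketch of what that item presumably looks like, with one Field per attribute the spider populates:)

    from scrapy.item import Item, Field

    class YelpReviewItem(Item):
        # field names assumed from the assignments in parse() above
        venueName = Field()
        reviewer = Field()
        reviewerLoc = Field()
        rating = Field()
        reviewDate = Field()
        reviewText = Field()
        url = Field()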
The problem I'm running into, however, is that the script scrapes the reviews from every requested page except the first one. If I comment out the last "if" statement, it only scrapes the first page. I suspect all I need is a simple "else", but I'm stumped... any help is much appreciated!
EDIT: This is the current state of the code, based on the help received...
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
import re

from yelp2.items import YelpReviewItem


RESTAURANTS = ['sixteen-chicago']


def createRestaurantPageLinks(self, response):
    reviewsPerPage = 40
    sel = Selector(response)
    totalReviews = int(sel.xpath('//div[@class="rating-info clearfix"]//span[@itemprop="reviewCount"]/text()').extract()[0].strip().split(' ')[0])
    pages = [Request(url=response.url + '?start=' + str(reviewsPerPage*(n+1)), callback=self.parse) for n in range(totalReviews/reviewsPerPage)]
    return pages


class Yelp2aSpider(Spider):
    name = "yelp2a"
    allowed_domains = ["yelp.com"]
    start_urls = ['http://www.yelp.com/biz/%s' % s for s in RESTAURANTS]

    def parse(self, response):
        requests = []
        sel = Selector(response)
        reviews = sel.xpath('//div[@class="review review-with-no-actions"]')
        items = []

        for review in reviews:
            item = YelpReviewItem()
            item['venueName'] = sel.xpath('//meta[@property="og:title"]/@content').extract()
            item['reviewer'] = review.xpath('.//li[@class="user-name"]/a/text()').extract()
            item['reviewerLoc'] = review.xpath('.//li[@class="user-location"]/b/text()').extract()
            item['rating'] = review.xpath('.//meta[@itemprop="ratingValue"]/@content').extract()
            item['reviewDate'] = review.xpath('.//meta[@itemprop="datePublished"]/@content').extract()
            item['reviewText'] = review.xpath('.//p[@itemprop="description"]/text()').extract()
            item['url'] = response.url
        yield item

        if response.url.find('?start=') == -1:
            requests += createRestaurantPageLinks(self, response)
        for request in requests:
            yield request
As mentioned in the comments below, running the code as it now stands crawls every page I want, but it returns only one review per page rather than all of them. I tried changing yield item to yield items, but that returns an error for every crawled URL: ERROR: Spider must return Request, BaseItem or None, got 'list' in <GET http://www.yelp.com/biz/[...]>.
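(For reference, that error simply means a spider callback has to produce Item/Request objects one at a time rather than a whole list. A minimal sketch of the distinction, where parse_reviews is just an illustrative name and items is an already-built list of YelpReviewItem objects:)

    def parse_reviews(items):
        # Yielding the whole list would reproduce the "got 'list'" error:
        #     yield items
        # Yielding each element individually is what Scrapy expects:
        for item in items:
            yield item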
3 Answers
If you are returning items or requests from more than one place, you should replace your return statements with yield statements. Doing so turns your function into a generator, which hands back (i.e. "yields") a new element each time without exiting the function before all of them have been produced. As your code stands now, the function ends after the first return and never gets around to sending the requests for the follow-up pages.
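(To see the difference outside of Scrapy, a tiny standalone example: the return version gives back one value and exits, while the yield version produces every value.)

    def with_return():
        for n in range(3):
            return n              # exits the function on the first pass through the loop

    def with_yield():
        for n in range(3):
            yield n               # hands back n, then resumes the loop on the next request

    print(with_return())          # 0
    print(list(with_yield()))     # [0, 1, 2]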
Addendum: you should yield one item or request at a time, so:

Replace

    for review in reviews:
        item = ...
    return items

with

    for review in reviews:
        item = ...
        yield item

and replace

    return requests

with

    for request in requests:
        yield request
You need to reorganize your methods a bit. First, parse the restaurant page in the parse() method. Then, return the requests for the review pages and handle their responses in a separate method, e.g. parse_review():
import re

from scrapy.item import Item, Field
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request

from yelp2.items import YelpReviewItem


RESTAURANTS = ['sixteen-chicago']


class Yelp2aSpider(Spider):
    name = "yelp2a"
    allowed_domains = ["yelp.com"]
    start_urls = ['http://www.yelp.com/biz/%s' % s for s in RESTAURANTS]

    def parse(self, response):
        reviewsPerPage = 40
        sel = Selector(response)
        totalReviews = int(sel.xpath('//div[@class="rating-info clearfix"]//span[@itemprop="reviewCount"]/text()').extract()[0].strip().split(' ')[0])
        pages = [Request(url=response.url + '?start=' + str(reviewsPerPage*(n+1)), callback=self.parse_review) for n in range(totalReviews/reviewsPerPage)]
        return pages

    def parse_review(self, response):
        sel = Selector(response)
        reviews = sel.xpath('//div[@class="review review-with-no-actions"]')
        for review in reviews:
            item = YelpReviewItem()
            item['venueName'] = sel.xpath('//meta[@property="og:title"]/@content').extract()
            item['reviewer'] = review.xpath('.//li[@class="user-name"]/a/text()').extract()
            item['reviewerLoc'] = review.xpath('.//li[@class="user-location"]/b/text()').extract()
            item['rating'] = review.xpath('.//meta[@itemprop="ratingValue"]/@content').extract()
            item['reviewDate'] = review.xpath('.//meta[@itemprop="datePublished"]/@content').extract()
            item['reviewText'] = review.xpath('.//p[@itemprop="description"]/text()').extract()
            item['url'] = response.url
            yield item
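(As a quick sanity check of the paging arithmetic in parse(), which relies on Python 2 integer division; the review count of 128 below is just a made-up example:)

    reviewsPerPage = 40
    totalReviews = 128    # hypothetical count, for illustration only
    starts = [reviewsPerPage * (n + 1) for n in range(totalReviews / reviewsPerPage)]
    print(starts)         # [40, 80, 120] -- start=0 is the page already fetched via start_urls
                          # (on Python 3 this would need // instead of /)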
In the end it really did come down to the indentation of one yield statement. Below is the code that ended up doing what I wanted.
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
import re

from yelp2.items import YelpReviewItem


RESTAURANTS = ['sixteen-chicago']


def createRestaurantPageLinks(self, response):
    reviewsPerPage = 40
    sel = Selector(response)
    totalReviews = int(sel.xpath('//div[@class="rating-info clearfix"]//span[@itemprop="reviewCount"]/text()').extract()[0].strip().split(' ')[0])
    pages = [Request(url=response.url + '?start=' + str(reviewsPerPage*(n+1)), callback=self.parse) for n in range(totalReviews/reviewsPerPage)]
    return pages


class YelpXSpider(Spider):
    name = "yelpx"
    allowed_domains = ["yelp.com"]
    start_urls = ['http://www.yelp.com/biz/%s' % s for s in RESTAURANTS]

    def parse(self, response):
        requests = []
        sel = Selector(response)
        reviews = sel.xpath('//div[@class="review review-with-no-actions"]')
        items = []

        for review in reviews:
            item = YelpReviewItem()
            item['venueName'] = sel.xpath('//meta[@property="og:title"]/@content').extract()
            item['reviewer'] = review.xpath('.//li[@class="user-name"]/a/text()').extract()
            item['reviewerLoc'] = review.xpath('.//li[@class="user-location"]/b/text()').extract()
            item['rating'] = review.xpath('.//meta[@itemprop="ratingValue"]/@content').extract()
            item['reviewDate'] = review.xpath('.//meta[@itemprop="datePublished"]/@content').extract()
            item['reviewText'] = review.xpath('.//p[@itemprop="description"]/text()').extract()
            item['url'] = response.url
            yield item

        if response.url.find('?start=') == -1:
            requests += createRestaurantPageLinks(self, response)
        for request in requests:
            yield request
Thanks to everyone for helping out a newbie!