Python - Scrapy中的循环
我遇到一个小问题:我需要让scrapy循环执行一定的次数。这样做的原因是我在提交一个POST请求并抓取结果。不过,结果并不在一个单独的页面上,因此需要再次提交POST请求,并把“cpipage”这个参数加1。cpipage就是页面的编号。下面是我的爬虫代码,我把网址改成了nourl.com,因为这不是我抓取的网站。
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.spider import Spider
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from scrapy.http import FormRequest, Request
#from etmd.items import Etmditems
import scrapy
class EtmdSpider(Spider):
    """Spider that POSTs a search form to dp.asp and scrapes the result cells.

    ``parse`` submits the form once (page 4 hard-coded in ``cpipage``);
    ``parse_data`` extracts every ``<td>`` from the response and appends the
    raw Python repr to ``exported.txt``.
    """
    name = "etmd"
    start_urls = ["http://b2.nourl.com/dp.asp"]

    def parse(self, response):
        """Submit the search form; results are handled by ``parse_data``."""
        url = "http://b2.nourl.com/dp.asp"
        # Form payload reproduced verbatim — the server matches these exact
        # field names/values. "cpipage" selects the result page number.
        payload = {"AppKey": "94921000e1999f84a518725", "ComparisonType1_1": "LIKE", "Value1_1": "", "MatchNull1_1" : "N", "ComparisonType2_1" : "LIKE", "MatchNull2_1" : "N", "Value2_1" : "", "ComparisonType3_1": "=", "MatchNull3_1" : "N", "Value3_1" : "", "x":"69", "y":"27", "FieldName1" : "County", "Operator1": "OR", "NumCriteriaDetails1": "1", "Operator2" : "OR", "NumCriteriaDetails2" : "1", "FieldName3": "Year", "Operator3" : "OR", "NumCriteriaDetails3": "1", "PageID" : "2", "GlobalOperator": "AND", "NumCriteria" : "3", "Search" : "1", "cpipage": "4"}
        return FormRequest(url, formdata=payload, callback=self.parse_data)

    def parse_data(self, response):
        """Extract all table cells and append them to exported.txt."""
        sel = Selector(response)
        items = [sel.xpath('//td').extract()]
        # FIX: the original opened the file and never closed it, leaking the
        # handle (and risking unflushed data). A context manager guarantees
        # the file is closed even if write() raises.
        with open("exported.txt", "a") as exportfile:
            exportfile.write(str(items))
        # FIX: `print items` is Python-2-only syntax; the parenthesized form
        # behaves the same under Python 2 (single argument) and Python 3.
        print(items)
在这个 payload 字典里有一个 cpipage 参数，在这个例子中它的值是“4”，但我需要让它一直递增到 175。有没有办法在我现在的代码里做到这一点？或者能否通过脚本来运行 scrapy 爬虫，而不是在命令行里运行？
我已经尝试过一个for循环:
# NOTE(review): this is the asker's broken attempt, kept verbatim.
# BUG being asked about: `return` exits the enclosing method on the very
# first iteration, so only the request for i == 0 is ever produced — the
# loop never reaches i == 1..174. (The answer below fixes this by
# collecting the requests in a list, or by yielding them one at a time.)
for i in range(175):
url = "http://b2.nourl.com/dp.asp"
payload = {"AppKey": "94921000e1999f84a518725", "ComparisonType1_1": "LIKE", "Value1_1": "", "MatchNull1_1" : "N", "ComparisonType2_1" : "LIKE", "MatchNull2_1" : "N", "Value2_1" : "", "ComparisonType3_1": "=", "MatchNull3_1" : "N", "Value3_1" : "", "x":"69", "y":"27", "FieldName1" : "County", "Operator1": "OR", "NumCriteriaDetails1": "1", "Operator2" : "OR", "NumCriteriaDetails2" : "1", "FieldName3": "Year", "Operator3" : "OR", "NumCriteriaDetails3": "1", "PageID" : "2", "GlobalOperator": "AND", "NumCriteria" : "3", "Search" : "1", "cpipage": "%i" %i}
return (FormRequest(url, formdata = payload, callback = self.parse_data))
1 个回答
1
`return` 语句会立即结束这个方法的执行。
你可以选择返回所有请求的列表:
def parse(self, response):
    """Return one FormRequest per result page (cpipage 0 through 174)."""
    url = "http://b2.nourl.com/dp.asp"
    # Fields shared by every page; only "cpipage" varies per request.
    base_payload = {"AppKey": "94921000e1999f84a518725", "ComparisonType1_1": "LIKE", "Value1_1": "", "MatchNull1_1" : "N", "ComparisonType2_1" : "LIKE", "MatchNull2_1" : "N", "Value2_1" : "", "ComparisonType3_1": "=", "MatchNull3_1" : "N", "Value3_1" : "", "x":"69", "y":"27", "FieldName1" : "County", "Operator1": "OR", "NumCriteriaDetails1": "1", "Operator2" : "OR", "NumCriteriaDetails2" : "1", "FieldName3": "Year", "Operator3" : "OR", "NumCriteriaDetails3": "1", "PageID" : "2", "GlobalOperator": "AND", "NumCriteria" : "3", "Search" : "1"}
    # Build every request up front and hand the whole list back to scrapy.
    return [
        FormRequest(url,
                    formdata=dict(base_payload, cpipage="%i" % page),
                    callback=self.parse_data)
        for page in range(175)
    ]
或者使用 `yield` 把它们一个一个地生成出来：
def parse(self, response):
    """Yield one FormRequest per result page (cpipage 0 through 174)."""
    url = "http://b2.nourl.com/dp.asp"
    # Fields shared by every page; "cpipage" is filled in per iteration.
    common_fields = {"AppKey": "94921000e1999f84a518725", "ComparisonType1_1": "LIKE", "Value1_1": "", "MatchNull1_1" : "N", "ComparisonType2_1" : "LIKE", "MatchNull2_1" : "N", "Value2_1" : "", "ComparisonType3_1": "=", "MatchNull3_1" : "N", "Value3_1" : "", "x":"69", "y":"27", "FieldName1" : "County", "Operator1": "OR", "NumCriteriaDetails1": "1", "Operator2" : "OR", "NumCriteriaDetails2" : "1", "FieldName3": "Year", "Operator3" : "OR", "NumCriteriaDetails3": "1", "PageID" : "2", "GlobalOperator": "AND", "NumCriteria" : "3", "Search" : "1"}
    for page_no in range(175):
        form = dict(common_fields)
        form["cpipage"] = "%i" % page_no
        # yield (not return) keeps the generator running for all 175 pages.
        yield FormRequest(url, formdata=form, callback=self.parse_data)