Scrapy: CrawlSpider does not parse responses

Published 2024-04-23 22:01:48


I have used CrawlSpider before. But after I modified the code to integrate with Redis and added my own middlewares to set the User-Agent and cookies, the spider no longer parses responses, so it generates no new requests and shuts down shortly after starting.

Here are the running stats:

[screenshot of the crawl stats log]

Even when I add code like this:

def parse_start_url(self, response):
    return self.parse_item(response)

it only parses the response from the first URL.

Here is my code.

The spider:

# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from yydzh.items import YydzhItem
from scrapy.spiders import Rule, CrawlSpider


class YydzhSpider(CrawlSpider):
    name = 'yydzhSpider'
    allowed_domains = ['yydzh.com']
    start_urls = ['http://www.yydzh.com/thread.php?fid=198']
    rules = (
        Rule(LinkExtractor(allow=r'thread\.php\?fid=198&page=([1-9]|1[0-9])#s',
                           restrict_xpaths="//div[@class='pages']"),
             callback='parse_item', follow=True),
    )

    # def parse_start_url(self, response):
    #     return self.parse_item(response)

    def parse_item(self, response):
        # Rows following the last header row (class 'tr2') of the thread table
        for each in response.xpath(
                "//*[@id='ajaxtable']//tr[@class='tr2'][last()]"
                "/following-sibling::tr[@class!='tr2']"):
            item = YydzhItem()  # one fresh item per row
            item['title'] = each.xpath('./td[2]/h3[1]/a//text()').extract()[0]
            item['author'] = each.xpath('./td[3]/a//text()').extract()[0]
            item['category'] = each.xpath('./td[2]/span[1]//text()').extract()[0]
            item['url'] = each.xpath('./td[2]/h3[1]//a/@href').extract()[0]
            yield item
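
To narrow this down, the pagination rule can be tested on its own in scrapy shell; if the extractor returns an empty list, parse_item is never scheduled as a callback at all. This is just a diagnostic sketch reusing the exact pattern and XPath from the rule above:

# First run: scrapy shell "http://www.yydzh.com/thread.php?fid=198"
from scrapy.linkextractors import LinkExtractor

le = LinkExtractor(
    allow=r'thread\.php\?fid=198&page=([1-9]|1[0-9])#s',
    restrict_xpaths="//div[@class='pages']",
)
# An empty list means the Rule never matches any pagination link
print(le.extract_links(response))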

The settings I think matter most:

SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
DOWNLOADER_MIDDLEWARES = {
    'yydzh.middlewares.UserAgentmiddleware': 500,
    'yydzh.middlewares.CookieMiddleware': 600,
}
COOKIES_ENABLED = True
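
CookieMiddleware below also relies on the Redis connection constants REDIS_HOST, REDIS_PORT and REDIS_PASS; they are defined along these lines (placeholder values here, not the real ones; SCHEDULER_PERSIST is scrapy_redis's optional flag to keep the queue between runs):

REDIS_HOST = '127.0.0.1'   # placeholder
REDIS_PORT = 6379
REDIS_PASS = 'secret'      # placeholder
SCHEDULER_PERSIST = True   # optional: keep the Redis queue between runs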

The middlewares: UserAgentmiddleware randomly changes the User-Agent so the crawler is less likely to be noticed by the server.

CookieMiddleware adds cookies to requests for pages that require a login to view.

import json
import logging
import random

import redis
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware

# agents (a list of User-Agent strings), the REDIS_* constants and the
# cookie-pool helpers init_cookie / remove_cookie / update_cookie come
# from my own project modules; those imports are omitted here.

logger = logging.getLogger(__name__)


class UserAgentmiddleware(UserAgentMiddleware):

    def process_request(self, request, spider):
        # Pick a random User-Agent for every outgoing request
        agent = random.choice(agents)
        request.headers["User-Agent"] = agent


class CookieMiddleware(RetryMiddleware):

    def __init__(self, settings, crawler):
        RetryMiddleware.__init__(self, settings)
        self.rconn = redis.Redis(host=REDIS_HOST, port=REDIS_PORT,
                                 password=REDIS_PASS, db=1, decode_responses=True)
        init_cookie(self.rconn, crawler.spider.name)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings, crawler)

    def process_request(self, request, spider):
        # Attach the cookies of a randomly chosen logged-in account
        redisKeys = self.rconn.keys()
        while len(redisKeys) > 0:
            elem = random.choice(redisKeys)
            if spider.name + ':Cookies' in elem:
                cookie = json.loads(self.rconn.get(elem))
                request.cookies = cookie
                request.meta["accountText"] = elem.split("Cookies:")[-1]
                break
            else:
                redisKeys.remove(elem)

    def process_response(self, request, response, spider):
        # The site returns this message when the session is not logged in or
        # lacks permission; refresh that account's cookie and retry the request.
        if '您没有登录或者您没有权限访问此页面' in str(response.body):
            accountText = request.meta["accountText"]
            remove_cookie(self.rconn, spider.name, accountText)
            update_cookie(self.rconn, spider.name, accountText)
            logger.warning("Cookie updated successfully! (account: %s)" % accountText)
            return request

        return response
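
init_cookie, remove_cookie and update_cookie come from my own cookie-pool helper module, which is not shown in full. Judging from the key handling above, they behave roughly like this; ACCOUNTS and login_and_get_cookies are hypothetical stand-ins, not the real names:

import json

def init_cookie(rconn, spider_name):
    # Make sure every account has cookies stored under
    # "<spider_name>:Cookies:<account>" before the crawl starts.
    for account in ACCOUNTS:                          # hypothetical credential list
        key = spider_name + ':Cookies:' + account
        if rconn.get(key) is None:
            cookies = login_and_get_cookies(account)  # hypothetical login routine
            rconn.set(key, json.dumps(cookies))

def remove_cookie(rconn, spider_name, account):
    rconn.delete(spider_name + ':Cookies:' + account)

def update_cookie(rconn, spider_name, account):
    cookies = login_and_get_cookies(account)          # hypothetical login routine
    rconn.set(spider_name + ':Cookies:' + account, json.dumps(cookies))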
