File storage problem with a Python web crawler

2 votes
1 answer
711 views
Asked 2025-04-16 23:53

I am using a web crawler to scrape data and store the results (tweets from a Twitter page) as a separate html file for each user. I plan to parse these html files later and load the data into a database for analysis. However, I have run into a strange problem.

When I run the following program, which is a small part of the whole crawler, I am able to generate a separate html file for each follower:

import re
import urllib2
import twitter

start_follower = "NYTimesKrugman"
depth = 3

searched = set()

api = twitter.Api()

def crawl(follower, in_depth):
    if in_depth > 0:
        searched.add(follower)
        directory = "C:\\Python28\\Followertest1\\" + follower + ".html"
        output = open(directory, 'a')
        output.write(follower)
        output.write('\n\n')
        users = api.GetFriends(follower)
        names = set([str(u.screen_name) for u in users])
        names -= searched
        for name in list(names)[0:5]:
            crawl(name, in_depth-1) 

crawl(start_follower, depth)

for x in searched:
    print x
print "Program is completed."

However, when I run the full crawler, a separate file is not generated for each follower:

import twitter
import urllib
from BeautifulSoup import BeautifulSoup
import re
import time

start_follower = "NYTimeskrugman" 
depth = 2
searched = set()

api = twitter.Api()


def add_to_U(user):
    U.append(user)

def site(follower): #creates a twitter site url in string format based on the follower username
    followersite = "http://mobile.twitter.com/" + follower
    return followersite

def getPage(follower): #obtains access to a webpage
    url = site(follower)
    response = urllib.urlopen(url)
    return response

def getSoup(response): #creates the parsing module
    html = response.read()
    soup = BeautifulSoup(html)
    return soup

def gettweets(soup, output):
    tags = soup.findAll('div', {'class' : "list-tweet"})#to obtain tweet of a follower
    for tag in tags: 
        a = tag.renderContents()
        b = str(a)
        output.write(b)
        output.write('\n\n')

def are_more_tweets(soup):#to check whether there is more than one page on mobile twitter 
    links = soup.findAll('a', {'href': True}, {id: 'more_link'})
    for link in links:
        b = link.renderContents()
        test_b = str(b)
        if test_b.find('more') != -1:
            return True
    return False

def getnewlink(soup): #to get the link to go to the next page of tweets on twitter 
    links = soup.findAll('a', {'href': True}, {id : 'more_link'})
    for link in links:
        b = link.renderContents()
        if str(b) == 'more':
            c = link['href']
            d = 'http://mobile.twitter.com' +c
            return d

def crawl(follower, in_depth): #main method of sorts
    if in_depth > 0:
        searched.add(follower)
        directory = "C:\\Python28\\Followertest2\\" + follower + ".html"
        output = open(directory, 'a')
        output.write(follower)
        output.write('\n\n')
        a = getPage(follower)
        soup = getSoup(a)
        gettweets(soup, output)
        tweets = are_more_tweets(soup)
        while(tweets): 
            b = getnewlink(soup)
            red = urllib.urlopen(b)
            html = red.read()
            soup = BeautifulSoup(html)
            gettweets(soup, output)
            tweets = are_more_tweets(soup)
        users = api.GetFriends(follower)
        names = set([str(u.screen_name) for u in users])
        names -= searched
        for name in list(names)[0:5]:
            print name
            crawl(name, in_depth - 1)

crawl(start_follower, depth)
print("Program done. Look at output file.")

More specifically, I only seem to get separate html files for the first five followers, and then no new files are created. I would appreciate some help!

1 Answer

1

The depth value is different in the snippet and in the full code (in the full code you only get one level of recursion). In addition, you only take the first five names from the friends list: for name in list(names)[0:5]: So you end up with six people in total: the initial follower, plus their first five friends.
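Here is a minimal sketch of the arithmetic (max_files is a hypothetical helper, not part of the original crawler): crawl() writes one file per call and only recurses while in_depth > 0, and list(names)[0:5] caps each level at five children, so the maximum number of files is:

def max_files(in_depth, branching=5):
    # crawl() writes one html file per call, then recurses into at
    # most five friends while in_depth > 0; duplicates removed via
    # 'searched' can only lower these counts.
    if in_depth <= 0:
        return 0
    return 1 + branching * max_files(in_depth - 1, branching)

print(max_files(2))  # 6  -> what the full crawler produces with depth = 2
print(max_files(3))  # 31 -> the upper bound for the snippet with depth = 3

Raising depth in the full crawler to match the snippet (or widening the [0:5] slice) should make the two programs create files for the same set of followers.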
