import urllib
from bs4 import BeautifulSoup
import requests
import readability
import time
import http.client
seed_url = "https://en.wikipedia.org/wiki/Sustainable_energy"
root_url = "https://en.wikipedia.org"
max_limit=5
#file = open("file_crawled.txt", "w")
def get_urls(seed_url):
    r = requests.get(seed_url)
    soup = BeautifulSoup(r.content, "html.parser")
    links = soup.findAll('a', href=True)
    valid_links = []
    for links in links:
        if 'wiki' in links['href'] and '.' not in links['href'] and ':' not in links['href'] and '#' not in links['href']:
            valid_links.append(root_url + links['href'])
    return valid_links
visited=[]
def crawl_dfs(seed_url, max_depth):
    depth = 1
    file1 = open("file_crawled.txt", "w+")
    visited.append(root_url)
    if depth <= max_depth:
        children = get_urls(seed_url)
        for child in children:
            if child not in visited:
                file1.write(child)
                time.sleep(1)
                visited.append(child)
                crawl_dfs(child, max_depth - 1)
    file1.close()
crawl_dfs(seed_url,max_limit)
DFS crawl with Python 3.6. Please help me correct my code: the crawled links never get written to the file I open as file1, and I don't know why. I have tried everything I can think of.
Open and close the file only once: open it before the first call to crawl_dfs() and close it after that first call returns. As written, every recursive call to crawl_dfs() reopens file_crawled.txt in "w+" mode, which truncates it, so each level of recursion wipes out whatever the previous calls wrote.
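Below is a minimal sketch of that change. It reuses get_urls(), seed_url, root_url, max_limit and visited exactly as defined in the question; passing the open file handle into crawl_dfs() and appending a newline to each written link are my own choices rather than anything stated above.

def crawl_dfs(seed_url, max_depth, file1):
    # file1 is an already-open handle; the function no longer opens or closes it,
    # so recursive calls can no longer truncate what earlier calls wrote
    if max_depth >= 1:
        for child in get_urls(seed_url):
            if child not in visited:
                file1.write(child + "\n")   # one link per line
                time.sleep(1)               # small pause between requests
                visited.append(child)
                crawl_dfs(child, max_depth - 1, file1)

# open once, crawl, close once
file1 = open("file_crawled.txt", "w")
visited.append(root_url)
crawl_dfs(seed_url, max_limit, file1)
file1.close()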