Python: downloading several files in turn

1 vote
2 answers
1784 views
Asked on 2025-04-16 02:00

In this script, a loop downloads files and saves them (using curl). The loop runs too fast, though, so the download and save operations don't have enough time to complete, and the resulting files come out broken and incomplete.

def get_images_thread(table):
    class LoopThread(threading.Thread):
        def run(self):
            global db
            c = db.cursor()
            c.execute("""SELECT * FROM js_stones ORDER BY stone_id LIMIT 1""")
            ec = EasyCurl(table)

            while 1:
                stone = c.fetchone()
                if stone is None:
                    break
                img_fname = stone[2]
                print img_fname
                url = "http://www.jstone.it/" + img_fname
                fname = url.strip("/").split("/")[-1].strip()
                ec.perform(url,
                           filename="D:\\Var\\Python\\Jstone\\downloadeble_pictures\\" + fname,
                           progress=ec.textprogress)

2 Answers

-1

If I understand your problem correctly,

from time import sleep
sleep(1)

should "fix" your problem (hacky as it is!). The relevant documentation can be found here. I'd check first that this is really what's going wrong, though, since the odds that a matter of a few seconds' pause is what's corrupting your downloaded files are practically zero. More detail would help.
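For instance, a minimal sketch of where the pause could go, reusing the cursor c and the EasyCurl object ec from the question's own loop:

from time import sleep

while 1:
    stone = c.fetchone()
    if stone is None:
        break
    url = "http://www.jstone.it/" + stone[2]
    fname = url.strip("/").split("/")[-1]
    ec.perform(url, filename=fname, progress=ec.textprogress)
    sleep(1)  # give the current download a moment before starting the next one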

os.waitpid()

might also help.
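If each download is run as a separate curl process, os.waitpid() can be used to block until that process has actually finished before moving on. A rough sketch (POSIX only; the downloads list of (url, filename) pairs is hypothetical):

import os
import subprocess

# downloads is a hypothetical list of (url, filename) pairs
for url, fname in downloads:
    proc = subprocess.Popen(["curl", "-s", "-o", fname, url])
    pid, status = os.waitpid(proc.pid, 0)  # block until this curl process exits
    if os.WEXITSTATUS(status) != 0:
        print "download failed:", url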

4

Here is an excerpt from the PycURL library examples,

# Imports needed by this excerpt
import sys
import threading
import Queue
import pycurl

# Make a queue with (url, filename) tuples
queue = Queue.Queue()
for url in urls:
    url = url.strip()
    if not url or url[0] == "#":
        continue
    filename = "doc_%03d.dat" % (len(queue.queue) + 1)
    queue.put((url, filename))


# Check args
assert queue.queue, "no URLs given"
num_urls = len(queue.queue)
num_conn = min(num_conn, num_urls)
assert 1 <= num_conn <= 10000, "invalid number of concurrent connections"
print "PycURL %s (compiled against 0x%x)" % (pycurl.version, pycurl.COMPILE_LIBCURL_VERSION_NUM)
print "----- Getting", num_urls, "URLs using", num_conn, "connections -----"


class WorkerThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while 1:
            try:
                url, filename = self.queue.get_nowait()
            except Queue.Empty:
                raise SystemExit
            fp = open(filename, "wb")
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url)
            curl.setopt(pycurl.FOLLOWLOCATION, 1)
            curl.setopt(pycurl.MAXREDIRS, 5)
            curl.setopt(pycurl.CONNECTTIMEOUT, 30)
            curl.setopt(pycurl.TIMEOUT, 300)
            curl.setopt(pycurl.NOSIGNAL, 1)
            curl.setopt(pycurl.WRITEDATA, fp)
            try:
                curl.perform()
            except:
                import traceback
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
            curl.close()
            fp.close()
            sys.stdout.write(".")
            sys.stdout.flush()


# Start a bunch of threads
threads = []
for dummy in range(num_conn):
    t = WorkerThread(queue)
    t.start()
    threads.append(t)


# Wait for all threads to finish
for thread in threads:
    thread.join()
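Note that the excerpt assumes urls and num_conn are already defined; in the full example they come from a file of URLs given on the command line, along these lines (a sketch, not the exact original code):

import sys

# Number of concurrent connections, optionally overridden on the command line
num_conn = 10
urls = open(sys.argv[1]).readlines()
if len(sys.argv) >= 3:
    num_conn = int(sys.argv[2])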
