asyncio 异步爬虫(转)

转载过来的代码，方便自己查看，请见谅。

# 异步方式爬取当当畅销书的图书信息
import aiohttp
import asyncio
import time
import pandas as pd
from bs4 import BeautifulSoup

# Global accumulator: parser() appends one
# [rank, name, comments, author, publisher] row per book; the script's
# tail converts it into a DataFrame and writes it to Excel.
table = []

async def fetch(session, url):
    """Download *url* through the given aiohttp session and return its body.

    The Dangdang pages are GBK-encoded; gb18030 is a superset of GBK,
    so it decodes them without errors.
    """
    async with session.get(url) as resp:
        body = await resp.text(encoding='gb18030')
    return body

async def parser(html):
    """Extract book records from one bestseller page.

    Appends one [rank, name, comments, author, publisher] row per
    <li> entry to the module-level ``table`` list.
    """
    soup = BeautifulSoup(html, 'lxml')
    entries = soup.find('ul', class_="bang_list clearfix bang_list_mode")('li')
    for entry in entries:
        divs = entry.find_all('div')
        rank = divs[0].text[:-1]          # strip the trailing '.' from "1."
        title = divs[2].text
        n_comments = divs[3].text.split('条')[0]
        author = divs[4].text
        date_publisher = divs[5].text.split()
        # Some entries only carry a date; fall back to an empty publisher.
        publisher = date_publisher[1] if len(date_publisher) >= 2 else ''
        table.append([rank, title, n_comments, author, publisher])

async def download(url):
    """Fetch a single bestseller page and hand it to the parser."""
    async with aiohttp.ClientSession() as session:
        page = await fetch(session, url)
        await parser(page)

# The bestseller list spans 25 pages; build one URL per page.
urls = ['http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-%d'%i for i in range(1,26)]
print('#' * 50)
t1 = time.time()

# Run all downloads concurrently.  asyncio.run() creates, runs and closes
# the event loop itself -- the old get_event_loop()/ensure_future()/
# run_until_complete() pattern is deprecated since Python 3.10 and fails
# under Python 3.12's stricter loop policy.
async def _main():
    await asyncio.gather(*(download(url) for url in urls))

asyncio.run(_main())

# Collect the scraped rows into a DataFrame and export them to Excel.
df = pd.DataFrame(table, columns=['rank','name','comments','author','publisher'])

df.to_excel('E:\\dangdang.xlsx', index=False)
t2 = time.time()
print('使用aiohttp,总共耗时:%s' % (t2 - t1))
print('#' * 50)

版权声明:本文为weixin_44285988原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。