import aiohttp
import asyncio
import async_timeout
from urllib.parse import urljoin, urldefrag

root_url = 'http://python.org/'  # starting URL
crawled_urls, url_hub = [], [root_url]
headers = {'user-agent': 'Opera/9.80 (X11; Linux x86_64; U; en) Presto/2.2.15 Version/10.10'}


async def get_body(url):
    async with aiohttp.ClientSession() as session:
        try:
            # set a 10-second timeout for the whole request
            # (newer async_timeout versions require `async with`)
            async with async_timeout.timeout(10):
                async with session.get(url, headers=headers) as response:
                    if response.status == 200:
                        html = await response.text()
                        return {'error': '', 'html': html, 'url': url}
                    else:
                        return {'error': response.status, 'html': '', 'url': url}
        except Exception as err:
            # `response` may be unbound if the request itself failed,
            # so report the exception rather than response.status
            return {'error': err, 'html': '', 'url': url}
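

# A minimal sketch of exercising get_body on its own, assuming Python 3.7+
# where asyncio.run is available (kept as a comment so it does not run here):
#
#     result = asyncio.run(get_body(root_url))
#     print(result['error'] or len(result['html']))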


async def handle_task(task_id, work_queue):
    while not work_queue.empty():
        queue_url = await work_queue.get()
        if queue_url not in crawled_urls:
            body = await get_body(queue_url)
            if not body['error']:
                # enqueue newly discovered links on the same site before
                # marking the current page as crawled
                for new_url in get_urls(body['html']):
                    if root_url in new_url and new_url not in crawled_urls:
                        work_queue.put_nowait(new_url)
                crawled_urls.append(queue_url)
                parse(body)
            else:
                print('{} failed to crawl'.format(queue_url))


# parse the returned data
def parse(body):
    pass


def remove_fragment(url):
    pure_url, frag = urldefrag(url)
    return pure_url
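
# For reference: urldefrag('http://python.org/about/#history') returns
# ('http://python.org/about/', 'history'), so remove_fragment keeps only the
# part before the fragment.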


# parse the HTML and build new absolute URLs from the href attributes
def get_urls(html):
    new_urls = [url.split('"')[0] for url in str(html).replace("'", '"').split('href="')[1:]]
    return [urljoin(root_url, remove_fragment(new_url)) for new_url in new_urls]
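
# For example, for html = '<a href="/about/#history">About</a>' this returns
# ['http://python.org/about/'] (given root_url above); note it is a crude
# string split, not a real HTML parser.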


if __name__ == '__main__':
    q = asyncio.Queue()  # initialize an asyncio queue
    [q.put_nowait(url) for url in url_hub]  # seed the queue with the starting URLs
    loop = asyncio.get_event_loop()
    tasks = [handle_task(task_id, q) for task_id in range(3)]  # 3 concurrent workers
    # asyncio.wait() no longer accepts bare coroutines on Python 3.11+,
    # so gather() is used here instead
    loop.run_until_complete(asyncio.gather(*tasks))
    loop.close()
    for u in crawled_urls:
        print(u)
    print('-' * 30)
    print(len(crawled_urls))
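

# A sketch of an alternative entry point, assuming Python 3.7+; asyncio.run()
# replaces the manual loop management above and is the more idiomatic form
# today (kept as a comment so it does not run alongside the block above):
#
#     async def main():
#         q = asyncio.Queue()
#         for url in url_hub:
#             q.put_nowait(url)
#         await asyncio.gather(*(handle_task(i, q) for i in range(3)))
#
#     asyncio.run(main())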