Tool: Python 3
Goal: refactor the code we have written into functions, one function per task, and crawl an arbitrary number of pages of HTML.
New syntax learned: with open ... as
Besides being more elegant, with also cleans up properly when an exception is raised inside its context.
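To see why this matters: with a bare open(), you need a try/finally to guarantee the file is closed when an exception escapes, while the with statement does this automatically. A minimal sketch (demo.txt is just a placeholder name):

# By hand: close() must run even when write() raises, so try/finally is needed
f = open("demo.txt", "w", encoding="utf-8")
try:
    f.write("hello")
finally:
    f.close()

# With a context manager: close() is called automatically, raise or no raise
with open("demo.txt", "w", encoding="utf-8") as f:
    f.write("hello")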
# coding: utf-8

import urllib.request
import urllib.parse


def loadPage(fullurl, filename):
    """Send a request to fullurl and return the server's response body."""
    ua_headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"}
    print("Downloading " + filename)
    request = urllib.request.Request(fullurl, headers=ua_headers)
    response = urllib.request.urlopen(request)
    return response.read()


def writePage(html, filename):
    """
    Write the html content to a local file.
    html: the response body returned by the server
    """
    print("Writing " + filename)
    # Create the file and store the html in it
    with open(filename, "wb") as f:
        f.write(html)


def tiebaSpider(url, beginPage, endPage):
    """
    Tieba spider scheduler: builds the url of every page and processes it.
    url: the fixed front part of the Tieba url
    beginPage: first page to crawl
    endPage: last page to crawl
    """
    # Build the url and the filename for each page
    for page in range(beginPage, endPage + 1):
        # Tieba shows 50 posts per page, so the pn parameter advances in steps of 50
        pn = (page - 1) * 50
        fullurl = url + "&pn=" + str(pn)
        filename = "page_" + str(page) + ".html"
        html = loadPage(fullurl, filename)
        writePage(html, filename)
    print("Done!")


if __name__ == "__main__":
    kw = input("Enter the name of the Tieba forum to crawl: ")
    beginPage = int(input("Enter the first page: "))
    endPage = int(input("Enter the last page: "))

    url = "http://tieba.baidu.com/f?"
    kw = urllib.parse.urlencode({"kw": kw})
    url = url + kw
    tiebaSpider(url, beginPage, endPage)