背景
很多人说学习爬虫是提升编程水平的一个非常好的途径,于是有了这次第一次编写爬虫的尝试。水平有限,依葫芦画瓢,这里主要作为学习过程的记录。
思路
最终的效果图
下面是效果图,简单实现了点击上一页、下一页翻页的功能:
目录结构
目录结构如下:
爬取信息及入库示例代码
#coding:utf-8#fiction_spider.py import requests import re import MySQLdb def get_title(): html = requests.get('http://www.jinyongwang.com/tian/').content rem = r'<li><a href="(.*?)">(.*?)</a>' return re.findall(rem,html) def get_content(url): html = requests.get('http://www.jinyongwang.com/'+url).content #print html matchs_p = r'<p>(.*?)</p><script.*?' data = re.findall(matchs_p, html) return data[0] if __name__ == '__main__': a = MySQLdb.connect(host='10.1.*.*', port=3306, user='user', passwd='passwd', db='testdb', charset='utf8') for i in get_title(): cur = a.cursor() print i[1] print i[0] sqli = 'INSERT INTO `fiction` (`title`, `content`) VALUES ("%s","%s" )'%(i[1],get_content(i[0])) cur.execute(sqli) cur.close() a.commit() a.close()
生成访问页面示例代码
#coding:utf-8#webapp.py import web import re urls = ('/(.*)','Index') db = web.database(dbn = 'mysql',host='10.1.*.*', port=3306, user='user', passwd='passwd', db='testdb', charset='utf8') render = web.template.render('template') class Index: def GET(self,html): id = re.findall('(.*?).html',html)[0] print id data = db.query("select * from fiction where id=%s"%id) return render.index(data[0],id) if __name__ == '__main__': web.application(urls,globals()).run()
页面访问的index.html内容如下:
$def with(data,s)
<!-- template/index.html: renders one chapter plus prev/next navigation.
     Receives `data` (a fiction row with .title and .content) and `s`
     (the chapter id as a string).
     NOTE(review): "$:(int(s)-1).html" assumes templetor emits the number
     and then the literal text ".html" -- confirm it is not parsed as an
     attribute access on the int. -->
<meta charset="utf-8"/>
<title>$:data.title</title>
<h1>$:data.title</h1>
<div style="margin:0px auto;text-align:center;">
<a href="$:(int(s)-1).html">上一页</a>
<a href="$:(int(s)+1).html">下一页</a>
</div>
$:data.content
<br>
<div style="margin:0px auto;text-align:center;">
<a href="$:(int(s)-1).html">上一页</a>
<a href="$:(int(s)+1).html">下一页</a>
</div>
保存到txt:
if __name__ == '__main__': a = open(u'射雕**传.txt','w') m = 0 for i in get_title(): #print i[1], get_content(i[0]) time.sleep(2) data = i[1] + '\n' + '\n' + get_content(i[0]).replace('</p><p>','\n\n') + '\n\n' #在标题和内容之间插入换行符,将html中的<p>参数变成换行符 a.writelines(data) m += 1 print u'正在爬取第%s段内容' % m # if m >2: # print u'正在爬取第%s段内容' % m # break a.close()