解析库之bs4的基本使用方法

# Install the required packages:
#   pip install beautifulsoup4   (the bs4 parsing library)
#   pip install lxml             (the lxml parser backend)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p> <p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p> <p class="story">...</p>
"""
from bs4 import BeautifulSoup  # import BeautifulSoup from the bs4 package

# Instantiate a soup object from the document.
#   arg 1: the text to parse
#   arg 2: the parser to use (html.parser, lxml, ...)
soup = BeautifulSoup(html_doc, 'lxml')
print(soup)
print('*' * 100)
print(type(soup))

# Pretty-print (re-indent) the parsed document
html = soup.prettify()
print(html)

bs4之遍历文档树

html_doc = """
<html><head><title>The Dormouse's story</title></head><body><p class="sister"><b>$37</b></p><p class="story" id="p">Once upon a time there were three little sisters; and their names were<a href="http://example.com/elsie" class="sister" >Elsie</a>,<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;and they lived at the bottom of a well.</p><p class="story">...</p>
"""
from bs4 import BeautifulSoup  # import BeautifulSoup from the bs4 package

soup = BeautifulSoup(html_doc, 'lxml')

# Traversing the document tree:
#   1. direct attribute access
#   2. get a tag's name
#   3. get a tag's attributes
#   4. get a tag's text content
#   5. nested selection
#   6. children / descendants
#   7. parent / ancestors
#   8. siblings

# 1. direct attribute access
print(soup.p)  # the first <p> tag
print(soup.a)  # the first <a> tag

# 2. get a tag's name
print(soup.head.name)  # the name of the <head> tag

# 3. get a tag's attributes
print(soup.a.attrs)          # all attributes of the first <a> tag
print(soup.a.attrs['href'])  # the href attribute of the first <a> tag

# 4. get a tag's text content
print(soup.p.text)  # $37

# 5. nested selection
print(soup.html.head)  # <head><title>The Dormouse's story</title></head>

# 6. children / descendants
print(soup.body.children)           # iterator over body's direct children, e.g. <list_iterator object at 0x02C72650>
print(list(soup.body.children))     # materialize the iterator as a list
print(soup.body.descendants)        # generator over ALL descendants (children, grandchildren, ...)
print(list(soup.body.descendants))

# 7. parent / ancestors
print(soup.p.parent)         # parent of the first <p> tag (the <body> tag)
print(soup.p.parents)        # generator over all ancestors of <p>
print(list(soup.p.parents))

# 8. siblings
print(soup.p.next_sibling)          # the next sibling
print(soup.p.next_siblings)         # generator over all following siblings
print(list(soup.p.next_siblings))
print(soup.a.previous_sibling)      # the previous sibling of the first <a> tag
print(soup.a.previous_siblings)     # generator over all preceding siblings
print(list(soup.a.previous_siblings))

bs4搜索文档库的应用

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p> <p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" >Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p> <p class="story">...</p>
"""
from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc, 'lxml')

# --- string filters ---

# name: filter by tag name
p_tag = soup.find(name='p')
print(p_tag)  # the first tag whose name is 'p'
tag_s1 = soup.find_all(name='p')  # all tags whose name is 'p'

# attrs: filter by attribute
# first node whose class is "sister"
p = soup.find(attrs={'class': 'sister'})
print(p)
# all nodes whose class is "sister"
p = soup.find_all(attrs={'class': 'sister'})
print(p)

# text: filter by text content
text = soup.find(text='$37')
print(text)

# combined filters:
# the <a> tag whose id is "link2" AND whose text is "Lacie"
a = soup.find(name='a', attrs={"id": "link2"}, text='Lacie')
print(a)

用python爬取豌豆荚数据

# API endpoints used (one page of app cards per request), e.g.:
#   https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=lAd3GvU1DbFpJzYVdADWw9pS
#   https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=4&ctoken=lAd3GvU1DbFpJzYVdADWw9pS
from bs4 import BeautifulSoup
import re
import requests


def get_page(url):
    """Send a GET request to `url` and return the requests Response object."""
    response = requests.get(url)
    return response


def parse_detail(text):
    """Parse an app detail page and print its fields."""
    soup = BeautifulSoup(text, 'lxml')
    # app name
    name = soup.find(name='span', attrs={"class": "title"}).text
    print(name)
    # positive-rating percentage
    love = soup.find(name='span', attrs={"class": "love"}).text
    print(love)
    # comment count
    commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    print(commit_num)
    # editor's review
    commit_content = soup.find(name='div', attrs={"class": "con"}).text
    print(commit_content)
    # app download link
    download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    print(download_url)


def parse_index(data):
    """Parse the index HTML fragment: for every <li class="card"> app card,
    print its fields, then fetch and parse its detail page."""
    soup = BeautifulSoup(data, 'lxml')
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        print(app)
        # icon address: data-original attribute of the card's first <img>
        img = app.find(name='img').attrs['data-original']
        print(img)
        # install count: text of the span with class "install-count"
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)
        # app size: the span whose text matches digits followed by "MB".
        # BUG FIX: search within the current card (`app`), not the whole page
        # (`soup`) — otherwise every card reports the FIRST card's size.
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)
        # detail page address: href of the card's first <a>
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)
        # 3. request the app detail page
        response = get_page(detail_url)
        # 4. parse the app detail page
        parse_detail(response.text)


def main():
    for line in range(1, 33):
        # 1. request one page of the app-list API
        url = f'https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=lAd3GvU1DbFpJzYVdADWw9pS'
        response = get_page(url)
        print("*" * 1000)
        # deserialize the JSON payload into a dict (JSON requires double quotes)
        data = response.json()
        # the API wraps an HTML fragment of app cards under data -> content
        app_li = data['data']['content']
        print(app_li)
        # 2. parse the app-card markup
        parse_index(app_li)


if __name__ == '__main__':
    # FIX: the original `if __name__ == '__main__':` had no body — nothing ran.
    main()

pymongo的简单使用

from pymongo import MongoClient

# 1. connect a MongoDB client
#    arg 1: MongoDB host address
#    arg 2: MongoDB port (default: 27017)
client = MongoClient('localhost', 27017)
print(client)

# 2. access the zmm_db database (created lazily if it does not exist)
print(client['zmm_db'])

# 3. access the people collection (also created lazily)
client['zmm_db']['people']

# 4. insert data into zmm_db
# 4a. insert one document
# NOTE: Collection.insert() was deprecated in PyMongo 3.0 and removed in 4.0 —
# use the officially recommended insert_one / insert_many instead.
data1 = {
    'name': 'zmm',
    'age': '',
    'sex': 'female'
}
client['zmm_db']['people'].insert_one(data1)

# 4b. insert multiple documents at once
data1 = {
    'name': 'zmm',
    'age': '',
    'sex': 'female'
}
data2 = {
    'name': 'lyj',
    'age': '',
    'sex': 'female'
}
data3 = {
    'name': 'zcj',
    'age': '',
    'sex': 'female'
}
client['zmm_db']['people'].insert_many([data1, data2, data3])

# 5. query data
data_s = client['zmm_db']['people'].find()
for data in data_s:
    print(data)
# fetch a single document
print(client['zmm_db']['people'].find_one())

有关命令的作用

1、BeautifulSoup解析库
2、MongoDB存储库
3、requests-html 请求库

BeautifulSoup
1、什么是bs4?
是一个可以从HTML或XML文档中提取数据的解析库,提供一些强大的解析功能,
能提高提取数据的效率与爬虫开发效率。
2、安装与使用
pip install beautifulsoup4

MongoDB
1、下载安装
2、在C盘创建一个data/db文件夹
-数据的存放路径
3、输入mongod启动服务
进入终端cmd(以管理员身份),输入mongod启动MongoDB服务 4、mongo进入MongoDB客户端
打开一个新的终端cmd(以管理员身份),输入mongo进入客户端

数据库操作:
切换库:
SQL:
use admin;有则切换,无则报错
MongoDB:
use zmm;有则切换,无则创建,并切换到zmm库中
查数据库:
SQL:
show databases;
MongoDB:
show dbs;显示数据库,若库中无数据则不显示
删除库:
SQL:
drop database;
MongoDB:
db.dropDatabase()
集合操作:(MongoDB的集合相当于MySQL中的表)
SQL:
create table f1,f2.....
MongoDB:
#在当前库中通过点来创建集合
db.student
插入数据:
#插入多条数据
db.student.insert([{"name1":"tank1"},{"name2":"tank2"}])
#插入一条name为zmm的数据
db.student.insert({"name":"zmm"})
#结果为WriteResult({ "nInserted" : 1 }) 查数据:
#查找student集合中所有数据
db.student.find({})
#结果{ "_id" : ObjectId("5d0b381114466bde5bc647c5"), "name" : "zmm" } #查看结果
db.student.find({})
#结果如下
#{ "_id" : ObjectId("5d0b381114466bde5bc647c5"), "name" : "zmm" }
#{ "_id" : ObjectId("5d0b393914466bde5bc647c6"), "name1" : "tank1" }
#{ "_id" : ObjectId("5d0b3a0214466bde5bc647c7"), "name1" : "tank1" }
#{ "_id" : ObjectId("5d0b3a0214466bde5bc647c8"), "name2" : "tank2" } #查一条 查找name为zmm的记录
#结果{ "_id" : ObjectId("5d0b381114466bde5bc647c5"), "name" : "zmm" }

通过本次学习,学习到python的更多知识,特别开心

05-11 21:46