This is a simple crawler for fetching medical physicists' meeting reports; the database table structure is still being refined.

The crawler code:

 # -*- coding:utf-8 -*-
import urllib.request
import pymysql
from bs4 import BeautifulSoup
import time
import re
import os

# Base class for database access
class Conn_Mssql:
    # Query the MySQL database
    @staticmethod
    def Select_mssql(strsql):
        # Database connection settings
        conn = pymysql.connect(host="DESKTOP-V9MQNL6", user="root",
                               password="password", database="internetdaq",
                               charset="utf8")
        cur = conn.cursor()
        cur.execute(strsql)
        return cur

    # Insert into or update the database
    @staticmethod
    def InsertOrUpdate_mssql(strsql):
        # Database connection settings
        conn = pymysql.connect(host="DESKTOP-V9MQNL6", user="root",
                               password="password", database="internetdaq",
                               charset="utf8")
        cur = conn.cursor()
        cur.execute(strsql)
        conn.commit()
        conn.close()
        return cur

# Fetch pages from the web and store what they contain
class Get_HttpMessage:
    # Download one file
    @staticmethod
    def getFile(url):
        try:
            file_name = url.split('/')[-1]
            os.makedirs("StorePDF", exist_ok=True)
            file_path = os.path.join("StorePDF", file_name)
            u = urllib.request.urlopen(url)
        except Exception:
            print(url, "url file not found")
            return
        block_sz = 90192
        with open(file_path, 'wb') as f:
            while True:
                buffer = u.read(block_sz)
                if buffer:
                    f.write(buffer)
                else:
                    break
        print("Successfully downloaded " + file_name)

    # Start crawling
    @staticmethod
    def startGet():
        print('start')
        # The AAPM reports page
        url = "https://www.aapm.org/pubs/reports/"
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        data = response.read()
        soup = BeautifulSoup(data, "lxml")
        # Links whose href contains the string "docid"
        for link in soup.find_all(href=re.compile("docid")):
            # Link target
            text_url = link['href']
            # Link text
            text_Name = link.get_text()
            if len(text_url) > 0 and len(text_Name) > 10:
                # Note: a title containing a quote character will break this
                # concatenated statement; see the parameterized sketch below
                strSQl = ("insert into daqtest (SAVE_TIME,URL_Name,URL_Link) values (NOW(),'"
                          + text_Name + "','" + url + text_url + "')")
                try:
                    # Store the report page link
                    Conn_Mssql.InsertOrUpdate_mssql(strSQl)
                except Exception:
                    print('Failed to store parent-page link in MySQL')
                time.sleep(1)
                # The page that lists the report's PDFs
                urlSecond = url + text_url
                request2 = urllib.request.Request(urlSecond)
                response2 = urllib.request.urlopen(request2)
                data2 = response2.read()
                soup2 = BeautifulSoup(data2, "lxml")
                # Used to skip consecutive duplicate PDF links
                pdfName = ""
                # Find the PDF links on the page
                for link2 in soup2.find_all(href=re.compile("pdf")):
                    # PDF link
                    text_url2 = link2['href']
                    # The page the PDF was found on
                    text_Name2 = url + text_url
                    if len(text_url2) > 0 and pdfName != text_url2:
                        pdfName = text_url2
                        strSQl2 = ("insert into daqtest (SAVE_TIME,URL_Name,URL_Link) values (NOW(),'"
                                   + text_Name2 + "','" + text_url2 + "')")
                        try:
                            # Store the PDF link in the database
                            Conn_Mssql.InsertOrUpdate_mssql(strSQl2)
                            # Go slowly to reduce load on the site
                            time.sleep(1)
                            # Download the report PDF
                            Get_HttpMessage.getFile(text_url2)
                        except Exception:
                            print('Failed to store child-page link in MySQL')

# Program entry point
Get_HttpMessage.startGet()
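
One caveat about the script above: the INSERT statements are assembled by string concatenation, so a report title containing a quote character breaks the statement and leaves the door open to SQL injection. Below is a minimal sketch of a parameterized variant, reusing the same connection settings and table as above (the helper name insert_link is just for illustration, not part of the original script):

 # -*- coding:utf-8 -*-
import pymysql

# Sketch: same connection settings as the crawler above, but the values are
# passed as query parameters so pymysql escapes quotes for us.
def insert_link(name, link):  # hypothetical helper, for illustration only
    conn = pymysql.connect(host="DESKTOP-V9MQNL6", user="root",
                           password="password", database="internetdaq",
                           charset="utf8")
    try:
        with conn.cursor() as cur:
            cur.execute(
                "insert into daqtest (SAVE_TIME, URL_Name, URL_Link) "
                "values (NOW(), %s, %s)",
                (name, link))
        conn.commit()
    finally:
        conn.close()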

This is the database table structure used for storage:

 /*
Navicat MySQL Data Transfer

Source Server         : dde
Source Server Version : 50624
Source Host           : DESKTOP-V9MQNL6:3306
Source Database       : internetdaq

Target Server Type    : MYSQL
Target Server Version : 50624
File Encoding         : 65001
*/

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for daqtest
-- ----------------------------
DROP TABLE IF EXISTS `daqtest`;
CREATE TABLE `daqtest` (
`ID` bigint(20) NOT NULL AUTO_INCREMENT,
`SAVE_TIME` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP,
`URL_Name` varchar(600) COLLATE utf8_unicode_ci DEFAULT NULL,
`URL_Link` varchar(6000) COLLATE utf8_unicode_ci DEFAULT NULL,
PRIMARY KEY (`ID`)
) ENGINE=InnoDB AUTO_INCREMENT=4634 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
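
With the table in place, a quick way to confirm the crawler is writing rows is to read back the latest entries. A small sketch under the same connection settings as the crawler:

import pymysql

conn = pymysql.connect(host="DESKTOP-V9MQNL6", user="root",
                       password="password", database="internetdaq",
                       charset="utf8")
with conn.cursor() as cur:
    # The ten most recently inserted links, newest first
    cur.execute("select SAVE_TIME, URL_Name, URL_Link from daqtest "
                "order by ID desc limit 10")
    for save_time, name, link in cur.fetchall():
        print(save_time, name, link)
conn.close()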