1. 访问图集搜索结果，获得 json 如下（右图为 data 中一条的详细内容）。页面以 Ajax 呈现，每次请求返回 20 个图集。
2. 访问其中的图集
访问 article_url，获得图集图片的详细信息，其中图片 url 为下载地址。
下面展示爬虫的关键部分，整体项目地址在 https://github.com/GeoffreyHub/toutiao_spider
#!/usr/bin/env python
# encoding: utf-8
"""
@version: python37
@author: Geoffrey
@file: spider.py
@time: 18-10-24 上午11:15
"""
import json
import re
from multiprocessing import Pool

import urllib3
from requests import RequestException

from common.request_help import make_session
from db.mysql_handle import MysqlHandler
from img_spider.settings import *

# Suppress the InsecureRequestWarning noise from unverified HTTPS requests.
urllib3.disable_warnings()
class SpiderTouTiao:
    """Crawl Toutiao image-gallery search results and archive the images.

    Workflow: query the Ajax search endpoint page by page (20 galleries per
    request), extract each gallery's article URL, parse the gallery JSON
    embedded in the article page, then download every image to disk and
    insert it into MySQL.
    """

    def __init__(self, keyword):
        # Shared HTTP session; debug=True presumably enables request logging
        # in common.request_help — confirm against that helper.
        self.session = make_session(debug=True)
        self.url_index = 'https://www.toutiao.com/search_content/'
        self.keyword = keyword
        self.mysql_handler = MysqlHandler(MYSQL_CONFIG)

    def search_index(self, offset):
        """Request one page of search results and save the raw JSON.

        :param offset: paging offset passed to the Ajax endpoint.
        :return: generator of (title, gallery_pic_count, article_url)
                 tuples, or None when the request fails.
        """
        url = self.url_index
        data = {
            'offset': f'{offset}',
            'format': 'json',
            'keyword': self.keyword,
            'autoload': 'true',
            'count': '',
            'cur_tab': '',
            'from': 'gallery',
        }
        try:
            response = self.session.get(url, params=data)
            # was `status_code is 200`: identity comparison on an int only
            # works by accident of CPython's small-int cache — use ==.
            if response.status_code == 200:
                json_data = response.json()
                with open(f'../json_data/搜索结果-{offset}.json', 'w', encoding='utf-8') as f:
                    json.dump(json_data, f, indent=4, ensure_ascii=False)
                return self.get_gallery_url(json_data)
        except RequestException:
            # was a bare `except: pass`; narrowed so programming errors
            # (KeyError, JSON decode bugs, ...) are no longer swallowed.
            pass
        print('请求失败')

    @staticmethod
    def get_gallery_url(json_data):
        """Yield (title, gallery_pic_count, article_url) per search hit."""
        for info in json_data["data"]:
            title = info["title"]
            gallery_pic_count = info["gallery_pic_count"]
            article_url = info["article_url"]
            yield title, gallery_pic_count, article_url

    def gallery_list(self, search_data):
        """Visit each article page and collect its image URLs.

        :param search_data: iterable of (title, count, article_url) tuples.
        :return: dict mapping title -> zip of (abstract, img_url) pairs.
        """
        gallery_urls = {}
        for title, gallery_pic_count, article_url in search_data:
            print(title, gallery_pic_count, article_url)
            response = self.session.get(article_url)
            html = response.text
            images_pattern = re.compile(r'gallery: JSON.parse\("(.*?)"\),', re.S)
            result = re.search(images_pattern, html)
            if result:
                # NOTE(review): eval on remote page content is risky even
                # quoted — it is used here only to undo the string escaping
                # before json.loads; replace with a non-eval unescape.
                result = eval("'{}'".format(result.group(1)))
                result = json.loads(result)
                pic_urls = zip(result["sub_abstracts"],
                               [url["url"] for url in result["sub_images"]])
                gallery_urls[title] = pic_urls
                # Dump the parsed gallery only on success (previously the dump
                # could run with result=None, and a leftover debug `break`
                # stopped after the first gallery of the page).
                with open(f'../json_data/{title}-搜索结果.json', 'w', encoding='utf-8') as f:
                    json.dump(result, f, indent=4, ensure_ascii=False)
            else:
                print('解析不到图片url')
        return gallery_urls

    def get_imgs(self, gallery_urls):
        """Download every image; save to disk and insert into MySQL.

        :param gallery_urls: dict of title -> iterable of (abstract, url).
        """
        # Parameterized statement — img_content is bound as a blob value.
        SQL = 'insert into img_gallery(title, abstract, imgs) values(%s, %s, %s)'
        for title, infos in gallery_urls.items():
            for index, info in enumerate(infos):
                abstract, img_url = info
                print(index, abstract)
                response = self.session.get(img_url)
                img_content = response.content
                with open(f'/home/geoffrey/图片/今日头条/{title}-{index}.jpg', 'wb') as f:
                    f.write(img_content)
                self.mysql_handler.insertOne(SQL, [title, abstract, img_content])
                self.mysql_handler.end()
            print(f'保存图集完成' + '-'*50 )
def main(offset):
    """Crawl one result page: search -> parse galleries -> download images.

    :param offset: paging offset for the search endpoint (multiple of 20).
    """
    spider = SpiderTouTiao(KEY_WORD)
    search_data = spider.search_index(offset)
    # search_index returns None on request failure; iterating None in
    # gallery_list would raise TypeError, so skip this page instead.
    if search_data is None:
        return
    gallery_urls = spider.gallery_list(search_data)
    spider.get_imgs(gallery_urls)
    spider.mysql_handler.dispose()


if __name__ == '__main__':
    # Offsets step by 20 — the endpoint returns 20 galleries per request.
    # NOTE(review): GROPE_END is spelled this way in img_spider.settings;
    # keep the name in sync if the settings constant is ever renamed.
    groups = [x * 20 for x in range(GROUP_START, GROPE_END)]
    pool = Pool(10)
    pool.map(main, groups)
项目结构如下：