浏览器的图片都是通过Ajax异步加载的,通过浏览器F12的network下的XHR可以看到,当往下拉动加载更多图片时,XHR会加载出
许多内容,可以判定我们所需的数据可以通过这个接口拿到。下面是代码:
spiders文件
# -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urlencode
import re
from picture.items import PictureItem
class DongzhiwuSpider(scrapy.Spider):
    """Spider that scrapes Google image-search results via the async XHR endpoint.

    For every keyword in ``phrase_list`` it requests 20 result pages and
    extracts the raw image URLs from each response into a ``PictureItem``.
    """
    name = 'dongzhiwu'
    allowed_domains = ['www.google.com']
    start_urls = ['https://www.google.com.hk/']
    # Keywords to crawl.
    phrase_list = ['苹果', '香蕉', '深圳']

    def start_requests(self):
        """Yield one request per (keyword, page) pair: 20 pages per keyword."""
        # Iterate the keywords directly instead of `for i in range(len(...))`.
        for phrase in self.phrase_list:
            key = urlencode({'q': phrase})
            for page in range(1, 21):  # crawl 20 pages
                url = (
                    "https://www.google.com.hk/search?ei=0n6sW-DlJITr-QbQl7Mw&hl=zh-CN&safe=strict&yv=3&tbm=isch&"
                    + key
                    + "&vet=10ahUKEwjglqSRzdrdAhWEdd4KHdDLDAYQuT0IOCgB.0n6sW-DlJITr-QbQl7Mw.i&ved=0ahUKEwjglqSRzdrdAhWEdd4KHdDLDAYQuT0IOCgB&ijn="
                    + str(page)
                    + "&start=" + str(page * 100)
                    + "&asearch=ichunk&async=_id:rg_s,_pms:s,_fmt:pc"
                )
                # dont_filter=True: these URLs differ only in query params,
                # so keep the dupe filter from dropping them.
                yield scrapy.Request(url, callback=self.parse,
                                     meta={'q': phrase}, dont_filter=True)

    def parse(self, response):
        """Extract the image URLs of one result page and yield a PictureItem."""
        item = PictureItem()
        item['name'] = response.meta['q']  # the search keyword
        # Every image link appears as "imgurl=<http...>&" in the raw response.
        item['pic_urls'] = re.findall('imgurl=(http.*?)&', response.text)
        yield item
由此我们得到了每一页的图片链接,接下来在管道文件中将其写到本地即可。
pipelines文件:
from hashlib import md5
from urllib.request import urlretrieve
import os
class PicturePipeline(object):
    """Pipeline that downloads every image URL of an item to the local disk.

    Images are stored as ``google图片/<keyword>/<md5-of-url>.jpg``.
    """

    def process_item(self, item, spider):
        """Download ``item['pic_urls']`` into a per-keyword directory.

        Failed downloads are skipped (best effort). Returns the item so
        later pipelines can keep processing it.
        """
        kind_path = '{0}/{1}'.format('google图片', item['name'])
        # makedirs(..., exist_ok=True) builds the whole path in one call and
        # avoids the check-then-create race of os.path.exists + os.mkdir.
        os.makedirs(kind_path, exist_ok=True)
        for url in item['pic_urls']:
            img_path = '{0}/{1}.{2}'.format(kind_path, md5(url.encode("utf-8")).hexdigest(), 'jpg')
            if os.path.exists(img_path):
                continue  # already downloaded on a previous run
            try:
                urlretrieve(url, filename=img_path)
            except Exception:
                # Narrower than the original bare `except:`, which would also
                # swallow KeyboardInterrupt/SystemExit; bad URLs are skipped.
                continue
        print(item['name'] + "写入完毕")
        return item
以上。