1. start_urls -- internal handling of the start URLs (the iterable is consumed as a generator)
    import scrapy
    from scrapy import Request

    class QSpider(scrapy.Spider):
        name = 'q'
        allowed_domains = ['chouti.com']
        start_urls = ['http://chouti.com/']

        def start_requests(self):
            # Option 1: yield each request, making start_requests a generator
            for url in self.start_urls:
                yield Request(url=url)

            # Option 2: build and return a list of requests instead
            # req_list = []
            # for url in self.start_urls:
            #     req_list.append(Request(url=url))
            # return req_list
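    Either form works: internally scrapy wraps whatever start_requests returns in iter() and pulls requests from it one by one. A minimal standalone sketch of that behavior (instantiating the spider by hand here is purely for illustration):

        spider = QSpider()
        requests = iter(spider.start_requests())  # works for a generator or a list
        for req in requests:
            print(req.url)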
2. Depth and priority
    Depth:
        - starts at 0
        - each time a new request is yielded, its depth is the parent request's depth + 1
        - setting: DEPTH_LIMIT caps the crawl depth
    Priority:
        - the priority with which a request is downloaded: priority -= depth * DEPTH_PRIORITY
        - configured in settings: DEPTH_PRIORITY
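    A minimal sketch of how this looks in practice (the numeric values are arbitrary examples, and parse is a placeholder callback):

        # settings.py
        DEPTH_LIMIT = 3       # requests deeper than 3 levels are ignored
        DEPTH_PRIORITY = 1    # priority -= depth * 1, so deeper requests are scheduled later

        # in a spider callback, the current depth is available via response.meta
        def parse(self, response):
            self.logger.info("depth=%s priority=%s",
                             response.meta.get('depth', 0), response.request.priority)
            # any Request yielded here is assigned depth + 1 by DepthMiddleware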
3. Downloader middleware
    Setting a proxy in scrapy (environment variables are not shared between two separate programs, because they are two separate processes)
    - Built-in:
        set the proxy in os.environ ahead of time, when the spider starts; the built-in HttpProxyMiddleware reads these environment variables on startup
        # Option 1 (environment variables):
        import os
        os.environ["HTTPS_PROXY"] = "https://root:[email protected]:80"
        os.environ["HTTP_PROXY"] = "http://1.1.1.2"
        # Option 2 (per-request parameter):
        yield Request(url=url, callback=self.parse, meta={'proxy': 'https://root:[email protected]:80'})
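        A minimal runnable sketch of option 2 (the spider name and the proxy address are placeholder values):

            import scrapy
            from scrapy import Request

            class ProxySpider(scrapy.Spider):
                name = 'proxy_demo'
                start_urls = ['http://chouti.com/']

                def start_requests(self):
                    for url in self.start_urls:
                        # the built-in HttpProxyMiddleware reads meta['proxy'] and
                        # routes the request through that proxy
                        yield Request(url=url, callback=self.parse,
                                      meta={'proxy': 'https://root:[email protected]:80'})

                def parse(self, response):
                    pass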
    - Custom:
        1. settings.py:
            DOWNLOADER_MIDDLEWARES = {
                # 'xdb.middlewares.XdbDownloaderMiddleware': 543,
                'xdb.proxy.XdbProxyMiddleware': 751,
            }
        2. proxy.py:
            import base64
            import random

            from scrapy.utils.python import to_bytes

            class XdbProxyMiddleware(object):
                PROXIES = [
                    {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                    {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                    {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                    {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                    {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                    {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
                ]

                def process_request(self, request, spider):
                    # pick a random proxy for each outgoing request
                    proxy = random.choice(self.PROXIES)
                    request.meta['proxy'] = "http://%s" % proxy['ip_port']
                    if proxy['user_pass']:
                        # proxies that require auth also get a Basic Proxy-Authorization header
                        encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass']))
                        request.headers['Proxy-Authorization'] = b'Basic ' + encoded_user_pass
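            Since process_request returns None here, each request simply continues through the remaining middlewares to the downloader, now carrying meta['proxy'] (and, for proxies with credentials, the Proxy-Authorization header).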