Use the requests library to fetch the proxy listing pages, then use BeautifulSoup to parse the HTML and pull out the IPs.

# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from threading import Thread

headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0'}

# Fetch proxy IPs from xicidaili and write them to a txt file
def get_ip():
    # output file
    write_ip = open('get_ip.txt', 'w')
    # pages to crawl; adjust the range as needed
    for page in range(1, 5):
        url = 'http://www.xicidaili.com/nn/%s' % page
        r = requests.get(url, headers=headers, timeout=10)
        # parse the page with BeautifulSoup
        soup = BeautifulSoup(r.content, 'lxml')
        trs = soup.find('table', id='ip_list').find_all('tr')
        # skip the table header row
        for tr in trs[1:]:
            tds = tr.find_all('td')
            ip = tds[1].text.strip()    # IP address column
            port = tds[2].text.strip()  # port column
            write_ip.write('%s\n' % (ip + ':' + port))
    write_ip.close()
    print('done')

get_ip()
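
The script imports Thread but never uses it above, which suggests the next step is to verify the scraped proxies concurrently, since each check is network-bound and threads let many run at once. Below is a minimal sketch of how that could look; the check_ip helper, the good_ip.txt output file, the test URL, and the timeout are illustrative assumptions, not part of the original code, and it reads the get_ip.txt format produced above.

# -*- coding: utf-8 -*-
import requests
from threading import Thread

# Hypothetical helper: make one request through the proxy and keep it if it responds.
def check_ip(proxy, good):
    proxies = {'http': 'http://' + proxy}
    try:
        # httpbin.org/ip just echoes the caller's IP; any stable test URL works
        r = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
        if r.status_code == 200:
            good.append(proxy)
    except requests.RequestException:
        pass

def check_all():
    good = []
    with open('get_ip.txt') as f:
        proxies = [line.strip() for line in f if line.strip()]
    # one thread per proxy; fine for a few hundred entries
    threads = [Thread(target=check_ip, args=(p, good)) for p in proxies]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    with open('good_ip.txt', 'w') as f:
        f.write('\n'.join(good))
    print('%d usable proxies' % len(good))

check_all()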