Grabbing Dynamic Proxy IPs
1. Introduction
When scraping data with a crawler, you often need a pool of proxy IPs so that no single IP hits the target site frequently enough to get banned. Proxies can be scraped from this site: http://www.xicidaili.com/nn/. This post writes a Python program that fetches proxies from it and saves them locally.
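For reference, routing a single requests call through a proxy looks like the sketch below; the proxy address and target URL are placeholders for illustration, not real working values.

import requests

# placeholder proxy address, for illustration only
proxies = {'http': 'http://10.10.1.10:3128'}
r = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=3)
print(r.text)   # the reported origin IP should be the proxy's, not yours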
2. Code
#!/usr/bin/python
# _*_ coding: utf-8 _*_
# grab ip proxies from xicidaili

import sys, time, requests
from multiprocessing.dummy import Pool as ThreadPool
from lxml import etree

IP_POOL = 'ip_pool.py'
URL = 'http://www.xicidaili.com/nn/'    # proxy list, high anonymity
#URL = 'http://www.xicidaili.com/wt/'   # proxy list, http
RUN_TIME = time.strftime("%Y-%m-%d %H:%M", time.localtime())  # run time

# valid proxies, keyed by scheme
alive_ip = {'http': [], 'https': []}

# thread pool for testing proxies concurrently
pool = ThreadPool(20)

# fetch the html text of a page
def get_html(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        "Accept-Encoding": "gzip, deflate",
        "Referer": "https://www.xicidaili.com/",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }
    r = requests.get(url, headers=headers)
    r.encoding = 'utf-8'
    return r.text

# test whether a proxy is alive
def test_alive(proxy):
    # map both schemes to the proxy so the https test request actually goes through it
    proxies = {'http': proxy, 'https': proxy}
    try:
        r = requests.get('https://www.baidu.com', proxies=proxies, timeout=3)
        if r.status_code == 200:
            if proxy.startswith('https'):
                alive_ip['https'].append(proxy)
            else:
                alive_ip['http'].append(proxy)
    except requests.RequestException:
        print("%s is invalid!" % proxy)

# parse the html and collect candidate proxies
def get_alive_ip_address():
    iplist = []
    html = get_html(URL)
    selector = etree.HTML(html)
    table = selector.xpath('//table[@id="ip_list"]')[0]
    lines = table.xpath('./tr')[1:]
    for line in lines:
        speed, connect_time = line.xpath('.//div/@title')
        data = line.xpath('./td')
        ip = data[1].xpath('./text()')[0]
        port = data[2].xpath('./text()')[0]
        anonymous = data[4].xpath('./text()')[0]
        ip_type = data[5].xpath('./text()')[0]
        # drop proxies that are slow or not high-anonymity ('高匿')
        if float(speed[:-1]) > 1 or float(connect_time[:-1]) > 1 or anonymous != '高匿':
            continue
        iplist.append(ip_type.lower() + '://' + ip + ':' + port)
    pool.map(test_alive, iplist)

# write the valid proxies to a local file
def write_txt(output_file):
    with open(output_file, 'w') as f:
        f.write('#create time: %s\n\n' % RUN_TIME)
        f.write('http_ip_pool = \\\n')
        f.write(str(alive_ip['http']).replace(',', ',\n'))
        f.write('\n\n')
        f.write('https_ip_pool = \\\n')
        f.write(str(alive_ip['https']).replace(',', ',\n'))
    print('write successful: %s' % output_file)

def main():
    get_alive_ip_address()
    write_txt(output_file)

if __name__ == '__main__':
    try:
        output_file = sys.argv[1]   # first command-line argument is the output file name
    except IndexError:
        output_file = IP_POOL
    main()
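The output file name can be passed as the first command-line argument, e.g. python grab_xici_proxies.py my_pool.py (the script name here is just whatever you saved the code as); without an argument it falls back to IP_POOL, i.e. ip_pool.py.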
Running the code generates the ip_pool.py file:
#create time: 2020-04-17 15:09
http_ip_pool = \
['http://114.104.135.251:8888',
'http://183.167.217.152:63000',
'http://222.95.241.80:3000',
'http://117.88.177.244:3000',
'http://150.138.106.174:80',
'http://113.208.115.190:8118',
'http://211.159.219.225:8118',
'http://114.239.211.175:61234',
'http://117.88.176.188:3000',
'http://117.88.5.19:3000',
'http://110.73.0.113:8123',
'http://113.194.31.60:9999',
'http://121.237.149.165:3000',
'http://202.115.142.147:9200',
'http://117.88.5.253:3000',
'http://58.253.157.142:9999',
'http://115.223.108.111:8010',
'http://118.114.194.229:8118',
'http://140.250.197.186:9999',
'http://115.223.87.86:8010']
https_ip_pool = \
['https://112.16.217.191:808',
'https://125.126.113.184:60004',
'https://210.5.10.87:53281',
'https://111.231.239.143:1081',
'https://218.75.69.50:39590',
'https://115.223.64.38:8010',
'https://58.254.220.116:52470',
'https://117.45.139.179:9006',
'https://110.189.152.86:52277',
'https://117.88.4.162:3000',
'https://110.243.9.72:9999',
'https://117.88.4.242:3000',
'https://120.39.216.129:808',
'https://223.241.116.201:18118',
'https://171.35.170.144:9999',
'https://115.219.104.254:8010',
'https://117.88.177.76:3000',
'https://222.95.144.204:3000',
'https://182.92.220.212:8080',
'https://117.88.4.253:3000',
'https://114.99.54.65:8118',
'https://117.88.4.160:3000',
'https://117.88.4.23:3000']
After that, the pools can be imported and used directly: from ip_pool import http_ip_pool, https_ip_pool.
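A typical way to consume the pools is to pick a proxy at random for each request. A minimal sketch, assuming the generated ip_pool.py above; the fetch helper and the target URL are illustrative, not part of the original script:

import random
import requests
from ip_pool import http_ip_pool, https_ip_pool

def fetch(url):
    # choose the pool matching the target URL's scheme, then pick a proxy at random
    pool = https_ip_pool if url.startswith('https') else http_ip_pool
    proxy = random.choice(pool)
    return requests.get(url, proxies={'http': proxy, 'https': proxy}, timeout=5)

r = fetch('https://www.baidu.com')   # same test target the script uses
print(r.status_code)

Rotating proxies this way spreads requests across the whole pool, which is exactly what keeps any single IP below the ban threshold.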