🛬前言

上一篇我们已经介绍了python怎样爬取代理并筛选合格的代理,

这一篇使用openpyxl操作excel制作代理IP池。

需要了解:

openpyxl的方法:

# 读取第一列数据
list(ws.columns)[0]
# 读取第一行数据
list(ws.rows)[0]

zip描述:

zip() 函数用于将可迭代的对象作为参数,将对象中对应的元素打包成一个个元组;在 Python 3 中它返回的是由这些元组组成的迭代器(可用 list() 转换为列表),Python 2 中才直接返回列表。

for x, y in zip(list(ws.columns)[0], list(ws.columns)[1]):

💌实现思路

1.要将获取的数据存入xlsx中

def proxies_save(proxies_use):
    """Save the verified proxies to an xlsx file (proxiesPool.xlsx).

    proxies_use: list of one-key dicts {type: "ip:port"} as built by the
    scraping step.
    """
    # Create a new in-memory workbook
    wb = openpyxl.Workbook()
    # Grab the default active worksheet
    ws = wb.active
    # Sheet name — proxies_read() looks the sheet up by this exact title
    ws.title = "代理IP池"
    # Header row
    ws.append(['类型', '代理IP'])
    # Each dict has a single key (the proxy type); write one row per key
    for proxie in proxies_use:
        for index in proxie:
            ws.append([index, proxie[index]])
    wb.save("proxiesPool.xlsx")
    print("保存结束===================")

xlsx部分图:

xlsx

2.读取xlsx文件

def proxies_read():
    """Read the proxy pool back from proxiesPool.xlsx.

    Returns a list of one-key dicts {type: "ip:port"}, the same shape the
    scraper produced before saving.
    """
    wb = openpyxl.load_workbook("proxiesPool.xlsx")
    # Sheet title must match the one set in proxies_save()
    ws = wb["代理IP池"]
    proxies_lists = []
    # print('获取特定列:')
    # print(list(ws.columns)[0])
    # Walk the first two columns (type, ip:port) in lockstep
    for x, y in zip(list(ws.columns)[0], list(ws.columns)[1]):
        proxies_dict = {}
        # Skip the header row written by proxies_save()
        if x.value == "类型":
            continue
        proxies_dict[x.value] = y.value
        proxies_lists.append(proxies_dict)
    return proxies_lists

3.测试IP池

def proxies_test(read_list):
    """Try each proxy from the pool against baidu.com and report its status.

    read_list: list of dicts in requests' proxies format; results are only
    printed, nothing is returned.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    }

    for proxies in read_list:
        try:
            # timeout=0.2: only proxies that answer almost instantly pass
            response = requests.get("http://www.baidu.com", headers=headers, proxies=proxies, timeout=0.2)
            if response.status_code == 200:
                print("{}请求成功 ".format(proxies))
        # Catch only request failures; a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit and hide real bugs.
        except requests.RequestException:
            print("{} is bad proxies ".format(proxies))

🍥具体代码实现

这是两篇代码的集合,可以按方法分解出来。

import time
import requests
import parsel
import openpyxl

def proxies_get(page, proxies_list):
    """Scrape one page of free proxies from kuaidaili and append them to proxies_list.

    Each appended entry is {scheme: "ip:port"}, e.g. {"http": "1.2.3.4:8080"},
    ready to be passed as requests' `proxies=` argument.
    """
    url = "https://www.kuaidaili.com/free/inha/{}/".format(page)
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    # Parse the table rows that hold the proxy data
    html = parsel.Selector(response.text)
    tr = html.css('tbody tr')
    for td in tr:
        ip_data = td.css('td[data-title*="IP"]::text').get()
        port_data = td.css('td[data-title*="PORT"]::text').get()
        type_data = td.css('td[data-title*="类型"]::text').get()
        # Skip incomplete rows rather than storing None keys/values
        if not (ip_data and port_data and type_data):
            continue
        # requests matches proxy-dict keys against the lowercase URL scheme,
        # so an "HTTP" key would never match and the proxy would silently be
        # ignored — store the key lowercased.
        proxies_list.append({type_data.lower(): "{}:{}".format(ip_data, port_data)})


def check_ip(proxies_list):
    """Return the subset of proxies_list that reaches baidu.com within 0.2s.

    Each element is a dict in requests' proxies format. Failed proxies are
    printed with their error and dropped — best-effort filtering, so no
    request exception escapes this function.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    }
    # Collect the proxies that pass the check
    proxies_ok = []
    for proxie in proxies_list:
        try:
            # timeout=0.2: only keep proxies that answer almost instantly
            response = requests.get("http://www.baidu.com", headers=headers, proxies=proxie, timeout=0.2)
            # HTTP 200 means the request went through this proxy successfully
            if response.status_code == 200:
                proxies_ok.append(proxie)
        # Narrowed from `Exception`: only network/request failures are
        # expected here; anything else should surface as a real bug.
        except requests.RequestException as e:
            # Report and discard the bad proxy
            print(proxie, e)
    return proxies_ok


def proxies_save(proxies_use):
    """Write the verified proxies to proxiesPool.xlsx, one row per entry.

    proxies_use: list of one-key dicts {type: "ip:port"}.
    """
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    # proxies_read() locates the sheet by this exact title
    sheet.title = "代理IP池"
    # Header row first, then one row per (type, address) pair
    sheet.append(['类型', '代理IP'])
    for entry in proxies_use:
        for proxy_type, address in entry.items():
            sheet.append([proxy_type, address])
    workbook.save("proxiesPool.xlsx")
    print("保存结束===================")


def proxies_read():
    """Load the proxy pool from proxiesPool.xlsx.

    Returns a list of one-key dicts {type: "ip:port"} rebuilt from the first
    two columns of the sheet, skipping the header row.
    """
    workbook = openpyxl.load_workbook("proxiesPool.xlsx")
    sheet = workbook["代理IP池"]
    # First column holds the proxy type, second the "ip:port" address
    type_col, addr_col = list(sheet.columns)[0], list(sheet.columns)[1]
    return [
        {type_cell.value: addr_cell.value}
        for type_cell, addr_cell in zip(type_col, addr_col)
        if type_cell.value != "类型"  # drop the header row
    ]


def proxies_test(read_list):
    """Try each proxy read back from the pool against baidu.com and print the result.

    read_list: list of dicts in requests' proxies format; nothing is returned.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"
    }

    for proxies in read_list:
        try:
            # timeout=0.2: only proxies that answer almost instantly pass
            response = requests.get("http://www.baidu.com", headers=headers, proxies=proxies, timeout=0.2)
            if response.status_code == 200:
                print("{}请求成功 ".format(proxies))
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and mask real bugs.
        except requests.RequestException:
            print("{} is bad proxies ".format(proxies))


# proxies_list collects every scraped (still unfiltered) proxy entry
proxies_list = []
# Scrape pages 1..10
for page_number in range(1, 11):
    # Brief pause between pages so we look less like a bot
    time.sleep(1)
    proxies_get(page_number, proxies_list)
    print("==============第{}页数据爬取完毕==============".format(page_number))

# Show the raw pool before filtering
print(proxies_list, "共有代理IP:{}个".format(len(proxies_list)))
print("===============筛选代理IP================")
# Keep only the proxies that actually respond
proxies_use = check_ip(proxies_list)
print(proxies_use, "共有合格代理IP:{}个".format(len(proxies_use)))
proxies_save(proxies_use)
# Once proxiesPool.xlsx exists, the scraping steps above can be commented
# out and only the read/test steps below need to run.
read_list = proxies_read()
proxies_test(read_list)
print("测试完成")

🍢总结

其实筛选代理的那一步可以省去,直接存入xlsx中,读取IP池的时候筛选,看大家喜好。


一个可爱的人