034 Scrapy: setting a random request header and proxy

一、Setting a random request header

A list of browser User-Agent strings is available at: http://www.useragentstring.com/pages/useragentstring.php?typ=Browser

1、Add the middleware

import random


class UserAgentDownloaderMiddleware(object):
    USER_AGENT = [
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)",
        "Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB5; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)",
        "Mozilla/4.0 (compatible; Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729); Windows NT 5.1; Trident/4.0)",
        "Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.0; Windows NT 6.0; Trident/5.0)",
        "Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.1; AOLBuild 4334.5012; Windows NT 6.0; WOW64; Trident/5.0)",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20100121 Firefox/3.5.6 Wyzo/3.5.6.1",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 551; QQDownload 661; TencentTraveler 4.0; (R1 1.5))",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.1; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322; Sleipnir/2.9.2)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.1; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322; Sleipnir/2.9.2)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.24 (KHTML, like Gecko) RockMelt/0.9.58.494 Chrome/11.0.696.71 Safari/534.24",
    ]

    def process_request(self, request, spider):
        # Pick a random User-Agent from the list for each outgoing request
        user_agent = random.choice(self.USER_AGENT)
        request.headers['User-Agent'] = user_agent
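
To confirm the header really rotates, a minimal optional variation of the same method (not in the original code) logs the chosen value through the spider's logger; returning None from process_request lets the request continue through the remaining middlewares:

    def process_request(self, request, spider):
        user_agent = random.choice(self.USER_AGENT)
        request.headers['User-Agent'] = user_agent
        # Log which User-Agent was picked for this request
        spider.logger.debug('Using User-Agent: %s', user_agent)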

2、Enable the middleware in settings

# Do not obey robots.txt
ROBOTSTXT_OBEY = False

# Delay each request by 2 seconds
DOWNLOAD_DELAY = 2

# Default request headers
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
}

# Enable the middleware
DOWNLOADER_MIDDLEWARES = {
   'xxxx.middlewares.UserAgentDownloaderMiddleware': 543,
}
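
For reference, Scrapy's built-in scrapy.downloadermiddlewares.useragent.UserAgentMiddleware is registered at priority 500, so any custom value above 500 (543 here) runs after it on outgoing requests and its header wins. If preferred, the built-in one can be disabled outright, as in this sketch ('xxxx' is still a placeholder project name):

DOWNLOADER_MIDDLEWARES = {
   'xxxx.middlewares.UserAgentDownloaderMiddleware': 543,
   # Setting a built-in middleware to None disables it
   'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}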

3、Spider file

# -*- coding: utf-8 -*-
import scrapy
import json

class UseragentSpider(scrapy.Spider):
    name = 'useragent'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/user-agent']

    def parse(self, response):
        print('*'*30)
        print(json.loads(response.text)['user-agent'])
        print('*'*30)
        # dont_filter=True disables duplicate filtering so the same URL can be requested again
        yield scrapy.Request(url=self.start_urls[0], dont_filter=True)

4、Run the spider

#-*- coding:utf-8 -*-
from scrapy import cmdline

cmdline.execute("scrapy crawl useragent".split())

二、Setting a proxy IP

1、Add the middleware

import random


class IPProxyDownloaderMiddleware(object):
    IP_PROXY = ['49.70.85.116:9999', '117.69.201.223:9999', '60.13.42.94:9999']

    def process_request(self, request, spider):
        # Pick a random proxy; Scrapy expects meta['proxy'] to be a full URL including the scheme
        proxy = random.choice(self.IP_PROXY)
        request.meta['proxy'] = 'http://' + proxy
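
Free proxies like these go offline frequently. One optional addition (a sketch, not in the original code) is to handle download errors in the same middleware and reschedule the failed request with a different proxy:

    def process_exception(self, request, exception, spider):
        # The download failed (e.g. a dead proxy): swap in another proxy and retry
        request.meta['proxy'] = 'http://' + random.choice(self.IP_PROXY)
        # Returning a Request tells Scrapy to re-download it
        return request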

2、Enable the middleware in settings

# Do not obey robots.txt
ROBOTSTXT_OBEY = False

# Delay each request by 2 seconds
DOWNLOAD_DELAY = 2

# Default request headers
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
}

# Enable the middleware
DOWNLOADER_MIDDLEWARES = {
   'xxxx.middlewares.IPProxyDownloaderMiddleware': 200,
}

3、Spider file

# -*- coding: utf-8 -*-
import scrapy
import json

class IpproxySpider(scrapy.Spider):
    name = 'ipproxy'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/ip']

    def parse(self, response):
        print('*'*30)
        print(json.loads(response.text)['origin'])
        print('*'*30)
        # dont_filter=True disables duplicate filtering so the same URL can be requested again
        yield scrapy.Request(url=self.start_urls[0], dont_filter=True)
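
The proxy spider is started the same way as in section 一; a runner script, assuming the spider name 'ipproxy':

# -*- coding: utf-8 -*-
from scrapy import cmdline

cmdline.execute("scrapy crawl ipproxy".split())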

Random request header (my own version)

from fake_useragent import UserAgent


class UserAgentDownloaderMiddleware(object):

    def process_request(self, request, spider):
        user_agent = UserAgent().random
        request.headers['User-Agent'] = user_agent
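
fake_useragent is a third-party package (pip install fake-useragent) that supplies real browser User-Agent strings. Building the UserAgent() object is relatively slow, so a reasonable variation, sketched below, constructs it once in __init__ and reuses it for every request:

from fake_useragent import UserAgent


class UserAgentDownloaderMiddleware(object):

    def __init__(self):
        # Build the User-Agent database once instead of on every request
        self.ua = UserAgent()

    def process_request(self, request, spider):
        request.headers['User-Agent'] = self.ua.random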

IP proxy (my own version)

import random


class ProxyMiddleware(object):

    def proxy1(self):
        # Proxy server
        proxyHost = "u2999.10.tn.16yun.cn"
        proxyPort = "6442"

        # Proxy tunnel authentication
        proxyUser = "16VNOVSV"
        proxyPass = "050601"

        proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
            "host": proxyHost,
            "port": proxyPort,
            "user": proxyUser,
            "pass": proxyPass,
        }
        proxy = {
            "http": proxyMeta,
            "https": proxyMeta,
        }
        return proxy

    def proxy2(self):
        # Proxy server
        proxyHost = "http-dynamic.xiaoxiangdaili.com"
        proxyPort = "10030"

        # Proxy tunnel authentication
        proxyUser = "575876117614186496"
        proxyPass = "W7ZcVA7T"

        proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
            "host": proxyHost,
            "port": proxyPort,
            "user": proxyUser,
            "pass": proxyPass,
        }
        proxy = {
            "http": proxyMeta,
            "https": proxyMeta,
        }
        return proxy

    def proxy3(self):
        # Proxy server
        proxyHost = "tps123.kdlapi.com"
        proxyPort = "15818"

        # Proxy tunnel authentication
        proxyUser = "t18890465330712"
        proxyPass = "t94bsds3"

        proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
            "host": proxyHost,
            "port": proxyPort,
            "user": proxyUser,
            "pass": proxyPass,
        }
        proxy = {
            "http": proxyMeta,
            "https": proxyMeta,
        }
        return proxy

    def process_request(self, request, spider):
        # Pick one of the three tunnel proxies at random
        proxy = random.choice([self.proxy1, self.proxy2, self.proxy3])()
        if request.url.startswith("http://"):
            request.meta['proxy'] = proxy["http"]
        elif request.url.startswith("https://"):
            request.meta['proxy'] = proxy["https"]
        # print(request.meta)
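
As with the earlier middlewares, this one still has to be registered in settings.py; a sketch assuming the placeholder project name 'xxxx', with only one proxy middleware enabled at a time:

DOWNLOADER_MIDDLEWARES = {
   'xxxx.middlewares.ProxyMiddleware': 200,
}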