Scrapy Distributed Crawler: Redis Queue + MySQL Writes

Original article (reference): https://www.jianshu.com/p/0be9eedd50f6

Total crawled: 5325 items (at 828 items/min), which matches the 5325 rows written to MySQL.

Push the start URL into Redis to kick off the crawl:

lpush book:start_urls https://www.amazon.cn/s?k=%E5%9B%BE%E4%B9%A6&i=stripbooks&bbn=658390051&rh=n%3A658390051&dc&qid=1641542379&ref=sr_ex_n_1

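The same push can be done from Python with redis-py; a minimal sketch, using the placeholder Redis host and password that appear in settings.py below:

# push_start_url.py - minimal sketch; host/password are the placeholders from settings.py
import redis

r = redis.Redis(host='*****', port=6379, password='redissooele', db=0)
# RedisSpider reads its start URLs from the list named by redis_key ('book:start_urls')
r.lpush('book:start_urls',
        'https://www.amazon.cn/s?k=%E5%9B%BE%E4%B9%A6&i=stripbooks&bbn=658390051'
        '&rh=n%3A658390051&dc&qid=1641542379&ref=sr_ex_n_1')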

# dangdang/spiders/book.py
import scrapy
from copy import deepcopy
from dangdang.items import DangdangItem
from scrapy_redis.spiders import RedisSpider


class BookSpider(RedisSpider):
    name = 'book'
    allowed_domains = ['dangdang.com']
    # start_urls = ['http://book.dangdang.com/']
    redis_key = 'book:start_urls'

    def parse(self, response):
        # Category page: walk first-level -> second-level -> third-level book categories
        for div in response.css('div.level_one'):
            item = DangdangItem()

            item['一级分类'] = div.xpath('dl/dt//text()').getall()
            if item['一级分类']:
                item['一级分类'] = ' '.join([i.strip() for i in item['一级分类'] if i.strip()])
                item['一级分类'] = item['一级分类'].replace('\xa0', ' ')

                for dl in div.css('dl.inner_dl'):
                    item['二级分类'] = dl.xpath('dt//text()').getall()
                    item['二级分类'] = ''.join([i.strip() for i in item['二级分类'] if i.strip()])

                    for a in dl.css('dd > a'):
                        item['三级分类'] = a.xpath('@title').get()
                        # Follow each third-level category link; deepcopy gives every
                        # request its own snapshot of the category fields
                        yield scrapy.Request(
                            url=a.xpath('@href').get(),
                            callback=self.parse_book,
                            meta={'item': deepcopy(item)}
                        )

    def parse_book(self, response):
        # Book list page: fill the per-book fields on top of the category item
        item = response.meta.get('item')
        for li in response.xpath('//ul[@class="list_aa "]/li'):
            item['图片名称'] = li.xpath('p[@class="name"]/a/text()').get()
            if item['图片名称']:
                item['图片名称'] = item['图片名称'].replace('\xa0', ' ')

            item['图片链接'] = li.xpath('a[@class="img"]/img/@src').get()
            # Lazily loaded covers use a placeholder src; the real URL is in data-original
            if item['图片链接'] == 'images/model/guan/url_none.png':
                item['图片链接'] = li.xpath('a[@class="img"]/img/@data-original').get()

            item['图书价格'] = li.xpath('string(p[@class="price"]/span[1])').get()
            if item['图书价格']:
                # The page emits the HTML entity for the yuan sign
                item['图书价格'] = item['图书价格'].replace('&yen', '¥')
            yield item
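
Every worker node runs the same spider and then idles until a URL is pushed to book:start_urls. The usual "scrapy crawl book" does this; as a sketch, the same spider can also be started programmatically (the import path assumes the spider file is named book.py):

# run_book.py - optional alternative to "scrapy crawl book" on a worker node
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from dangdang.spiders.book import BookSpider  # assumes the spider lives in spiders/book.py

process = CrawlerProcess(get_project_settings())
process.crawl(BookSpider)
process.start()  # blocks; the spider waits for URLs pushed to book:start_urls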

# dangdang/items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class DangdangItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    一级分类 = scrapy.Field()
    二级分类 = scrapy.Field()
    三级分类 = scrapy.Field()
    图片名称 = scrapy.Field()
    图片链接 = scrapy.Field()
    图书价格 = scrapy.Field()
    pass
# dangdang/middlewares.py (only the custom middleware is shown)
from scrapy import signals

from fake_useragent import UserAgent   # random User-Agent strings


class RandomUserAgentMiddleware(object):
    """Attach a random browser User-Agent to every outgoing request."""

    def __init__(self):
        self.ua = UserAgent()  # build the UA pool once instead of per request

    def process_request(self, request, spider):
        # Assigning to request.headers['User-Agent'] is all that is needed;
        # print(self.ua.random) can be used to inspect the chosen UA.
        request.headers['User-Agent'] = self.ua.random

 
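fake_useragent builds its pool of browser strings the first time UserAgent() is created, so it is worth a quick standalone check before wiring it into the middleware; a throwaway sketch:

# ua_check.py - standalone check that fake_useragent returns varied User-Agent strings
from fake_useragent import UserAgent

ua = UserAgent()
for _ in range(3):
    print(ua.random)  # a (usually different) browser User-Agent on each call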

# dangdang/pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class DangdangPipeline:
    def process_item(self, item, spider):
        return item

import pymysql


class mysqlPipeline(object):
    # Open the MySQL connection when the spider starts
    def open_spider(self, spider):
        self.conn = pymysql.connect(host='******', user='scrapy', passwd='scrapy',
                                    db='scrapy', charset='utf8mb4')

    # Insert every item into the dangdang table
    def process_item(self, item, spider):
        sql = ("INSERT INTO dangdang(一级分类,二级分类,三级分类,图片名称,图片链接,图书价格) "
               "VALUES (%s, %s, %s, %s, %s, %s)")
        with self.conn.cursor() as cursor:
            # parameterized query instead of string formatting
            cursor.execute(sql, (item['一级分类'], item['二级分类'], item['三级分类'],
                                 item['图片名称'], item['图片链接'], item['图书价格']))
        self.conn.commit()
        return item

    # Close the connection when the spider finishes
    def close_spider(self, spider):
        self.conn.close()
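
The pipeline assumes a dangdang table already exists in the scrapy database. A minimal sketch of creating it; the column types and lengths are assumptions, since the original schema is not shown:

# create_table.py - one-off helper; the schema below is an assumption, adjust as needed
import pymysql

conn = pymysql.connect(host='******', user='scrapy', passwd='scrapy',
                       db='scrapy', charset='utf8mb4')
with conn.cursor() as cursor:
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS dangdang (
            id INT AUTO_INCREMENT PRIMARY KEY,
            一级分类 VARCHAR(255),
            二级分类 VARCHAR(255),
            三级分类 VARCHAR(255),
            图片名称 VARCHAR(255),
            图片链接 VARCHAR(512),
            图书价格 VARCHAR(64)
        ) DEFAULT CHARSET = utf8mb4
    """)
conn.commit()
conn.close()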

 

 

# dangdang/settings.py
# Scrapy settings for dangdang project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'dangdang'

SPIDER_MODULES = ['dangdang.spiders']
NEWSPIDER_MODULE = 'dangdang.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'dangdang (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'dangdang.middlewares.RandomUserAgentMiddleware': 300,
   'dangdang.middlewares.DangdangDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'dangdang.pipelines.DangdangPipeline': 300,
   'scrapy_redis.pipelines.RedisPipeline': 400,
   'dangdang.pipelines.mysqlPipeline': 401,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


### Use the scrapy_redis dupefilter so request fingerprints are shared in Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
### Use the scrapy_redis scheduler so the request queue lives in Redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
### Persist the queue and dupefilter between runs; if False they are cleared
### from Redis when the spider closes
SCHEDULER_PERSIST = True
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"
LOG_LEVEL = 'DEBUG'

# Redis connection shared by every crawl node
REDIS_HOST = '*****'
REDIS_PORT = 6379
REDIS_PARAMS = {
    'password': 'redissooele',
    'db': 0
}
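
After a run, the scrapy_redis state can be inspected directly in Redis. A minimal sketch; the key names follow the scrapy_redis defaults, which are derived from the spider name book:

# inspect_redis.py - minimal sketch; host/password are the placeholders from settings.py
import redis

r = redis.Redis(host='*****', port=6379, password='redissooele', db=0)
print(r.llen('book:items'))        # items serialized by scrapy_redis.pipelines.RedisPipeline
print(r.scard('book:dupefilter'))  # request fingerprints kept by RFPDupeFilter
print(r.zcard('book:requests'))    # pending requests (a sorted set with the default priority queue)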