Web crawler: the Scrapy framework, pymongo storage, and distributed crawling with scrapy-redis

This post is built around scraping Douban movies with Scrapy. It covers writing the spider, items, and pipelines files, writing the downloader middlewares and anti-scraping settings (random User-Agent and proxy IP rotation), and writing the settings file. It then shows how to store the data in MongoDB with pymongo and how to run the crawl as a distributed job with scrapy-redis, closing with an example of the commands used to launch the distributed crawl.



##1. Writing the spider file

import scrapy
from test1.items import Test1Item
from scrapy_redis.spiders import RedisCrawlSpider


class QiushibaikeSpider(RedisCrawlSpider):
    # note: with a hand-written parse() and no rules, scrapy_redis's RedisSpider
    # would also work as the base class
    name = 'qiushibaike'
    # the spider idles until a start URL is pushed to this Redis list
    redis_key = 'myspider:start_urls'
    # start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        # each <li> in the ranking list is one movie
        movie_list = response.xpath('//*[@id="content"]/div/div[1]/ol/li')
        for movie in movie_list:
            item = Test1Item()
            item['name'] = movie.xpath('.//div/div[2]/div[1]/a/span[1]/text()').extract_first()
            item['star'] = movie.xpath('.//div/div[2]/div[2]/div/span[2]/text()').extract_first()
            item['comment'] = movie.xpath('.//div/div[2]/div[2]/p[2]/span/text()').extract_first()
            yield item
        # keep following the "next page" link until it disappears on the last page
        next_page = response.xpath('//*[@id="content"]/div/div[1]/div[2]/span[3]/a/@href').extract_first()
        if next_page:
            yield scrapy.Request('https://movie.douban.com/top250' + next_page, callback=self.parse)
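
The next-page href is a relative query string (for example "?start=25&filter="), so the string concatenation above happens to produce a valid URL. A slightly more robust variant of the last three lines, shown here as a sketch rather than the original code, lets Scrapy resolve the relative link itself:

        next_page = response.xpath('//*[@id="content"]/div/div[1]/div[2]/span[3]/a/@href').extract_first()
        if next_page:
            # response.follow() (Scrapy >= 1.4) joins the relative href with the current page URL
            yield response.follow(next_page, callback=self.parse)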

##2. Writing the items file

import scrapy


class Test1Item(scrapy.Item):
    # fields for one scraped movie: title, rating score, and the short quote line
    name = scrapy.Field()
    star = scrapy.Field()
    comment = scrapy.Field()

##3. Writing the pipelines file

import pymongo
# Remember to enable this pipeline in ITEM_PIPELINES in settings.py.
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


class Test1Pipeline(object):
    def __init__(self):
        # connect to the local MongoDB server and pick the target collection
        client = pymongo.MongoClient('localhost', 27017)
        database = client['douban']
        self.collection = database['table']

    def process_item(self, item, spider):
        # insert_one() replaces the deprecated Collection.insert()
        data = dict(item)
        self.collection.insert_one(data)
        return item

##4. Writing the middlewares: proxy IP and User-Agent rotation for anti-scraping

# -*- coding: utf-8 -*-
import random
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class Test1SpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class Test1DownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)



user_agent_list = [
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
        'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)',
        'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.3 Mobile/14E277 Safari/603.1.30',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    ]

PROXIES = ['http://61.135.217.7:80', 'http://219.141.153.41:80', 'http://106.56.102.18:8070',
           'http://183.167.217.152:63000', 'http://59.32.37.112:61234', 'http://118.190.95.43:9001']

class My_agent(object):
    """Downloader middleware: attach a random User-Agent to every request."""

    def process_request(self, request, spider):
        agent = random.choice(user_agent_list)
        # the standard header name is 'User-Agent' (hyphen, not underscore)
        request.headers['User-Agent'] = agent


class My_ip(object):
    """Downloader middleware: send each request through a random proxy."""

    def process_request(self, request, spider):
        ip = random.choice(PROXIES)
        # HttpProxyMiddleware looks for the lowercase 'proxy' meta key
        request.meta['proxy'] = ip
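
A quick standalone check, outside of a full crawl, that the two classes set what we expect on a request. This is only a sketch and assumes it is run from the project root so that test1.middlewares is importable:

from scrapy.http import Request
from test1.middlewares import My_agent, My_ip

req = Request('https://movie.douban.com/top250')
My_agent().process_request(req, spider=None)
My_ip().process_request(req, spider=None)
print(req.headers.get('User-Agent'))  # a random entry from user_agent_list (as bytes)
print(req.meta.get('proxy'))          # a random entry from PROXIES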

##5. Writing the settings file

# -*- coding: utf-8 -*-

# Scrapy settings for test1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'test1'

SPIDER_MODULES = ['test1.spiders']
NEWSPIDER_MODULE = 'test1.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'test1.middlewares.Test1SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'test1.middlewares.My_agent': 543,
    'test1.middlewares.My_ip': 544,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# Keep the scrapy_redis RedisPipeline enabled: items first pass through our own
# pipeline (priority 300) and are then also written to Redis (priority 500).
ITEM_PIPELINES = {
    'test1.pipelines.Test1Pipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 500,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Persist the scheduler and dupefilter state in Redis (do not flush it when the
# spider closes), so a crawl can be paused and resumed.
SCHEDULER_PERSIST = True
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Schedule requests with scrapy_redis's priority queue (its default queue class).
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'

# Redis server address and port (optional; scrapy_redis defaults to localhost:6379)
#REDIS_HOST = 'localhost'
#REDIS_PORT = 6379
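
With this configuration, scrapy_redis keeps its state in per-spider Redis keys; by default the request queue, the dupefilter, and the RedisPipeline output live under "<spider>:requests", "<spider>:dupefilter", and "<spider>:items". A small sketch with the redis-py client to peek at them during or after a run, assuming Redis on the default localhost:6379 and these default key names:

import redis

r = redis.StrictRedis(host='localhost', port=6379)
print(r.zcard('qiushibaike:requests'))    # pending requests (sorted set used by the priority queue)
print(r.scard('qiushibaike:dupefilter'))  # fingerprints of requests already seen
print(r.llen('qiushibaike:items'))        # items serialized by RedisPipeline

Because SCHEDULER_PERSIST = True, these keys survive a Ctrl-C, which is what allows the crawl to be paused and resumed.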


##6. Launching the distributed crawl: example commands
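The original post showed the launch commands as a screenshot, which is not preserved here. As a minimal sketch of the usual scrapy-redis launch sequence for this project: start the spider on every crawler node with `scrapy crawl qiushibaike` (each node idles, waiting for work), then push the first URL into the redis_key list from any machine that can reach the Redis server:

import redis

r = redis.StrictRedis(host='localhost', port=6379)  # the Redis instance shared by all nodes
r.lpush('myspider:start_urls', 'https://movie.douban.com/top250')

The same push can be done from redis-cli with `lpush myspider:start_urls https://movie.douban.com/top250`. Every idle node then pops requests from the shared queue, and the Redis-backed dupefilter keeps two nodes from fetching the same page.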

##7. A Redis-to-MongoDB pipeline and settings variant

In Scrapy, Redis can serve as the shared cache and scheduling layer for a distributed crawl while the scraped data is ultimately written to MongoDB. Below is a simple example of that Redis-to-MongoDB architecture, covering the pipeline and the settings. First, the pipeline:

import pymongo
from pymongo.errors import ConnectionFailure, DuplicateKeyError

from scrapy import signals
from scrapy.exceptions import DropItem


class RedisToMongoPipeline:
    def __init__(self, mongodb_uri, collection_name):
        self.client = pymongo.MongoClient(mongodb_uri)
        self.collection_name = collection_name

    @classmethod
    def from_crawler(cls, crawler):
        # read the MongoDB connection string and collection name from settings
        pipeline = cls(
            crawler.settings.get('MONGODB_URI'),
            crawler.settings.get('REDIS_MONGO_COLLECTION_NAME'),
        )
        crawler.signals.connect(pipeline.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(pipeline.spider_closed, signal=signals.spider_closed)
        return pipeline

    def spider_opened(self, spider):
        # use the spider name as the database name
        self.db = self.client[spider.name]
        self.collection = self.db[self.collection_name]

    def process_item(self, item, spider):
        try:
            self.collection.insert_one(dict(item))
        except ConnectionFailure:
            raise DropItem("Unable to connect to MongoDB")
        except DuplicateKeyError:
            # an identical entry already exists in MongoDB, skip it
            spider.logger.info("Duplicate item found, skipping: %r", item)
        return item

    def spider_closed(self, spider):
        self.client.close()

Next, the related additions to settings.py:

# Redis (scrapy_redis also accepts a full connection string via REDIS_URL)
REDIS_URL = "redis://your_redis_url"  # replace with your actual Redis URL
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# MongoDB
MONGODB_URI = "mongodb+srv://username:[email protected]/your_db?retryWrites=true&w=majority"  # replace with your MongoDB connection string
REDIS_MONGO_COLLECTION_NAME = "scrapy_items"  # collection that stores the scraped items

ITEM_PIPELINES = {
    'your_project.pipelines.RedisToMongoPipeline': 300,
}

Remember to replace the placeholders above (your_redis_url, username, password, your_db, your_project, and so on) with your real connection details.