sun.py (the spider)
import scrapy
from sun0769.items import Sun0769Item
import time


class SunSpider(scrapy.Spider):
    name = 'sun'
    allowed_domains = ['wz.sun0769.com']
    start_urls = ['https://wz.sun0769.com/political/index/search?keyword=%E6%8A%95%E8%AF%89&page=1']

    def parse(self, response):
        # Pause 2 seconds before parsing each listing page (a crude throttle;
        # note this blocks, so DOWNLOAD_DELAY in settings is usually preferable).
        time.sleep(2)
        # Each <li> in the listing holds one complaint row.
        html_list = response.xpath("//ul[@class='title-state-ul']/li")
        for list_ul in html_list:
            item = Sun0769Item()
            item["number"] = list_ul.xpath("./span[1]/text()").extract_first()
            item["status"] = list_ul.xpath("normalize-space(./span[2])").extract_first()
            item["title"] = list_ul.xpath("normalize-space(./span[3]/a)").extract_first()
            item["time_1"] = list_ul.xpath("normalize-space(./span[4])").extract_first()
            item["time_2"] = list_ul.xpath("normalize-space(./span[5])").extract_first()
            item["url"] = list_ul.xpath("normalize-space(./span[3]/a/@href)").extract_first()
            item["url"] = response.urljoin(item["url"])
            # Hand the detail-page URL back to Scrapy; parse_detail is passed
            # without parentheses (adding them would call it immediately), and
            # the partially filled item travels along in meta so parse_detail
            # can add the detail-page fields.
            yield scrapy.Request(url=item["url"], callback=self.parse_detail, meta={"item": item})

        # Pagination: follow the "next page" arrow only if it exists.
        next_url = response.xpath("//a[@class='arrow-page prov_rota']/@href").extract_first()
        if next_url is not None:
            # Feed the new listing page back into parse().
            yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse)

    def parse_detail(self, response):
        # Handle the detail page: pick up the item passed via meta and add the
        # complaint text plus the (optional) image URL.
        item = response.meta["item"]
        item["content"] = response.xpath("normalize-space(//div[@class='details-box']/pre)").extract_first()
        item["img_url"] = response.xpath("//div[@class='clear details-img-list Picture-img']/img/@src").extract_first()
        yield item
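To run the spider, `scrapy crawl sun` from the project root is enough. If you prefer launching it from a plain Python script (for example, to debug in an IDE), a minimal sketch using Scrapy's CrawlerProcess looks like this; the file name run.py is just a suggestion, not part of the original project:

# run.py - launch the spider without the scrapy CLI (hypothetical helper script)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == "__main__":
    # get_project_settings() picks up sun0769/settings.py, so the pipelines
    # and middlewares configured there apply exactly as with `scrapy crawl`.
    process = CrawlerProcess(get_project_settings())
    process.crawl("sun")   # spider name as defined in SunSpider.name
    process.start()        # blocks until the crawl finishes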
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql
import pymysql.cursors

from sun0769.settings import SQL_HOST, SQL_USER, SQL_PASSWD

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class mysqlPipeline(object):
    # open_spider() runs once when the spider starts: open the DB connection here.
    def open_spider(self, spider):
        self.conn = pymysql.connect(
            host=SQL_HOST,
            user=SQL_USER,
            passwd=SQL_PASSWD,
            db='scrapy',
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor,
        )

    def process_item(self, item, spider):
        # Insert the scraped fields into the sun0769 table. Parameterized
        # placeholders (%s) let the driver handle quoting, so titles or
        # content containing quotes no longer break the statement.
        sql = (
            "INSERT INTO sun0769 "
            "(number, status, title, time_1, time_2, url, content, img_url) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
        )
        with self.conn.cursor() as cursor:
            cursor.execute(sql, (
                item['number'], item['status'], item['title'], item['time_1'],
                item['time_2'], item['url'], item['content'], str(item['img_url']),
            ))
        self.conn.commit()
        return item  # pass the item on to the next pipeline

    # close_spider() runs once when the spider closes: release the connection.
    def close_spider(self, spider):
        self.conn.close()


class Sun0769Pipeline:
    def process_item(self, item, spider):
        # Settings can also be read here without importing the module, e.g.
        # spider.settings.get("SQL_USER")
        return item
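The pipeline assumes a database named scrapy with a sun0769 table already in place. The source never shows the schema, so the column types below are assumptions, and the connection values are placeholders mirroring the (masked) settings. A one-off sketch to create the table with pymysql:

# create_table.py - hypothetical one-off helper; column types are assumptions.
import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS sun0769 (
    id      INT AUTO_INCREMENT PRIMARY KEY,
    number  VARCHAR(32),
    status  VARCHAR(32),
    title   VARCHAR(512),
    time_1  VARCHAR(64),
    time_2  VARCHAR(64),
    url     VARCHAR(512),
    content TEXT,
    img_url VARCHAR(512)
) DEFAULT CHARSET=utf8mb4
"""

# host/user/passwd are placeholders; use the real values from settings.py.
conn = pymysql.connect(host='localhost', user='scrapy', passwd='scrapy',
                       db='scrapy', charset='utf8mb4')
try:
    with conn.cursor() as cursor:
        cursor.execute(DDL)
    conn.commit()
finally:
    conn.close()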
middlewares.py
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class Sun0769SpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class Sun0769DownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# Random User-Agent middleware: set a random User-Agent header on every request.
from fake_useragent import UserAgent


class RandomUserAgentMiddleware(object):
    def __init__(self):
        # Build the UserAgent pool once; creating it per request is wasteful.
        self.ua = UserAgent()

    def process_request(self, request, spider):
        request.headers['User-Agent'] = self.ua.random
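fake-useragent builds its User-Agent pool from browser-usage data it loads on first use, so constructing UserAgent() can fail or stall in an offline or restricted environment. A dependency-free alternative is a small hand-picked pool plus random.choice; the class name and UA strings below are my own illustration, not part of the project:

import random


class SimpleRandomUserAgentMiddleware(object):
    # Hypothetical drop-in replacement for RandomUserAgentMiddleware:
    # no external dependency, just a small hard-coded UA pool.
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15",
        "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0",
    ]

    def process_request(self, request, spider):
        # Overwrite the User-Agent header before the request is downloaded.
        request.headers['User-Agent'] = random.choice(self.USER_AGENTS)

To use it instead, point the DOWNLOADER_MIDDLEWARES entry in settings.py at sun0769.middlewares.SimpleRandomUserAgentMiddleware.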
items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class Sun0769Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    number = scrapy.Field()    # complaint number (span[1] in the listing row)
    status = scrapy.Field()    # processing status (span[2])
    title = scrapy.Field()     # complaint title (span[3]/a)
    time_1 = scrapy.Field()    # first timestamp column in the listing (span[4])
    time_2 = scrapy.Field()    # second timestamp column in the listing (span[5])
    url = scrapy.Field()       # detail-page URL
    content = scrapy.Field()   # complaint text from the detail page
    img_url = scrapy.Field()   # optional image URL from the detail page
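pipelines.py imports ItemAdapter "for handling different item types with a single interface". The standalone sketch below (not part of the project files) shows what that buys: the same accessor code works whether the spider yields a Sun0769Item or a plain dict.

from itemadapter import ItemAdapter
from sun0769.items import Sun0769Item

item = Sun0769Item(number="1", title="example title")
adapter = ItemAdapter(item)       # also accepts a plain dict
print(adapter.get("number"))      # "1"
print(adapter.get("content"))     # None - field declared but not filled yet
print(adapter.asdict())           # {'number': '1', 'title': 'example title'}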
settings.py
# Scrapy settings for sun0769 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

# Project name
BOT_NAME = 'sun0769'

# Where the spiders live / where new spiders are created
SPIDER_MODULES = ['sun0769.spiders']
NEWSPIDER_MODULE = 'sun0769.spiders'

# MySQL connection settings used by mysqlPipeline (host masked here)
SQL_HOST = '****'
SQL_USER = 'scrapy'
SQL_PASSWD = 'scrapy'

# Log level for console output
LOG_LEVEL = 'WARNING'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# from fake_useragent import UserAgent   # random UA can also be set here
# USER_AGENT = UserAgent(verify_ssl=False).random

# Obey robots.txt rules (on by default; set to False to ignore them)
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0),
# i.e. wait this many seconds before each request to the same site.
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'sun0769.middlewares.Sun0769SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'sun0769.middlewares.RandomUserAgentMiddleware': 300,
    'sun0769.middlewares.Sun0769DownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines (the number is the priority; lower runs first)
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'sun0769.pipelines.mysqlPipeline': 299,
    'sun0769.pipelines.Sun0769Pipeline': 298,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
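The commented lines in Sun0769Pipeline hint at an alternative to importing SQL_HOST and friends from the settings module: ask Scrapy for them at runtime. A minimal sketch of mysqlPipeline rewritten that way (the class name here is my own, not part of the original files):

import pymysql


class MysqlSettingsPipeline(object):
    # Hypothetical variant of mysqlPipeline that reads the DB credentials
    # from the running crawler's settings instead of importing sun0769.settings.
    def open_spider(self, spider):
        settings = spider.settings          # the project Settings object
        self.conn = pymysql.connect(
            host=settings.get('SQL_HOST'),
            user=settings.get('SQL_USER'),
            passwd=settings.get('SQL_PASSWD'),
            db='scrapy',
            charset='utf8mb4',
        )

    def process_item(self, item, spider):
        # Insert logic identical to mysqlPipeline would go here.
        return item

    def close_spider(self, spider):
        self.conn.close()

Reading settings this way keeps the pipeline usable even when the values are overridden per-run, e.g. with `scrapy crawl sun -s SQL_HOST=127.0.0.1`.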