Scrapy: scraping Tencent job postings into MySQL – using Items

# the spider (by Scrapy convention: tencent/spiders/tencenthr.py)
import scrapy
import jsonpath
# import the Item class defined in items.py below
from tencent.items import TencentItem

class TencenthrSpider(scrapy.Spider):
    name = 'tencenthr'
    allowed_domains = ['careers.tencent.com']
    # the JSON API endpoint behind the careers page
    start_urls = ['https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1640657047807&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword=&pageIndex=1&pageSize=10&language=zh-cn&area=cn']

    def start_requests(self):
        # start_requests overrides start_urls above; pageIndex is 1-based
        # (the start_urls entry uses pageIndex=1), so crawl just the first page
        for i in range(1, 2):
            url = f'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1640657047807&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword=&pageIndex={i}&pageSize=10&language=zh-cn&area=cn'
            yield scrapy.Request(url=url, callback=self.parse)
            # url: the address to request
            # callback: the engine passes the downloaded page (a Response
            # object) to this method for parsing; any method can be named
            # here, parse is just the default callback

    def parse(self, response):
        # parse the JSON body of the response
        html = response.json()
        # extract fields with jsonpath ($.. is the recursive-descent operator)
        title = jsonpath.jsonpath(html, '$..RecruitPostName')
        # print(title)
        LocationName = jsonpath.jsonpath(html, '$..LocationName')
        # print(LocationName)
        LastUpdateTime = jsonpath.jsonpath(html, '$..LastUpdateTime')
        # print(LastUpdateTime)
        # pack each record into the custom Item
        for i in range(len(title)):
            # print(title[i], LocationName[i], LastUpdateTime[i])
            # TencentItem rejects undeclared field names, which catches
            # typos in the scraped fields early
            item = TencentItem()
            item["title"] = title[i]
            item["LocationName"] = LocationName[i]
            item["LastUpdateTime"] = LastUpdateTime[i]
            print(item)
            # hand the item to the pipelines, which write it to MySQL
            yield item
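
For reference, the endpoint returns JSON roughly shaped like the sketch below. Only the three field names are taken from the spider above; the Code/Data/Posts envelope and the values are assumptions for illustration. jsonpath's recursive-descent operator $.. collects every occurrence of a key, however deeply it is nested:

import jsonpath

# assumed response shape -- illustrative values only
html = {
    "Code": 0,
    "Data": {
        "Count": 2,
        "Posts": [
            {"RecruitPostName": "Backend Engineer", "LocationName": "Shenzhen", "LastUpdateTime": "2021-12-28"},
            {"RecruitPostName": "Data Analyst", "LocationName": "Beijing", "LastUpdateTime": "2021-12-27"},
        ],
    },
}

print(jsonpath.jsonpath(html, '$..RecruitPostName'))
# ['Backend Engineer', 'Data Analyst']

Run the spider as usual with: scrapy crawl tencenthr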

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class TencentItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # fields consumed by the tencenthr spider
    title = scrapy.Field()
    LocationName = scrapy.Field()
    LastUpdateTime = scrapy.Field()
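
Why build a TencentItem instead of a plain dict? An Item behaves like a dict but only accepts the fields declared above, so a typo in a field name raises immediately instead of silently producing a bad column. A minimal sketch:

from tencent.items import TencentItem

item = TencentItem()
item["title"] = "Backend Engineer"   # declared field: accepted
# item["salary"] = 10000             # KeyError: TencentItem does not support field: salary
print(dict(item))                    # {'title': 'Backend Engineer'}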




# Scrapy settings for tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tencent'

SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
from fake_useragent import UserAgent   # random User-Agent header
# note: .random is sampled once, when the settings module loads; see the
# per-request middleware sketch after this file
USER_AGENT = UserAgent(verify_ssl=False).random



# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32


# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'tencent.middlewares.TencentSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'tencent.middlewares.TencentDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   # lower value = higher priority: mysqlPipeline (299) runs before TencentPipeline (300)
   'tencent.pipelines.mysqlPipeline': 299,
   'tencent.pipelines.TencentPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
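
Because USER_AGENT above is evaluated once, every request in the run shares the same header. For a fresh User-Agent per request, a downloader middleware is the usual home. A minimal sketch, assuming it lives in tencent/middlewares.py (the class name and priority below are hypothetical):

from fake_useragent import UserAgent

class RandomUserAgentMiddleware:
    def __init__(self):
        self.ua = UserAgent()   # build the UA pool once

    def process_request(self, request, spider):
        # sample a new UA for every outgoing request
        request.headers['User-Agent'] = self.ua.random

Enable it in settings.py:

DOWNLOADER_MIDDLEWARES = {
    'tencent.middlewares.RandomUserAgentMiddleware': 543,
}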


# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
import pymysql
from itemadapter import ItemAdapter
from tencent.items import TencentItem


# Dispatch on item type: check which spider / Item class each item came from
class TencentPipeline:
    def process_item(self, item, spider):
        if isinstance(item, TencentItem):
            # collections.insert(dict(item))  # leftover from a MongoDB variant;
            # 'collections' is never defined in this project, so it stays disabled
            print(item)
        return item

# MySQL pipeline -- remember to register it in ITEM_PIPELINES (settings.py)
class mysqlPipeline(object):
    def open_spider(self, spider):
        # connect to the database once, when the spider starts
        self.conn = pymysql.connect(host='127.0.0.1', user='root',
                                    passwd='Sooele0000%', db='scrapy')

    def process_item(self, item, spider):
        # insert one row into the chengjiao table; parameterized to avoid
        # SQL injection and quoting bugs
        with self.conn.cursor() as cursor:
            cursor.execute(
                "INSERT INTO chengjiao (title, LocationName, LastUpdateTime) "
                "VALUES (%s, %s, %s)",
                (item['title'], item['LocationName'], item['LastUpdateTime'])
            )
        self.conn.commit()  # persist the insert
        return item

    def close_spider(self, spider):
        self.conn.close()  # close the connection when the spider finishes
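
The pipeline assumes the chengjiao table already exists in the scrapy database. A one-off setup sketch (the column names match the Item fields; the column types are assumptions, adjust them to your data):

import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', passwd='Sooele0000%', db='scrapy')
with conn.cursor() as cur:
    cur.execute(
        "CREATE TABLE IF NOT EXISTS chengjiao ("
        " id INT AUTO_INCREMENT PRIMARY KEY,"
        " title VARCHAR(255),"
        " LocationName VARCHAR(255),"
        " LastUpdateTime VARCHAR(64)"
        ") CHARACTER SET utf8mb4"
    )
conn.commit()
conn.close()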