37. Scrapy: handling pagination and scraping material data from the Hangzhou construction cost site
Published: 2019-06-19


1. Target URL: http://183.129.219.195:8081/bs/hzzjb/web/list

2. Pagination here is fairly simple: simulate a POST request whose form data carries the key parameters, and the server returns the next page of results. The cell-slicing extraction in the spider below is not ideal (it is older code); a better approach is to match the whole table with one XPath, take each row as a parent selector, and then match the child cells in a second pass (see the sketch right after this list).

3. The scraped results and the full project code are shown below.
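Before the project code, here is a minimal sketch of that row-wise XPath idea, assuming the page keeps the same table1 markup; the helper name parse_rows is illustrative, not part of the project:

# Sketch only: select each <tr> once, then read its <td> children,
# instead of slicing one flat list of cells (assumes table1 markup).
def parse_rows(response):
    for row in response.xpath("//table[@class='table1']//tr"):
        cells = [c.strip() for c in row.xpath("./td/text()").extract()]
        if len(cells) < 8:  # skip header or malformed rows
            continue
        yield {
            'district': cells[0],
            'category': cells[1],
            'material_name': cells[2],
            'version': cells[3],
            'unit': cells[4],
            'tax_information_price': cells[5],
            'except_tax_information_price': cells[6],
            'y_m': cells[7],
        }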
# hzzjb.py
# -*- coding: utf-8 -*-
import scrapy

from hzzjb_web.items import HzzjbWebItem


class HzzjbSpider(scrapy.Spider):
    name = 'hzzjb'
    allowed_domains = ['183.129.219.195']
    start_urls = ['http://183.129.219.195:8081/bs/hzzjb/web/list']

    custom_settings = {
        "DOWNLOAD_DELAY": 0.2,
        "ITEM_PIPELINES": {
            'hzzjb_web.pipelines.MysqlPipeline': 320,
        },
        "DOWNLOADER_MIDDLEWARES": {
            'hzzjb_web.middlewares.HzzjbWebDownloaderMiddleware': 500,
        },
    }

    def parse(self, response):
        rows = []
        try:
            # All <td> cells of the price table: 20 rows of 9 cells per page.
            tag_list = response.xpath("//table[@class='table1']//tr/td").extract()
            # Split the flat cell list into rows of 9.
            rows = [tag_list[i:i + 9] for i in range(0, 180, 9)]
        except Exception:
            print('———— unexpected site encoding! ————')

        for tag in rows:
            item = HzzjbWebItem()
            try:
                # District
                item['district'] = tag[0].replace('<td>', '').replace('</td>', '')
                # Category
                item['category'] = tag[1].replace('<td>', '').replace('</td>', '')
                # Material name
                item['material_name'] = tag[2].replace('<td>', '').replace('</td>', '')
                # Specification and model
                item['version'] = tag[3].replace('<td>', '').replace('</td>', '')
                # Unit
                item['unit'] = tag[4].replace('<td>', '').replace('</td>', '')
                # Information price, tax included
                item['tax_information_price'] = tag[5].replace('<td>', '').replace('</td>', '')
                # Information price, tax excluded
                item['except_tax_information_price'] = tag[6].replace('<td>', '').replace('</td>', '')
                # Year/month
                item['y_m'] = tag[7].replace('<td>', '').replace('</td>', '')
            except IndexError:
                continue
            yield item

        # Pagination: enqueue the remaining pages once, from the initial GET
        # response only; re-yielding them from every POST response would
        # duplicate the whole crawl endlessly since dont_filter=True.
        if response.request.method == 'POST':
            return
        for i in range(2, 5032):
            data = {
                'mtype': '2',
                '_query.nfStart': '',
                '_query.yfStart': '',
                '_query.nfEnd': '',
                '_query.yfEnd': '',
                '_query.dqstr': '',
                '_query.dq': '',
                '_query.lbtype': '',
                '_query.clmc': '',
                '_query.ggjxh': '',
                'pageNumber': '{}'.format(i),
                'pageSize': '',
                'orderColunm': '',
                'orderMode': '',
            }
            yield scrapy.FormRequest(
                url='http://183.129.219.195:8081/bs/hzzjb/web/list',
                callback=self.parse,
                formdata=data,
                method="POST",
                dont_filter=True,
            )
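Before hardcoding the page range 2 to 5031 into the spider, it is worth confirming that pageNumber alone drives the listing. A quick standalone check with the requests library (not part of the project; the form fields are exactly the ones the spider sends) might look like this:

# Standalone pagination check: POST the listing URL with only pageNumber
# varied; empty values mean "no filter" on the search form.
import requests

URL = 'http://183.129.219.195:8081/bs/hzzjb/web/list'
form = {
    'mtype': '2',
    '_query.nfStart': '', '_query.yfStart': '',
    '_query.nfEnd': '', '_query.yfEnd': '',
    '_query.dqstr': '', '_query.dq': '', '_query.lbtype': '',
    '_query.clmc': '', '_query.ggjxh': '',
    'pageNumber': '3',  # change this value and compare the responses
    'pageSize': '', 'orderColunm': '', 'orderMode': '',
}
resp = requests.post(URL, data=form, timeout=10)
print(resp.status_code, len(resp.text))

If two different pageNumber values return status 200 with different bodies, the FormRequest loop above will paginate correctly.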
# items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class HzzjbWebItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    district = scrapy.Field()
    category = scrapy.Field()
    material_name = scrapy.Field()
    version = scrapy.Field()
    unit = scrapy.Field()
    tax_information_price = scrapy.Field()
    except_tax_information_price = scrapy.Field()
    y_m = scrapy.Field()
# pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql
from scrapy.conf import settings


class HzzjbWebPipeline(object):
    def process_item(self, item, spider):
        return item


# Save scraped data to MySQL.
class MysqlPipeline(object):
    def open_spider(self, spider):
        self.host = settings.get('MYSQL_HOST')
        self.port = settings.get('MYSQL_PORT')
        self.user = settings.get('MYSQL_USER')
        self.password = settings.get('MYSQL_PASSWORD')
        self.db = settings.get('MYSQL_DB')
        self.table = settings.get('TABLE')
        self.client = pymysql.connect(host=self.host, user=self.user,
                                      password=self.password, port=self.port,
                                      db=self.db, charset='utf8')

    def process_item(self, item, spider):
        item_dict = dict(item)
        cursor = self.client.cursor()
        values = ','.join(['%s'] * len(item_dict))
        keys = ','.join(item_dict.keys())
        sql = 'INSERT INTO {table}({keys}) VALUES ({values})'.format(
            table=self.table, keys=keys, values=values)
        try:
            # First argument is the SQL template, second is the value tuple.
            if cursor.execute(sql, tuple(item_dict.values())):
                print('row inserted!')
                self.client.commit()
        except Exception as e:
            print(e)
            self.client.rollback()
        return item

    def close_spider(self, spider):
        self.client.close()
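The MysqlPipeline assumes the web_hzzjb table already exists in web_datas. A one-off setup script could look like the following; the column names come from items.py, but the column types are an assumption (everything scraped arrives as text), not taken from the original project:

# One-off table setup matching the item fields (column types are assumed).
import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS web_hzzjb (
    id INT AUTO_INCREMENT PRIMARY KEY,
    district VARCHAR(64),
    category VARCHAR(64),
    material_name VARCHAR(255),
    version VARCHAR(255),
    unit VARCHAR(32),
    tax_information_price VARCHAR(32),
    except_tax_information_price VARCHAR(32),
    y_m VARCHAR(16)
) DEFAULT CHARSET=utf8
"""

conn = pymysql.connect(host='172.16.0.55', port=3306, user='root',
                       password='concom603', db='web_datas', charset='utf8')
with conn.cursor() as cur:
    cur.execute(DDL)
conn.commit()
conn.close()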
# settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for hzzjb_web project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'hzzjb_web'

SPIDER_MODULES = ['hzzjb_web.spiders']
NEWSPIDER_MODULE = 'hzzjb_web.spiders'

# MySQL connection parameters
MYSQL_HOST = "172.16.0.55"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PASSWORD = "concom603"
MYSQL_DB = 'web_datas'
TABLE = "web_hzzjb"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'hzzjb_web (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'hzzjb_web.middlewares.HzzjbWebSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'hzzjb_web.middlewares.HzzjbWebDownloaderMiddleware': 500,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'hzzjb_web.pipelines.HzzjbWebPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
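Note that settings.py enables HzzjbWebPipeline at priority 300, while the spider's custom_settings sets ITEM_PIPELINES to MysqlPipeline only. Per-spider custom_settings take precedence over the project file, so only the MySQL pipeline actually runs for this spider. A quick way to verify from inside any spider callback:

# Print the effective pipeline set; custom_settings override settings.py,
# so for this spider the expected output is
# {'hzzjb_web.pipelines.MysqlPipeline': 320}.
print(self.crawler.settings.getdict('ITEM_PIPELINES'))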
# middlewares.py
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class HzzjbWebSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class HzzjbWebDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

Reposted from: https://www.cnblogs.com/lvjing/p/9814690.html
