Scrapy Case Study: Crawling Book Site Information

Create the project

scrapy startproject book_info 
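startproject generates the standard Scrapy skeleton; with a stock Scrapy install the layout looks roughly like this (middlewares.py and pipelines.py hold the auto-generated defaults until edited):

book_info/
    scrapy.cfg
    book_info/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py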

Create the spider

cd book_info 
scrapy genspider cl_book suning.com 
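genspider only produces a minimal spider template, roughly like the sketch below (the exact wording depends on the Scrapy version); the next section replaces it with the real crawling logic:

# -*- coding: utf-8 -*-
import scrapy


class ClBookSpider(scrapy.Spider):
    name = 'cl_book'
    allowed_domains = ['suning.com']
    start_urls = ['http://suning.com/']

    def parse(self, response):
        pass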

Writing the spider file cl_book.py

# -*- coding: utf-8 -*-
import scrapy
import re
from ..items import BookInfoItem
from bs4 import BeautifulSoup
# To avoid fields getting mixed up across requests, pass deep copies of the item
from copy import deepcopy


class ClBookSpider(scrapy.Spider):
    name = 'cl_book'
    allowed_domains = ['suning.com']
    start_urls = ['https://book.suning.com/']

    def parse(self, response):
        item = BookInfoItem()
        # 1. First grab the top-level categories
        # dt = response.xpath('/html/body/div[6]/div/div[1]/div[1]/div[1]/div[4]/dl/dt')
        m_items = response.xpath('//div[@class="menu-item"]')
        for m_item in m_items:
            dd_list = m_item.xpath('.//dd/a')
            if dd_list:
                item['p_class'] = m_item.xpath('.//dt/h3/a/text()').extract_first()
                for dd in dd_list:
                    if dd is None:
                        continue
                    item['s_class'] = dd.xpath('./text()').extract_first()
                    item['booklist_url'] = dd.xpath('./@href').extract_first()
                    if 'search.suning.com' in item['booklist_url']:
                        print(item['booklist_url'])
                        item['booklist_url_ci'] = ' '
                    else:
                        item['booklist_url_ci'] = item['booklist_url'].split('-')[1]
                    # buggy version -- every request shares the same item:
                    # yield scrapy.Request(url=item['booklist_url'],
                    #                      meta={'item': item}, callback=self.parse_second)
                    yield scrapy.Request(url=item['booklist_url'],
                                         meta={'item': deepcopy(item)},
                                         callback=self.parse_second)

    def parse_second(self, response):
        item = response.meta['item']
        li_list = response.xpath('//ul[@class="clearfix"]/li')
        for li in li_list:
            item['book_url'] = 'https:' + li.xpath('.//p[@class="sell-point"]/a/@href').extract_first()
            # Problem: books from the children's category ended up matched into the medical category
            # Cause: Scrapy's asynchronous requests were all touching the same item, so the fields got mixed up
            # Fix: pass a deep copy of the item
            if item['book_url']:
                yield scrapy.Request(url=item['book_url'],
                                     meta={'item': deepcopy(item)},
                                     callback=self.parse_book_detail)

        # Pagination
        # https://list.suning.com/emall/showProductList.do?ci=502348&pg=03&cp=2&il=0&iy=0&adNumber=0&n=1&ch=4&prune=0&sesab=ACBAABC&id=IDENTIFYING&cc=010
        # showProductList.do?ci=502348&pg=03&cp=2
        # showProductList.do?ci=502348&pg=03&cp=3
        # Each page URL is built from ci and cp:
        # ci -- the book category id, contained in item['booklist_url']
        # cp -- the page number
        all_Page = response.xpath('//span[@class="page-more"]/text()').extract_first()
        if all_Page:
            iallPage = int(re.findall(r'\d+', all_Page)[0])
            if item['booklist_url_ci']:
                for i in range(iallPage + 1):
                    next_url = 'https://list.suning.com/emall/showProductList.do?ci={}&pg=03&cp={}'.format(
                        item['booklist_url_ci'], i)
                    yield scrapy.Request(url=next_url,
                                         meta={'item': response.meta['item']},
                                         callback=self.parse_second)
            # Case 1: the item printed in BookInfoPipeline.process_item was missing
            # booklist_url, book_url, book_name, book_author, book_press and book_time:
            # {'book_url': '.html',
            #  'booklist_url': '.html',
            #  'booklist_url_ci': '502322',
            #  'p_class': '人文社科',
            #  's_class': '历史'}
        else:
            print('all_Page is Null')

    def del_str(self, de_str):
        return de_str.replace('\t', '').replace('\r', '').replace('\n', '').replace(' ', '').strip()

    def parse_book_detail(self, response):
        item = response.meta['item']
        book_div = response.xpath('//div[@class="proinfo-main"]/div[@class="proinfo-title"]')
        h1 = book_div.xpath('./h1')
        # Problem: at first the text of the h1 tag could not be extracted.
        # Looking closer, printing the node showed an extra span tag inside --
        # is that an anti-scraping trick? How should it be handled?
        # Solved here with BeautifulSoup.
        # <h1 id="itemDisplayName">
        #     <span id="superNewProduct" class="snew hide">超级新品</span>
        #     小口袋中国经典故事(10册)愚公移山孔融让梨司马光砸缸守株待兔儿童读物0-3-6岁早教益智启蒙精美绘本...
        # </h1>
        # Convert the selector to a string, then pull the text out with soup
        if h1:
            soup = BeautifulSoup(h1.extract_first(), features='lxml')
            item['book_name'] = soup.text.replace('超级新品', '')
            item['book_name'] = self.del_str(item['book_name'])
        book_info = response.xpath('//ul[@class="bk-publish clearfix"]')
        if book_info:
            soup = BeautifulSoup(book_info.extract_first(), features='lxml')
            book_li = soup.find_all('li')
            if len(book_li) > 0 and book_li[0]:
                book_author = book_li[0].text
                book_author = self.del_str(book_author)
                item['book_author'] = book_author
            else:
                item['book_author'] = ' '
            if len(book_li) > 1 and book_li[1]:
                book_press = book_li[1].text
                book_press = self.del_str(book_press)
                item['book_press'] = book_press
            else:
                item['book_press'] = ' '
            if len(book_li) > 2 and book_li[2]:
                book_time = book_li[2].text
                book_time = self.del_str(book_time)
                item['book_time'] = book_time
            else:
                item['book_time'] = ' '
        else:
            item['book_author'] = ' '
            item['book_press'] = ' '
            item['book_time'] = ' '
        yield item
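The deepcopy calls are the core fix described in the comments above: every pending request would otherwise hold a reference to the same BookInfoItem, so by the time a response comes back the shared item has already been overwritten by a later category. A minimal sketch of the difference in plain Python (no Scrapy involved; the two category names are illustrative values taken from the comments):

from copy import deepcopy

item = {'p_class': None, 's_class': None}
queued = []

# Queue "requests" the way the spider does, but without deepcopy:
for s_class in ['儿童', '医学']:
    item['s_class'] = s_class
    queued.append({'item': item})        # every entry points at the same dict

print([q['item']['s_class'] for q in queued])   # ['医学', '医学'] -- fields mixed up

# With deepcopy, each queued entry keeps its own snapshot:
queued = []
for s_class in ['儿童', '医学']:
    item['s_class'] = s_class
    queued.append({'item': deepcopy(item)})

print([q['item']['s_class'] for q in queued])   # ['儿童', '医学']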

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class BookInfoItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # top-level category
    p_class = scrapy.Field()
    # second-level category
    s_class = scrapy.Field()
    # URL of the book list page
    booklist_url = scrapy.Field()
    # URL of the book detail page
    book_url = scrapy.Field()
    # book title
    book_name = scrapy.Field()
    # author
    book_author = scrapy.Field()
    # publisher
    book_press = scrapy.Field()
    # publication date
    book_time = scrapy.Field()
    # the ci value needed for pagination
    booklist_url_ci = scrapy.Field()

start.py


from scrapy import cmdline

cmdline.execute(['scrapy', 'crawl', 'cl_book'])  # approach 2
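Running this script from the project root is equivalent to typing scrapy crawl cl_book on the command line. If you would rather not go through cmdline, a sketch using Scrapy's CrawlerProcess API also works (assuming it is run from the project root so the project settings can be found):

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from book_info.spiders.cl_book import ClBookSpider

process = CrawlerProcess(get_project_settings())
process.crawl(ClBookSpider)
process.start()  # blocks until the crawl finishes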

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for book_info project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'book_info'

SPIDER_MODULES = ['book_info.spiders']
NEWSPIDER_MODULE = 'book_info.spiders'

LOG_LEVEL = 'WARNING'

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2816.400',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'book_info.middlewares.BookInfoSpiderMiddleware': 543,
}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipelines.html
ITEM_PIPELINES = {
    'book_info.pipelines.BookInfoPipeline': 300,
}
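ITEM_PIPELINES enables book_info.pipelines.BookInfoPipeline, which the post never shows; the spider comments only mention that process_item prints the item. A minimal sketch of what such a pipeline could look like (writing items out as JSON lines and the output file name are assumptions for illustration, not part of the original project):

# pipelines.py -- hypothetical sketch, not shown in the original post
import json


class BookInfoPipeline(object):
    def open_spider(self, spider):
        # output file name is an assumption
        self.file = open('book_info.jsonl', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        print(dict(item))  # the debugging behaviour mentioned in the spider comments
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()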
