Scrapy - JD Product Information


Scrapy project structure

Crawl results

jdSpider: core spider code

# -*- coding: utf-8 -*-
import scrapy
import requests

from TestDemo.items import TestdemoItem


class JdspiderSpider(scrapy.Spider):
    name = 'jdSpider'
    allowed_domains = ['jd.com']
    keyword = '手机'  # search keyword ("mobile phone")
    page = 1
    # renamed from start_urls: Scrapy reserves that name for a list of URLs
    search_url = 'https://search.jd.com/Search?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&click=0'
    next_url = 'https://search.jd.com/s_new.php?keyword=%s&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%s&cid2=653&cid3=655&page=%d&scrolling=y&show_items=%s'
    comment_url = 'https://club.jd.com/comment/productCommentSummaries.action?referenceIds={}'

    def start_requests(self):
        yield scrapy.Request(self.search_url % (self.keyword, self.keyword, self.page),
                             callback=self.parse)

    def parse(self, response):
        '''
        Each search page renders only the first 30 products directly; the
        remaining 30 are lazy-loaded and must be fetched separately via
        next_url (handled in next_parse).
        '''
        sids = []  # data-pid values needed to request the lazy-loaded products
        for li in response.xpath('//*[@id="J_goodsList"]/ul/li'):
            item = TestdemoItem()
            title = li.xpath('div/div/a/em/text()').extract()      # title
            price = li.xpath('div/div/strong/i/text()').extract()  # price
            pid = li.xpath('@data-pid').extract()                  # id of the matching lazy-loaded product
            sids.append(''.join(pid))
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # detail link to follow
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # review count
            if len(comment_num) == 0:
                # review count missing from the listing: fetch it from the comment API
                cid = li.xpath('@data-sku').extract()
                item['comment_num'] = self.get_Comment(cid[0])
            else:
                item['comment_num'] = ''.join(comment_num)
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                # not a regular product link (e.g. an ad): emit the item without detail info
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.detail_parse, meta={'item': item})

        headers = {'referer': response.url}
        self.page += 1
        yield scrapy.Request(self.next_url % (self.keyword, self.keyword, self.page, ','.join(sids)),
                             callback=self.next_parse, headers=headers)

    def next_parse(self, response):
        '''
        Parse the last 30 products of each page, returned by s_new.php.
        '''
        for li in response.xpath('//li[@class="gl-item"]'):
            item = TestdemoItem()
            title = li.xpath('div/div/a/em/text()').extract()            # title
            price = li.xpath('div/div/strong/i/text()').extract()        # price
            comment_num = li.xpath('div/div/strong/a/text()').extract()  # review count
            url = li.xpath('div/div[@class="p-name p-name-type-2"]/a/@href').extract()  # detail link to follow
            item['title'] = ''.join(title)
            item['price'] = ''.join(price)
            if len(comment_num) == 0:
                cid = li.xpath('@data-sku').extract()
                item['comment_num'] = self.get_Comment(cid[0])
            else:
                item['comment_num'] = ''.join(comment_num)
            item['url'] = ''.join(url)
            if item['url'].startswith('//'):
                item['url'] = 'https:' + item['url']
            elif not item['url'].startswith('https:'):
                item['info'] = None
                yield item
                continue
            yield scrapy.Request(item['url'], callback=self.detail_parse, meta={'item': item})

        if self.page < 3:  # crawl the first few pages only; raise the limit to fetch more
            self.page += 1
            yield scrapy.Request(self.search_url % (self.keyword, self.keyword, self.page),
                                 callback=self.parse)

    def detail_parse(self, response):
        '''
        Parse the product detail page: brand, full product name and the
        specification table.
        '''
        item = response.meta['item']
        item['info'] = {}
        brand = response.xpath('//div[@class="inner border"]/div[@class="head"]/a/text()').extract()
        name = response.xpath('//div[@class="item ellipsis"]/text()').extract()
        item['info']['brand'] = ''.join(brand)
        item['info']['name'] = ''.join(name)
        for div in response.xpath('//div[@class="Ptable"]/div[@class="Ptable-item"]'):
            h3 = ''.join(div.xpath('h3/text()').extract())
            if h3 == '':
                h3 = '不清'  # placeholder group name when the heading is missing
            dt = div.xpath('dl/dl/dt/text()').extract()
            dd = div.xpath('dl/dl/dd[not(@class)]/text()').extract()
            item['info'][h3] = {}
            # pair each spec name (dt) with its value (dd)
            for t, d in zip(dt, dd):
                item['info'][h3][t] = d
        yield item

    def get_Comment(self, sku_id):
        '''
        Fetch the review count from JD's comment-summary API. Note this is a
        blocking requests call inside Scrapy, which stalls the event loop;
        acceptable for a small demo.
        '''
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
            'referer': 'https://item.jd.com/' + sku_id + '.html'
        }
        new_url = self.comment_url.format(sku_id)
        resp = requests.get(new_url, headers=headers).json()
        return resp['CommentsCount'][0]['CommentCount']
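The review count comes from JD's comment-summary endpoint rather than the search page itself. Below is a minimal standalone sketch of that call outside Scrapy, assuming the response keeps the CommentsCount / CommentCount shape the spider relies on; the sku_id value is a made-up example.

import requests

# Hypothetical sample SKU id; any id taken from a search-result
# <li data-sku="..."> attribute works the same way.
sku_id = '100000177760'
url = ('https://club.jd.com/comment/productCommentSummaries.action'
       '?referenceIds={}').format(sku_id)
headers = {
    'User-Agent': 'Mozilla/5.0',
    'referer': 'https://item.jd.com/' + sku_id + '.html',
}
resp = requests.get(url, headers=headers).json()
# Assumed response shape, matching get_Comment above:
# {"CommentsCount": [{"SkuId": ..., "CommentCount": ..., ...}]}
print(resp['CommentsCount'][0]['CommentCount'])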

Pipeline output: save the results to an Excel file

# -*- coding: utf-8 -*-
from openpyxl import Workbook


class TestdemoPipeline(object):

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.title = 'phone'

    def process_item(self, item, spider):
        self.ws.append([str(item['title']), str(item['price']),
                        str(item['comment_num']), str(item['url']), str(item['info'])])
        self.wb.save('apx.xlsx')  # rewrites the whole workbook on every item
        return item
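Calling wb.save() for every item rewrites the entire workbook each time; for larger crawls it can be cheaper to buffer rows and write once when the spider closes. A sketch of that variant, using Scrapy's standard open_spider/close_spider pipeline hooks (the class name ExcelOnClosePipeline is hypothetical):

# -*- coding: utf-8 -*-
from openpyxl import Workbook


class ExcelOnClosePipeline(object):
    def open_spider(self, spider):
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.title = 'phone'
        self.ws.append(['title', 'price', 'comment_num', 'url', 'info'])  # header row

    def process_item(self, item, spider):
        self.ws.append([str(item['title']), str(item['price']),
                        str(item['comment_num']), str(item['url']), str(item['info'])])
        return item

    def close_spider(self, spider):
        # a single write at the end instead of one per item
        self.wb.save('apx.xlsx')

The trade-off is that nothing is written to disk if the crawl dies mid-run, whereas the per-item save above loses at most one row.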

Item definition

# -*- coding: utf-8 -*-
import scrapy


class TestdemoItem(scrapy.Item):
    title = scrapy.Field()        # title
    price = scrapy.Field()        # price
    comment_num = scrapy.Field()  # review count
    url = scrapy.Field()          # product link
    info = scrapy.Field()         # detail-page info
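Scrapy Items behave like dicts with a fixed key set, which is why the spider can assign item['title'] directly and the pipeline can read the same keys back. A quick illustration with made-up values:

item = TestdemoItem()
item['title'] = 'Sample Phone'  # hypothetical values, for illustration only
item['price'] = '1999.00'
print(dict(item))               # {'title': 'Sample Phone', 'price': '1999.00'}
# item['foo'] = 'bar'           # would raise KeyError: undeclared fields are rejected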

settings.py configuration

BOT_NAME = 'TestDemo'

SPIDER_MODULES = ['TestDemo.spiders']
NEWSPIDER_MODULE = 'TestDemo.spiders'

USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'

ITEM_PIPELINES = {
    'TestDemo.pipelines.TestdemoPipeline': 300,
}
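These are the only values the project changes from the generated defaults. When reproducing it, a few standard Scrapy settings (not part of the original project) may also be worth adding, since newer project templates enable robots.txt checking by default and an unthrottled crawl is easy to get blocked:

# Optional additions -- Scrapy built-ins, not in the original settings.py:
ROBOTSTXT_OBEY = False      # newer templates default this to True, which can block the search URLs
DOWNLOAD_DELAY = 0.5        # seconds between requests to the same domain
AUTOTHROTTLE_ENABLED = True # let Scrapy adapt the request rate automatically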

Running the project

from scrapy import cmdline

if __name__ == '__main__':
    cmdline.execute('scrapy crawl jdSpider'.split())
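An equivalent way to start the crawl without shelling out through cmdline is Scrapy's CrawlerProcess, which runs the spider in-process and is easier to debug from an IDE. A minimal sketch that reuses the project settings:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from TestDemo.spiders.jdSpider import JdspiderSpider

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())  # picks up settings.py
    process.crawl(JdspiderSpider)
    process.start()  # blocks until the crawl finishes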

 
