class DouyuspiderItem(scrapy.Item):
    """Item carrying one Douyu host's avatar photo metadata."""

    # Name to use for the stored photo.
    name = scrapy.Field()
    # URL of the photo.
    imagesUrls = scrapy.Field()
    # Local filesystem path the photo was saved to.
    imagesPath = scrapy.Field()
import json

import scrapy

from douyuSpider.items import DouyuspiderItem


class DouyuSpider(scrapy.Spider):
    """Page through Douyu's vertical-room API, yielding one item per host."""

    name = "douyu"
    # FIX: was misspelled ``allowd_domains`` (which Scrapy silently ignores),
    # and must contain a bare domain, not a URL.
    allowed_domains = ["capi.douyucdn.cn"]

    offset = 0
    # FIX: removed the stray space that corrupted the query string
    # ("limit=20& offset=" -> "limit=20&offset=").
    url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset="
    start_urls = [url + str(offset)]

    def parse(self, response):
        """Parse one JSON page: yield an item per room, then request the next page."""
        # The "data" key of the API response holds the list of room records.
        data = json.loads(response.text)["data"]

        # FIX: stop paginating once the API returns an empty page; without
        # this guard the spider keeps requesting ever-growing offsets forever.
        if not data:
            return

        for each in data:
            item = DouyuspiderItem()
            item["name"] = each["nickname"]
            item["imagesUrls"] = each["vertical_src"]
            yield item

        # Advance to the next page of 20 rooms.
        self.offset += 20
        yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
# Enable the project's image pipeline (priority 1).
ITEM_PIPELINES = {'douyuSpider.pipelines.ImagesPipeline': 1}

# Where downloaded images are stored; read back in pipelines.py.
IMAGES_STORE = "/Users/Power/lesson_python/douyuSpider/Images"

# Impersonate the Douyu iOS client so the API responds.
USER_AGENT = 'DYZB/2.290 (iPhone; iOS 9.3.4; Scale/2.00)'
import os

import scrapy
# FIX: alias the base class so our subclass can keep the exact name that
# ITEM_PIPELINES references ('douyuSpider.pipelines.ImagesPipeline')
# without shadowing the class it inherits from.
from scrapy.pipelines.images import ImagesPipeline as _ScrapyImagesPipeline
from scrapy.utils.project import get_project_settings


class ImagesPipeline(_ScrapyImagesPipeline):
    """Download each host's avatar and rename it to '<nickname>.jpg'.

    get_media_requests() emits one Request per image URL; Scrapy feeds the
    download outcomes to item_completed() as ``results``, a list of
    (success, info) tuples where ``info`` is a dict with url/path/checksum
    keys when success is True.
    """

    # Target directory configured in settings.py.
    IMAGES_STORE = get_project_settings().get("IMAGES_STORE")

    def get_media_requests(self, item, info):
        # Each item carries a single avatar URL.
        image_url = item["imagesUrls"]
        yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        # Keep only the relative paths of successfully downloaded images.
        image_paths = [x["path"] for ok, x in results if ok]

        # FIX: guard against a failed download — the original indexed
        # image_paths[0] unconditionally and raised IndexError on failure.
        if not image_paths:
            return item

        # Rename the hashed download to the host's nickname.
        os.rename(
            os.path.join(self.IMAGES_STORE, image_paths[0]),
            os.path.join(self.IMAGES_STORE, item["name"] + ".jpg"),
        )
        # Record where the renamed photo now lives (sans extension, as before).
        item["imagesPath"] = os.path.join(self.IMAGES_STORE, item["name"])
        return item
from scrapy import cmdline

# FIX: guard the entry point so merely importing this module does not
# launch a crawl as a side effect.
if __name__ == "__main__":
    # Equivalent to running ``scrapy crawl douyu`` from the shell.
    cmdline.execute('scrapy crawl douyu'.split())
python2 main.py
获取完整Python爬虫视频教程请点:Python网络爬虫课程。