import os

os.makedirs('./image/', exist_ok=True)

IMAGE_URL = "http://image.nationalgeographic.com.cn/2017/1122/20171122113404332.jpg"


def urllib_download():
    # download with urllib's urlretrieve
    from urllib.request import urlretrieve
    urlretrieve(IMAGE_URL, './image/img1.png')


def request_download():
    # download the whole file in one request with requests
    import requests
    r = requests.get(IMAGE_URL)
    with open('./image/img2.png', 'wb') as f:
        f.write(r.content)


def chunk_download():
    # stream the response and write it to disk in small chunks
    import requests
    r = requests.get(IMAGE_URL, stream=True)
    with open('./image/img3.png', 'wb') as f:
        for chunk in r.iter_content(chunk_size=32):
            f.write(chunk)


urllib_download()
print('download img1')
request_download()
print('download img2')
chunk_download()
print('download img3')
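The streaming variant above can also report progress while it writes. The following is a minimal sketch, not part of the original post: the function name chunk_download_with_progress, the img4.png path, and the 1024-byte chunk size are illustrative choices, and it reuses the IMAGE_URL defined above. The percentage is based on the Content-Length header, which some servers omit (in that case nothing is printed until the end).

def chunk_download_with_progress(url=IMAGE_URL, path='./image/img4.png', chunk_size=1024):
    # stream the response and print a rough progress percentage while writing
    import requests
    r = requests.get(url, stream=True)
    total = int(r.headers.get('Content-Length', 0))  # 0 if the server omits the header
    written = 0
    with open(path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=chunk_size):
            f.write(chunk)
            written += len(chunk)
            if total:
                print('\rdownloaded %.1f%%' % (written * 100 / total), end='')
    print('\ndownload', path)


chunk_download_with_progress()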
============================ Method 2 ========================================
import requests
import os

url = "https://ss0.bdstatic.com/5aV1bjqh_Q23odCf/static/superman/img/logo/bd_logo1_31bdc765.png"
root = "F://python//"
path = root + url.split("/")[-1]

try:
    if not os.path.exists(root):
        os.mkdir(root)
    if not os.path.exists(path):
        r = requests.get(url)
        r.raise_for_status()  # raise an exception on an HTTP error status
        # the with statement closes the opened file automatically, no manual close needed
        with open(path, "wb") as f:  # start writing the file; "wb" means write binary data
            f.write(r.content)
        print("Download finished")
    else:
        print("File already exists")
except Exception as e:
    print("Download failed: " + str(e))
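As a usage sketch, not part of the original article, the same pattern can be wrapped in a reusable function so that several URLs can be fetched into one directory. The name download_to and the use of os.makedirs with exist_ok=True are illustrative; it reuses the url variable defined above.

def download_to(url, root="F://python//"):
    # download url into root, skipping files that already exist
    import os
    import requests
    path = os.path.join(root, url.split("/")[-1])
    try:
        os.makedirs(root, exist_ok=True)
        if os.path.exists(path):
            print("File already exists:", path)
            return path
        r = requests.get(url)
        r.raise_for_status()
        with open(path, "wb") as f:
            f.write(r.content)
        print("Download finished:", path)
        return path
    except Exception as e:
        print("Download failed: " + str(e))


download_to(url)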
Author: 我要七龙ru. Original article: https://blog.csdn.net/a735311619/article/details/77488576?utm_source=copy
Reposted from: https://www.cnblogs.com/alex-13/p/9792316.html