如何使用我已经知道其URL地址的Python在本地保存图像?

我知道互联网上一张图片的网址。

例如,http://www.digimouth.com/news/media/2011/09/google-logo.jpg,其中包含Google的徽标。

现在,我如何使用Python下载此图像,而无需在浏览器中实际打开URL并手动保存文件。

331854 次浏览
import urllib

# Python 2: fetch the remote image and write the raw bytes to disk.
resource = urllib.urlopen("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")
try:
    # "wb" is required: image data is binary, not text.
    # The context manager closes the file even if write() raises,
    # which the original open()/close() pair did not guarantee.
    with open("file01.jpg", "wb") as output:
        output.write(resource.read())
finally:
    # the urlopen() response object is not a context manager in Python 2;
    # close it explicitly so the socket is released
    resource.close()

file01.jpg将包含您的图像。

Python 2

如果您只想将其保存为文件,这里有一个更直接的方法:

import urllib


# Python 2 one-liner: urlretrieve() downloads the URL straight to a local file.
# First argument is the remote URL, second is the local destination path.
urllib.urlretrieve("http://www.digimouth.com/news/media/2011/09/google-logo.jpg", "local-filename.jpg")

第二个参数是保存文件的本地路径。

Python 3

正如Sergo所建议的,下面的代码应该适用于Python3。

import urllib.request


# Python 3: urlretrieve moved into the urllib.request module.
# Downloads the URL (first argument) to the local path (second argument).
urllib.request.urlretrieve("http://www.digimouth.com/news/media/2011/09/google-logo.jpg", "local-filename.jpg")

我编写了一个图片抓取脚本,它可以在我的GitHub上供您使用。

我利用BeautifulSoup来解析任何网站的图片。如果你将做很多网页抓取(或打算使用我的工具),我建议你sudo pip install BeautifulSoup。有关BeautifulSoup的信息在这里中提供。

为了方便起见,下面是我的代码:

from bs4 import BeautifulSoup
from urllib2 import urlopen
import urllib


# use this image scraper from the location that
#you want to save scraped images to


def make_soup(url):
html = urlopen(url).read()
return BeautifulSoup(html)


def get_images(url):
soup = make_soup(url)
#this makes a list of bs4 element tags
images = [img for img in soup.findAll('img')]
print (str(len(images)) + "images found.")
print 'Downloading images to current working directory.'
#compile our unicode list of image links
image_links = [each.get('src') for each in images]
for each in image_links:
filename=each.split('/')[-1]
urllib.urlretrieve(each, filename)
return image_links


#a standard call looks like this
#get_images('http://www.wookmark.com')

适用于Python 2和Python 3的解决方案:

# Works on both Python 2 and Python 3: import urlretrieve from
# wherever the running interpreter provides it.
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2


url = "http://www.digimouth.com/news/media/2011/09/google-logo.jpg"
# Second argument is the local path the downloaded image is saved to.
urlretrieve(url, "local-filename.jpg")

或者,如果requests的额外要求可以接受,并且如果它是HTTP(S)URL:

def load_requests(source_url, sink_path):
    """
    Load a file from an URL (e.g. http).

    Parameters
    ----------
    source_url : str
        Where to load the file from.
    sink_path : str
        Where the loaded file is stored.
    """
    import requests
    # stream=True avoids holding the whole payload in memory at once
    r = requests.get(source_url, stream=True)
    if r.status_code == 200:
        with open(sink_path, 'wb') as f:
            # iterating the response yields the body in chunks
            for chunk in r:
                f.write(chunk)

这是一个非常简短的回答。

# Python 2: save the (dynamically generated) picture URL straight to Abc.jpg.
import urllib
urllib.urlretrieve("http://photogallery.sandesh.com/Picture.aspx?AlubumId=422040", "Abc.jpg")

我在上面脚本的基础上做了一个扩展版本。这是脚本。我修复了一些问题。它现在可以绕过 403 Forbidden 的问题。当图像检索失败时,它不会崩溃。它会尽量避免损坏的预览。它能获得正确的绝对URL。它给出了更多的信息。它可以在命令行中使用参数运行。

# getem.py
# python2 script to download all images in a given url
# use: python getem.py http://url.where.images.are


from bs4 import BeautifulSoup
import urllib2
import shutil
import requests
from urlparse import urljoin
import sys
import time


def make_soup(url):
req = urllib2.Request(url, headers={'User-Agent' : "Magic Browser"})
html = urllib2.urlopen(req)
return BeautifulSoup(html, 'html.parser')


def get_images(url):
soup = make_soup(url)
images = [img for img in soup.findAll('img')]
print (str(len(images)) + " images found.")
print 'Downloading images to current working directory.'
image_links = [each.get('src') for each in images]
for each in image_links:
try:
filename = each.strip().split('/')[-1].strip()
src = urljoin(url, each)
print 'Getting: ' + filename
response = requests.get(src, stream=True)
# delay to avoid corrupted previews
time.sleep(1)
with open(filename, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
except:
print '  An error occured. Continuing.'
print 'Done.'


if __name__ == '__main__':
url = sys.argv[1]
get_images(url)

Python 3

urllib.request—用于打开URL的可扩展库

from urllib.error import HTTPError
from urllib.request import urlretrieve


# NOTE(review): image_url and image_local_path must be defined by the
# surrounding code before this snippet runs.
try:
    urlretrieve(image_url, image_local_path)
except FileNotFoundError as err:
    print(err)   # something wrong with local path
except HTTPError as err:
    print(err)  # something wrong with url

这可以通过请求来完成。加载页面并将二进制内容转储到文件中。

import os
import requests


url = 'https://apod.nasa.gov/apod/image/1701/potw1636aN159_HST_2048.jpg'
page = requests.get(url)


# keep the original extension (".jpg" here) when naming the local file
f_ext = os.path.splitext(url)[-1]
f_name = 'img{}'.format(f_ext)
# dump the binary response body to disk; "wb" because image data is binary
with open(f_name, 'wb') as f:
    f.write(page.content)

Python 3的版本

我为Python 3调整了@MadProps的代码

# getem.py
# python3 script to download all images in a given url
# use: python getem.py http://url.where.images.are


from bs4 import BeautifulSoup
import urllib.request
import shutil
import requests
from urllib.parse import urljoin
import sys
import time


def make_soup(url):
    """Fetch *url* with a browser-like User-Agent (bypasses many 403s)."""
    req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
    html = urllib.request.urlopen(req)
    return BeautifulSoup(html, 'html.parser')


def get_images(url):
    """Download every <img> found on *url* into the working directory."""
    soup = make_soup(url)
    images = [img for img in soup.findAll('img')]
    print(str(len(images)) + " images found.")
    print('Downloading images to current working directory.')
    image_links = [each.get('src') for each in images]
    for each in image_links:
        try:
            filename = each.strip().split('/')[-1].strip()
            # resolve relative src attributes against the page URL
            src = urljoin(url, each)
            print('Getting: ' + filename)
            response = requests.get(src, stream=True)
            # delay to avoid corrupted previews
            time.sleep(1)
            with open(filename, 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
        # was a bare "except:", which also swallowed KeyboardInterrupt/SystemExit
        except Exception:
            print('  An error occured. Continuing.')
    print('Done.')


if __name__ == '__main__':
    # honour the documented command-line usage; fall back to the
    # original example URL when no argument is given
    url = sys.argv[1] if len(sys.argv) > 1 else 'http://www.wookmark.com'
    get_images(url)

迟交答案,但对于 python>=3.6,您可以使用 dload 库,即:

import dload
# dload.save() derives the local filename from the URL automatically
dload.save("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")

如果需要图像作为bytes,请使用:

# returns the raw image content as bytes instead of writing a file
img_bytes = dload.bytes("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")

使用pip3 install dload进行安装

使用 requests 库的全新 Python 3 实现:

代码中的注释。准备使用功能。


import requests
from os import path


def get_image(image_url):
    """
    Get image based on url.
    :return: Image name if everything OK, False otherwise
    """
    image_name = path.split(image_url)[1]
    try:
        image = requests.get(image_url)
    except OSError:  # Little too wide, but work OK, no additional imports needed. Catch all conection problems
        return False
    if image.status_code == 200:  # we could have retrieved error page
        # Use your own path or "" to use current working directory. Folder must exist.
        base_dir = path.join(path.dirname(path.realpath(__file__)), "images")
        with open(path.join(base_dir, image_name), "wb") as f:
            f.write(image.content)
        return image_name
    # BUG FIX: a non-200 response previously fell through and returned None,
    # contradicting the documented "False otherwise" contract.
    return False


get_image("https://apod.nasddfda.gov/apod/image/2003/S106_Mishra_1947.jpg")


使用请求库

import requests
import shutil,os


headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
currentDir = os.getcwd()
path = os.path.join(currentDir,'Images')  # saving images to Images folder


def ImageDl(url):
    """Download *url* into the Images folder, retrying up to 5 times."""
    attempts = 0
    while attempts < 5:  # retry 5 times
        try:
            filename = url.split('/')[-1]
            r = requests.get(url,headers=headers,stream=True,timeout=5)
            if r.status_code == 200:
                with open(os.path.join(path,filename),'wb') as f:
                    # force decoding of gzip/deflate so the raw stream is the image
                    r.raw.decode_content = True
                    shutil.copyfileobj(r.raw,f)
                print(filename)
                break
            # BUG FIX: a non-200 response used to spin forever because
            # `attempts` was only incremented in the except branch.
            attempts += 1
        except Exception as e:
            attempts += 1
            print(e)


if __name__ == '__main__':
    # BUG FIX: the original called ImageDl(url) with `url` undefined,
    # raising NameError on import; use an explicit example URL instead.
    ImageDl('http://www.digimouth.com/news/media/2011/09/google-logo.jpg')

如果您还没有图像的URL,可以使用 gazpacho 库抓取它:

from gazpacho import Soup
base_url = "http://books.toscrape.com"


soup = Soup.get(base_url)
# gazpacho's find("img") returns the matching tags; collect each
# tag's src attribute into a list of (site-relative) image links
links = [img.attrs["src"] for img in soup.find("img")]

然后像上面一样用 urlretrieve 下载这个资产:

from pathlib import Path
from urllib.request import urlretrieve as download


# create an "images" output folder (no error if it already exists)
directory = "images"
Path(directory).mkdir(exist_ok=True)


# `links` and `base_url` come from the scraping snippet above;
# take the first asset and name the file after its last path component
link = links[0]
name = link.split("/")[-1]


# the scraped src values are site-relative, so prefix the base URL
download(f"{base_url}/{link}", f"{directory}/{name}")

使用简单的 Python wget 模块下载链接。用法如下:

import wget
# wget.download() saves the file into the current working directory,
# deriving the filename from the URL
wget.download('http://www.digimouth.com/news/media/2011/09/google-logo.jpg')

下载图像文件,避免所有可能的错误:

import requests
import validators
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError




def is_downloadable(url):
    """Return True when *url* is syntactically valid and can be opened.

    Returns False for malformed URLs and for HTTP/connection errors.
    """
    # BUG FIX: validators.url() returns a ValidationFailure object on bad
    # input, which is falsy but does NOT compare equal to False, so the
    # original `valid==False` test could pass invalid URLs through.
    if not validators.url(url):
        return False
    req = Request(url)
    try:
        urlopen(req)
    except HTTPError:
        return False
    except URLError:
        return False
    else:
        return True






# File_data contains a list of addresses for image files; `pth` is the
# destination directory prefix (both defined by the surrounding code).
for i in range(len(File_data)):
    url = File_data[i][1]
    try:
        if (is_downloadable(url)):
            try:
                r = requests.get(url, allow_redirects=True)
                # BUG FIX: str.find() returns -1 (which is truthy!) when '/'
                # is absent, so the original `if url.find('/')` filtered
                # nothing; compare against -1 explicitly.
                if url.find('/') != -1:
                    fname = url.rsplit('/', 1)[1]
                    fname = pth + File_data[i][0] + "$" + fname  # destination to save image file
                    # context manager guarantees the handle is closed
                    # (the original open(...).write(...) leaked it)
                    with open(fname, 'wb') as out:
                        out.write(r.content)
            except Exception as e:
                print(e)
    except Exception as e:
        print(e)
# import the required libraries from Python
# BUG FIX: `os` is used below (os.path.join) but was never imported,
# so the original raised NameError.
import os
import pathlib,urllib.request


# Using pathlib, specify where the image is to be saved
downloads_path = str(pathlib.Path.home() / "Downloads")


# Form a full image path by joining the path to the
# images' new name


picture_path  = os.path.join(downloads_path, "new-image.png")


# "/home/User/Downloads/new-image.png"


# Using "urlretrieve()" from urllib.request save the image
# NOTE(review): a scheme-relative "//example.com/..." URL is rejected by
# urlretrieve in practice; a full "https://..." URL is required.
urllib.request.urlretrieve("//example.com/image.png", picture_path)


# urlretrieve() takes in 2 arguments
# 1. The URL of the image to be downloaded
# 2. The image new name after download. By default, the image is saved
#    inside your current working directory

这是下载图像最简单的方法。

import requests
from slugify import slugify


img_url = 'https://apod.nasa.gov/apod/image/1701/potw1636aN159_HST_2048.jpg'
img = requests.get(img_url).content
# slugify() turns the URL into a filesystem-safe base name; keep the real
# extension by splitting the URL on '.'. The context manager replaces the
# manual open()/close() pair, closing the file even if write() raises.
with open(slugify(img_url) + '.' + str(img_url).split('.')[-1], 'wb') as img_file:
    img_file.write(img)

好的,所以,这是我的初步尝试,可能完全是矫枉过正。 如果需要更新,因为这不处理任何超时,但是,我得到了这个工作的乐趣。

此处列出的代码:https://github.com/jayrizzo/jayrizzotools/blob/master/pyimagedownloader.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created Syst: MAC OSX High Sierra 21.5.0 (17G65)
# Created Plat: Python 3.9.5 ('v3.9.5:0a7dcbdb13', 'May  3 2021 13:17:02')
# Created By  : Jeromie Kirchoff
# Created Date: Thu Jun 15 23:31:01 2022 CDT
# Last ModDate: Thu Jun 16 01:41:01 2022 CDT
# =============================================================================
# NOTE: Doesn't work on SVG images at this time.
# I will look into this further: https://stackoverflow.com/a/6599172/1896134
# =============================================================================
import requests                                 # to get image from the web
import shutil                                   # to save it locally
import os                                       # needed
from os.path import exists as filepathexist     # check if file paths exist
from os.path import join                        # joins path for different os
from os.path import expanduser                  # expands current home
from pyuser_agent import UA                     # generates random UserAgent


class ImageDownloader(object):
    """URL ImageDownloader.

    Input : Full Image URL
    Output: Image saved to your ~/Pictures/JayRizzoDL folder.
    """

    def __init__(self, URL: str):
        self.url = URL
        # random User-Agent helps avoid 403 responses from some hosts
        self.headers = {"User-Agent" : UA().random}
        self.currentHome = expanduser('~')
        # NOTE(review): desktop/download are computed but never used below
        self.desktop = join(self.currentHome + "/Desktop/")
        self.download = join(self.currentHome + "/Downloads/")
        self.pictures = join(self.currentHome + "/Pictures/JayRizzoDL/")
        self.outfile = ""
        self.filename = ""
        self.response = ""
        self.rawstream = ""
        self.createdfilepath = ""
        self.imgFileName = ""
        # Check if the JayRizzoDL exists in the pictures folder.
        # BUG FIX: use makedirs so a missing ~/Pictures parent is also
        # created, where the original os.mkdir would raise.
        if not filepathexist(self.pictures):
            os.makedirs(self.pictures)
        self.main()

    def getFileNameFromURL(self, URL: str):
        """Parse the URL for the name after the last forward slash."""
        # BUG FIX: the URL parameter was ignored and self.url used instead;
        # callers passing self.url (as main() does) see identical behaviour.
        return URL.strip().split('/')[-1].strip()

    def getResponse(self, URL: str):
        """Try streaming the URL for the raw data."""
        # BUG FIX: the URL parameter was ignored and self.url used instead.
        self.response = requests.get(URL, headers=self.headers, stream=True)
        return self.response

    def gocreateFile(self, name: str, response):
        """Try creating the file with the raw data in a custom folder."""
        self.outfile = join(self.pictures, name)
        with open(self.outfile, 'wb') as outFilePath:
            shutil.copyfileobj(response.raw, outFilePath)
        return self.outfile

    def main(self):
        """Combine Everything and use in for loops."""
        self.filename = self.getFileNameFromURL(self.url)
        self.rawstream = self.getResponse(self.url)
        self.createdfilepath = self.gocreateFile(self.filename, self.rawstream)
        print(f"File was created: {self.createdfilepath}")
        return


if __name__ == '__main__':
    # Example when calling the file directly.
    ImageDownloader("https://stackoverflow.design/assets/img/logos/so/logo-stackoverflow.png")