"""
The image module supports basic image loading, rescaling and display
operations.
"""
import math
import os
import logging
from pathlib import Path
import numpy as np
import PIL.PngImagePlugin
import matplotlib as mpl
from matplotlib import _api
import matplotlib.artist as martist
from matplotlib.backend_bases import FigureCanvasBase
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.cbook as cbook
# For clarity, names from _image are given explicitly in this module:
import matplotlib._image as _image
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
from matplotlib.transforms import (
Affine2D, BboxBase, Bbox, BboxTransform, BboxTransformTo,
IdentityTransform, TransformedBbox)
_log = logging.getLogger(__name__)
# map interpolation strings to module constants
_interpd_ = {
'antialiased': _image.NEAREST, # this will use nearest or Hanning...
'none': _image.NEAREST, # fall back to nearest when not supported
'nearest': _image.NEAREST,
'bilinear': _image.BILINEAR,
'bicubic': _image.BICUBIC,
'spline16': _image.SPLINE16,
'spline36': _image.SPLINE36,
'hanning': _image.HANNING,
'hamming': _image.HAMMING,
'hermite': _image.HERMITE,
'kaiser': _image.KAISER,
'quadric': _image.QUADRIC,
'catrom': _image.CATROM,
'gaussian': _image.GAUSSIAN,
'bessel': _image.BESSEL,
'mitchell': _image.MITCHELL,
'sinc': _image.SINC,
'lanczos': _image.LANCZOS,
'blackman': _image.BLACKMAN,
}
interpolations_names = set(_interpd_)
def composite_images(images, renderer, magnification=1.0):
"""
Composite a number of RGBA images into one. The images are
composited in the order in which they appear in the *images* list.
Parameters
----------
images : list of Images
Each must have a `make_image` method. For each image,
`can_composite` should return `True`, though this is not
enforced by this function. Each image must have a purely
affine transformation with no shear.
renderer : `.RendererBase`
magnification : float, default: 1
The additional magnification to apply for the renderer in use.
Returns
-------
image : uint8 array (M, N, 4)
The composited RGBA image.
offset_x, offset_y : float
The (left, bottom) offset where the composited image should be placed
in the output figure.
"""
if len(images) == 0:
return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
parts = []
bboxes = []
for image in images:
data, x, y, trans = image.make_image(renderer, magnification)
if data is not None:
x *= magnification
y *= magnification
parts.append((data, x, y, image._get_scalar_alpha()))
bboxes.append(
Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))
if len(parts) == 0:
return np.empty((0, 0, 4), dtype=np.uint8), 0, 0
bbox = Bbox.union(bboxes)
output = np.zeros(
(int(bbox.height), int(bbox.width), 4), dtype=np.uint8)
for data, x, y, alpha in parts:
trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)
_image.resample(data, output, trans, _image.NEAREST,
resample=False, alpha=alpha)
return output, bbox.x0 / magnification, bbox.y0 / magnification
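# Illustrative usage sketch (hypothetical example; assumes an Agg-backed
# figure so that ``fig.canvas.get_renderer()`` is available):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     fig, ax = plt.subplots()
#     ims = [ax.imshow(np.random.rand(4, 4)),
#            ax.imshow(np.random.rand(4, 4), extent=(2, 6, 2, 6), alpha=0.5)]
#     fig.canvas.draw()  # ensure a renderer exists
#     rgba, ox, oy = composite_images(ims, fig.canvas.get_renderer())
#     # rgba is an (M, N, 4) uint8 array; (ox, oy) is the (left, bottom)
#     # offset, in pixels, at which it should be drawn.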
def _draw_list_compositing_images(
renderer, parent, artists, suppress_composite=None):
"""
Draw a sorted list of artists, compositing images into a single
image where possible.
For internal Matplotlib use only: It is here to reduce duplication
between `Figure.draw` and `Axes.draw`, but otherwise should not be
generally useful.
"""
has_images = any(isinstance(x, _ImageBase) for x in artists)
# override the renderer default if suppressComposite is not None
not_composite = (suppress_composite if suppress_composite is not None
else renderer.option_image_nocomposite())
if not_composite or not has_images:
for a in artists:
a.draw(renderer)
else:
# Composite any adjacent images together
image_group = []
mag = renderer.get_image_magnification()
def flush_images():
if len(image_group) == 1:
image_group[0].draw(renderer)
elif len(image_group) > 1:
data, l, b = composite_images(image_group, renderer, mag)
if data.size != 0:
gc = renderer.new_gc()
gc.set_clip_rectangle(parent.bbox)
gc.set_clip_path(parent.get_clip_path())
renderer.draw_image(gc, round(l), round(b), data)
gc.restore()
del image_group[:]
for a in artists:
if (isinstance(a, _ImageBase) and a.can_composite() and
a.get_clip_on()):
image_group.append(a)
else:
flush_images()
a.draw(renderer)
flush_images()
def _resample(
image_obj, data, out_shape, transform, *, resample=None, alpha=1):
"""
Convenience wrapper around `._image.resample` to resample *data* to
*out_shape* (with a third dimension if *data* is RGBA) that takes care of
allocating the output array and fetching the relevant properties from the
Image object *image_obj*.
"""
# decide if we need to apply anti-aliasing if the data is upsampled:
# compare the number of displayed pixels to the number of
# the data pixels.
interpolation = image_obj.get_interpolation()
if interpolation == 'antialiased':
# don't antialias if upsampling by exactly a factor of 1 or 2,
# or by more than a factor of 3, in both directions
pos = np.array([[0, 0], [data.shape[1], data.shape[0]]])
disp = transform.transform(pos)
dispx = np.abs(np.diff(disp[:, 0]))
dispy = np.abs(np.diff(disp[:, 1]))
if ((dispx > 3 * data.shape[1] or
dispx == data.shape[1] or
dispx == 2 * data.shape[1]) and
(dispy > 3 * data.shape[0] or
dispy == data.shape[0] or
dispy == 2 * data.shape[0])):
interpolation = 'nearest'
else:
interpolation = 'hanning'
out = np.zeros(out_shape + data.shape[2:], data.dtype) # 2D->2D, 3D->3D.
if resample is None:
resample = image_obj.get_resample()
_image.resample(data, out, transform,
_interpd_[interpolation],
resample,
alpha,
image_obj.get_filternorm(),
image_obj.get_filterrad())
return out
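# Worked example of the 'antialiased' heuristic above (illustrative
# numbers): a 100x100 input drawn into a 250x250 region is upsampled by
# 2.5x, which is neither an exact 1x/2x nor more than 3x, so 'hanning'
# smoothing is used; drawn into a 400x400 region (more than 3x) or an
# exact 200x200 (2x) region, the code falls back to 'nearest'.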
def _rgb_to_rgba(A):
"""
Convert an RGB image to RGBA, as required by the image resample C++
extension.
"""
rgba = np.zeros((A.shape[0], A.shape[1], 4), dtype=A.dtype)
rgba[:, :, :3] = A
if rgba.dtype == np.uint8:
rgba[:, :, 3] = 255
else:
rgba[:, :, 3] = 1.0
return rgba
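# Minimal sketch of the helper above (illustrative):
#
#     rgb = np.zeros((2, 2, 3), dtype=np.uint8)
#     rgba = _rgb_to_rgba(rgb)
#     # rgba.shape == (2, 2, 4); the added alpha channel is fully opaque
#     # (255 for uint8 input, 1.0 for float input).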
class _ImageBase(martist.Artist, cm.ScalarMappable):
"""
Base class for images.
interpolation and cmap default to their rc settings
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
extent is data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
Additional kwargs are matplotlib.artist properties
"""
zorder = 0
def __init__(self, ax,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=True,
filterrad=4.0,
resample=False,
**kwargs
):
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None:
origin = mpl.rcParams['image.origin']
_api.check_in_list(["upper", "lower"], origin=origin)
self.origin = origin
self.set_filternorm(filternorm)
self.set_filterrad(filterrad)
self.set_interpolation(interpolation)
self.set_resample(resample)
self.axes = ax
self._imcache = None
self.update(kwargs)
def __getstate__(self):
state = super().__getstate__()
# We can't pickle the C Image cached object.
state['_imcache'] = None
return state
def get_size(self):
"""Return the size of the image as tuple (numrows, numcols)."""
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
Parameters
----------
alpha : float or 2D array-like or None
"""
martist.Artist._set_alpha_for_array(self, alpha)
if np.ndim(alpha) not in (0, 2):
raise TypeError('alpha must be a float, two-dimensional '
'array, or None')
self._imcache = None
def _get_scalar_alpha(self):
"""
Get a scalar alpha value to be applied to the artist as a whole.
If the alpha value is a matrix, the method returns 1.0 because pixels
have individual alpha values (see `~._ImageBase._make_image` for
details). If the alpha value is a scalar, the method returns said value
to be applied to the artist as a whole because pixels do not have
individual alpha values.
"""
return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 \
else self._alpha
def changed(self):
"""
Call this whenever the mappable is changed so observers can update.
"""
self._imcache = None
self._rgbacache = None
cm.ScalarMappable.changed(self)
def _make_image(self, A, in_bbox, out_bbox, clip_bbox, magnification=1.0,
unsampled=False, round_to_pixel_border=True):
"""
Normalize, rescale, and colormap the image *A* from the given *in_bbox*
(in data space), to the given *out_bbox* (in pixel space) clipped to
the given *clip_bbox* (also in pixel space), and magnified by the
*magnification* factor.
*A* may be a greyscale image (M, N) with a dtype of float32, float64,
float128, uint16 or uint8, or an (M, N, 4) RGBA image with a dtype of
float32, float64, float128, or uint8.
If *unsampled* is True, the image will not be scaled, but an
appropriate affine transformation will be returned instead.
If *round_to_pixel_border* is True, the output image size will be
rounded to the nearest pixel boundary. This makes the images align
correctly with the axes. It should not be used if exact scaling is
needed, such as for `FigureImage`.
Returns
-------
image : (M, N, 4) uint8 array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : Affine2D
The affine transformation from image to pixel space.
"""
if A is None:
raise RuntimeError('You must first set the image '
'array or the image attribute')
if A.size == 0:
raise RuntimeError("_make_image must get a non-empty image. "
"Your Artist's draw method must filter before "
"this method is called.")
clipped_bbox = Bbox.intersection(out_bbox, clip_bbox)
if clipped_bbox is None:
return None, 0, 0, None
out_width_base = clipped_bbox.width * magnification
out_height_base = clipped_bbox.height * magnification
if out_width_base == 0 or out_height_base == 0:
return None, 0, 0, None
if self.origin == 'upper':
# Flip the input image using a transform. This avoids the
# problem with flipping the array, which results in a copy
# when it is converted to contiguous in the C wrapper
t0 = Affine2D().translate(0, -A.shape[0]).scale(1, -1)
else:
t0 = IdentityTransform()
t0 += (
Affine2D()
.scale(
in_bbox.width / A.shape[1],
in_bbox.height / A.shape[0])
.translate(in_bbox.x0, in_bbox.y0)
+ self.get_transform())
t = (t0
+ (Affine2D()
.translate(-clipped_bbox.x0, -clipped_bbox.y0)
.scale(magnification)))
# So that the image is aligned with the edge of the axes, we want to
# round up the output width to the next integer. This also means
# scaling the transform slightly to account for the extra subpixel.
if (t.is_affine and round_to_pixel_border and
(out_width_base % 1.0 != 0.0 or out_height_base % 1.0 != 0.0)):
out_width = math.ceil(out_width_base)
out_height = math.ceil(out_height_base)
extra_width = (out_width - out_width_base) / out_width_base
extra_height = (out_height - out_height_base) / out_height_base
t += Affine2D().scale(1.0 + extra_width, 1.0 + extra_height)
else:
out_width = int(out_width_base)
out_height = int(out_height_base)
out_shape = (out_height, out_width)
if not unsampled:
if not (A.ndim == 2 or A.ndim == 3 and A.shape[-1] in (3, 4)):
raise ValueError(f"Invalid shape {A.shape} for image data")
if A.ndim == 2:
# if we are a 2D array, then we are running through the
# norm + colormap transformation. However, in general the
# input data is not going to match the size on the screen so we
# have to resample to the correct number of pixels
# TODO slice input array first
inp_dtype = A.dtype
a_min = A.min()
a_max = A.max()
# figure out the type we should scale to. For floats,
# leave as is. For integers cast to an appropriate-sized
# float. Small integers get smaller floats in an attempt
# to keep the memory footprint reasonable.
if a_min is np.ma.masked:
# all masked, so values don't matter
a_min, a_max = np.int32(0), np.int32(1)
if inp_dtype.kind == 'f':
scaled_dtype = A.dtype
# Cast to float64
if A.dtype not in (np.float32, np.float16):
if A.dtype != np.float64:
_api.warn_external(
f"Casting input data from '{A.dtype}' to "
f"'float64' for imshow")
scaled_dtype = np.float64
else:
# probably an integer of some type.
da = a_max.astype(np.float64) - a_min.astype(np.float64)
# give more breathing room if a big dynamic range
scaled_dtype = np.float64 if da > 1e8 else np.float32
# scale the input data to [.1, .9]. The Agg
# interpolators clip to [0, 1] internally, so use a
# smaller input scale to identify which of the
# interpolated points need to be flagged as
# over / under.
# This may introduce numeric instabilities in very broadly
# scaled data
# Always copy, and don't allow array subtypes.
A_scaled = np.array(A, dtype=scaled_dtype)
# clip scaled data around norm if necessary.
# This is necessary for big numbers at the edge of
# float64's ability to represent changes. Applying
# a norm first would be good, but ruins the interpolation
# of over numbers.
self.norm.autoscale_None(A)
dv = np.float64(self.norm.vmax) - np.float64(self.norm.vmin)
vmid = np.float64(self.norm.vmin) + dv / 2
fact = 1e7 if scaled_dtype == np.float64 else 1e4
newmin = vmid - dv * fact
if newmin < a_min:
newmin = None
else:
a_min = np.float64(newmin)
newmax = vmid + dv * fact
if newmax > a_max:
newmax = None
else:
a_max = np.float64(newmax)
if newmax is not None or newmin is not None:
np.clip(A_scaled, newmin, newmax, out=A_scaled)
# used to rescale the raw data to [offset, 1-offset]
# so that the resampling code will run cleanly. Using
# dyadic numbers here could reduce the error, but
# would not fully eliminate it and breaks a number of
# tests (due to the slightly different error bouncing
# some pixels across a boundary in the (very
# quantized) colormapping step).
offset = .1
frac = .8
# we need to run the vmin/vmax through the same rescaling
# that we run the raw data through because there are small
# errors in the round-trip due to float precision. If we
# do not run the vmin/vmax through the same pipeline we can
# have values close or equal to the boundaries end up on the
# wrong side.
vmin, vmax = self.norm.vmin, self.norm.vmax
if vmin is np.ma.masked:
vmin, vmax = a_min, a_max
vrange = np.array([vmin, vmax], dtype=scaled_dtype)
A_scaled -= a_min
vrange -= a_min
# a_min and a_max might be ndarray subclasses so use
# item to avoid errors
a_min = a_min.astype(scaled_dtype).item()
a_max = a_max.astype(scaled_dtype).item()
if a_min != a_max:
A_scaled /= ((a_max - a_min) / frac)
vrange /= ((a_max - a_min) / frac)
A_scaled += offset
vrange += offset
# resample the input data to the correct resolution and shape
A_resampled = _resample(self, A_scaled, out_shape, t)
# done with A_scaled now, remove from namespace to be sure!
del A_scaled
# un-scale the resampled data to approximately the
# original range. Things that interpolated to above /
# below the original min/max will still be above /
# below, but possibly clipped in the case of higher order
# interpolation + drastically changing data.
A_resampled -= offset
vrange -= offset
if a_min != a_max:
A_resampled *= ((a_max - a_min) / frac)
vrange *= ((a_max - a_min) / frac)
A_resampled += a_min
vrange += a_min
# if using NoNorm, cast back to the original datatype
if isinstance(self.norm, mcolors.NoNorm):
A_resampled = A_resampled.astype(A.dtype)
mask = (np.where(A.mask, np.float32(np.nan), np.float32(1))
if A.mask.shape == A.shape # nontrivial mask
else np.ones_like(A, np.float32))
# we always have to interpolate the mask to account for
# non-affine transformations
out_alpha = _resample(self, mask, out_shape, t, resample=True)
# done with the mask now, delete from namespace to be sure!
del mask
# Agg updates out_alpha in place. If the pixel has no image
# data it will not be updated (and will still be 0 as we
# initialized it); if the input data that would go into that
# output pixel is masked, it will be `nan`; if all the input
# data for a pixel is good it will be 1; and if there is _some_
# good data in that output pixel it will be between 0 and 1
# (such as at the edge of a rotated image).
out_mask = np.isnan(out_alpha)
out_alpha[out_mask] = 1
# Apply the pixel-by-pixel alpha values if present
alpha = self.get_alpha()
if alpha is not None and np.ndim(alpha) > 0:
out_alpha *= _resample(self, alpha, out_shape,
t, resample=True)
# mask and run through the norm
resampled_masked = np.ma.masked_array(A_resampled, out_mask)
# we have re-set the vmin/vmax to account for small errors
# that may have moved input values in/out of range
s_vmin, s_vmax = vrange
if isinstance(self.norm, mcolors.LogNorm):
if s_vmin < 0:
s_vmin = max(s_vmin, np.finfo(scaled_dtype).eps)
with cbook._setattr_cm(self.norm,
vmin=s_vmin,
vmax=s_vmax,
):
output = self.norm(resampled_masked)
else:
if A.shape[2] == 3:
A = _rgb_to_rgba(A)
alpha = self._get_scalar_alpha()
output_alpha = _resample( # resample alpha channel
self, A[..., 3], out_shape, t, alpha=alpha)
output = _resample( # resample rgb channels
self, _rgb_to_rgba(A[..., :3]), out_shape, t, alpha=alpha)
output[..., 3] = output_alpha # recombine rgb and alpha
# at this point output is either a 2D array of normed data
# (of int or float)
# or an RGBA array of re-sampled input
output = self.to_rgba(output, bytes=True, norm=False)
# output is now a correctly sized RGBA array of uint8
# Apply alpha *after* if the input was greyscale without a mask
if A.ndim == 2:
alpha = self._get_scalar_alpha()
alpha_channel = output[:, :, 3]
alpha_channel[:] = np.asarray(
np.asarray(alpha_channel, np.float32) * out_alpha * alpha,
np.uint8)
else:
if self._imcache is None:
self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
output = self._imcache
# Subset the input image to only the part that will be
# displayed
subset = TransformedBbox(clip_bbox, t0.inverted()).frozen()
output = output[
int(max(subset.ymin, 0)):
int(min(subset.ymax + 1, output.shape[0])),
int(max(subset.xmin, 0)):
int(min(subset.xmax + 1, output.shape[1]))]
t = Affine2D().translate(
int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t
return output, clipped_bbox.x0, clipped_bbox.y0, t
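# Numeric sketch of the rescaling performed in _make_image above
# (illustrative): with offset = 0.1 and frac = 0.8, data spanning
# [a_min, a_max] is mapped onto [0.1, 0.9] before resampling, e.g.
# a_min = 0, a_max = 50 maps 25 -> 25 / (50 / 0.8) + 0.1 = 0.5.
# Interpolation overshoot past the original min/max therefore stays inside
# the [0, 1] range that the Agg resampler clips to, and the inverse mapping
# afterwards restores approximately the original data range.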
def make_image(self, renderer, magnification=1.0, unsampled=False):
"""
Normalize, rescale, and colormap this image's data for rendering using
*renderer*, with the given *magnification*.
If *unsampled* is True, the image will not be scaled, but an
appropriate affine transformation will be returned instead.
Returns
-------
image : (M, N, 4) uint8 array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : Affine2D
The affine transformation from image to pixel space.
"""
raise NotImplementedError('The make_image method must be overridden')
def _check_unsampled_image(self):
"""
Return whether the image is better to be drawn unsampled.
The derived class needs to override it.
"""
return False
@martist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
# if not visible, declare victory and return
if not self.get_visible():
self.stale = False
return
# for empty images, there is nothing to draw!
if self.get_array().size == 0:
self.stale = False
return
# actually render the image.
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_alpha(self._get_scalar_alpha())
gc.set_url(self.get_url())
gc.set_gid(self.get_gid())
if (renderer.option_scale_image() # Renderer supports transform kwarg.
and self._check_unsampled_image()
and self.get_transform().is_affine):
im, l, b, trans = self.make_image(renderer, unsampled=True)
if im is not None:
trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
renderer.draw_image(gc, l, b, im, trans)
else:
im, l, b, trans = self.make_image(
renderer, renderer.get_image_magnification())
if im is not None:
renderer.draw_image(gc, l, b, im)
gc.restore()
self.stale = False
def contains(self, mouseevent):
"""Test whether the mouse event occurred within the image."""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
# 1) This doesn't work for figimage; but figimage also needs a fix
# below (as the check cannot use x/ydata and extents).
# 2) As long as the check below uses x/ydata, we need to test axes
# identity instead of `self.axes.contains(event)` because even if
# axes overlap, x/ydata is only valid for event.inaxes anyways.
if self.axes is not mouseevent.inaxes:
return False, {}
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
# be too difficult given that the image is rectilinear)
trans = self.get_transform().inverted()
x, y = trans.transform([mouseevent.x, mouseevent.y])
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
if x is not None and y is not None:
inside = (xmin <= x <= xmax) and (ymin <= y <= ymax)
else:
inside = False
return inside, {}
def write_png(self, fname):
"""Write the image to png file *fname*."""
im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A,
bytes=True, norm=True)
PIL.Image.fromarray(im).save(fname, format="png")
def set_data(self, A):
"""
Set the image array.
Note that this function does *not* update the normalization used.
Parameters
----------
A : array-like or `PIL.Image.Image`
"""
if isinstance(A, PIL.Image.Image):
A = pil_to_array(A) # Needed e.g. to apply png palette.
self._A = cbook.safe_masked_invalid(A, copy=True)
if (self._A.dtype != np.uint8 and
not np.can_cast(self._A.dtype, float, "same_kind")):
raise TypeError("Image data of dtype {} cannot be converted to "
"float".format(self._A.dtype))
if self._A.ndim == 3 and self._A.shape[-1] == 1:
# If just one dimension assume scalar and apply colormap
self._A = self._A[:, :, 0]
if not (self._A.ndim == 2
or self._A.ndim == 3 and self._A.shape[-1] in [3, 4]):
raise TypeError("Invalid shape {} for image data"
.format(self._A.shape))
if self._A.ndim == 3:
# If the input data has values outside the valid range (after
# normalisation), we issue a warning and then clip X to the bounds
# - otherwise casting wraps extreme values, hiding outliers and
# making reliable interpretation impossible.
high = 255 if np.issubdtype(self._A.dtype, np.integer) else 1
if self._A.min() < 0 or high < self._A.max():
_log.warning(
'Clipping input data to the valid range for imshow with '
'RGB data ([0..1] for floats or [0..255] for integers).'
)
self._A = np.clip(self._A, 0, high)
# Cast unsupported integer types to uint8
if self._A.dtype != np.uint8 and np.issubdtype(self._A.dtype,
np.integer):
self._A = self._A.astype(np.uint8)
self._imcache = None
self._rgbacache = None
self.stale = True
def set_array(self, A):
"""
Retained for backwards compatibility - use set_data instead.
Parameters
----------
A : array-like
"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked by mistake.
self.set_data(A)
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'antialiased', 'nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos',
or 'none'.
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
If None, use :rc:`image.interpolation`. If 'none', the image is
shown as is without interpolating. 'none' is only supported in
agg, ps and pdf backends and will fall back to 'nearest' mode
for other backends.
Parameters
----------
s : {'antialiased', 'nearest', 'bilinear', 'bicubic', 'spline16', \
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', \
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None
"""
if s is None:
s = mpl.rcParams['image.interpolation']
s = s.lower()
_api.check_in_list(_interpd_, interpolation=s)
self._interpolation = s
self.stale = True
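# Usage sketch (illustrative): the interpolation can be chosen at creation
# time or changed later on the returned AxesImage.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#
#     im = plt.imshow(np.random.rand(8, 8), interpolation='nearest')
#     im.set_interpolation('lanczos')  # any key of _interpd_, or None for
#                                      # the :rc:`image.interpolation` default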
def can_composite(self):
"""Return whether the image can be composited with its neighbors."""
trans = self.get_transform()
return (
self._interpolation != 'none' and
trans.is_affine and
trans.is_separable)
def set_resample(self, v):
"""
Set whether image resampling is used.
Parameters
----------
v : bool or None
If None, use :rc:`image.resample`.
"""
if v is None:
v = mpl.rcParams['image.resample']
self._resample = v
self.stale = True
def get_resample(self):
"""Return whether image resampling is used."""
return self._resample
def set_filternorm(self, filternorm):
"""
Set whether the resize filter normalizes the weights.
See help for `~.Axes.imshow`.
Parameters
----------
filternorm : bool
"""
self._filternorm = bool(filternorm)
self.stale = True
def get_filternorm(self):
"""Return whether the resize filter normalizes the weights."""
return self._filternorm
def set_filterrad(self, filterrad):
"""
Set the resize filter radius, only applicable to some
interpolation schemes -- see help for `~.Axes.imshow`.
Parameters
----------
filterrad : positive float
"""
r = float(filterrad)
if r <= 0:
raise ValueError("The filter radius must be a positive number")
self._filterrad = r
self.stale = True
def get_filterrad(self):
"""Return the filterrad setting."""
return self._filterrad
class AxesImage(_ImageBase):
"""
An image attached to an Axes.
Parameters
----------
ax : `~.axes.Axes`
The axes the image will belong to.
cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
The Colormap instance or registered colormap name used to map scalar
data to colors.
norm : `~matplotlib.colors.Normalize`
Maps luminance to 0-1.
interpolation : str, default: :rc:`image.interpolation`
Supported values are 'none', 'antialiased', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
'sinc', 'lanczos', 'blackman'.
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Place the [0, 0] index of the array in the upper left or lower left
corner of the axes. The convention 'upper' is typically used for
matrices and images.
extent : tuple, optional
The data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
filternorm : bool, default: True
A parameter for the antigrain image resize filter
(see the antigrain documentation).
If filternorm is set, the filter normalizes integer values and corrects
the rounding errors. It doesn't do anything with the source floating
point values; it corrects only integers according to the rule of 1.0,
which means that any sum of pixel weights must be equal to 1.0. So,
the filter function must produce a graph of the proper shape.
filterrad : float > 0, default: 4
The filter radius for filters that have a radius parameter, i.e. when
interpolation is one of: 'sinc', 'lanczos' or 'blackman'.
resample : bool, default: False
When True, use a full resampling method. When False, only resample when
the output image is larger than the input image.
**kwargs : `.Artist` properties
"""
def __str__(self):
return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
def __init__(self, ax,
cmap=None,
norm=None,
interpolation=None,
origin=None,
extent=None,
filternorm=True,
filterrad=4.0,
resample=False,
**kwargs
):
self._extent = extent
super().__init__(
ax,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
def get_window_extent(self, renderer=None):
x0, x1, y0, y1 = self._extent
bbox = Bbox.from_extents([x0, y0, x1, y1])
return bbox.transformed(self.axes.transData)
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
trans = self.get_transform()
# image is created in the canvas coordinate.
x1, x2, y1, y2 = self.get_extent()
bbox = Bbox(np.array([[x1, y1], [x2, y2]]))
transformed_bbox = TransformedBbox(bbox, trans)
clip = ((self.get_clip_box() or self.axes.bbox) if self.get_clip_on()
else self.figure.bbox)
return self._make_image(self._A, bbox, transformed_bbox, clip,
magnification, unsampled=unsampled)
def _check_unsampled_image(self):
"""Return whether the image would be better drawn unsampled."""
return self.get_interpolation() == "none"
def set_extent(self, extent):
"""
Set the image extent.
Parameters
----------
extent : 4-tuple of float
The position and size of the image as tuple
``(left, right, bottom, top)`` in data coordinates.
Notes
-----
This updates ``ax.dataLim``, and, if autoscaling, sets ``ax.viewLim``
to tightly fit the image, regardless of ``dataLim``. Autoscaling
state is not changed, so following this with ``ax.autoscale_view()``
will redo the autoscaling in accord with ``dataLim``.
"""
self._extent = xmin, xmax, ymin, ymax = extent
corners = (xmin, ymin), (xmax, ymax)
self.axes.update_datalim(corners)
self.sticky_edges.x[:] = [xmin, xmax]
self.sticky_edges.y[:] = [ymin, ymax]
if self.axes._autoscaleXon:
self.axes.set_xlim((xmin, xmax), auto=None)
if self.axes._autoscaleYon:
self.axes.set_ylim((ymin, ymax), auto=None)
self.stale = True
def get_extent(self):
"""Return the image extent as tuple (left, right, bottom, top)."""
if self._extent is not None:
return self._extent
else:
sz = self.get_size()
numrows, numcols = sz
if self.origin == 'upper':
return (-0.5, numcols-0.5, numrows-0.5, -0.5)
else:
return (-0.5, numcols-0.5, -0.5, numrows-0.5)
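# Usage sketch (illustrative): placing an image in data coordinates via its
# extent.
#
#     fig, ax = plt.subplots()
#     im = ax.imshow(np.arange(12).reshape(3, 4), extent=(0, 4, 0, 3))
#     im.get_extent()              # -> (0, 4, 0, 3)
#     im.set_extent((0, 8, 0, 6))  # stretch the image; if autoscaling is on,
#                                  # the axes limits follow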
def get_cursor_data(self, event):
"""
Return the image value at the event position or *None* if the event is
outside the image.
See Also
--------
matplotlib.artist.Artist.get_cursor_data
"""
xmin, xmax, ymin, ymax = self.get_extent()
if self.origin == 'upper':
ymin, ymax = ymax, ymin
arr = self.get_array()
data_extent = Bbox([[xmin, ymin], [xmax, ymax]])
array_extent = Bbox([[0, 0], [arr.shape[1], arr.shape[0]]])
trans = self.get_transform().inverted()
trans += BboxTransform(boxin=data_extent, boxout=array_extent)
point = trans.transform([event.x, event.y])
if any(np.isnan(point)):
return None
j, i = point.astype(int)
# Clip the coordinates at array bounds
if not (0 <= i < arr.shape[0]) or not (0 <= j < arr.shape[1]):
return None
else:
return arr[i, j]
def format_cursor_data(self, data):
if np.ndim(data) == 0 and self.colorbar:
return (
"["
+ cbook.strip_math(
self.colorbar.formatter.format_data_short(data)).strip()
+ "]")
else:
return super().format_cursor_data(data)
class NonUniformImage(AxesImage):
mouseover = False # This class still needs its own get_cursor_data impl.
def __init__(self, ax, *, interpolation='nearest', **kwargs):
"""
Parameters
----------
interpolation : {'nearest', 'bilinear'}, default: 'nearest'
**kwargs
All other keyword arguments are identical to those of `.AxesImage`.
"""
super().__init__(ax, **kwargs)
self.set_interpolation(interpolation)
def _check_unsampled_image(self):
"""Return False. Do not use unsampled image."""
return False
is_grayscale = _api.deprecate_privatize_attribute("3.3")
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
if self._A is None:
raise RuntimeError('You must first set the image array')
if unsampled:
raise ValueError('unsampled not supported on NonUniformImage')
A = self._A
if A.ndim == 2:
if A.dtype != np.uint8:
A = self.to_rgba(A, bytes=True)
self._is_grayscale = self.cmap.is_gray()
else:
A = np.repeat(A[:, :, np.newaxis], 4, 2)
A[:, :, 3] = 255
self._is_grayscale = True
else:
if A.dtype != np.uint8:
A = (255*A).astype(np.uint8)
if A.shape[2] == 3:
B = np.zeros(tuple([*A.shape[0:2], 4]), np.uint8)
B[:, :, 0:3] = A
B[:, :, 3] = 255
A = B
self._is_grayscale = False
vl = self.axes.viewLim
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width *= magnification
height *= magnification
im = _image.pcolor(self._Ax, self._Ay, A,
int(height), int(width),
(vl.x0, vl.x1, vl.y0, vl.y1),
_interpd_[self._interpolation])
return im, l, b, IdentityTransform()
def set_data(self, x, y, A):
"""
Set the grid for the pixel centers, and the pixel values.
Parameters
----------
x, y : 1D array-like
Monotonic arrays of shapes (N,) and (M,), respectively, specifying
pixel centers.
A : array-like
(M, N) ndarray or masked array of values to be colormapped, or
(M, N, 3) RGB array, or (M, N, 4) RGBA array.
"""
x = np.array(x, np.float32)
y = np.array(y, np.float32)
A = cbook.safe_masked_invalid(A, copy=True)
if not (x.ndim == y.ndim == 1 and A.shape[0:2] == y.shape + x.shape):
raise TypeError("Axes don't match array shape")
if A.ndim not in [2, 3]:
raise TypeError("Can only plot 2D or 3D data")
if A.ndim == 3 and A.shape[2] not in [1, 3, 4]:
raise TypeError("3D arrays must have three (RGB) "
"or four (RGBA) color components")
if A.ndim == 3 and A.shape[2] == 1:
A = A.squeeze(axis=-1)
self._A = A
self._Ax = x
self._Ay = y
self._imcache = None
self.stale = True
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_interpolation(self, s):
"""
Parameters
----------
s : {'nearest', 'bilinear'} or None
If None, use :rc:`image.interpolation`.
"""
if s is not None and s not in ('nearest', 'bilinear'):
raise NotImplementedError('Only nearest neighbor and '
'bilinear interpolations are supported')
super().set_interpolation(s)
def get_extent(self):
if self._A is None:
raise RuntimeError('Must set data first')
return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
def set_filternorm(self, s):
pass
def set_filterrad(self, s):
pass
def set_norm(self, norm):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
super().set_norm(norm)
def set_cmap(self, cmap):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
super().set_cmap(cmap)
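# Usage sketch (illustrative): a NonUniformImage is added to an Axes
# explicitly rather than through imshow.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib.image import NonUniformImage
#
#     x = np.array([0.0, 1.0, 3.0, 7.0])  # non-uniform pixel centers, len N
#     y = np.array([0.0, 2.0, 5.0])       # len M
#     z = np.random.rand(3, 4)            # shape (M, N)
#     fig, ax = plt.subplots()
#     im = NonUniformImage(ax, interpolation='bilinear')
#     im.set_data(x, y, z)
#     ax.add_image(im)
#     ax.set_xlim(0, 7)
#     ax.set_ylim(0, 5)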
class PcolorImage(AxesImage):
"""
Make a pcolor-style plot with an irregular rectangular grid.
This uses a variation of the original irregular image code,
and it is used by pcolorfast for the corresponding grid type.
"""
def __init__(self, ax,
x=None,
y=None,
A=None,
cmap=None,
norm=None,
**kwargs
):
"""
Parameters
----------
ax : `~.axes.Axes`
The axes the image will belong to.
x, y : 1D array-like, optional
Monotonic arrays of length N+1 and M+1, respectively, specifying
rectangle boundaries. If not given, will default to
``range(N + 1)`` and ``range(M + 1)``, respectively.
A : array-like
The data to be color-coded. The interpretation depends on the
shape:
- (M, N) ndarray or masked array: values to be colormapped
- (M, N, 3): RGB array
- (M, N, 4): RGBA array
cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
The Colormap instance or registered colormap name used to map
scalar data to colors.
norm : `~matplotlib.colors.Normalize`
Maps luminance to 0-1.
**kwargs : `.Artist` properties
"""
super().__init__(ax, norm=norm, cmap=cmap)
self.update(kwargs)
if A is not None:
self.set_data(x, y, A)
is_grayscale = _api.deprecate_privatize_attribute("3.3")
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
if self._A is None:
raise RuntimeError('You must first set the image array')
if unsampled:
raise ValueError('unsampled not supported on PColorImage')
fc = self.axes.patch.get_facecolor()
bg = mcolors.to_rgba(fc, 0)
bg = (np.array(bg)*255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width = int(round(width * magnification))
height = int(round(height * magnification))
if self._rgbacache is None:
A = self.to_rgba(self._A, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self._is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(self._Ax, self._Ay, A,
height,
width,
(vl.x0, vl.x1, vl.y0, vl.y1),
bg)
return im, l, b, IdentityTransform()
def _check_unsampled_image(self):
return False
def set_data(self, x, y, A):
"""
Set the grid for the rectangle boundaries, and the data values.
Parameters
----------
x, y : 1D array-like, optional
Monotonic arrays of length N+1 and M+1, respectively, specifying
rectangle boundaries. If not given, will default to
``range(N + 1)`` and ``range(M + 1)``, respectively.
A : array-like
The data to be color-coded. The interpretation depends on the
shape:
- (M, N) ndarray or masked array: values to be colormapped
- (M, N, 3): RGB array
- (M, N, 4): RGBA array
"""
A = cbook.safe_masked_invalid(A, copy=True)
if x is None:
x = np.arange(0, A.shape[1]+1, dtype=np.float64)
else:
x = np.array(x, np.float64).ravel()
if y is None:
y = np.arange(0, A.shape[0]+1, dtype=np.float64)
else:
y = np.array(y, np.float64).ravel()
if A.shape[:2] != (y.size-1, x.size-1):
raise ValueError(
"Axes don't match array shape. Got %s, expected %s." %
(A.shape[:2], (y.size - 1, x.size - 1)))
if A.ndim not in [2, 3]:
raise ValueError("A must be 2D or 3D")
if A.ndim == 3 and A.shape[2] == 1:
A = A.squeeze(axis=-1)
self._is_grayscale = False
if A.ndim == 3:
if A.shape[2] in [3, 4]:
if ((A[:, :, 0] == A[:, :, 1]).all() and
(A[:, :, 0] == A[:, :, 2]).all()):
self._is_grayscale = True
else:
raise ValueError("3D arrays must have RGB or RGBA as last dim")
# For efficient cursor readout, ensure x and y are increasing.
if x[-1] < x[0]:
x = x[::-1]
A = A[:, ::-1]
if y[-1] < y[0]:
y = y[::-1]
A = A[::-1]
self._A = A
self._Ax = x
self._Ay = y
self._rgbacache = None
self.stale = True
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def get_cursor_data(self, event):
# docstring inherited
x, y = event.xdata, event.ydata
if (x < self._Ax[0] or x > self._Ax[-1] or
y < self._Ay[0] or y > self._Ay[-1]):
return None
j = np.searchsorted(self._Ax, x) - 1
i = np.searchsorted(self._Ay, y) - 1
try:
return self._A[i, j]
except IndexError:
return None
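# Usage sketch (illustrative): a PcolorImage is normally created through
# Axes.pcolorfast when the x/y boundaries are unevenly spaced.
#
#     x = np.array([0.0, 1.0, 2.5, 5.0, 10.0])  # N+1 rectangle boundaries
#     y = np.array([0.0, 1.0, 3.0, 6.0])        # M+1 rectangle boundaries
#     data = np.random.rand(3, 4)               # shape (M, N)
#     fig, ax = plt.subplots()
#     im = ax.pcolorfast(x, y, data)            # returns a PcolorImage here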
class FigureImage(_ImageBase):
"""An image attached to a figure."""
zorder = 0
_interpolation = 'nearest'
def __init__(self, fig,
cmap=None,
norm=None,
offsetx=0,
offsety=0,
origin=None,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
super().__init__(
None,
norm=norm,
cmap=cmap,
origin=origin
)
self.figure = fig
self.ox = offsetx
self.oy = offsety
self.update(kwargs)
self.magnification = 1.0
def get_extent(self):
"""Return the image extent as tuple (left, right, bottom, top)."""
numrows, numcols = self.get_size()
return (-0.5 + self.ox, numcols-0.5 + self.ox,
-0.5 + self.oy, numrows-0.5 + self.oy)
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
fac = renderer.dpi/self.figure.dpi
# fac here is to account for pdf, eps, svg backends where
# figure.dpi is set to 72. This means we need to scale the
# image (using magnification) and offset it appropriately.
bbox = Bbox([[self.ox/fac, self.oy/fac],
[(self.ox/fac + self._A.shape[1]),
(self.oy/fac + self._A.shape[0])]])
width, height = self.figure.get_size_inches()
width *= renderer.dpi
height *= renderer.dpi
clip = Bbox([[0, 0], [width, height]])
return self._make_image(
self._A, bbox, bbox, clip, magnification=magnification / fac,
unsampled=unsampled, round_to_pixel_border=False)
def set_data(self, A):
"""Set the image array."""
cm.ScalarMappable.set_array(self,
cbook.safe_masked_invalid(A, copy=True))
self.stale = True
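# Usage sketch (illustrative): a FigureImage is normally created through
# Figure.figimage, which places the array in figure pixels without
# resampling it to an Axes.
#
#     fig = plt.figure()
#     fig.figimage(np.random.rand(100, 100), xo=20, yo=30, cmap='viridis')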
class BboxImage(_ImageBase):
"""The Image class whose size is determined by the given bbox."""
def __init__(self, bbox,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=True,
filterrad=4.0,
resample=False,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
super().__init__(
None,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self.bbox = bbox
def get_window_extent(self, renderer=None):
if renderer is None:
renderer = self.get_figure()._cachedRenderer
if isinstance(self.bbox, BboxBase):
return self.bbox
elif callable(self.bbox):
return self.bbox(renderer)
else:
raise ValueError("Unknown type of bbox")
def contains(self, mouseevent):
"""Test whether the mouse event occurred within the image."""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible(): # or self.get_figure()._renderer is None:
return False, {}
x, y = mouseevent.x, mouseevent.y
inside = self.get_window_extent().contains(x, y)
return inside, {}
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
width, height = renderer.get_canvas_width_height()
bbox_in = self.get_window_extent(renderer).frozen()
bbox_in._points /= [width, height]
bbox_out = self.get_window_extent(renderer)
clip = Bbox([[0, 0], [width, height]])
self._transform = BboxTransformTo(clip)
return self._make_image(
self._A,
bbox_in, bbox_out, clip, magnification, unsampled=unsampled)
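# Usage sketch (illustrative): a BboxImage drawn inside an axes-relative
# bounding box.
#
#     from matplotlib.transforms import Bbox, TransformedBbox
#
#     fig, ax = plt.subplots()
#     bbox = TransformedBbox(Bbox.from_bounds(0.2, 0.2, 0.4, 0.3),
#                            ax.transAxes)
#     bbox_image = BboxImage(bbox, cmap='plasma')
#     bbox_image.set_data(np.random.rand(16, 16))
#     ax.add_artist(bbox_image)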
def imread(fname, format=None):
"""
Read an image from a file into an array.
Parameters
----------
fname : str or file-like
The image file to read: a filename, a URL or a file-like object opened
in read-binary mode.
Passing a URL is deprecated. Please open the URL
for reading and pass the result to Pillow, e.g. with
``PIL.Image.open(urllib.request.urlopen(url))``.
format : str, optional
The image file format assumed for reading the data. If not
given, the format is deduced from the filename. If nothing can
be deduced, PNG is tried.
Returns
-------
`numpy.array`
The image data. The returned array has shape
- (M, N) for grayscale images.
- (M, N, 3) for RGB images.
- (M, N, 4) for RGBA images.
"""
# hide imports to speed initial import on systems with slow linkers
from urllib import parse
if format is None:
if isinstance(fname, str):
parsed = parse.urlparse(fname)
# If the string is a URL (Windows paths appear as if they have a
# length-1 scheme), assume png.
if len(parsed.scheme) > 1:
ext = 'png'
else:
ext = Path(fname).suffix.lower()[1:]
elif hasattr(fname, 'geturl'): # Returned by urlopen().
# We could try to parse the url's path and use the extension, but
# returning png is consistent with the block above. Note that this
# if clause has to come before checking for fname.name as
# urlopen("file:///...") also has a name attribute (with the fixed
# value "<urllib response>").
ext = 'png'
elif hasattr(fname, 'name'):
ext = Path(fname.name).suffix.lower()[1:]
else:
ext = 'png'
else:
ext = format
img_open = (
PIL.PngImagePlugin.PngImageFile if ext == 'png' else PIL.Image.open)
if isinstance(fname, str):
parsed = parse.urlparse(fname)
if len(parsed.scheme) > 1: # Pillow doesn't handle URLs directly.
_api.warn_deprecated(
"3.4", message="Directly reading images from URLs is "
"deprecated since %(since)s and will no longer be supported "
"%(removal)s. Please open the URL for reading and pass the "
"result to Pillow, e.g. with "
"``PIL.Image.open(urllib.request.urlopen(url))``.")
# hide imports to speed initial import on systems with slow linkers
from urllib import request
ssl_ctx = mpl._get_ssl_context()
if ssl_ctx is None:
_log.debug(
"Could not get certifi ssl context, https may not work."
)
with request.urlopen(fname, context=ssl_ctx) as response:
import io
try:
response.seek(0)
except (AttributeError, io.UnsupportedOperation):
response = io.BytesIO(response.read())
return imread(response, format=ext)
with img_open(fname) as image:
return (_pil_png_to_float_array(image)
if isinstance(image, PIL.PngImagePlugin.PngImageFile) else
pil_to_array(image))
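# Usage sketch (illustrative; 'example.png' is a placeholder path):
#
#     img = imread('example.png')
#     # For PNG input the result is float32 with values in [0, 1]; the shape
#     # is (M, N), (M, N, 3) or (M, N, 4) depending on the file.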
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
origin=None, dpi=100, *, metadata=None, pil_kwargs=None):
"""
Save an array as an image file.
Parameters
----------
fname : str or path-like or file-like
A path or a file-like object to store the image in.
If *format* is not set, then the output format is inferred from the
extension of *fname*, if any, and from :rc:`savefig.format` otherwise.
If *format* is set, it determines the output format.
arr : array-like
The image data. The shape can be one of
MxN (luminance), MxNx3 (RGB) or MxNx4 (RGBA).
vmin, vmax : float, optional
*vmin* and *vmax* set the color scaling for the image by fixing the
values that map to the colormap color limits. If either *vmin*
or *vmax* is None, that limit is determined from the *arr*
min/max value.
cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
A Colormap instance or registered colormap name. The colormap
maps scalar data to colors. It is ignored for RGB(A) data.
format : str, optional
The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when this
is unset is documented under *fname*.
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Indicates whether the ``(0, 0)`` index of the array is in the upper
left or lower left corner of the axes.
dpi : float
The DPI to store in the metadata of the file. This does not affect the
resolution of the output image. Depending on file format, this may be
rounded to the nearest integer.
metadata : dict, optional
Metadata in the image file. The supported keys depend on the output
format, see the documentation of the respective backends for more
information.
pil_kwargs : dict, optional
Keyword arguments passed to `PIL.Image.Image.save`. If the 'pnginfo'
key is present, it completely overrides *metadata*, including the
default 'Software' key.
"""
from matplotlib.figure import Figure
if isinstance(fname, os.PathLike):
fname = os.fspath(fname)
if format is None:
format = (Path(fname).suffix[1:] if isinstance(fname, str)
else mpl.rcParams["savefig.format"]).lower()
if format in ["pdf", "ps", "eps", "svg"]:
# Vector formats that are not handled by PIL.
if pil_kwargs is not None:
raise ValueError(
f"Cannot use 'pil_kwargs' when saving to {format}")
fig = Figure(dpi=dpi, frameon=False)
fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin,
resize=True)
fig.savefig(fname, dpi=dpi, format=format, transparent=True,
metadata=metadata)
else:
# Don't bother creating an image; this avoids rounding errors on the
# size when dividing and then multiplying by dpi.
sm = cm.ScalarMappable(cmap=cmap)
sm.set_clim(vmin, vmax)
if origin is None:
origin = mpl.rcParams["image.origin"]
if origin == "lower":
arr = arr[::-1]
if (isinstance(arr, memoryview) and arr.format == "B"
and arr.ndim == 3 and arr.shape[-1] == 4):
# Such an ``arr`` would also be handled fine by sm.to_rgba (after
# casting with asarray), but it is useful to special-case it
# because that's what backend_agg passes, and can in fact be used
# as is, saving a few operations.
rgba = arr
else:
rgba = sm.to_rgba(arr, bytes=True)
if pil_kwargs is None:
pil_kwargs = {}
pil_shape = (rgba.shape[1], rgba.shape[0])
image = PIL.Image.frombuffer(
"RGBA", pil_shape, rgba, "raw", "RGBA", 0, 1)
if format == "png":
# Only use the metadata kwarg if pnginfo is not set, because the
# semantics of duplicate keys in pnginfo is unclear.
if "pnginfo" in pil_kwargs:
if metadata:
_api.warn_external("'metadata' is overridden by the "
"'pnginfo' entry in 'pil_kwargs'.")
else:
metadata = {
"Software": (f"Matplotlib version{mpl.__version__}, "
f"https://matplotlib.org/"),
**(metadata if metadata is not None else {}),
}
pil_kwargs["pnginfo"] = pnginfo = PIL.PngImagePlugin.PngInfo()
for k, v in metadata.items():
if v is not None:
pnginfo.add_text(k, v)
if format in ["jpg", "jpeg"]:
format = "jpeg" # Pillow doesn't recognize "jpg".
facecolor = mpl.rcParams["savefig.facecolor"]
if cbook._str_equal(facecolor, "auto"):
facecolor = mpl.rcParams["figure.facecolor"]
color = tuple(int(x * 255) for x in mcolors.to_rgb(facecolor))
background = PIL.Image.new("RGB", pil_shape, color)
background.paste(image, image)
image = background
pil_kwargs.setdefault("format", format)
pil_kwargs.setdefault("dpi", (dpi, dpi))
image.save(fname, **pil_kwargs)
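# Usage sketch (illustrative; 'out.png' is a placeholder path):
#
#     data = np.random.rand(64, 64)
#     imsave('out.png', data, cmap='gray', vmin=0.0, vmax=1.0, dpi=150,
#            metadata={'Title': 'random noise'})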
def pil_to_array(pilImage):
"""
Load a `PIL image`_ and return it as a numpy int array.
.. _PIL image: https://pillow.readthedocs.io/en/latest/reference/Image.html
Returns
-------
numpy.array
The array shape depends on the image type:
- (M, N) for grayscale images.
- (M, N, 3) for RGB images.
- (M, N, 4) for RGBA images.
"""
if pilImage.mode in ['RGBA', 'RGBX', 'RGB', 'L']:
# return MxNx4 RGBA, MxNx3 RGB, or MxN luminance array
return np.asarray(pilImage)
elif pilImage.mode.startswith('I;16'):
# return MxN luminance array of uint16
raw = pilImage.tobytes('raw', pilImage.mode)
if pilImage.mode.endswith('B'):
x = np.frombuffer(raw, '>u2')
else:
x = np.frombuffer(raw, '<u2')
return x.reshape(pilImage.size[::-1]).astype('=u2')
else: # try to convert to an rgba image
try:
pilImage = pilImage.convert('RGBA')
except ValueError as err:
raise RuntimeError('Unknown image mode') from err
return np.asarray(pilImage) # return MxNx4 RGBA array
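# Usage sketch (illustrative; 'photo.jpg' is a placeholder path):
#
#     with PIL.Image.open('photo.jpg') as pil_img:
#         arr = pil_to_array(pil_img)  # uint8, (M, N, 3) for an RGB JPEG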
def _pil_png_to_float_array(pil_png):
"""Convert a PIL `PNGImageFile` to a 0-1 float array."""
# Unlike pil_to_array this converts to 0-1 float32s for backcompat with the
# old libpng-based loader.
# The supported rawmodes are from PIL.PngImagePlugin._MODES. When
# mode == "RGB(A)", the 16-bit raw data has already been coarsened to 8-bit
# by Pillow.
mode = pil_png.mode
rawmode = pil_png.png.im_rawmode
if rawmode == "1": # Grayscale.
return np.asarray(pil_png, np.float32)
if rawmode == "L;2": # Grayscale.
return np.divide(pil_png, 2**2 - 1, dtype=np.float32)
if rawmode == "L;4": # Grayscale.
return np.divide(pil_png, 2**4 - 1, dtype=np.float32)
if rawmode == "L": # Grayscale.
return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
if rawmode == "I;16B": # Grayscale.
return np.divide(pil_png, 2**16 - 1, dtype=np.float32)
if mode == "RGB": # RGB.
return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
if mode == "P": # Palette.
return np.divide(pil_png.convert("RGBA"), 2**8 - 1, dtype=np.float32)
if mode == "LA": # Grayscale + alpha.
return np.divide(pil_png.convert("RGBA"), 2**8 - 1, dtype=np.float32)
if mode == "RGBA": # RGBA.
return np.divide(pil_png, 2**8 - 1, dtype=np.float32)
raise ValueError(f"Unknown PIL rawmode: {rawmode}")
def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
preview=False):
"""
Make a thumbnail of image in *infile* with output filename *thumbfile*.
See :doc:`/gallery/misc/image_thumbnail_sgskip`.
Parameters
----------
infile : str or file-like
The image file. Matplotlib relies on Pillow_ for image reading, and
thus supports a wide range of file formats, including PNG, JPG, TIFF
and others.
.. _Pillow: https://python-pillow.org/
thumbfile : str or file-like
The thumbnail filename.
scale : float, default: 0.1
The scale factor for the thumbnail.
interpolation : str, default: 'bilinear'
The interpolation scheme used in the resampling. See the
*interpolation* parameter of `~.Axes.imshow` for possible values.
preview : bool, default: False
If True, the default backend (presumably a user interface
backend) will be used which will cause a figure to be raised if
`~matplotlib.pyplot.show` is called. If it is False, the figure is
created using `.FigureCanvasBase` and the drawing backend is selected
as `.Figure.savefig` would normally do.
Returns
-------
`~.figure.Figure`
The figure instance containing the thumbnail.
"""
im = imread(infile)
rows, cols, depth = im.shape
# This doesn't really matter (it cancels in the end) but the API needs it.
dpi = 100
height = rows / dpi * scale
width = cols / dpi * scale
if preview:
# Let the UI backend do everything.
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(width, height), dpi=dpi)
else:
from matplotlib.figure import Figure
fig = Figure(figsize=(width, height), dpi=dpi)
FigureCanvasBase(fig)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)
fig.savefig(thumbfile, dpi=dpi)
return fig
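# Usage sketch (illustrative; the file names are placeholders):
#
#     fig = thumbnail('large_photo.png', 'large_photo_thumb.png',
#                     scale=0.15, interpolation='lanczos')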