How to save an animated GIF to a variable using Pillow

I found out from here that I can create and save animated GIFs using Pillow. However, it doesn't look like the save method returns any value.
I can save the GIF to a file and then open that file using Image.open, but that seems unnecessary, given that I don't really want the GIF to be saved.
How can I save the GIF to a variable, rather than a file?
That is, I would like to be able to do some_variable.show() and display a GIF, without ever having to save the GIF onto my computer.

To avoid writing any files, you can just save your image to a BytesIO object. For example:
#!/usr/bin/env python
from __future__ import division
from PIL import Image
from PIL import ImageDraw
from io import BytesIO

N = 25  # number of frames

# Create individual frames
frames = []
for n in range(N):
    frame = Image.new("RGB", (200, 150), (25, 25, 255 * (N - n) // N))
    draw = ImageDraw.Draw(frame)
    x, y = frame.size[0] * n / N, frame.size[1] * n / N
    draw.ellipse((x, y, x + 40, y + 40), 'yellow')
    # Saving/opening is needed for better compression and quality
    fobj = BytesIO()
    frame.save(fobj, 'GIF')
    frame = Image.open(fobj)
    frames.append(frame)

# Save the frames as an animated GIF to BytesIO
animated_gif = BytesIO()
frames[0].save(animated_gif,
               format='GIF',
               save_all=True,
               append_images=frames[1:],  # Pillow >= 3.4.0
               duration=100,  # time per frame in ms (Pillow's GIF writer uses 'duration', not 'delay')
               loop=0)
animated_gif.seek(0, 2)  # jump to the end of the stream to measure its size
print('GIF image size =', animated_gif.tell())

# Optional: display image
#animated_gif.seek(0)
#ani = Image.open(animated_gif)
#ani.show()

# Optional: write contents to file
animated_gif.seek(0)
with open('animated.gif', 'wb') as out:
    out.write(animated_gif.read())
In the end, the variable animated_gif contains the complete contents of the GIF (a yellow ball moving diagonally across a blue background that gradually darkens).
However, displaying an animated GIF in Python is not very reliable: ani.show() from the code above displays only the first frame on my machine.
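To sanity-check the in-memory GIF without writing any file, you can rewind the buffer, reopen it, and count the frames; is_animated and n_frames are standard attributes of multi-frame Pillow images:

# Verify the in-memory GIF by counting its frames.
animated_gif.seek(0)
ani = Image.open(animated_gif)
print(ani.is_animated, ani.n_frames)  # expected: True 25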

Related

How to get Intel RealSense D435i camera serial numbers from frames for multiple cameras?

I have initialized one pipeline for two cameras and I am getting color and depth images from it.
The problem is that I cannot find the camera serial numbers for the corresponding frames, so I cannot determine which camera captured them.
Below is my code:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time

# Configure depth and color streams...
pipeline_1 = rs.pipeline()
config_1 = rs.config()
config_1.enable_device('938422072752')
config_1.enable_device('902512070386')
config_1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config_1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming from both cameras
pipeline_1.start(config_1)

try:
    while True:
        # Camera 1
        # Wait for a coherent pair of frames: depth and color
        frames_1 = pipeline_1.wait_for_frames()
        depth_frame_1 = frames_1.get_depth_frame()
        color_frame_1 = frames_1.get_color_frame()
        if not depth_frame_1 or not color_frame_1:
            continue
        # Convert images to numpy arrays
        depth_image_1 = np.asanyarray(depth_frame_1.get_data())
        color_image_1 = np.asanyarray(color_frame_1.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_1 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_1, alpha=0.5), cv2.COLORMAP_JET)

        # Camera 2
        # Wait for a coherent pair of frames: depth and color
        frames_2 = pipeline_1.wait_for_frames()
        depth_frame_2 = frames_2.get_depth_frame()
        color_frame_2 = frames_2.get_color_frame()
        if not depth_frame_2 or not color_frame_2:
            continue
        # Convert images to numpy arrays
        depth_image_2 = np.asanyarray(depth_frame_2.get_data())
        color_image_2 = np.asanyarray(color_frame_2.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_2 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_2, alpha=0.5), cv2.COLORMAP_JET)

        # Stack all images horizontally
        images = np.hstack((color_image_1, depth_colormap_1, color_image_2, depth_colormap_2))

        # Show images from both cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', images)
        cv2.waitKey(20)
finally:
    pipeline_1.stop()
How can I find the camera serial numbers after wait_for_frames() to determine which camera captured the depth and color images?
I adapted your code and combined it with the C++ example posted by nayab to compose the following code, which grabs the color images (only) of multiple RealSense cameras and stacks them horizontally:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time

# The context encapsulates all of the devices and sensors, and provides some additional functionality.
realsense_ctx = rs.context()
connected_devices = []

# Get serial numbers of connected devices:
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(
        rs.camera_info.serial_number)
    connected_devices.append(detected_camera)

pipelines = []
configs = []
for i in range(len(realsense_ctx.devices)):
    pipelines.append(rs.pipeline())  # one pipeline for each device
    configs.append(rs.config())      # one config for each device
    configs[i].enable_device(connected_devices[i])
    configs[i].enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    pipelines[i].start(configs[i])

try:
    while True:
        images = []
        for i in range(len(pipelines)):
            print("waiting for frame at cam", i)
            frames = pipelines[i].wait_for_frames()
            color_frame = frames.get_color_frame()
            images.append(np.asanyarray(color_frame.get_data()))

        # Stack all images horizontally
        image_composite = images[0]
        for i in range(1, len(images)):
            image_composite = np.hstack((image_composite, images[i]))

        # Show images from all cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', image_composite)
        cv2.waitKey(20)
finally:
    for i in range(len(pipelines)):
        pipelines[i].stop()
This will look for the connected devices and find their serial numbers.
They are saved in a list, and you can use them to start the available cameras.
# Configure depth and color streams...
realsense_ctx = rs.context()
connected_devices = []
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    connected_devices.append(detected_camera)
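If you need to recover the serial number at frame time rather than at setup time, the pipeline's active profile exposes its device. This is a sketch based on the standard pyrealsense2 API (get_active_profile() / get_device()), not part of the original answer:

# Sketch: map each running pipeline back to the serial number of its camera.
for i in range(len(pipelines)):
    device = pipelines[i].get_active_profile().get_device()
    serial = device.get_info(rs.camera_info.serial_number)
    print("pipeline", i, "streams from camera", serial)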

Is there a way to not display the .PNG image generated with Matplotlib FuncAnimation?

I have a script which uses the FuncAnimation routines in a loop to generate a lot of different animations which are saved in various directories. I'm working in Spyder, and every time an animation is generated, a .PNG image is displayed in the console.
Is there any way to NOT display the .PNG file with every animation? I'd like to turn off this image, since a long loop will fill up my console. When generating simple plots, it's easy to suppress the image by just not calling plt.show. For animations, plt.show isn't called at all and the image still displays.
You can see the .PNG image that gets generated in this basic example:
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation

def update_line(num, data, line):
    line.set_data(data[..., :num])
    return line,

# Fixing random state for reproducibility
np.random.seed(19680801)

# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)

fig = plt.figure()
data = np.random.rand(2, 25)
l, = plt.plot([], [], 'r-')
plt.xlim(0, 1)
plt.ylim(0, 1)
line_ani = animation.FuncAnimation(fig, update_line, 25, fargs=(data, l),
                                   interval=50, blit=True)
line_ani.save('lines.mp4', writer=writer)
The .PNG image associated with the animation can be effectively hidden by calling plt.close(fig) after the animation is saved. This answer was inspired by a response by Demis to a similar question asking about simple plots instead of animations.
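Applied to the example above, the fix is one extra line after saving:

line_ani.save('lines.mp4', writer=writer)
plt.close(fig)  # release the figure so Spyder/IPython does not render it inline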

How to save a figure from vis_bbox without a white background, when plotting with matplotlib?

I'm trying to save the image after a vis_bbox prediction with its original image dimensions.
My code:
from PIL import Image, ImageChops
import cv2
import matplotlib.pyplot as plt
# utils, vis_bbox, voc_colormap and model come from the asker's ChainerCV setup

img = utils.read_image('/home/ubuntu/ui.jpg', color=True)
bboxes, labels, scores = model.predict([img])
bbox, label, score = bboxes[0], labels[0], scores[0]
colors = voc_colormap(label + 1)
bccd_labels = ('cell', 'cell')
vis_bbox(img, bbox, label_names=bccd_labels, instance_colors=colors, alpha=0.9, linewidth=1.0)
plt.axis("off")
plt.savefig("/home/ubuntu/ins.jpg")
When saving, it writes the image with a white background and the default figure size (432x288).
I need to save the predicted image from vis_bbox with the original dimensions (1300x1300).
Any suggestions would be helpful!
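A general matplotlib technique for this (a sketch, not a tested answer from this thread) is to size the figure to the image's pixel dimensions and draw into an axes that fills the whole figure; this assumes ChainerCV's vis_bbox, which accepts an ax keyword:

import matplotlib.pyplot as plt

dpi = 100
fig = plt.figure(figsize=(1300 / dpi, 1300 / dpi), dpi=dpi)  # 1300x1300 pixels
ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])  # axes spanning the full figure, no margins
ax.set_axis_off()
fig.add_axes(ax)
vis_bbox(img, bbox, label_names=bccd_labels, instance_colors=colors,
         alpha=0.9, linewidth=1.0, ax=ax)
fig.savefig("/home/ubuntu/ins.jpg", dpi=dpi)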

Barcode decoding with pyzbar on raspberry pi

I am using pyzbar to decode barcodes on a Raspberry Pi 3 using Pi Camera v1 (resolution 1296x972). QR codes are decoded very well, but when decoding one-dimensional barcodes (CODABAR), the success rate is very low.
I have tried saving one frame from the video stream and decoding it with pyzbar on the Raspberry Pi, and it fails. When I try to decode the same image on Ubuntu, it decodes successfully.
from pyzbar import pyzbar
from PIL import Image

img = Image.open('sampleImage.png')
d = pyzbar.decode(img)
print(d)
Any thoughts what may be the problem?
UPDATE:
The following image is my specific use case.
Because I am using Pi Camera v1 to take images, I tried adjusting the image sharpness:
from picamera import PiCamera
self.camera = PiCamera()
self.camera.sharpness = 100
The following image is with sharpness 100. However, pyzbar still fails to decode it on the Raspberry Pi.
You need to remove the black border from your image. According to this answer,
you can simply crop your image and then feed it to the pyzbar.decode() function.
import cv2
from pyzbar import pyzbar
import numpy as np

def autocrop(image, threshold=0):
    """Crop away any edges whose values are below or equal to threshold.

    A completely blank image is cropped to 1x1. Returns the cropped image.
    """
    if len(image.shape) == 3:
        flatImage = np.max(image, 2)  # collapse the color channels
    else:
        flatImage = image
    assert len(flatImage.shape) == 2

    # despite the name, `rows` holds the non-blank column indices (max over axis 0)
    rows = np.where(np.max(flatImage, 0) > threshold)[0]
    if rows.size:
        # ...and `cols` holds the non-blank row indices (max over axis 1)
        cols = np.where(np.max(flatImage, 1) > threshold)[0]
        image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
    else:
        image = image[:1, :1]
    return image

if __name__ == "__main__":
    image = cv2.imread('sampleImage.png')
    crop = autocrop(image, 165)
    d = pyzbar.decode(crop)
    print(d)
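As an optional tweak (not required for the crop fix above), pyzbar can be told to search only for a specific symbology via the symbols argument of decode(); ZBarSymbol is part of pyzbar's public API:

from pyzbar.pyzbar import decode, ZBarSymbol

# Only look for Codabar codes instead of every supported symbology.
d = decode(crop, symbols=[ZBarSymbol.CODABAR])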

How to Urlretrieve and crop image based on data from CSV file?

I have a CSV file with URLs and box coordinates (x and y of the top-left corner, x and y of the bottom-right corner), and I would like to fetch each image, crop it based on the coordinates (to 256x256), and then save it. Unfortunately, downloading the whole database and then creating a separate one with cropped images is impractical due to the size of the database, so the database of cropped images has to be built from the start. Another way is to save each image, crop it, overwrite the initial image, and then iterate (i += 1) to the next one.
Would the current approach work, or should I use a different method? Additionally, how would I save the acquired images to a specified folder, as currently they download to the same folder as the script?
import urllib.request
import csv
import numpy as np
import pandas as pd
from io import BytesIO
import requests
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

filename = "images"

# open file to read
with open("data_test.csv".format(filename), 'r') as csvfile:
    reader = csv.reader(csvfile)
    # pop header row (1st row in csv)
    header = next(reader)
    # iterate on all lines
    i = 0
    for line in csvfile:
        splitted_line = line.split(',')
        # check if we have an image URL
        if splitted_line[1] != '' and splitted_line[1] != "\n":
            response = requests.get(splitted_line[1])
            img = Image.open(BytesIO(response.content))
            #crop_img = img[splitted_line[2]:splitted_line[3], splitted_line[4]:splitted_line[5]]
            #crop_img = img[315:105, 370:173]
            img.save(str(i) + ".png")
            #crop_img = img[105:105+173, 315:315+370]
            #[y: y + h, x: x + w]
            new_img = img.resize((256, 256))
            new_img.save(str(i) + ".png")
            imgplot = plt.imshow(img)
            plt.show()
            # urllib.request.urlopen(splitted_line[1])
            print("Image saved for {0}".format(splitted_line[0]))
            # img = cv2.imread(img_path, 0)
            i += 1
        else:
            print("No result for {0}".format(splitted_line[0]))
Any further recommendations are welcome.
Edit: the latest version gives me this error:
crop_img = img[105:105+173,315:315+370]
TypeError: 'JpegImageFile' object is not subscriptable
I solved the problem using BytesIO and some cropping/resizing techniques: a PIL Image is not subscriptable like a NumPy array, so the crop has to go through Image.crop with a (left, upper, right, lower) box.
import csv
from io import BytesIO
import requests
from PIL import Image
import matplotlib.pyplot as plt

filename = "images"

# open file to read
with open("data_test.csv".format(filename), 'r') as csvfile:
    reader = csv.reader(csvfile)
    # pop header row (1st row in csv)
    header = next(reader)
    # iterate on all lines
    i = 0
    for line in csvfile:
        splitted_line = line.split(',')
        # check if we have an image URL
        if splitted_line[1] != '' and splitted_line[1] != "\n":
            response = requests.get(splitted_line[1])
            img = Image.open(BytesIO(response.content))
            # im.crop(box) ⇒ box is a 4-tuple defining the left, upper, right, and lower pixel coordinates
            left_x = int(splitted_line[2])
            top_y = int(splitted_line[3])
            right_x = int(splitted_line[4])
            bottom_y = int(splitted_line[5])
            crop = img.crop((left_x, top_y, right_x, bottom_y))
            new_img = crop.resize((256, 256))
            """
            # preview new images
            imgplot = plt.imshow(new_img)
            plt.show()
            """
            new_img.save(str(i) + ".png")
            print("Image saved for {0}".format(splitted_line[0]))
            i += 1
        else:
            print("No result for {0}".format(splitted_line[0]))
Hope it will help someone. Any optimization recommendations are still welcome.
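One loose end from the question is saving the images to a specified folder; a minimal sketch using only the standard library (the "images" directory name is just an example):

import os

out_dir = "images"  # example target folder
os.makedirs(out_dir, exist_ok=True)  # create it if it does not exist
new_img.save(os.path.join(out_dir, "{0}.png".format(i)))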