How to get Intel RealSense D435i camera serial numbers from frames for multiple cameras?

I have initialized one pipeline for two cameras, and I am getting color and depth images from it.
The problem is that I cannot find the camera serial numbers for the corresponding frames, so I cannot determine which camera captured which frames.
Below is my code:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time
# Configure depth and color streams...
pipeline_1 = rs.pipeline()
config_1 = rs.config()
config_1.enable_device('938422072752')
config_1.enable_device('902512070386')
config_1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config_1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming from both cameras
pipeline_1.start(config_1)
try:
    while True:
        # Camera 1
        # Wait for a coherent pair of frames: depth and color
        frames_1 = pipeline_1.wait_for_frames()
        depth_frame_1 = frames_1.get_depth_frame()
        color_frame_1 = frames_1.get_color_frame()
        if not depth_frame_1 or not color_frame_1:
            continue
        # Convert images to numpy arrays
        depth_image_1 = np.asanyarray(depth_frame_1.get_data())
        color_image_1 = np.asanyarray(color_frame_1.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_1 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_1, alpha=0.5), cv2.COLORMAP_JET)
        # Camera 2
        # Wait for a coherent pair of frames: depth and color
        frames_2 = pipeline_1.wait_for_frames()
        depth_frame_2 = frames_2.get_depth_frame()
        color_frame_2 = frames_2.get_color_frame()
        if not depth_frame_2 or not color_frame_2:
            continue
        # Convert images to numpy arrays
        depth_image_2 = np.asanyarray(depth_frame_2.get_data())
        color_image_2 = np.asanyarray(color_frame_2.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_2 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_2, alpha=0.5), cv2.COLORMAP_JET)
        # Stack all images horizontally
        images = np.hstack((color_image_1, depth_colormap_1, color_image_2, depth_colormap_2))
        # Show images from both cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', images)
        cv2.waitKey(20)
finally:
    pipeline_1.stop()
How can I find the camera serial numbers after wait_for_frames() to determine which camera captured the depth and color images?

I adapted your code and combined it with the C++ example posted by nayab to compose the following code, which grabs the color image (only) of multiple RealSense cameras and stacks them horizontally:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time
realsense_ctx = rs.context()  # The context encapsulates all of the devices and sensors, and provides some additional functionalities.
connected_devices = []
# get serial numbers of connected devices:
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    connected_devices.append(detected_camera)
pipelines = []
configs = []
for i in range(len(realsense_ctx.devices)):
    pipelines.append(rs.pipeline())  # one pipeline for each device
    configs.append(rs.config())      # one config for each device
    configs[i].enable_device(connected_devices[i])
    configs[i].enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    pipelines[i].start(configs[i])
try:
    while True:
        images = []
        for i in range(len(pipelines)):
            print("waiting for frame at cam", i)
            frames = pipelines[i].wait_for_frames()
            color_frame = frames.get_color_frame()
            images.append(np.asanyarray(color_frame.get_data()))
        # Stack all images horizontally
        image_composite = images[0]
        for i in range(1, len(images)):
            image_composite = np.hstack((image_composite, images[i]))
        # Show images from all cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', image_composite)
        cv2.waitKey(20)
finally:
    for i in range(len(pipelines)):
        pipelines[i].stop()

This looks for the connected devices and reads their serial numbers.
They are saved in a list, which you can then use to start the available cameras.
# Configure depth and color streams...
realsense_ctx = rs.context()
connected_devices = []
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    connected_devices.append(detected_camera)
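If you keep one pipeline per device as in the longer listing above, you can also read the serial number back from each started pipeline and use it to label whatever wait_for_frames() returns. A minimal sketch (my addition, assuming the pipelines list from that listing; untested here):

# Map every started pipeline to the serial number of the device behind it.
pipeline_serials = []
for pipe in pipelines:
    active_profile = pipe.get_active_profile()    # profile of the running stream
    device = active_profile.get_device()          # the rs.device this pipeline is attached to
    pipeline_serials.append(device.get_info(rs.camera_info.serial_number))

# Frames fetched from pipelines[i] now belong to the camera with pipeline_serials[i]:
# frames = pipelines[i].wait_for_frames()
# print("frame captured by camera", pipeline_serials[i])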

Related

Matplotlib transparent background without save() function

I have this kind of an animation and I want to integrate it into my GUI.
Here is the plot.
But the background color is set to black right now. Here is the code. I am using Windows 10, and for the GUI I am mostly using PyQt6, but for matplotlib I used mlp.use("TkAgg") because it didn't create any output if I don't use TkAgg.
I want to make it transparent. I only want the curves. I searched on the internet, but everything is about the save() function. Isn't there another solution for this? I don't want to save it; I am using an animation, so it should be transparent all the time, not just in a saved image.
import queue
import sys
from matplotlib.animation import FuncAnimation
import PyQt6.QtCore
import matplotlib as mlp
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
mlp.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
import sounddevice as sd

plt.rcParams['toolbar'] = 'None'
plt.rcParams.update({
    "figure.facecolor": "black",  # current (opaque) figure background colour
})
# Let's define audio variables
# We will use the default PC or laptop mic to input the sound
device = 0        # id of the audio device by default
window = 1000     # window for the data
downsample = 1    # how many samples to drop
channels = [1]    # a list of audio channels
interval = 40     # update interval in milliseconds for the plot
# let's make a queue
q = queue.Queue()
# Please note that sd.query_devices has an s at the end.
device_info = sd.query_devices(device, 'input')
samplerate = device_info['default_samplerate']
length = int(window * samplerate / (1000 * downsample))
plotdata = np.zeros((length, len(channels)))
# next is to make fig and axis of matplotlib plt
fig, ax = plt.subplots(figsize=(2, 1))
fig.subplots_adjust(0, 0, 1, 1)
ax.axis("off")
fig.canvas.manager.window.overrideredirect(1)
# let's set the title
ax.set_title("On Action")
# Make a matplotlib.lines.Line2D plot item of color green
# R,G,B = 0,1,0.29
lines = ax.plot(plotdata, color="purple")
# We will use an audio callback function to put the data into the queue
def audio_callback(indata, frames, time, status):
    q.put(indata[::downsample, [0]])
# Another function takes frames of audio samples from the queue
# and updates the lines
def update_plot(frame):
    global plotdata
    while True:
        try:
            data = q.get_nowait()
        except queue.Empty:
            break
        shift = len(data)
        plotdata = np.roll(plotdata, -shift, axis=0)
        # Elements that roll beyond the last position are re-introduced
        plotdata[-shift:, :] = data
    for column, line in enumerate(lines):
        line.set_ydata(plotdata[:, column])
    return lines
# Let's add the grid
ax.set_yticks([0])
# ax.yaxis.grid(True)
""" INPUT FROM MIC """
stream = sd.InputStream(device=device, channels=max(channels),
                        samplerate=samplerate, callback=audio_callback)
""" OUTPUT """
ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)
plt.get_current_fig_manager().window.wm_geometry("200x100+850+450")
with stream:
    plt.show()
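A possible direction for the transparency itself (my own sketch, not part of the original post, and assuming the TkAgg backend on Windows as in the question): give the figure and axes a solid key colour and tell the Tk window to treat that colour as transparent, so only the curves stay visible. Something like:

# Sketch only: relies on Tk's "-transparentcolor" window attribute, which exists on Windows.
# fig and ax are the objects created in the code above.
fig.patch.set_facecolor("black")   # key colour for the figure background
ax.patch.set_facecolor("black")    # key colour for the axes background
fig.canvas.manager.window.wm_attributes("-transparentcolor", "black")  # make that colour see-through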

Camera calibration python

Good evening. I'm trying to calibrate a camera. I followed the code posted on the OpenCV website, but when I run it, the code goes through the images I have given it, yet when it finishes it doesn't produce the calibration parameters. Here is the error message I get:
error: (-215:Assertion failed) nimages > 0 in function 'cv::calibrateCameraRO'
#!/usr/bin/env python
import cv2 as cv
import numpy as np
import os
import glob
# Defining the dimensions of checkerboard
CHECKERBOARD = (10, 7)
size = (1376, 917)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Defining the world coordinates for 3D points
objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
#prev_img_shape = None
# Creating vector to store vectors of 3D points for each checkerboard image
objpoints = []
# Creating vector to store vectors of 2D points for each checkerboard image
imgpoints = []
# Extracting path of individual image stored in a given directory
images = glob.glob('*.jpeg')
for image in images:
    print(image)
    img = cv.imread(image)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    # If desired number of corners are found in the image then ret = true
    ret, corners = cv.findChessboardCorners(gray, CHECKERBOARD, None)
    """
    If desired number of corners are detected,
    we refine the pixel coordinates and display
    them on the images of checker board
    """
    if ret == True:
        objpoints.append(objp)
        # refining pixel coordinates for given 2d points.
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners)
        # Draw and display the corners
        cv.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
        cv.imshow('img', img)
        cv.waitKey(1000)
cv.destroyAllWindows()
"""
Performing camera calibration by
passing the value of known 3D points (objpoints)
and corresponding pixel coordinates of the
detected corners (imgpoints)
"""
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, size, None, None)
print("\n camera Calibrated", ret)
print("\nCamera matrix:\n", mtx)
print("\ndist:\n", dist)
print("\nrotation vector : \n", rvecs)
print("\n translation vector : \n", tvecs)

Barcode decoding with pyzbar on raspberry pi

I am using pyzbar to decode barcodes on a Raspberry Pi 3 using the Pi Camera v1 (resolution 1296x972). QR codes are decoded very well. When decoding one-dimensional barcodes (CODABAR), the success rate is very low.
I have tried saving one frame from the video stream and decoding it with pyzbar on the Raspberry Pi, and it fails. When I try to decode the same image on Ubuntu, it decodes successfully.
from pyzbar import pyzbar
from PIL import Image
img = Image.open('sampleImage.png')
d = pyzbar.decode(img)
print (d)
Any thoughts what may be the problem?
UPDATE:
The following image is my specific use case.
Because I am using the Pi Camera v1 to take images, I tried adjusting the image sharpness:
from picamera import PiCamera
self.camera = PiCamera()
self.camera.sharpness = 100
The following image is with sharpness 100. However, pyzbar still fails to decode it on the Raspberry Pi.
You need to remove the black border from your image. According to this answer,
you can simply crop your image and then feed it to the pyzbar.decode() function.
import cv2
from pyzbar import pyzbar
import numpy as np

def autocrop(image, threshold=0):
    """Crops any edges below or equal to threshold

    Crops blank image to 1x1.
    Returns cropped image.
    """
    if len(image.shape) == 3:
        flatImage = np.max(image, 2)
    else:
        flatImage = image
    assert len(flatImage.shape) == 2
    rows = np.where(np.max(flatImage, 0) > threshold)[0]
    if rows.size:
        cols = np.where(np.max(flatImage, 1) > threshold)[0]
        image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
    else:
        image = image[:1, :1]
    return image

if __name__ == "__main__":
    image = cv2.imread('sampleImage.png')
    crop = autocrop(image, 165)
    d = pyzbar.decode(crop)
    print(d)
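For reference, each element that pyzbar.decode() returns is a Decoded tuple, so you can check what was actually read from the cropped image; a short usage sketch:

# Each result carries the payload bytes, the symbology and the bounding rectangle.
for symbol in d:
    print(symbol.type, symbol.data.decode("utf-8"), symbol.rect)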

How to save an animated GIF to a variable using Pillow

I found out from here that I can create and save animated GIFs using Pillow. However, it doesn't look like the save method returns any value.
I can save the GIF to a file and then open that file using Image.open, but that seems unnecessary, given that I don't really want the GIF to be saved.
How can I save the GIF to a variable, rather than a file?
That is, I would like to be able to do some_variable.show() and display a GIF, without ever having to save the GIF onto my computer.
To avoid writing any files, you can just save your image to a BytesIO object. For example:
#!/usr/bin/env python
from __future__ import division

from PIL import Image
from PIL import ImageDraw
from io import BytesIO

N = 25  # number of frames

# Create individual frames
frames = []
for n in range(N):
    frame = Image.new("RGB", (200, 150), (25, 25, 255 * (N - n) // N))
    draw = ImageDraw.Draw(frame)
    x, y = frame.size[0] * n / N, frame.size[1] * n / N
    draw.ellipse((x, y, x + 40, y + 40), 'yellow')
    # Saving/opening is needed for better compression and quality
    fobj = BytesIO()
    frame.save(fobj, 'GIF')
    frame = Image.open(fobj)
    frames.append(frame)

# Save the frames as an animated GIF to BytesIO
animated_gif = BytesIO()
frames[0].save(animated_gif,
               format='GIF',
               save_all=True,
               append_images=frames[1:],  # Pillow >= 3.4.0
               duration=100,              # ms per frame (Pillow expects "duration", not "delay")
               loop=0)
animated_gif.seek(0, 2)
print('GIF image size =', animated_gif.tell())

# Optional: display image
# animated_gif.seek(0)
# ani = Image.open(animated_gif)
# ani.show()

# Optional: write contents to file
animated_gif.seek(0)
open('animated.gif', 'wb').write(animated_gif.read())
In the end, the variable animated_gif contains the contents of the resulting animated GIF.
However, displaying an animated GIF in Python is not very reliable. ani.show() from the code above displays only the first frame on my machine.
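If you only want to confirm that the in-memory GIF really contains every frame (rather than display it), you can re-open the BytesIO object and walk through it with Pillow; a small sketch (my addition):

from PIL import Image, ImageSequence

animated_gif.seek(0)
gif = Image.open(animated_gif)
print("frames in GIF:", gif.n_frames)           # should report N = 25
for i, frame in enumerate(ImageSequence.Iterator(gif)):
    pass                                        # each `frame` is a PIL image you could inspect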

A real time Spectrum analyser with pyaudio in python on Raspi

I am trying to get an FFT plot of real-time audio using a USB microphone plugged into my Raspberry Pi. I want to be able to activate an LED when a certain frequency is detected through the FFT plot. So far I have tried to get just a live sound wave plotted, but I am having trouble. I have followed this video: https://www.youtube.com/watch?v=AShHJdSIxkY&lc=z22efhti3uaff52pv04t1aokgg3rlotuia3kw5mpcsnubk0h00410.1510779722591217
I have tried changing the chunk size to a greater value and a lower value but have had no success. For some reason I get the -9981 error, but it takes a long time for the error to print. No plot is displayed. I have even tried overclocking my Raspberry Pi to see if that would help, but it still doesn't work.
I was wondering if anyone else had tried something like this on their Pi, whether it is possible, or whether I have to use a different package than pyaudio.
Here is my Python code:
import pyaudio
import struct
import numpy as np
import matplotlib.pyplot as plt

CHUNK = 100000
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100

p = pyaudio.PyAudio()
stream = p.open(
    format=FORMAT,
    channels=CHANNELS,
    rate=RATE,
    input=True,
    output=True,
    frames_per_buffer=CHUNK,
    start=True
)

fig, ax = plt.subplots()
x = np.arange(0, 2 * CHUNK, 2)
line, = ax.plot(x, np.random.rand(CHUNK))
ax.set_ylim(0, 255)
ax.set_xlim(0, CHUNK)

while True:
    data = stream.read(CHUNK)
    data_int = np.array(struct.unpack(str(CHUNK * 2) + 'B', data), dtype='b')[::2] + 127
    line.set_ydata(data_int)
    fig.canvas.draw()
    fig.canvas.flush_events()
To display the plot, add:
plt.show(block=False)
after
ax.set_xlim(0, CHUNK)
But on the Raspberry Pi you also have to configure your USB sound card as the default audio device.
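If changing the system default card is not convenient, pyaudio can also be pointed at the USB microphone directly; a hedged sketch (my addition) that lists the available devices and then opens the stream by index:

import pyaudio

p = pyaudio.PyAudio()
# Print every audio device so you can spot the USB sound card's index.
for i in range(p.get_device_count()):
    info = p.get_device_info_by_index(i)
    print(i, info["name"], "inputs:", info["maxInputChannels"])

# Then open the stream with that index instead of relying on the default card,
# e.g. input_device_index=1 (the actual number depends on your setup).
# stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100,
#                 input=True, frames_per_buffer=1024, input_device_index=1)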