Matplotlib transparent background without savefig() - matplotlib

I have an animation of this kind and I want to integrate it into my GUI (plot shown in the original post).
Right now the background color is set to black. Here is the code. I am using Windows 10, and for the GUI I am mostly using PyQt6, but for matplotlib I call mlp.use("TkAgg") because no output was produced unless I used TkAgg.
I want to make the background transparent so that only the curves are visible. Everything I found online is about the savefig() function. Isn't there another solution? I don't want to save the figure; I am using an animation, so it should be transparent the whole time, not just in a saved image.
import queue
import sys
from matplotlib.animation import FuncAnimation
import PyQt6.QtCore
import matplotlib as mlp
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
mlp.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
import sounddevice as sd

plt.rcParams['toolbar'] = 'None'
plt.rcParams.update({
    "figure.facecolor": "black",
})

# Audio settings: use the default PC or laptop mic as input
device = 0          # id of the default audio device
window = 1000       # window (ms) of data shown in the plot
downsample = 1      # how many samples to drop
channels = [1]      # list of audio channels
interval = 40       # plot update interval in milliseconds

# Queue for passing audio blocks from the callback to the plot
q = queue.Queue()

# Please note that sd.query_devices has an "s" at the end.
device_info = sd.query_devices(device, 'input')
samplerate = device_info['default_samplerate']
length = int(window * samplerate / (1000 * downsample))
plotdata = np.zeros((length, len(channels)))

# Create the matplotlib figure and axes
fig, ax = plt.subplots(figsize=(2, 1))
fig.subplots_adjust(0, 0, 1, 1)
ax.axis("off")
fig.canvas.manager.window.overrideredirect(1)
ax.set_title("On Action")

# Create matplotlib.lines.Line2D plot items
lines = ax.plot(plotdata, color="purple")

# The audio callback puts incoming data into the queue
def audio_callback(indata, frames, time, status):
    q.put(indata[::downsample, [0]])

# update_plot takes blocks of audio samples from the queue and updates the lines
def update_plot(frame):
    global plotdata
    while True:
        try:
            data = q.get_nowait()
        except queue.Empty:
            break
        shift = len(data)
        # Elements that roll beyond the last position are re-introduced
        plotdata = np.roll(plotdata, -shift, axis=0)
        plotdata[-shift:, :] = data
    for column, line in enumerate(lines):
        line.set_ydata(plotdata[:, column])
    return lines

ax.set_yticks([0])
# ax.yaxis.grid(True)

""" INPUT FROM MIC """
stream = sd.InputStream(device=device, channels=max(channels),
                        samplerate=samplerate, callback=audio_callback)

""" OUTPUT """
ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)
plt.get_current_fig_manager().window.wm_geometry("200x100+850+450")
with stream:
    plt.show()
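A hedged sketch of one direction (an editor's assumption, not something from the original post): if the figure is embedded in the PyQt6 GUI through FigureCanvasQTAgg (the QtAgg backend rather than the TkAgg window above), the figure and axes patches can be made transparent and the Qt canvas asked to paint a translucent background. Whether the widgets behind the canvas actually show through depends on the backend and window manager, so treat this as a starting point to verify rather than a confirmed fix.

# Sketch only: "fig" and "ax" are the objects created in the code above,
# and the QtAgg backend is assumed instead of TkAgg.
from PyQt6 import QtCore
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg

# Also replace the "figure.facecolor": "black" rcParam above with "none".
fig.patch.set_alpha(0.0)   # transparent figure background
ax.patch.set_alpha(0.0)    # transparent axes background

canvas = FigureCanvasQTAgg(fig)
# Ask Qt not to paint an opaque background behind the canvas widget.
canvas.setAttribute(QtCore.Qt.WidgetAttribute.WA_TranslucentBackground, True)
canvas.setStyleSheet("background: transparent;")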

Related

Python MATPLOTLIB ANIMATION without the use of Global Variables?

QUESTION: What's the cleanest and simplest way to use Python's matplotlib animation function without global arrays or constantly appending to a global list of data points?
Here is an example of an animated graph that plots the bid and ask sizes of a stock ticker. In this example the variables time[], ask[], and bid[] are used as global variables.
How do we modify the matplotlib animate() function to not use global variables?
I'm trying to remove all global variables and just run one function call...
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
from random import randint

stock = {'ask': 12.82, 'askSize': 21900, 'bid': 12.81, 'bidSize': 17800}

def get_askSize():
    return stock["askSize"] + randint(1, 9000)  # grab a random integer to be the next y-value in the animation

def get_bidSize():
    return stock["bidSize"] + randint(1, 9000)  # grab a random integer to be the next y-value in the animation

def animate(i):
    pt_ask = get_askSize()
    pt_bid = get_bidSize()
    time.append(i)       # x
    ask.append(pt_ask)   # y
    bid.append(pt_bid)   # y
    ax.clear()
    ax.plot(time, ask)
    ax.plot(time, bid)
    ax.set_xlabel('Time')
    ax.set_ylabel('Volume')
    ax.set_title('ask and bid size')
    ax.set_xlim([0, 40])
    #axis = axis_size(get_bidSize, get_askSize)
    ylim_min = (get_askSize() + get_bidSize()) / 6
    ylim_max = (get_askSize() + get_bidSize())
    ax.set_ylim([ylim_min, ylim_max])

# create empty lists for the x and y data
time = []
ask = []
bid = []

# create the figure and axes objects
fig, ax = plt.subplots()

# run the animation
ani = FuncAnimation(fig, animate, frames=40, interval=500, repeat=False)
plt.show()
As @Warren mentioned, you can use the fargs parameter to pass shared variables into your animation function.
You should also precompute all of your points and then use the frames merely as an expanding window over those points. This is a much more performant solution and prevents you from needing to convert between numpy arrays and lists on every tick of your animation in order to update the underlying data for your lines.
This also lets you precompute your y-limits, so the resulting plot does not jump all over the place.
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np

rng = np.random.default_rng(0)

def animate(i, ask_line, bid_line, data):
    i += 1
    x = data['x'][:i]
    ask_line.set_data(x, data['ask'][:i])
    bid_line.set_data(x, data['bid'][:i])

stock = {'ask': 12.82, 'askSize': 21900, 'bid': 12.81, 'bidSize': 17800}
frames = 40
data = {
    'x': np.arange(0, frames),
    'ask': stock['askSize'] + rng.integers(0, 9000, size=frames),
    'bid': stock['bidSize'] + rng.integers(0, 9000, size=frames),
}

fig, ax = plt.subplots()
ask_line, = ax.plot([], [])
bid_line, = ax.plot([], [])
ax.set(xlabel='Time', ylabel='Volume', title='ask and bid size', xlim=(0, 40))
ax.set_ylim(
    min(data['ask'].min(), data['bid'].min()),
    max(data['ask'].max(), data['bid'].max()),
)

# run the animation
ani = FuncAnimation(
    fig, animate, fargs=(ask_line, bid_line, data),
    frames=40, interval=500, repeat=False
)
plt.show()
You can use the fargs parameter of FuncAnimation to provide additional arguments to your animate callback function. So animate might start like
def animate(i, askSize, bidSize):
    ...
and in the call of FuncAnimation, you would add the parameter fargs=(askSize, bidSize). Add whatever variables (in whatever form) that you need to make available within the animate function.
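For instance, a minimal sketch of that call (the names and placeholder values are assumptions taken from the question's stock dictionary, not part of the original answer):

# Minimal sketch (assumed names): pass the extra objects through fargs so
# animate receives them as arguments instead of reading globals.
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()

def animate(i, askSize, bidSize):
    ...  # update the plot's artists from askSize/bidSize here

ani = FuncAnimation(fig, animate, fargs=(21900, 17800),
                    frames=40, interval=500, repeat=False)
plt.show()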
I use this in my example of the use of FuncAnimation with AnimatedPNGWriter in the package numpngw; see Example 8. In that example, my callback function is
def update_line(num, x, data, line):
    """
    Animation "call back" function for each frame.
    """
    line.set_data(x, data[num, :])
    return line,
and FuncAnimation is created with
ani = animation.FuncAnimation(fig, update_line, frames=len(t),
                              init_func=lambda: None,
                              fargs=(x, sol, lineplot))
You are using the animation incorrectly: you are adding and removing lines at each iteration, which makes the animation much slower. For line plots, the best way to proceed is:
initialize the figure and axes
initialize empty lines
inside the animate function, update the data of each line.
Something like this:
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
from random import randint

stock = {'ask': 12.82, 'askSize': 21900, 'bid': 12.81, 'bidSize': 17800}

def get_askSize():
    return stock["askSize"] + randint(1, 9000)  # grab a random integer to be the next y-value in the animation

def get_bidSize():
    return stock["bidSize"] + randint(1, 9000)  # grab a random integer to be the next y-value in the animation

def add_point_to_line(x, y, line):
    # retrieve the previous data in the line
    xd, yd = [list(t) for t in line.get_data()]
    # append the new point
    xd.append(x)
    yd.append(y)
    # set the new data
    line.set_data(xd, yd)

def animate(i):
    pt_ask = get_askSize()
    pt_bid = get_bidSize()
    # append a new value to the lines
    add_point_to_line(i, pt_ask, ax.lines[0])
    add_point_to_line(i, pt_bid, ax.lines[1])
    # update axis limits if necessary
    ylim_min = (get_askSize() + get_bidSize()) / 6
    ylim_max = (get_askSize() + get_bidSize())
    ax.set_ylim([ylim_min, ylim_max])

# create the figure and axes objects
fig, ax = plt.subplots()

# create empty lines that will be populated in the animate function
ax.plot([], [])
ax.plot([], [])
ax.set_xlabel('Time')
ax.set_ylabel('Volume')
ax.set_title('ask and bid size')
ax.set_xlim([0, 40])

# run the animation
ani = FuncAnimation(fig, animate, frames=40, interval=500, repeat=False)
plt.show()

How to get Intel RealSense D435i camera serial numbers from frames for multiple cameras?

I have initialized one pipeline for two cameras and I am getting color and depth images from it.
The problem is that I cannot find camera serial numbers for corresponding frames to determine which camera captured the frames.
Below is my code:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time

# Configure depth and color streams...
pipeline_1 = rs.pipeline()
config_1 = rs.config()
config_1.enable_device('938422072752')
config_1.enable_device('902512070386')
config_1.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config_1.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming from both cameras
pipeline_1.start(config_1)

try:
    while True:
        # Camera 1
        # Wait for a coherent pair of frames: depth and color
        frames_1 = pipeline_1.wait_for_frames()
        depth_frame_1 = frames_1.get_depth_frame()
        color_frame_1 = frames_1.get_color_frame()
        if not depth_frame_1 or not color_frame_1:
            continue
        # Convert images to numpy arrays
        depth_image_1 = np.asanyarray(depth_frame_1.get_data())
        color_image_1 = np.asanyarray(color_frame_1.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_1 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_1, alpha=0.5), cv2.COLORMAP_JET)

        # Camera 2
        # Wait for a coherent pair of frames: depth and color
        frames_2 = pipeline_1.wait_for_frames()
        depth_frame_2 = frames_2.get_depth_frame()
        color_frame_2 = frames_2.get_color_frame()
        if not depth_frame_2 or not color_frame_2:
            continue
        # Convert images to numpy arrays
        depth_image_2 = np.asanyarray(depth_frame_2.get_data())
        color_image_2 = np.asanyarray(color_frame_2.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap_2 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_2, alpha=0.5), cv2.COLORMAP_JET)

        # Stack all images horizontally
        images = np.hstack((color_image_1, depth_colormap_1, color_image_2, depth_colormap_2))

        # Show images from both cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', images)
        cv2.waitKey(20)
finally:
    pipeline_1.stop()
How can I find the camera serial numbers after wait_for_frames() to determine which camera captured the depth and color images?
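One possible approach, sketched here as an assumption rather than taken from the answers below: when each camera gets its own rs.pipeline (as in the answer that follows), the serial number can be read from the pipeline's active profile and used to tag the frames returned by that pipeline.

# Hedged sketch (assumes one rs.pipeline per camera, as in the answer below):
# read the serial number of the device behind a pipeline from its profile,
# so every wait_for_frames() result can be attributed to a camera.
import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_device('938422072752')  # serial from the question, for illustration
profile = pipeline.start(config)
serial = profile.get_device().get_info(rs.camera_info.serial_number)

frames = pipeline.wait_for_frames()
print("frames from camera", serial)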
I adapted your code and combined it with the C++ example posted by nayab to compose the following code, which grabs the color image (only) from multiple RealSense cameras and stacks them horizontally:
import pyrealsense2 as rs
import numpy as np
import cv2
import logging
import time

# The context encapsulates all of the devices and sensors, and provides some additional functionality.
realsense_ctx = rs.context()
connected_devices = []

# get serial numbers of connected devices:
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    connected_devices.append(detected_camera)

pipelines = []
configs = []
for i in range(len(realsense_ctx.devices)):
    pipelines.append(rs.pipeline())  # one pipeline for each device
    configs.append(rs.config())      # one config for each device
    configs[i].enable_device(connected_devices[i])
    configs[i].enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    pipelines[i].start(configs[i])

try:
    while True:
        images = []
        for i in range(len(pipelines)):
            print("waiting for frame at cam", i)
            frames = pipelines[i].wait_for_frames()
            color_frame = frames.get_color_frame()
            images.append(np.asanyarray(color_frame.get_data()))
        # Stack all images horizontally
        image_composite = images[0]
        for i in range(1, len(images)):
            image_composite = np.hstack((image_composite, images[i]))
        # Show images from all cameras
        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
        cv2.imshow('RealSense', image_composite)
        cv2.waitKey(20)
finally:
    for i in range(len(pipelines)):
        pipelines[i].stop()
This will look for the connected devices and find the serial numbers.
They are saved in a list and you can use them to start the available cameras.
# Configure depth and color streams...
realsense_ctx = rs.context()
connected_devices = []
for i in range(len(realsense_ctx.devices)):
    detected_camera = realsense_ctx.devices[i].get_info(rs.camera_info.serial_number)
    connected_devices.append(detected_camera)

How to fix overlapping Metpy/Cartopy images?

When I run this code
import Scientific.IO.NetCDF as S
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
import metpy
import numpy as N
from metpy.plots import ContourPlot, ImagePlot, MapPanel, PanelContainer
# Any import of metpy will activate the accessors
import metpy.calc as mpcalc
#from metpy.testing import get_test_data
from metpy.units import units
# Open the netCDF file as an xarray Dataset
#
datadir='C:/Users/stratus/AppData/Local/lxss/home/stratus/PROJECT/NEWPROJECT/FEB012017/nam_218_20170131_1200_000.nc'
data = xr.open_dataset(datadir,decode_cf=True)
# To parse the full dataset, we can call parse_cf without an argument, and assign the returned
# Dataset.
data = data.metpy.parse_cf()
tempatt=data['TMP_P0_L100_GLC0'].attrs
# If we instead want just a single variable, we can pass that variable name to parse_cf and
# it will return just that data variable as a DataArray.
data_var = data.metpy.parse_cf('TMP_P0_L100_GLC0')
# To rename variables, supply a dictionary between old and new names to the rename method
data.rename({
    'TMP_P0_L100_GLC0': 'temperature',
}, inplace=True)
data['temperature'].metpy.convert_units('degC')
# Get multiple coordinates (for example, in just the x and y direction)
x, y = data['temperature'].metpy.coordinates('x', 'y')
# If we want to get just a single coordinate from the coordinates method, we have to use
# tuple unpacking because the coordinates method returns a generator
vertical, = data['temperature'].metpy.coordinates('vertical')
data_crs = data['temperature'].metpy.cartopy_crs
# Or, we can just get a coordinate from the property
#time = data['temperature'].metpy.time
# To verify, we can inspect all their names
#print([coord.name for coord in (x, y, vertical, time)])
#
#heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 850. * units.hPa}]
#lat, lon = xr.broadcast(y, x)
#f = mpcalc.coriolis_parameter(lat)
#dx, dy = mpcalc.grid_deltas_from_dataarray(heights)
#u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
#print(u_geo)
#print(v_geo)
fig=plt.figure(1)
# A very simple example example of a plot of 500 hPa heights
data_crs = data['temperature'].metpy.cartopy_crs
ax = plt.axes(projection=ccrs.LambertConformal())
data['temperature'].metpy.loc[{'vertical': 850. * units.hPa}].plot(ax=ax, transform=data_crs)
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
plt.show()
#ax.set_extent([-120,-80,20,50])
plt.title("850 mb Temperature")
#plt.suptitle("Metpy Test")
plt.show()
I had to edit the code as per some of the answers, but I am now getting a mostly blank map (850 T map, image omitted). I am mainly trying to have the 850 mb temperatures overlay the US so I can show it to a friend as practice for a project I am helping him with. Filling in the parentheses for the data helped a bit, which is why I edited the code.
As pointed out in the comments it is difficult to answer without a reproducible example. However, the following may solve your issue:
data_crs = data['temperature'].metpy.cartopy_crs
ax = plt.axes(projection=ccrs.LambertConformal())
data['temperature'].metpy.loc[{'vertical': 1000. * units.hPa}].plot(ax=ax, transform=data_crs)
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
plt.show()
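A possible follow-up, an editor's assumption rather than part of the answer above: if the map still looks mostly blank, explicitly restricting the Lambert Conformal axes to the US before showing the figure may help. The extent values are the ones commented out in the question, and ccrs.PlateCarree() is assumed here for lon/lat extents.

# Hedged addition: zoom the existing GeoAxes to the continental US.
ax.set_extent([-120, -80, 20, 50], crs=ccrs.PlateCarree())
plt.title("850 mb Temperature")
plt.show()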

pick_event in Jupyter with matplotlib scatter plot

I really like how simply ipywidgets.interactive works with a pandas DataFrame, but I am having trouble getting the data when a point in a scatter plot is selected.
I have looked at some examples that use matplotlib.widgets etc. but none that use it with interactive in Jupyter. It looks like this technique would be described here but it comes up just short:
http://minrk-ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
Here is an ipynb of what I am trying to accomplish:
from ipywidgets import interactive
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from matplotlib.text import Annotation
from io import StringIO

data_ssv = """tone_amp_0 tone_freq_0 SNR
75.303 628.0 68.374
84.902 8000.0 61.292
92.856 288.0 70.545
70.000 2093.0 35.036
76.511 6834.0 66.952 """

data = pd.read_table(StringIO(data_ssv), sep="\s+", header=0)
col_names = list(data.columns.values)
plottable_col = (['tone_amp_0', 'tone_freq_0', 'SNR'])

def annotate(axis, text, x, y):
    text_annotation = Annotation(text, xy=(x, y), xycoords='data')
    axis.add_artist(text_annotation)

def onpick(event):
    ind = event.ind
    label_pos_x = event.mouseevent.xdata
    label_pos_y = event.mouseevent.ydata
    offset = 0  # just in case two dots are very close, this offset keeps the labels from landing on top of each other
    for i in ind:  # if the dots are too close to one another, matplotlib returns a list of the dots clicked
        label = "gen_labels"  # generated_labels[i]
        print("index", i, label)  # step 4: log it for debugging purposes
        ax = plt.gca()
        annotate(ax, label, label_pos_x + offset, label_pos_y + offset)
        ax.figure.canvas.draw_idle()
        offset += 0.01  # alter the offset just in case more than one dot is affected by the click

def update_plot(X='tone_amp_0', Y='tone_freq_0', Z='SNR'):
    plt.scatter(data.loc[:, [X]], data.loc[:, [Y]], marker='.', edgecolors='none',
                c=data.loc[:, [Z]], picker=True, cmap='RdYlGn')
    plt.title(X + ' vs ' + Y); plt.xlabel(X); plt.ylabel(Y); plt.colorbar().set_label(Z, labelpad=+1)
    plt.grid(); plt.show()
    plt.gcf().canvas.mpl_connect('pick_event', onpick)

interactive(update_plot, X=plottable_col, Y=plottable_col, Z=plottable_col)
When I select a data point, nothing happens. I am not sure how to debug this or work out what I am doing wrong. Can someone point out the problem?
Try putting a semicolon at the end of plt.gcf().canvas.mpl_connect('pick_event', onpick).
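For illustration only (the placement is assumed from the question's update_plot function, not spelled out in the answer), the suggested change is just the trailing semicolon on the connect call:

def update_plot(X='tone_amp_0', Y='tone_freq_0', Z='SNR'):
    # ... scatter, labels and colorbar exactly as in the question ...
    plt.grid(); plt.show()
    plt.gcf().canvas.mpl_connect('pick_event', onpick);  # trailing semicolon as suggested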

Fast Live Plotting in Matplotlib / PyPlot

For years, I've been struggling to get efficient live plotting in matplotlib, and to this day I remain unsatisfied.
I want a redraw_figure function that updates the figure "live" (as the code runs), and will display the latest plots if I stop at a breakpoint.
Here is some demo code:
import time
from matplotlib import pyplot as plt
import numpy as np

def live_update_demo():
    plt.subplot(2, 1, 1)
    h1 = plt.imshow(np.random.randn(30, 30))
    redraw_figure()
    plt.subplot(2, 1, 2)
    h2, = plt.plot(np.random.randn(50))
    redraw_figure()
    t_start = time.time()
    for i in range(1000):
        h1.set_data(np.random.randn(30, 30))
        redraw_figure()
        h2.set_ydata(np.random.randn(50))
        redraw_figure()
        print('Mean Frame Rate: %.3gFPS' % ((i + 1) / (time.time() - t_start)))

def redraw_figure():
    plt.draw()
    plt.pause(0.00001)

live_update_demo()
Plots should update live when the code is run, and we should see the latest data when stopping at any breakpoint after redraw_figure(). The question is how to best implement redraw_figure()
In the implementation above (plt.draw(); plt.pause(0.00001)), it works, but is very slow (~3.7FPS)
I can implement it as:
def redraw_figure():
    plt.gcf().canvas.flush_events()
    plt.show(block=False)
And it runs faster (~11 FPS), but the plots are not up to date when you stop at breakpoints (e.g. if I put a breakpoint on the t_start = ... line, the second plot does not appear).
Strangely enough, what does actually work is calling the show twice:
def redraw_figure():
    plt.gcf().canvas.flush_events()
    plt.show(block=False)
    plt.show(block=False)
This gives ~11 FPS and does keep the plots up to date if you break on any line.
Now I've heard it said that the "block" keyword is deprecated. And calling the same function twice seems like a weird, probably-non-portable hack anyway.
So what can I put in this function that will plot at a reasonable frame rate, isn't a giant kludge, and preferably will work across backends and systems?
Some notes:
I'm on OSX, and using TkAgg backend, but solutions on any backend/system are welcome
Interactive mode "On" will not work, because it does not update live; it only updates when the interpreter is waiting for user input in the Python console.
A blog suggested the implementation:
def redraw_figure():
    fig = plt.gcf()
    fig.canvas.draw()
    fig.canvas.flush_events()
But at least on my system, that does not redraw the plots at all.
So, if anybody has an answer, you would directly make me and thousands of others very happy. Their happiness would probably trickle through to their friends and relatives, and their friends and relatives, and so on, so that you could potentially improve the lives of billions.
Conclusions
ImportanceOfBeingErnest shows how you can use blit for faster plotting, but it's not as simple as putting something different in the redraw_figure function (you need to keep track of what things to redraw).
First of all, the code that is posted in the question runs with 7 fps on my machine, with QT4Agg as backend.
Now, as has been suggested in many posts, like here or here, using blit might be an option. Although this article mentions that blit causes strong memory leakage, I could not observe that.
I have modified your code a bit and compared the frame rate with and without the use of blit. The code below gives
28 fps when run without blit
175 fps with blit
Code:
import time
from matplotlib import pyplot as plt
import numpy as np

def live_update_demo(blit=False):
    x = np.linspace(0, 50., num=100)
    X, Y = np.meshgrid(x, x)
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)

    img = ax1.imshow(X, vmin=-1, vmax=1, interpolation="None", cmap="RdBu")

    line, = ax2.plot([], lw=3)
    text = ax2.text(0.8, 0.5, "")
    ax2.set_xlim(x.min(), x.max())
    ax2.set_ylim([-1.1, 1.1])

    fig.canvas.draw()   # note that the first draw comes before setting data

    if blit:
        # cache the background
        axbackground = fig.canvas.copy_from_bbox(ax1.bbox)
        ax2background = fig.canvas.copy_from_bbox(ax2.bbox)

    plt.show(block=False)

    t_start = time.time()
    k = 0.
    for i in np.arange(1000):
        img.set_data(np.sin(X/3. + k) * np.cos(Y/3. + k))
        line.set_data(x, np.sin(x/3. + k))
        tx = 'Mean Frame Rate:\n {fps:.3f}FPS'.format(fps=((i + 1) / (time.time() - t_start)))
        text.set_text(tx)
        # print(tx)
        k += 0.11
        if blit:
            # restore background
            fig.canvas.restore_region(axbackground)
            fig.canvas.restore_region(ax2background)
            # redraw just the points
            ax1.draw_artist(img)
            ax2.draw_artist(line)
            ax2.draw_artist(text)
            # fill in the axes rectangle
            fig.canvas.blit(ax1.bbox)
            fig.canvas.blit(ax2.bbox)
            # in this post http://bastibe.de/2013-05-30-speeding-up-matplotlib.html
            # it is mentioned that blit causes strong memory leakage;
            # however, I did not observe that.
        else:
            # redraw everything
            fig.canvas.draw()
        fig.canvas.flush_events()
        # alternatively you could use
        # plt.pause(0.000000000001)
        # however plt.pause calls canvas.draw(), as can be read here:
        # http://bastibe.de/2013-05-30-speeding-up-matplotlib.html

live_update_demo(True)    # 175 fps
#live_update_demo(False)  # 28 fps
Update:
For faster plotting, one may consider using pyqtgraph.
As the pyqtgraph documentation puts it: "For plotting, pyqtgraph is not nearly as complete/mature as matplotlib, but runs much faster."
I ported the above example to pyqtgraph. And although it looks kind of ugly, it runs with 250 fps on my machine.
Summing that up,
matplotlib (without blitting): 28 fps
matplotlib (with blitting): 175 fps
pyqtgraph : 250 fps
pyqtgraph code:
import sys
import time
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg

class App(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(App, self).__init__(parent)

        #### Create Gui Elements ###########
        self.mainbox = QtGui.QWidget()
        self.setCentralWidget(self.mainbox)
        self.mainbox.setLayout(QtGui.QVBoxLayout())

        self.canvas = pg.GraphicsLayoutWidget()
        self.mainbox.layout().addWidget(self.canvas)

        self.label = QtGui.QLabel()
        self.mainbox.layout().addWidget(self.label)

        self.view = self.canvas.addViewBox()
        self.view.setAspectLocked(True)
        self.view.setRange(QtCore.QRectF(0, 0, 100, 100))

        # image plot
        self.img = pg.ImageItem(border='w')
        self.view.addItem(self.img)

        self.canvas.nextRow()
        # line plot
        self.otherplot = self.canvas.addPlot()
        self.h2 = self.otherplot.plot(pen='y')

        #### Set Data #####################
        self.x = np.linspace(0, 50., num=100)
        self.X, self.Y = np.meshgrid(self.x, self.x)

        self.counter = 0
        self.fps = 0.
        self.lastupdate = time.time()

        #### Start #####################
        self._update()

    def _update(self):
        self.data = np.sin(self.X/3. + self.counter/9.) * np.cos(self.Y/3. + self.counter/9.)
        self.ydata = np.sin(self.x/3. + self.counter/9.)

        self.img.setImage(self.data)
        self.h2.setData(self.ydata)

        now = time.time()
        dt = (now - self.lastupdate)
        if dt <= 0:
            dt = 0.000000000001
        fps2 = 1.0 / dt
        self.lastupdate = now
        self.fps = self.fps * 0.9 + fps2 * 0.1
        tx = 'Mean Frame Rate: {fps:.3f} FPS'.format(fps=self.fps)
        self.label.setText(tx)
        QtCore.QTimer.singleShot(1, self._update)
        self.counter += 1

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    thisapp = App()
    thisapp.show()
    sys.exit(app.exec_())
Here's one way to do live plotting: get the plot as an image array then draw the image to a multithreaded screen.
Example using a pyformulas screen (~30 FPS):
import pyformulas as pf
import matplotlib.pyplot as plt
import numpy as np
import time

fig = plt.figure()
screen = pf.screen(title='Plot')

start = time.time()
for i in range(10000):
    t = time.time() - start
    x = np.linspace(t - 3, t, 100)
    y = np.sin(2*np.pi*x) + np.sin(3*np.pi*x)
    plt.xlim(t - 3, t)
    plt.ylim(-3, 3)
    plt.plot(x, y, c='black')
    # If we haven't already shown or saved the plot, then we need to draw the figure first...
    fig.canvas.draw()
    image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    screen.update(image)
#screen.close()
Disclaimer: I'm the maintainer of pyformulas