SUMMARY
PyQt5 doesn't appear to be creating a new thread for my QThread object, or I haven't established the signal/slot linkage correctly. Please help me isolate the problem.
I'm a relatively casual user of Python, but I've been asked to create a utility for another team that wraps some of their Python libraries (which themselves wrap C++) in a GUI. Because this utility is for another team, I can't change compiler versions and the like, or at least not without providing a decent reason.
The utility is intended to provide an interface for debugging into some hardware that my colleagues are developing.
After examining the options, I decided to use Qt and the PyQt bindings. The steps I followed were:
Install Visual Studio 2010 SP1 (required because other team's libraries are compiled using this version of the MS compiler).
Install Python 2.7.9 (their version of Python)
Install qt-opensource-windows-x86-msvc2010-5.2.1.exe
Get the source for SIP-4.18.zip, then compile and install
Get the source for PyQt-gpl-5.2.1.zip, then compile and install
Try to build a PyQt application that wraps the other team's comms and translation libraries. Those libraries aren't asynchronous as far as I can tell, so I think that I need to separate that part of the application from the GUI.
The code that I've written produces the UI and is responsive, in the sense that breakpoints I put in the methods called from the QAction objects are triggered appropriately. My problem is that the Worker object I create doesn't appear to move to a separate thread (despite the call to moveToThread): if I make the connection of type BlockingQueuedConnection instead of QueuedConnection, I get a deadlock, and breakpoints that I put on the slots in the Worker type are never triggered.
Here's the code:
import os
import sys
import time
from PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication, QStatusBar, QLabel, QWidget, QDesktopWidget, QInputDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt, QThread, QObject, pyqtSignal, pyqtSlot
class Worker(QObject):

    def __init__(self):
        super(Worker, self).__init__()
        self._isRunning = True
        self._connectionId = ""
        self._terminate = False

    @pyqtSlot()
    def cmd_start_running(self):
        """This slot is used to send a command to the HW asking for it to enter Running mode.
        It will actually work by putting a command in a queue for the main_loop to get to
        in its own serialised good time. All the other commands will work in a similar fashion.
        Up until such time as it is implemented, I will fake it."""
        self._isRunning = True

    @pyqtSlot()
    def cmd_stop_running(self):
        """This slot is used to send a command to the HW asking for it to enter Standby mode.
        Up until such time as it is implemented, I will fake it."""
        self._isRunning = False

    @pyqtSlot()
    def cmd_get_version(self):
        """This slot is used to send a command to the HW asking for its version string."""
        pass

    @pyqtSlot()
    def cmd_terminate(self):
        """This slot is used to notify this object that it has to join the main thread."""
        pass

    @pyqtSlot()
    def main_loop(self):
        """This slot is the main loop that is attached to the QThread object. It has sleep periods
        that allow the messages on the other slots to be processed."""
        while not self._terminate:
            self.thread().sleep(1)
            # While there is stuff on the wire, get it off, translate it,
            # then signal it.
            # For the mean while, pretend that _isRunning corresponds to when
            # RT streams will be being received from the HW.
            if self._isRunning:
                pass
            # Search queue for commands; if any found, translate, then put
            # on the wire.
class DemoMainWindow(QMainWindow):

    sgnl_get_version = pyqtSignal()
    sgnl_start_running = pyqtSignal()
    sgnl_stop_running = pyqtSignal()
    sgnl_terminate = pyqtSignal()

    def __init__(self):
        super(DemoMainWindow, self).__init__()
        self.initUI()
        self._workerObject = Worker()
        self._workerThread = QThread()
        self._workerObject.moveToThread(self._workerThread)
        self._workerThread.started.connect(self._workerObject.main_loop, type=Qt.QueuedConnection)
        # I changed the following connection to type BlockingQueuedConnection
        # and got a Deadlock error reported, so I assume that there is
        # already a problem before I get to this point.
        # I understand that the default for 'type' (Qt.AutoConnection) is
        # supposed to correctly infer that a QueuedConnection is required.
        # I was getting desperate.
        self.sgnl_get_version.connect(self._workerObject.cmd_get_version, type=Qt.QueuedConnection)
        self.sgnl_start_running.connect(self._workerObject.cmd_start_running, type=Qt.QueuedConnection)
        self.sgnl_stop_running.connect(self._workerObject.cmd_stop_running, type=Qt.QueuedConnection)
        self.sgnl_terminate.connect(self._workerObject.cmd_terminate, type=Qt.QueuedConnection)

    def initUI(self):
        textEdit = QTextEdit()
        self.setCentralWidget(textEdit)

        lbl = QLabel(self.statusBar())
        lbl.setText("HW Version: ")
        self.statusBar().addPermanentWidget(lbl)

        exitAction = QAction(QIcon('exit24.png'), 'Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)

        connectAction = QAction(QIcon('connect24.png'), 'Connect', self)
        connectAction.setStatusTip('Connect to HW')
        connectAction.triggered.connect(self.establishCanConnection)

        enterRunningAction = QAction(QIcon('start24.png'), 'Start Running', self)
        enterRunningAction.setStatusTip('Start Running')
        enterRunningAction.triggered.connect(self.enterRunning)

        enterStandbyAction = QAction(QIcon('stop24.png'), 'Stop Running', self)
        enterStandbyAction.setStatusTip('Stop Running')
        enterStandbyAction.triggered.connect(self.enterStandby)

        self.statusBar()

        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAction)
        hwMenu = menubar.addMenu('&Hardware')
        hwMenu.addAction(connectAction)
        hwMenu.addAction(enterRunningAction)
        hwMenu.addAction(enterStandbyAction)

        toolbar = self.addToolBar('Exit')
        toolbar.addAction(exitAction)
        toolbar.addAction(connectAction)
        toolbar.addAction(enterRunningAction)
        toolbar.addAction(enterStandbyAction)

        self.setGeometry(300, 300, 400, 350)  # x, y, width, height
        self.setWindowTitle('Demo Prog')
        self.show()

    def establishCanConnection(self):
        iDlg = QInputDialog(self)
        iDlg.setInputMode(QInputDialog.IntInput)
        idInt, ok = iDlg.getInt(self, 'CAN ID Selection', 'HW ID:')
        canID = '%s%d' % ('HW', idInt)
        if ok:
            self._workerThread.start()
            # this would be where the channel is established

    def enterRunning(self):
        self.sgnl_start_running.emit()
        # this would be where the command to start running is sent from

    def enterStandby(self):
        self.sgnl_stop_running.emit()
        # send the command to stop running
if __name__ == '__main__':
    app = QApplication(sys.argv)
    mainWindow = DemoMainWindow()
    sys.exit(app.exec_())
Note that the call to start the _workerThread is in the establishCanConnection method, but that shouldn't be a problem, should it?
I used the procmon utility to check whether more threads are created when establishCanConnection runs, and there do appear to be more threads, but I found it hard to relate any of them to the QThread object.
Don't use BlockingQueuedConnection unless you really need it. If you don't know whether you need it or not, then you don't need it.
Cross-thread signals are queued in the event-loop of the receiving thread. If that thread is running code that blocks, it won't be able to process any events. Thus, if you send a signal with BlockingQueuedConnection to a thread that is blocked, you'll get a deadlock.
Your example uses a worker object that runs a blocking while loop, so it is subject to the deadlock problem outlined above. If you want to send signals to a thread that is blocked, you will need to arrange for the blocking code to periodically allow the thread to process its events, like this:
while not self._terminate:
    self.thread().sleep(1)
    QApplication.processEvents()
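As a side note (an editor's sketch, not from the original answer): instead of a blocking loop punctuated by processEvents() calls, the worker can be driven by a QTimer, so the thread's event loop stays free and queued slot invocations are delivered between polls. A hypothetical PollingWorker might look like this:

from PyQt5.QtCore import QObject, QTimer, pyqtSlot

class PollingWorker(QObject):
    @pyqtSlot()
    def start(self):
        # Created inside the slot so the timer lives in the worker's thread.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.poll)
        self._timer.start(1000)  # poll once per second

    @pyqtSlot()
    def poll(self):
        # Service the wire here; between timeouts the thread's event loop is
        # idle, so queued calls (cmd_start_running etc.) get through.
        pass

You would then connect the thread's started signal to PollingWorker.start instead of main_loop.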
PS:
If you want to check that the worker is running in a different thread, you can print the return value of QThread.currentThread() or QThread.currentThreadId() (these functions are static, so you don't need an instance of QThread to call them).
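For example (a quick sketch, not in the original post):

from PyQt5.QtCore import QThread

# In DemoMainWindow.__init__ (runs in the GUI thread):
print('GUI thread:    %s' % QThread.currentThread())

# In Worker.main_loop (should print a different object if moveToThread worked):
print('Worker thread: %s' % QThread.currentThread())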
Related
I'm working on a code base that uses Twisted this way: a client establishes a permanent Perspective Broker TCP connection (i.e. the connection is kept open for the whole life of the process, which can be days, months or years), and performs logins over this broker for different Avatars when needed.
My problem is that the Avatar logout cleanup function is recorded by the broker, to be called and removed when the broker's TCP connection is lost.
Since the connection is never lost, the broker.disconnects list grows and grows.
Even worse, the avatar logout function defined by the code I'm working on holds a reference to the Avatar (via a closure), and the avatars can be big, causing a problematic memory leak for the whole process.
Here is an example adapted from the docs:
The server:
#!/usr/bin/env python
# Inspired from pb6server.py here:
# https://twistedmatrix.com/documents/current/core/howto/pb-cred.html#two-clients
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from zope.interface import implementer
from twisted.spread import pb
from twisted.cred import checkers, portal
from twisted.internet import reactor
PROTOCOL = None
# I didn't find another way to get a reference to the broker: suggestion
# welcome!
class MyFactory(pb.PBServerFactory):
    def buildProtocol(self, addr):
        global PROTOCOL
        PROTOCOL = super().buildProtocol(addr)
        return PROTOCOL

class MyPerspective(pb.Avatar):
    def __init__(self, name):
        self.name = name

    def perspective_foo(self, arg):
        assert PROTOCOL is not None
        print(f"Leaked avatars: {len(PROTOCOL.disconnects)}")

@implementer(portal.IRealm)
class MyRealm:
    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces:
            raise NotImplementedError
        return pb.IPerspective, MyPerspective(avatarId), lambda: None

p = portal.Portal(MyRealm())
c = checkers.InMemoryUsernamePasswordDatabaseDontUse(
    user0=b"pass0", user1=b"pass1", user2=b"pass2", user3=b"pass3", user4=b"pass4"
)
p.registerChecker(c)
factory = MyFactory(p)
reactor.listenTCP(8800, factory)
reactor.run()
And the client:
#!/usr/bin/env python
# Inspired from pb6client.py here:
# https://twistedmatrix.com/documents/current/core/howto/pb-cred.html#two-clients
from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials
USERS_COUNT = 5
def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    for k in range(10):
        i = k % USERS_COUNT
        def1 = factory.login(
            credentials.UsernamePassword(f"user{i}".encode(), f"pass{i}".encode())
        )
        def1.addCallback(connected)
    reactor.run()

def connected(perspective):
    print("got perspective1 ref:", perspective)
    print("asking it to foo(13)")
    perspective.callRemote("foo", 13)

main()
How may I notify the broker that the avatar should be cleaned up, without closing the broker's TCP connection?
Thanks!
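(Editor's sketch, not part of the original question: pb.Broker appends these callbacks to its disconnects list via notifyOnDisconnect(), and has a matching dontNotifyOnDisconnect() that removes one. If you can get hold of the broker, e.g. through the MyFactory hook above, and of the registered cleanup callable, you could run the cleanup eagerly and unregister it:

def logout_now(broker, cleanup):
    """Sketch: run an avatar's cleanup immediately and drop the broker's
    reference to it, without touching the TCP connection."""
    cleanup()
    broker.dontNotifyOnDisconnect(cleanup)

The catch is that PB registers its own internal wrapper around the logout returned by requestAvatar, so getting a reference to the exact callable stored in broker.disconnects may take some bookkeeping of your own.)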
Python v3.5, Windows 10
I'm using multiple processes and trying to capture user input. Searching around, I see there are odd things that happen when using input() with multiple processes. After 8+ hours of trying, nothing I implemented worked; I'm positive I am doing it wrong but I can't for the life of me figure it out.
The following is a very stripped-down program that demonstrates the issue. It works fine when I run it within PyCharm, but when I use PyInstaller to create a single executable it fails: the program gets stuck in a loop, constantly asking the user to enter something.
I am pretty sure it has to do with how Windows takes in standard input, from things I've read. I've also tried passing the user input variables as Queue() items to the functions, but I hit the same issue. I read that you should put input() in the main Python process, so I did that under if __name__ == '__main__':
from multiprocessing import Process
import time
def func_1(duration_1):
    while duration_1 >= 0:
        time.sleep(1)
        print('Duration_1: %d %s' % (duration_1, 's'))
        duration_1 -= 1

def func_2(duration_2):
    while duration_2 >= 0:
        time.sleep(1)
        print('Duration_2: %d %s' % (duration_2, 's'))
        duration_2 -= 1

if __name__ == '__main__':
    # func_1 user input
    while True:
        duration_1 = input('Enter a positive integer.')
        if duration_1.isdigit():
            duration_1 = int(duration_1)
            break
        else:
            print('**Only positive integers accepted**')
            continue

    # func_2 user input
    while True:
        duration_2 = input('Enter a positive integer.')
        if duration_2.isdigit():
            duration_2 = int(duration_2)
            break
        else:
            print('**Only positive integers accepted**')
            continue

    p1 = Process(target=func_1, args=(duration_1,))
    p2 = Process(target=func_2, args=(duration_2,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
You need to use multiprocessing.freeze_support() when you produce a Windows executable with PyInstaller.
Straight out from the docs:
multiprocessing.freeze_support()
Add support for when a program which uses multiprocessing has been frozen to produce a Windows executable. (Has been tested with py2exe, PyInstaller and cx_Freeze.)
One needs to call this function straight after the if __name__ == '__main__' line of the main module. For example:
from multiprocessing import Process, freeze_support

def f():
    print('hello world!')

if __name__ == '__main__':
    freeze_support()
    Process(target=f).start()
If the freeze_support() line is omitted then trying to run the frozen executable will raise RuntimeError.
Calling freeze_support() has no effect when invoked on any operating system other than Windows. In addition, if the module is being run normally by the Python interpreter on Windows (the program has not been frozen), then freeze_support() has no effect.
In your example you also have unnecessary code duplication that you should tackle: func_1 and func_2 are identical apart from their names, as are the two input loops. See the sketch below.
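A possible deduplicated version (an editor's sketch that keeps your behaviour but folds the two identical worker functions and input loops into one of each, and adds the freeze_support() call):

from multiprocessing import Process, freeze_support
import time

def countdown(name, duration):
    """Counts down from duration to 0, printing once per second."""
    while duration >= 0:
        time.sleep(1)
        print('%s: %d s' % (name, duration))
        duration -= 1

def ask_positive_int(prompt):
    """Keeps asking until the user enters a non-negative integer."""
    while True:
        text = input(prompt)
        if text.isdigit():
            return int(text)
        print('**Only positive integers accepted**')

if __name__ == '__main__':
    freeze_support()  # needed for frozen Windows executables
    durations = [ask_positive_int('Enter a positive integer.') for _ in range(2)]
    processes = [Process(target=countdown, args=('Duration_%d' % (i + 1), d))
                 for i, d in enumerate(durations)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()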
Working up from threads to processes, I have switched to concurrent.futures, and would like to gain/retain flexibility in switching between a ThreadPoolExecutor and a ProcessPoolExecutor for various scenarios. However, despite the promise of a unified facade, I am having a hard time passing multiprocessing Queue objects as arguments to futures.submit() when I switch to using a ProcessPoolExecutor:
import multiprocessing as mp
import concurrent.futures
def foo(q):
    q.put('hello')

if __name__ == '__main__':
    executor = concurrent.futures.ProcessPoolExecutor()
    q = mp.Queue()
    p = executor.submit(foo, q)
    p.result()
    print(q.get())
bumps into the following exception coming from multiprocessing's code:
RuntimeError: Queue objects should only be shared between processes through inheritance
which I believe means it doesn't like receiving the queue as an argument, but rather expects the queue to be "inherited" (not in any OOP sense) across the multiprocessing fork, rather than passed as an argument.
The twist is that with bare-bones multiprocessing, i.e. when not using it through the concurrent.futures facade, there seems to be no such limitation, as the following code works seamlessly:
import multiprocessing as mp
def foo(q):
    q.put('hello')

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=foo, args=(q,))
    p.start()
    p.join()
    print(q.get())
What am I missing here? How can I make the ProcessPoolExecutor accept the queue as an argument, the same as it works with the ThreadPoolExecutor, or with multiprocessing used directly as shown right above?
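(Editor's note, not part of the original question: one commonly suggested workaround is a manager-based queue, which is a picklable proxy and therefore survives being passed as a submit() argument. A minimal sketch:)

import multiprocessing as mp
import concurrent.futures

def foo(q):
    q.put('hello')

if __name__ == '__main__':
    with mp.Manager() as manager:
        q = manager.Queue()  # a picklable proxy, unlike a bare mp.Queue()
        with concurrent.futures.ProcessPoolExecutor() as executor:
            executor.submit(foo, q).result()
        print(q.get())  # -> hello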
I am using QThread to do some time-intensive calculations to keep the GUI from freezing. In the QThread I access and change global lists many times over the thread's life span, but I am unable to get the same result as when the code runs on the main thread.
I assume I need to perform some kind of locking, but I'm new to QThread and don't know how to implement it.
# Main thread
self.runGasAnalysisThread = GasAnalysisThread()
self.runGasAnalysisThread.start()

# QThread
class GasAnalysisThread(QtCore.QThread):
    """Performs gas analysis function"""

    def __init__(self, parent=None):
        super().__init__(parent)

    def run(self):
        try:
            boolValue = True
            while True:
                # Change lists here
                float(Data.TestList[0]) + 1  # Data is another module I am using to store variables
Again, moving the code to the main thread works correctly, but as soon as I do it with QThread I get different results.
How would I implement a locking mechanism to keep this from happening?
It is common to be confused while using Qt's threading, as one would think that subclassing QThread would be the right path.
The truth is that a QThread is the Qt object that manages the thread in which your code actually runs, which means that you'll usually want a separate class for the work and then move an instance of it to a QThread. Subclassing QThread is usually unnecessary.
If you need any kind of interaction between the "worker" (the object that does the processing) and the main thread (as in the GUI), it's good practice to use Qt's signals.
In this example I'm using a button to start the processing, once the processor is started it disables the button, and re-enables it once it signals that the process has finished.
class Worker(QtCore.QObject):
    stateChanged = QtCore.pyqtSignal(bool)

    def __init__(self):
        super().__init__()

    def run(self):
        self.stateChanged.emit(True)
        try:
            boolValue = True
            while True:
                # this doesn't make much sense, as it doesn't seem to do anything;
                # I'll leave it just for the sake of the argument
                float(Data.TestList[0]) + 1
        except:
            pass
        self.stateChanged.emit(False)

class SomeWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        layout = QtWidgets.QHBoxLayout()
        self.setLayout(layout)
        self.startButton = QtWidgets.QPushButton('Start')
        layout.addWidget(self.startButton)

        self.worker = Worker()
        self.workerThread = QtCore.QThread()
        self.worker.moveToThread(self.workerThread)
        self.workerThread.started.connect(self.worker.run)
        self.startButton.clicked.connect(self.workerThread.start)
        self.worker.stateChanged.connect(self.startButton.setDisabled)
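A minimal harness to try this out might look like the following (my sketch; it assumes the two classes above live in the same file):

import sys
from PyQt5 import QtCore, QtWidgets

# Worker and SomeWidget as defined above...

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    widget = SomeWidget()
    widget.show()
    sys.exit(app.exec_())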
I wrote a C extension (mycext.c) for Python 3.2. The extension relies on constant data stored in a C header (myconst.h). The header file is generated by a Python script. In the same script, I make use of the freshly compiled module. The workflow in my Python 3 script (not shown completely) is as follows:
configure_C_header_constants()
write_constants_to_C_header() # write myconst.h
os.system('python3 setup.py install --user') # compile mycext
import mycext
mycext.do_stuff()
This works perfectly fine the first time in a Python session. If I repeat the procedure in the same session (for example, in two different test cases of a unittest), the first compiled version of mycext is always (re)loaded.
How do I effectively reload an extension module with the latest compiled version?
You can reload modules in Python 3.x by using the imp.reload() function. (This function used to be a built-in in Python 2.x. Be sure to read the documentation -- there are a few caveats!)
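For example (a sketch; note the rebinding, since reload() returns the module object):

import imp
import mycext

mycext = imp.reload(mycext)

That said, as the next answer explains, this generally will not pick up a recompiled C extension, because the shared library is never unloaded.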
Python's import mechanism will never dlclose() a shared library. Once loaded, the library will stay until the process terminates.
Your options (sorted by decreasing usefulness):
Move the module import to a subprocess, and call the subprocess again after recompiling, i.e. you have a Python script do_stuff.py that simply does
import mycext
mycext.do_stuff()
and you call this script using
subprocess.call([sys.executable, "do_stuff.py"])
Turn the compile-time constants in your header into variables that can be changed from Python, eliminating the need to reload the module.
Manually dlclose() the library after deleting all references to the module (a bit fragile since you don't hold all the references yourself).
Roll your own import mechanism.
Here is an example of how this can be done. I wrote a minimal Python C extension mini.so, exporting only an integer called version.
>>> import ctypes
>>> libdl = ctypes.CDLL("libdl.so")
>>> libdl.dlclose.argtypes = [ctypes.c_void_p]
>>> so = ctypes.PyDLL("./mini.so")
>>> so.PyInit_mini.argtypes = []
>>> so.PyInit_mini.restype = ctypes.py_object
>>> mini = so.PyInit_mini()
>>> mini.version
1
>>> del mini
>>> libdl.dlclose(so._handle)
0
>>> del so
At this point, I incremented the version number in mini.c and recompiled.
>>> so = ctypes.PyDLL("./mini.so")
>>> so.PyInit_mini.argtypes = []
>>> so.PyInit_mini.restype = ctypes.py_object
>>> mini = so.PyInit_mini()
>>> mini.version
2
You can see that the new version of the module is used.
For reference and experimenting, here's mini.c:
#include <Python.h>

static struct PyModuleDef minimodule = {
    PyModuleDef_HEAD_INIT, "mini", NULL, -1, NULL
};

PyMODINIT_FUNC
PyInit_mini(void)
{
    PyObject *m = PyModule_Create(&minimodule);
    PyModule_AddObject(m, "version", PyLong_FromLong(1));
    return m;
}
There is another way: set a new module name, import it, and change references to it.
Update: I have now created a Python library around this approach:
https://github.com/bergkvist/creload
https://pypi.org/project/creload/
Rather than using the subprocess module in Python, you can use multiprocessing. This allows the child process to inherit all of the memory from the parent (on UNIX systems).
For this reason, you also need to be careful not to import the C extension module in the parent.
If you return a value that depends on the C extension, it might also force the C extension to become imported in the parent, as the parent receives the return value of the function.
import multiprocessing as mp
import sys

def subprocess_call(fn, *args, **kwargs):
    """Executes a function in a forked subprocess"""
    ctx = mp.get_context('fork')
    q = ctx.Queue(1)
    is_error = ctx.Value('b', False)

    def target():
        try:
            q.put(fn(*args, **kwargs))
        except BaseException as e:
            is_error.value = True
            q.put(e)

    ctx.Process(target=target).start()
    result = q.get()
    if is_error.value:
        raise result
    return result

def my_c_extension_add(x, y):
    assert 'my_c_extension' not in sys.modules.keys()
    # ^ Sanity check, to make sure you didn't import it in the parent process
    import my_c_extension
    return my_c_extension.add(x, y)

print(subprocess_call(my_c_extension_add, 3, 4))
If you want to extract this into a decorator - for a more natural feel, you can do:
class subprocess:
    """Decorate a function to hint that it should be run in a forked subprocess"""
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return subprocess_call(self.fn, *args, **kwargs)

@subprocess
def my_c_extension_add(x, y):
    assert 'my_c_extension' not in sys.modules.keys()
    # ^ Sanity check, to make sure you didn't import it in the parent process
    import my_c_extension
    return my_c_extension.add(x, y)

print(my_c_extension_add(3, 4))
This can be useful if you are working in a Jupyter notebook, and you want to rerun some function without rerunning all your existing cells.
Notes
This answer might only be relevant on Linux/macOS where you have a fork() system call:
Python multiprocessing linux windows difference
https://rhodesmill.org/brandon/2010/python-multiprocessing-linux-windows/