How to close a RabbitMQ connection - rabbitmq

I am using Pika 1.1.0 and Python 3.7.4. There is a consumer running in a thread. I want to kill the thread and close the RabbitMQ connection, but it fails. What am I doing wrong? How can I do it?
Error: pika.exceptions.StreamLostError: Stream connection lost: IndexError('pop from an empty deque')
def BrokerConnection(username, password):
    try:
        credentials = pika.PlainCredentials(username, password)
        parameters = pika.ConnectionParameters("127.0.0.1", 5672, "vhost", credentials, heartbeat=0)
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        return connection, channel
    except:
        return None, None
def BrokerListen():
    def BrokerConnect():
        while True:
            try:
                queue = "test-que"
                connection, channel = BrokerConnection("admin", "123345")
                if channel is not None:
                    brokerConnections.update({"key1": channel})
                    return connection, channel, queue
            except:
                time.sleep(1)
                print("error")

    connection, channel, queue = BrokerConnect()
    print(f'[*] Waiting for {queue} messages. To exit press CTRL+C')

    def callback(ch, method, properties, body):
        data = json.loads(body)
        print(data)
        ch.basic_ack(delivery_tag=method.delivery_tag)

    channel.basic_consume(queue=queue, on_message_callback=callback)
    channel.start_consuming()
def ConnectionClose():
    channel = brokerConnections["key1"]
    if channel.is_open:
        connection = channel.connection
        channel.stop_consuming()
        connection.close()
        del brokerConnections["key1"]
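No answer is quoted here, but the usual cause of this StreamLostError is that pika's BlockingConnection is not thread-safe: stop_consuming() and close() must not be called directly from a thread other than the one running start_consuming(). A minimal sketch of a thread-safe shutdown, reusing the names from the question and relying on pika's documented add_callback_threadsafe():
def ConnectionClose():
    channel = brokerConnections["key1"]
    if channel.is_open:
        # Ask the connection's own thread to stop the consumer;
        # start_consuming() in BrokerListen() then returns cleanly.
        channel.connection.add_callback_threadsafe(channel.stop_consuming)
        del brokerConnections["key1"]

# ...and at the end of BrokerListen(), after start_consuming() returns,
# close the connection from the consumer thread itself:
#     connection.close()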

Related

How to solve the consumer error in Rabbitmq using python

When I run Consumer.py for a headers exchange in RabbitMQ using Python, I get the error below.
I have included the consumer and publisher programs below.
Traceback (most recent call last):
  File "headersConsumer.py", line 32, in <module>
    main()
  File "headersConsumer.py", line 14, in main
    channel.exchange_declare(exchange = 'headers_logs',exchange_type='headers',durable=True)
  File "C:\Python38\lib\site-packages\pika\adapters\blocking_connection.py", line 2387, in exchange_declare
    self._flush_output(declare_ok_result.is_ready)
  File "C:\Python38\lib\site-packages\pika\adapters\blocking_connection.py", line 1339, in _flush_output
    raise self._closing_reason  # pylint: disable=E0702
pika.exceptions.ChannelClosedByBroker: (406, "PRECONDITION_FAILED - inequivalent arg 'type' for exchange 'headers_logs' in vhost '/': received 'headers' but current is 'fanout'")
I have written the consumer code like this:
#!/usr/bin/env python
import pika, sys, os

def main():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.exchange_declare(exchange='headers_logs', exchange_type='headers', durable=True)
    # Note: the original declared "HeaderQueue1" but bound and consumed
    # "HeadersQueue1"; the names must match.
    channel.queue_declare(queue="HeadersQueue1", durable=True)
    channel.queue_bind(exchange='headers_logs', queue="HeadersQueue1", routing_key='',
                       arguments={'x-match': 'any', 'key1': 'one', 'key2': 'two'})

    def callback(ch, method, properties, body):
        print(" [x] %r" % body.decode())

    print(' [*] Waiting for logs. To exit press CTRL+C')
    channel.basic_consume(
        queue="HeadersQueue1", on_message_callback=callback, auto_ack=True)
    channel.start_consuming()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
I have written the publisher program like this:
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='headers_logs', exchange_type='headers')
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='headers_logs', routing_key="", body=message,
                      properties=pika.BasicProperties(
                          delivery_mode=2,  # make message persistent
                          headers={'key1': 'one', 'key2': 'three'}
                      ))
print(" [x] Sent %r" % message)
connection.close()
I don't understand this error. Can anyone please explain it?
PRECONDITION_FAILED means that you declared the exchange with one set of parameters and are now trying to redeclare it with different parameters.
In your case:
headers_logs' in vhost '/': received 'headers' but current is 'fanout'")
So you are trying to change the exchange type from fanout to headers.
See here for more detail (this is for queues, but exchanges work the same way).
Before a queue can be used it has to be declared. Declaring a queue
will cause it to be created if it does not already exist. The
declaration will have no effect if the queue does already exist and
its attributes are the same as those in the declaration. When the
existing queue attributes are not the same as those in the declaration
a channel-level exception with code 406 (PRECONDITION_FAILED) will be
raised.
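A sketch of one practical fix (my addition, not part of the original answer): delete the mismatched exchange once, then redeclare it with the intended type. Only do this if nothing else still depends on the existing fanout exchange.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# Drop the exchange that currently exists with type 'fanout'...
channel.exchange_delete(exchange='headers_logs')
# ...and recreate it with the type the consumer expects.
channel.exchange_declare(exchange='headers_logs', exchange_type='headers', durable=True)
connection.close()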

Communication between multiple processes with zmq

I have n processes that each have their own local data and actions, and I want each process to send a "snapshot" of its local data to the rest of the running node-processes.
The code that I have so far looks like this:
import json
from time import sleep
from multiprocessing import Process

import zmq

def node1():
    Process(target=sync_1).start()
    sleep(4)
    data = {'node': 1, 'data': 'node 1 data'}
    context_b = zmq.Context()
    socket_b = context_b.socket(zmq.PUB)
    connected = False
    try:
        socket_b.bind("tcp://*:%s" % 5560)
        connected = True
    except Exception as e:
        print(e)
    if connected:
        topic = "101"
        try:
            socket_b.send_string(topic + ' ' + json.dumps(data))
        except Exception as e:
            print(e)
    socket_b.close()
    context_b.term()

def node2():
    Process(target=sync_2).start()

def sync_1():
    context_c = zmq.Context()
    socket_c = context_c.socket(zmq.SUB)
    _port = 5560
    try:
        socket_c.connect("tcp://localhost:%s" % _port)
    except Exception as e:
        print(e)
    topicfilter = "101"
    socket_c.setsockopt_string(zmq.SUBSCRIBE, topicfilter, encoding='utf-8')
    try:
        raw = socket_c.recv().decode("utf-8")
        json0 = raw.find('{')
        topic = raw[0:json0].strip()
        msg = json.loads(raw[json0:])
        print("[SYNC 1] received {}-{}".format(topic, msg))
    except Exception as e:
        print(e)

def sync_2():
    context_c = zmq.Context()
    socket_c = context_c.socket(zmq.SUB)
    _port = 5560
    try:
        socket_c.connect("tcp://localhost:%s" % _port)
    except Exception as e:
        print(e)
    topicfilter = "101"
    socket_c.setsockopt_string(zmq.SUBSCRIBE, topicfilter, encoding='utf-8')
    try:
        raw = socket_c.recv().decode("utf-8")
        json0 = raw.find('{')
        topic = raw[0:json0].strip()
        msg = json.loads(raw[json0:])
        print("[SYNC 2] received {}-{}".format(topic, msg))
    except Exception as e:
        print(e)

if __name__ == '__main__':
    Process(target=node1).start()
    Process(target=node2).start()
Each node has one "listener" process running in the background (the sync function) in order to receive the other nodes' data and use it accordingly. It works fine when all the SUB sockets are connected to one node (node 1 in this case), but I want each node to send data to all the listeners, and I am not sure how to implement that, since each listener process can only connect to one node's port.
Also, the nodes will have to send a local data snapshot every time there is an update, so this cannot be a one-time communication; that is why I thought of having the listener processes actively waiting for updates at all times.
I believe a diagram could be useful for this problem (not included here).
There may well be an easier way to solve this issue, so any help would be highly appreciated!
Update:
The solution was to use the XPUB-XSUB pattern.
By using this pattern I created a proxy thread that allowed me to do exactly what I wanted.
The most useful example I could find for Python is this.
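For reference, a minimal sketch of such a proxy (the port numbers 5559/5560 are illustrative assumptions, not from the original post): every node connects its PUB socket to the XSUB side and every listener connects its SUB socket to the XPUB side, so no node needs to know about the others.
import zmq

def run_proxy():
    context = zmq.Context()
    # Nodes connect their PUB sockets here ...
    frontend = context.socket(zmq.XSUB)
    frontend.bind("tcp://*:5559")
    # ... and listeners connect their SUB sockets here.
    backend = context.socket(zmq.XPUB)
    backend.bind("tcp://*:5560")
    # zmq.proxy blocks forever, forwarding messages one way
    # and subscription requests the other way.
    zmq.proxy(frontend, backend)

if __name__ == '__main__':
    run_proxy()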

Twisted will not send data back only if I use async DB ops

After struggling with inlineCallbacks and yield in twisted/txredisapi, I can save my data into redis. Thanks to the author of txredisapi. Now I have hit a new issue: the socket server will not send anything back to the client before/after saving into the DB.
Twisted offers a simple socket server like the following:
from twisted.internet import protocol, reactor

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)  # write back

class EchoFactory(protocol.Factory):
    def buildProtocol(self, addr):
        return Echo()

reactor.listenTCP(8000, EchoFactory())
reactor.run()
My code is similar, only with additional DB ops.
#!/usr/bin/env python
import sys
import time
import binascii
import txredisapi
from twisted.internet import defer
from twisted.internet import protocol, reactor
from twisted.internet.protocol import Factory
from twisted.enterprise import adbapi
from twisted.python import log
from dmpack import Dmpack
from dmdb import Dmdb
from dmconfig import DmConf

dm = Dmpack()
conf = DmConf().loadConf()
rcs = txredisapi.lazyConnection(password=conf['RedisPassword'])
dbpool = adbapi.ConnectionPool("MySQLdb",db=conf['DbName'],user=conf['DbAccount'],\
    passwd=conf['DbPassword'],host=conf['DbHost'],\
    use_unicode=True,charset=conf['DbCharset'])

def getDataParsed(data):
    realtime = None
    period = None
    snrCode = dm.snrToAscii(data[2:7])  # note: this is a module-level function, so no self
    realtime = data[7:167]  # save it into redis
    period = data[167:-2]   # save it into SQL
    return (snrCode, realtime, period)

class PlainTCP(protocol.Protocol):
    def __init__(self, factory):
        self.factory = factory
        self.factory.numConnections = 0
        self.snrCode = None
        self.rData = None
        self.pData = None
        self.err = None

    def connectionMade(self):
        self.factory.numConnections += 1
        print "Nr. of connections: %d\n" % (self.factory.numConnections)
        self.transport.write("Hello remote\r\n")  # it only prints every 5 connections

    def connectionLost(self, reason):
        self.factory.numConnections -= 1
        print "Nr. of connections: %d\n" % (self.factory.numConnections)

    @defer.inlineCallbacks
    def dataReceived(self, data):
        global dbpool, rcs
        (self.snrCode, rDat, pDat) = getDataParsed(data)
        if self.snrCode == None or rDat == None or pDat == None:
            err = "Bad format"
        else:
            err = "OK"
        print "err:%s" % (err)  # debug print to show flow control
        self.err = err
        self.transport.write(self.snrCode)
        self.transport.write(self.err)
        self.transport.write(rDat)
        self.transport.write(pDat)
        self.transport.loseConnection()
        if self.snrCode != None and rDat != None and pDat != None:
            res = yield self.saveRealTimeData(rcs, rDat)
            res = yield self.savePeriodData(dbpool, pDat, conf)
        print "err2:%s" % (err)  # debug print to show flow control

    @defer.inlineCallbacks
    def saveRealTimeData(self, rc, dat):
        key = "somekey"
        val = "somedata"
        yield rc.set(key, val)
        yield rc.expire(key, 30)

    @defer.inlineCallbacks
    def savePeriodData(self, rc, dat, conf):
        query = "some SQL statement"
        yield rc.runQuery(query)

class PlainTCPFactory(protocol.Factory):
    def buildProtocol(self, addr):
        return PlainTCP(self)

def main():
    dmdb = Dmdb()
    if not dmdb.detectDb():
        print "Please run MySQL RDBS first."
        sys.exit()
    log.startLogging(sys.stdout)
    reactor.listenTCP(8080, PlainTCPFactory())
    reactor.run()

if __name__ == "__main__":
    main()
And a clip of my simple client:
import socket
import sys

def connectSend(host="127.0.0.1", port=8080):
    global packet
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        s.sendall(''.join(packet))
        data = s.recv(1024)
        s.close()
        print 'Received', repr(data)
    except socket.error, err:
        print "Remote socket is not available: %s" % str(err)
        sys.exit(1)
The current status is:
If I disable @defer.inlineCallbacks and the yield operations in dataReceived(), both the self.transport.write() calls inside connectionMade() and in dataReceived() output data to clients.
If I enable @defer.inlineCallbacks and the two yielded DB ops (SQL/Redis), then self.transport.write() inside connectionMade() only prints every 5 connections, and dataReceived() does not output any data to clients.
The debug print statements appear in the log regardless of @defer.inlineCallbacks.
I was told that dataReceived() should not be decorated with @defer.inlineCallbacks, but removing the decorator doesn't change anything.
I am wondering whether gevent can help me out of this unpredictable behavior. I am twisted into an endless tornado, cyclone.....
Anyone who has similar experience, please help me. Thanks.
By changing the function as follows, the code works.
# COMMENT OUT the @defer.inlineCallbacks decorator
def dataReceived(self, data):
    global dbpool, rcs
    (self.snrCode, rDat, pDat) = getDataParsed(data)
    if self.snrCode == None or rDat == None or pDat == None:
        err = "Bad format"
    else:
        err = "OK"
    print "err:%s" % (err)  # debug print to show flow control
    self.err = err
    self.transport.write(self.snrCode)
    self.transport.write(self.err)
    self.transport.write(rDat)
    self.transport.write(pDat)
    self.transport.loseConnection()
    if self.snrCode != None and rDat != None and pDat != None:
        self.saveRealTimeData(rcs, rDat)
        self.savePeriodData(dbpool, pDat, conf)
        # Removed yield before DB ops
    print "err2:%s" % (err)  # debug print to show flow control

@defer.inlineCallbacks
def saveRealTimeData(self, rc, dat):
    print "saveRedis"
    key = "somekey"
    val = "somedata"
    yield rc.set(key, val)
    yield rc.expire(key, 30)

@defer.inlineCallbacks
def savePeriodData(self, rc, dat, conf):
    print "save SQL"
    query = "some SQL statement"
    yield rc.runQuery(query)
If we keep @defer.inlineCallbacks and the yields in dataReceived(), the connection is closed before the second DB op, and therefore no data is output to the connection. Maybe this is caused by the inlineCallbacks decorator.
By removing it, the flow control is simple and straightforward.
However, I still can't get why I cannot use inlineCallbacks when there are two deferred DB ops. Don't they need to be deferred this time?
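One likely explanation (my reading, not stated in the original thread): with @defer.inlineCallbacks, dataReceived() called loseConnection() before the yielded DB Deferreds had fired. A minimal sketch that keeps the decorator but closes the connection only after both writes complete, reusing the names from the question:
@defer.inlineCallbacks
def dataReceived(self, data):
    (self.snrCode, rDat, pDat) = getDataParsed(data)
    self.err = "OK" if None not in (self.snrCode, rDat, pDat) else "Bad format"
    self.transport.write(self.snrCode or "")
    self.transport.write(self.err)
    if self.err == "OK":
        self.transport.write(rDat)
        self.transport.write(pDat)
        yield self.saveRealTimeData(rcs, rDat)         # wait for the redis write
        yield self.savePeriodData(dbpool, pDat, conf)  # wait for the SQL write
    self.transport.loseConnection()  # close only after both Deferreds have fired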

strange error when receiving udp datagram

I have a file, mc_send.py, that sends mcast messages and shall receive a unicast message back from the program that received the mcast message, mc_recv.py. Mcast works, but when receiving the unicast message back a strange error appears: ValueError: maximum length of data to be read cannot be negative. The error is at line 14 in this file, mc_send.py:
I have struggled with this for many hours on Windows 7 with Python 2.7.2 and PyQt4 v4.9 but can't find what I'm doing wrong. These programs are based on the broadcast examples from PyQt4.
""" to see all ports on windows: netstat -ap udp | find "4545" """
from PyQt4 import QtCore, QtGui, QtNetwork
unicast_addr = "127.0.0.1"
unicast_port = 45455
mcast_addr = "239.255.43.21"
mcast_port = 45454
class Sender(QtGui.QDialog):
def processPendingDatagrams(self):
while self.udpServer.hasPendingDatagrams():
datagram, host, port = self.udpServer.readDatagram(self.udpSocket.pendingDatagramSize())
print "got msg:", datagram
def __init__(self, parent=None):
super(Sender, self).__init__(parent)
self.groupAddress = QtNetwork.QHostAddress(mcast_addr)
self.unicastAddress = QtNetwork.QHostAddress(unicast_addr)
self.statusLabel = QtGui.QLabel("Ready to multicast datagrams to group %s on port 45454" %
self.groupAddress.toString())
# setup socket for listening on incomming datagrams
self.udpServer = QtNetwork.QUdpSocket(self)
self.udpServer.bind(unicast_port)
self.udpServer.readyRead.connect(self.processPendingDatagrams)
self.startButton = QtGui.QPushButton("&Start")
self.quitButton = QtGui.QPushButton("&Quit")
buttonBox = QtGui.QDialogButtonBox()
buttonBox.addButton(self.startButton, QtGui.QDialogButtonBox.ActionRole)
buttonBox.addButton(self.quitButton, QtGui.QDialogButtonBox.RejectRole)
self.timer = QtCore.QTimer(self)
self.udpSocket = QtNetwork.QUdpSocket(self)
self.messageNo = 1
self.startButton.clicked.connect(self.startSending)
self.quitButton.clicked.connect(self.close)
self.timer.timeout.connect(self.send_mc_msg)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(self.statusLabel)
mainLayout.addWidget(buttonBox)
self.setLayout(mainLayout)
self.setWindowTitle("WSim")
def startSending(self):
self.startButton.setEnabled(False)
self.timer.start(1000)
def send_mc_msg(self):
self.udpSocket.writeDatagram("hello %d" %(self.messageNo), self.groupAddress, mcast_port)
self.messageNo += 1
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
sender = Sender()
sender.show()
sys.exit(sender.exec_())
The multicast receiver, which also sends the unicast response back, looks like this (mc_recv.py):
from PyQt4 import QtGui, QtNetwork

mcast_addr = "239.255.43.21"
mcast_port = 45454
answer_addr = "127.0.0.1"
answer_port = 45455

class Receiver(QtGui.QDialog):
    def __init__(self, parent=None):
        super(Receiver, self).__init__(parent)
        self.groupAddress = QtNetwork.QHostAddress(mcast_addr)
        self.udpSocket = QtNetwork.QUdpSocket(self)
        self.udpSocket.bind(mcast_port, QtNetwork.QUdpSocket.ReuseAddressHint)
        self.udpSocket.joinMulticastGroup(self.groupAddress)
        self.udpSocket.readyRead.connect(self.processPendingDatagrams)
        # Use this socket to send unicast messages back
        self.answerSocket = QtNetwork.QUdpSocket(self)
        self.answerAddress = QtNetwork.QHostAddress(answer_addr)
        quitButton = QtGui.QPushButton("&Quit")
        quitButton.clicked.connect(self.close)
        buttonLayout = QtGui.QHBoxLayout()
        buttonLayout.addStretch(1)
        buttonLayout.addWidget(quitButton)
        buttonLayout.addStretch(1)
        self.statusLabel = QtGui.QLabel("Listening for multicasted messages on %s" % mcast_addr)
        mainLayout = QtGui.QVBoxLayout()
        mainLayout.addWidget(self.statusLabel)
        mainLayout.addLayout(buttonLayout)
        self.setLayout(mainLayout)
        self.setWindowTitle("mrecv")

    def processPendingDatagrams(self):
        """receive and decode multicast messages and send a response message on the return address"""
        while self.udpSocket.hasPendingDatagrams():
            datagram, host, port = self.udpSocket.readDatagram(self.udpSocket.pendingDatagramSize())
            self.statusLabel.setText("received mcast msg '%s'" % datagram)
            # send a response back to msend
            self.answerSocket.writeDatagram("hi back", self.answerAddress, answer_port)

if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    receiver = Receiver()
    receiver.show()
    sys.exit(receiver.exec_())
Found the cause, an embarrassingly simple error: I had written self.udpSocket.pendingDatagramSize() instead of self.udpServer.pendingDatagramSize() when I was reading the data...
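For clarity, the corrected line in Sender.processPendingDatagrams reads:
datagram, host, port = self.udpServer.readDatagram(self.udpServer.pendingDatagramSize())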

Celery: abort task on connection error

I have to implement a Task subclass that fails gracefully if the broker is not running - currently I'm using RabbitMQ.
I could probably just use a try statement to catch the exception:
try:
    Mytask.delay(arg1, arg2)
except socket.error:
    # Send a notice to an admin
    pass
but I'd like to create a subclass of Task that can handle that.
I've tried something like this:
class MyTask(Task):
    ignore_result = True

    def __call__(self, *args, **kwargs):
        try:
            return self.run(*args, **kwargs)
        except socket.error:
            # Send a notice to an admin
            return None
but the workflow is clearly wrong. I think I need to inject a backend subclass or some kind of failure policy.
Do you have any suggestions?
A possible solution I came up with:
import socket

from celery.decorators import task
from celery.task import Task
from celery.backends.base import BaseBackend

UNDELIVERED = 'UNDELIVERED'

class DummyBackend(BaseBackend):
    """
    Dummy queue backend for undelivered messages (due to the broker being down).
    """
    def store_result(self, *args, **kwargs):
        pass

    def get_status(self, *args, **kwargs):
        return UNDELIVERED

    def _dummy(self, *args, **kwargs):
        return None

    wait_for = get_result = get_traceback = _dummy

class SafeTask(Task):
    """
    A task not raising socket errors if the broker is down.
    """
    abstract = True
    on_broker_error = None
    errbackend = DummyBackend

    @classmethod
    def apply_async(cls, *args, **kwargs):
        try:
            return super(SafeTask, cls).apply_async(*args, **kwargs)
        except socket.error, err:
            if cls.on_broker_error is not None:
                cls.on_broker_error(err, cls, *args, **kwargs)
            return cls.app.AsyncResult(None, backend=cls.errbackend(),
                                       task_name=cls.name)

def safetask(*args, **kwargs):
    """
    Task factory returning safe tasks handling socket errors.
    When a socket error occurs, the given callable *on_broker_error*
    is called passing the exception object, the class of the task
    and the original args and kwargs.
    """
    if 'base' not in kwargs:
        on_broker_error = kwargs.pop('on_broker_error', SafeTask.on_broker_error)
        errbackend = kwargs.pop('errbackend', SafeTask.errbackend)
        kwargs['base'] = type('SafeTask', (SafeTask,), {
            'on_broker_error': staticmethod(on_broker_error),
            'errbackend': errbackend,
            'abstract': True,
        })
    return task(*args, **kwargs)
You can either subclass SafeTask or use the @safetask decorator.
If you can think of an improvement, don't hesitate to contribute.
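A hypothetical usage sketch of the factory above (the notify_admin callback and the task body are illustrative, not from the original answer):
def notify_admin(err, task_cls, *args, **kwargs):
    print "broker down, %s not delivered: %s" % (task_cls.__name__, err)

@safetask(ignore_result=True, on_broker_error=notify_admin)
def ping(host):
    print "pinging %s" % host

# With RabbitMQ down, ping.delay() calls notify_admin and returns an
# AsyncResult backed by DummyBackend (status UNDELIVERED) instead of
# raising socket.error:
result = ping.delay("example.com")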