Celery publisher doesn't send messages to the intended RabbitMQ queue - rabbitmq

I have a Flask app with Celery running inside it. When a user sends input to an app URI, it should be published as a message to a specific RabbitMQ queue, but it ends up in the default queue instead. How do I send a message to a specific, named queue?
publisher.py
from flask import Flask, request, jsonify
from celery import Celery, bootsteps
from time import sleep
from kombu.common import QoS


class NoChannelGlobalQoS(bootsteps.StartStopStep):
    requires = {'celery.worker.consumer.tasks:Tasks'}

    def start(self, c):
        qos_global = False
        c.connection.default_channel.basic_qos(0, c.initial_prefetch_count, qos_global)

        def set_prefetch_count(prefetch_count):
            return c.task_consumer.qos(
                prefetch_count=prefetch_count,
                apply_global=qos_global,
            )

        c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)
server = Flask(__name__)

broker_uri1 = "amqp://"
backend = "mongodb+srv://"

app = Celery('TestApp', broker=broker_uri1, backend=backend)

CELERY_TASK_ROUTES = {'app.tasks.*': {'queue': 'rabbit'}, 'app.task.*': {'queue': 'rabbit'}}

app.config_from_object('celeryconfig')
app.conf.task_default_exchange = 'rabbit'
app.conf.task_default_routing_key = 'rabbit'
app.steps['worker'].add(NoChannelGlobalQoS)
@server.route("/")
def print_hello():
    return "Flask server working"


@server.route("/sync-reverse")
def reverse_by_worker_with_results():
    input = request.args.get("text")
    app.select_queues(queues="rabbit")
    task = app.signature('tasks.reverse', kwargs={'text': input})
    result = task.delay()
    while result.status == 'PENDING':
        print(result.status)
        sleep(2)
    print(result.status)
    return "Reversed >><br />Input Text : " + input + "<br />Output Text : " + result.get()


@server.route("/async-reverse")
def reverse_by_worker():
    input = request.args.get("text")
    app.select_queues(queues="rabbit")  # supposed to add queues to the task
    task = app.signature('tasks.reverse', kwargs={'text': input})
    result = task.delay()
    if result.id:
        return 'Task added'
    else:
        return 'Failure in adding task'


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    server.run(host='0.0.0.0')
worker.py
from celery import Celery, bootsteps
from time import sleep
from kombu.common import QoS

broker_uri = 'amqp:///'
backend_uri = "mongodb+srv://"

app = Celery('TestApp', broker=broker_uri, backend=backend_uri)
app.config_from_object('celeryconfig')
app.conf.task_default_exchange = 'rabbit'
app.conf.task_default_routing_key = 'rabbit'


class NoChannelGlobalQoS(bootsteps.StartStopStep):
    requires = {'celery.worker.consumer.tasks:Tasks'}

    def start(self, c):
        qos_global = False
        c.connection.default_channel.basic_qos(0, c.initial_prefetch_count, qos_global)

        def set_prefetch_count(prefetch_count):
            return c.task_consumer.qos(
                prefetch_count=prefetch_count,
                apply_global=qos_global,
            )

        c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)


app.steps['consumer'].add(NoChannelGlobalQoS)


@app.task
def reverse(text):
    sleep(10)
    return text[::-1]
I have used a Celery config file (celeryconfig.py) to define the queue name and type, as suggested:
from kombu import Queue

task_queues = [Queue(name="rabbit", queue_arguments={"x-queue-type": "quorum"})]
task_routes = {
    'tasks.add': 'rabbit',
}
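For reference, here is a minimal sketch (not the poster's code; the broker and backend URIs and the example text are placeholders) of pinning a task to a named queue, either through task_routes or explicitly at send time with apply_async(queue=...):

from celery import Celery
from kombu import Queue

app = Celery("TestApp", broker="amqp://", backend="rpc://")  # placeholder URIs

# Declare the quorum queue so Celery creates it with the right arguments.
app.conf.task_queues = [Queue("rabbit", queue_arguments={"x-queue-type": "quorum"})]
# Route by registered task name; the key must match the name the worker registers.
app.conf.task_routes = {"tasks.reverse": {"queue": "rabbit"}}

# Or force the queue explicitly when sending; queue= overrides the default routing.
sig = app.signature("tasks.reverse", kwargs={"text": "hello"})
result = sig.apply_async(queue="rabbit")

The worker also has to be told to consume from that queue, for example by starting it with celery -A worker worker -Q rabbit (assuming the module above is saved as worker.py); otherwise it may only listen on the default celery queue.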

Related

What if I produce 3 messages for 3 different queues but consume from only 2 queues in RabbitMQ?

In the following Python program I am configuring RabbitMQ. I create an exchange named "order" and publish 3 messages with the routing keys "order.notify", "order.report", and "order.test".
import pika
import json
import uuid

con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()

channel.exchange_declare(
    exchange='order',
    exchange_type='direct'
)

channel.basic_publish(
    exchange='order',
    routing_key='order.notify',
    body=json.dumps({'user_email': 'First'})
    # body=json.dumps({'user_email': order['user_email']})
)
print('[x] Sent notify message')

channel.basic_publish(
    exchange='order',
    routing_key='order.report',
    body=json.dumps({'user_email': 'Second'})
)
print('[x] Sent report message')

channel.basic_publish(
    exchange='order',
    routing_key='order.test',
    body=json.dumps({'user_email': 'third'})
)
print('[x] Sent test message')

con.close()
Now on the consumer side I have created only 2 queues, with the binding keys order.notify and order.report.
report.py
import pika
import json

con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()

queue = channel.queue_declare('order_notify')
queue_name = queue.method.queue

channel.queue_bind(
    exchange='order',
    queue=queue_name,
    routing_key='order.report'  # binding key
)

def callback(ch, method, properties, body):
    payload = json.loads(body)
    # print(' [x] Notifying {}'.format(payload['user_email']))
    print('Report Queue')
    print(payload['user_email'])
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_consume(on_message_callback=callback, queue=queue_name)
print(' [*] Waiting for report messages. To exit press CTRL+C')
channel.start_consuming()
nortify.py
import pika
import json

con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()

queue = channel.queue_declare('order_notify')
queue_name = queue.method.queue

channel.queue_bind(
    exchange='order',
    queue=queue_name,
    routing_key='order.notify'  # binding key
)

def callback(ch, method, properties, body):
    payload = json.loads(body)
    # print(' [x] Notifying {}'.format(payload['user_email']))
    print('Notify Queue')
    print(payload['user_email'])
    ch.basic_ack(delivery_tag=method.delivery_tag)

channel.basic_consume(on_message_callback=callback, queue=queue_name)
print(' [*] Waiting for notify messages. To exit press CTRL+C')
channel.start_consuming()
Now, which queue will consume which message? When I run this, the queues seem to consume messages randomly; each consumer picks up different messages on different runs. Can someone please explain?
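For what it's worth, a direct exchange delivers a copy of a message only to queues whose binding key exactly equals the message's routing key, and a message that matches no binding (order.test above) is simply dropped. Also note that both consumer scripts declare the same queue name, order_notify, so they end up competing for messages from a single queue, which would explain the apparently random results. A small sketch with hypothetical, distinct queue names so that each consumer gets its own queue:

import pika

con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()
channel.exchange_declare(exchange='order', exchange_type='direct')

# One queue per binding key; the queue names here are illustrative only.
for queue_name, binding_key in [('order_notify_q', 'order.notify'),
                                ('order_report_q', 'order.report')]:
    channel.queue_declare(queue=queue_name)
    channel.queue_bind(exchange='order', queue=queue_name, routing_key=binding_key)

# 'order.notify' now lands only in order_notify_q, 'order.report' only in
# order_report_q, and 'order.test' matches no binding, so it is discarded.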

SSH and Ping to hosts concurrently with Python asyncio?

I'm trying to SSH/ping hosts concurrently, but I don't see any results so far, so my implementation probably isn't correct. This is what I have; any ideas are appreciated.
import paramiko
import time
import asyncio
import subprocess

async def sshTest(ipaddress, deviceUsername, devicePassword, sshPort):  # finalDict
    try:
        print("Performing SSH Connection to the device")
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(ipaddress, username=deviceUsername, password=devicePassword, port=sshPort,
                       look_for_keys=False, allow_agent=False)
        print("Channel established")
    except Exception as e:
        print(e)

async def pingf(ip):
    p1 = subprocess.Popen(['ping', '-c', '5', ip], stdout=subprocess.PIPE)
    output = p1.communicate()[0]
    print(output)

async def main():
    taskA = loop.create_task(sshTest('192.168.255.68', 'admin', 'admin', '22'))
    taskB = loop.create_task(sshTest('192.168.254.108', 'admin', 'admin', '22'))
    taskC = loop.create_task(sshTest('192.168.249.134', 'admin', 'admin', '22'))
    taskD = loop.create_task(sshTest('192.168.254.108', 'admin', 'admin', '22'))
    task1 = loop.create_task(pingf('192.168.255.68'))
    task2 = loop.create_task(pingf('192.168.254.108'))
    task3 = loop.create_task(pingf('192.168.249.134'))
    task4 = loop.create_task(pingf('192.168.254.108'))
    await asyncio.wait([taskA, taskB, taskC, taskD, task1, task2, task3, task4])

if __name__ == "__main__":
    start = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    end = time.time()
    print("The time of execution of above program is :", end - start)
Asyncio is a form of cooperative multitasking. This means that in order for tasks to run concurrently, a task must explicitly yield control back to the scheduler, which in Python means "your tasks need to await on something".
Neither of your tasks ever calls await, so they're not going to run concurrently. What you have right now is going to run serially.
If you want to run ssh connections concurrently, you're going to have to either:
Replace paramiko with something like AsyncSSH, which is written to work with asyncio (see the sketch just after this list), or
Use threading or multiprocessing to parallelize your tasks, rather than using asyncio.
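Not part of the original answer, but a rough sketch of the AsyncSSH route might look like the following (it assumes pip install asyncssh; the hosts and credentials are the poster's placeholder values, and known_hosts=None disables host-key checking purely for brevity):

import asyncio
import asyncssh

async def ssh_test(host, username, password, port=22):
    try:
        # asyncssh awaits its network I/O, so the connections genuinely overlap.
        async with asyncssh.connect(host, port=port, username=username,
                                    password=password, known_hosts=None) as conn:
            result = await conn.run("uptime")
            print(host, result.stdout.strip())
    except (OSError, asyncssh.Error) as exc:
        print(host, "connection failed:", exc)

async def main():
    await asyncio.gather(
        ssh_test("192.168.255.68", "admin", "admin"),
        ssh_test("192.168.254.108", "admin", "admin"),
    )

asyncio.run(main())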
Additionally, if you're working with asyncio, anything that involves running an external command (such as your pingf task) is going to need to use asyncio's run_in_executor method.
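As a rough illustration (again, not from the original answer), wrapping the blocking ping in the default thread-pool executor could look like this:

import asyncio
import subprocess

def ping_blocking(ip):
    # Plain blocking call; it runs inside a worker thread.
    return subprocess.run(["ping", "-c", "5", ip], capture_output=True).stdout

async def pingf(ip):
    loop = asyncio.get_running_loop()
    # Hand the blocking work to the default executor so the event loop stays free.
    output = await loop.run_in_executor(None, ping_blocking, ip)
    print(output.decode())

async def main():
    await asyncio.gather(pingf("192.168.255.68"), pingf("192.168.254.108"))

asyncio.run(main())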
For the example you've shown here, I would suggest instead using the concurrent.futures module. Your code might end up looking something like this (I've modified the code to run in my test environment and given the sshTest task something to do beyond simply connecting):
import concurrent.futures
import paramiko
import asyncio
import subprocess

def sshTest(ipaddress, deviceUsername, devicePassword, sshPort):  # finalDict
    try:
        print("Performing SSH Connection to the device")
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(
            ipaddress,
            username=deviceUsername,
            password=devicePassword,
            port=sshPort,
            look_for_keys=True,
            allow_agent=True,
        )
        stdin, stdout, stderr = client.exec_command("sh -c 'sleep 2; uptime'")
        output = stdout.read()
        return output
    except Exception:
        return "failed to connect"

def pingf(ip):
    output = subprocess.check_output(["ping", "-c", "5", ip])
    return output

def main():
    futures = []
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2200"))
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2201"))
        futures.append(pool.submit(sshTest, "localhost", "root", "admin", "2202"))
        futures.append(pool.submit(pingf, "192.168.1.1"))
        futures.append(pool.submit(pingf, "192.168.1.5"))
        futures.append(pool.submit(pingf, "192.168.1.254"))
        for future in concurrent.futures.as_completed(futures):
            print("return value from task:", future.result())

if __name__ == "__main__":
    main()

How to pass headers with a headers exchange in RabbitMQ using Python

Consumer.py:
#!/usr/bin/env python
import pika, sys, os

def main():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()

    channel.exchange_declare(exchange='headers_logs', exchange_type='headers')
    channel.queue_declare(queue='', exclusive=True)
    queue_name = "HeadersQueue1"
    channel.queue_bind(exchange='headers_logs', queue=queue_name)

    def callback(ch, method, properties, body):
        print(" [x] %r" % body.decode())

    print(' [*] Waiting for logs. To exit press CTRL+C')
    channel.basic_consume(
        queue=queue_name, on_message_callback=callback, auto_ack=True)
    channel.start_consuming()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
Publish.py:
#!/usr/bin/env python
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='headers_logs', exchange_type='headers')

message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='headers_logs', headers={"name": "ram"}, body=message)
print(" [x] Sent %r" % message)
connection.close()
I have written the consumer and publisher programs above. Can anyone please guide me on how to write a simple headers-exchange program in RabbitMQ using Python?
To use a headers exchange, you need to declare the exchange type as headers, not fanout as in your question's text.
exchangeName = 'headers_logs'
channel.exchange_declare(exchangeName, exchange_type='headers', durable=True)
Then create the queue and bind it to the exchange using the headers. Note that 'x-match' here can be set to match any or all headers. The routing key is set to empty string because it will not be used for routing messages.
qName = 'queue_logs'
channel.queue_declare(queue=qName, durable=True)
channel.queue_bind(qName, exchangeName, routing_key='', arguments={'x-match': 'any', 'key1': 'one', 'key2': 'two'})
Now we can publish a message to the exchange with a set of headers:
channel.basic_publish(
    exchange=exchangeName,
    routing_key='',
    body='test message body',
    properties=pika.BasicProperties(
        delivery_mode=2,  # make message persistent
        headers={'key1': 'one', 'key2': 'three'}
    )
)
I have only matched 'key1' in this message to demonstrate that 'x-match' has been set to 'any'.
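To round out the example, here is a minimal consumer sketch (not from the original answer) that assumes the queue_logs queue bound to headers_logs as declared above:

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

def callback(ch, method, properties, body):
    # The message headers arrive on the BasicProperties object.
    print(" [x] headers=%r body=%r" % (properties.headers, body.decode()))

channel.basic_consume(queue='queue_logs', on_message_callback=callback, auto_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()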

ActiveMQ/STOMP Clear Schedule Messages Pointed To Destination

I would like to remove messages that are scheduled to be delivered to a specific queue, but I'm finding the process to be unnecessarily burdensome.
Here I am sending a blank message to a queue with a delay:
self._connection.send(body="test", destination=f"/queue/my-queue", headers={
"AMQ_SCHEDULED_DELAY": 100_000_000,
"foo": "bar"
})
And here I would like to clear the scheduled messages for that queue:
self._connection.send(destination="ActiveMQ.Scheduler.Management", headers={
    "AMQ_SCHEDULER_ACTION": "REMOVEALL",
}, body="")
Of course the "destination" here needs to be ActiveMQ.Scheduler.Management instead of my actual queue, but I can't find any way to delete only the scheduled messages that are destined for /queue/my-queue. I tried using the selector header, but that doesn't seem to work for AMQ_SCHEDULER_ACTION type messages.
The only suggestion I've seen is to write a consumer to browse all of the scheduled messages, inspect each one for its destination, and delete each schedule by its ID. This seems insane to me, as I don't have just a handful of messages but many millions that I'd like to delete.
Is there a way I could send a command to ActiveMQ to clear scheduled messages with a custom header value?
Maybe I can define a custom scheduled messages location for each queue?
Edit:
I've written a wrapper around the stomp.py connection to handle purging schedules destined for a queue. The MQStompFacade takes an existing stomp.Connection and the name of the queue you are working with and provides enqueue, enqueue_many, receive, purge, and move.
When receiving from a queue, if include_delayed is True, it will subscribe to both the queue and a topic that consumes the schedules. Assuming the messages were enqueued with this class and have the name of the original destination queue as a custom header, scheduled messages that aren't destined for the receiving queue will be filtered out.
Not yet tested in production. There is probably a lot of room for optimization here.
Usage:
stomp = MQStompFacade(connection, "my-queue")
stomp.enqueue_many([
    EnqueueRequest(message="hello"),
    EnqueueRequest(message="goodbye", delay=100_000)
])
stomp.purge()  # <- removes queued and scheduled messages destined for "/queue/my-queue"
class MQStompFacade(ConnectionListener):
    def __init__(self, connection: Connection, queue: str):
        self._connection = connection
        self._queue = queue
        self._messages: List[Message] = []
        self._connection_id = rand_string(6)
        self._connection.set_listener(self._connection_id, self)

    def __del__(self):
        self._connection.remove_listener(self._connection_id)

    def enqueue_many(self, requests: List[EnqueueRequest]):
        txid = self._connection.begin()
        for request in requests:
            headers = request.headers or {}
            # Used in scheduled message selectors
            headers["queue"] = self._queue
            if request.delay_millis:
                headers['AMQ_SCHEDULED_DELAY'] = request.delay_millis
            if request.priority is not None:
                headers['priority'] = request.priority
            self._connection.send(body=request.message,
                                  destination=f"/queue/{self._queue}",
                                  txid=txid,
                                  headers=headers)
        self._connection.commit(txid)

    def enqueue(self, request: EnqueueRequest):
        self.enqueue_many([request])

    def purge(self, selector: Optional[str] = None):
        num_purged = 0
        for _ in self.receive(idle_timeout=5, selector=selector):
            num_purged += 1
        return num_purged

    def move(self, destination_queue: AbstractQueueFacade,
             selector: Optional[str] = None):
        buffer_size = 500
        move_buffer = []
        for message in self.receive(idle_timeout=5, selector=selector):
            move_buffer.append(EnqueueRequest(
                message=message.body
            ))
            if len(move_buffer) >= buffer_size:
                destination_queue.enqueue_many(move_buffer)
                move_buffer = []
        if move_buffer:
            destination_queue.enqueue_many(move_buffer)

    def receive(self,
                max: Optional[int] = None,
                timeout: Optional[int] = None,
                idle_timeout: Optional[int] = None,
                selector: Optional[str] = None,
                peek: Optional[bool] = False,
                include_delayed: Optional[bool] = False):
        """
        Receive messages until one of the following conditions is met.

        Args:
            max: Receive messages until the [max] number of messages are received
            timeout: Receive messages until this timeout is reached
            idle_timeout (seconds): Receive messages until the queue is idle for this amount of time
            selector: JMS selector that can be applied to message headers. See https://activemq.apache.org/selector
            peek: Set to True to disable automatic ack on matched criteria. Peeked messages will remain in the queue
            include_delayed: Set to True to return messages scheduled for delivery in the future
        """
        self._connection.subscribe(f"/queue/{self._queue}",
                                   id=self._connection_id,
                                   ack="client",
                                   selector=selector)

        if include_delayed:
            browse_topic = f"topic/scheduled_{self._queue}_{rand_string(6)}"
            schedule_selector = f"queue = '{self._queue}'"
            if selector:
                schedule_selector = f"{schedule_selector} AND ({selector})"
            self._connection.subscribe(browse_topic,
                                       id=self._connection_id,
                                       ack="auto",
                                       selector=schedule_selector)
            self._connection.send(
                destination="ActiveMQ.Scheduler.Management",
                headers={
                    "AMQ_SCHEDULER_ACTION": "BROWSE",
                    "JMSReplyTo": browse_topic
                },
                id=self._connection_id,
                body=""
            )

        listen_start = time.time()
        last_receive = time.time()
        messages_received = 0
        scanning = True
        empty_receive = False

        while scanning:
            try:
                message = self._messages.pop()
                last_receive = time.time()
                if not peek:
                    self._ack(message)
                messages_received += 1
                yield message
            except IndexError:
                empty_receive = True
                time.sleep(0.1)

            if max and messages_received >= max:
                scanning = False
            elif timeout and time.time() > listen_start + timeout:
                scanning = False
            elif empty_receive and idle_timeout and time.time() > last_receive + idle_timeout:
                scanning = False
            else:
                scanning = True

        self._connection.unsubscribe(id=self._connection_id)

    def on_message(self, frame):
        destination = frame.headers.get("original-destination", frame.headers.get("destination"))
        schedule_id = frame.headers.get("scheduledJobId")
        message = Message(
            attributes=MessageAttributes(
                id=frame.headers["message-id"],
                schedule_id=schedule_id,
                timestamp=frame.headers["timestamp"],
                queue=destination.replace("/queue/", "")
            ),
            body=frame.body
        )
        self._messages.append(message)

    def _ack(self, message: Message):
        """
        Deletes the message from the queue.
        If the message has a schedule_id, also removes the associated scheduled job.
        """
        if message.attributes.schedule_id:
            self._connection.send(
                destination="ActiveMQ.Scheduler.Management",
                headers={
                    "AMQ_SCHEDULER_ACTION": "REMOVE",
                    "scheduledJobId": message.attributes.schedule_id
                },
                id=self._connection_id,
                body=""
            )
        self._connection.ack(message.attributes.id, subscription=self._connection_id)
In order to remove specific messages you need to know the ID which you can get via a browse of the scheduled messages. The only other option available is to use the start and stop time options in the remove operations to remove all messages inside a range.
MessageProducer producer = session.createProducer(management);
Message request = session.createMessage();
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_START_TIME, Long.toString(start));
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_END_TIME, Long.toString(end));
producer.send(request);
If that doesn't suit your need I'm sure the project would welcome contributions.

How can I remove messages from an ActiveMQ queue using Python?

I have an ActiveMQ queue which has several messages that were sent using persistent set to true. When I create a subscriber in Python to read the queue, I get all of the messages in the queue. The next time I open the subscriber, I get all of the same messages. I adjusted the code that writes to the queue to set persistent to false, but the message remains in the queue. Have I neglected to send an acknowledgement?
The code is written using Python 2.7 because that's what our customer is using. I'd love to upgrade them, but I don't have the time.
Here's the script that reads the queue:
import socket
import threading
import xml.etree.ElementTree as etree
from xml.dom import minidom  # for pretty printing
# import SampleXML
import sys
import os
import math
import time
from time import monotonic
import string
import stomp  # for queue support
import platform

class ConnectionListener(stomp.ConnectionListener):
    def __init__(self, connection):
        self.connection = connection
        print("Listener created")

    def on_message(self, message):
        print("Received message with body " + message.body)

class Reader:
    def __init__(self):
        pass

    def ConnectToQueue(self):
        # For Production
        user = os.getenv("ACTIVEMQ_USER") or "worthington"
        # user = os.getenv("ACTIVEMQ_USER") or "worthington_test"
        password = os.getenv("ACTIVEMQ_PASSWORD") or "level3"
        host = os.getenv("ACTIVEMQ_HOST") or "localhost"
        port = os.getenv("ACTIVEMQ_PORT") or 61613
        # destination = sys.argv[1:2] or ["/topic/event"]
        # destination = destination[0]
        dest = "from_entec_test"
        # For Production
        # dest = "from_entec"

        try:
            conn = stomp.Connection10(host_and_ports=[(host, port)])
            conn.set_listener('message', ConnectionListener(conn))
            # conn.start()
            # subscribe_id = '-'.join(map(str, (platform.node(), os.getppid(), os.getpid())))
            conn.connect(login=user, passcode=password)
            subscribe_id = "Queue Test Listener"
            conn.subscribe(destination=dest, id=subscribe_id, ack='client-individual')
            conn.unsubscribe(id=subscribe_id)
            conn.disconnect()
        except Exception as error:
            reason = str(error)
            print("Exception when reading data from queue: " + str(error))
            pass

if __name__ == "__main__":
    try:
        UploadData = Reader()
        UploadData.ConnectToQueue()
        print("Reader finished.")
    except Exception as Value:
        reason = str(Value)
        pass
And here's the code that writes to it:
import socket
import threading
import xml.etree.ElementTree as etree
from xml.dom import minidom  # for pretty printing
# import SampleXML
import sys
import os
import math
import time
from time import monotonic
import string
import stomp  # for queue support
import platform

class ConnectionListener(stomp.ConnectionListener):
    def __init__(self, connection):
        self.connection = connection
        print "Listener created"

    def on_message(self, message):
        print "Received message with body " + message.body

class UploadData:
    def __init__(self):
        pass

    def ConnectToQueue(self):
        # For Production
        user = os.getenv("ACTIVEMQ_USER") or "worthington"
        # user = os.getenv("ACTIVEMQ_USER") or "worthington_test"
        password = os.getenv("ACTIVEMQ_PASSWORD") or "level3"
        host = os.getenv("ACTIVEMQ_HOST") or "localhost"
        port = os.getenv("ACTIVEMQ_PORT") or 61613
        # destination = sys.argv[1:2] or ["/topic/event"]
        # destination = destination[0]
        dest = "from_entec_test"
        # For Production
        # dest = "from_entec"

        try:
            conn = stomp.Connection10(host_and_ports=[(host, port)])
            # conn.start()
            # subscribe_id = '-'.join(map(str, (platform.node(), os.getppid(), os.getpid())))
            subscribe_id = "Queue Test Listener"
            conn.connect(login=user, passcode=password)
            message = "This is a test message."
            conn.send(dest, message, persistent='true')
            print "Sent message containing: " + message
            conn.disconnect()
        except Exception, error:
            reason = str(error)
            print "Exception when writing data to queue: " + str(error)
            pass

if __name__ == "__main__":
    try:
        UploadData = UploadData()
        UploadData.ConnectToQueue()
    except Exception, Value:
        reason = str(Value)
        print "Main routine exception: " + str(Value)
        pass
I'm not very familiar with Python STOMP clients, but from the code you appear to be subscribing with STOMP's 'client-individual' acknowledgement mode, which means that for each message you receive you must send an ACK frame back with the message id so that the remote can mark it as consumed. Since you are not doing that, the messages are not removed from the queue.
As an alternative, you can use the 'auto' acknowledgement mode, which marks messages as consumed as soon as the broker dispatches them. To understand the STOMP subscription model, please refer to the STOMP specification.
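For illustration only, here is a minimal sketch of acknowledging each message with stomp.py; it assumes a recent stomp.py release whose listeners receive a single frame argument and uses a STOMP 1.1 connection rather than the Connection10 above, so adjust it to your client version:

import time
import stomp

class AckListener(stomp.ConnectionListener):
    def __init__(self, connection, subscription_id):
        self.connection = connection
        self.subscription_id = subscription_id

    def on_message(self, frame):
        print("Received message with body " + frame.body)
        # Send the ACK so the broker marks the message as consumed
        # and removes it from the queue.
        self.connection.ack(frame.headers['message-id'], self.subscription_id)

conn = stomp.Connection([('localhost', 61613)])
sub_id = 'queue-test-listener'
conn.set_listener('ack-listener', AckListener(conn, sub_id))
conn.connect('worthington', 'level3', wait=True)
conn.subscribe(destination='from_entec_test', id=sub_id, ack='client-individual')
time.sleep(5)  # give the broker a moment to dispatch pending messages
conn.disconnect()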