I would like to remove messages that are scheduled to be delivered to a specific queue, but I'm finding the process to be unnecessarily burdensome.
Here I am sending a test message to a queue with a delay:
self._connection.send(body="test", destination=f"/queue/my-queue", headers={
"AMQ_SCHEDULED_DELAY": 100_000_000,
"foo": "bar"
})
And here I would like to clear the scheduled messages for that queue:
self._connection.send(destination=f"ActiveMQ.Scheduler.Management", headers={
"AMQ_SCHEDULER_ACTION": "REMOVEALL",
}, body="")
Of course the "destination" here needs to be ActiveMQ.Scheduler.Management instead of my actual queue. But I can't find any way to delete only the scheduled messages that are destined for /queue/my-queue. I tried using the selector header, but that doesn't seem to work for AMQ_SCHEDULER_ACTION type messages.
The only suggestion I've seen is to write a consumer that browses all of the scheduled messages, inspects each one for its destination, and deletes each schedule by its ID. This seems insane to me, as I don't have just a handful of messages but many millions that I'd like to delete.
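For reference, that browse-and-remove flow boils down to two kinds of management frames: a BROWSE request whose replies arrive on a reply topic of your choosing, and one REMOVE per job ID found there. A rough stomp.py sketch (the reply-topic name and the job_id variable are placeholders; the header names are the same ones used further down in this post), which the wrapper in the Edit below turns into a reusable class:
# Ask the scheduler to stream every scheduled job to a reply topic
connection.subscribe("/topic/schedule-browse", id="schedule-browse", ack="auto")
connection.send(destination="ActiveMQ.Scheduler.Management",
                headers={"AMQ_SCHEDULER_ACTION": "BROWSE",
                         "JMSReplyTo": "/topic/schedule-browse"},
                body="")

# ...then, for every browsed frame whose original-destination header matches
# /queue/my-queue, remove that single job by its id:
connection.send(destination="ActiveMQ.Scheduler.Management",
                headers={"AMQ_SCHEDULER_ACTION": "REMOVE",
                         "scheduledJobId": job_id},  # taken from the browsed frame
                body="")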
Is there a way I could send a command to ActiveMQ to clear scheduled messages with a custom header value?
Maybe I can define a custom scheduled messages location for each queue?
Edit:
I've written a wrapper around the stomp.py connection to handle purging schedules destined for a queue. The MQStompFacade takes an existing stomp.Connection and the name of the queue you are working with and provides enqueue, enqueue_many, receive, purge, and move.
When receiving from a queue, if include_delayed is True, it will subscribe to both the queue and a topic that consumes the schedules. Assuming the messages were enqueued with this class and have the name of the original destination queue as a custom header, scheduled messages that aren't destined for the receiving queue will be filtered out.
Not yet tested in production. There are probably a lot of optimizations to be made here.
Usage:
stomp = MQStompFacade(connection, "my-queue")
stomp.enqueue_many([
EnqueueRequest(message="hello"),
EnqueueRequest(message="goodbye", delay=100_000)
])
stomp.purge() # <- removes queued and scheduled messages destined for "/queue/my-queue"
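The EnqueueRequest, Message and MessageAttributes types aren't included above; a minimal sketch of what they might look like, inferred purely from how MQStompFacade uses them:
from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class EnqueueRequest:
    message: str
    delay_millis: Optional[int] = None      # becomes AMQ_SCHEDULED_DELAY
    priority: Optional[int] = None
    headers: Optional[Dict[str, str]] = None

@dataclass
class MessageAttributes:
    id: str                       # broker message-id
    schedule_id: Optional[str]    # scheduledJobId, only set for scheduled messages
    timestamp: str
    queue: str

@dataclass
class Message:
    attributes: MessageAttributes
    body: str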
class MQStompFacade(ConnectionListener):
def __init__(self, connection: Connection, queue: str):
self._connection = connection
self._queue = queue
self._messages: List[Message] = []
self._connection_id = rand_string(6)
self._connection.set_listener(self._connection_id, self)
def __del__(self):
self._connection.remove_listener(self._connection_id)
def enqueue_many(self, requests: List[EnqueueRequest]):
txid = self._connection.begin()
for request in requests:
headers = request.headers or {}
# Used in scheduled message selectors
headers["queue"] = self._queue
if request.delay_millis:
headers['AMQ_SCHEDULED_DELAY'] = request.delay_millis
if request.priority is not None:
headers['priority'] = request.priority
self._connection.send(body=request.message,
destination=f"/queue/{self._queue}",
transaction=txid,
headers=headers)
self._connection.commit(txid)
def enqueue(self, request: EnqueueRequest):
self.enqueue_many([request])
def purge(self, selector: Optional[str] = None):
num_purged = 0
for _ in self.receive(idle_timeout=5, selector=selector):
num_purged += 1
return num_purged
def move(self, destination_queue: AbstractQueueFacade,
selector: Optional[str] = None):
buffer_size = 500
move_buffer = []
for message in self.receive(idle_timeout=5, selector=selector):
move_buffer.append(EnqueueRequest(
message=message.body
))
if len(move_buffer) >= buffer_size:
destination_queue.enqueue_many(move_buffer)
move_buffer = []
if move_buffer:
destination_queue.enqueue_many(move_buffer)
def receive(self,
max: Optional[int] = None,
timeout: Optional[int] = None,
idle_timeout: Optional[int] = None,
selector: Optional[str] = None,
peek: Optional[bool] = False,
include_delayed: Optional[bool] = False):
"""
Receive messages until one of the following conditions is met
Args:
max: Receive messages until the [max] number of messages are received
timeout: Receive message until this timeout is reached
idle_timeout (seconds): Receive messages until the queue is idle for this amount of time
selector: JMS selector that can be applied to message headers. See https://activemq.apache.org/selector
peek: Set to True to disable the automatic ack of matched messages. Peeked messages will remain in the queue
include_delayed: Set to True to also return messages scheduled for delivery in the future
"""
self._connection.subscribe(f"/queue/{self._queue}",
id=self._connection_id,
ack="client",
selector=selector
)
if include_delayed:
browse_topic = f"topic/scheduled_{self._queue}_{rand_string(6)}"
schedule_selector = f"queue = '{self._queue}'"
if selector:
schedule_selector = f"{schedule_selector} AND ({selector})"
self._connection.subscribe(browse_topic,
id=self._connection_id,
ack="auto",
selector=schedule_selector
)
self._connection.send(
destination=f"ActiveMQ.Scheduler.Management",
headers={
"AMQ_SCHEDULER_ACTION": "BROWSE",
"JMSReplyTo": browse_topic
},
id=self._connection_id,
body=""
)
listen_start = time.time()
last_receive = time.time()
messages_received = 0
scanning = True
empty_receive = False
while scanning:
try:
message = self._messages.pop()
last_receive = time.time()
if not peek:
self._ack(message)
messages_received += 1
yield message
except IndexError:
empty_receive = True
time.sleep(0.1)
if max and messages_received >= max:
scanning = False
elif timeout and time.time() > listen_start + timeout:
scanning = False
elif empty_receive and idle_timeout and time.time() > last_receive + idle_timeout:
scanning = False
else:
scanning = True
self._connection.unsubscribe(id=self._connection_id)
def on_message(self, frame):
destination = frame.headers.get("original-destination", frame.headers.get("destination"))
schedule_id = frame.headers.get("scheduledJobId")
message = Message(
attributes=MessageAttributes(
id=frame.headers["message-id"],
schedule_id=schedule_id,
timestamp=frame.headers["timestamp"],
queue=destination.replace("/queue/", "")
),
body=frame.body
)
self._messages.append(message)
def _ack(self, message: Message):
"""
Deletes the message from the queue.
If the message has a schedule_id, also removes the associated scheduled job.
"""
if message.attributes.schedule_id:
self._connection.send(
destination=f"ActiveMQ.Scheduler.Management",
headers={
"AMQ_SCHEDULER_ACTION": "REMOVE",
"scheduledJobId": message.attributes.schedule_id
},
id=self._connection_id,
body=""
)
self._connection.ack(message.attributes.id, subscription=self._connection_id)
In order to remove specific messages you need to know the ID, which you can get via a browse of the scheduled messages. The only other option is to use the start and stop time options of the remove operation to remove all messages inside a given time range.
MessageProducer producer = session.createProducer(management);
Message request = session.createMessage();
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION, ScheduledMessage.AMQ_SCHEDULER_ACTION_REMOVEALL);
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_START_TIME, Long.toString(start));
request.setStringProperty(ScheduledMessage.AMQ_SCHEDULER_ACTION_END_TIME, Long.toString(end));
producer.send(request);
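Translated to the stomp.py client used in the question, the same time-ranged removal would look roughly like this (a sketch; ACTION_START_TIME and ACTION_END_TIME are the header names behind those ScheduledMessage constants, so verify them against your broker version):
# Remove every scheduled job whose fire time falls between start_millis and
# end_millis (both epoch milliseconds)
connection.send(
    destination="ActiveMQ.Scheduler.Management",
    headers={
        "AMQ_SCHEDULER_ACTION": "REMOVEALL",
        "ACTION_START_TIME": str(start_millis),
        "ACTION_END_TIME": str(end_millis),
    },
    body="")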
If that doesn't suit your need, I'm sure the project would welcome contributions.
Related
In the following Python program I am configuring RabbitMQ. I am creating an exchange named "order" and publishing 3 messages with routing keys "order.notify", "order.report", and "order.test".
import pika
import json
import uuid
con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()
channel.exchange_declare(
exchange='order',
exchange_type = 'direct'
)
channel.basic_publish(
exchange= 'order',
routing_key= 'order.notify',
body= json.dumps({'user_email' : 'First'})
#body= json.dumps({'user_email' : order['user_email']})
)
print('[x] Sent notify message')
channel.basic_publish(
exchange= 'order',
routing_key= 'order.report',
body= json.dumps({'user_email' : 'Second'})
)
print('[x] Sent report message')
channel.basic_publish(
exchange= 'order',
routing_key= 'order.test',
body= json.dumps({'user_email' : 'third'})
)
print('[x] Sent test message')
con.close()
Now on the consumer side I have created only 2 queues, with binding keys order.notify and order.report.
report.py
import pika
import json
con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()
queue = channel.queue_declare('order_notify')
queue_name = queue.method.queue
channel.queue_bind(
exchange='order',
queue=queue_name,
routing_key='order.report' #binding key
)
def callback(ch,method, properties, body):
payload = json.loads(body)
# print(' [x] Notifying {}' .format(payload['user_email']))
print('Report Queue')
print(payload['user_email'])
ch.basic_ack(delivery_tag= method.delivery_tag)
channel.basic_consume(on_message_callback= callback,queue=queue_name)
print(' [*] waiting for report messages. To exit press CTRL + C')
channel.start_consuming()
nortify.py
import pika
import json
con = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = con.channel()
queue = channel.queue_declare('order_notify')
queue_name = queue.method.queue
channel.queue_bind(
exchange='order',
queue=queue_name,
routing_key='order.notify' #binding key
)
def callback(ch,method, properties, body):
payload = json.loads(body)
# print(' [x] Notifying {}' .format(payload['user_email']))
print('Nortify Queue')
print(payload['user_email'])
ch.basic_ack(delivery_tag= method.delivery_tag)
channel.basic_consume(on_message_callback= callback,queue=queue_name)
print(' [*] waiting for report messages. To exit press CTRL + C')
channel.start_consuming()
Now, which queue will consume which message? I tried running it and the queues were consuming randomly. Can someone please explain?
I tried to run the above program but got random results: each queue consumed different messages on different runs.
My RabbitMQ consumer initially consumes very fast, but when I send messages after 8-9 hours it does not consume at all. I tried increasing the heartbeat from 30 to 600, keeping prefetch at 1, and acknowledging after processing, but I'm still facing the same issue. I'm using a blocking connection. Is there any other reason for this behaviour?
self.rabbit_mq_creds = self.rabbit_mq_credentials()
self.credentials = pika.PlainCredentials(username=xx,
password=xx)
self.parameters = pika.ConnectionParameters(host=xx,
port=xx,
virtual_host=xx,
credentials=self.credentials,
heartbeat=600,
retry_delay=180,
connection_attempts=3)
self.connection = pika.BlockingConnection(self.parameters)
self.channel = self.connection.channel()
self.input_queue = "input_queue"
self.channel.queue_declare(queue=self.input_queue, durable=True, arguments={"x-queue-type": "quorum"})
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(queue=self.input_queue, on_message_callback=self.on_request)
def on_request(self, ch, method, props, body):
temp = {}
try:
data = json.loads(body.decode('utf-8'))
# ///
# data processing
# ///
except Exception as k:
logger.error(f"Error: {k} \n\nTrace: {traceback.format_exc()}")
ch.basic_publish(exchange='', routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(temp))
ch.basic_ack(delivery_tag=method.delivery_tag)
def trigger_consumption(self):
self.channel.start_consuming()
I have a Flask app running with Celery in it. When a user sends a message to the app URI, it needs to be published to a specific RabbitMQ queue, but it is being sent to the default queue instead. How do I send a message to a specific, named queue?
publisher.py
from flask import Flask,request,jsonify
from celery import Celery,bootsteps
from time import sleep
from kombu.common import QoS
class NoChannelGlobalQoS(bootsteps.StartStopStep):
requires = {'celery.worker.consumer.tasks:Tasks'}
def start(self, c):
qos_global = False
c.connection.default_channel.basic_qos(0, c.initial_prefetch_count, qos_global)
def set_prefetch_count(prefetch_count):
return c.task_consumer.qos(
prefetch_count=prefetch_count,
apply_global=qos_global,
)
c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)
server = Flask(__name__)
broker_uri1="amqp://"
backend="mongodb+srv://"
app = Celery('TestApp', broker_uri=broker_uri1,backend=backend)
CELERY_TASK_ROUTES = {'app.tasks.*': {'queue': 'rabbit'},'app.task.*': {'queue': 'rabbit'}}
app.config_from_object('celeryconfig')
app.conf.task_default_exchange='rabbit'
app.conf.task_default_routing_key='rabbit'
app.steps['worker'].add(NoChannelGlobalQoS)
#server.route("/")
def print_hello():
return "Flask server working"
#server.route("/sync-reverse")
def reverse_by_worker_with_results():
input = request.args.get("text")
app.select_queues(queues="rabbit")
task = app.signature('tasks.reverse', kwargs={'text': input})
result = task.delay()
while result.status == 'PENDING':
print(result.status)
sleep(2)
print(result.status)
return "Reversed >><br />Input Text : " + input + "<br />Output Text : " + result.get()
#server.route("/async-reverse")
def reverse_by_worker():
input = request.args.get("text")
app.select_queues(queues="rabbit") #supposed to add queues to the task
task = app.signature('tasks.reverse', kwargs={'text': input})
result = task.delay()
if result.id:
return 'Task add'
else:
return 'Failure in adding task'
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
server.run(host= '0.0.0.0')
worker.py
from celery import Celery,bootsteps
from time import sleep
from kombu.common import QoS
broker_uri='amqp:///'
backend_uri="mongodb+srv://"
app = Celery('TestApp', broker=broker_uri, backend=backend_uri)
app.config_from_object('celeryconfig')
app.conf.task_default_exchange='rabbit'
app.conf.task_default_routing_key='rabbit'
class NoChannelGlobalQoS(bootsteps.StartStopStep):
requires = {'celery.worker.consumer.tasks:Tasks'}
def start(self, c):
qos_global = False
c.connection.default_channel.basic_qos(0, c.initial_prefetch_count, qos_global)
def set_prefetch_count(prefetch_count):
return c.task_consumer.qos(
prefetch_count=prefetch_count,
apply_global=qos_global,
)
c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)
app.steps['consumer'].add(NoChannelGlobalQoS)
@app.task
def reverse(text):
sleep(10)
return text[:-1]
I have used a Celery config file to define the queue name and type, as suggested:
from kombu import Queue
task_queues = [Queue(name="rabbit", queue_arguments={"x-queue-type": "quorum"})]
task_routes = {
'tasks.add': 'rabbit',
}
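One thing none of the snippets above does is name the queue at the moment the task is actually sent; Celery also lets you pick the queue per call through apply_async, which may be worth trying here (a sketch reusing the 'rabbit' queue and the tasks.reverse signature from the question; input_text is a placeholder):
# Route this particular call to the "rabbit" queue explicitly instead of
# relying on task_default_* settings or task_routes
task = app.signature('tasks.reverse', kwargs={'text': input_text})
result = task.apply_async(queue='rabbit')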
I have a setup where celery_result_backend is configured to 'amqp'. I can see my tasks getting executed by the worker in the logs, but a queue named after the task id is created whose status is expired, and I am not getting the result (result = AsyncResult(taskid); result.get() hangs). I tried all the supported backends:
1) MySQL: it does not put data into the Celery-created tables.
2) Redis: it does not put data into the db.
I have two CentOS machines.
1) On the first I call the delay method to send the task to RabbitMQ. The worker on the second machine listens on that queue, picks the task up and processes it (I can see the task in the queue and see it being executed by the worker on machine 2), but the result is never put into the backend. When I call result.get() here, it hangs.
2) The second machine runs the worker that executes the task. It executes the task, but I think it is not able to store the result.
Settings:
RABBITMQ_BROKER_HOST = '10.213.166.133'
RABBITMQ_BROKER_PORT = dqms_settings.RABBITMQ_BROKER_PORT
RABBITMQ_BROKER_VHOST = dqms_settings.RABBITMQ_BROKER_VHOST
RABBITMQ_BROKER_USERNAME = dqms_settings.RABBITMQ_BROKER_USERNAME
RABBITMQ_BROKER_PASSWORD = dqms_settings.RABBITMQ_BROKER_PASSWORD
BROKER_URL = 'amqp://%s:%s@%s:%s/%s' % (RABBITMQ_BROKER_USERNAME,
RABBITMQ_BROKER_PASSWORD,
RABBITMQ_BROKER_HOST,
RABBITMQ_BROKER_PORT,
RABBITMQ_BROKER_VHOST)
#CELERY_TASK_RESULT_EXPIRES = 18000
#CELERY_IGNORE_RESULT = True
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
#CELERY_RESULT_BACKEND = 'db+mysql://svcacct-dqms:s3cretP#ssw0rd#10.213.166.202:3306/dqms'
#CELERY_RESULT_BACKEND = 'amqp'
#CELERY_AMQP_TASK_RESULT_EXPIRES = 1000
#CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE
CELERYD_PREFETCH_MULTIPLIER = dqms_settings.CELERYD_PREFETCH_MULTIPLIER
CELERY_DEFAULT_QUEUE = dqms_settings.CELERY_DEFAULT_QUEUE
CELERY_DEFAULT_EXCHANGE_TYPE = dqms_settings.CELERY_DEFAULT_EXCHANGE_TYPE
CELERY_DEFAULT_ROUTING_KEY = dqms_settings.CELERY_DEFAULT_ROUTING_KEY
CELERY_QUEUES = dqms_settings.CELERY_QUEUES
CELERY_ROUTES = dqms_settings.CELERY_ROUTES
CELERYD_HIJACK_ROOT_LOGGER = dqms_settings.CELERYD_HIJACK_ROOT_LOGGER
CELERY_ACKS_LATE = dqms_settings.CELERY_ACKS_LATE
CELERY_RESULT_BACKEND = 'redis://:s3cretP#ssw0rd#10.213.166.204:6379/5' #'djcelery.backends.database.DatabaseBackend'
#CELERY_REDIS_MAX_CONNECTIONS = 6
#CELERY_ALWAYS_EAGER = False
Can someone help me understand why the result is not being put into the backend?
This is an issue which is happening quite commonly now.
Setting CELERY_ALWAYS_EAGER to True will do the work.
However, this is not the best solution in a production scenario.
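For completeness, that workaround is a single setting; it makes tasks execute locally and synchronously, so results are available immediately without any broker or backend round-trip:
# Debug/workaround only: run tasks in-process instead of dispatching to workers
CELERY_ALWAYS_EAGER = True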
I'm trying to publish messages with pika, using Celery tasks.
from celery import shared_task
from django.conf import settings
import json
import pika  # needed for pika.URLParameters / BlockingConnection below

@shared_task
def publish_message():
params = pika.URLParameters(settings.BROKER_URL + '?' + 'socket_timeout=10&' + 'connection_attempts=2')
conn = pika.BlockingConnection(parameters=params)
channel = conn.channel()
channel.exchange_declare(
exchange = 'foo',
type='topic'
)
channel.tx_select()
channel.basic_publish(
exchange = 'foo',
routing_key = 'bar',
body = json.dumps({'foo':'bar'}),
properties = pika.BasicProperties(content_type='application/json')
)
channel.tx_commit()
conn.close()
This task is called from the views.
For some weird reason, messages are sometimes randomly not getting queued. In my case, every second message is getting dropped. What am I missing here?
I would recommend that you enable confirm_delivery in pika. This will ensure that messages get delivered properly; if for some reason a message could not be delivered, pika will fail with either an exception or a False return value.
channel.confirm_delivery()
successful = channel.basic_publish(...)
If the process fails you can try to send the message again, or log the error message from the exception so that you can act accordingly.
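Put together with the publish from the question, that pattern looks roughly like this (a sketch; older pika versions signal failure through the return value of basic_publish, while newer ones raise an exception from pika.exceptions instead):
channel.confirm_delivery()
try:
    ok = channel.basic_publish(
        exchange='foo',
        routing_key='bar',
        body=json.dumps({'foo': 'bar'}),
        properties=pika.BasicProperties(content_type='application/json'))
    if ok is False:  # unconfirmed publish on older pika
        pass  # retry the publish or log it here
except pika.exceptions.AMQPError:  # newer pika raises instead of returning False
    raise  # or log and retry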
Try this:
channel = conn.channel()
try:
    channel.queue_declare(queue='foo')
except Exception:
    pass
channel.basic_publish(
    exchange='',
    routing_key='foo',
    body=json.dumps({'foo': 'bar'})
)