RabbitMQ Exchanges vs Queues

I'm trying to do some basic testing with AMQP using RabbitMQ. I wrote a simple Python script with pika to create an AMQP client. I first sent messages directly to a queue. This worked well, and I was able to monitor performance with the RabbitMQ web management tool as well as with "rabbitmqctl list_queues". I next tried to send messages to an exchange using multiple queues, with the receiver listening on all of them. I'm confused because when I use the exchange I can no longer see the queues in the management tool or with rabbitmqctl. How do I monitor queues when using exchanges?
###########################################
queue send code:
###########################################
channel.queue_declare(queue='hello')

msgCnt = 1
while True:
    msg = "'Hello World! Msg Count: " + str(msgCnt) + "'"
    channel.basic_publish(exchange='',
                          routing_key='hello',
                          body=msg)
    print(" [x] Sent " + msg)
    msgCnt = msgCnt + 1
    time.sleep(1)
###########################################
exchange send code:
###########################################
queue = sys.argv[1]  # passing test.0001, test.0002 in different clients.

channel.exchange_declare(exchange='test', type='topic')

msgCnt = 1
while True:
    msg = "'Hello World! Msg Count: " + str(msgCnt) + "'"
    channel.basic_publish(exchange='test',
                          routing_key=queue,
                          body=msg)
    print(" [x] Sent " + msg)
    msgCnt = msgCnt + 1
    time.sleep(1)
###########################################
consumer code:
###########################################
channel.exchange_declare(exchange='test',
                         type='topic')

result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue

binding_keys = sys.argv[1:]
for binding_key in binding_keys:
    channel.queue_bind(exchange='test',
                       queue=queue_name,
                       routing_key=binding_key)

def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)

channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)

print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
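A minimal sketch (not part of the original code; queue names are illustrative): if the consumer declares named, durable queues instead of an anonymous exclusive one, those queues show up by name in the management tool and in "rabbitmqctl list_queues". The anonymous exclusive queue above only appears under an auto-generated amq.gen-* name and disappears when its connection closes.
###########################################
named queue sketch (assumption, not original code):
###########################################
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='test', exchange_type='topic')  # 'type=' in older pika versions

# Named queues are listed by "rabbitmqctl list_queues" and the management UI
# under their own names, so they are easy to monitor.
for queue_name in ('test.0001', 'test.0002'):
    channel.queue_declare(queue=queue_name, durable=True)
    channel.queue_bind(exchange='test', queue=queue_name, routing_key=queue_name)

connection.close()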

Related

Telegram bot not responding after uploading files and database to Heroku

I'm a beginner in Python. Below is my Python code for a Telegram bot. It works on XAMPP, but I would like to host the bot in the cloud so that there's no need to start XAMPP's Apache & MySQL every time I want to use the bot. However, it's not working after being uploaded to Heroku. May I know how I can fix this? Thank you in advance.
Modified for uploading to Heroku
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import os
import mysql.connector
from typing import Dict
from telegram import ReplyKeyboardMarkup, Update, ReplyKeyboardRemove
from telegram.ext import (
    Updater,
    CommandHandler,
    MessageHandler,
    Filters,
    ConversationHandler,
    CallbackContext,
)

PORT = int(os.environ.get('PORT', 5000))

# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

mydb = mysql.connector.connect(
    host='us-cdbr-east-05.cleardb.net',
    user='b081bd520f9623',
    passwd='557dad71',
    database='heroku_26b1a208f24f1fa')
query = mydb.cursor()

logger = logging.getLogger(__name__)

TOKEN = '5333685233:AAFr4-1nB6_I8ZMdt25Y4zBotHRA9I_qtMI'

# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
    """Send a message when the command /start is issued."""
    update.message.reply_text('Hi! This is start')

def help(update, context):
    """Send a message when the command /help is issued."""
    update.message.reply_text('Help!')

def sql(update, context):
    sql = "SELECT nama_item, jumlah_dalam_kg FROM data_penjualan_harian WHERE nama_item = 'Lemon'"
    query.execute(sql)
    sql_result = query.fetchall()
    pesan_balasan = ''
    for x in sql_result:
        pesan_balasan = pesan_balasan + str(x) + '\n'
    # tidy up the bot's reply
    # remove the quotation marks
    pesan_balasan = pesan_balasan.replace("'", "")
    # remove the parentheses
    pesan_balasan = pesan_balasan.replace("(", "")
    pesan_balasan = pesan_balasan.replace(")", "")
    # remove the commas
    pesan_balasan = pesan_balasan.replace(",", "")
    update.message.reply_text(pesan_balasan)

def main():
    updater = Updater(TOKEN, use_context=True)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))
    # on noncommand i.e message - echo the message on Telegram
    dp.add_handler(MessageHandler(Filters.text, echo))
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_webhook(listen="0.0.0.0",
                          port=PORT,
                          url_path=TOKEN,
                          webhook_url='https://powerful-lowlands-14039.herokuapp.com/' + TOKEN)
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()

if __name__ == '__main__':
    main()
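One thing worth noting about the posted code: main() registers an echo message handler and an error handler, but no echo or error function is defined anywhere, so the script would raise a NameError as soon as main() runs. A minimal sketch of the missing handlers (not part of the original post), modeled on the standard python-telegram-bot v13 echobot example and reusing the logger defined above:

def echo(update, context):
    """Echo the user's message back (sketch of the missing handler)."""
    update.message.reply_text(update.message.text)

def error(update, context):
    """Log errors caused by updates (sketch of the missing handler)."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)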

Celery consumer (only) with an external producer

I'm using Celery 4.4.7 with Redis as my message broker.
I want to use Celery as a consumer only, since the external producer is a Java application.
The Java application pushes messages to a channel on Redis, but my Celery application is not picking up the messages.
I have simulated the Java producer in Python using redis-py (redis_producer.py) to publish to a channel. redis_consumer.py is able to pick up the messages from the producer, but my celery_consumer.py seems to be blind to them.
Messages from redis_producer.py are picked up by redis_consumer.py, but not by Celery.
Messages from kombu_producer.py are picked up by the Celery worker, but not by my redis_consumer.py.
redis_producer.py
import json

import redis

r = redis.Redis(host='localhost', port=6379)

for i in range(10):
    body = {
        'id': i,
        'message': f'Hello {i}',
    }
    r.publish(channel='redis.test.topic', message=json.dumps(body))
redis_consumer.py
import json
import multiprocessing
import os
import signal

import redis

redis_conn = redis.Redis(charset='utf-8', decode_responses=True)

def sub(name: str):
    pubsub = redis_conn.pubsub()
    pubsub.subscribe('redis.test.topic')
    for message in pubsub.listen():
        print(message)
        if message.get('type') == 'message':
            data = message.get('data')
            print('%s: %s' % (name, data))

def on_terminate(signum, stack):
    wait_for_current_scp_operation()

if __name__ == '__main__':
    multiprocessing.Process(target=sub, args=('consumer',)).start()
    signal.signal(signal.SIGTERM, on_terminate)
celery_consumer.py
import os
import ssl

from celery import Celery
from celery import shared_task
from celery.utils.log import get_task_logger
from kombu import Exchange
from kombu import Queue

logger = get_task_logger(__name__)

# Celery broker URL
broker_url = os.environ.get('CELERY_BROKER_URL', None)
if broker_url is None:
    broker_url = 'redis://localhost:6379/0'

# store to use for task results, default=None
result_backend = os.environ.get('CELERY_RESULT_BACKEND', None)
if result_backend is None:
    result_backend = 'redis://localhost:6379/0'

config = dict(
    broker_url=broker_url,
    result_backend=result_backend,
    # maximum number of connections that can be open in the connection pool
    broker_pool_limit=20,
    broker_transport_options={
        'visibility_timeout': 3600,
        'confirm_publish': True,
    },
    # Serializer method
    task_serializer='json',
    result_serializer='json',
    accept_content=['json'],
    # The two settings below reserve just one task at a time
    # Task acknowledgement mode, default=False
    task_acks_late=True,
    # How many messages to prefetch, default=4, value=1 disables prefetch
    worker_prefetch_multiplier=1,
    # Dates and times in messages will be converted to use the UTC timezone
    timezone='UTC',
    enable_utc=True,
    # if true, store the task return values
    task_ignore_result=False,
    # if true, result messages will be persistent, messages won't be lost after a broker restart
    result_persistent=False,
    # Celery task result expiry (in secs), default=1d, value=0/None never expire
    result_expires=900,
    # Message compression setting
    task_compression='gzip',
    # Task execution marker, default=False
    task_track_started=True,
    # rate limits on tasks (disable all rate limits, even if tasks have explicit rate limits set)
    worker_disable_rate_limits=True,
    # if True, all tasks will be executed locally by blocking until the task returns
    task_always_eager=False,
    # Send events so the worker can be monitored by tools like celerymon
    worker_send_task_events=False,
    # Expiry time in seconds for when a monitor client's event queue will be deleted, default=never
    event_queue_expires=60,
    # Default queue, exchange, routing keys configuration
    # task_default_queue = 'default.queue',
    # task_default_exchange = 'default.exchange',
    # task_default_exchange_type='topic',
    # task_default_routing_key = 'default.route',
    # task_create_missing_queues = True
    task_queues=(
        # Default configuration
        Queue('redis.test.topic',
              Exchange('redis.test.topic'),
              routing_key='redis.test.topic'),
    ),
)

def create_celery_app() -> Celery:
    logger.info('Initializing Celery...')
    celery_app = Celery(name=__name__)
    celery_app.config_from_object(config)
    return celery_app

# create a celery app
app = create_celery_app()

@app.task(name='task_process_message', bind=True, max_retries=3)
def task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))

@shared_task(name='shared_task_process_message', bind=True, max_retries=3)
def shared_task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: shared_task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing shared_task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))
kombu_producer.py
import json

from kombu import Producer, Consumer, Queue, Connection

redis_url = 'redis://localhost:6379/0'
conn = Connection(redis_url)
producer = Producer(conn.channel())

channel = 'redis.test.topic'
for i in range(10):
    body = {
        'task': 'task_process_message',
        'id': f'{i}',
        'kwargs': {'message': f'Hello {i}',
                   }
    }
    producer.publish(body=body, routing_key='redis.test.topic')
One screenshot (not reproduced here) shows activity on Redis using the plain Redis producer/consumer; a second shows activity on Redis while running the Kombu producer and the Celery consumer.
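A side note, not part of the original question: with the Redis transport, Kombu/Celery consume task messages from a Redis list named after the queue (via BRPOP), not from a Redis pub/sub channel, so a plain PUBLISH never reaches the worker. A minimal sketch of an external producer that enqueues messages in a format the worker above understands, reusing the app and task name from celery_consumer.py (assumes that module is importable):

# external_producer_sketch.py -- illustrative only
from celery_consumer import app

# send_task() wraps the arguments in Celery's task message protocol and pushes
# the message onto the 'redis.test.topic' queue (a Redis list), which is what
# the worker reads; a PUBLISH to a channel of the same name is ignored.
for i in range(10):
    app.send_task('task_process_message',
                  kwargs={'message': f'Hello {i}'},
                  queue='redis.test.topic')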

Google Cloud IoT - Multiple config messages

In my code I'm subscribing to 3 different topics:
/devices/{}/config
/devices/{}/events
/devices/{}/state
If I'm not subscribed to /devices/{}/config I don't receive any config message, and that's fine. But if I am subscribed to /devices/{}/config, I get one config message for each subscription I have.
Example:
Subscribed to "/devices/{}/config" and "/devices/{}/events", I get 2 config messages.
Subscribed to "/devices/{}/config" and "/devices/{}/state", I get 2 config messages.
Subscribed to "/devices/{}/config", "/devices/{}/state" and "/devices/{}/events", I get 3 config messages.
Subscribed to "/devices/{}/events" and "/devices/{}/state", I get 0 config messages.
This causes an error in IoT Core (MQTT):
The device "xxxxxxxx" could not be updated. Device state can be updated only once every 1s.
In fact I want and need just one config message. What am I doing wrong?
This is my code:
# [START iot_mqtt_includes]
import argparse
import datetime
import os
import random
import ssl
import time

import log
import updateConfig
import jwt
import paho.mqtt.client as mqtt
import payload
# [END iot_mqtt_includes]

# The initial backoff time after a disconnection occurs, in seconds.
minimum_backoff_time = 1

# The maximum backoff time before giving up, in seconds.
MAXIMUM_BACKOFF_TIME = 32

# Whether to wait with exponential backoff before publishing.
should_backoff = False

# [START iot_mqtt_jwt]
def create_jwt(project_id, private_key_file, algorithm):
    token = {
        # The time that the token was issued at
        'iat': datetime.datetime.utcnow(),
        # The time the token expires.
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
        # The audience field should always be set to the GCP project id.
        'aud': project_id
    }
    # Read the private key file.
    with open(private_key_file, 'r') as f:
        private_key = f.read()
        f.close()
    return jwt.encode(token, private_key, algorithm=algorithm)
# [END iot_mqtt_jwt]
# [START iot_mqtt_config]
def error_str(rc):
    """Convert a Paho error to a human readable string."""
    return '{}: {}'.format(rc, mqtt.error_string(rc))

def on_connect(unused_client, unused_userdata, unused_flags, rc):
    """Callback for when a device connects."""
    log.append_log('ao_conectar - ' + mqtt.connack_string(rc))
    # After a successful connect, reset backoff time and stop backing off.
    global should_backoff
    global minimum_backoff_time
    should_backoff = False
    minimum_backoff_time = 1

def on_disconnect(unused_client, unused_userdata, rc):
    """Paho callback for when a device disconnects."""
    log.append_log('ao_desconectar - ' + error_str(rc))
    # Since a disconnect occurred, the next loop iteration will wait with
    # exponential backoff.
    global should_backoff
    should_backoff = True

def on_publish(unused_client, unused_userdata, unused_mid):
    """Paho callback when a message is sent to the broker."""

def on_message(unused_client, unused_userdata, message):
    """Callback when the device receives a message on a subscription."""
    config = str(message.payload)
    retorno = updateConfig.update(config)

def get_client(
        project_id, cloud_region, registry_id, device_id, private_key_file,
        algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
    """Create our MQTT client. The client_id is a unique string that identifies
    this device. For Google Cloud IoT Core, it must be in the format below."""
    client = mqtt.Client(
        client_id=('projects/{}/locations/{}/registries/{}/devices/{}'
                   .format(
                       project_id,
                       cloud_region,
                       registry_id,
                       device_id)))

    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    client.username_pw_set(
        username='unused',
        password=create_jwt(
            project_id, private_key_file, algorithm))

    # Enable SSL/TLS support.
    client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)

    # Register message callbacks. https://eclipse.org/paho/clients/python/docs/
    # describes additional callbacks that Paho supports. In this example, the
    # callbacks just print to standard out.
    client.on_connect = on_connect
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect
    client.on_message = on_message

    # Connect to the Google MQTT bridge.
    client.connect(mqtt_bridge_hostname, mqtt_bridge_port)

    # This is the topic that the device will receive configuration updates on.
    mqtt_config_topic = '/devices/{}/config'.format(device_id)

    # Subscribe to the config topic.
    client.subscribe(mqtt_config_topic, qos=1)

    return client
# [END iot_mqtt_config]
def parse_command_line_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description=(
        'Example Google Cloud IoT Core MQTT device connection code.'))
    parser.add_argument(
        '--project_id',
        default=os.environ.get('GOOGLE_CLOUD_PROJECT'),
        help='GCP cloud project name')
    parser.add_argument(
        '--registry_id', required=True, help='Cloud IoT Core registry id')
    parser.add_argument(
        '--device_id', required=True, help='Cloud IoT Core device id')
    parser.add_argument(
        '--private_key_file',
        required=True, help='Path to private key file.')
    parser.add_argument(
        '--algorithm',
        choices=('RS256', 'ES256'),
        required=True,
        help='Which encryption algorithm to use to generate the JWT.')
    parser.add_argument(
        '--cloud_region', default='us-central1', help='GCP cloud region')
    parser.add_argument(
        '--ca_certs',
        default='roots.pem',
        help=('CA root from https://pki.google.com/roots.pem'))
    parser.add_argument(
        '--message_type',
        choices=('event', 'state'),
        default='event',
        help=('Indicates whether the message to be published is a '
              'telemetry event or a device state message.'))
    parser.add_argument(
        '--mqtt_bridge_hostname',
        default='mqtt.googleapis.com',
        help='MQTT bridge hostname.')
    parser.add_argument(
        '--mqtt_bridge_port',
        choices=(8883, 443),
        default=8883,
        type=int,
        help='MQTT bridge port.')
    parser.add_argument(
        '--jwt_expires_minutes',
        default=20,
        type=int,
        help=('Expiration time, in minutes, for JWT tokens.'))
    return parser.parse_args()
# [START iot_mqtt_run]
def main():
    log.append_log("Iniciando uma nova conexao com o Google IoT.")
    global minimum_backoff_time
    args = parse_command_line_args()

    # Publish to the events or state topic based on the flag.
    jwt_iat = datetime.datetime.utcnow()
    jwt_exp_mins = args.jwt_expires_minutes
    client = get_client(
        args.project_id, args.cloud_region, args.registry_id, args.device_id,
        args.private_key_file, args.algorithm, args.ca_certs,
        args.mqtt_bridge_hostname, args.mqtt_bridge_port)

    # Publish num_messages messages to the MQTT bridge once per second.
    while True:
        # Process network events.
        client.loop()

        # Wait if backoff is required.
        if should_backoff:
            # If backoff time is too large, give up.
            if minimum_backoff_time > MAXIMUM_BACKOFF_TIME:
                log.append_log('Tempo maximo de backoff excedido. Desistindo.')
                break

            # Otherwise, wait and connect again.
            delay = minimum_backoff_time + random.randint(0, 1000) / 1000.0
            log.append_log('Esperando {} segundos antes de reconectar.'.format(delay))
            time.sleep(delay)
            minimum_backoff_time *= 2
            client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)

        # [START iot_mqtt_jwt_refresh]
        seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
        if seconds_since_issue > 60 * jwt_exp_mins:
            log.append_log('Atualizando token de acesso depois de {} segundos'.format(seconds_since_issue))
            client.loop_stop()
            jwt_iat = datetime.datetime.utcnow()
            client = get_client(
                args.project_id, args.cloud_region,
                args.registry_id, args.device_id, args.private_key_file,
                args.algorithm, args.ca_certs, args.mqtt_bridge_hostname,
                args.mqtt_bridge_port)
        # [END iot_mqtt_jwt_refresh]

        # Publish "payload" to the MQTT topic. qos=1 means at least once
        # delivery. Cloud IoT Core also supports qos=0 for at most once
        # delivery.
        payloadToPublish = payload.lerPayload()
        if payloadToPublish != 'sem payload':
            if payloadToPublish[0] == 'event':
                mqtt_topic = '/devices/{}/{}'.format(args.device_id, 'events')
                log.append_log('publicando [' + payloadToPublish[1] + ']')
            else:
                mqtt_topic = '/devices/{}/{}'.format(args.device_id, 'state')
            client.publish(mqtt_topic, payloadToPublish[1], qos=1)
# [END iot_mqtt_run]

if __name__ == '__main__':
    main()
This is the expected behavior:
The events and state topics are used for device-to-cloud communication, while the config topic is used for sending configuration data to an IoT device. Subscribing to the events/state topics is effectively a no-op.
Cloud IoT Core only supports up to QoS 1, which is "at least once" message transmission. You could try QoS 0, which does not retry transmission until an ACK, but I don't think that is what you want.
STATE and CONFIG transmission is limited to a single message per second; transmitting configuration changes faster than this will cause the error you're seeing.
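To illustrate the point above with a sketch (not part of the original code): the device only needs a single subscription, to its config topic; events and state are publish-only from the device side, so subscribing to them is the no-op mentioned above.

def subscribe_config_only(client, device_id):
    """The one subscription a Cloud IoT Core device needs (sketch)."""
    client.subscribe('/devices/{}/config'.format(device_id), qos=1)

def publish_event(client, device_id, payload):
    """Telemetry is publish-only from the device; no subscription required."""
    client.publish('/devices/{}/events'.format(device_id), payload, qos=1)

def publish_state(client, device_id, payload):
    """Device state is also publish-only from the device side."""
    client.publish('/devices/{}/state'.format(device_id), payload, qos=1)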

How do I send a delayed message in RabbitMQ using the rabbitmq-delayed-message-exchange plugin?

I have enabled the plugin using:
rabbitmq-plugins enable rabbitmq_delayed_message_exchange
I tried to create a delayed exchange, attach an x-delay header with 5000 ms as an int value, and bind it to a queue, but it didn't work.
So I tried it using pika in Python:
import pika

credentials = pika.PlainCredentials('admin', 'admin')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(pika.ConnectionParameters(host='127.0.0.1', port=5673, credentials=credentials))
channel = connection.channel()
#channel.exchange_declare(exchange='x-delayed-type', type='direct')
channel.exchange_declare("test-exchange", type="x-delayed-message",
                         arguments={"x-delayed-type": "direct"},
                         durable=True, auto_delete=True)
channel.queue_declare(queue='task_queue', durable=True)
channel.queue_bind(queue="task_queue", exchange="test-exchange", routing_key="task_queue")
for i in range(0, 100):
    channel.basic_publish(exchange='test-exchange', routing_key='task_queue',
                          body='gooogle',
                          properties=pika.BasicProperties(headers={"x-delay": 5000}, delivery_mode=1))
    print i
How can I get the delayed exchange to actually delay messages?
Error report:
ERROR REPORT==== 10-Mar-2017::13:08:09 ===
Error on AMQP connection <0.683.0> (127.0.0.1:42052 -> 127.0.0.1:5673, vhost: '/', user: 'admin', state: running), channel 1:
{{{undef,
[{erlang,system_time,[milli_seconds],[]},
{rabbit_delayed_message,internal_delay_message,4,
[{file,"src/rabbit_delayed_message.erl"},{line,179}]},
{rabbit_delayed_message,handle_call,3,
[{file,"src/rabbit_delayed_message.erl"},{line,122}]},
{gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,585}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]},
{gen_server,call,
[rabbit_delayed_message,
{delay_message,
{exchange,
{resource,<<"/">>,exchange,<<"test-exchange">>},
'x-delayed-message',true,true,false,
[{<<"x-delayed-type">>,longstr,<<"direct">>}],
undefined,undefined,
{[],[]}},
{delivery,false,false,<0.691.0>,
{basic_message,
{resource,<<"/">>,exchange,<<"test-exchange">>},
[<<"task_queue">>],
{content,60,
{'P_basic',undefined,undefined,
[{<<"x-delay">>,signedint,5000}],
1,undefined,undefined,undefined,undefined,
undefined,undefined,undefined,undefined,undefined,
undefined},
<<48,0,0,0,0,13,7,120,45,100,101,108,97,121,73,0,0,19,
136,1>>,
rabbit_framing_amqp_0_9_1,
[<<"gooogle">>]},
<<80,125,217,116,181,47,214,41,203,179,7,85,150,76,35,2>>,
false},
undefined,noflow},
5000},
infinity]}},
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{rabbit_exchange_type_delayed_message,route,2,
[{file,"src/rabbit_exchange_type_delayed_message.erl"},{line,53}]},
{rabbit_exchange,route1,3,[{file,"src/rabbit_exchange.erl"},{line,381}]},
{rabbit_exchange,route,2,[{file,"src/rabbit_exchange.erl"},{line,371}]},
{rabbit_channel,handle_method,3,
[{file,"src/rabbit_channel.erl"},{line,949}]},
{rabbit_channel,handle_cast,2,[{file,"src/rabbit_channel.erl"},{line,457}]},
{gen_server2,handle_msg,2,[{file,"src/gen_server2.erl"},{line,1032}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]}
Here is workable code with RabbitMQ 3.7.7:
send.py
#!/usr/bin/env python
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

#channel.exchange_declare(exchange='direct_logs',
#                         exchange_type='direct')
#channel.exchange_declare("test-exchange", type="x-delayed-message", arguments={"x-delayed-type":"direct"}, durable=True, auto_delete=True)
channel.exchange_declare(exchange='test-exchange',
                         exchange_type='x-delayed-message',
                         arguments={"x-delayed-type": "direct"})

severity = sys.argv[1] if len(sys.argv) > 2 else 'info'
message = ' '.join(sys.argv[2:]) or 'Hello World!'
channel.basic_publish(exchange='test-exchange',
                      routing_key=severity,
                      properties=pika.BasicProperties(
                          headers={'x-delay': 5000}  # Add a key/value header
                      ),
                      body=message)
print(" [x] Sent %r:%r" % (severity, message))
connection.close()
receive.py
#!/usr/bin/env python
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='test-exchange',
                         exchange_type='x-delayed-message',
                         arguments={"x-delayed-type": "direct"})

result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue

binding_keys = sys.argv[1:]
if not binding_keys:
    sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
    sys.exit(1)

for binding_key in binding_keys:
    channel.queue_bind(exchange='test-exchange',
                       queue=queue_name,
                       routing_key=binding_key)

print(' [*] Waiting for logs. To exit press CTRL+C')

def callback(ch, method, properties, body):
    print(" [x] %r:%r" % (method.routing_key, body))

channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
channel.start_consuming()
python send.py error aaaabbbb
python receive.py error
[*] Waiting for logs. To exit press CTRL+C
[x] 'error':'aaaabbbb'

Can Kombu do publish and subscribe to multiple consumers?

I'm using Kombu with RabbitMQ to implement a classic publish/subscribe design pattern. I have created a producer that declares a topic exchange:
from kombu import Connection, Exchange, Queue

media_exchange = Exchange('media', 'topic', durable=False)
video_queue = Queue('video', exchange=media_exchange, routing_key='video')

with Connection('amqp://guest:guest@localhost//') as conn:
    producer = conn.Producer(serializer='json')
    producer.publish('Hello World!',
                     exchange=media_exchange, routing_key='video',
                     declare=[video_queue])
I then created a consumer to consume from the publisher:
from kombu import Connection, Exchange, Queue

media_exchange = Exchange('media', type='topic', durable=False)
video_queue = Queue('video', exchange=media_exchange, routing_key='video')

def process_media(body, message):
    print(body)
    #message.ack()

with Connection('amqp://guest:guest@localhost//') as conn:
    with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
        # Process messages and handle events on all channels
        while True:
            conn.drain_events()
I then launch two consumers, each in a separate terminal; both wait for a message:
terminal 1: python consumer.py
terminal 2: python consumer.py
When I run the producer, only one consumer receives the message.
The producer publishes to an exchange, not to a queue. The queues are defined by the consumers. When each consumer uses a different queue name, all of them get the message.
When many consumers share the same queue, the messages are load-balanced between them; that's why only one of your consumers gets the message.
To clarify, messages in a queue are 'consumed': the first consumer consumes a message and it is no longer in the queue, which is why the second consumer gets nothing.
To have two separate consumers receive the same message, use two separate queues, i.e.
video_queue1 and video_queue2, declared and bound to the exchange media_exchange using the same routing key video.
producer.py
from kombu import Connection, Exchange, Queue

media_exchange = Exchange('media', 'topic', durable=False)
video_queue1 = Queue('video1', exchange=media_exchange, routing_key='video')
video_queue2 = Queue('video2', exchange=media_exchange, routing_key='video')

with Connection('amqp://guest:guest@localhost//') as conn:
    producer = conn.Producer(serializer='json')
    producer.publish('Hello World!',
                     exchange=media_exchange, routing_key='video',
                     declare=[video_queue1, video_queue2])
consumer1.py
from kombu import Connection, Exchange, Queue

media_exchange = Exchange('media', type='topic', durable=False)
video_queue = Queue('video1', exchange=media_exchange, routing_key='video')

def process_media(body, message):
    print(body)
    #message.ack()

with Connection('amqp://guest:guest@localhost//') as conn:
    with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
        # Process messages and handle events on all channels
        while True:
            conn.drain_events()
consumer2.py
from kombu import Connection, Exchange, Queue

media_exchange = Exchange('media', type='topic', durable=False)
video_queue = Queue('video2', exchange=media_exchange, routing_key='video')

def process_media(body, message):
    print(body)
    #message.ack()

with Connection('amqp://guest:guest@localhost//') as conn:
    with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
        # Process messages and handle events on all channels
        while True:
            conn.drain_events()