Extract runtime and time of completion from a Celery task (RabbitMQ broker)

After running a celery task, I get the following message piped to stdout:
[2015-11-22 21:08:52,158: INFO/MainProcess] Task jobs.tasks.some_task[9c37f17b-dec4-4cb0-ab2b-fb5e997e430a]
succeeded in 2.9128301960008685s: 0.9731072908536255
I would like to programmatically extract the completion time (here: 2015-11-22 21:08:52) and the runtime (2.9128301960008685s) and display these to the user.
How can these values be extracted from the AsyncResult?
Broker is RabbitMQ.

Use Celery task events to get the task runtime and time of completion. For more details about task events, see the monitoring section of the Celery documentation.
from celery import Celery
import datetime

taskId_startTime = {}
taskId_addedTime = {}

def my_monitor():
    app = Celery('vwadaptor',
                 broker='redis://workerdb:6379/0',
                 backend='redis://workerdb:6379/0')
    state = app.events.State()

    def announce_task_succeeded(event):
        # Update the in-memory state with the incoming event,
        # then look the task up by its id.
        state.event(event)
        task = state.tasks.get(event['uuid'])
        print("runtime: ", task.runtime)
        print("time of completion: ", datetime.datetime.now())

    with app.connection() as connection:
        recv = app.events.Receiver(connection, handlers={
            'task-succeeded': announce_task_succeeded,
        })
        recv.capture(limit=None, timeout=None, wakeup=True)

my_monitor()
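Note that the task-succeeded event itself already carries runtime and timestamp fields, so the completion time can be read from the event rather than taken as datetime.now() on the monitor host. A minimal variant of the nested handler above (same surrounding code; field names per the Celery task-events docs):

    def announce_task_succeeded(event):
        state.event(event)
        # 'runtime' is seconds spent executing in the pool;
        # 'timestamp' is a unix timestamp set by the worker.
        completed = datetime.datetime.fromtimestamp(event['timestamp'])
        print("runtime: ", event['runtime'])
        print("time of completion: ", completed)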

Related

Telegram bot not responding after uploading files and database to Heroku

I'm a beginner at Python. Below is my Python code for a Telegram bot. It works on XAMPP, but I would like to host the bot in the cloud so that I don't need to start XAMPP's Apache & MySQL every time I want to use the bot. However, it doesn't work after being uploaded to Heroku. How can I fix this? Thank you in advance.
Modified for uploading to Heroku
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import os
import mysql.connector
from typing import Dict
from telegram import ReplyKeyboardMarkup, Update, ReplyKeyboardRemove
from telegram.ext import (
    Updater,
    CommandHandler,
    MessageHandler,
    Filters,
    ConversationHandler,
    CallbackContext,
)

PORT = int(os.environ.get('PORT', 5000))

# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

mydb = mysql.connector.connect(
    host='us-cdbr-east-05.cleardb.net',
    user='b081bd520f9623',
    passwd='557dad71',
    database='heroku_26b1a208f24f1fa')
query = mydb.cursor()

logger = logging.getLogger(__name__)

TOKEN = '5333685233:AAFr4-1nB6_I8ZMdt25Y4zBotHRA9I_qtMI'

# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
    """Send a message when the command /start is issued."""
    update.message.reply_text('Hi! This is start')

def help(update, context):
    """Send a message when the command /help is issued."""
    update.message.reply_text('Help!')

def sql(update, context):
    sql = "SELECT nama_item, jumlah_dalam_kg FROM data_penjualan_harian WHERE nama_item = 'Lemon'"
    query.execute(sql)
    sql_result = query.fetchall()
    pesan_balasan = ''
    for x in sql_result:
        pesan_balasan = pesan_balasan + str(x) + '\n'
    # tidy up the bot's reply
    # remove quotation marks
    pesan_balasan = pesan_balasan.replace("'", "")
    # remove parentheses
    pesan_balasan = pesan_balasan.replace("(", "")
    pesan_balasan = pesan_balasan.replace(")", "")
    # remove commas
    pesan_balasan = pesan_balasan.replace(",", "")
    update.message.reply_text(pesan_balasan)

def main():
    updater = Updater(TOKEN, use_context=True)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))
    # on noncommand i.e. message - echo the message on Telegram
    dp.add_handler(MessageHandler(Filters.text, echo))
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_webhook(listen="0.0.0.0",
                          port=PORT,
                          url_path=TOKEN,
                          webhook_url='https://powerful-lowlands-14039.herokuapp.com/' + TOKEN)
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()

if __name__ == '__main__':
    main()
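As pasted, main() registers two handlers, echo and error, that are never defined anywhere in the file, so the script dies with a NameError at startup no matter where it is hosted. A minimal sketch of the missing functions (only the names come from the code above; the bodies are assumptions):

def echo(update, context):
    """Echo the user's message back (assumed implementation)."""
    update.message.reply_text(update.message.text)

def error(update, context):
    """Log errors raised while handling updates (assumed implementation)."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)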

Disable global QoS in RabbitMQ from Celery

I have created a RabbitMQ 3-node cluster using three AWS EC2 servers. I'm trying to access the quorum queue I created using Celery. When I connect, it gives the error
raise error_for_code(reply_code, reply_text,
amqp.exceptions.AMQPNotImplementedError: Basic.consume: (540) NOT_IMPLEMENTED - queue 'Replica_que' in vhost '/' does not support global qos
I suppose it would work if I disabled global QoS, but I couldn't find where to do that. How do I disable global QoS in Celery?
My Celery code:

from celery import Celery
from time import sleep
import kombu

broker_uri = ['amqp://xxxx:5672/', 'amqp://xxxx:5672/', 'amqp://xxx:5672/']
backend_uri = "mongodb+srv://xxxxx"

app = Celery('TestApp', broker=broker_uri, backend=backend_uri)
app.config_from_object('celeryconfig')
app.conf.task_default_exchange = 'Replica_que'
app.conf.task_default_routing_key = 'Replica'

@app.task
def reverse(text):
    sleep(10)
    return text[:-1]
And the config code:

from kombu import Queue

task_queues = [Queue(name="Replica_que", queue_arguments={"x-queue-type": "quorum"})]
task_routes = {
    'tasks.add': 'Replica_que',
}
This was possible by adding a celeryconfig.py file,
from kombu import Queue

task_queues = [Queue(name="Replica_que", queue_arguments={"x-queue-type": "quorum"})]
task_routes = {
    'tasks.add': 'Replica_que',
}
and creating a custom QoS class: https://github.com/celery/celery/issues/6067
So I added the QoS class (imports added here for completeness; bootsteps ships with Celery, and QoS is the kombu helper Celery's consumer uses):

from celery import bootsteps
from kombu.common import QoS

class NoChannelGlobalQoS(bootsteps.StartStopStep):
    requires = {'celery.worker.consumer.tasks:Tasks'}

    def start(self, c):
        qos_global = False
        # Apply the prefetch count without the global flag,
        # which quorum queues do not support.
        c.connection.default_channel.basic_qos(0, c.initial_prefetch_count, qos_global)

        def set_prefetch_count(prefetch_count):
            return c.task_consumer.qos(
                prefetch_count=prefetch_count,
                apply_global=qos_global,
            )
        c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)

app.steps['consumer'].add(NoChannelGlobalQoS)
This is currently an open issue in Celery relating to quorum queues, but the workaround above works.

Celery consumer (only) with an external producer

I'm using Celery 4.4.7 with Redis as my message broker.
I want to use Celery as a consumer only, as the external producer is a Java application.
The Java application pushes messages to a channel on Redis, but my Celery application is not picking up the messages.
I have simulated the Java producer in Python using redis-py (redis_producer.py) to publish to a channel. The redis_consumer.py is able to pick up the messages from the producer.
But my celery_consumer.py seems to be blind to these messages.
Messages from redis_producer.py are picked up by redis_consumer.py, but not by Celery.
Messages from kombu_producer.py are picked up by the Celery worker, but not by my redis_consumer.py.
redis_producer.py
import json

import redis

r = redis.Redis(host='localhost', port=6379)

for i in range(10):
    body = {
        'id': i,
        'message': f'Hello {i}',
    }
    r.publish(channel='redis.test.topic', message=json.dumps(body))
redis_consumer.py
import json
import multiprocessing
import os
import signal

import redis

redis_conn = redis.Redis(charset='utf-8', decode_responses=True)

def sub(name: str):
    pubsub = redis_conn.pubsub()
    pubsub.subscribe('redis.test.topic')
    for message in pubsub.listen():
        print(message)
        if message.get('type') == 'message':
            data = message.get('data')
            print('%s: %s' % (name, data))

def on_terminate(signum, stack):
    wait_for_current_scp_operation()

if __name__ == '__main__':
    multiprocessing.Process(target=sub, args=('consumer',)).start()
    signal.signal(signal.SIGTERM, on_terminate)
celery_consumer.py
import os

from celery import Celery
from celery import shared_task
from celery.utils.log import get_task_logger
from kombu import Exchange
from kombu import Queue

logger = get_task_logger(__name__)

# Celery broker URL
broker_url = os.environ.get('CELERY_BROKER_URL', None)
if broker_url is None:
    broker_url = 'redis://localhost:6379/0'

# store to use for task results, default=None
result_backend = os.environ.get('CELERY_RESULT_BACKEND', None)
if result_backend is None:
    result_backend = 'redis://localhost:6379/0'

config = dict(
    broker_url=broker_url,
    result_backend=result_backend,
    # maximum number of connections that can be open in the connection pool
    broker_pool_limit=20,
    broker_transport_options={
        'visibility_timeout': 3600,
        'confirm_publish': True,
    },
    # Serializer method
    task_serializer='json',
    result_serializer='json',
    accept_content=['json'],
    # The two settings below reserve just one task at a time
    # Task acknowledgement mode, default=False
    task_acks_late=True,
    # How many messages to prefetch, default=4, value=1 disables prefetch
    worker_prefetch_multiplier=1,
    # Dates and times in messages will be converted to use the UTC timezone
    timezone='UTC',
    enable_utc=True,
    # if true, store the task return values
    task_ignore_result=False,
    # if true, result messages will be persistent, messages won't be lost after a broker restart
    result_persistent=False,
    # Celery task result expiry (in secs), default=1d, value=0/None never expires
    result_expires=900,
    # Message compression setting
    task_compression='gzip',
    # Task execution marker, default=False
    task_track_started=True,
    # rate limits on tasks (disable all rate limits, even if tasks have explicit rate limits set)
    worker_disable_rate_limits=True,
    # if True, all tasks will be executed locally by blocking until the task returns
    task_always_eager=False,
    # Send events so the worker can be monitored by tools like celerymon
    worker_send_task_events=False,
    # Expiry time in seconds for when a monitor client's event queue will be deleted, default=never
    event_queue_expires=60,
    # Default queue, exchange, routing keys configuration
    # task_default_queue = 'default.queue',
    # task_default_exchange = 'default.exchange',
    # task_default_exchange_type='topic',
    # task_default_routing_key = 'default.route',
    # task_create_missing_queues = True
    task_queues=(
        # Default configuration
        Queue('redis.test.topic',
              Exchange('redis.test.topic'),
              routing_key='redis.test.topic'),
    ),
)

def create_celery_app() -> Celery:
    logger.info('Initializing Celery...')
    celery_app = Celery(name=__name__)
    celery_app.config_from_object(config)
    return celery_app

# create a celery app
app = create_celery_app()

@app.task(name='task_process_message', bind=True, max_retries=3)
def task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))

@shared_task(name='shared_task_process_message', bind=True, max_retries=3)
def shared_task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: shared_task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing shared_task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))
kombu_producer.py
from kombu import Producer, Consumer, Queue, Connection
import json

redis_url = 'redis://localhost:6379/0'
conn = Connection(redis_url)
producer = Producer(conn.channel())
channel = 'redis.test.topic'

for i in range(10):
    body = {
        'task': 'task_process_message',
        'id': f'{i}',
        'kwargs': {'message': f'Hello {i}'},
    }
    producer.publish(body=body, routing_key='redis.test.topic')
(Two screenshots omitted: one showed Redis activity with the plain redis producer/consumer; the other showed Redis activity while running the kombu producer and Celery consumer.)
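One likely cause worth noting: Celery's Redis transport delivers task messages through Redis lists (the worker BRPOPs the queue key), not through pub/sub channels, so anything PUBLISHed by the Java producer is invisible to the worker, which is consistent with the observations above. A minimal bridge sketch, assuming the app object from celery_consumer.py is importable and the JSON body shape from redis_producer.py, that re-dispatches each pub/sub message as a real task:

import json

import redis

from celery_consumer import app  # assumed import path

r = redis.Redis(host='localhost', port=6379, decode_responses=True)
pubsub = r.pubsub()
pubsub.subscribe('redis.test.topic')

for message in pubsub.listen():
    if message.get('type') != 'message':
        continue
    payload = json.loads(message['data'])
    # Re-publish as a proper Celery task message on the queue the worker consumes.
    app.send_task('task_process_message',
                  kwargs={'message': payload['message']},
                  queue='redis.test.topic')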

How do I send a delayed message in RabbitMQ using the rabbitmq-delayed-message-exchange plugin?

I have enabled the plugin using
rabbitmq-plugins enable rabbitmq_delayed_message_exchange
I am trying to create a delayed exchange with the header x-delay set to 5000 ms as an int value, bound to a queue; it didn't work.
So I tried it using Pika in Python:
import pika

credentials = pika.PlainCredentials('admin', 'admin')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(pika.ConnectionParameters(host='127.0.0.1', port=5673, credentials=credentials))
channel = connection.channel()
# channel.exchange_declare(exchange='x-delayed-type', type='direct')
channel.exchange_declare("test-exchange", type="x-delayed-message",
                         arguments={"x-delayed-type": "direct"},
                         durable=True, auto_delete=True)
channel.queue_declare(queue='task_queue', durable=True)
channel.queue_bind(queue="task_queue", exchange="test-exchange", routing_key="task_queue")
for i in range(0, 100):
    channel.basic_publish(exchange='test-exchange', routing_key='task_queue',
                          body='gooogle',
                          properties=pika.BasicProperties(headers={"x-delay": 5000},
                                                          delivery_mode=1))
    print(i)
How can I make the delayed exchange work?
Error report:
ERROR REPORT==== 10-Mar-2017::13:08:09 ===
Error on AMQP connection <0.683.0> (127.0.0.1:42052 -> 127.0.0.1:5673, vhost: '/', user: 'admin', state: running), channel 1:
{{{undef,
[{erlang,system_time,[milli_seconds],[]},
{rabbit_delayed_message,internal_delay_message,4,
[{file,"src/rabbit_delayed_message.erl"},{line,179}]},
{rabbit_delayed_message,handle_call,3,
[{file,"src/rabbit_delayed_message.erl"},{line,122}]},
{gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,585}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]},
{gen_server,call,
[rabbit_delayed_message,
{delay_message,
{exchange,
{resource,<<"/">>,exchange,<<"test-exchange">>},
'x-delayed-message',true,true,false,
[{<<"x-delayed-type">>,longstr,<<"direct">>}],
undefined,undefined,
{[],[]}},
{delivery,false,false,<0.691.0>,
{basic_message,
{resource,<<"/">>,exchange,<<"test-exchange">>},
[<<"task_queue">>],
{content,60,
{'P_basic',undefined,undefined,
[{<<"x-delay">>,signedint,5000}],
1,undefined,undefined,undefined,undefined,
undefined,undefined,undefined,undefined,undefined,
undefined},
<<48,0,0,0,0,13,7,120,45,100,101,108,97,121,73,0,0,19,
136,1>>,
rabbit_framing_amqp_0_9_1,
[<<"gooogle">>]},
<<80,125,217,116,181,47,214,41,203,179,7,85,150,76,35,2>>,
false},
undefined,noflow},
5000},
infinity]}},
[{gen_server,call,3,[{file,"gen_server.erl"},{line,188}]},
{rabbit_exchange_type_delayed_message,route,2,
[{file,"src/rabbit_exchange_type_delayed_message.erl"},{line,53}]},
{rabbit_exchange,route1,3,[{file,"src/rabbit_exchange.erl"},{line,381}]},
{rabbit_exchange,route,2,[{file,"src/rabbit_exchange.erl"},{line,371}]},
{rabbit_channel,handle_method,3,
[{file,"src/rabbit_channel.erl"},{line,949}]},
{rabbit_channel,handle_cast,2,[{file,"src/rabbit_channel.erl"},{line,457}]},
{gen_server2,handle_msg,2,[{file,"src/gen_server2.erl"},{line,1032}]},
{proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,239}]}]}
The {undef, [{erlang,system_time,...}]} in the report suggests the broker's Erlang runtime is missing erlang:system_time/1 (added in Erlang/OTP 18), i.e. the plugin build is newer than the Erlang the node runs on, so upgrading Erlang/RabbitMQ should clear the crash. Here is working code with RabbitMQ 3.7.7:
send.py
#!/usr/bin/env python
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# channel.exchange_declare(exchange='direct_logs', exchange_type='direct')
# channel.exchange_declare("test-exchange", type="x-delayed-message", arguments={"x-delayed-type": "direct"}, durable=True, auto_delete=True)
channel.exchange_declare(exchange='test-exchange',
                         exchange_type='x-delayed-message',
                         arguments={"x-delayed-type": "direct"})

severity = sys.argv[1] if len(sys.argv) > 1 else 'info'
message = ' '.join(sys.argv[2:]) or 'Hello World!'
channel.basic_publish(exchange='test-exchange',
                      routing_key=severity,
                      properties=pika.BasicProperties(
                          headers={'x-delay': 5000}  # Add a key/value header
                      ),
                      body=message)
print(" [x] Sent %r:%r" % (severity, message))
connection.close()
receive.py
#!/usr/bin/env python
import pika
import sys

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='test-exchange',
                         exchange_type='x-delayed-message',
                         arguments={"x-delayed-type": "direct"})

result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue

binding_keys = sys.argv[1:]
if not binding_keys:
    sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
    sys.exit(1)

for binding_key in binding_keys:
    channel.queue_bind(exchange='test-exchange',
                       queue=queue_name,
                       routing_key=binding_key)

print(' [*] Waiting for logs. To exit press CTRL+C')

def callback(ch, method, properties, body):
    print(" [x] %r:%r" % (method.routing_key, body))

channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
channel.start_consuming()
python send.py error aaaabbbb
python receive.py error
[*] Waiting for logs. To exit press CTRL+C
[x] 'error':'aaaabbbb'
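Note that both scripts use the pre-1.0 pika API, matching their era. If you run them on pika >= 1.0, the queue_declare and basic_consume signatures changed; roughly, the 1.x equivalents of the calls in receive.py look like this:

# pika >= 1.0 equivalents (sketch) of the 0.x calls used above
result = channel.queue_declare(queue='', exclusive=True)
channel.basic_consume(queue=queue_name,
                      on_message_callback=callback,
                      auto_ack=True)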

twisted - interrupt callback via KeyboardInterrupt

I'm currently repeating a task in a for loop inside a callback using Twisted, but would like the reactor to break the loop in the callback (one) if the user issues a KeyboardInterrupt via Ctrl-C. From what I have tested, the reactor only stops or processes interrupts at the end of the callback.
Is there any way of sending a KeyboardInterrupt to the callback or the error handler in the middle of the callback run?
Cheers,
Chris
#!/usr/bin/env python
from twisted.internet import reactor, defer

def one(result):
    print("Start one()")
    for i in range(10000):
        print(i)
    print("End one()")
    reactor.stop()

def oneErrorHandler(failure):
    print(failure)
    print("INTERRUPTING one()")
    reactor.stop()

if __name__ == '__main__':
    d = defer.Deferred()
    d.addCallback(one)
    d.addErrback(oneErrorHandler)
    reactor.callLater(1, d.callback, 'result')
    print("STARTING REACTOR...")
    try:
        reactor.run()
    except KeyboardInterrupt:
        print("Interrupted by keyboard. Exiting.")
        reactor.stop()
I got this working nicely. The SIGINT handler sets a running flag to False for any running task in my code, and additionally calls reactor.callFromThread(reactor.stop) to stop any running Twisted code:
#!/usr/bin/env python
import signal

from twisted.internet import reactor, defer, task

def one(result, token):
    print("Start one()")
    for i in range(1000):
        print(i)
        if token.running is False:
            raise KeyboardInterrupt()
            # reactor.callFromThread(reactor.stop)  # this doesn't work
    print("End one()")

def oneErrorHandler(failure):
    print("INTERRUPTING one(): Unknown Exception")
    import traceback
    print(traceback.format_exc())
    reactor.stop()

def oneKeyboardInterruptHandler(failure):
    failure.trap(KeyboardInterrupt)
    print("INTERRUPTING one(): KeyboardInterrupt")
    reactor.stop()

def repeatingTask(token):
    d = defer.Deferred()
    d.addCallback(one, token)
    d.addErrback(oneKeyboardInterruptHandler)
    d.addErrback(oneErrorHandler)
    d.callback('result')

class Token(object):
    def __init__(self):
        self.running = True

def sayBye():
    print("bye bye.")

if __name__ == '__main__':
    token = Token()

    def customHandler(signum, stackframe):
        print("Got signal: %s" % signum)
        token.running = False  # to stop my code
        reactor.callFromThread(reactor.stop)  # to stop twisted code when in the reactor loop

    signal.signal(signal.SIGINT, customHandler)
    t2 = task.LoopingCall(reactor.callLater, 0, repeatingTask, token)
    t2.start(5)
    reactor.addSystemEventTrigger('during', 'shutdown', sayBye)
    print("STARTING REACTOR...")
    reactor.run()
This is intentional to avoid (semi-)preemption, since Twisted is a cooperative multitasking system. Ctrl-C is handled in Python with a SIGINT handler installed by the interpreter at startup. The handler sets a flag when it is invoked. After each byte code is executed, the interpreter checks the flag. If it is set, KeyboardInterrupt is raised at that point.
The reactor installs its own SIGINT handler. This replaces the behavior of the interpreter's handler. The reactor's handler initiates reactor shutdown. Since it doesn't raise an exception, it doesn't interrupt whatever code is running. The loop (or whatever) gets to finish, and when control is returned to the reactor, shutdown proceeds.
If you'd rather have Ctrl-C (i.e. SIGINT) raise KeyboardInterrupt, then you can just restore Python's default SIGINT handler using the signal module:
signal.signal(signal.SIGINT, signal.default_int_handler)
Note, however, that if you send a SIGINT while code from Twisted is running, rather than your own application code, the behavior is undefined, as Twisted does not expect to be interrupted by KeyboardInterrupt.
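Since the reactor installs its handlers inside reactor.run(), the restore has to happen after startup, for example via reactor.callWhenRunning. A minimal sketch (the placement is the point; the handler line is the same as above):

#!/usr/bin/env python
import signal

from twisted.internet import reactor

def restore_sigint():
    # reactor.run() replaced the interpreter's SIGINT handler during startup;
    # putting the default handler back makes Ctrl-C raise KeyboardInterrupt
    # in whatever Python code happens to be executing at that moment.
    signal.signal(signal.SIGINT, signal.default_int_handler)

reactor.callWhenRunning(restore_sigint)
reactor.run()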