Telegram bot not responding after uploading files and database to Heroku - telegram-bot

I'm a beginner in Python. Below is my Python code for a Telegram bot. It works on XAMPP, but I would like to host the bot in the cloud so that there's no need to start XAMPP's Apache and MySQL every time I want to use the bot. However, it's not working after being uploaded to Heroku. May I know how I can fix this? Thank you in advance.
Modified for uploading to Heroku
import logging
import os

import mysql.connector
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters

PORT = int(os.environ.get('PORT', 5000))

# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

mydb = mysql.connector.connect(
    host='us-cdbr-east-05.cleardb.net',
    user='b081bd520f9623',
    passwd='557dad71',
    database='heroku_26b1a208f24f1fa')
query = mydb.cursor()

TOKEN = '5333685233:AAFr4-1nB6_I8ZMdt25Y4zBotHRA9I_qtMI'

# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def start(update, context):
    """Send a message when the command /start is issued."""
    update.message.reply_text('Hi! This is start')

def help(update, context):
    """Send a message when the command /help is issued."""
    update.message.reply_text('Help!')

def sql(update, context):
    sql = "SELECT nama_item, jumlah_dalam_kg FROM data_penjualan_harian WHERE nama_item = 'Lemon'"
    query.execute(sql)
    sql_result = query.fetchall()
    pesan_balasan = ''
    for x in sql_result:
        pesan_balasan = pesan_balasan + str(x) + '\n'
    # tidy up the bot's reply
    # remove the quotation marks
    pesan_balasan = pesan_balasan.replace("'", "")
    # remove the parentheses
    pesan_balasan = pesan_balasan.replace("(", "")
    pesan_balasan = pesan_balasan.replace(")", "")
    # remove the commas
    pesan_balasan = pesan_balasan.replace(",", "")
    update.message.reply_text(pesan_balasan)

def echo(update, context):
    """Echo the user message. (Referenced in main() but missing from the posted code.)"""
    update.message.reply_text(update.message.text)

def error(update, context):
    """Log errors caused by updates. (Referenced in main() but missing from the posted code.)"""
    logger.warning('Update "%s" caused error "%s"', update, context.error)

def main():
    updater = Updater(TOKEN, use_context=True)

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start))
    dp.add_handler(CommandHandler("help", help))

    # on noncommand i.e. message - echo the message on Telegram
    dp.add_handler(MessageHandler(Filters.text, echo))

    # log all errors
    dp.add_error_handler(error)

    # Start the Bot
    updater.start_webhook(listen="0.0.0.0",
                          port=PORT,
                          url_path=TOKEN,
                          webhook_url='https://powerful-lowlands-14039.herokuapp.com/' + TOKEN)

    # Run the bot until the process receives SIGINT, SIGTERM or SIGABRT.
    updater.idle()

if __name__ == '__main__':
    main()
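A note for anyone comparing setups: Heroku only launches the bot if the repo declares a web process, since start_webhook() above binds to the PORT value Heroku injects. A minimal sketch of the Procfile, assuming the script above is saved as bot.py (the filename is an assumption, not from the original post):

web: python bot.py

With that process type, Heroku routes HTTPS traffic for powerful-lowlands-14039.herokuapp.com to the bound port, which is what the webhook_url above relies on.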

Related

Was the "subscription" type added to the YouTube API's Activities: list recently?

I use this endpoint (https://developers.google.com/youtube/v3/docs/activities/list) to retrieve the list of a user's activities.
I use my channelId, and I don't see subscriptions for the period before autumn 2021. Can anyone explain why? Maybe you can tell me when this activity type (subscription) was added to the Activities: list request?
Thanks!
See the example code below:
import os

import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors

scopes = ["https://www.googleapis.com/auth/youtube.readonly"]

def main():
    # Disable OAuthlib's HTTPS verification when running locally.
    # *DO NOT* leave this option enabled in production.
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"

    api_service_name = "youtube"
    api_version = "v3"
    client_secrets_file = "YOUR_CLIENT_SECRET_FILE.json"

    # Get credentials and create an API client
    flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
        client_secrets_file, scopes)
    credentials = flow.run_console()
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, credentials=credentials)

    request = youtube.activities().list(
        part="snippet,contentDetails",
        channelId="yourChannelId",  # Note: paste your own channelId
        maxResults=300
    )
    response = request.execute()
    print(response)

if __name__ == "__main__":
    main()
You can try an example request on the right-hand side of this page:
https://developers.google.com/youtube/v3/docs/activities/list

Telegram bot Message Handler error [local variable referenced before assignment]

from dotenv import load_dotenv
import os
from panda import *
from telegram.ext import *
from telegram.update import *

load_dotenv('.env')
Token = os.getenv('TOKEN')
print("The bot connected .....")

# command handlers
# start message
def start_command(update, context):
    update.message.reply_text("Hello am mr panda am here to help you: ")

# help command
def help_command(update, context):
    res = panda.help()
    update.message.reply_text(res)

# message handler (highlighted by the asker as a suspected error)
def message_handle(update, context):
    message = str(update.message.text).lower()
    respose = panda.hello(message)
    update.message.reply_text(respose)

# error handler
def error(update, context):
    print(f"Update the context error : {context.error}")

# main function
def main():
    global message
    updater = Updater(Token, use_context=True)
    dp = updater.dispatcher

    # command handlers
    dp.add_handler(CommandHandler("start", start_command))
    dp.add_handler(CommandHandler("help", help_command))

    # message handlers (highlighted by the asker as a suspected error)
    dp.add_handler(MessageHandler(Filters.text, message_handle))

    # error handlers
    dp.add_error_handler(error)

    updater.start_polling()
    updater.idle()

main()
This was the code. I'm getting this error:
Update the context error : local variable 'message' referenced before assignment
I think the error is in the highlighted portions. I did a few searches and referred to the documentation too, but I can't catch the error.
If anyone has a solution, that would be great :)
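For context on what the message means: Python raises this error (UnboundLocalError) when a function reads a name that is assigned somewhere in the same function before the assignment has actually run. The handler above assigns message before using it, so the exception most likely originates inside the panda module (not shown); the snippet below is only a minimal sketch of the pattern that produces this exact error:

counter = 0

def bump():
    # Because 'counter' is assigned inside this function, Python treats it as a
    # local variable for the whole function body, so reading it on the
    # right-hand side happens before any local assignment exists and raises:
    # UnboundLocalError: local variable 'counter' referenced before assignment
    counter = counter + 1

bump()

Declaring global counter at the top of the function (or passing the value in as a parameter) removes the error.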

How to get messages of a Telegram channel with the python-telegram-bot library

I was wondering if there is a way to get messages from a Telegram channel, given that I am logged in to the account and am an admin of the channel. I just want to get the messages.
import feedparser
from telegram import Update, ForceReply, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, CallbackQueryHandler
from bs4 import BeautifulSoup
from datetime import datetime
import json
import telegram
from time import sleep
from telegram.ext import MessageHandler, Filters

class Config:
    def __init__(self):
        with open("config.json", "r") as config:
            self.config = json.load(config)

class TelegramBotChannel:
    def __init__(self, token, start_channel_id):
        self.updater = Updater(token=token, use_context=True)
        self.dispatcher = self.updater.dispatcher
        self.start_channel_id = start_channel_id

if __name__ == '__main__':
    telegram_bot = TelegramBotChannel(Config().config["token"], Config().config["start"])
    pass
This is the minimal code to fetch messages from a channel using a Telegram bot that is a member of the channel (bots can only be added to channels as admins). Provide the correct bot API token as KEY:
from api_keys import bot_api_key as KEY
from telegram.ext import Updater, Filters, MessageHandler

updater = Updater(token=KEY, use_context=True)
dispatcher = updater.dispatcher

def forwarder(update, context):
    msg = update.channel_post
    if msg:
        print(msg)

forwardHandler = MessageHandler(Filters.text & (~Filters.command), forwarder)
dispatcher.add_handler(forwardHandler)

updater.start_polling()
updater.idle()
Bots can only get updates about channel posts if they are a member in that channel (and bots can only be added to channels as admin). If they are admins in the channel, they will receive updates just like from every other chat.
Requirements:
Your bot should be in the channel, as an admin.
So first make a function:
def forwader(update, context):
    context.bot.copy_message("#temporary2for", "#tempmain", update.channel_post.message_id)
After that, make a handler:
forwadHandler = MessageHandler(Filters.text & (~Filters.command), forwader)
Then register your handler:
dispatcher.add_handler(forwadHandler)
Then don't forget to start bot polling:
updater.start_polling()
updater.idle()
Full code:
from telegram import bot
from telegram.ext import Updater, CommandHandler, Filters, MessageHandler
from config import useless
import logging

updater = Updater(token=useless, use_context=True)
dispatcher = updater.dispatcher

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

def forwader(update, context):
    context.bot.copy_message("#temporary2for", "#tempmain", update.channel_post.message_id)

forwadHandler = MessageHandler(Filters.text & (~Filters.command), forwader)
dispatcher.add_handler(forwadHandler)

updater.start_polling()
updater.idle()
Some of the imports are unused.
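A side note on the copy_message() call used above: in python-telegram-bot its positional arguments are chat_id, from_chat_id, message_id, and chat ids are normally numeric ids or "@username" strings, so the "#temporary2for"/"#tempmain" values are best read as placeholders. A sketch of the same forwarder with explicit keyword arguments and a hypothetical destination id:

def forwarder(update, context):
    post = update.channel_post
    if post:
        context.bot.copy_message(
            chat_id=-1001111111111,        # hypothetical destination chat id
            from_chat_id=post.chat_id,     # the channel the post arrived from
            message_id=post.message_id,
        )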

Celery consumer (only) with an external producer

I'm using Celery 4.4.7 with Redis as my message broker.
I want to use Celery as a consumer only, as the external producer is a Java application.
The Java application pushes messages to a channel on Redis, but my Celery application is not picking up those messages.
I have simulated the Java producer in Python using redis-py (redis_producer.py) to publish to a channel. redis_consumer.py is able to pick up the messages from the producer, but celery_consumer.py seems to be blind to them:
Messages from redis_producer.py are picked up by redis_consumer.py, but not by Celery.
Messages from kombu_producer.py are picked up by the Celery worker, but not by redis_consumer.py.
redis_producer.py
import json

import redis

r = redis.Redis(host='localhost', port=6379)

for i in range(10):
    body = {
        'id': i,
        'message': f'Hello {i}',
    }
    r.publish(channel='redis.test.topic', message=json.dumps(body))
redis_consumer.py
import json
import multiprocessing
import os
import signal

import redis

redis_conn = redis.Redis(charset='utf-8', decode_responses=True)

def sub(name: str):
    pubsub = redis_conn.pubsub()
    pubsub.subscribe('redis.test.topic')
    for message in pubsub.listen():
        print(message)
        if message.get('type') == 'message':
            data = message.get('data')
            print('%s: %s' % (name, data))

def on_terminate(signum, stack):
    wait_for_current_scp_operation()

if __name__ == '__main__':
    multiprocessing.Process(target=sub, args=('consumer',)).start()
    signal.signal(signal.SIGTERM, on_terminate)
celery_consumer.py
import os
import ssl

from celery import Celery
from celery import shared_task
from celery.utils.log import get_task_logger
from kombu import Exchange
from kombu import Queue

logger = get_task_logger(__name__)

# Celery broker URL
broker_url = os.environ.get('CELERY_BROKER_URL', None)
if broker_url is None:
    broker_url = 'redis://localhost:6379/0'

# store to use to store task results, default=None
result_backend = os.environ.get('CELERY_RESULT_BACKEND', None)
if result_backend is None:
    result_backend = 'redis://localhost:6379/0'

config = dict(
    broker_url=broker_url,
    result_backend=result_backend,
    # maximum number of connections that can be open in the connection pool
    broker_pool_limit=20,
    broker_transport_options={
        'visibility_timeout': 3600,
        'confirm_publish': True,
    },
    # Serializer method
    task_serializer='json',
    result_serializer='json',
    accept_content=['json'],
    # The below two settings just reserve one task at a time
    # Task acknowledgement mode, default=False
    task_acks_late=True,
    # How many messages to prefetch, default=4, value=1 disables prefetching
    worker_prefetch_multiplier=1,
    # Dates and times in messages will be converted to use the UTC timezone
    timezone='UTC',
    enable_utc=True,
    # if true, store the task return values
    task_ignore_result=False,
    # if true, result messages will be persistent, messages won't be lost after a broker restart
    result_persistent=False,
    # Celery task result expiry (in secs), default=1d, value=0/None never expires
    result_expires=900,
    # Message compression setting
    task_compression='gzip',
    # Task execution marker, default=False
    task_track_started=True,
    # rate limits on tasks (disable all rate limits, even if tasks have explicit rate limits set)
    worker_disable_rate_limits=True,
    # if True, all tasks will be executed locally by blocking until the task returns
    task_always_eager=False,
    # Send events so the worker can be monitored by tools like celerymon
    worker_send_task_events=False,
    # Expiry time in seconds for when a monitor client's event queue will be deleted, default=never
    event_queue_expires=60,
    # Default queue, exchange, routing keys configuration
    # task_default_queue = 'default.queue',
    # task_default_exchange = 'default.exchange',
    # task_default_exchange_type='topic',
    # task_default_routing_key = 'default.route',
    # task_create_missing_queues = True
    task_queues=(
        # Default configuration
        Queue('redis.test.topic',
              Exchange('redis.test.topic'),
              routing_key='redis.test.topic'),
    ),
)

def create_celery_app() -> Celery:
    logger.info('Initializing Celery...')
    celery_app = Celery(name=__name__)
    celery_app.config_from_object(config)
    return celery_app

# create a celery app
app = create_celery_app()

@app.task(name='task_process_message', bind=True, max_retries=3)
def task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        # (utils is the asker's helper module, not shown)
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))

@shared_task(name='shared_task_process_message', bind=True, max_retries=3)
def shared_task_process_message(self, message):
    try:
        logger.info(f'{message}: Triggered task: shared_task_process_message')
    except Exception as e:
        logger.exception(
            f'Error executing shared_task_process_message({message})')
        # We do not have to reset the timestamp since the job always looks back by 1 hr
        self.retry(exc=e, countdown=utils.get_retry_delay(self.request.retries))
kombu_producer.py
from kombu import Producer, Consumer, Queue, Connection
import json

redis_url = 'redis://localhost:6379/0'
conn = Connection(redis_url)
producer = Producer(conn.channel())

channel = 'redis.test.topic'
for i in range(10):
    body = {
        'task': 'task_process_message',
        'id': f'{i}',
        'kwargs': {'message': f'Hello {i}'},
    }
    producer.publish(body=body, routing_key='redis.test.topic')
[Image: Redis activity when using the regular redis producer/consumer.]
[Image: Redis activity while running the kombu producer and the Celery consumer.]
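The asymmetry above is consistent with how Celery uses Redis: a Celery worker does not listen on Redis pub/sub channels at all; it pops task messages, encoded in Celery's own message protocol, from a Redis list named after the queue. redis_producer.py PUBLISHes to a pub/sub channel the worker never reads, while kombu_producer.py writes a protocol-shaped message to the queue key. A minimal sketch of producing a task the way the worker expects, assuming the worker from celery_consumer.py is running with -Q redis.test.topic:

from celery import Celery

# Point at the same broker the worker uses.
app = Celery(broker='redis://localhost:6379/0')

# send_task builds a well-formed Celery task message and pushes it onto the
# 'redis.test.topic' queue (a Redis list) that the worker pops from.
app.send_task(
    'task_process_message',      # must match the name registered by @app.task
    kwargs={'message': 'Hello from an external producer'},
    queue='redis.test.topic',
)

An external producer in another language (like the Java application here) has to emit that same message format to the queue key; publishing plain JSON on a pub/sub channel will never reach the worker.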

Google Cloud IoT - Multiple config messages

In my code I'm subscribing to 3 different topics:
/devices/{}/config
/devices/{}/events
/devices/{}/state
If I'm not subscribed to /devices/{}/config, I don't receive any config messages, and that's OK. But if I am subscribed to /devices/{}/config, I get one config message for each subscription I have.
Example:
Subscribed to "/devices/{}/config" and "/devices/{}/events", I get 2 config messages.
Subscribed to "/devices/{}/config" and "/devices/{}/state", I get 2 config messages.
Subscribed to "/devices/{}/config", "/devices/{}/state" and "/devices/{}/events", I get 3 config messages.
Subscribed to "/devices/{}/events" and "/devices/{}/state", I get 0 config messages.
This is causing an error in IoT Core (mqtt):
The device "xxxxxxxx" could not be updated. Device state can be updated only once every 1s.
In fact I want and need just one config message. What am I doing wrong?
This is my code:
# [START iot_mqtt_includes]
import argparse
import datetime
import os
import random
import ssl
import time

import log
import updateConfig
import jwt
import paho.mqtt.client as mqtt
import payload
# [END iot_mqtt_includes]

# The initial backoff time after a disconnection occurs, in seconds.
minimum_backoff_time = 1

# The maximum backoff time before giving up, in seconds.
MAXIMUM_BACKOFF_TIME = 32

# Whether to wait with exponential backoff before publishing.
should_backoff = False

# [START iot_mqtt_jwt]
def create_jwt(project_id, private_key_file, algorithm):
    token = {
        # The time that the token was issued at
        'iat': datetime.datetime.utcnow(),
        # The time the token expires.
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
        # The audience field should always be set to the GCP project id.
        'aud': project_id
    }
    # Read the private key file.
    with open(private_key_file, 'r') as f:
        private_key = f.read()
    return jwt.encode(token, private_key, algorithm=algorithm)
# [END iot_mqtt_jwt]

# [START iot_mqtt_config]
def error_str(rc):
    """Convert a Paho error to a human readable string."""
    return '{}: {}'.format(rc, mqtt.error_string(rc))

def on_connect(unused_client, unused_userdata, unused_flags, rc):
    """Callback for when a device connects."""
    log.append_log('on_connect - ' + mqtt.connack_string(rc))
    # After a successful connect, reset backoff time and stop backing off.
    global should_backoff
    global minimum_backoff_time
    should_backoff = False
    minimum_backoff_time = 1

def on_disconnect(unused_client, unused_userdata, rc):
    """Paho callback for when a device disconnects."""
    log.append_log('on_disconnect - ' + error_str(rc))
    # Since a disconnect occurred, the next loop iteration will wait with
    # exponential backoff.
    global should_backoff
    should_backoff = True

def on_publish(unused_client, unused_userdata, unused_mid):
    """Paho callback when a message is sent to the broker."""

def on_message(unused_client, unused_userdata, message):
    """Callback when the device receives a message on a subscription."""
    config = str(message.payload)
    retorno = updateConfig.update(config)

def get_client(
        project_id, cloud_region, registry_id, device_id, private_key_file,
        algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
    """Create our MQTT client. The client_id is a unique string that identifies
    this device. For Google Cloud IoT Core, it must be in the format below."""
    client = mqtt.Client(
        client_id=('projects/{}/locations/{}/registries/{}/devices/{}'
                   .format(
                       project_id,
                       cloud_region,
                       registry_id,
                       device_id)))

    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    client.username_pw_set(
        username='unused',
        password=create_jwt(
            project_id, private_key_file, algorithm))

    # Enable SSL/TLS support.
    client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)

    # Register message callbacks. https://eclipse.org/paho/clients/python/docs/
    # describes additional callbacks that Paho supports. In this example, the
    # callbacks just print to standard out.
    client.on_connect = on_connect
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect
    client.on_message = on_message

    # Connect to the Google MQTT bridge.
    client.connect(mqtt_bridge_hostname, mqtt_bridge_port)

    # This is the topic that the device will receive configuration updates on.
    mqtt_config_topic = '/devices/{}/config'.format(device_id)

    # Subscribe to the config topic.
    client.subscribe(mqtt_config_topic, qos=1)

    return client
# [END iot_mqtt_config]

def parse_command_line_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description=(
        'Example Google Cloud IoT Core MQTT device connection code.'))
    parser.add_argument(
        '--project_id',
        default=os.environ.get('GOOGLE_CLOUD_PROJECT'),
        help='GCP cloud project name')
    parser.add_argument(
        '--registry_id', required=True, help='Cloud IoT Core registry id')
    parser.add_argument(
        '--device_id', required=True, help='Cloud IoT Core device id')
    parser.add_argument(
        '--private_key_file',
        required=True, help='Path to private key file.')
    parser.add_argument(
        '--algorithm',
        choices=('RS256', 'ES256'),
        required=True,
        help='Which encryption algorithm to use to generate the JWT.')
    parser.add_argument(
        '--cloud_region', default='us-central1', help='GCP cloud region')
    parser.add_argument(
        '--ca_certs',
        default='roots.pem',
        help=('CA root from https://pki.google.com/roots.pem'))
    parser.add_argument(
        '--message_type',
        choices=('event', 'state'),
        default='event',
        help=('Indicates whether the message to be published is a '
              'telemetry event or a device state message.'))
    parser.add_argument(
        '--mqtt_bridge_hostname',
        default='mqtt.googleapis.com',
        help='MQTT bridge hostname.')
    parser.add_argument(
        '--mqtt_bridge_port',
        choices=(8883, 443),
        default=8883,
        type=int,
        help='MQTT bridge port.')
    parser.add_argument(
        '--jwt_expires_minutes',
        default=20,
        type=int,
        help=('Expiration time, in minutes, for JWT tokens.'))
    return parser.parse_args()

# [START iot_mqtt_run]
def main():
    log.append_log("Starting a new connection to Google IoT.")
    global minimum_backoff_time
    args = parse_command_line_args()

    # Publish to the events or state topic based on the flag.
    jwt_iat = datetime.datetime.utcnow()
    jwt_exp_mins = args.jwt_expires_minutes
    client = get_client(
        args.project_id, args.cloud_region, args.registry_id, args.device_id,
        args.private_key_file, args.algorithm, args.ca_certs,
        args.mqtt_bridge_hostname, args.mqtt_bridge_port)

    # Publish messages to the MQTT bridge once per second.
    while True:
        # Process network events.
        client.loop()

        # Wait if backoff is required.
        if should_backoff:
            # If backoff time is too large, give up.
            if minimum_backoff_time > MAXIMUM_BACKOFF_TIME:
                log.append_log('Maximum backoff time exceeded. Giving up.')
                break

            # Otherwise, wait and connect again.
            delay = minimum_backoff_time + random.randint(0, 1000) / 1000.0
            log.append_log('Waiting {} seconds before reconnecting.'.format(delay))
            time.sleep(delay)
            minimum_backoff_time *= 2
            client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)

        # [START iot_mqtt_jwt_refresh]
        seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
        if seconds_since_issue > 60 * jwt_exp_mins:
            log.append_log('Refreshing access token after {} seconds'.format(seconds_since_issue))
            client.loop_stop()
            jwt_iat = datetime.datetime.utcnow()
            client = get_client(
                args.project_id, args.cloud_region,
                args.registry_id, args.device_id, args.private_key_file,
                args.algorithm, args.ca_certs, args.mqtt_bridge_hostname,
                args.mqtt_bridge_port)
        # [END iot_mqtt_jwt_refresh]

        # Publish "payload" to the MQTT topic. qos=1 means at least once
        # delivery. Cloud IoT Core also supports qos=0 for at most once
        # delivery.
        # payload.lerPayload() returns the string 'sem payload' ("no payload")
        # when there is nothing to send.
        payloadToPublish = payload.lerPayload()
        if payloadToPublish != 'sem payload':
            if payloadToPublish[0] == 'event':
                mqtt_topic = '/devices/{}/{}'.format(args.device_id, 'events')
                log.append_log('publishing [' + payloadToPublish[1] + ']')
            else:
                mqtt_topic = '/devices/{}/{}'.format(args.device_id, 'state')
            client.publish(mqtt_topic, payloadToPublish[1], qos=1)
# [END iot_mqtt_run]

if __name__ == '__main__':
    main()
This is the expected behavior:
The events and state topics are used for device-to-cloud communication, and the config topic is used for sending configuration data to an IoT device. Subscribing to the events/state topics is effectively a no-op.
Cloud IoT Core only supports up to QoS 1, which is "at least once" message transmission. You could try QoS 0, which is "at most once" and does not retry transmission waiting for an ACK, but I don't think that is what you want.
STATE and CONFIG transmission is limited to a single message per second; transmitting configuration changes faster than this will cause the error you're seeing.
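Putting the answer in code terms: only the config subscription inside get_client() should remain. A minimal sketch reusing client and device_id from get_client() above (the thread suggests each extra subscription produced another copy of the retained config message):

# Subscribe only to the config topic; on the device side, events and state
# are publish-only topics, so subscribing to them gains nothing.
mqtt_config_topic = '/devices/{}/config'.format(device_id)
client.subscribe(mqtt_config_topic, qos=1)

# Telemetry and state go out with publish(), never subscribe():
# client.publish('/devices/{}/events'.format(device_id), payload, qos=1)
# client.publish('/devices/{}/state'.format(device_id), payload, qos=1)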