I would appreciate it if somebody could shed some light on this. I am trying to set up a simple Telegram bot in Python on Google App Engine with the Telethon library, but it fails. Is this approach workable?
After gcloud app deploy, I receive an Internal Server Error in the browser. The problem does not happen if I comment out the following line:
client = TelegramClient(phone, api_id, api_hash)
main.py
from flask import Flask, request
import datetime
import time
import asyncio
from telethon import TelegramClient, events, sync
app = Flask(__name__)
api_id = xxxxxxxx
api_hash = 'xxxxxxxxxxxxxxxxxxxxxxxx'
phone = '+xxxxxxxxxxxx'
@app.route('/', methods=['GET'])
def hello():
    reqaction = request.args.get('action', 'connect')
    client = TelegramClient(phone, api_id, api_hash)

if __name__ == '__main__':
    app.run(host='localhost', port=8080, debug=True)
Below is the log output:
Traceback (most recent call last):
File "/env/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/env/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/env/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/env/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/env/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/env/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/srv/main.py", line 95, in hello
client = TelegramClient(phone, api_id, api_hash) ;
File "lib/telethon/client/telegrambaseclient.py", line 230, in __init__
self._loop = loop or asyncio.get_event_loop()
File "/opt/python3.7/lib/python3.7/asyncio/events.py", line 644, in get_event_loop
% threading.current_thread().name)
RuntimeError: There is no current event loop in thread 'ThreadPoolExecutor-0_0'.
Many thanks in advance for your guidance and help :)
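A possible direction, sketched under assumptions rather than tested on App Engine: Flask serves each request on a worker thread, and asyncio.get_event_loop() only creates a loop automatically on the main thread, which is exactly what the RuntimeError says. Creating a loop explicitly inside the handler, and passing it through the loop parameter that the traceback shows Telethon's __init__ accepts, may get past this error:

import asyncio
from flask import Flask
from telethon import TelegramClient

app = Flask(__name__)
api_id = 12345      # placeholder, as in main.py above
api_hash = 'xxxx'   # placeholder
phone = '+xxxx'     # placeholder

@app.route('/', methods=['GET'])
def hello():
    # Worker threads have no event loop by default, so create one explicitly.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    client = TelegramClient(phone, api_id, api_hash, loop=loop)
    return 'client created'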
I am using Celery with SQS as a broker, and I am trying to renew my credentials ("AWS_ACCESS_KEY_ID" and "AWS_SECRET_ACCESS_KEY") before they expire. The first time I run the task, the result is success, but after 15 minutes it expires even though the credentials have been renewed. The function to update the credentials is as follows:
import os
import boto3
from celery import Celery
from kombu.utils.url import safequote
def update_aws_credentials():
    role_info = {
        'RoleArn': f"arn:aws:iam::{os.environ['AWS_ACCOUNT_NUMER']}:role/my_role_execution",
        'RoleSessionName': 'roleExecution',
        'DurationSeconds': 900
    }
    sts_client = boto3.client('sts', region_name='eu-central-1')
    credentials = sts_client.assume_role(**role_info)
    aws_access_key_id = credentials["Credentials"]['AccessKeyId']
    aws_secret_access_key = credentials["Credentials"]['SecretAccessKey']
    aws_session_token = credentials["Credentials"]["SessionToken"]
    os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
    os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    os.environ["AWS_DEFAULT_REGION"] = 'eu-central-1'
    os.environ["AWS_SESSION_TOKEN"] = aws_session_token
    return aws_access_key_id, aws_secret_access_key
def get_celery(aws_access_key_id, aws_secret_access_key):
    broker = f"sqs://{safequote(aws_access_key_id)}:{safequote(aws_secret_access_key)}@"
    backend = 'redis://redis-service:6379/0'
    celery = Celery("my_task", broker=broker, backend=backend)
    celery.conf["broker_transport_options"] = {
        'polling_interval': 30,
        'region': 'eu-central-1',
        'predefined_queues': {
            "my_queue": {
                'url': f"https://sqs.eu-central-1.amazonaws.com/{os.environ['AWS_ACCOUNT_NUMER']}/my_queue"
            }
        }
    }
    celery.conf["task_default_queue"] = "my_queue"
    return celery
def refresh_sqs_credentials():
    access, secret = update_aws_credentials()
    return get_celery(access, secret)
Running refresh_sqs_credentials creates new credentials:
celery = worker.refresh_sqs_credentials()
And then I run my task with celery:
task = celery.send_task('my_task.code_of_my_task', args=[content], task_id=task_id)
All tasks that I run within the first 15 minutes finish successfully, but after 15 minutes the error is the following:
[2021-12-14 14:08:15,637] ERROR in app: Exception on /tasks/run [POST]
Traceback (most recent call last):
File "/api/app.py", line 87, in post
task = celery.send_task('glgt_ap35080_dev_sqs_runalgo.allocation_alg_task', args=[content], task_id=task_id)
File "/usr/local/lib/python3.6/site-packages/celery/app/base.py", line 717, in send_task
amqp.send_task_message(P, name, message, **options)
File "/usr/local/lib/python3.6/site-packages/celery/app/amqp.py", line 547, in send_task_message
**properties
File "/usr/local/lib/python3.6/site-packages/kombu/messaging.py", line 178, in publish
exchange_name, declare,
File "/usr/local/lib/python3.6/site-packages/kombu/connection.py", line 525, in _ensured
return fun(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/kombu/messaging.py", line 200, in _publish
mandatory=mandatory, immediate=immediate,
File "/usr/local/lib/python3.6/site-packages/kombu/transport/virtual/base.py", line 605, in basic_publish
return self._put(routing_key, message, **kwargs)
File "/usr/local/lib/python3.6/site-packages/kombu/transport/SQS.py", line 294, in _put
c.send_message(**kwargs)
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 337, in _api_call
File "/usr/local/lib/python3.6/site-packages/botocore/client.py", line 656, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (ExpiredToken) when calling the SendMessage operation: The security token included in the request is expired
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/site-packages/flask_restplus/api.py", line 325, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/flask/views.py", line 88, in view
return self.dispatch_request(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/flask_restplus/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/api/app.py", line 90, in post
abort(500)
File "/usr/local/lib/python3.6/site-packages/werkzeug/exceptions.py", line 774, in abort
return _aborter(status, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/werkzeug/exceptions.py", line 755, in __call__
raise self.mapping[code](*args, **kwargs)
werkzeug.exceptions.InternalServerError: 500 Internal Server Error: The server encountered an internal error and was unable to complete your request. Either the server is overloaded or there is an error in the application.
10.142.95.217 - - [14/Dec/2021 14:08:15] "POST /tasks/run HTTP/1.1" 500 -
I'm storing the credentials in environment variables, so I don't understand why they expire after 15 minutes. Can someone help me, please?
The versions of the packages used are:
boto3==1.14.54
celery==5.0.0
kombu==5.0.2
pycurl==7.43.0.6
Thank you
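One thing worth noting, as a hedged guess rather than a verified diagnosis: the key and secret are baked into the broker URL when get_celery() builds the app, so updating os.environ afterwards never reaches the broker connection that already exists, and DurationSeconds: 900 means those baked-in STS credentials lapse after exactly 15 minutes. A sketch of a workaround under that assumption (it reuses the question's functions; the expiry bookkeeping is hypothetical):

import time

_celery = None
_expires_at = 0.0

def get_refreshed_celery():
    # Rebuild the Celery app before the 900-second STS credentials lapse;
    # refresh a minute early to leave headroom for in-flight publishes.
    global _celery, _expires_at
    if _celery is None or time.time() >= _expires_at - 60:
        access, secret = update_aws_credentials()
        _celery = get_celery(access, secret)
        _expires_at = time.time() + 900  # matches DurationSeconds above
    return _celery

# usage: fetch the app through the helper every time before sending a task
# celery = get_refreshed_celery()
# celery.send_task('my_task.code_of_my_task', args=[content], task_id=task_id)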
s3fs seems to fail from time to time when reading from an S3 bucket using an AWS Lambda function within a VPC. I am using s3fs==0.4.0 and pandas==1.0.1.
import s3fs
import pandas as pd
def lambda_handler(event, context):
    bucket = event['Records'][0]['s3']['bucket']['name']
    s3_file = event['Records'][0]['s3']['object']['key']
    s3fs.S3FileSystem.connect_timeout = 1800
    s3fs.S3FileSystem.read_timeout = 1800
    with s3fs.S3FileSystem(anon=False).open(f"s3://{bucket}/{s3_file}", 'rb') as f:
        data = pd.read_json(f)
The stacktrace is the following:
Traceback (most recent call last):
File "/var/task/urllib3/connection.py", line 157, in _new_conn
(self._dns_host, self.port), self.timeout, **extra_kw
File "/var/task/urllib3/util/connection.py", line 84, in create_connection
raise err
File "/var/task/urllib3/util/connection.py", line 74, in create_connection
sock.connect(sa)
TimeoutError: [Errno 110] Connection timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/var/task/botocore/httpsession.py", line 263, in send
chunked=self._chunked(request.headers),
File "/var/task/urllib3/connectionpool.py", line 720, in urlopen
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
File "/var/task/urllib3/util/retry.py", line 376, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/var/task/urllib3/packages/six.py", line 735, in reraise
raise value
File "/var/task/urllib3/connectionpool.py", line 672, in urlopen
chunked=chunked,
File "/var/task/urllib3/connectionpool.py", line 376, in _make_request
self._validate_conn(conn)
File "/var/task/urllib3/connectionpool.py", line 994, in _validate_conn
conn.connect()
File "/var/task/urllib3/connection.py", line 300, in connect
conn = self._new_conn()
File "/var/task/urllib3/connection.py", line 169, in _new_conn
self, "Failed to establish a new connection: %s" % e
urllib3.exceptions.NewConnectionError: <botocore.awsrequest.AWSHTTPSConnection object at 0x7f4d578e3ed0>: Failed to establish a new connection: [Errno 110] Connection timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/var/task/botocore/endpoint.py", line 200, in _do_get_response
http_response = self._send(request)
File "/var/task/botocore/endpoint.py", line 244, in _send
return self.http_session.send(request)
File "/var/task/botocore/httpsession.py", line 283, in send
raise EndpointConnectionError(endpoint_url=request.url, error=e)
botocore.exceptions.EndpointConnectionError: Could not connect to the endpoint URL: "https://my_bucket.s3.eu-west-1.amazonaws.com/?list-type=2&prefix=my_folder%2Fsomething%2F&delimiter=%2F&encoding-type=url"
Has someone faced this same issue? Why would it fail only sometimes? Is there an s3fs configuration that could help with this specific issue?
Actually, there was no problem at all with s3fs. It turned out we were using a Lambda function with two subnets within the VPC: one was working normally, but the other wasn't allowed to access S3 resources, so whenever a Lambda instance was spawned on the second subnet it couldn't connect at all.
Fixing this issue was as easy as removing the second subnet.
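If you want to confirm which subnets a function is attached to before removing anything, boto3 can list them (a small sketch; 'my-function' is a placeholder for your function's name):

import boto3

# Print the subnets and security groups the Lambda function runs in.
client = boto3.client('lambda')
cfg = client.get_function_configuration(FunctionName='my-function')
vpc = cfg.get('VpcConfig', {})
print(vpc.get('SubnetIds'))
print(vpc.get('SecurityGroupIds'))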
You could also use boto3, which is supported by AWS, to get the JSON from S3.
import json
import boto3
def lambda_handler(event, context):
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    s3 = boto3.resource('s3')
    file_object = s3.Object(bucket, key)
    json_content = json.loads(file_object.get()['Body'].read())
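If you need the DataFrame from the original snippet rather than a plain dict, the streaming body can be handed straight to pandas, since read_json accepts any file-like object with a read() method (a sketch, assuming pandas is packaged with the function):

import boto3
import pandas as pd

def lambda_handler(event, context):
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    body = boto3.resource('s3').Object(bucket, key).get()['Body']
    data = pd.read_json(body)  # parse the JSON directly from the stream
    return {'rows': len(data)}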
All I'm trying to do is first get the list of links from all anchor tags, and then scrape the text of the tables from each of those scraped links. But all I'm getting is an error. Can anybody please help me?
My code:
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import csv
from datetime import datetime
pages = []
my_url = 'https://deltaimmigration.com.au/Australia-jobs/'
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
col = page_soup.find('td', width="250")
# rows = table.find_all('tr')
# print(len(col))
# print(col.text)
filename = "links.txt"
f = open(filename,'w')
headers = "Link of all jobs "
f.write(headers)
for a in col.find_all('a', href=True):
    link = a['href']
    f_link = link.replace("..", "https://deltaimmigration.com.au")
    pages.append(f_link)

for items in pages:
    tables = items.find('table', width="900")
    table = tables[0]
    for table in tables:
        rows_tables = table.tbody.find_all('tr')
        rows = rows_tables[0].text
        for row in rows:
            print(row.text)

f.close()
Can somebody please help me solve this? Is it not possible to scrape data from the scraped links on the same page?
Error:
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "e:/scrape/link.py", line 8, in <module>
uClient = uReq(my_url)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 525, in open
response = self._open(req, data)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 543, in _open
'_open', req)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 1360, in https_open
context=self._context, check_hostname=self._check_hostname)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 1319, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [WinError 10054] An existing connection was forcibly closed by the remote host>
user@Bidhya-1s MINGW64 /e/scrape
$ C:/Users/user/Anaconda3/python.exe e:/scrape/link.py
Traceback (most recent call last):
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 1317, in do_open
encode_chunked=req.has_header('Transfer-encoding'))
File "C:\Users\user\Anaconda3\lib\http\client.py", line 1244, in request
self._send_request(method, url, body, headers, encode_chunked)
File "C:\Users\user\Anaconda3\lib\http\client.py", line 1290, in _send_request
self.endheaders(body, encode_chunked=encode_chunked)
File "C:\Users\user\Anaconda3\lib\http\client.py", line 1239, in endheaders
self._send_output(message_body, encode_chunked=encode_chunked)
File "C:\Users\user\Anaconda3\lib\http\client.py", line 1026, in _send_output
self.send(msg)
File "C:\Users\user\Anaconda3\lib\http\client.py", line 966, in send
self.connect()
File "C:\Users\user\Anaconda3\lib\http\client.py", line 1414, in connect
server_hostname=server_hostname)
File "C:\Users\user\Anaconda3\lib\ssl.py", line 423, in wrap_socket
session=session
File "C:\Users\user\Anaconda3\lib\ssl.py", line 870, in _create
self.do_handshake()
File "C:\Users\user\Anaconda3\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "e:/scrape/link.py", line 8, in <module>
uClient = uReq(my_url)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 525, in open
response = self._open(req, data)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 543, in _open
'_open', req)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 1360, in https_open
context=self._context, check_hostname=self._check_hostname)
File "C:\Users\user\Anaconda3\lib\urllib\request.py", line 1319, in do_open
raise URLError(err)
urllib.error.URLError: <urlopen error [WinError 10054] An existing connection was forcibly closed by the remote host>
Basically, your code contains many faults and is hard to follow.
I see that you are using find instead of findAll, which means you are only picking up the first URL, and I don't see any request made to that URL in order to parse it. Therefore I couldn't fully understand your desired target, as you haven't included much information. Anyway, from my point of view, I think the code below will help you.
import requests
from bs4 import BeautifulSoup
main = "https://deltaimmigration.com.au/Australia-jobs/"
def First():
    r = requests.get(main)
    soup = BeautifulSoup(r.text, 'html5lib')
    links = []
    with open("links.txt", 'w', newline="", encoding="UTF-8") as f:
        for item in soup.findAll("td", {'width': '250'}):
            item = item.contents[1].get("href")[3:]
            item = f"https://deltaimmigration.com.au/{item}"
            f.write(item + "\n")
            links.append(item)
    print(f"We Have Collected {len(links)} urls")
    return links

def Second():
    links = First()
    with requests.Session() as req:
        for link in links:
            print(f"Extracting {link}")
            r = req.get(link)
            soup = BeautifulSoup(r.text, 'html5lib')
            for item in soup.findAll("table", {'width': '900'}):
                print("*" * 40)
                print(item.text)
                print("*" * 40)

Second()
I am trying to use gevent-socketio with my TurboGears 2 website.
In the .ini file I use:
[server:main]
use = egg:gevent-socketio#paster
transports = xhr-multipart, xhr-polling, websocket
host = 0.0.0.0
port = 8080
When I try to access the controller in the web browser:
@expose('wago.templates.test')
def index(self):
    socketio_manage(request.environ, {"/stat": StatNamespace}, request=request)
    return dict()
I get the following error:
Traceback (most recent call last):
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/wsgiapp.py", line 105, in __call__
response = self.wrapped_dispatch(controller, environ, context)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/wsgiapp.py", line 278, in dispatch
return controller(environ, context)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/controllers/dispatcher.py", line 132, in __call__
response = self._perform_call(context)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/controllers/dispatcher.py", line 113, in _perform_call
r = self._call(func, params, remainder=remainder, context=context)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/controllers/decoratedcontroller.py", line 120, in _call
output = controller_caller(context_config, bound_controller_callable, remainder, params)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/decorators.py", line 42, in _decorated_controller_caller
return application_controller_caller(tg_config, controller, remainder, params)
File "/home/pi/tgenv/lib/python2.7/site-packages/tg/configuration/app_config.py", line 124, in call_controller
return controller(*remainder, **params)
File "/home/pi/tgenv/WAGO/wago/controllers/root.py", line 13, in index
socketio_manage(request.environ, {"/stat": StatNamespace}, request=request)
File "/home/pi/tgenv/lib/python2.7/site-packages/socketio/__init__.py", line 67, in socketio_manage
socket = environ['socketio']
KeyError: 'socketio'
I used several Pyramid tutorials to introduce myself to gevent-socketio.
I tried it with older versions of TurboGears 2, gevent, and gevent-socketio; I also tried this module, but I always get the same error.
I'm pretty new to sockets, so maybe I'm just missing something obvious.
gevent-socketio recognizes socket requests only on a specific URL (socket.io/1/).
Because TurboGears maps Python function names to URLs, it is not possible to use "." or "1" in the regular way. A simple solution is a catch-all _default controller method:
@expose()
def _default(self, *args):
    args = list(args)
    if "socketio" in request.environ:
        # do socketio stuff, for example:
        socketio_manage(request.environ, {"/stat": StatNamespace}, request=request)
    else:
        abort(404)
I have added djcelery and kombu.transport.django to my Django apps.
settings.py:
import djcelery
djcelery.setup_loader()
from .celery import app as celery_app
BROKER_URL = "redis://abc:abc@localhost:6379/0"
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
CELERY_TIMEZONE = "Europe/London"
CELERY_ENABLE_UTC = True
# store AsyncResult in redis
CELERY_RESULT_BACKEND = "redis"
REDIS_HOST = "localhost"
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_VHOST = 0
REDIS_USER="xyz"
REDIS_PASSWORD="xyz"
REDIS_CONNECT_RETRY = True
celery.py
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
from . import celeryconfig
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
app = Celery('apps')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
Celery supervisor configuration:
[program:celeryd]
command=/home/vagrant/.virtualenvs/project/bin/python /vagrant/project/manage.py celeryd -B -E -c 1 --settings=config.settings
directory=/vagrant/project
user=vagrant
autostart=true
autorestart=true
stdout_logfile=/var/log/supervisor/celeryd.log
stderr_logfile=/var/log/supervisor/celeryd_err.log
When a task that is supposed to download some files is scheduled using the django-celery interface, the following errors are thrown:
Traceback (most recent call last):
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/beat.py", line 203, in maybe_due
result = self.apply_async(entry, publisher=publisher)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/beat.py", line 259, in apply_async
entry, exc=exc)), sys.exc_info()[2])
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/beat.py", line 251, in apply_async
**entry.options)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/app/task.py", line 555, in apply_async
**dict(self._get_exec_options(), **options)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/app/base.py", line 323, in send_task
reply_to=reply_to or self.oid, **options
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/celery/app/amqp.py", line 302, in publish_task
**kwargs
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/messaging.py", line 168, in publish
routing_key, mandatory, immediate, exchange, declare)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/connection.py", line 440, in _ensured
return fun(*args, **kwargs)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/messaging.py", line 180, in _publish
[maybe_declare(entity) for entity in declare]
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/messaging.py", line 111, in maybe_declare
return maybe_declare(entity, self.channel, retry, **retry_policy)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/common.py", line 99, in maybe_declare
return _maybe_declare(entity)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/common.py", line 110, in _maybe_declare
entity.declare()
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/entity.py", line 505, in declare
self.queue_declare(nowait, passive=False)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/entity.py", line 531, in queue_declare
nowait=nowait)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/transport/virtual/__init__.py", line 447, in queue_declare
return queue_declare_ok_t(queue, self._size(queue), 0)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/transport/redis.py", line 651, in _size
sizes = cmds.execute()
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/redis/client.py", line 2157, in execute
conn.disconnect()
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/transport/redis.py", line 800, in disconnect
channel._on_connection_disconnect(self)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/transport/redis.py", line 461, in _on_connection_disconnect
self.connection.cycle._on_connection_disconnect(connection)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/transport/redis.py", line 259, in _on_connection_disconnect
self.poller.unregister(connection._sock)
File "/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/utils/eventio.py", line 85, in unregister
self._epoll.unregister(fd)
SchedulingError: Couldn't apply scheduled task downloader: argument must be an int, or have a fileno() method.
I'm not getting any clue as to where these errors are coming from...
Any suggestions will be greatly appreciated.
Update 1:
I use the django-celery admin interface to schedule a task, and the task is defined inside an app registered in INSTALLED_APPS as follows:
@shared_task
def downloader():
    # execute static/shell_scripts/feed_downloader.sh
    import os
    import subprocess
    PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    subprocess.call(PROJECT_PATH + "/static/shell_scripts/feed_downloader.sh", shell=True)
The feed_downloader.sh file calls a Django management command, which works fine if I execute it independently:
python manage.py feed_downloader
If I add a print statement just before line 85, to check the fd parameter in the file
/home/vagrant/.virtualenvs/project/local/lib/python2.7/site-packages/kombu/utils/eventio.py
then the following messages get printed:
see this <socket._socketobject object at 0x131b750>
see this None
see this <socket._socketobject object at 0x131bc90>
see this None
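For what it's worth, the "see this None" lines line up with the error text: kombu is passing connection._sock to epoll.unregister() after the socket has already been cleared, and the standard library raises exactly that message when handed None. A minimal reproduction (Linux only, since epoll is Linux-specific):

import select

ep = select.epoll()
try:
    ep.unregister(None)  # what kombu effectively does when _sock is None
except TypeError as e:
    print(e)  # argument must be an int, or have a fileno() method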