I'm trying to use RabbitMQ as the broker for Celery, with HashiCorp Vault handling credential rotation. How do I use Vault's RabbitMQ secrets engine with Celery?
import requests

def get_broker_url():
    # Fetch dynamic RabbitMQ credentials from Vault's rabbitmq secrets engine.
    url = "http://localhost:8200/v1/rabbitmq/creds/dev-role"
    headers = {'X-Vault-Token': 'VAULT_TOKEN'}
    response = requests.get(url, headers=headers).json()
    return "amqp://{username}:{password}@127.0.0.1:5672/".format(
        username=response["data"]["username"],
        password=response["data"]["password"],
    )
And while initialising Celery:
app = Celery(app_name, broker=get_broker_url(), backend="rpc://")
I'm getting this error when the lease expires:
[2022-11-24 13:05:54,489: WARNING/MainProcess] consumer: Connection to broker lost. Trying to re-establish the connection...
Traceback (most recent call last):
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 332, in start
blueprint.start(self)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 628, in start
c.loop(*c.loop_args())
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/loops.py", line 97, in asynloop
next(loop)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/asynchronous/hub.py", line 362, in create_loop
cb(*cbargs)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/transport/base.py", line 235, in on_readable
reader(loop)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/transport/base.py", line 217, in _read
drain_events(timeout=0)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 525, in drain_events
while not self.blocking_read(timeout):
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 531, in blocking_read
return self.on_inbound_frame(frame)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/method_framing.py", line 53, in on_frame
callback(channel, method_sig, buf, None)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 538, in on_inbound_method
method_sig, payload, content,
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/abstract_channel.py", line 156, in dispatch_method
listener(*args)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 668, in _on_close
(class_id, method_id), ConnectionError)
amqp.exceptions.ConnectionForced: (0, 0): (320) CONNECTION_FORCED - user 'root-3a978f79-e2fb-2889-08d4-0c69d2bcbffd' is deleted
[2022-11-24 13:05:54,498: WARNING/MainProcess] /Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py:367: CPendingDeprecationWarning:
In Celery 5.1 we introduced an optional breaking change which
on connection loss cancels all currently executed tasks with late acknowledgement enabled.
These tasks cannot be acknowledged as the connection is gone, and the tasks are automatically redelivered back to the queue.
You can enable this behavior using the worker_cancel_long_running_tasks_on_connection_loss setting.
In Celery 5.1 it is set to False by default. The setting will be set to True by default in Celery 6.0.
warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning)
[2022-11-24 13:05:54,505: CRITICAL/MainProcess] Unrecoverable error: AccessRefused(403, 'ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.', (0, 0), '')
Traceback (most recent call last):
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/worker.py", line 203, in start
self.blueprint.start(self)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 365, in start
return self.obj.start()
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 332, in start
blueprint.start(self)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/bootsteps.py", line 116, in start
step.start(parent)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/connection.py", line 21, in start
c.connection = c.connect()
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 428, in connect
conn = self.connection_for_read(heartbeat=self.amqheartbeat)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 435, in connection_for_read
self.app.connection_for_read(heartbeat=heartbeat))
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/celery/worker/consumer/consumer.py", line 462, in ensure_connected
callback=maybe_shutdown,
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/connection.py", line 381, in ensure_connection
self._ensure_connection(*args, **kwargs)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/connection.py", line 437, in _ensure_connection
callback, timeout=timeout
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/utils/functional.py", line 312, in retry_over_time
return fun(*args, **kwargs)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/connection.py", line 877, in _connection_factory
self._connection = self._establish_connection()
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/connection.py", line 812, in _establish_connection
conn = self.transport.establish_connection()
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/kombu/transport/pyamqp.py", line 201, in establish_connection
conn.connect()
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 329, in connect
self.drain_events(timeout=self.connect_timeout)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 525, in drain_events
while not self.blocking_read(timeout):
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 531, in blocking_read
return self.on_inbound_frame(frame)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/method_framing.py", line 53, in on_frame
callback(channel, method_sig, buf, None)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 538, in on_inbound_method
method_sig, payload, content,
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/abstract_channel.py", line 156, in dispatch_method
listener(*args)
File "/Users/vg/celery-mq-demo/venv/lib/python3.7/site-packages/amqp/connection.py", line 668, in _on_close
(class_id, method_id), ConnectionError)
amqp.exceptions.AccessRefused: (0, 0): (403) ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.
Since the broker user's lease expires based on the TTL, how do I supply a new broker credential for Celery to use?
Celery version: 5.2.7
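I'm not aware of a built-in way for Celery to pick up a new broker credential mid-run, so one workaround is to keep the original credential alive instead: renew the Vault lease in a background thread before the TTL expires, so the RabbitMQ user is never deleted while the worker runs. Below is a minimal sketch assuming the same Vault address, token, and role as in the question; sys/leases/renew is Vault's documented lease-renewal endpoint. Note that renewal only works up to the role's max_ttl, after which the worker still needs a restart with freshly fetched credentials.

import threading
import time

import requests
from celery import Celery

VAULT_ADDR = "http://localhost:8200"
HEADERS = {'X-Vault-Token': 'VAULT_TOKEN'}

def fetch_rabbitmq_creds():
    # Returns the lease id, its TTL in seconds, and the username/password pair.
    resp = requests.get(VAULT_ADDR + "/v1/rabbitmq/creds/dev-role", headers=HEADERS).json()
    return resp["lease_id"], resp["lease_duration"], resp["data"]

def start_lease_renewer(lease_id, lease_duration):
    # Renew at roughly two-thirds of the TTL so the RabbitMQ user stays valid
    # for the worker's lifetime (only possible until the role's max_ttl).
    def renew_loop():
        while True:
            time.sleep(max(lease_duration * 2 // 3, 1))
            requests.put(
                VAULT_ADDR + "/v1/sys/leases/renew",
                headers=HEADERS,
                json={"lease_id": lease_id, "increment": lease_duration},
            )
    threading.Thread(target=renew_loop, daemon=True).start()

lease_id, ttl, creds = fetch_rabbitmq_creds()
start_lease_renewer(lease_id, ttl)
app = Celery(app_name, broker="amqp://{username}:{password}@127.0.0.1:5672/".format(**creds), backend="rpc://")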
I'm testing this locally where I have a ~/.aws/config file.
~/.aws/config looks something like:
[profile a]
...
[profile b]
...
I also have an AWS_PROFILE environment variable set to "a".
I would like to use pandas to read a file that is accessible with profile b.
I am able to access it through s3fs by doing:
import pandas as pd
import s3fs

fs = s3fs.S3FileSystem(profile="b")
fs.get("BUCKET/FILE.parquet", "FILE.parquet")  # download locally first
pd.read_parquet("FILE.parquet")
However, if I try to pass this to pd.read_parquet using storage_options I get a PermissionError: Forbidden.
pd.read_parquet(
    "s3://BUCKET/FILE.parquet",
    storage_options={"profile": "b"},
)
Full traceback below:
Traceback (most recent call last):
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/s3fs/core.py", line 233, in _call_s3
out = await method(**additional_kwargs)
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/aiobotocore/client.py", line 154, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (403) when calling the HeadObject operation: Forbidden
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/pandas/io/parquet.py", line 459, in read_parquet
return impl.read(
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/pandas/io/parquet.py", line 221, in read
return self.api.parquet.read_table(
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/pyarrow/parquet.py", line 1672, in read_table
dataset = _ParquetDatasetV2(
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/pyarrow/parquet.py", line 1504, in __init__
if filesystem.get_file_info(path_or_paths).is_file:
File "pyarrow/_fs.pyx", line 438, in pyarrow._fs.FileSystem.get_file_info
File "pyarrow/error.pxi", line 122, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/_fs.pyx", line 1004, in pyarrow._fs._cb_get_file_info
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/pyarrow/fs.py", line 226, in get_file_info
info = self.fs.info(path)
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/fsspec/asyn.py", line 72, in wrapper
return sync(self.loop, func, *args, **kwargs)
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/fsspec/asyn.py", line 53, in sync
raise result[0]
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/fsspec/asyn.py", line 20, in _runner
result[0] = await coro
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/s3fs/core.py", line 911, in _info
out = await self._call_s3(
File "/home/ray/local/bin/anaconda3/envs/main/lib/python3.8/site-packages/s3fs/core.py", line 252, in _call_s3
raise translate_boto_error(err)
PermissionError: Forbidden
Note: there is an old question somewhat related to this but it didn't help: How to read parquet file from s3 using dask with specific AWS profile
You just need to add the following argument to the function:
storage_options=dict(profile='your_profile_name')
Hence the read statement is:
pd.read_parquet("s3://your_bucket",storage_options=dict(profile='your_profile_name'))
I am trying to install NumPy but it is giving this error. What should I do?
ERROR: Exception:
Traceback (most recent call last):
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\urllib3\response.py", line 425, in _error_catcher
yield
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\urllib3\response.py", line 507, in read
data = self._fp.read(amt) if not fp_closed else b""
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\cachecontrol\filewrapper.py", line 62, in read
data = self.__fp.read(amt)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\http\client.py", line 454, in read
n = self.readinto(b)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\http\client.py", line 498, in readinto
n = self.fp.readinto(b)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\socket.py", line 669, in readinto
return self._sock.recv_into(b)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\ssl.py", line 1241, in recv_into
return self.read(nbytes, buffer)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\ssl.py", line 1099, in read
return self._sslobj.read(len, buffer)
socket.timeout: The read operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\cli\base_command.py", line 186, in _main
status = self.run(options, args)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\commands\install.py", line 331, in run
resolver.resolve(requirement_set)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\legacy_resolve.py", line 177, in resolve
discovered_reqs.extend(self._resolve_one(requirement_set, req))
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\legacy_resolve.py", line 333, in _resolve_one
abstract_dist = self._get_abstract_dist_for(req_to_install)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\legacy_resolve.py", line 282, in _get_abstract_dist_for
abstract_dist = self.preparer.prepare_linked_requirement(req)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\operations\prepare.py", line 480, in prepare_linked_requirement
local_path = unpack_url(
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\operations\prepare.py", line 282, in unpack_url
return unpack_http_url(
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\operations\prepare.py", line 158, in unpack_http_url
from_path, content_type = _download_http_url(
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\operations\prepare.py", line 303, in _download_http_url
for chunk in download.chunks:
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\utils\ui.py", line 160, in iter
for x in it:
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_internal\network\utils.py", line 15, in response_chunks
for chunk in response.raw.stream(
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\urllib3\response.py", line 564, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\urllib3\response.py", line 529, in read
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "c:\users\cutea\appdata\local\programs\python\python38-32\lib\site-packages\pip\_vendor\urllib3\response.py", line 430, in _error_catcher
raise ReadTimeoutError(self._pool, None, "Read timed out.")
pip._vendor.urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='files.pythonhosted.org', port=443): Read timed out.
Look directly at the last line:
Read timed out
Connect to Wi-Fi or a faster internet connection and try again.
My internet connection was poor when I got this error; I retried on a faster connection and it worked for me.
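If retrying on a better connection isn't an option, pip's standard timeout and retry flags may also help, for example:

pip install numpy --default-timeout=100 --retries 10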
The issue started appearing over the weekend. It appears to be a Dataflow issue.
Previously, I was able to execute the script and write TF records just fine. However, now, I am unable to initialize the computation graph to process the data.
The traceback is:
Traceback (most recent call last):
File "my_script.py", line 1492, in <module>
MyBeamClass()
File "my_script.py", line 402, in __init__
self.run()
File "my_script.py", line 514, in run
transform_fn_io.WriteTransformFn(path=self.JOB_DIR + '/transform/'))
File "/anaconda3/envs/ml27/lib/python2.7/site-packages/apache_beam/pipeline.py", line 426, in __exit__
self.run().wait_until_finish()
File "/anaconda3/envs/ml27/lib/python2.7/site-packages/apache_beam/runners/dataflow/dataflow_runner.py", line 1238, in wait_until_finish
(self.state, getattr(self._runner, 'last_error_msg', None)), self)
apache_beam.runners.dataflow.dataflow_runner.DataflowRuntimeException: Dataflow pipeline failed. State: FAILED, Error:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 649, in do_work
work_executor.execute()
File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/executor.py", line 176, in execute
op.start()
File "apache_beam/runners/worker/operations.py", line 531, in apache_beam.runners.worker.operations.DoOperation.start
def start(self):
File "apache_beam/runners/worker/operations.py", line 532, in apache_beam.runners.worker.operations.DoOperation.start
with self.scoped_start_state:
File "apache_beam/runners/worker/operations.py", line 533, in apache_beam.runners.worker.operations.DoOperation.start
super(DoOperation, self).start()
File "apache_beam/runners/worker/operations.py", line 202, in apache_beam.runners.worker.operations.Operation.start
def start(self):
File "apache_beam/runners/worker/operations.py", line 206, in apache_beam.runners.worker.operations.Operation.start
self.setup()
File "apache_beam/runners/worker/operations.py", line 480, in apache_beam.runners.worker.operations.DoOperation.setup
with self.scoped_start_state:
File "apache_beam/runners/worker/operations.py", line 485, in apache_beam.runners.worker.operations.DoOperation.setup
pickler.loads(self.spec.serialized_fn))
File "/usr/local/lib/python2.7/dist-packages/apache_beam/internal/pickler.py", line 247, in loads
return dill.loads(s)
File "/usr/local/lib/python2.7/dist-packages/dill/_dill.py", line 317, in loads
return load(file, ignore)
File "/usr/local/lib/python2.7/dist-packages/dill/_dill.py", line 305, in load
obj = pik.load()
File "/usr/lib/python2.7/pickle.py", line 864, in load
dispatch[key](self)
File "/usr/lib/python2.7/pickle.py", line 1232, in load_build
for k, v in state.iteritems():
AttributeError: 'str' object has no attribute 'iteritems'
I am using tensorflow==1.13.1, tensorflow-transform==0.9.0, and apache_beam==2.7.0.
with beam.Pipeline(options=self.pipe_opt) as p:
    with beam_impl.Context(temp_dir=self.google_cloud_options.temp_location):
        # rest of the script
        _ = (
            transform_fn
            | 'WriteTransformFn' >>
            transform_fn_io.WriteTransformFn(path=self.JOB_DIR + '/transform/'))
I was experiencing the same error.
It seems to be triggered by a mismatch between the tensorflow-transform version on your local (or master) machine and the one on the workers (specified in the setup.py file).
In my case I was running tensorflow-transform==0.13 on my local machine, whereas the workers were running 0.8.
Downgrading the local version to 0.8 fixed the issue.
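For reference, pinning the worker dependencies so they match the local environment might look like the sketch below; the package name and version pins are placeholders for whatever you run locally, and the file is handed to Dataflow via the standard --setup_file pipeline option (e.g. --setup_file=./setup.py):

# setup.py -- placeholder name/version; pin the versions you run locally
import setuptools

setuptools.setup(
    name='my-dataflow-job',
    version='0.1.0',
    install_requires=[
        'tensorflow-transform==0.9.0',
        'apache-beam[gcp]==2.7.0',
    ],
    packages=setuptools.find_packages(),
)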
An end user somehow ended up with a blank sessionid cookie (as in "sessionid=;"). This causes the following call stack (triggered by the access to request.user) when using Django in conjunction with GAE:
File "/src/django/utils/functional.py", line 204, in inner
self._setup()
File "/src/django/utils/functional.py", line 270, in _setup
self._wrapped = self._setupfunc()
File "/src/django/contrib/auth/middleware.py", line 18, in <lambda>
request.user = SimpleLazyObject(lambda: get_user(request))
File "/src/django/contrib/auth/middleware.py", line 10, in get_user
request._cached_user = auth.get_user(request)
File "/src/django/contrib/auth/__init__.py", line 136, in get_user
user_id = request.session[SESSION_KEY]
File "/src/django/contrib/sessions/backends/base.py", line 44, in __getitem__
return self._session[key]
File "/src/django/contrib/sessions/backends/base.py", line 167, in _get_session
self._session_cache = self.load()
File "/src/django/contrib/sessions/backends/cached_db.py", line 39, in load
expire_date__gt=timezone.now()
File "/src/django/db/models/manager.py", line 143, in get
return self.get_query_set().get(*args, **kwargs)
File "/src/django/db/models/query.py", line 398, in get
num = len(clone)
File "/src/django/db/models/query.py", line 106, in __len__
self._result_cache = list(self.iterator())
File "/src/django/db/models/query.py", line 317, in iterator
for row in compiler.results_iter():
File "/src/djangotoolbox/db/basecompiler.py", line 375, in results_iter
results = self.build_query(fields).fetch(
File "/src/djangotoolbox/db/basecompiler.py", line 481, in build_query
query.add_filters(self.query.where)
File "/src/djangotoolbox/db/basecompiler.py", line 174, in add_filters
self.add_filters(child)
File "/src/djangotoolbox/db/basecompiler.py", line 176, in add_filters
field, lookup_type, value = self._decode_child(child)
File "/src/djangotoolbox/db/basecompiler.py", line 216, in _decode_child
lookup_type, value, field, annotation)
File "/src/djangotoolbox/db/basecompiler.py", line 254, in _normalize_lookup_value
return self.ops.value_for_db(value, field, lookup_type)
File "/src/djangoappengine/db/base.py", line 128, in value_for_db
return super_value_for_db(value, field, lookup)
File "/src/djangotoolbox/db/base.py", line 245, in value_for_db
field_kind, db_type, lookup)
File "/src/djangoappengine/db/base.py", line 160, in _value_for_db
raise DatabaseError("Only strings and positive integers "
DatabaseError: Only strings and positive integers may be used as keys on GAE.
This error does not occur if sessionid is set to some invalid non-empty value (such as "sessionid=garbage;"). I think it is related to the following contrast in behavior in a Python shell:
>>> Session.objects.filter(session_key='abc').exists()
0
>>> Session.objects.filter(session_key='').exists()
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/src/django/db/models/query.py", line 610, in exists
return self.query.has_results(using=self.db)
File "/src/django/db/models/sql/query.py", line 445, in has_results
return compiler.has_results()
File "/src/dbindexer/compiler.py", line 32, in has_results
return super(SQLCompiler, self).has_results()
File "/src/djangotoolbox/db/basecompiler.py", line 384, in has_results
return self.get_count(check_exists=True)
File "/src/djangotoolbox/db/basecompiler.py", line 468, in get_count
return self.build_query().count(high_mark)
File "/src/djangotoolbox/db/basecompiler.py", line 481, in build_query
query.add_filters(self.query.where)
File "/src/djangotoolbox/db/basecompiler.py", line 174, in add_filters
self.add_filters(child)
File "/src/djangotoolbox/db/basecompiler.py", line 176, in add_filters
field, lookup_type, value = self._decode_child(child)
File "/src/djangotoolbox/db/basecompiler.py", line 216, in _decode_child
lookup_type, value, field, annotation)
File "/src/djangotoolbox/db/basecompiler.py", line 254, in _normalize_lookup_value
return self.ops.value_for_db(value, field, lookup_type)
File "/src/djangoappengine/db/base.py", line 128, in value_for_db
return super_value_for_db(value, field, lookup)
File "/src/djangotoolbox/db/base.py", line 245, in value_for_db
field_kind, db_type, lookup)
File "/src/djangoappengine/db/base.py", line 160, in _value_for_db
raise DatabaseError("Only strings and positive integers "
DatabaseError: Only strings and positive integers may be used as keys on GAE.
Is this a djangoappengine or djangotoolbox bug, or a Django bug? What's the proper way to prevent this error and treat the user as unauthenticated?
OK, I think I may have to add a middleware class to handle this special case, and place it directly after SessionMiddleware:
import logging

class EmptySessionMiddleware(object):
    def process_request(self, request):
        session = request.session
        if session.session_key is not None and len(session.session_key) == 0:
            logging.info('[EmptySessionMiddleware] setting empty session key to None')
            session._session_key = None
It's a weird special case, but basically the problem is that Django's session middleware only checks for a None session key before looking it up in the db (it doesn't check for an empty string), and an empty-string primary-key query in djangoappengine raises an exception. I'm not sure there's another way to handle this case.
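For completeness, registering it directly after SessionMiddleware would look something like this; myapp.middleware is a placeholder path, and the old-style MIDDLEWARE_CLASSES setting matches the process_request-style class above:

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'myapp.middleware.EmptySessionMiddleware',  # placeholder module path
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # ...
)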