<StdImageFieldFile: None> is not JSON serializable - serialization

I'm using django-stdimage for uploading and resizing images, and it works well.
I'm having a problem, though, with django-allauth: when I try to log in with a social account and a normal account with the same e-mail address already exists, I get the following error:
TypeError at /accounts/facebook/login/callback/
is not JSON serializable
This is the full traceback:
Environment:
Request Method: GET
Request URL: http://localhost:8000/accounts/facebook/login/callback/?code=AQCf7MjgfOAsqf0sS0gup0hqLKyZClQvkGKyWtkORNBru_ITaRNHKgxwaH5RaCSARIb9U1ZgnqhWm3OQAfKW1r5nbVRkKr4fcLWtXdGL85-LYIyuF-NftkJpIhdIMR-VTMF8XXbKescZhxz0hDP_eKl1tKL6uPqWKc8NliWWHh9kOYSS69rAzNRUjZhgx6Zul9sAkV9nRoDo-JunhDRtvOV3crnpr9zAU6jsPDChcJ5dgcRPQ39EoOhrDE16-ia6WF1lFMz_fw1Pgjvo-2jduNG-c9TPyY23A205wm3d1PItoXH2U4GU8j1u5iAg1OIJuvDh-2viQA1disQoM_Du3vUldbX4Plun-yNay2kzNepOyw&state=0J6Ydn3lDKi0
Django Version: 1.8
Python Version: 2.7.6
Installed Applications:
('django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'blog',
'custom_user',
'django_markdown',
'storages',
'parsley',
'stdimage',
'stdimage_serializer',
'rest_framework',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook')
Installed Middleware:
('django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware')
Traceback:
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/core/handlers/base.py" in get_response
132. response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/providers/oauth2/views.py" in view
62. return self.dispatch(request, *args, **kwargs)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/providers/oauth2/views.py" in dispatch
135. return complete_social_login(request, login)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/helpers.py" in complete_social_login
145. return _complete_social_login(request, sociallogin)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/helpers.py" in _complete_social_login
161. ret = _process_signup(request, sociallogin)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/helpers.py" in _process_signup
26. request.session['socialaccount_sociallogin'] = sociallogin.serialize()
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/socialaccount/models.py" in serialize
198. user=serialize_instance(self.user),
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/allauth/utils.py" in serialize_instance
194. return json.loads(json.dumps(data, cls=DjangoJSONEncoder))
File "/usr/lib/python2.7/json/__init__.py" in dumps
250. sort_keys=sort_keys, **kw).encode(obj)
File "/usr/lib/python2.7/json/encoder.py" in encode
207. chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py" in iterencode
270. return _iterencode(o, 0)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/core/serializers/json.py" in default
112. return super(DjangoJSONEncoder, self).default(o)
File "/usr/lib/python2.7/json/encoder.py" in default
184. raise TypeError(repr(o) + " is not JSON serializable")
Exception Type: TypeError at /accounts/facebook/login/callback/
Exception Value: <StdImageFieldFile: None> is not JSON serializable
I can't tell whether it's a django-allauth problem or something else.
This is models.py:
class CustomUser(AbstractBaseUser, PermissionsMixin):
    first_name = models.CharField(max_length=254, blank=True)
    second_name = models.CharField(max_length=254, blank=True)
    email = models.EmailField(blank=True, unique=True)
    date_joined = models.DateTimeField(_('date joined'), default=datetime.now())
    # avatar = models.ImageField('profile picture', upload_to=upload_avatar_to, null=True, blank=True)
    avatar = StdImageField(upload_to=upload_avatar_to, null=True, blank=True,
                           variations={
                               'thumbnail': {'width': 250, 'height': 250, "crop": True}
                           })
    is_active = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'second_name']

    objects = CustomUserManager()

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def save(self, *args, **kwargs):
        super(CustomUser, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return "/users/%s" % urlquote(self.email)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        return self.email

    def __str__(self):
        return self.email

    def get_short_name(self):
        """
        Returns the first name for the user.
        """
        return self.first_name

    def email_user(self, subject, message, from_email=None):
        """
        Sends an email to this user.
        """
        send_email(subject, message, from_email, [self.email])
I've also tried to use django-stdimage-serializer, but when I set up the models file with it and run makemigrations, this is what I get:
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 338, in execute_from_command_line
utility.execute()
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 312, in execute
django.setup()
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/apps/registry.py", line 108, in populate
app_config.import_models(all_models)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/django/apps/config.py", line 198, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/home/stefano/projects/blog-project/blog/models.py", line 6, in <module>
from custom_user.models import CustomUserManager, CustomUser
File "/home/stefano/projects/blog-project/custom_user/models.py", line 52, in <module>
class CustomUser(AbstractBaseUser, PermissionsMixin):
File "/home/stefano/projects/blog-project/custom_user/models.py", line 60, in CustomUser
'thumbnail': {'width': 250, 'height': 250, "crop": True}
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/rest_framework/fields.py", line 1405, in __init__
super(ImageField, self).__init__(*args, **kwargs)
File "/home/stefano/projects/blog-project/blogprojectenv/local/lib/python2.7/site-packages/rest_framework/fields.py", line 1359, in __init__
super(FileField, self).__init__(*args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'upload_to'
What can I do?

If anyone is interested, this answer is exactly what I was looking for.
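For reference, the failure happens inside allauth's serialize_instance(), which pushes the whole user instance through DjangoJSONEncoder, and that encoder has no idea what to do with a FieldFile (StdImageFieldFile subclasses it). Below is a minimal sketch of the kind of workaround involved, assuming you can route the serialization through your own encoder; the class name FileAwareJSONEncoder is made up for illustration:

from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.fields.files import FieldFile


class FileAwareJSONEncoder(DjangoJSONEncoder):
    """Encode file fields as their storage path instead of raising TypeError."""

    def default(self, o):
        # StdImageFieldFile subclasses FieldFile, so this covers it too.
        if isinstance(o, FieldFile):
            return o.name or None
        return super(FileAwareJSONEncoder, self).default(o)

Newer django-allauth releases reportedly handle file fields inside serialize_instance themselves, so upgrading the package may be enough on its own; treat the snippet above as an illustration of the idea rather than a drop-in fix.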

Related

How to store crawled data from Scrapy to FTP as csv?

My Scrapy settings.py:
from datetime import datetime
file_name = datetime.today().strftime('%Y-%m-%d_%H%M_')
save_name = file_name + 'Mobile_Nshopping'
FEED_URI = 'ftp://myusername:mypassword@ftp.mymail.com/uploads/%(save_name)s.csv'
When I run my spider with scrapy crawl my_project_name, I get the error below.
Do I have to create a pipeline?
\scrapy\extensions\feedexport.py:247: ScrapyDeprecationWarning: The `FEED_URI` and `FEED_FORMAT` settings have been deprecated in favor of the `FEEDS` setting. Please see the `FEEDS` setting docs for more details
exporter = cls(crawler)
Traceback (most recent call last):
File "c:\users\viren\appdata\local\programs\python\python38\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\users\viren\appdata\local\programs\python\python38\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\viren\AppData\Local\Programs\Python\Python38\Scripts\scrapy.exe\__main__.py", line 7, in <module>
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\cmdline.py", line 145, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\cmdline.py", line 100, in _run_print_help
func(*a, **kw)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\cmdline.py", line 153, in _run_command
cmd.run(args, opts)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\commands\crawl.py", line 22, in run
crawl_defer = self.crawler_process.crawl(spname, **opts.spargs)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\crawler.py", line 191, in crawl
crawler = self.create_crawler(crawler_or_spidercls)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\crawler.py", line 224, in create_crawler
return self._create_crawler(crawler_or_spidercls)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\crawler.py", line 229, in _create_crawler
return Crawler(spidercls, self.settings)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\crawler.py", line 72, in __init__
self.extensions = ExtensionManager.from_crawler(self)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\middleware.py", line 53, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\middleware.py", line 35, in from_settings
mw = create_instance(mwcls, settings, crawler)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\utils\misc.py", line 167, in create_instance
instance = objcls.from_crawler(crawler, *args, **kwargs)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 247, in from_crawler
exporter = cls(crawler)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 282, in __init__
if not self._storage_supported(uri, feed_options):
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 427, in _storage_supported
self._get_storage(uri, feed_options)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 458, in _get_storage
instance = build_instance(feedcls.from_crawler, crawler)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 455, in build_instance
return build_storage(builder, uri, feed_options=feed_options, preargs=preargs)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 46, in build_storage
return builder(*preargs, uri, *args, **kwargs)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 201, in from_crawler
return build_storage(
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 46, in build_storage
return builder(*preargs, uri, *args, **kwargs)
File "c:\users\viren\appdata\local\programs\python\python38\lib\site-packages\scrapy\extensions\feedexport.py", line 192, in __init__
self.port = int(u.port or '21')
File "c:\users\viren\appdata\local\programs\python\python38\lib\urllib\parse.py", line 174, in port
raise ValueError(message) from None
ValueError: Port could not be cast to integer value as 'Edh=)9sd'
I don't know how to store the CSV on FTP.
Is the error coming because my password is an int?
Is there anything I forgot to write?
Do I have to create a pipeline?
Yes, you probably should create a pipeline. As shown in the Scrapy architecture diagram, the basic concept is this: requests are sent, responses come back and are processed by the spider, and finally the pipeline does something with the items returned by the spider. In your case, you could create a pipeline that saves the data in a CSV file and uploads it to an FTP server (a rough sketch follows). See Scrapy's Item Pipeline documentation for more information.
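A minimal sketch of such a pipeline, assuming the items behave like dicts; the FTP host, credentials, and remote path are placeholders, and error handling is omitted:

import csv
import ftplib
import io


class CsvFtpPipeline:
    """Collect scraped items into an in-memory CSV, then upload it over FTP."""

    def open_spider(self, spider):
        self.buffer = io.StringIO()
        self.writer = None

    def process_item(self, item, spider):
        row = dict(item)
        if self.writer is None:
            # Use the first item's keys as the CSV header.
            self.writer = csv.DictWriter(self.buffer, fieldnames=list(row))
            self.writer.writeheader()
        self.writer.writerow(row)
        return item

    def close_spider(self, spider):
        # Placeholder host/credentials/path: replace with your own values
        # (the remote directory must already exist).
        data = io.BytesIO(self.buffer.getvalue().encode('utf-8'))
        with ftplib.FTP('ftp.mymail.com', 'myusername', 'mypassword') as ftp:
            ftp.storbinary('STOR uploads/output.csv', data)

You would then enable it in settings.py, e.g. ITEM_PIPELINES = {'myproject.pipelines.CsvFtpPipeline': 300} (the module path is an assumption about your project layout).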
I don't know how to store the CSV on FTP. Is the error coming because my password is an int? Is there anything I forgot to write?
I believe this is due to the deprecation warning below (also shown at the top of the output you provided):
ScrapyDeprecationWarning: The FEED_URI and FEED_FORMAT settings have been deprecated in favor of the FEEDS setting. Please see the FEEDS setting docs for more details.
Try replacing FEED_URI with FEEDS; see the Scrapy documentation on FEEDS.
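For example, a FEEDS entry equivalent to the FEED_URI above might look like this (credentials and path are placeholders; FEEDS requires Scrapy 2.1+):

FEEDS = {
    'ftp://myusername:mypassword@ftp.mymail.com/uploads/%(name)s.csv': {
        'format': 'csv',
        'encoding': 'utf8',
    },
}

Note that %(name)s and %(time)s are the built-in URI placeholders; a custom one such as %(save_name)s is only filled in if save_name exists as an attribute on the spider.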
You also need to specify the port.
You can specify this in the settings.
See also the class definition from the Scrapy docs:
class FTPFilesStore:

    FTP_USERNAME = None
    FTP_PASSWORD = None
    USE_ACTIVE_MODE = None

    def __init__(self, uri):
        if not uri.startswith("ftp://"):
            raise ValueError(f"Incorrect URI scheme in {uri}, expected 'ftp'")
        u = urlparse(uri)
        self.port = u.port
        self.host = u.hostname
        self.port = int(u.port or 21)
        self.username = u.username or self.FTP_USERNAME
        self.password = u.password or self.FTP_PASSWORD
        self.basedir = u.path.rstrip('/')
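On the password question: this particular ValueError usually means urlparse() is tripping over unencoded special characters in the credentials part of the URI (so the text after the last ':' is no longer a real port number), not that the password is an integer. A small sketch of percent-encoding the credentials and giving the port explicitly (all values are placeholders):

from urllib.parse import quote

username = quote('myusername', safe='')
password = quote('myp@ss:word', safe='')  # placeholder password with special characters
FEED_URI = 'ftp://%s:%s@ftp.mymail.com:21/uploads/output.csv' % (username, password)

The explicit :21 also covers the point above about specifying the port.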

Scrapy Selenium geckodriver problem - error while trying to scrape

Unhandled error in Deferred:
2020-07-24 09:12:40 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 192, in crawl
    return self._crawl(crawler, *args, **kwargs)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 196, in _crawl
    d = crawler.crawl(*args, **kwargs)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
    return _cancellableInlineCallbacks(gen)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
    _inlineCallbacks(None, g, status)
--- <exception caught here> ---
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
    result = g.send(result)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 87, in crawl
    self.engine = self._create_engine()
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 101, in _create_engine
    return ExecutionEngine(self, lambda _: self.stop())
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/core/engine.py", line 69, in __init__
    self.downloader = downloader_cls(crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/core/downloader/__init__.py", line 83, in __init__
    self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/middleware.py", line 53, in from_crawler
    return cls.from_settings(crawler.settings, crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/middleware.py", line 35, in from_settings
    mw = create_instance(mwcls, settings, crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/utils/misc.py", line 150, in create_instance
    instance = objcls.from_crawler(crawler, *args, **kwargs)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy_selenium/middlewares.py", line 67, in from_crawler
    middleware = cls(
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy_selenium/middlewares.py", line 43, in __init__
    for argument in driver_arguments:
builtins.TypeError: 'NoneType' object is not iterable

2020-07-24 09:12:40 [twisted] CRITICAL:
Traceback (most recent call last):
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
    result = g.send(result)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 87, in crawl
    self.engine = self._create_engine()
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/crawler.py", line 101, in _create_engine
    return ExecutionEngine(self, lambda _: self.stop())
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/core/engine.py", line 69, in __init__
    self.downloader = downloader_cls(crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/core/downloader/__init__.py", line 83, in __init__
    self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/middleware.py", line 53, in from_crawler
    return cls.from_settings(crawler.settings, crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/middleware.py", line 35, in from_settings
    mw = create_instance(mwcls, settings, crawler)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy/utils/misc.py", line 150, in create_instance
    instance = objcls.from_crawler(crawler, *args, **kwargs)
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy_selenium/middlewares.py", line 67, in from_crawler
    middleware = cls(
  File "/home/baku/Dev/workspace/moje-python/scrape_linkedin/venv/lib/python3.8/site-packages/scrapy_selenium/middlewares.py", line 43, in __init__
    for argument in driver_arguments:
TypeError: 'NoneType' object is not iterable
my settings.py
from shutil import which
SELENIUM_DRIVER_NAME = 'firefox'
SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
SELENIUM_BROWSER_EXECUTABLE_PATH = which('firefox')
...
'scrapy_selenium.SeleniumMiddleware': 800,
The permissions on the driver look fine:
:/usr/local/bin$ ll | grep gecko
-rwxrwxrwx 1 baku baku 7008696 lip 24 09:09 geckodriver*
crawler code:
class LinkedInProfileSeleniumSpider(scrapy.Spider):
    name = 'lips'
    allowed_domains = ['www.linkedin.com']

    def start_requests(self):
        yield SeleniumRequest(
            url="https://www.linkedin.com/login/",
            callback=self.proceed_login,
            wait_until=(
                EC.presence_of_element_located(
                    (By.CSS_SELECTOR, "#username")
                )
            ),
            script='window.scrollTo(0, document.body.scrollHeight);',
            wait_time=30
        )

    def proceed_login(self, response):
        # AFTER LOGIN
        driver = response.request.meta['driver']
        ...
Can you please help me figure out why it's failing? Thanks!
(By the way, it works with the Chrome driver but fails with gecko.)
I had the same problem on Mac; this time I'm trying on an Ubuntu machine.
I'm not sure what the issue could be or how to debug it.
It doesn't even reach self.proceed_login; it fails on the first request.
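No answer was posted for this one, but one observation from the traceback: it dies in scrapy_selenium/middlewares.py at for argument in driver_arguments:, which suggests the SELENIUM_DRIVER_ARGUMENTS setting was never set (the middleware then iterates over None). A sketch of the settings with that key added, following the setting names from the scrapy-selenium README (the '-headless' flag is just an example; an empty list should also do):

from shutil import which

SELENIUM_DRIVER_NAME = 'firefox'
SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
SELENIUM_BROWSER_EXECUTABLE_PATH = which('firefox')
SELENIUM_DRIVER_ARGUMENTS = ['-headless']  # must not be left unset/None

This would also be consistent with the Chrome setup working if its driver arguments were configured there.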

`flush` fails when initialized from object

I'm currently running into a permanent failure while using the flush (or commit, which depends on it) method of the SQLAlchemy session under Flask-SQLAlchemy.
The flush always fails with sqlalchemy.exc.ResourceClosedError: This transaction is closed (full error stack below), while running an insert directly through the engine works, as does retrieving data using the query builder.
Also, deleting an item works correctly (through session.delete(model) and session.commit()).
Here is the failing code:
roles_put = Blueprint('roles_put', __name__)


@roles_put.route('<role_id>', methods=['PUT'])
def role_update(role_id):
    role = Role.query.get(role_id)
    if not role:
        role = Role.query.filter_by(name=role_id).first()
    if not role:
        raise IDNotFoundError()
    print(role)
    role.set_data(
        request.form,
        [
            'name', 'manage_user', 'manage_video', 'manage_comment', 'manage_avatar', 'manage_channel', 'manage_reward',
            'manage_role', 'manage_top', 'manage_calendar', 'manage_setting', 'validate_video', 'moderate_comment',
        ]
    )
    MainApi.db.session.add(role)
    MainApi.db.session.flush()
    MainApi.db.session.commit()
    # if not role.save():
    #     raise UpdateError()
    return jsonify(role.serialize())
SQLAlchemy is initialized through:
app = Flask('Name')


def init_db(app, config):
    """
    Init SQLAlchemy DB
    """
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://%s:%s@%s/%s' % (
        config.get('database', 'user'),
        config.get('database', 'password'),
        config.get('database', 'host'),
        config.get('database', 'database')
    )
    print(app.config['SQLALCHEMY_DATABASE_URI'])
    MainApi.db = SQLAlchemy(app)
Role Model:
class RoleModel(MainApi.db.Model):
    __tablename__ = 'roles'

    LOCKED_ROLE_NAMES = ['guest', 'admin', 'logged', 'public']

    id = MainApi.db.Column(MainApi.db.Integer, primary_key=True, unique=True, autoincrement=True)
    name = MainApi.db.Column(MainApi.db.String(40), nullable=False, unique=True)
    # manage rights
    manage_user = MainApi.db.Column(MainApi.db.Boolean, nullable=False, default=False)
    moderate_comment = MainApi.db.Column(MainApi.db.Boolean, nullable=False, default=False)
    created_at = MainApi.db.Column(MainApi.db.DateTime, nullable=False, default=datetime.utcnow)
    last_updated_at = MainApi.db.Column(MainApi.db.DateTime, nullable=True)
    created_by = MainApi.db.Column(
        MainApi.db.Integer,
        MainApi.db.ForeignKey(
            'users.id', ondelete='RESTRICT', onupdate='CASCADE'
        ),
        nullable=True
    )
    last_updated_by = MainApi.db.Column(
        MainApi.db.Integer,
        MainApi.db.ForeignKey(
            'users.id', ondelete='CASCADE', onupdate='CASCADE'
        ), nullable=True
    )
    users = MainApi.db.relationship(
        'UserModel', foreign_keys='UserModel.role_id',
        back_populates='role'
    )

    @staticmethod
    def is_allowed(action, role):
        """
        Check if user having role can make action
        :param action: action name
        :type action: str
        :param role: user role
        :type role: int|str|RoleModel
        :return:
        """
        if isinstance(role, str):
            role = RoleModel.query.filter_by(name=role).first()
        elif isinstance(role, int):
            role = RoleModel.query.get(role)
        if not isinstance(role, RoleModel):
            raise Exception
        return getattr(role, action.strip().replace(' ', '_'))

    @staticmethod
    def get_role_id(role):
        """
        Check if user having role can make action
        :param role: user role
        :type role: int|str|RoleModel
        :return: role id
        :rtype: int
        """
        if isinstance(role, str):
            role = RoleModel.query.filter_by(name=role).first()
        elif isinstance(role, int):
            role = RoleModel.query.get(role)
        if not isinstance(role, RoleModel):
            raise Exception
        return role.id

    def serialize(self):
        users = [u.serialize() for u in self.users] if self.users else []
        return {
            'id': self.id,
            'name': self.name,
            'manage_user': self.manage_user,
            'moderate_comment': self.moderate_comment,
            'created_at': self.created_at,
            'last_updated_at': self.last_updated_at,
            'created_by': self.created_by,
            'last_updated_by': self.last_updated_by,
            'users': users,
        }


@event.listens_for(RoleModel.name, 'set', propagate=True)
def before_set_name(_target, value, old, _initiator):
    print(_initiator)
    print(request.url)
    if request and 'roles/init' not in request.url:
        if old in RoleModel.LOCKED_ROLE_NAMES or value in RoleModel.LOCKED_ROLE_NAMES:
            raise UnauthorizedError()


@event.listens_for(RoleModel, 'before_insert', propagate=True)
def receive_before_insert(_mapper, _connection, target):
    user = Registry.registered('current-user-id')
    target.created_at = datetime.utcnow()
    if user:
        target.created_by = user


@event.listens_for(RoleModel, 'before_update', propagate=True)
def receive_before_update(_mapper, _connection, target):
    user = Registry.registered('current-user-id')
    target.updated_at = datetime.utcnow()
    if user:
        target.updated_by = user
Full error stack:
INFO:werkzeug: * Running on http://localhost:8080/ (Press CTRL+C to quit)
<RoleModel 5>
<sqlalchemy.orm.attributes.Event object at 0x7f5baedf8788>
http://localhost:8080/MainApi/roles/5
ERROR:flask.app:Exception on /MainApi/roles/5 [PUT]
Traceback (most recent call last):
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1177, in _execute_context
conn = self._revalidate_connection()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 469, in _revalidate_connection
raise exc.ResourceClosedError("This Connection is closed")
sqlalchemy.exc.ResourceClosedError: This Connection is closed
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2556, in _flush
flush_context.execute()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
update,
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 978, in _emit_update_statements
statement, multiparams
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
e, util.text_type(statement), parameters, None, None
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 383, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 128, in reraise
raise value.with_traceback(tb)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1177, in _execute_context
conn = self._revalidate_connection()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 469, in _revalidate_connection
raise exc.ResourceClosedError("This Connection is closed")
sqlalchemy.exc.StatementError: (sqlalchemy.exc.ResourceClosedError) This Connection is closed
[SQL: UPDATE roles SET name=%s, manage_video=%s WHERE roles.id = %s]
[parameters: [{'name': 'admin2', 'manage_video': '0', 'roles_id': 5}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/app.py", line 2311, in wsgi_app
response = self.full_dispatch_request()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/app.py", line 1834, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function
return cors_after_request(app.make_response(f(*args, **kwargs)))
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/app.py", line 1737, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/_compat.py", line 36, in reraise
raise value
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/app.py", line 1832, in full_dispatch_request
rv = self.dispatch_request()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/flask/app.py", line 1818, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/titouan/main_api/python-api/main_api/routes/roles/put.py", line 28, in role_update
MainApi.db.session.flush()
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/scoping.py", line 162, in do
return getattr(self.registry(), name)(*args, **kwargs)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2458, in flush
self._flush(objects)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2596, in _flush
transaction.rollback(_capture_exception=True)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 79, in __exit__
compat.reraise(type_, value, traceback)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 129, in reraise
raise value
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2596, in _flush
transaction.rollback(_capture_exception=True)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 509, in rollback
self._assert_active(prepared_ok=True, rollback_ok=True)
File "/home/titouan/main_api/python-api/venv/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 303, in _assert_active
raise sa_exc.ResourceClosedError(closed_msg)
sqlalchemy.exc.ResourceClosedError: This transaction is closed
Version:
Python 3.7.3 (VENV)
MySQL 8 (docker)
Flask-SQLAlchemy==2.4.0 / Flask-SQLAlchemy==2.3.2 (tried both)
Flask==1.0.3
SQLAlchemy==1.3.4
Thanks to anyone having a hint here.
Finally solved it.
The issue was in the model's before_insert and before_update callbacks.
Trying to retrieve the current-user-id from Flask.g seems to have a strange impact on the session. It's certainly linked to the way my Registry class implementation works.
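For readers who land here with a similar symptom: SQLAlchemy's before_insert / before_update mapper events fire in the middle of the flush, so the listener should not query or otherwise go through the Session; it should only touch the target (and, if SQL is really needed, the Connection it is given). A sketch of a listener that reads a plain value from flask.g instead of going through a registry that might hit the session (the g attribute name is an assumption):

from datetime import datetime

from flask import g
from sqlalchemy import event


@event.listens_for(RoleModel, 'before_update', propagate=True)
def receive_before_update(mapper, connection, target):
    # Only touch the target and plain values here; never query or flush.
    target.last_updated_at = datetime.utcnow()
    user_id = getattr(g, 'current_user_id', None)  # assumed attribute name
    if user_id:
        target.last_updated_by = user_id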

Odoo: ValueError("Expected singleton: %s" % self)

I'm modifying the Odoo OpenEduCat exam module to fit the needs of my institution. For that, I have tailored the code as shown below. However, when I click the generate button, Odoo raises an expected-singleton error.
(Screenshots: generate button, error details.)
--Python code--
from openerp import models, fields, api


class OpResultTemplate(models.Model):
    _name = 'op.result.template'
    _description = 'Result Template'
    _rec_name = 'name'

    exam_session_id = fields.Many2one(
        'op.exam.session', 'Exam Session', related='line_ids.exam_session_id', required=False)
    name = fields.Char("Name", size=254, required=True)
    result_date = fields.Date(
        'Result Date', required=True, default=fields.Date.today())
    line_ids = fields.One2many(
        'op.result.template.line', 'result_id', 'Session Lines')
    #### this is for the semester
    inter1_ids = fields.One2many(
        'op.internal1', 'result_id', 'Internal 01')
    inter2_ids = fields.One2many(
        'op.internal2', 'result_id', 'Internal 02')
    model_ids = fields.One2many(
        'op.model', 'result_id', 'Model')
    final_ids = fields.One2many(
        'op.final', 'result_id', 'Semester')
    state = fields.Selection(
        [('normal', 'Normal'), ('semester', 'Semester')],
        string='State', required=True, default='normal')
    # pass_status_ids = fields.Many2many('op.pass.status', string='Pass Status')

    @api.one
    def generate_result(self):
        data = self.read(['state'])[0]
        if data['state'] == 'normal':
            #### Write information into the Marksheet Register, where the generated result goes.
            marksheet_reg_id = self.env['op.marksheet.register'].create({
                'name': 'Mark Sheet for %s' % self.line_ids.exam_session_id.name,
                'exam_session_id': self.line_ids.exam_session_id.id,
                'generated_date': fields.Date.today(),
                'generated_by': self.env.uid,
                'status': 'draft',
                'course_id': self.line_ids.exam_session_id.course_id.name,
                'batch_id': self.line_ids.exam_session_id.batch_id.name,
                'exam_type': self.line_ids.exam_session_id.exam_type.name,
                'semester_id': self.line_ids.exam_session_id.semester_id.name,
            })
            student_list = []  #### array to store the results
            for exam_session in self.line_ids:  #### line_ids is the table in the result generator that lets you choose the exam session
                total_exam = 0.0  # global var
                for exam in exam_session.exam_session_id:  #### the exam lines: the exams/subjects chosen in Result generator -> Exam session
                    total_exam += exam.exam_ids.total_marks
                    for attd in exam.exam_ids.attendees_line:  #### attendees_line contains the student names and marks for each subject
                        result_dict = {  #### this loop writes information to the result line
                            'exam_id': exam.exam_ids.id,
                            'exam_tmpl_id': exam.exam_ids.id,
                            'marks': attd.marks,  #### IMPORTANT: mark the student got in each subject; THIS IS WHERE TO APPLY PERCENTAGES
                            'status': attd.marks >= exam.exam_ids.min_marks and  #### IMPORTANT: decide pass or fail based on the passing mark of each subject
                                      'pass' or 'fail',
                            'per': (100 * attd.marks) / exam.exam_ids.total_marks,  #### NOT IMPORTANT, can be deleted: percentage the student got in each subject
                            'student_id': attd.student_id.id,  #### student name
                            'total_marks': exam.exam_ids.total_marks,  #### total marks of each subject, entered when the exam subject was created
                        }
--Error details--
Odoo Server Error
Traceback (most recent call last):
File "/home/v4d/odoo/openerp/http.py", line 650, in _handle_exception
return super(JsonRequest, self)._handle_exception(exception)
File "/home/v4d/odoo/openerp/http.py", line 687, in dispatch
result = self._call_function(**self.params)
File "/home/v4d/odoo/openerp/http.py", line 323, in _call_function
return checked_call(self.db, *args, **kwargs)
File "/home/v4d/odoo/openerp/service/model.py", line 118, in wrapper
return f(dbname, *args, **kwargs)
File "/home/v4d/odoo/openerp/http.py", line 316, in checked_call
result = self.endpoint(*a, **kw)
File "/home/v4d/odoo/openerp/http.py", line 966, in call
return self.method(*args, **kw)
File "/home/v4d/odoo/openerp/http.py", line 516, in response_wrap
response = f(*args, **kw)
File "/home/v4d/odoo/addons/web/controllers/main.py", line 899, in call_button
action = self._call_kw(model, method, args, {})
File "/home/v4d/odoo/addons/web/controllers/main.py", line 887, in _call_kw
return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
File "/home/v4d/odoo/openerp/api.py", line 250, in wrapper
return old_api(self, *args, **kwargs)
File "/home/v4d/odoo/openerp/api.py", line 421, in old_api
result = new_api(recs, *args, **kwargs)
File "/home/v4d/odoo/openerp/api.py", line 425, in new_api
result = [method(rec, *args, **kwargs) for rec in self]
File "/home/v4d/odoo/addons/openeducat_exam/models/result_template.py", line 71, in generate_result
total_exam += exam.exam_ids.total_marks
File "/home/v4d/odoo/openerp/fields.py", line 821, in get
record.ensure_one()
File "/home/v4d/odoo/openerp/models.py", line 5432, in ensure_one
raise ValueError("Expected singleton: %s" % self)
ValueError: Expected singleton: op.exam(44, 45, 46)
I have tried other solutions found on the Internet, but they didn't seem to work. Please kindly help me deal with this. Thanks in advance.
Here is the issue in your code:
####IMPORTANCE take the mark and decide pass or fail base on passing mark in each subject
'status': attd.marks >= exam.exam_ids.min_marks and 'pass' or 'fail',
exam.exam_ids returns a recordset that can hold several records, and you are trying to read the min_marks field on it, so Odoo cannot tell which record's min_marks you mean and raises the error.
So either you need to pick a single record, e.g. exam.exam_ids[0], or you need to search for the proper record in the one2many model; then you can access the min_marks field.
Properties are created separately for each object (an OOP rule); static properties are accessible via the class.
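As a sketch of the second option applied to the loop from the question, iterating over exam.exam_ids explicitly avoids reading fields on a multi-record recordset (names follow the question's code; untested):

for exam_session in self.line_ids:
    total_exam = 0.0
    for exam in exam_session.exam_session_id:
        # exam.exam_ids can hold several op.exam records; handle them one at a time
        for exam_rec in exam.exam_ids:
            total_exam += exam_rec.total_marks
            for attd in exam_rec.attendees_line:
                result_dict = {
                    'exam_id': exam_rec.id,
                    'exam_tmpl_id': exam_rec.id,
                    'marks': attd.marks,
                    'status': 'pass' if attd.marks >= exam_rec.min_marks else 'fail',
                    'per': (100 * attd.marks) / exam_rec.total_marks,
                    'student_id': attd.student_id.id,
                    'total_marks': exam_rec.total_marks,
                }
                # ... append result_dict to student_list as in the original code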

How to use item pipelines in Scrapy

I'm a new Scrapy user crawling my websites, and I want to store the crawled data in a MySQL database.
myspider.py:
class MininovaSpider(CrawlSpider):
    name = 'myspider'
    allowed_domains = ['example.com']
    start_urls = ['http://www.example.com']
    rules = [Rule(SgmlLinkExtractor(allow=('/categorie/.*'), restrict_xpaths=('//div[@id="contLeftNavig"]',)), 'parse_t')]

    def parse_t(self, response):
        x = HtmlXPathSelector(response)
        torrent = Torrent()
        torrent['url'] = response.url
        torrent['title'] = x.select("//h1[@class='infoAneTitre']/text()").extract()
        torrent['wilaya'] = x.select("//span[@class='ville_t']/text()").extract()
        # torrent['prix'] = x.select("//div[@id='datail_ann']/ul[1]/li[4]/span/text()").extract()
        # torrent['surface'] = x.select("//div[@id='datail_ann']/ul[3]/li[1]/span/text()").extract()
        torrent['description'] = x.select("//div[@class='box_pad']/text()").extract()
        return torrent
For pipelines.py, I modified and used the googledir example. When I run the crawl, I get these errors:
exceptions.AttributeError: 'MininovaSpider' object has no attribute 'iterkeys'
exceptions.TypeError: 'MininovaSpider' object is not subscriptable
pipelines.py:
from scrapy import log
from twisted.enterprise import adbapi
import time
import MySQLdb.cursors
class Pipeline(object):

    def __init__(self):
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
            db='test',
            user='root',
            passwd='',
            cursorclass=MySQLdb.cursors.DictCursor,
            charset='utf8',
            use_unicode=True
        )

    def process_item(self, spider, item):
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item

    def _conditional_insert(self, tx, item):
        tx.execute("select * from database where url = %s", (item['url'] ))
        result = tx.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            tx.execute(
                "insert into database (wilaya,titre, site, lien,resume,timestamp) "
                "values (%s, %s, %s, %s,%s,%s)",
                (item['wilaya'],
                 item['title'],
                 'example.com', item['url'], item['description'],
                 time.time())
            )
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)

    def handle_error(self, e):
        log.err(e)
And the traceback:
Traceback (most recent call last):
File "/usr/lib/python2.7/twisted/internet/defer.py", line 287, in addCallbacks
self._runCallbacks()
File "/usr/lib/python2.7/twisted/internet/defer.py", line 545, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/lib/python2.7/site-packages/scrapy/core/scraper.py", line 208, in _itemproc_finished
item=output, response=response, spider=spider)
File "/usr/lib/python2.7/site-packages/scrapy/utils/signal.py", line 53, in send_catch_log_deferred
*arguments, **named)
--- <exception caught here> ---
File "/usr/lib/python2.7/twisted/internet/defer.py", line 134, in maybeDeferred
result = f(*args, **kw)
File "/usr/lib/python2.7/site-packages/scrapy/xlib/pydispatch/robustapply.py", line 47, in robustApply
return receiver(*arguments, **named)
File "/usr/lib/python2.7/site-packages/scrapy/contrib/feedexport.py", line 177, in item_scraped
slot.exporter.export_item(item)
File "/usr/lib/python2.7/site-packages/scrapy/contrib/exporter/__init__.py", line 109, in export_item
itemdict = dict(self._get_serialized_fields(item))
File "/usr/lib/python2.7/site-packages/scrapy/contrib/exporter/__init__.py", line 60, in _get_serialized_fields
field_iter = item.iterkeys()
exceptions.AttributeError: 'MininovaSpider' object has no attribute 'iterkeys'
2012-01-18 16:00:43-0600 [scrapy] Unhandled Error
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 503, in __bootstrap
self.__bootstrap_inner()
File "/usr/lib/python2.7/threading.py", line 530, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 483, in run
self.__target(*self.__args, **self.__kwargs)
--- <exception caught here> ---
File "/usr/lib/python2.7/twisted/python/threadpool.py", line 207, in _worker
result = context.call(ctx, function, *args, **kwargs)
File "/usr/lib/python2.7/twisted/python/context.py", line 118, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/usr/lib/python2.7/twisted/python/context.py", line 81, in callWithContext
return func(*args,**kw)
File "/usr/lib/python2.7/twisted/enterprise/adbapi.py", line 448, in _runInteraction
result = interaction(trans, *args, **kw)
File "/opt/scrapy/test/pipelines.py", line 33, in _conditional_insert
tx.execute("select * from database where url = %s", (item['url'] ))
exceptions.TypeError: 'MininovaSpider' object is not subscriptable
exceptions.TypeError: 'MininovaSpider' object is not subscriptable
It looks like you have yielded a spider (MininovaSpider) instance somewhere instead of an item. I think there is more code you haven't shown.
Put this into Pipeline.process_item() to confirm:
def process_item(self, spider, item):
    assert isinstance(item, Torrent), 'Here should be Torrent instance!'
    query = self.dbpool.runInteraction(self._conditional_insert, item)
    query.addErrback(self.handle_error)
    return item
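One more thing worth double-checking: in the Scrapy documentation the pipeline hook is process_item(self, item, spider), i.e. the item comes first. With the parameters swapped as in the code above, the spider ends up bound to the item argument, which would produce exactly the "object is not subscriptable" / "no attribute 'iterkeys'" errors shown. A corrected sketch (the trailing comma also makes the SQL parameter a real tuple):

def process_item(self, item, spider):
    # Scrapy calls this with the item first, then the spider.
    query = self.dbpool.runInteraction(self._conditional_insert, item)
    query.addErrback(self.handle_error)
    return item

def _conditional_insert(self, tx, item):
    tx.execute("select * from database where url = %s", (item['url'],))
    ...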