DRF serializer is_valid() == True, but .save() not persisting model object

My list, retrieve, and destroy actions work as expected. The create action even returns HTTP 201 Created:
"POST /api/atoms/?uid=04d38ad99b2a4353a18438c651eac5ab&created_at=2019-05-12T22:30:04.725089Z&updated_at=2019-05-12T22:30:07.053148Z&charge=1&mass=2 HTTP/1.1" 201 152
Within the ViewSet:
class AtomViewSet(viewsets.ViewSet):
    def create(self, request):
        serializer = AtomSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # True
        serializer.save()
        # self.perform_create(serializer)  # doesn't work
        # self.node.save()  # doesn't work
        # headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED)  # headers=headers
Here is what I get when I run serializer.save()
<Atom: {'uid': '04d38ad99b2a4353a18438c651eac5qq', 'created_at': datetime.datetime(2019, 5, 12, 22, 30, 4, 725089, tzinfo=<UTC>), 'updated_at': datetime.datetime(2019, 4, 12, 22, 30, 7, 53148, tzinfo=<UTC>), 'charge': 0.0, 'mass': 1.0}>
Despite that encouraging repr from save(), the object never reaches the database. I'm using ViewSets and a custom serializer against a non-model class. The commented-out self.perform_create(serializer) call fails with:
AttributeError: 'AtomViewSet' object has no attribute 'perform_create'
(perform_create comes from DRF's CreateModelMixin, which plain viewsets.ViewSet does not include).
Serializer:
class AtomSerializer(serializers.Serializer):
    uid = serializers.CharField()
    created_at = serializers.DateTimeField()
    updated_at = serializers.DateTimeField()
    charge = serializers.FloatField()
    mass = serializers.FloatField()

    def create(self, validated_data):
        return Atom(**validated_data)

    def update(self, instance, validated_data):
        for field, value in validated_data.items():
            setattr(instance, field, value)
        return instance
I'm looking at my serializer now and I'm not sure what create and update are supposed to contain:
http://www.cdrf.co/3.1/rest_framework.serializers/Serializer.html

In your AtomSerializer you are not actually saving the object, just creating or modifying an instance of it in memory. Call save() on the instance to persist it to the database.
def create(self, validated_data):
    atom = Atom(**validated_data)
    atom.save()
    # Alternatively: atom = Atom.objects.create(**validated_data)
    return atom
Likewise for your update method, you need to save the object after modifying it.
def update(self, instance, validated_data):
    for field, value in validated_data.items():
        setattr(instance, field, value)
    instance.save()  # <-- saving the instance after setattr
    return instance
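For completeness, the whole corrected serializer would look like this (assuming Atom is a regular Django ORM model; the question mentions a "non-model model", and if Atom.save() is not backed by the ORM, nothing will reach the database no matter what the serializer does):
class AtomSerializer(serializers.Serializer):
    uid = serializers.CharField()
    created_at = serializers.DateTimeField()
    updated_at = serializers.DateTimeField()
    charge = serializers.FloatField()
    mass = serializers.FloatField()

    def create(self, validated_data):
        # objects.create builds and persists the row in one step
        return Atom.objects.create(**validated_data)

    def update(self, instance, validated_data):
        for field, value in validated_data.items():
            setattr(instance, field, value)
        instance.save()
        return instance
And if Atom really is a plain Django model, a ModelSerializer would give you both of these methods for free.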

Related

Odoo 9 context value missing in override method

In Odoo 9 I override the search_read method. The super method works fine. With the data returned I want to apply a filter; the filter value is in the context, assigned on the click of a button in the view.
<button name="status_instalacion" string="Instalación" type="action" icon="fa-wrench fa-2x" context="{'stage_id' : 1, 'current_id': active_id}"/>
The problem occurs when I query the context in the search_read method: it exists, but it doesn't contain the values I placed in it.
Context on click of the button:
self._context
{u'lang': u'en_US', u'stage_id': 1, u'tz': False, u'uid': 1, u'current_id': 40, u'tipo_validacion': u'Sistemas Cr\xedticos', u'sistema_critico': u'AGUA'}
stage_id is the value I want.
Context in search_read:
self._context
{u'lang': u'en_US', u'bin_size': True, u'tipo_validacion': u'Sistemas Cr\xedticos', u'tz': False, u'uid': 1,
u'active_test': False, u'sistema_critico': u'AGUA'}
As you can see, the 'stage_id' value is missing.
I also tried assigning the value to an attribute of the class, but the value never changes; it is always the initial value.
from logging import getLogger

from openerp import api, fields, models

_logger = getLogger(__name__)

class MgmtsystemSistemasEquipos(models.Model):
    """ Equipos."""
    _name = 'mgmtsystem.sistemas.equipos'

    dmy = 99  # ---> this value never changes

    def dummy(self):  # ---> tried calling a function; doesn't work
        return self.dmy

    def set_dummy(self, id):  # ----> set the value
        self.dmy = id or self.dmy

    codigo = fields.Char(
        string=u'Código',
        help=u"Código equipo",
        required=True,
        size=30)

    name = fields.Char(
        string=u'Nombre equipo',
        required=True,
        readonly=False,
        index=True,
        help="Nombre corto equipo",
        size=30)

    stage_id = fields.Many2one(
        'mgmtsystem.action.stage',
        'Fase',
        default=_default_stage,
        readonly=True)

    @api.multi
    def status_instalacion(self):
        import pudb
        pu.db
        # save value to variable dmy to retrieve later
        id = self._context.get('stage_id')
        self.set_dummy(id)

    @api.model
    def search_read(self, domain=None, fields=None, offset=0,
                    limit=None, order=None):
        import pudb
        pu.db
        # here the variable always has the original value (99)
        current_stage_id = self.dmy
        current_stage_id = self.dummy()
        current_stage_id = getattr(self, 'dmy')
        res = super(MgmtsystemSistemasEquipos, self).search_read(
            domain, fields, offset, limit, order)
        current_id = res[0]['id']
        valid_protocols_ids = self._get_ids(
            current_stage_id, current_id,
            'mgmtsystem_equipos_protocolos',
            'mgmtsystem_equipos_protocolos_rel',
            'protocolo_id')
        # remove ids
        res[0]['protocolos_ids'] = valid_protocols_ids
        res[0]['informes_ids'] = valid_informes_ids
        res[0]['anexos_ids'] = valid_anexos_ids
        return res

    # @api.multi
    def _get_ids(self, current_stage_id, current_id, model, model_rel, field_rel):
        import pudb
        pu.db
        # in this method the value of the variable is always the original
        current_stage_id = self.dummy()
        sql = """select a.id from
                 %s as a
                 join %s as b
                 on a.id = b.%s where b.equipo_id = %s
                 and a.stage_id = %s;""" % (model, model_rel, field_rel,
                                            current_id, current_stage_id)
        import psycopg2
        try:
            self.env.cr.execute(sql)
        except psycopg2.ProgrammingError, ex:
            message = 'Error trying to download data from server. \n {0} \n {1}'.format(ex.pgerror, sql)
            _logger.info(message)
            return False
        rows = self.env.cr.fetchall()
        list_of_ids = []
        for row in rows:
            list_of_ids.append(row[0])
        return list_of_ids
I don't know Python very well, and that's the cause of my misunderstanding of how to read the value of the variable. But then again, why is the context modified in the search_read method?
Thank you.
You should try the following:
@api.model
def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None):
    import pudb
    pu.db
    # Here you need to get the value from the context.
    current_stage_id = self._context.get('stage_id', getattr(self, 'dmy'))
    res = super(MgmtsystemSistemasEquipos, self).search_read(
        domain=domain, fields=fields, offset=offset, limit=limit, order=order)
    current_id = res[0]['id']
    valid_protocols_ids = self._get_ids(
        current_stage_id, current_id,
        'mgmtsystem_equipos_protocolos',
        'mgmtsystem_equipos_protocolos_rel',
        'protocolo_id')
    # remove ids
    res[0]['protocolos_ids'] = valid_protocols_ids
    res[0]['informes_ids'] = valid_informes_ids
    res[0]['anexos_ids'] = valid_anexos_ids
    return res
In your code the lines below won't work, simply because there is no recordset available in self (which is correct behaviour: search_read must have the @api.model decorator).
# here the variable always has the original value (99)
current_stage_id = self.dmy
current_stage_id = self.dummy()
current_stage_id = getattr(self, 'dmy')
So just remove those lines and apply some other logic to get the data.

Can I restrict objects in Python3 so that only attributes that I make a setter for are allowed?

I have something called a Node. Both Definition and Theorem are a type of node, but only Definitions should be allowed to have a plural attribute:
class Definition(Node):
    def __init__(self, dic):
        self.type = "definition"
        super(Definition, self).__init__(dic)
        self.plural = move_attribute(dic, {'plural', 'pl'}, strict=False)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        if new_plural is None:
            self._plural = None
        else:
            clean_plural = check_type_and_clean(new_plural, str)
            assert dunderscore_count(clean_plural) >= 2
            self._plural = clean_plural

class Theorem(Node):
    def __init__(self, dic):
        self.type = "theorem"
        super().__init__(dic)
        self.proofs = move_attribute(dic, {'proofs', 'proof'}, strict=False)

        # theorems CANNOT have plurals:
        # if 'plural' in self:
        #     raise KeyError('Theorems cannot have plurals.')
As you can see, Definitions have a plural.setter, but theorems do not. However, the code
theorem = Theorem(some input)
theorem.plural = "some plural"
runs just fine and raises no errors. But I want it to raise an error. As you can see, I tried to check for plurals manually at the bottom of my code shown, but this would only be a patch. I would like to block the setting of ANY attribute that is not expressly defined. What is the best practice for this sort of thing?
I am looking for an answer that satisfies the "chicken" requirement:
I do not think this solves my issue. In both of your solutions, I can
append the code t.chicken = 'hi'; print(t.chicken), and it prints hi
without error. I do not want users to be able to make up new
attributes like chicken.
The short answer is "Yes, you can."
The follow-up question is "Why?" One of the strengths of Python is the remarkable dynamism, and by restricting that ability you are actually making your class less useful (but see edit at bottom).
However, there are good reasons to be restrictive, and if you do choose to go down that route you will need to modify your __setattr__ method:
def __setattr__(self, name, value):
    if name not in ('my', 'attribute', 'names',):
        raise AttributeError('attribute %s not allowed' % name)
    else:
        super().__setattr__(name, value)
There is no need to mess with __getattr__ nor __getattribute__ since they will not return an attribute that doesn't exist.
Here is your code, slightly modified -- I added the __setattr__ method to Node, and added an _allowed_attributes to Definition and Theorem.
class Node:
    def __setattr__(self, name, value):
        if name not in self._allowed_attributes:
            raise AttributeError('attribute %s does not and cannot exist' % name)
        super().__setattr__(name, value)

class Definition(Node):
    _allowed_attributes = '_plural', 'type'

    def __init__(self, dic):
        self.type = "definition"
        super().__init__(dic)
        self.plural = move_attribute(dic, {'plural', 'pl'}, strict=False)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        if new_plural is None:
            self._plural = None
        else:
            clean_plural = check_type_and_clean(new_plural, str)
            assert dunderscore_count(clean_plural) >= 2
            self._plural = clean_plural

class Theorem(Node):
    _allowed_attributes = 'type', 'proofs'

    def __init__(self, dic):
        self.type = "theorem"
        super().__init__(dic)
        self.proofs = move_attribute(dic, {'proofs', 'proof'}, strict=False)
In use it looks like this:
>>> theorem = Theorem(...)
>>> theorem.plural = 3
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 6, in __setattr__
AttributeError: attribute plural does not and cannot exist
edit
Having thought about this some more, I think a good compromise for what you want, and to actually answer the part of your question about restricting allowed changes to setters only, would be to:
use a metaclass to inspect the class at creation time and dynamically build the _allowed_attributes tuple
modify the __setattr__ of Node to always allow modification/creation of attributes with at least one leading _
This gives you some protection against both misspellings and creation of attributes you don't want, while still allowing programmers to work around or enhance the classes for their own needs.
Okay, the new metaclass looks like:
class NodeMeta(type):
    def __new__(metacls, cls, bases, classdict):
        node_cls = super().__new__(metacls, cls, bases, classdict)
        allowed_attributes = []
        for base in (node_cls, ) + bases:
            for name, obj in base.__dict__.items():
                # a property is settable only if its fset slot is filled
                if isinstance(obj, property) and obj.fset is not None:
                    allowed_attributes.append(name)
        node_cls._allowed_attributes = tuple(allowed_attributes)
        return node_cls
The Node class has two adjustments: include the NodeMeta metaclass and adjust __setattr__ to only block non-underscore leading attributes:
class Node(metaclass=NodeMeta):
    def __init__(self, dic):
        self._dic = dic

    def __setattr__(self, name, value):
        if not name[0] == '_' and name not in self._allowed_attributes:
            raise AttributeError('attribute %s does not and cannot exist' % name)
        super().__setattr__(name, value)
Finally, the Node subclasses Theorem and Definition have the type attribute moved into the class namespace so there is no issue with setting them. As a side note, type is a bad name since it shadows the built-in type; maybe node_type instead?
class Definition(Node):
    type = "definition"
    ...

class Theorem(Node):
    type = "theorem"
    ...
As a final note: even this method is not immune to somebody actually adding or changing attributes, as object.__setattr__(theorem_instance, 'an_attr', 99) can still be used; or (even simpler) the _allowed_attributes can be modified. However, if somebody is going to all that work, they hopefully know what they are doing... and if not, they own all the pieces. ;)
You can check for the attribute every time you access it:
class Theorem(Node):
    ...
    def __getattribute__(self, name):
        if name not in ["allowed", "attribute", "names"]:
            raise AttributeError("attribute " + name + " not allowed")
        else:
            # go through super() here; touching self.__dict__ directly
            # would recurse right back into __getattribute__
            return super().__getattribute__(name)

    def __setattr__(self, name, value):
        if name not in ["allowed", "attribute", "names"]:
            raise AttributeError("attribute " + name + " not allowed")
        else:
            super().__setattr__(name, value)
You can build the allowed method list dynamically as a side effect of a decorator:
allowed_attrs = []

def allowed(f):
    allowed_attrs.append(f.__name__)
    return f
You would also need to add non-method attributes manually.
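For instance, a self-contained sketch of that idea (the class here is a stand-in, not the Node hierarchy from above):
allowed_attrs = []

def allowed(f):
    # record the wrapped function's name as a permitted attribute
    allowed_attrs.append(f.__name__)
    return f

class Definition(object):
    @property
    @allowed  # applied under @property, so f is still a plain function with a __name__
    def plural(self):
        return self._plural

allowed_attrs.append('_plural')  # non-method attributes still added by hand
print(allowed_attrs)  # ['plural', '_plural']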
If you really want to prevent all other dynamic attributes, I assume there's a well-defined time window in which you want to allow adding attributes.
Below I allow it until object initialisation is finished (you can control it with the allow_dynamic_attribute variable).
class A:
    def __init__(self):
        self.allow_dynamic_attribute = True
        self.abc = "hello"
        self._plural = None  # need to give default value
        # A.__setattr__ = types.MethodType(__setattr__, A)
        self.allow_dynamic_attribute = False

    def __setattr__(self, name, value):
        if hasattr(self, 'allow_dynamic_attribute'):
            if not self.allow_dynamic_attribute:
                if not hasattr(self, name):
                    raise Exception
        super().__setattr__(name, value)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        self._plural = new_plural

a = A()
print(a.abc)      # fine
a.plural = "yes"  # fine
print(a.plural)   # fine
a.dkk = "bed"     # raise exception
Or it can be more compact this way; I couldn't figure out how to make MethodType and super() get along together.
import types

def __setattr__(self, name, value):
    if not hasattr(self, name):
        raise Exception
    else:
        # this doesn't work: zero-argument super() needs a __class__ cell,
        # which only functions defined inside a class body get
        super().__setattr__(name, value)

class A:
    def __init__(self):
        self.foo = "hello"

# after this point, there's no more setattr for you
A.__setattr__ = types.MethodType(__setattr__, A)

a = A()
print(a.foo)   # fine
a.bar = "bed"  # raise exception
You can create pseudo-private members that naive outside assignments cannot modify. The variable name should start with two underscores:
class Test(object):
    def __init__(self, t):
        self.__t = t

    def __str__(self):
        return str(self.__t)

t = Test(2)
print(t)  # prints 2
t.__t = 3
print(t)  # prints 2
That said, assigning to such a variable from outside, as we do in t.__t = 3, will not raise an exception; it just silently fails to touch the original.
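What's happening is name mangling: inside the class body __t is rewritten to _Test__t, so the outside assignment creates a new, unrelated attribute. The mangled name is still reachable, which makes this privacy by convention rather than enforcement; a short demonstration reusing the Test class above:
t = Test(2)
t.__t = 3        # silently creates a brand-new attribute literally named '__t'
print(t)         # prints 2: __str__ still reads _Test__t
t._Test__t = 3   # the mangled name does reach the "private" variable
print(t)         # prints 3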
A different approach which you can take to achieve the wanted behavior is using functions. This approach will require "accessing attributes" using functional notation, but if that doesn't bother you, you can get exactly what you want. The following demo "hardcodes" the values, but obviously you can have Theorem() accept an argument and use it to set values to the attributes dynamically.
Demo:
# -*- coding: utf-8 -*-

def Theorem():
    def f(attrib):
        def proofs():
            return ''

        def plural():
            return '◊◊◊◊◊◊◊◊'

        if attrib == 'proofs':
            return proofs()
        elif attrib == 'plural':
            return plural()
        else:
            raise ValueError("Attribute [{}] doesn't exist".format(attrib))
    return f
t = Theorem()
print(t('proofs'))
print(t('plural'))
print(t('wait_for_error'))
OUTPUT

◊◊◊◊◊◊◊◊
Traceback (most recent call last):
  File "/Users/alfasi/Desktop/1.py", line 40, in <module>
    print(t('wait_for_error'))
  File "/Users/alfasi/Desktop/1.py", line 32, in f
    raise ValueError("Attribute [{}] doesn't exist".format(attrib))
ValueError: Attribute [wait_for_error] doesn't exist

How can I use a SQL nextval function in a Django insert - Custom Model Field?

I'm trying to write a model field that, when called on a model insert, will issue a nextval("my_seq") such that the resulting query Django issues is something like:
INSERT INTO app_table (a, b) VALUES ("something", nextval("my_seq"));
So far I've come up with a field where I override get_db_prep_value and make it return the nextval string when the object is being inserted, but the value gets quoted.
How do I get around the value being quoted?
This is what I have for the field:
class MyField(models.Field):
    def __init__(self, *args, **kwargs):
        self.obj = None
        super(MyField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        super(MyField, self).contribute_to_class(cls, name)
        setattr(cls, self.name, self)

    def __get__(self, obj, tp=None):
        if obj is None:
            raise AttributeError('Can only be accessed via instance')
        self.obj = obj
        return obj.__dict__[self.name]

    def __set__(self, obj, value):
        obj.__dict__[self.name] = self.to_python(value)

    def db_type(self, connection):
        return 'varchar(25)'

    def get_db_prep_value(self, value, connection, prepared=False):
        value = super(MyField, self).get_db_prep_value(value, connection, prepared)
        if self.obj is not None and self.obj.id is None:
            if self.obj.flag:
                value = '( "XXX" || nextval("my_seq") )'
        return value

    def to_python(self, value):
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value)

    def get_prep_value(self, value):
        return self.to_python(value)
I am not sure if what you are doing is the best Django practice but here is how you can use nextval.
Add to your field:
def get_placeholder(self, value, connection):
    # no quotes around %s: the database driver binds and quotes the value
    return 'nextval(%s)'
Also consider using SubfieldBase
in Django 1.5+
class MyField(models.Field, metaclass=models.SubfieldBase):
see The SubfieldBase metaclass
in Django < 1.5
__metaclass__ = models.SubfieldBase
see Writing a custom field in Django
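Putting the pieces together, a rough, untested sketch of the insert path (the sequence name and varchar type are the question's own; a real field would make the nextval routing conditional rather than unconditional as here):
class MyField(models.Field):
    def db_type(self, connection):
        return 'varchar(25)'

    def get_placeholder(self, value, connection):
        # this fragment replaces the bare %s in the INSERT statement;
        # the inner %s is still bound by the driver, which quotes it
        return 'nextval(%s)'

    def get_db_prep_value(self, value, connection, prepared=False):
        # with the placeholder above, the bound parameter becomes the
        # sequence name itself (hypothetical wiring, insert path only)
        return 'my_seq'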

pygame - python getting variables with the visitor pattern

I'm trying to use the visitor pattern to get the self.coin value from the first class and return it to the method in the second class, but it's not working; it always returns None. Can anyone help?
class coin_collector(Observer):
    def __init__(self):
        super(Observer, self).__init__()
        self.coin_count = 0
        self.run = True
        self.coin = 0

    def acceptVisitor(self, visitor):
        visitor.visit(self)

    def update(self, observable, other):
        me = coin_collector()
        me.coin_Count(other, True)

    def coin_Count(self, value, TF):
        run = TF
        if run:
            self.coin = value
            print self.coin
        return self.coin

    def __str__(self):
        return self.__class__.__name__

# this is part of a different class in a different file
def visit(self, location):
    location.coin_Count(0, False)

def update(self):
    visitee = coin_collector()
    self.c_Count = self.visit(visitee)  # for some reason this always returns None
    print self.c_Count, "working"  # this always prints None...
OK, so let's start with the coin collector: its coin attribute is set to 0.
You also have a second class with an update and a visit function.
The update function creates a new coin_collector on every call, then assigns the return value of the visit function to self.c_Count. So let's look at the visit function.
It takes a brand-new coin collector and returns None; it would only assign its value parameter to the coin field if you passed True as TF.
After coming back from the coin_Count function, you do not do anything with the result in the visit function, so the return value is lost. That is why, when you assign the result of self.visit, you get None.
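In other words, the minimal fix is to pass True so the assignment happens and to return the value at both hops, something like:
# in the visitor class
def visit(self, location):
    # propagate the collector's return value instead of dropping it
    return location.coin_Count(0, True)

def update(self):
    visitee = coin_collector()
    self.c_Count = self.visit(visitee)
    print self.c_Count, "working"  # now prints 0 rather than None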

"Pythonic" way to "reset" an object's variables?

("variables" here refers to "names", I think, not completely sure about the definition pythonistas use)
I have an object and some methods. These methods all need, and all change, the object's variables. How can I have the object's variables used by the methods but also keep their original values for the other methods, in the way that is most Pythonic and best respects OOP technique?
Should I copy the object every time a method is called? Should I save the original values and have a reset() method to restore them every time a method needs them? Or is there an even better way?
EDIT: I was asked for pseudocode. Since I am more interested in understanding the concept rather than just specifically solving the problem I am encountering I am going to try give an example:
class Player():
    games = 0
    points = 0
    fouls = 0
    rebounds = 0
    assists = 0
    turnovers = 0
    steals = 0

    def playCupGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = K  # just an example

    def playLeagueGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = Z  # just an example
        self.rebounds = W  # example again

    def playTrainingGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = X  # just an example
        self.rebounds = Y  # example again
The above is my class for a Player object (for the example, assume a basketball player). This object has three different methods that all assign values to the player's statistics.
So, let's say the team has two league games and then a cup game. I'd have to make these calls:
p.playLeagueGame()
p.playLeagueGame()
p.playCupGame()
It's obvious that when the second and third calls are made, the previously changed statistics of the player need to be reset. For that, I can either write a reset method that sets all the variables back to 0, or copy the object for every call I make. Or do something completely different.
That's where my question lies: what's the best approach, Python- and OOP-wise?
UPDATE: I suspect that I have hugely overcomplicated this and could easily solve my problem by using local variables in the functions. However, what happens if I have a function inside another function: can I use locals of the outer one inside the inner one?
Not sure if it's "Pythonic" enough, but you can define a "resettable" decorator for the __init__ method that creates a copy of the object's __dict__ and adds a reset() method that switches the current __dict__ to the original one.
Edit - Here's an example implementation:
def resettable(f):
    import copy

    def __init_and_copy__(self, *args, **kwargs):
        f(self, *args, **kwargs)
        self.__original_dict__ = copy.deepcopy(self.__dict__)

        def reset(o=self):
            o.__dict__ = o.__original_dict__

        self.reset = reset

    return __init_and_copy__
class Point(object):
    @resettable
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "%d %d" % (self.x, self.y)

class LabeledPoint(Point):
    @resettable
    def __init__(self, x, y, label):
        self.x = x
        self.y = y
        self.label = label

    def __str__(self):
        return "%d %d (%s)" % (self.x, self.y, self.label)

p = Point(1, 2)
print p  # 1 2
p.x = 15
p.y = 25
print p  # 15 25
p.reset()
print p  # 1 2

p2 = LabeledPoint(1, 2, "Test")
print p2  # 1 2 (Test)
p2.x = 3
p2.label = "Test2"
print p2  # 3 2 (Test2)
p2.reset()
print p2  # 1 2 (Test)
Edit2: Added a test with inheritance
I'm not sure about "Pythonic", but why not just create a reset method in your object that does whatever resetting is required? Call this method as part of your __init__ so you're not duplicating the data (i.e. always (re)initialize it in one place, the reset method).
I would create a dict of all the default values as a data member, then do self.__dict__.update(self.defaults) during __init__, and again at some later point to pull all the values back.
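A minimal sketch of that suggestion (names are illustrative):
class Player(object):
    defaults = {'games': 0, 'points': 0, 'fouls': 0, 'rebounds': 0}

    def __init__(self):
        self.reset()

    def reset(self):
        # copy the class-level defaults into the instance namespace
        self.__dict__.update(self.defaults)

p = Player()
p.points = 30
p.reset()
print p.points  # 0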
More generally, you can use a __setattr__ hook to keep track of every variable that has been changed and later use that data to reset them.
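For example, a rough sketch of such a hook that records the first value each attribute receives and restores those values on demand:
class Tracked(object):
    def __init__(self):
        # bypass our own hook while bootstrapping the tracking dict
        object.__setattr__(self, '_initial', {})
        self.points = 0
        self.fouls = 0

    def __setattr__(self, name, value):
        if name not in self._initial:
            self._initial[name] = value  # remember only the first value
        object.__setattr__(self, name, value)

    def reset(self):
        for name, value in self._initial.items():
            object.__setattr__(self, name, value)

t = Tracked()
t.points = 55
t.reset()
print t.points  # 0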
Sounds like you want to know if your class should be an immutable object. The idea is that, once created, an immutable object can't/shouldn't/wouldn't be changed.
On Python, built-in types like int or tuple instances are immutable, enforced by the language:
>>> a=(1, 2, 3, 1, 2, 3)
>>> a[0] = 9
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: 'tuple' object does not support item assignment
As another example, every time you add two integers a new instance is created:
>>> a=5000
>>> b=7000
>>> d=a+b
>>> d
12000
>>> id(d)
42882584
>>> d=a+b
>>> id(d)
42215680
The id() function returns the identity of the int object 12000 (in CPython, its memory address); every time we compute a+b a new 12000 object instance is created.
User defined immutable classes must be enforced manually, or simply done as a convention with a source code comment:
class X(object):
    """Immutable class. Don't change instance variables values!"""

    def __init__(self, *args):
        self._some_internal_value = ...

    def some_operation(self, arg0):
        new_instance = X(arg0 + ...)
        new_instance._some_internal_operation(self._some_internal_value, 42)
        return new_instance

    def _some_internal_operation(self, a, b):
        """..."""
Either way, it's OK to create a new instance for every operation.
See the Memento design pattern if you want to restore previous state, or the Proxy design pattern if you want the object to seem pristine, as if just created. In any case, you need to put something between what's referenced and its state.
Please comment if you need some code, though I'm sure you'll find plenty on the web if you use the design pattern names as keywords.
# The Memento design pattern
class Scores(object):
    ...

class Player(object):
    def __init__(self, ...):
        ...
        self.scores = None
        self.history = []
        self.reset()

    def reset(self):
        if self.scores:
            self.history.append(self.scores)
        self.scores = Scores()
It sounds like overall your design needs some reworking. What about a PlayerGameStatistics class that would keep track of all that, and either a Player or a Game would hold a collection of these objects?
Also the code you show is a good start, but could you show more code that interacts with the Player class? I'm just having a hard time seeing why a single Player object should have PlayXGame methods -- does a single Player not interact with other Players when playing a game, or why does a specific Player play the game?
A simple reset method (called in __init__ and re-called when necessary) makes a lot of sense. But here's a solution that I think is interesting, if a bit over-engineered: create a context manager. I'm curious what people think about this...
from contextlib import contextmanager

@contextmanager
def resetting(resettable):
    try:
        resettable.setdef()
        yield resettable
    finally:
        resettable.reset()

class Resetter(object):
    def __init__(self, foo=5, bar=6):
        self.foo = foo
        self.bar = bar

    def setdef(self):
        self._foo = self.foo
        self._bar = self.bar

    def reset(self):
        self.foo = self._foo
        self.bar = self._bar

    def method(self):
        with resetting(self):
            self.foo += self.bar
            print self.foo

r = Resetter()
r.method()  # prints 11
r.method()  # still prints 11
To over-over-engineer, you could then create a @resetme decorator:
def resetme(f):
    def rf(self, *args, **kwargs):
        with resetting(self):
            f(self, *args, **kwargs)
    return rf
So that instead of having to explicitly use with, you could just use the decorator:
@resetme
def method(self):
    self.foo += self.bar
    print self.foo
I liked (and tried) the top answer from PaoloVictor. However, I found that it "reset" itself, i.e., if you called reset() a second time it would throw an exception.
I found that it worked repeatably with the following implementation:
def resettable(f):
    import copy

    def __init_and_copy__(self, *args, **kwargs):
        f(self, *args, **kwargs)

        def reset(o=self):
            o.__dict__ = o.__original_dict__
            o.__original_dict__ = copy.deepcopy(self.__dict__)

        self.reset = reset
        self.__original_dict__ = copy.deepcopy(self.__dict__)

    return __init_and_copy__
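Usage is unchanged from the original decorator; the re-snapshot inside reset() is what makes repeated resets safe:
class Point(object):
    @resettable
    def __init__(self, x, y):
        self.x = x
        self.y = y

p = Point(1, 2)
p.x = 15
p.reset()
p.x = 30
p.reset()  # a second reset no longer throws
print p.x  # 1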
It sounds to me like you need to rework your model to at least include a separate "PlayerGameStats" class.
Something along the lines of:
PlayerGameStats = collections.namedtuple(
    "PlayerGameStats", "points fouls rebounds assists turnovers steals")

class Player():
    def __init__(self):
        self.cup_games = []
        self.league_games = []
        self.training_games = []

    def playCupGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.cup_games.append(stats)

    def playLeagueGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.league_games.append(stats)

    def playTrainingGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.training_games.append(stats)
And to answer the question in your edit: yes, nested functions can see variables stored in outer scopes. You can read more about that in the tutorial: http://docs.python.org/tutorial/classes.html#python-scopes-and-namespaces
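A tiny illustration of that scoping rule:
def outer():
    bonus = 10
    def inner(points):
        # inner reads 'bonus' straight from the enclosing function's scope
        return points + bonus
    return inner(5)

print outer()  # 15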
Thanks for the nice input, as I had kind of a similar problem. I'm solving it with a hook on the __init__ method, since I'd like to be able to reset to whatever initial state an object had. Here's my code:
import copy

_tool_init_states = {}

def wrap_init(init_func):
    def init_hook(inst, *args, **kws):
        if inst not in _tool_init_states:
            # if there is a class hierarchy, only the outer scope does work
            _tool_init_states[inst] = None
            res = init_func(inst, *args, **kws)
            _tool_init_states[inst] = copy.deepcopy(inst.__dict__)
            return res
        else:
            return init_func(inst, *args, **kws)
    return init_hook

def reset(inst):
    inst.__dict__.clear()
    inst.__dict__.update(
        copy.deepcopy(_tool_init_states[inst])
    )

class _Resettable(type):
    """Wraps __init__ to store the object's state _after_ init."""
    def __new__(mcs, *more):
        mcs = super(_Resettable, mcs).__new__(mcs, *more)
        mcs.__init__ = wrap_init(mcs.__init__)
        mcs.reset = reset
        return mcs

class MyResettableClass(object):
    __metaclass__ = _Resettable

    def __init__(self):
        self.do_whatever = "you want,"
        self.it_will_be = "resetted by calling reset()"
To update the initial state, you could build some method like reset(...) that writes data into _tool_init_states. I hope this helps somebody. If this is possible without a metaclass, please let me know.
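Usage would then look roughly like this:
obj = MyResettableClass()
obj.do_whatever = "something else"
obj.reset()
print obj.do_whatever  # back to "you want,"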