Serializing Tree into nested list using python - serialization

I have a binary tree class like this:
class BinaryTree:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right
Now I need to serialize this structure into a nested list. BTW, I have a left-to-right traversal function in mind:
def binary_tree(tree):
    if tree:
        for node_data in binary_tree(tree.left):
            yield node_data
        yield tree.data  # yield the node itself between its subtrees, for left-to-right order
        for node_data in binary_tree(tree.right):
            yield node_data
Or is there a general way to serialize it into a mixed nested structure, for example {[]} or [{}]?

As a method of BinaryTree:
def to_dict(self):
    data = self.data
    left = self.left
    if left is not None:
        left = left.to_dict()
    right = self.right
    if right is not None:
        right = right.to_dict()
    return {'data': data, 'left': left, 'right': right}
And as a classmethod of BinaryTree:
@classmethod
def from_dict(cls, D):
    data = D['data']
    left = D['left']
    if left is not None:
        left = cls.from_dict(left)
    right = D['right']
    if right is not None:
        right = cls.from_dict(right)
    return cls(data, left, right)
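Once the tree is a plain dict of dicts, it can be dumped and reloaded with the standard json module. A quick round-trip sketch (the sample values are made up):

import json

tree = BinaryTree(1, BinaryTree(2), BinaryTree(3))
payload = json.dumps(tree.to_dict())     # a JSON string of nested objects
restored = BinaryTree.from_dict(json.loads(payload))
assert restored.left.data == 2 and restored.right.data == 3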


How to display a button in each cell of a QTableWidget's column so that it removes its corresponding row when clicked?

I want to display a button in each cell of a QTableWidget's column. Each button, when clicked, must remove its corresponding row in the table.
To do so, I created a RemoveRowDelegate class with the button as editor and used the QAbstractItemView::openPersistentEditor method in a CustomTable class to display the button permanently.
# imports assumed (PyQt5):
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import (QHeaderView, QStyledItemDelegate, QTableWidget,
                             QTableWidgetItem, QToolButton)

class RemoveRowDelegate(QStyledItemDelegate):
    def __init__(self, parent, cross_icon_path):
        super().__init__(parent)
        self.cross_icon_path = cross_icon_path
        self.table = None

    def createEditor(self, parent, option, index):
        editor = QToolButton(parent)
        editor.setStyleSheet("background-color: rgba(255, 255, 255, 0);")  # Delete borders but maintain the click animation (as opposed to "border: none;")
        pixmap = QPixmap(self.cross_icon_path)
        button_icon = QIcon(pixmap)
        editor.setIcon(button_icon)
        editor.clicked.connect(self.remove_row)
        return editor

    # Delete the corresponding row
    def remove_row(self):
        sending_button = self.sender()
        for i in range(self.table.rowCount()):
            if self.table.cellWidget(i, 0) == sending_button:
                self.table.removeRow(i)
                break
class CustomTable(QTableWidget):
    def __init__(self, parent=None, df=None):
        super().__init__(parent)
        self.columns = []
        self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        self.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        if df is not None:
            self.fill(df)

    # Build the table from a pandas df
    def fill(self, df):
        self.columns = [''] + list(df.columns)
        nb_rows, _ = df.shape
        nb_columns = len(self.columns)
        self.setRowCount(nb_rows)
        self.setColumnCount(nb_columns)
        self.setHorizontalHeaderLabels(self.columns)
        for i in range(nb_rows):
            self.openPersistentEditor(self.model().index(i, 0))
            for j in range(1, nb_columns):
                item = df.iloc[i, j-1]
                table_item = QTableWidgetItem(item)
                self.setItem(i, j, table_item)

    def add_row(self):
        nb_rows = self.rowCount()
        self.insertRow(nb_rows)
        self.openPersistentEditor(self.model().index(nb_rows, 0))

    def setItemDelegateForColumn(self, column_index, delegate):
        super().setItemDelegateForColumn(column_index, delegate)
        delegate.table = self
I set the delegate for the first column of the table and build the latter from a pandas dataframe:
self.table = CustomTable() # Here, self is my user interface
remove_row_delegate = RemoveRowDelegate(self, self.cross_icon_path)
self.table.setItemDelegateForColumn(0, remove_row_delegate)
self.table.fill(df)
For now, this solution does the job, but I can think of several other possibilities:
Using the QTableWidget::setCellWidget method
Overriding the paint method and catching the left click event
But:
I believe the first alternative is not very clean, as I would have to create the buttons in a for loop and again each time a row is added (but then, I also call openPersistentEditor the same way here).
I am wondering if the second alternative is worth the effort. And if it is, how would I do it?
Also:
I believe my remove_row method can be optimized, as I iterate over all rows (that is one of the reasons why I thought about the second alternative). Would you have a better suggestion?
I had to override the setItemDelegateForColumn method so that I can access the table from the RemoveRowDelegate class. Can it be avoided?
Any other remark that you think might be of interest would be greatly appreciated!
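For reference, here is roughly what I mean by the setCellWidget alternative (an untested sketch; the helper names are mine and it assumes the table itself stores cross_icon_path):

from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QToolButton

# methods that would live on CustomTable
def _add_remove_button(self, row):
    button = QToolButton(self)
    button.setIcon(QIcon(QPixmap(self.cross_icon_path)))
    button.clicked.connect(self._remove_clicked)
    self.setCellWidget(row, 0, button)

def _remove_clicked(self):
    # look the button up instead of storing a row index, so the mapping
    # stays valid after rows are inserted or removed
    button = self.sender()
    for row in range(self.rowCount()):
        if self.cellWidget(row, 0) is button:
            self.removeRow(row)
            break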
As suggested by @ekhumoro, I finally used a context menu:
class CustomTable(QTableWidget):
    def __init__(self, parent=None, df=None, add_icon_path=None, remove_icon_path=None):
        super().__init__(parent)
        self.add_icon_path = add_icon_path
        self.remove_icon_path = remove_icon_path
        # Activate the customContextMenuRequested signal and connect it to a method that displays a context menu
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(lambda pos: self.show_context_menu(pos))

    def show_context_menu(self, pos):
        idx = self.indexAt(pos)
        if idx.isValid():
            row_idx = idx.row()
            # Create the context menu and the custom actions
            context_menu = QMenu(parent=self)
            if self.add_icon_path:
                pixmap = QPixmap(self.add_icon_path)
                add_icon = QIcon(pixmap)
                add_row_action = QAction('Insert a line', icon=add_icon)
            else:
                add_row_action = QAction('Insert a line')
            add_row_action.triggered.connect(lambda: self.insertRow(row_idx))
            if self.remove_icon_path:
                pixmap = QPixmap(self.remove_icon_path)
                remove_icon = QIcon(pixmap)
                remove_row_action = QAction('Delete the line', icon=remove_icon)
            else:
                remove_row_action = QAction('Delete the line')
            remove_row_action.triggered.connect(lambda: self.removeRow(row_idx))
            context_menu.addAction(add_row_action)
            context_menu.addAction(remove_row_action)
            # Display the context menu
            context_menu.exec_(self.mapToGlobal(pos))
Moreover, note that this is more efficient than my previous remove_row method: instead of looping over the rows, one just needs to get the row index from the click position with the QTableWidget::indexAt method and call QTableWidget::removeRow.

How to have an auto-completer in a newly-created QTableWidget cell in PyQt5?

I am trying to have an auto-completer in my table cells and it is working, but the problem comes when I add a new row to the table.
1. How can I add the same functionality to newly-created rows?
I use the _addRow method to add a new row.
2. How can I add this functionality to the second or the third column of the table?
Here I use self.locs for the first column, and I would need something like self.tech to match the second column.
Here is the code:
# imports assumed (PyQt5):
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QCompleter, QLineEdit, QStyledItemDelegate,
                             QTableWidget, QTableWidgetItem)

class TableItemCompleter(QStyledItemDelegate):
    def createEditor(self, parent, option, index):
        editor = QLineEdit(parent)
        completionlist = index.data(Qt.UserRole)
        autoCompleter = QCompleter(completionlist, parent)
        autoCompleter.setCaseSensitivity(Qt.CaseInsensitive)
        autoCompleter.setFilterMode(Qt.MatchContains)
        editor.setCompleter(autoCompleter)
        return editor

class TableWidget(QTableWidget):
    def __init__(self, df, action='Edit'):
        super().__init__()
        self.df = df
        self.setStyleSheet('font-size:15px;')
        # Set table dimensions
        rows, cols = self.df.shape
        if action == 'Edit':
            pass
        elif action == 'Append':
            n = 4  # number of rows
            self.setRowCount(n)
            self.setColumnCount(cols)
            self.locs = self.df['LOCATION'].unique().tolist()
            #tech = self.df['TECHNOLOGY'].unique().tolist()
            self.setHorizontalHeaderLabels(list(self.df.columns))
            self.verticalHeader().setDefaultSectionSize(50)
            self.horizontalHeader().setDefaultSectionSize(200)
            self.setItemDelegateForColumn(0, TableItemCompleter())
            #self.setItemDelegateForColumn(TableItemCompleter(), 2)  # how can I have it for the second column?
            for row in range(n-1):
                for col in range(self.columnCount()):
                    item = QTableWidgetItem('')  #str(self.df.tail(n-1).iloc[row,col]))
                    item.setData(Qt.UserRole, self.locs)
                    self.setItem(row, col, item)
                    #self.setItem(row, col, QTableWidgetItem(str(self.df.tail(2).iloc[row,col])))
            self.cellChanged[int, int].connect(self.update_df)
            #self.setItemDelegate(TableItemCompleter())
        else:
            pass

    def _addRow(self):
        rowCount = self.rowCount()
        self.insertRow(rowCount)
Thank you :)
Since the list of completer strings is the same for every row of a given column, there's no point in setting that data on each individual index. A better solution is to initialize the delegate with a "matrix" of completions keyed by column.
CompletionKeys = {
    0: 'LOCATION',
    1: 'TECHNOLOGY',
    2: ...
}
class TableItemCompleter(QStyledItemDelegate):
    def __init__(self, completionMap, parent=None):
        super().__init__(parent)
        self.completers = {}
        for column, completionList in completionMap.items():
            completer = QCompleter(completionList, self)
            completer.setCaseSensitivity(Qt.CaseInsensitive)
            completer.setFilterMode(Qt.MatchContains)
            self.completers[column] = completer

    def createEditor(self, parent, option, index):
        editor = QLineEdit(parent)
        if index.column() in self.completers:
            editor.setCompleter(self.completers[index.column()])
        return editor

class TableWidget(QTableWidget):
    def __init__(self, df, action='Edit'):
        super().__init__()
        self.df = df
        self.setStyleSheet('font-size:15px;')
        # Set table dimensions
        rows, cols = self.df.shape
        if action == 'Edit':
            pass
        elif action == 'Append':
            n = 4  # number of rows
            self.setRowCount(n)
            self.setColumnCount(cols)
            self.setHorizontalHeaderLabels(list(self.df.columns))
            self.verticalHeader().setDefaultSectionSize(50)
            self.horizontalHeader().setDefaultSectionSize(200)
            for row in range(n-1):
                for col in range(cols):
                    item = QTableWidgetItem('')
                    self.setItem(row, col, item)
            completionMap = {}
            for col in range(cols):
                key = CompletionKeys.get(col)
                if key:
                    completionMap[col] = self.df[key].unique().tolist()
            self.setItemDelegate(TableItemCompleter(completionMap, self))
            self.cellChanged[int, int].connect(self.update_df)
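Because the delegate is installed for the whole table and owns its completers, rows added later automatically get the same completion behaviour. A possible _addRow (the name is taken from the question) then only needs to create empty items, for example:

def _addRow(self):
    row = self.rowCount()
    self.insertRow(row)
    for col in range(self.columnCount()):
        # empty items; editing any of them goes through TableItemCompleter
        self.setItem(row, col, QTableWidgetItem(''))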

How to map different indices in Pyomo?

I am a new Pyomo/Python user. I now need to formulate a set of constraints with index 'n', where all three components have different indices but correlate with index 'n'. I am just curious how I can map the relationship between these sets.
In my case, I read csv files whose indices are related to 'n' to generate my sets. For example: a1.n1, a2.n3, a3.n5 /// b1.n2, b2.n4, b3.n6, b4.n7 /// c1.n1, c2.n2, c3.n4, c4.n6 ///. The constraint expressions for indices n1 and n2, for example, are:
for n1: P(a1.n1) + L(c1.n1) == D(n1)
for n2: - F(b1.n2) + L(c2.n2) == D(n2)
Now let's get to the code. The set-creation code is as follows; it lives inside a class:
import pyomo
import pandas
import pyomo.opt
import pyomo.environ as pe

class MyModel:
    def __init__(self, Afile, Bfile, Cfile):
        self.A_data = pandas.read_csv(Afile)
        self.A_data.set_index(['a'], inplace=True)
        self.A_data.sort_index(inplace=True)
        self.A_set = self.A_data.index.unique()
        ... ...
Then I tried to map the relationship in the constraint construction as follows:
def createModel(self):
    self.m = pe.ConcreteModel()
    self.m.A_set = pe.Set(initialize=self.A_set)

    def obj_rule(m):
        return ...
    self.m.OBJ = pe.Objective(rule=obj_rule, sense=pe.minimize)

    def constr(m, n):
        As = self.A_data.reset_index()
        Amap = As[As['n'] == n]['a']
        Bs = self.B_data.reset_index()
        Bmap = Bs[Bs['n'] == n]['b']
        Cs = self.C_data.reset_index()
        Cmap = Cs[Cs['n'] == n]['c']
        return sum(m.P[(p, n)] for p in Amap) - sum(m.F[(s, n)] for s in Bmap) + sum(m.L[(r, n)] for r in Cmap) == self.D_data.ix[n, 'D']
    self.m.cons = pe.Constraint(self.m.D_set, rule=constr)

def solve(self):
    ... ...
Finally, this error is raised when I run it:
KeyError: "Index '(1, 1)' is not valid for indexed component 'P'"
I know it is the wrong way, so I am wondering if there is a good way to map their relationships. Thanks in advance!
Gabriel
I forgot to post the answer to my own question when I solved it a week ago. The key to this problem is setting up a mapped index.
Let me just modify the code in the question. First, we need to modify the dataframes to include the information about the mapped indices. Then the set for the mapped index can be constructed; taking two mapped indices as an example:
self.m.A_set = pe.Set( initialize = self.A_set, dimen = 2 )
The names of the two mapped indices are 'alpha' and 'beta' respectively. Then the constraint can be formulated, based on the variables declared at the beginning:
def constr(m, n):
    Amap = self.A_data[self.A_data['alpha'] == n]['beta']
    Bmap = self.B_data[self.B_data['alpha'] == n]['beta']
    return sum(m.P[(i, n)] for i in Amap) + sum(m.L[(r, n)] for r in Bmap) == D.loc[n, 'D']
m.TravelingBal = pe.Constraint(m.A_set, rule=constr)
The summation then groups all of the B entries associated with each A through the mapped index set.
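For anyone landing here later, a small self-contained sketch of the same mapping idea (all names and data below are made up, not from the original model):

import pyomo.environ as pe

# which 'a' and 'c' components feed each node n
A_of_n = {1: ['a1'], 2: []}
C_of_n = {1: ['c1'], 2: ['c2']}
D = {1: 10.0, 2: 5.0}

m = pe.ConcreteModel()
m.N = pe.Set(initialize=sorted(D))
# 2-dimensional sets holding the (component, node) pairs
m.AN = pe.Set(dimen=2, initialize=[(a, n) for n, alist in A_of_n.items() for a in alist])
m.CN = pe.Set(dimen=2, initialize=[(c, n) for n, clist in C_of_n.items() for c in clist])

m.P = pe.Var(m.AN, within=pe.NonNegativeReals)
m.L = pe.Var(m.CN, within=pe.NonNegativeReals)

def balance_rule(m, n):
    # only the components mapped to this particular n enter the constraint
    return sum(m.P[a, n] for a in A_of_n[n]) + sum(m.L[c, n] for c in C_of_n[n]) == D[n]

m.balance = pe.Constraint(m.N, rule=balance_rule)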

QSortFilterProxyModel does not apply CaseInsensitive

Since I've subclassed QSortFilterProxyModel to be able to search through several columns in a QListView, the CaseInsensitive option no longer works. I've tried to apply it as follows:
class CustomSortFilterProxyModel(QtCore.QSortFilterProxyModel):
    def __init__(self, parent=None):
        super(CustomSortFilterProxyModel, self).__init__(parent)
        self.filterString = ''
        self.filterFunctions = {}
        self.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)  # Applied here

    def setFilterString(self, text):
        self.filterString = str(text)
        self.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)  # And applied here
        self.invalidateFilter()

    def filterAcceptsRow(self, row_num, parent):
        self.filterColumns = [1, 3]
        model = self.sourceModel()
        row = model.row(row_num)
        tests = [self.filterString in row[col] for col in self.filterColumns]
        return True in tests
How come my search string is case sensitive?
The sensitivity you set there only applies to the default filterAcceptsRow implementation. If you override it, you'll need to handle this yourself, by doing something like:
return any(self.filterString.casefold() in row[col].casefold() for col in self.filterColumns)
(see the str.casefold docs)
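Put together, the override from the question would then become something like this (a sketch; it assumes, as in the question, that the source model's row() returns a list of strings):

def filterAcceptsRow(self, row_num, parent):
    filter_columns = [1, 3]
    needle = self.filterString.casefold()
    row = self.sourceModel().row(row_num)
    # casefold both sides so the comparison ignores case
    return any(needle in row[col].casefold() for col in filter_columns)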

"Pythonic" way to "reset" an object's variables?

("variables" here refers to "names", I think, not completely sure about the definition pythonistas use)
I have an object and some methods. These methods all need and all change the object's variables. How can I, in the most pythonic and in the best, respecting the techniques of OOP, way achieve to have the object variables used by the methods but also keep their original values for the other methods?
Should I copy the object everytime a method is called? Should I save the original values and have a reset() method to reset them everytime a method needs them? Or is there an even better way?
EDIT: I was asked for pseudocode. Since I am more interested in understanding the concept rather than just specifically solving the problem I am encountering I am going to try give an example:
class Player():
    games = 0
    points = 0
    fouls = 0
    rebounds = 0
    assists = 0
    turnovers = 0
    steals = 0

    def playCupGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = K  # just an example

    def playLeagueGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = Z  # just an example
        self.rebounds = W  # example again

    def playTrainingGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        self.points = X  # just an example
        self.rebounds = Y  # example again
The above is my class for a Player object (for the example, assume he is a basketball player). This object has three different methods that all assign values to the player's statistics.
So, let's say the team has two league games and then a cup game. I'd have to make these calls:
p.playLeagueGame()
p.playLeagueGame()
p.playCupGame()
It's obvious that when the second and the third calls are made, the previously changed statistics of the player need to be reset. For that, I can either write a reset method that sets all the variables back to 0, or copy the object for every call I make. Or do something completely different.
That's where my question lies: what's the best approach, Python- and OOP-wise?
UPDATE: I suspect I have massively overcomplicated this and could easily solve my problem by using local variables in the functions. However, what happens if I have a function inside another function? Can I use the locals of the outer one inside the inner one?
Not sure if it's "Pythonic" enough, but you can define a "resettable" decorator
for the __init__ method that creates a copy of the object's __dict__ and adds a reset() method that switches the current __dict__ back to the original one.
Edit - Here's an example implementation:
def resettable(f):
    import copy
    def __init_and_copy__(self, *args, **kwargs):
        f(self, *args)
        self.__original_dict__ = copy.deepcopy(self.__dict__)
        def reset(o=self):
            o.__dict__ = o.__original_dict__
        self.reset = reset
    return __init_and_copy__
class Point(object):
    @resettable
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __str__(self):
        return "%d %d" % (self.x, self.y)

class LabeledPoint(Point):
    @resettable
    def __init__(self, x, y, label):
        self.x = x
        self.y = y
        self.label = label
    def __str__(self):
        return "%d %d (%s)" % (self.x, self.y, self.label)
p = Point(1, 2)
print p # 1 2
p.x = 15
p.y = 25
print p # 15 25
p.reset()
print p # 1 2
p2 = LabeledPoint(1, 2, "Test")
print p2 # 1 2 (Test)
p2.x = 3
p2.label = "Test2"
print p2 # 3 2 (Test2)
p2.reset()
print p2 # 1 2 (Test)
Edit2: Added a test with inheritance
I'm not sure about "Pythonic", but why not just create a reset method in your object that does whatever resetting is required? Call this method as part of your __init__ so you're not duplicating the data (i.e., always (re)initialize it in one place: the reset method).
I would create a dict of default values as a data member, then do self.__dict__.update(self.defaults) during __init__ and then again at some later point to pull all the values back.
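A minimal sketch of that idea (the stat names are borrowed from the question):

class Player(object):
    defaults = {
        'points': 0,
        'fouls': 0,
        'rebounds': 0,
        'assists': 0,
        'turnovers': 0,
        'steals': 0,
    }

    def __init__(self):
        self.reset()

    def reset(self):
        # pull every tracked statistic back to its default value
        self.__dict__.update(self.defaults)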
More generally, you can use a __setattr__ hook to keep track of every variable that has been changed and later use that data to reset them.
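For illustration, one way such a hook could look (just a sketch of the idea, not a drop-in solution; the class name is mine):

class SnapshotOnFirstSet(object):
    """Remember the first value ever assigned to each attribute so reset() can restore it."""

    def __setattr__(self, name, value):
        originals = self.__dict__.setdefault('_originals', {})
        if name not in originals:
            originals[name] = value  # record the initial value once
        super(SnapshotOnFirstSet, self).__setattr__(name, value)

    def reset(self):
        # note: this restores references, not copies, of the original values
        for name, value in self.__dict__.get('_originals', {}).items():
            super(SnapshotOnFirstSet, self).__setattr__(name, value)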
Sounds like you want to know if your class should be an immutable object. The idea is that, once created, an immutable object can't/shouldn't/wouldn't be changed.
In Python, built-in types like int or tuple instances are immutable, enforced by the language:
>>> a=(1, 2, 3, 1, 2, 3)
>>> a[0] = 9
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'tuple' object does not support item assignment
As another example, every time you add two integers a new instance is created:
>>> a=5000
>>> b=7000
>>> d=a+b
>>> d
12000
>>> id(d)
42882584
>>> d=a+b
>>> id(d)
42215680
The id() function returns the identity (in CPython, the memory address) of the int object 12000. And every time we add a+b a new 12000 object instance is created.
User-defined immutable classes must be enforced manually, or simply treated as a convention with a source code comment:
class X(object):
    """Immutable class. Don't change instance variables values!"""
    def __init__(self, *args):
        self._some_internal_value = ...
    def some_operation(self, arg0):
        new_instance = X(arg0 + ...)
        new_instance._some_internal_operation(self._some_internal_value, 42)
        return new_instance
    def _some_internal_operation(self, a, b):
        """..."""
Either way, it's OK to create a new instance for every operation.
See the Memento Design Pattern if you want to restore previous state, or the Proxy Design Pattern if you want the object to seem pristine, as if just created. In any case, you need to put something between what's referenced and its state.
Please comment if you need some code, though I'm sure you'll find plenty on the web if you use the design pattern names as keywords.
# The Memento design pattern
class Scores(object):
    ...

class Player(object):
    def __init__(self, ...):
        ...
        self.scores = None
        self.history = []
        self.reset()

    def reset(self):
        if self.scores:
            self.history.append(self.scores)
        self.scores = Scores()
It sounds like overall your design needs some reworking. What about a PlayerGameStatistics class that would keep track of all that, and either a Player or a Game would hold a collection of these objects?
Also the code you show is a good start, but could you show more code that interacts with the Player class? I'm just having a hard time seeing why a single Player object should have PlayXGame methods -- does a single Player not interact with other Players when playing a game, or why does a specific Player play the game?
A simple reset method (called in __init__ and re-called when necessary) makes a lot of sense. But here's a solution that I think is interesting, if a bit over-engineered: create a context manager. I'm curious what people think about this...
from contextlib import contextmanager

@contextmanager
def resetting(resettable):
    try:
        resettable.setdef()
        yield resettable
    finally:
        resettable.reset()

class Resetter(object):
    def __init__(self, foo=5, bar=6):
        self.foo = foo
        self.bar = bar
    def setdef(self):
        self._foo = self.foo
        self._bar = self.bar
    def reset(self):
        self.foo = self._foo
        self.bar = self._bar
    def method(self):
        with resetting(self):
            self.foo += self.bar
            print self.foo

r = Resetter()
r.method()  # prints 11
r.method()  # still prints 11
To over-over-engineer, you could then create a @resetme decorator
def resetme(f):
    def rf(self, *args, **kwargs):
        with resetting(self):
            f(self, *args, **kwargs)
    return rf
So that instead of having to explicitly use with you could just use the decorator:
@resetme
def method(self):
    self.foo += self.bar
    print self.foo
I liked (and tried) the top answer from PaoloVictor. However, I found that it "reset" itself, i.e., if you called reset() a 2nd time it would throw an exception.
I found that it worked repeatably with the following implementation
def resettable(f):
    import copy
    def __init_and_copy__(self, *args, **kwargs):
        f(self, *args, **kwargs)
        def reset(o=self):
            o.__dict__ = o.__original_dict__
            o.__original_dict__ = copy.deepcopy(self.__dict__)
        self.reset = reset
        self.__original_dict__ = copy.deepcopy(self.__dict__)
    return __init_and_copy__
It sounds to me like you need to rework your model to at least include a separate "PlayerGameStats" class.
Something along the lines of:
import collections

PlayerGameStats = collections.namedtuple("PlayerGameStats",
                                          "points fouls rebounds assists turnovers steals")
class Player():
    def __init__(self):
        self.cup_games = []
        self.league_games = []
        self.training_games = []

    def playCupGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.cup_games.append(stats)

    def playLeagueGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.league_games.append(stats)

    def playTrainingGame(self):
        # simulates a game and then assigns values to the variables, accordingly
        stats = PlayerGameStats(points, fouls, rebounds, assists, turnovers, steals)
        self.training_games.append(stats)
And to answer the question in your edit, yes nested functions can see variables stored in outer scopes. You can read more about that in the tutorial: http://docs.python.org/tutorial/classes.html#python-scopes-and-namespaces
Thanks for the nice input, as I had kind of a similar problem. I'm solving it with a hook on the __init__ method, since I'd like to be able to reset to whatever initial state an object had. Here's my code:
import copy

_tool_init_states = {}

def wrap_init(init_func):
    def init_hook(inst, *args, **kws):
        if inst not in _tool_init_states:
            # if there is a class hierarchy, only the outer scope does work
            _tool_init_states[inst] = None
            res = init_func(inst, *args, **kws)
            _tool_init_states[inst] = copy.deepcopy(inst.__dict__)
            return res
        else:
            return init_func(inst, *args, **kws)
    return init_hook

def reset(inst):
    inst.__dict__.clear()
    inst.__dict__.update(
        copy.deepcopy(_tool_init_states[inst])
    )

class _Resettable(type):
    """Wraps __init__ to store object _after_ init."""
    def __new__(mcs, *more):
        mcs = super(_Resettable, mcs).__new__(mcs, *more)
        mcs.__init__ = wrap_init(mcs.__init__)
        mcs.reset = reset
        return mcs

class MyResettableClass(object):
    __metaclass__ = _Resettable
    def __init__(self):
        self.do_whatever = "you want,"
        self.it_will_be = "resetted by calling reset()"
To update the initial state, you could build some method like reset(...) that writes data into _tool_init_states. I hope this helps somebody. If this is possible without a metaclass, please let me know.
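A quick usage sketch (note that __metaclass__ is only honoured by Python 2; under Python 3 you would declare the class as class MyResettableClass(metaclass=_Resettable): instead):

obj = MyResettableClass()
obj.do_whatever = "something else entirely"
obj.reset()  # restores the snapshot taken right after __init__
assert obj.do_whatever == "you want,"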