NumPy: using array_function_dispatch for argument dispatch

This question is specific to NEP 18, which proposes a dispatch mechanism for using arbitrary functions together with classes that implement the __array_function__ interface (such as np.ndarray).
import numpy as np

def _einsum_dispatcher(subscripts, *operands):
    return (*operands,), dict(subscripts=subscripts)

@np.core.overrides.array_function_dispatch(_einsum_dispatcher, verify=False)
def my_einsum(*operands, **subscripts):
    raise RuntimeError("This should currently never happen")

class ArrayLike:
    def __array_function__(self, func, types, args, kwargs):
        return f"""
func = {func},
types = {types},
args = {args},
kwargs = {kwargs}
"""
Running a test with my_einsum, we get:
a = ArrayLike()
print(my_einsum('...', a))
Output:
RuntimeError: This should currently never happen
The call never gets intercepted by the __array_function__ method of ArrayLike. However, this can be solved by changing the dispatcher a bit:
def _einsum_dispatcher(subscripts, *operands):
    return (*operands,)
Now when testing my_einsum the exact same way again:
a = ArrayLike()
print(my_einsum('...', a))
Output:
func = <function my_einsum at 0x7efded61bd30>,
types = (<class '__main__.ArrayLike'>,),
args = ('...', <__main__.ArrayLike object at 0x7efdef83efa0>),
kwargs = {}
The function is now intercepted correctly, but how do I move the subscripts argument into kwargs? Can array_function_dispatch only be used to dispatch on argument types?
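For context, here is a minimal pure-Python sketch (my own approximation, not NumPy's actual implementation, which lives in C) of how a NEP 18 style decorator wires a dispatcher to __array_function__. The key point it illustrates: the dispatcher only selects which objects are inspected for __array_function__; args and kwargs are forwarded to the override exactly as the caller passed them.

import functools

def array_function_dispatch_sketch(dispatcher):
    """Rough stand-in for numpy.core.overrides.array_function_dispatch."""
    def decorator(implementation):
        @functools.wraps(implementation)
        def public_api(*args, **kwargs):
            # 1. The dispatcher only extracts the "relevant" arguments.
            relevant_args = dispatcher(*args, **kwargs)
            # 2. Types implementing __array_function__ are collected from them.
            types = tuple(type(arg) for arg in relevant_args
                          if hasattr(type(arg), '__array_function__'))
            # 3. The override is called with the *original* args/kwargs.
            for arg in relevant_args:
                if hasattr(type(arg), '__array_function__'):
                    return type(arg).__array_function__(
                        arg, public_api, types, args, kwargs)
            return implementation(*args, **kwargs)
        return public_api
    return decorator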

Related

TypeError: one() takes 1 positional argument but 2 were given

PyCharm returns "TypeError: one() takes 1 positional argument but 2 were given"
I've been searching all night but still can't figure it out.
I think the bug comes from mpl_connect(), because when I use connect() from pyqtBoundSignal instead, it works:
"""test.py"""
class forTest(QWidget):
signalTest = pyqtSignal()
def __init__(self):
super(forTest, self).__init__()
canvas = FigureCanvas(figure(facecolor="blue"))
# I got the following from other answer, but still not working
self.cid = canvas.mpl_connect('button_press_event', self.one)
layout = QHBoxLayout()
self.setLayout(layout)
layout.addWidget(canvas)
def one(self):
self.signalTest.emit()
print("emit()")
"""receice.py"""
class RTest(object):
def handle_signal(self):
print("get successfully")
"""main.py"""
if __name__ == "__main__":
app = QApplication(sys.argv)
tt = forTest()
rr = RTest()
tt.signalTest.connect(rr.handle_signal)
tt.show()
sys.exit(app.exec_())
From the mpl_connect() documentation:
    func : callable
        The callback function to be executed, which must have the signature:

            def func(event: Event) -> Any
So, one() must have a further argument:
def one(self, event):
    self.signalTest.emit()
    print("emit()")
The reason the direct signal connection works is that PyQt discards extra positional arguments when they exceed the argument count of the connected slot (or function).
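A minimal illustration of that argument-discarding behaviour (my own sketch, assuming PyQt5): a signal declared with one argument can be connected to a slot that takes none, and emitting it still succeeds.

from PyQt5.QtCore import QObject, pyqtSignal

class Emitter(QObject):
    fired = pyqtSignal(int)

def no_arg_slot():
    # the int carried by the signal is silently dropped by PyQt
    print("slot called, extra argument discarded")

e = Emitter()
e.fired.connect(no_arg_slot)
e.fired.emit(42)   # prints "slot called, extra argument discarded"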

Pandas: which “function names” can be used? (how are they looked up?)

When using pandas you can in certain cases pass names of functions as strings instead of actual references to those functions. For example: df.transform('round').
In the pandas docs they call these strings "function names".
I discovered that the lookup mechanism here doesn't look at the current namespace:
import pandas as pd

sales = pd.DataFrame(data={
    "price": [23.12, 22.34, 12.56, 27.78, 11.9],
})
display(sales)

def new_price(price):
    return price * 1.1

display(sales.transform('round'))      # Works
display(sales.transform(new_price))    # Works
display(sales.transform('new_price'))  # Does not work
My question: is there a list of these function names that you can use in cases like this?
This is the relevant code from the pandas source:
class Apply(metaclass=abc.ABCMeta):
    ...
    def _try_aggregate_string_function(self, obj, arg: str, *args, **kwargs):
        """
        if arg is a string, then try to operate on it:
        - try to find a function (or attribute) on ourselves
        - try to find a numpy function
        - raise
        """
        assert isinstance(arg, str)

        f = getattr(obj, arg, None)
        if f is not None:
            if callable(f):
                return f(*args, **kwargs)

            # people may try to aggregate on a non-callable attribute
            # but don't let them think they can pass args to it
            assert len(args) == 0
            assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0
            return f

        f = getattr(np, arg, None)
        if f is not None and hasattr(obj, "__array__"):
            # in particular exclude Window
            return f(obj, *args, **kwargs)

        raise AttributeError(
            f"'{arg}' is not a valid function for '{type(obj).__name__}' object"
        )
It basically looks for an attribute (usually a method) with that name on the object itself and, failing that, for a NumPy function of that name (provided the object defines __array__).
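That lookup order suggests the rule of thumb below. This is my own illustration, not an exhaustive list of names, and the exact behaviour can vary between pandas versions:

import pandas as pd

sales = pd.DataFrame({"price": [23.12, 22.34, 12.56, 27.78, 11.9]})

# 1. Any DataFrame/Series method name is found via getattr(obj, arg).
print(sales.transform('abs'))

# 2. Otherwise a NumPy function name is tried, because DataFrame has __array__.
print(sales.transform('sqrt'))

# 3. Anything else raises, as in the 'new_price' example above.
# sales.transform('new_price')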

writing a class decorator

I have a class that has a method called my_func(x, s, n). I need to vectorize this function; that is, I want to be able to pass x = [3, 4, 5, 6, 7] or any range of values and get a result. I am using NumPy and, looking around, I managed to find a solution that works. However, I want to make it object-oriented. I tried this:
import numpy as np
from functools import wraps

class Vectorize:
    """vectorization wrapper that works with instance methods"""
    def __init__(self, otypes=None, signature=None):
        self.otypes = otypes
        self.sig = signature

    # Decorator as an instance method
    def decorator(self, fn):
        vectorized = np.vectorize(fn, otypes=self.otypes, signature=self.sig)

        @wraps(fn)
        def wrapper(*args, **kwargs):
            return vectorized(*args, **kwargs)
        return wrapper
and then I tried this:
@Vectorize(signature=("(),(),(),()->()"))
def my_func(self, k: int, s: float, n: int):
    ...
I keep getting the error "'Vectorize' object is not callable". Is there another way to do this? Thanks.
I managed to fix this issue. But now that you have said that a signature degrades performance, I'm considering an alternative solution. For those who are curious:
import numpy as np
from functools import wraps

class Vectorize:
    """vectorization decorator that works with instance methods"""
    def vectorize(self, otypes=None, signature=None):
        # Decorator as an instance method
        def decorator(fn):
            vectorized = np.vectorize(fn, otypes=otypes, signature=signature)

            @wraps(fn)
            def wrapper(*args, **kwargs):
                return vectorized(*args, **kwargs)
            return wrapper
        return decorator

class CustomClass:
    v = Vectorize()

    @v.vectorize(signature=("(),(),(),()->()"))
    def my_func(self, k: int, s: float, n: int):
        ...
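For comparison, a minimal sketch (my own variant, not from the thread) of why the first attempt failed: decorating with @Vectorize(...) calls the Vectorize instance itself, so the class needs a __call__ method rather than a separately named decorator method.

import numpy as np
from functools import wraps

class Vectorize:
    """Vectorization wrapper usable directly as @Vectorize(...)."""
    def __init__(self, otypes=None, signature=None):
        self.otypes = otypes
        self.signature = signature

    # __call__ is what makes the instance usable as a decorator
    def __call__(self, fn):
        vectorized = np.vectorize(fn, otypes=self.otypes, signature=self.signature)

        @wraps(fn)
        def wrapper(*args, **kwargs):
            return vectorized(*args, **kwargs)
        return wrapper

class CustomClass:
    @Vectorize(signature="(),(),(),()->()")
    def my_func(self, k, s, n):
        return k + s * n

print(CustomClass().my_func([3, 4, 5], 2.0, 10))   # vectorized over k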

Can I restrict objects in Python3 so that only attributes that I make a setter for are allowed?

I have something called a Node. Both Definition and Theorem are a type of node, but only Definitions should be allowed to have a plural attribute:
class Definition(Node):
    def __init__(self, dic):
        self.type = "definition"
        super(Definition, self).__init__(dic)
        self.plural = move_attribute(dic, {'plural', 'pl'}, strict=False)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        if new_plural is None:
            self._plural = None
        else:
            clean_plural = check_type_and_clean(new_plural, str)
            assert dunderscore_count(clean_plural) >= 2
            self._plural = clean_plural

class Theorem(Node):
    def __init__(self, dic):
        self.type = "theorem"
        super().__init__(dic)
        self.proofs = move_attribute(dic, {'proofs', 'proof'}, strict=False)

    # theorems CANNOT have plurals:
    # if 'plural' in self:
    #     raise KeyError('Theorems cannot have plurals.')
As you can see, Definitions have a plural.setter, but theorems do not. However, the code
theorem = Theorem(some input)
theorem.plural = "some plural"
runs just fine and raises no errors. But I want it to raise an error. As you can see, I tried to check for plurals manually at the bottom of my code shown, but this would only be a patch. I would like to block the setting of ANY attribute that is not expressly defined. What is the best practice for this sort of thing?
I am looking for an answer that satisfies the "chicken" requirement:

    I do not think this solves my issue. In both of your solutions, I can append the code t.chicken = 'hi'; print(t.chicken), and it prints hi without error. I do not want users to be able to make up new attributes like chicken.
The short answer is "Yes, you can."
The follow-up question is "Why?" One of the strengths of Python is its remarkable dynamism, and by restricting that ability you are actually making your class less useful (but see the edit at the bottom).
However, there are good reasons to be restrictive, and if you do choose to go down that route you will need to modify your __setattr__ method:
def __setattr__(self, name, value):
    if name not in ('my', 'attribute', 'names',):
        raise AttributeError('attribute %s not allowed' % name)
    else:
        super().__setattr__(name, value)
There is no need to mess with __getattr__ nor __getattribute__ since they will not return an attribute that doesn't exist.
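A quick usage sketch of that __setattr__ guard (the class and attribute names here are my own, purely illustrative):

class Point:
    def __setattr__(self, name, value):
        if name not in ('x', 'y'):
            raise AttributeError('attribute %s not allowed' % name)
        else:
            super().__setattr__(name, value)

p = Point()
p.x = 1            # allowed
try:
    p.z = 2        # not in the whitelist
except AttributeError as e:
    print(e)       # attribute z not allowed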
Here is your code, slightly modified -- I added the __setattr__ method to Node, and added an _allowed_attributes to Definition and Theorem.
class Node:
    def __setattr__(self, name, value):
        if name not in self._allowed_attributes:
            raise AttributeError('attribute %s does not and cannot exist' % name)
        super().__setattr__(name, value)

class Definition(Node):
    _allowed_attributes = '_plural', 'type'

    def __init__(self, dic):
        self.type = "definition"
        super().__init__(dic)
        self.plural = move_attribute(dic, {'plural', 'pl'}, strict=False)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        if new_plural is None:
            self._plural = None
        else:
            clean_plural = check_type_and_clean(new_plural, str)
            assert dunderscore_count(clean_plural) >= 2
            self._plural = clean_plural

class Theorem(Node):
    _allowed_attributes = 'type', 'proofs'

    def __init__(self, dic):
        self.type = "theorem"
        super().__init__(dic)
        self.proofs = move_attribute(dic, {'proofs', 'proof'}, strict=False)
In use it looks like this:
>>> theorem = Theorem(...)
>>> theorem.plural = 3
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "<stdin>", line 6, in __setattr__
AttributeError: attribute plural does not and cannot exist
edit
Having thought about this some more, I think a good compromise for what you want, and to actually answer the part of your question about restricting allowed changes to setters only, would be to:

- use a metaclass to inspect the class at creation time and dynamically build the _allowed_attributes tuple
- modify the __setattr__ of Node to always allow modification/creation of attributes with at least one leading _
This gives you some protection against both misspellings and creation of attributes you don't want, while still allowing programmers to work around or enhance the classes for their own needs.
Okay, the new metaclass looks like this:
class NodeMeta(type):
    def __new__(metacls, cls, bases, classdict):
        node_cls = super().__new__(metacls, cls, bases, classdict)
        allowed_attributes = []
        for base in (node_cls, ) + bases:
            for name, obj in base.__dict__.items():
                # only properties that actually define a setter are allowed
                if isinstance(obj, property) and obj.fset is not None:
                    allowed_attributes.append(name)
        node_cls._allowed_attributes = tuple(allowed_attributes)
        return node_cls
The Node class has two adjustments: include the NodeMeta metaclass and adjust __setattr__ to only block non-underscore leading attributes:
class Node(metaclass=NodeMeta):
    def __init__(self, dic):
        self._dic = dic

    def __setattr__(self, name, value):
        if not name[0] == '_' and name not in self._allowed_attributes:
            raise AttributeError('attribute %s does not and cannot exist' % name)
        super().__setattr__(name, value)
Finally, the Node subclasses Theorem and Definition have the type attribute moved into the class namespace so there is no issue with setting it -- and as a side note, type is a bad name as it shadows the built-in function -- maybe node_type instead?
class Definition(Node):
    type = "definition"
    ...

class Theorem(Node):
    type = "theorem"
    ...
As a final note: even this method is not immune to somebody actually adding or changing attributes, as object.__setattr__(theorem_instance, 'an_attr', 99) can still be used -- or (even simpler) the _allowed_attributes tuple can be modified; however, if somebody is going to all that work they hopefully know what they are doing... and if not, they own all the pieces. ;)
You can check for the attribute every time you access it.
class Theorem(Node):
    ...
    def __getattribute__(self, name):
        if name not in ["allowed", "attribute", "names"]:
            raise MyException("attribute " + name + " not allowed")
        else:
            # go through super(); looking up self.__dict__ here would
            # re-enter __getattribute__ and be rejected by the check above
            return super().__getattribute__(name)

    def __setattr__(self, name, value):
        if name not in ["allowed", "attribute", "names"]:
            raise MyException("attribute " + name + " not allowed")
        else:
            super().__setattr__(name, value)
You can build the allowed method list dynamically as a side effect of a decorator:

allowed_attrs = []

def allowed(f):
    allowed_attrs.append(f.__name__)
    return f
You would also need to add non-method attributes manually.
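For instance, a small sketch of how this could be wired up (my own example; the attribute names are made up and the __setattr__ check is borrowed from the answers above):

allowed_attrs = []

def allowed(f):
    # side effect of decoration: register the method name as permitted
    allowed_attrs.append(f.__name__)
    return f

class Theorem:
    def __setattr__(self, name, value):
        if name not in allowed_attrs:
            raise AttributeError('attribute %s not allowed' % name)
        super().__setattr__(name, value)

    @allowed
    def proofs(self):
        return []

# non-method attributes still have to be whitelisted by hand
allowed_attrs.append('type')

t = Theorem()
t.type = 'theorem'    # fine
try:
    t.chicken = 'hi'  # rejected
except AttributeError as e:
    print(e)          # attribute chicken not allowed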
If you really want to prevent all other dynamic attributes, I assume there is a well-defined time window during which you want to allow adding attributes.
Below, I allow it until object initialisation is finished (you can control this with the allow_dynamic_attribute variable).
class A:
    def __init__(self):
        self.allow_dynamic_attribute = True
        self.abc = "hello"
        self._plural = None  # need to give default value
        # A.__setattr__ = types.MethodType(__setattr__, A)
        self.allow_dynamic_attribute = False

    def __setattr__(self, name, value):
        if hasattr(self, 'allow_dynamic_attribute'):
            if not self.allow_dynamic_attribute:
                if not hasattr(self, name):
                    raise Exception
        super().__setattr__(name, value)

    @property
    def plural(self):
        return self._plural

    @plural.setter
    def plural(self, new_plural):
        self._plural = new_plural

a = A()
print(a.abc)      # fine
a.plural = "yes"  # fine
print(a.plural)   # fine
a.dkk = "bed"     # raise exception
Or it can be more compact this way; I couldn't figure out how to make MethodType and super() get along together.
import types

def __setattr__(self, name, value):
    if not hasattr(self, name):
        raise Exception
    else:
        # this doesn't work for a reason I don't know
        # (note: zero-argument super() only works in functions defined
        # inside a class body, where the __class__ cell exists)
        super().__setattr__(name, value)

class A:
    def __init__(self):
        self.foo = "hello"

# after this point, there's no more setattr for you
A.__setattr__ = types.MethodType(__setattr__, A)

a = A()
print(a.foo)   # fine
a.bar = "bed"  # raise exception
Yes, you can create private members that cannot easily be modified from outside the class: name mangling is applied when the attribute name starts with two underscores:
class Test(object):
    def __init__(self, t):
        self.__t = t

    def __str__(self):
        return str(self.__t)

t = Test(2)
print(t)  # prints 2
t.__t = 3
print(t)  # prints 2
That said, assigning to such a variable from outside, as we do in t.__t = 3, will not raise an exception; it simply creates a new, unmangled __t attribute on the instance, while the class-internal value is stored under the mangled name _Test__t.
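To make that concrete, a short demonstration (my own addition, using the Test class above):

t = Test(2)
t.__t = 3                  # no mangling outside the class body
print(t)                   # still prints 2
print(t.__dict__)          # {'_Test__t': 2, '__t': 3}

t._Test__t = 5             # the mangled name can still be assigned to
print(t)                   # prints 5 -- so it is not truly read-only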
A different approach you can take to achieve the desired behavior is to use functions (closures). This approach requires "accessing attributes" using call notation, but if that doesn't bother you, you can get exactly what you want. The following demo hardcodes the values, but obviously you can have Theorem() accept an argument and use it to set the attribute values dynamically.
Demo:
# -*- coding: utf-8 -*-

def Theorem():
    def f(attrib):
        def proofs():
            return ''

        def plural():
            return '◊◊◊◊◊◊◊◊'

        if attrib == 'proofs':
            return proofs()
        elif attrib == 'plural':
            return plural()
        else:
            raise ValueError("Attribute [{}] doesn't exist".format(attrib))
    return f

t = Theorem()
print(t('proofs'))
print(t('plural'))
print(t('wait_for_error'))
OUTPUT

◊◊◊◊◊◊◊◊
Traceback (most recent call last):
  File "/Users/alfasi/Desktop/1.py", line 40, in <module>
    print(t('wait_for_error'))
  File "/Users/alfasi/Desktop/1.py", line 32, in f
    raise ValueError("Attribute [{}] doesn't exist".format(attrib))
ValueError: Attribute [wait_for_error] doesn't exist

Python: Anything wrong with dynamically assigning instance methods as instance attributes

I came up with the following code to decorate instance methods using a decorator that requires the instance itself as an argument:
from functools import wraps

def logging_decorator(tricky_instance):
    def wrapper(fn):
        @wraps(fn)
        def wrapped(*a, **kw):
            if tricky_instance.log:
                print("Calling %s.." % fn.__name__)
            return fn(*a, **kw)
        return wrapped
    return wrapper

class Tricky(object):
    def __init__(self, log):
        self.log = log
        self.say_hi = logging_decorator(self)(self.say_hi)

    def say_hi(self):
        print("Hello, world!")

i1 = Tricky(log=True)
i2 = Tricky(log=False)
i1.say_hi()
i2.say_hi()
This seems to work great, but I fear that I may have overlooked some unintended side effects of this trick. Am I about to shoot myself in the foot, or is this safe?
Note that I don't actually want to use this for logging, it's just the shortest meaningful example I could come up with.
It's not really clear to me why you would ever want to do this. If you want to assign a new method dynamically, use the types module:
import types

class Tricky(object):
    def __init__(self):
        def method(self):
            print('Hello')
        self.method = types.MethodType(method, self)
If you want to do something with the instance, do it in the __init__ method. If you just want access to the method's instance inside the decorator, you can use the im_self attribute (Python 2; in Python 3 it is called __self__):
def decorator(tricky_instance):
    def wrapper(meth):
        # on Python 3, use meth.__self__ instead of meth.im_self
        print(meth.im_self == tricky_instance)
        return meth
    return wrapper
Personally, I think this is veering into Maybe-I-Shouldn't-Use-Decorators land.
I think I was trying to be needlessly smart. There seems to be an embarrassingly simple solution:
from functools import wraps

def logging_decorator(fn):
    @wraps(fn)
    def wrapped(self, *a, **kw):
        if self.log:
            print("Calling %s.." % fn.__name__)
        return fn(self, *a, **kw)
    return wrapped

class Tricky(object):
    def __init__(self, log):
        self.log = log

    @logging_decorator
    def say_hi(self):
        print("Hello, world!")

i1 = Tricky(log=True)
i2 = Tricky(log=False)
i1.say_hi()
i2.say_hi()