As a follow-up to Astropy Units equivalencies - interferometry baselines, I would like to ask how I can use SI prefixes with my custom-defined unit. Until now, following the recommendations in the linked question, I have subclassed astropy's Quantity and overridden its .to method.
Here is my code:
```python
import sys

import astropy.units as un
import astropy.constants as co


class uWavelength(un.Quantity):
    def __new__(cls, value, freq=None, dtype=None, copy=True, **kwargs):
        unit = un.Unit(un.def_unit('lambdas', format={'format': r'\lambda'},
                                   prefixes=True))
        self = super().__new__(cls, value=value, unit=unit, dtype=dtype,
                               copy=copy, **kwargs)
        self.freq = freq
        if self.freq is not None:
            self.equivalencies = self.lambdas_equivalencies()
        return self

    @property
    def freq(self):
        return self._freq

    @freq.setter
    def freq(self, val):
        if val is not None:
            self._equivalencies = self.lambdas_equivalencies(restfreq=val)
        self._freq = val

    @property
    def equivalencies(self):
        return self._equivalencies

    @equivalencies.setter
    def equivalencies(self, val):
        self._equivalencies = val

    def lambdas_equivalencies(self, restfreq=None):
        if self.freq is not None:
            restfreq_hz = self.freq.to(un.Hz, equivalencies=un.spectral())
        elif restfreq is not None:
            restfreq_hz = restfreq.to(un.Hz, equivalencies=un.spectral())
        else:
            sys.exit("Frequency not provided")
        eq = [
            (self.unit, un.s, lambda x: x / restfreq_hz,
             lambda x: x * restfreq_hz),
            (self.unit, un.m,
             lambda x: x / restfreq_hz * co.c.to(un.m / un.s).value,
             lambda x: x / co.c.to(un.m / un.s).value * restfreq_hz),
            (un.m, un.s, lambda x: x / co.c.to(un.m / un.s).value,
             lambda x: x * co.c.to(un.m / un.s).value),
        ]
        return eq

    def to(self, unit, restfreq=None, copy=True):
        if restfreq is None:
            equiv = self.equivalencies
        else:
            equiv = self.lambdas_equivalencies(restfreq=restfreq)
        unit = un.Unit(unit)
        if copy:
            # Avoid using to_value to ensure that we make a copy. We also
            # don't want to slow down this method (esp. the scalar case).
            value = self._to_value(unit, equiv)
        else:
            # to_value only copies if necessary
            value = self.to_value(unit, equiv)
        return self._new_view(value, unit)
```
However, when I use the class I can only use the unit lambdas; I would also like to use klambda, Mlambda, etc. According to astropy this should be possible with the parameter prefixes=True, but it does not seem to work.
I don't think you should be defining the unit inside your class's __new__, as that prevents you from actually setting the unit when you instantiate a uWavelength.
Instead, put this outside your class:
```python
lambdas = u.def_unit('lambdas', format={'format': r'\lambda'})
```
Something I think the docs don't make really clear is that prefixes=True doesn't really do anything unless you also provide a namespace= argument, and even then the docs don't make it very clear how to use namespaces. I think it would be better to go with astrofrog's suggestion of explicitly declaring the prefixed units you need, like:

```python
klambdas = u.def_unit('kilolambdas',
                      represents=u.CompositeUnit(1e3, [lambdas], [1]),
                      format={'format': r'k\lambda'})
```
unless you *really* need every imaginable SI prefix, in which case you could try:

```python
lambdas = u.def_unit('lambdas', format={'format': r'\lambda'},
                     prefixes=True, namespace=globals())
```

and it will inject every prefixed unit into your module namespace.
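For example, a quick sketch of what that looks like (assuming a fresh module namespace, since def_unit writes the prefixed names such as `klambdas` and `Mlambdas` straight into globals()):

```python
import astropy.units as u

lambdas = u.def_unit('lambdas', format={'format': r'\lambda'},
                     prefixes=True, namespace=globals())

q = 1.5 * Mlambdas        # name injected into globals() by def_unit
print(q.to(lambdas))      # 1500000.0 lambdas
```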
Then, since you want your default unit for `uWavelength` to be `lambdas`, both to reduce confusion and to document this fact (through the signature of `__new__`), specify:
```python
class uWavelength(un.Quantity):
    def __new__(cls, value, freq=None, unit=lambdas, dtype=None,
                copy=True, **kwargs):
        ...  # rest of __new__ as before
```
and further, if you wanted, you could add a check like:

```python
unit = u.Unit(unit)
assert unit.is_equivalent(lambdas), 'unit must be equivalent to lambdas'
```
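Put together, a hypothetical instantiation would then look something like this (a sketch assuming the `lambdas`/`klambdas` units and the class changes above; the frequency value is illustrative):

```python
# Hypothetical usage; uWavelength and klambdas are defined as above.
b = uWavelength(1.2, freq=115 * u.GHz, unit=klambdas)
print(b.to(u.m))  # converts via the lambdas equivalencies
```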
I am solving this problem and wrote the code below, but I get an error at the last line, `results = solver.solve(model)`. If anyone knows how to deal with vector-based dynamic optimization in Pyomo, please let me know.
```python
import numpy as np
from pyomo.environ import (Any, ConcreteModel, Constraint, Objective,
                           Param, Set, SolverFactory, Var, value)
from pyomo.dae import ContinuousSet, DerivativeVar, Integral

# Mqn, sain, cqn, saidn and solu10 are defined elsewhere.

model = ConcreteModel()
model.num_itter = Param(initialize=0)
global p_
p_ = model.num_itter
# print(value(p_))
torq = np.zeros((14, 6))
# global p_

def M_func(model, i):
    return np.array(Mqn(*value(model.q[i])))

def sai_func(model, i):
    return np.array(sain(*value(model.q[i])))

def sai_func_T(model, i):
    return np.array(sain(*value(model.q[i]))).T

def c_func(model, i):
    return np.array(cqn(*np.append(value(model.q[0]), value(model.u[0]))))

def dsai_dt_func(model, i):
    return np.array(saidn(*np.append(value(model.q[0]), value(model.u[0]))))

def first_eq(model, i):
    return value(model.M[i]) @ value(model.q[i]) - \
           value(model.sai[i]).T @ value(model.lm[i]) + \
           torq @ value(model.tu[i])

def second_eq(model, i):
    return value(model.sai[i]) @ value(model.a[i])

def third_eq(model, i):
    return (-value(model.dsai_dt[i]) @ value(model.u[i])).reshape(10, 1)

def solu_f(model, i):
    global p_
    s_ = solu10[int(p_ / 2), :]
    # time.sleep(2)
    p_ = p_ + 1
    print('p', p_)
    return s_

def init_q(model, i):
    return solu10[i, :]

def init_u(model, i):
    return np.zeros((14,))

def init_tu(model, i):
    return np.zeros((6,))

def init_lm(model, i):
    return np.zeros((10,))

def init_a(model, i):
    return np.zeros((14,))

# Define the time horizon
t0 = 0; tf = 40
model.t = ContinuousSet(bounds=(t0, tf))
# measurements = {_: solu10[_, :] for _ in range(len(solu10))}

# Define the state variables, control inputs, and other variables
model.time_points = Set(initialize=range(200))
model.q = Var(model.t, initialize=init_q)
model.u = Var(model.t, initialize=init_u)
model.a = Var(model.t, initialize=init_a)
model.tu = Var(model.t, initialize=init_tu)
model.lm = Var(model.t, initialize=init_lm)
model.dqdt = DerivativeVar(model.q, wrt=model.t)
model.dudt = DerivativeVar(model.u, wrt=model.t)

# Define the parameters
model.M = Param(model.t, mutable=True, initialize=M_func, within=Any)
model.sai = Param(model.t, mutable=True, initialize=sai_func, within=Any)
model.c = Param(model.t, mutable=True, initialize=c_func, within=Any)
model.dsai_dt = Param(model.t, mutable=True, initialize=dsai_dt_func, within=Any)
model.first_meq = Param(model.t, mutable=True, initialize=first_eq, within=Any)
model.solu = Param(model.t, mutable=True, initialize=solu_f, within=Any)
model.second_meq = Param(model.t, mutable=True, initialize=second_eq, within=Any)
model.third_meq = Param(model.t, mutable=True, initialize=third_eq, within=Any)
# print(value(p_))

# Define the constraint equations
def dyn_eq_1(model, i):
    return model.first_meq[i] == -model.c[i]

def dyn_eq_2(model, i):
    return model.second_meq[i] == model.third_meq[i]

def vel_constraint(model, i):
    return model.dqdt[i] == model.u[i]

def acc_constraint(model, i):
    return model.dudt[i] == model.a[i]

def path_constraint(model, i):
    return model.q[i] == model.solu[i]

model.dyn_eq_1s = Constraint(model.t, rule=dyn_eq_1)
model.dyn_eq_2s = Constraint(model.t, rule=dyn_eq_2)
model.vel_constraints = Constraint(model.t, rule=vel_constraint)
model.acc_constraints = Constraint(model.t, rule=acc_constraint)
model.path_constraints = Constraint(model.t, rule=path_constraint)

# Define the bounds
u_lb = -np.array([2, 2, 0.5, 0.34, float('inf'), float('inf'), 0.20, 28,
                  float('inf'), float('inf'), 0.20, 28, 0.25, 0.25])
u_ub = [2, 2, 0.5, 0.34, None, None, 0.20, 28, None, None, 0.20, 28, 0.25, 0.25]
a_lb = -np.array([1.5, 1.5, 0.2, 0.10, float('inf'), float('inf'), 0.10, 5,
                  float('inf'), float('inf'), 0.10, 5, 0.10, 0.10])
a_ub = [1.5, 1.5, 0.2, 0.10, None, None, 0.10, 5, None, None, 0.10, 5, 0.10, 0.10]
tu_lb = -np.array([4, 0.5, 4, 0.5, 4, 4])
tu_ub = [4, 4, 4, 4, 4, 4]
model.u.setlb(u_lb)
model.u.setub(u_ub)
model.a.setlb(a_lb)
model.a.setub(a_ub)
model.tu.setlb(tu_lb)
model.tu.setub(tu_ub)

def u_nes_fn(model, i):
    u_nes = np.array([])
    for _ in [6, 7, 10, 11, 12, 13]:
        u_nes = np.append(u_nes, value(model.u[i])[_])
    return u_nes.astype('float64')

model.u_nes = Param(model.t, initialize=u_nes_fn, within=Any)

# Define the objective function
def tu_u_(model, i):
    return np.linalg.norm(value(model.u_nes[i] * model.tu[i] * model.u_nes[i] * model.tu[i]))

model.tu_u = Integral(model.t, wrt=model.t, rule=tu_u_)

def objfun(model):
    return model.tu_u

model.obj = Objective(rule=objfun)

# Define the solver and solve the problem
solver = SolverFactory('ipopt')
results = solver.solve(model)
```
```
ValueError                                Traceback (most recent call last)
c:\Users\pyomo_opti.ipynb Cell 16 in <cell line: 11>()
      9 # Define the solver and solve the problem
     10 solver = SolverFactory('ipopt')
---> 11 results = solver.solve(model)

File c:\Users\AppData\Local\Programs\Python\Python310\lib\site-packages\pyomo\opt\base\solvers.py:570, in OptSolver.solve(self, *args, **kwds)
    565 try:
    566
...
File pyomo\repn\plugins\ampl\ampl_.pyx:410, in pyomo.repn.plugins.ampl.ampl_.ProblemWriter_nl.call()
File pyomo\repn\plugins\ampl\ampl_.pyx:1072, in pyomo.repn.plugins.ampl.ampl_.ProblemWriter_nl._print_model_NL()

ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
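For reference, the final ValueError is generic NumPy behaviour rather than anything Pyomo-specific: it is raised whenever a multi-element array ends up in a boolean context, which is presumably what happens here when the NL writer evaluates constraint rules that return NumPy arrays. A minimal reproduction (illustrative only):

```python
import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0, 3.0])
cond = (a == b)  # elementwise comparison -> array([ True, False])
if cond:         # boolean context -> ValueError: truth value ... ambiguous
    pass
```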
I am trying to build a metric that is comparable to the metrics.PrecisionAtRecall class. Therefore, I've tried to build a custom metric by extending the keras.metrics.Metric class.
The original function is WSS = (TN + FN)/N − 1 + TP/(TP + FN), and this should be calculated at a certain recall value, say 95%.
What I have until now is the following:
```python
import tensorflow as tf
from tensorflow.keras import backend

class WorkSavedOverSamplingAtRecall(tf.keras.metrics.Metric):
    def __init__(self, recall, name='wss_at_recall', **kwargs):
        super(WorkSavedOverSamplingAtRecall, self).__init__(name=name, **kwargs)
        self.wss = self.add_weight(name='wss', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred_pos = tf.cast(backend.round(backend.clip(y_pred, 0, 1)), tf.float32)
        y_pred_neg = 1 - y_pred_pos
        y_pos = tf.cast(backend.round(backend.clip(y_true, 0, 1)), tf.float32)
        y_neg = 1 - y_pos
        fn = backend.sum(y_pos * y_pred_neg)  # false negatives
        tn = backend.sum(y_neg * y_pred_neg)  # true negatives
        tp = backend.sum(y_pos * y_pred_pos)  # true positives
        n = len(y_true)  # number of studies in batch
        r = tp / (tp + fn + backend.epsilon())  # recall
        self.wss.assign(((tn + fn) / n) - (1 - r))

    def result(self):
        return self.wss

    def reset_states(self):
        # The state of the metric will be reset at the start of each epoch.
        self.wss.assign(0.)
```
How can I calculate the WSS at a certain recall? I've seen the following in tensorflow's own git repository:
```python
def __init__(self, recall, num_thresholds=200, name=None, dtype=None):
    if recall < 0 or recall > 1:
        raise ValueError('`recall` must be in the range [0, 1].')
    self.recall = recall
    self.num_thresholds = num_thresholds
    super(PrecisionAtRecall, self).__init__(
        value=recall,
        num_thresholds=num_thresholds,
        name=name,
        dtype=dtype)
```
But that isn't really possible through the keras.metrics.Metric class.
If we follow the definition of WSS@95 given by the paper Reducing Workload in Systematic Review Preparation Using Automated Citation Classification, then we have:

For the present work, we have fixed recall at 0.95 and therefore work saved over sampling at 95% recall (WSS@95%) is:

WSS@95% = (TN + FN)/N − (1 − 0.95) = (TN + FN)/N − 0.05
And you could define your update function by:

```python
import tensorflow as tf
from tensorflow.keras import backend

class WorkSavedOverSamplingAtRecall(tf.keras.metrics.Metric):
    def __init__(self, recall, name='wss_at_recall', **kwargs):
        if recall < 0 or recall > 1:
            raise ValueError('`recall` must be in the range [0, 1].')
        self.recall = recall
        super(WorkSavedOverSamplingAtRecall, self).__init__(name=name, **kwargs)
        self.wss = self.add_weight(name='wss', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred_pos = tf.cast(backend.round(backend.clip(y_pred, 0, 1)), tf.float32)
        y_pred_neg = 1 - y_pred_pos
        y_pos = tf.cast(backend.round(backend.clip(y_true, 0, 1)), tf.float32)
        y_neg = 1 - y_pos
        fn = backend.sum(y_pos * y_pred_neg)  # false negatives
        tn = backend.sum(y_neg * y_pred_neg)  # true negatives
        n = len(y_true)  # number of studies in batch
        self.wss.assign(((tn + fn) / n) - (1 - self.recall))
```
One other solution would be to extend the TensorFlow class SensitivitySpecificityBase and implement WSS the way the PrecisionAtRecall class is implemented.
By using this class, here's how the WSS is calculated:

1. Compute the recall at all the thresholds (200 thresholds by default).
2. Find the index of the threshold where the recall is closest to the requested value (0.95 in this case).
3. Compute the WSS at that index.

The number of thresholds is used to match the given recall.
```python
import tensorflow as tf
from tensorflow.python.keras.metrics import SensitivitySpecificityBase

class WorkSavedOverSamplingAtRecall(SensitivitySpecificityBase):
    def __init__(self, recall, num_thresholds=200, name="wss_at_recall", dtype=None):
        if recall < 0 or recall > 1:
            raise ValueError('`recall` must be in the range [0, 1].')
        self.recall = recall
        self.num_thresholds = num_thresholds
        super(WorkSavedOverSamplingAtRecall, self).__init__(
            value=recall, num_thresholds=num_thresholds, name=name, dtype=dtype
        )

    def result(self):
        recalls = tf.math.div_no_nan(
            self.true_positives, self.true_positives + self.false_negatives
        )
        n = (self.true_negatives + self.true_positives
             + self.false_negatives + self.false_positives)
        wss = tf.math.div_no_nan(
            self.true_negatives + self.false_negatives, n
        )
        return self._find_max_under_constraint(
            recalls, wss, tf.math.greater_equal
        )

    def get_config(self):
        """For serialization purposes"""
        config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}
        base_config = super(WorkSavedOverSamplingAtRecall, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
```
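A usage sketch, attaching the metric to a small binary classifier (the architecture and input shape here are illustrative assumptions, not from the original post):

```python
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[WorkSavedOverSamplingAtRecall(recall=0.95)])
```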
I want to perform a binary search using e.g. np.searchsorted; however, I do not want to create an explicit array containing the values. Instead, I want to define a function giving the value to be expected at the desired position of the array, e.g. p(i) = i, where i denotes the position within the array.
Generating an array of values for the function would, in my case, be neither efficient nor elegant. Is there any way to achieve this?
What about something like:
```python
import collections.abc

class GeneratorSequence(collections.abc.Sequence):
    def __init__(self, func, size):
        self._func = func
        self._len = size

    def __len__(self):
        return self._len

    def __getitem__(self, i):
        if 0 <= i < self._len:
            return self._func(i)
        else:
            raise IndexError

    def __iter__(self):
        for i in range(self._len):
            yield self[i]
```
This would work with np.searchsorted(), e.g.:
```python
import numpy as np

gen_seq = GeneratorSequence(lambda x: x ** 2, 100)
np.searchsorted(gen_seq, 9)
# 3
```
You could also write your own binary-search function; you do not really need NumPy in this case, and avoiding it can actually be beneficial:
```python
def bin_search(seq, item):
    first = 0
    last = len(seq) - 1
    found = False
    while first <= last and not found:
        midpoint = (first + last) // 2
        if seq[midpoint] == item:
            first = midpoint
            found = True
        else:
            if item < seq[midpoint]:
                last = midpoint - 1
            else:
                first = midpoint + 1
    return first
```
Which gives identical results:
```python
all(bin_search(gen_seq, i) == np.searchsorted(gen_seq, i) for i in range(100))
# True
```
Incidentally, this is also WAY faster:
```python
gen_seq = GeneratorSequence(lambda x: x ** 2, 1000000)

%timeit np.searchsorted(gen_seq, 10000)
# 1 loop, best of 3: 1.23 s per loop

%timeit bin_search(gen_seq, 10000)
# 100000 loops, best of 3: 16.1 µs per loop
```
Inspired by @norok2's comment, I think you can use something like this:
```python
from collections.abc import Sequence

def f(i):
    return i * 2  # Just an example

class MySeq(Sequence):
    def __init__(self, f, maxi):
        self.maxi = maxi
        self.f = f

    def __getitem__(self, x):
        if x < 0 or x > self.maxi:
            raise IndexError()
        return self.f(x)

    def __len__(self):
        return self.maxi + 1
```
In this case f is your function and maxi is the maximum index. This of course only works if the function f returns values in sorted order.
At this point you can use an object of type MySeq inside np.searchsorted.
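For instance, a small usage sketch (the function and size are just illustrative):

```python
import numpy as np

seq = MySeq(f, 99)               # virtual array: 0, 2, 4, ..., 198
print(len(seq))                  # 100
print(np.searchsorted(seq, 10))  # 5, since f(5) == 10
```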
I'm trying to plot the difference between two variables. I'm following the example set here (search for true_p_A and it will be in the right section).
Here is my code:
```python
import numpy as np
import pymc as pm

def cool(test):
    n_data_points = len(test)
    alpha = 1.0 / np.mean(test)
    lambda_1 = pm.Exponential("lambda_1", alpha)  # prior on first behaviour
    lambda_2 = pm.Exponential("lambda_2", alpha)  # prior on second behaviour
    tau = pm.DiscreteUniform("tau", lower=0, upper=len(test))  # prior on behaviour change

    """
    The below deterministic functions map an assignment, in this case 0 or 1,
    to a set of parameters, located in the (1,2) arrays `taus` and `centers`.
    """
    @pm.deterministic
    def lambda_(tau=tau, lambda_1=lambda_1, lambda_2=lambda_2):
        out = np.zeros(n_data_points)
        out[:tau] = lambda_1  # lambda before tau is lambda1
        out[tau:] = lambda_2  # lambda after tau is lambda2
        return out

    def delta(p_A=lambda_1, p_B=lambda_2):
        return p_A - p_B

    obs = pm.Poisson("obs", lambda_, value=test, observed=True)
    model = pm.Model([obs, lambda_, lambda_1, lambda_2, tau, delta])
    mcmc = pm.MCMC(model)
    mcmc.sample(5000, 1000, 1)
    return mcmc, 5000, 1

def main_plotter(stats, test):
    mcmc, N, bin = stats
    n_count_data = len(test)
    lambda_1_samples = mcmc.trace('lambda_1')[:]
    lambda_2_samples = mcmc.trace('lambda_2')[:]
    tau_samples = mcmc.trace('tau')[:]
    delta_samples = mcmc.trace('delta')
    print(delta_samples)

data = [1, 2, 1, 2.2, 5, 5.5, 6, 5.4]
main_plotter(cool(data), data)
```
In the example no variable is created for delta, so no key value is inserted. Whenever I run this code it tells me it can't find the key. My question is: what do I need to do to access the delta posterior data?
You are missing the deterministic decorator before the delta function definition. It works if you change, starting at line 21:

```python
@pm.deterministic
def delta(p_A=lambda_1, p_B=lambda_2):
    return p_A - p_B
```
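After that change, the lookup in `main_plotter` should find the key; note the `[:]` slice used for the other traces:

```python
delta_samples = mcmc.trace('delta')[:]
```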
Consider the following interactive example:

```python
>>> l = imap(str, xrange(1, 4))
>>> list(l)
['1', '2', '3']
>>> list(l)
[]
```
Does anyone know if there is already an implementation somewhere out there of a version of imap (and the other itertools functions) such that the second time list(l) is executed you get the same result as the first? And I don't want the regular map, because building the entire output in memory can be a waste of memory if you use larger ranges.
I want something that basically does something like:
```python
import itertools

class cmap:
    def __init__(self, function, *iterators):
        self._function = function
        self._iterators = iterators

    def __iter__(self):
        return itertools.imap(self._function, *self._iterators)

    def __len__(self):
        return min(map(len, self._iterators))
```
But it would be a waste of time to do this manually for all the itertools functions if someone has already done it.
PS: Do you think containers are more zen than iterators, given that for an iterator something like

```python
for i in iterator:
    do_something()
```

implicitly empties the iterator, while with a container you explicitly need to remove elements?
You do not have to build such an object for each type of container. Basically, you have the following:
```python
mkimap = lambda: imap(str, xrange(1, 4))
list(mkimap())
list(mkimap())
```
Now you only need a nice wrapping object to prevent the "ugly" function calls. This could work this way:
```python
class MultiIter(object):
    def __init__(self, f, *a, **k):
        if a or k:
            self.create = lambda: f(*a, **k)
        else:  # optimize
            self.create = f

    def __iter__(self):
        return self.create()

l = MultiIter(lambda: imap(str, xrange(1, 4)))
# or
l = MultiIter(imap, str, xrange(1, 4))
# or even
@MultiIter
def l():
    return imap(str, xrange(1, 4))

# and then
print list(l)
print list(l)
```
(untested, hope it works, but you should get the idea)
For your 2nd question: Iterators and containers both have their uses. You should take whatever best fits your needs.
You may be looking for itertools.tee()
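For example (a quick sketch in the question's Python 2 syntax; note that tee buffers every item one copy has produced but the other has not yet consumed, so fully consuming one copy first still keeps everything in memory):

```python
from itertools import imap, tee

l1, l2 = tee(imap(str, xrange(1, 4)))
print list(l1)  # ['1', '2', '3']
print list(l2)  # ['1', '2', '3']
```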
Iterators are my favorite topic ;)
```python
from itertools import imap

class imap2(object):
    def __init__(self, f, *args):
        self.g = imap(f, *args)
        self.lst = []
        self.done = False

    def __iter__(self):
        while True:
            try:  # try to get something from g
                x = next(self.g)
            except StopIteration:
                if self.done:
                    # give the old values
                    for x in self.lst:
                        yield x
                else:
                    # g was consumed for the first time
                    self.done = True
                return
            else:
                self.lst.append(x)
                yield x

l = imap2(str, xrange(1, 4))
print list(l)
print list(l)
```