Maxima solve function finds no solution for a system

I am new to Maxima.
If I do:
solve([x = 2, y = 3], [x, y]);
I get:
[[x = 2, y = 3]]
Which is correct!
If I do:
solve([sin(x) = 1], [x]);
I get:
[x = %pi/2]
Which is also correct!
But if I do:
solve([sin(x) = 1, y = 3], [x, y]);
I get:
[]
Why?
Thanks for your help.

solve passes a system of equations to algsys, which essentially handles only polynomial equations, so the non-polynomial sin(x) = 1 makes it give up and return []. After load(to_poly_solve); try:
to_poly_solve([sin(x) = 1, y = 3], [x, y]);

Related

takes 1 positional argument but 2 were given in @tf.function

I have a network written with tensorflow Keras. In part of my code I need to use scipy's cKDTree, so I decorated my function with @tf.function. When I want to build the tree I receive the following error. (Let me know if more details are required.)
The error happens when it tries to build the cKDTree. The shape of pc2e is (46080, 3).
In similar questions I found that it could be caused by the Pillow version; I changed the version, but that didn't solve the error.
Also, is there a better way to have a KDTree in tensorflow?
TypeError: in user code:

    /home/***/My_Models.py:731 var_layer  *
        tree2 = cKDTree(pc2e, leafsize=500, balanced_tree=False)
    ckdtree.pyx:522 scipy.spatial.ckdtree.cKDTree.__init__  **
        TypeError: __array__() takes 1 positional argument but 2 were given

Process finished with exit code 1
The function:
# assumed imports (not shown in the question): numpy as np, tensorflow as tf,
# scipy.spatial.cKDTree, and tensorflow_graphics as tfg
@tf.function
def var_layer(self, inputs, output):  # output: x y z i j k w
    inputs_v = tf.Variable(inputs)
    pc1_raw, pc2_raw = tf.split(inputs_v, num_or_size_splits=2, axis=4)
    # B x T x W x H x Channels
    s0, s1, s2, s3, s4 = pc1_raw.shape[0], pc1_raw.shape[1], pc1_raw.shape[2], pc1_raw.shape[3], pc1_raw.shape[4]
    pc1 = tf.reshape(pc1_raw[:, -1, :, :, 0:3], shape=[-1, s2 * s3, 3])
    pc2 = tf.reshape(pc2_raw[:, -1, :, :, 0:3], shape=[-1, s2 * s3, 3])
    # normal2 = tf.reshape(pc2_raw[:, -1, :, :, 3:6], [-1, s2 * s3, 3])
    # normal1 = tf.reshape(pc1_raw[:, -1, :, :, 3:6], [-1, s2 * s3, 3])
    Rq, Tr3 = tfg.dual_quaternion.to_rotation_translation(output)
    R33 = tfg.rotation_matrix_3d.from_quaternion(Rq)
    RT = tf.concat([R33, tf.expand_dims(Tr3, axis=2)], -1)
    RT = tf.pad(RT, [[0, 0], [0, 1], [0, 0]], constant_values=[0.0, 0.0, 0.0, 1.0])
    pc1 = tf.pad(pc1, [[0, 0], [0, 0], [0, 1]], constant_values=1)
    pc1 = tf.transpose(pc1, perm=[0, 2, 1])
    pc1_tr = tf.linalg.matmul(RT, pc1)
    pc1_tr = pc1_tr[:, 0:3]
    pc1_tr = tf.transpose(pc1_tr, perm=[0, 2, 1])  # B x WH x 3
    # remove zero values
    for epoch in range(self.Epochs):
        pc2e = pc2[epoch]
        print(pc2e)
        tree2 = cKDTree(pc2e, leafsize=500, balanced_tree=False)  # <- fails here
        dist_in, ind = tree2.query(pc1_tr[epoch], k=1)
        nonempty = np.count_nonzero(dist_in)
        dist_in = np.sum(np.abs(dist_in))
        if nonempty != 0:
            dist_in = np.divide(dist_in, nonempty)
        dist_p2p = dist_in
        print(dist_p2p)
    return dist_p2p
Versions:
TensorFlow 2.3.0
SciPy 1.4.1
Pillow 8.2.0
The input of the function is a point cloud with shape Batch x Time x W x H x Channels, and the shape of pc2e is (46080, 3).
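Inside a @tf.function the tensors are symbolic, so when SciPy internally calls np.asarray(pc2e, dtype=...) the extra dtype argument reaches Tensor.__array__, which only accepts self; hence the TypeError (and even with matching versions, a graph tensor has no concrete values to hand to SciPy). One way to keep the SciPy call is to route it through tf.py_function, which executes its target eagerly so .numpy() is available. A minimal sketch, with illustrative names rather than the asker's full model; note that no gradient flows through the SciPy call:

import numpy as np
import tensorflow as tf
from scipy.spatial import cKDTree

def mean_nn_distance_np(pc2e, pc1_tr_e):
    # Runs eagerly inside tf.py_function, so .numpy() works here.
    tree = cKDTree(pc2e.numpy(), leafsize=500, balanced_tree=False)
    dist_in, _ = tree.query(pc1_tr_e.numpy(), k=1)
    nonempty = np.count_nonzero(dist_in)
    total = np.sum(np.abs(dist_in))
    return np.float32(total / nonempty if nonempty else total)

@tf.function
def point_to_point_distance(pc2e, pc1_tr_e):
    # tf.py_function escapes the graph for the SciPy call.
    return tf.py_function(mean_nn_distance_np, [pc2e, pc1_tr_e], tf.float32)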

how to slice a tensorflow tensor into multiple slices

The tensor is batch(3) * length(5) * dim(2):
tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]])
I want to get slices for every span [0,0], [0,1] ... [3,4], [4,4] along the length axis [0,1,2,3,4]; the operation looks like:
spans_length = 0
with tf.variable_scope("loss_span"):
    output = []
    for i in range(0, 1 + n_spans):
        for j in range(1, seq_length):
            if j + i < seq_length:
                res = tf.slice(output_layer_sequence, [0, j, 0], [-1, j+i-j+1, -1])
                res = tf.reduce_sum(res, axis=1)
                output.append(res)
                # output = tf.convert_to_tensor(output)
                spans_length += 1
    output = tf.convert_to_tensor(output)
    vsp = tf.transpose(output, [1, 0, 2])  # batch, spans_length, hidden_size
    vsp = tf.reshape(vsp, [-1, hidden_size])  # batch * spans_length, hidden_size
    span_logits = tf.matmul(vsp, output_span_weight, transpose_b=True)  # [batch * spans_length, class_labels]
    span_logits = tf.nn.bias_add(span_logits, output_span_bias)  # [batch * spans_length, class_labels]
    span_matrix = tf.reshape(span_logits, [-1, spans_length, class_labels], name="span_matrix_val")  # [batch, spans_length, class_labels]
    label_span_logists = tf.one_hot(indices=label_span, depth=class_labels, on_value=1, off_value=0, axis=-1, dtype=tf.int32)
    label_span_logists = tf.cast(label_span_logists, tf.int64)
    span_loss = tf.nn.softmax_cross_entropy_with_logits(logits=span_matrix, labels=label_span_logists)
    span_loss = tf.reduce_mean(span_loss, name='loss_span')
When I do this operation, training is very slow; how can I speed it up? Thanks.
This code works:
# tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]])
tensor = tf.random.uniform((3, 2000, 2))
length = tf.shape(tensor)[1].numpy()
output = []
for begins in range(length):
    for size in range(length - begins):
        res = tf.slice(tensor, [0, begins, 0], [-1, size + 1, -1])
        res = tf.reduce_sum(res)
        output.append(res)
output = tf.convert_to_tensor(output)
I tried to use tf.scan(), but I don't see any benefits:
output = tf.constant([], tf.int32)
for begins in range(length):
    t = tensor[:, begins:, :]
    t = tf.transpose(t, (1, 0, 2))
    t = tf.scan(lambda a, x: a + x, t)
    t = tf.transpose(t, (1, 0, 2))
    t = tf.reduce_sum(t, [0, 2])
    output = tf.concat([output, t], 0)
Edit:
I tried applying reduce_sum() along the unused dimensions [0, 2] as preprocessing:
tensor = tf.reduce_sum(tensor, [0, 2])
output = tf.constant([])
for begins in range(length):
    t = tensor[begins:]
    t = tf.scan(lambda a, x: a + x, t)
    output = tf.concat([output, t], 0)
Still don't see performance benefits.
for i in range(0, 50):
    for j in range(1, 200):
        if j + i < 200:
            res = tf.slice(output_layer_sequence, [0, j, 0], [-1, j+i-j+1, -1])
            res = tf.reduce_sum(res, axis=1)
            output.append(res)
output = tf.convert_to_tensor(output)
When I do this operation, training is very slow; how can I speed it up? Thanks.
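For what it's worth, every span sum here is a difference of two prefix sums, so the slice loop can be replaced by one tf.cumsum plus two gathers. A sketch in TF 2.x eager mode, assuming the per-span sums along the length axis are what is needed (memory grows with length² spans, so this trades memory for speed):

import tensorflow as tf

tensor = tf.random.uniform((3, 2000, 2))  # batch x length x dim
length = tf.shape(tensor)[1]

# c[:, j] is the sum of tensor[:, :j]; the sum over span [i, j] is c[:, j+1] - c[:, i]
c = tf.cumsum(tensor, axis=1)
c = tf.pad(c, [[0, 0], [1, 0], [0, 0]])  # prepend a row of zeros

starts = tf.range(length)          # candidate span starts i
ends = tf.range(1, length + 1)     # candidate span ends j + 1
ii, jj = tf.meshgrid(starts, ends, indexing='ij')
mask = ii < jj                     # keep only valid spans (start <= end)
span_sums = (tf.gather(c, tf.boolean_mask(jj, mask), axis=1)
             - tf.gather(c, tf.boolean_mask(ii, mask), axis=1))
# span_sums: [batch, num_spans, dim], one entry per span [i, j]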

is there a version of tf.unique that behaves like bash's uniq?

I would like for non-consecutive identical elements in a tensor to be assigned distinct idx (i.e., second output of tf.unique). Is there a way to accomplish that? Thanks.
OK my brain is less foggy today than usual, and here is a quick solution:
x = [1, 1, 2, 2, 3, 2, 1]

def bash_unique_with_counts(vector):
    segment_starts = tf.concat(
        [[1], tf.to_int32(tf.not_equal(vector[:-1], vector[1:]))], axis=0)
    new_vector = tf.cumsum(segment_starts, exclusive=False)
    return tf.unique_with_counts(new_vector)

y, idx, count = bash_unique_with_counts(x)
print(tf.Session().run(count))
[2 2 1 1 1]
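The same idea carries over to TF 2.x eager mode (a sketch; tf.to_int32 becomes tf.cast and the Session goes away):

import tensorflow as tf

def bash_unique_with_counts(vector):
    vector = tf.convert_to_tensor(vector)
    # 1 wherever a new run starts, 0 where the previous element repeats
    segment_starts = tf.concat(
        [[1], tf.cast(tf.not_equal(vector[:-1], vector[1:]), tf.int32)], axis=0)
    # a cumulative sum turns run starts into distinct run ids
    return tf.unique_with_counts(tf.cumsum(segment_starts))

y, idx, count = bash_unique_with_counts([1, 1, 2, 2, 3, 2, 1])
print(count.numpy())  # [2 2 1 1 1]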

how to implement a variable array of ones and zeros in tensorflow

I'm totally new to tensorflow, and I just want to implement a kind of selection function using matrix multiplication.
Example below:
# input:
I = [[9.6, 4.1, 3.2]]
# selection: a single "1" value, and the others are "0"s,
# e.g. s could be [[0, 1, 0]] or [[0, 0, 1]] or [[1, 0, 0]]
s = tf.transpose(tf.Variable([[a, b, c]]))
# result: (multiplication)
o = tf.matmul(I, s)
Sorry for the poor expression. I intend to find the 'solution' over distribution functions with different means and sigmas (values ranging from 0 to 1).
So now I have three variables: i, j, index.
value1 = np.exp(-((index - m1[i]) ** 2.) / s1[i] ** 2.)
value2 = np.exp(-((index - m2[j]) ** 2.) / s2[j] ** 2.)
m1 = [1, 3, 5]; s1 = [0.2, 0.4, 0.5]  # first graph
m2 = [3, 5, 7]; s2 = [0.5, 0.5, 1.0]  # second graph
I want to maximize the total value, e.g. value1 + value2 = 1 + 1 = 2, with one of the solutions being i = 2, j = 1, index = 5.
Or could I do this in another module?
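If the aim is just to find the best i, j, and index, a brute-force grid search may be simpler than a differentiable one-hot relaxation. A NumPy sketch (the index grid is an assumption; any discretization works):

import numpy as np

m1, s1 = np.array([1, 3, 5]), np.array([0.2, 0.4, 0.5])  # first graph
m2, s2 = np.array([3, 5, 7]), np.array([0.5, 0.5, 1.0])  # second graph
index = np.linspace(0, 8, 801)  # search grid for index

# total[i, j, k] = value1(i, index[k]) + value2(j, index[k])
v1 = np.exp(-((index[None, :] - m1[:, None]) ** 2) / s1[:, None] ** 2)
v2 = np.exp(-((index[None, :] - m2[:, None]) ** 2) / s2[:, None] ** 2)
total = v1[:, None, :] + v2[None, :, :]

i, j, k = np.unravel_index(np.argmax(total), total.shape)
# prints one maximizer; both (i=1, j=0, index=3.0) and (i=2, j=1, index=5.0) attain 2.0
print(i, j, index[k], total[i, j, k])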

How to find an index of the first matching element in TensorFlow

I am looking for a TensorFlow way of implementing something similar to Python's list.index() function.
Given a matrix and a value to find, I want to know the first occurrence of the value in each row of the matrix.
For example,
# m is a <batch_size, 100> matrix of integers
val = 23
result = [0] * batch_size
for i, row_elems in enumerate(m):
    result[i] = row_elems.index(val)
I cannot assume that 'val' appears only once in each row, otherwise I would have implemented it using tf.argmax(m == val). In my case, it is important to get the index of the first occurrence of 'val' and not any.
It seems that tf.argmax works like np.argmax (according to the test), which will return the first index when there are multiple occurrences of the max value.
You can use tf.argmax(tf.cast(tf.equal(m, val), tf.int32), axis=1) to get what you want. However, currently the behavior of tf.argmax is undefined in case of multiple occurrences of the max value.
If you are worried about undefined behavior, you can apply tf.argmin on the return value of tf.where as @Igor Tsvetkov suggested.
For example,
# test with tensorflow r1.0
import tensorflow as tf

val = 3
m = tf.placeholder(tf.int32)
m_feed = [[0  , 0, val, 0, val],
          [val, 0, val, val, 0],
          [0  , val, 0, 0, 0]]

tmp_indices = tf.where(tf.equal(m, val))
result = tf.segment_min(tmp_indices[:, 1], tmp_indices[:, 0])

with tf.Session() as sess:
    print(sess.run(result, feed_dict={m: m_feed}))  # [2, 0, 1]
Note that tf.segment_min will raise InvalidArgumentError when some row contains no val. In your code, row_elems.index(val) would raise an exception too when row_elems doesn't contain val.
Looks a little ugly but works (assuming m and val are both tensors):
idx = list()
for t in tf.unpack(m, axis=0):
    idx.append(tf.reduce_min(tf.where(tf.equal(t, val))))
idx = tf.pack(idx, axis=0)
EDIT:
As Yaroslav Bulatov mentioned, you could achieve the same result with tf.map_fn:
def index1d(t):
    return tf.reduce_min(tf.where(tf.equal(t, val)))

idx = tf.map_fn(index1d, m, dtype=tf.int64)
Here is another solution to the problem, assuming there is a hit on every row.
import tensorflow as tf

val = 3
m = tf.constant([
    [0  , 0, val, 0, val],
    [val, 0, val, val, 0],
    [0  , val, 0, 0, 0]])

# replace all entries in the matrix either with their column index, or with an out-of-range number
match_indices = tf.where(                                  # [[5, 5, 2, 5, 4],
    tf.equal(val, m),                                      #  [0, 5, 2, 3, 5],
    x=tf.range(tf.shape(m)[1]) * tf.ones_like(m),          #  [5, 1, 5, 5, 5]]
    y=(tf.shape(m)[1]) * tf.ones_like(m))
result = tf.reduce_min(match_indices, axis=1)

with tf.Session() as sess:
    print(sess.run(result))  # [2, 0, 1]
Here is a solution which also handles the case where the element is not present in the matrix (from a DeepMind GitHub repository):
def get_first_occurrence_indices(sequence, eos_idx):
    '''
    args:
        sequence: [batch, length]
        eos_idx: scalar
    '''
    batch_size, maxlen = sequence.get_shape().as_list()
    eos_idx = tf.convert_to_tensor(eos_idx)
    tensor = tf.concat(
        [sequence, tf.tile(eos_idx[None, None], [batch_size, 1])], axis=-1)
    index_all_occurrences = tf.where(tf.equal(tensor, eos_idx))
    index_all_occurrences = tf.cast(index_all_occurrences, tf.int32)
    index_first_occurrences = tf.segment_min(index_all_occurrences[:, 1],
                                             index_all_occurrences[:, 0])
    index_first_occurrences.set_shape([batch_size])
    index_first_occurrences = tf.minimum(index_first_occurrences + 1, maxlen)
    return index_first_occurrences
And:
import tensorflow as tf

mat = tf.Variable([[1,2,3,4,5], [2,3,4,5,6], [3,4,5,6,7], [0,0,0,0,0]], dtype=tf.int32)
idx = 3
first_occurrences = get_first_occurrence_indices(mat, idx)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(first_occurrences)  # [3, 2, 1, 5]
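For reference, the same tf.where + segment_min idea ported to TF 2.x eager mode (a sketch, with the same caveat that every row must contain val):

import tensorflow as tf

val = 3
m = tf.constant([[0, 0, 3, 0, 3],
                 [3, 0, 3, 3, 0],
                 [0, 3, 0, 0, 0]])
hits = tf.where(tf.equal(m, val))                    # [row, col] of every match
first = tf.math.segment_min(hits[:, 1], hits[:, 0])  # minimum column per row
print(first.numpy())  # [2 0 1]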