I am very new to Python and ML. I have been taking a few Kaggle courses and working on pipelines. Everything seemed to work fine without the pipeline, but I got an XGBoostError when I piped it all together. There is an issue with my code that I cannot figure out. The code is below, followed by the error:
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder
from xgboost import XGBRegressor

X_full = pd.read_csv(train_path).copy()
X_test = pd.read_csv(test_path).copy()
def cleaning(var):
    q1, q3 = np.percentile(var['Fare'], [25, 75])
    iqr = q3 - q1
    lower_bound_val = q1 - (1.5 * iqr)
    upper_bound_val = q3 + (1.5 * iqr)
    var = var[(var['Fare'] >= lower_bound_val) & (var['Fare'] < upper_bound_val)].copy()
    var['family_size'] = var.SibSp + var.Parch
    drop_cols = ['PassengerId', 'Name', 'Parch', 'SibSp', 'Ticket', 'Cabin', 'Embarked']
    var = var.drop(drop_cols, axis=1)
    return var
get_cleaning = FunctionTransformer(cleaning, validate=False)
age_transformer = SimpleImputer(missing_values=np.nan, strategy='median')
age_col = ['Age']
sex_transformer = OneHotEncoder(handle_unknown='ignore', sparse=False)
sex_col = ['Sex']
# Define the model
xgboost_m = XGBRegressor(random_state=0)
prepro_col = ColumnTransformer(
    transformers=[
        ('age', age_transformer, age_col),
        ('sex', sex_transformer, sex_col)
    ])

pl = Pipeline(steps=[('get_cleaning', get_cleaning),
                     ('prepro_col', prepro_col),
                     ('XGBoost', xgboost_m)
                     ])
# Assign the target to y and drop it from X_full
y = X_full.Survived
X_full.drop(['Survived'], axis=1, inplace=True)
# Split data
X_train, X_valid, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)
pl.fit(X_train, y_train)
And here is the error:
---------------------------------------------------------------------------
XGBoostError Traceback (most recent call last)
<ipython-input-887-676d922c8ba5> in <module>
----> 1 pl.fit(X_train, y_train)
/opt/conda/lib/python3.7/site-packages/sklearn/pipeline.py in fit(self, X, y, **fit_params)
333 if self._final_estimator != 'passthrough':
334 fit_params_last_step = fit_params_steps[self.steps[-1][0]]
--> 335 self._final_estimator.fit(Xt, y, **fit_params_last_step)
336
337 return self
/opt/conda/lib/python3.7/site-packages/xgboost/sklearn.py in fit(self, X, y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds, verbose, xgb_model, sample_weight_eval_set, callbacks)
546 obj=obj, feval=feval,
547 verbose_eval=verbose, xgb_model=xgb_model,
--> 548 callbacks=callbacks)
549
550 if evals_result:
/opt/conda/lib/python3.7/site-packages/xgboost/training.py in train(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks)
210 evals=evals,
211 obj=obj, feval=feval,
--> 212 xgb_model=xgb_model, callbacks=callbacks)
213
214
/opt/conda/lib/python3.7/site-packages/xgboost/training.py in _train_internal(params, dtrain, num_boost_round, evals, obj, feval, xgb_model, callbacks)
73 # Skip the first update if it is a recovery step.
74 if version % 2 == 0:
---> 75 bst.update(dtrain, i, obj)
76 bst.save_rabit_checkpoint()
77 version += 1
/opt/conda/lib/python3.7/site-packages/xgboost/core.py in update(self, dtrain, iteration, fobj)
1159 _check_call(_LIB.XGBoosterUpdateOneIter(self.handle,
1160 ctypes.c_int(iteration),
-> 1161 dtrain.handle))
1162 else:
1163 pred = self.predict(dtrain, output_margin=True, training=True)
/opt/conda/lib/python3.7/site-packages/xgboost/core.py in _check_call(ret)
186 """
187 if ret != 0:
--> 188 raise XGBoostError(py_str(_LIB.XGBGetLastError()))
189
190
XGBoostError: [22:28:42] ../src/data/data.cc:530: Check failed: labels_.Size() == num_row_ (712 vs. 622) : Size of labels must equal to number of rows.
Stack trace:
[bt] (0) /opt/conda/lib/python3.7/site-packages/xgboost/lib/libxgboost.so(+0xa5dc4) [0x7f27232f2dc4]
[bt] (1) /opt/conda/lib/python3.7/site-packages/xgboost/lib/libxgboost.so(+0x106c92) [0x7f2723353c92]
[bt] (2) /opt/conda/lib/python3.7/site-packages/xgboost/lib/libxgboost.so(+0x1a84b7) [0x7f27233f54b7]
[bt] (3) /opt/conda/lib/python3.7/site-packages/xgboost/lib/libxgboost.so(+0x1aae4e) [0x7f27233f7e4e]
[bt] (4) /opt/conda/lib/python3.7/site-packages/xgboost/lib/libxgboost.so(XGBoosterUpdateOneIter+0x55) [0x7f27232e4f35]
[bt] (5) /opt/conda/lib/python3.7/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7f2783ff0630]
[bt] (6) /opt/conda/lib/python3.7/lib-dynload/../../libffi.so.6(ffi_call+0x22d) [0x7f2783feffed]
[bt] (7) /opt/conda/lib/python3.7/lib-dynload/_ctypes.cpython-37m-x86_64-linux-gnu.so(_ctypes_callproc+0x2ce) [0x7f278323c60e]
[bt] (8) /opt/conda/lib/python3.7/lib-dynload/_ctypes.cpython-37m-x86_64-linux-gnu.so(+0x13044) [0x7f278323d044]
The error indicates that labels_.Size() == num_row_ (712 vs. 622): XGBoost received 622 rows of features but 712 labels, and the two must be equal. In your dataset, y = X_full.Survived is the label/target output. The mismatch comes from the get_cleaning step: the Fare outlier filter drops rows from X inside the pipeline, while y_train still holds the original 712 labels. Do any row-dropping before the train/test split (or outside the pipeline) so that X and y stay aligned.
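A minimal sketch of one way to avoid the mismatch, assuming the cleaning() function, prepro_col, and xgboost_m defined above: do the row-dropping outside the pipeline and take y from the already-cleaned frame, so X and y always cover the same rows.

train_df = cleaning(pd.read_csv(train_path))  # drops Fare outliers, adds family_size, drops unused columns

y = train_df.Survived
X = train_df.drop(['Survived'], axis=1)

X_train, X_valid, y_train, y_valid = train_test_split(
    X, y, train_size=0.8, test_size=0.2, random_state=0)

# The pipeline now only contains steps that preserve the number of rows.
pl = Pipeline(steps=[('prepro_col', prepro_col),
                     ('XGBoost', xgboost_m)])
pl.fit(X_train, y_train)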
Related
Dimension error appears when trying to call minimize function
import numpy as np
import math
import scipy
from scipy import optimize
from scipy.optimize import minimize, line_search
x1=[1,2,1] ; y1=0
x2=[1,1,2] ; y2=0
x3=[2,3,3] ; y3=1
x4=[2,2,1] ; y4=1
x5=[1,2,3] ; y5=0
x6=[1,3,1] ; y6=1
x7=[1,1,1] ; y7=0
x8=[1,2,2] ; y8=0
x9=[1,2,1] ; y9=0
x10=[1,1,1] ; y10=0
x11=[2,2,2] ; y11=1
x12=[1,2,2] ; y12=0
X= np.array([x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12])
y=np.array([y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11,y12])
n=len(y)
def h(x):
    return 1/(1+np.exp(-x))

def r(beta):
    f=0
    for i in range(n):
        f=f+ (1-y[i])* np.dot(X[i],beta) + np.log( 1+np.exp(-np.dot(X[i],beta) ))
    return np.array([f/n])

#gradient of r
def gradr(beta):
    f=0
    for i in range(n):
        mu= h(np.dot(X[i],beta))
        f=f+ (mu-y[i])*X[i]
    return (f/n).reshape(3,1)

def exactsearch(beta_0,d):
    phi_aux = lambda alfa : r(beta_0+ alfa*d)
    alfa_0=np.array([1])
    bds=[(0,None)]
    res = minimize(phi_aux, alfa_0, bounds=bds)
    alfa=np.array([res.x])
    return alfa

def GradientMethod(beta,f):
    N=0
    e=10**(-5)
    p=-gradr(beta)
    alfa=f(beta,p)
    while True:
        if r(beta)==r(beta+alfa*p):break
        if N==10000:break
        if alfa<=e:break
        else:
            N=N+1
            beta=beta+alfa*p
            p=-gradr(beta)
            alfa=f(beta,p)
    return [beta,r(beta),N]

GradientMethod(np.array([1,1,1]),exactsearch)
X is a 12 by 3 matrix (one row per data point), and r is a function that takes a size-3 vector and combines it with the rows of X.
When changing np.exp to math.exp, the error changes to TypeError: only size-1 arrays can be converted to Python scalars. Also, I previously encountered the error ValueError: shapes (3,) and (1,3) not aligned: 3 (dim 0) != 1 (dim 0), but it went away after reshaping gradr.
I should add that I don't fully understand the function exactsearch, since it was given to me.
Full error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-bb9e6dc26271> in <module>
62 return [beta,r(beta),N]
63
---> 64 GradientMethod(np.array([1,1,1]),exactsearch)
<ipython-input-20-bb9e6dc26271> in GradientMethod(beta, f)
50 e=10**(-5)
51 p=-gradr(beta)
---> 52 alfa=f(beta,p)
53 while True:
54 if r(beta)==r(beta+alfa*p):break
<ipython-input-20-bb9e6dc26271> in exactsearch(beta_0, d)
42 alfa_0=np.array([1])
43 bds=[(0,None)]
---> 44 res = minimize(phi_aux, alfa_0, bounds=bds)
45 alfa=np.array([res.x])
46 return alfa
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
615 **options)
616 elif meth == 'l-bfgs-b':
--> 617 return _minimize_lbfgsb(fun, x0, args, jac, bounds,
618 callback=callback, **options)
619 elif meth == 'tnc':
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/lbfgsb.py in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, finite_diff_rel_step, **unknown_options)
304 iprint = disp
305
--> 306 sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
307 bounds=new_bounds,
308 finite_diff_rel_step=finite_diff_rel_step)
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py in _prepare_scalar_function(fun, x0, jac, args, bounds, epsilon, finite_diff_rel_step, hess)
259 # ScalarFunction caches. Reuse of fun(x) during grad
260 # calculation reduces overall function evaluations.
--> 261 sf = ScalarFunction(fun, x0, args, grad, hess,
262 finite_diff_rel_step, bounds, epsilon=epsilon)
263
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py in __init__(self, fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds, epsilon)
93
94 self._update_grad_impl = update_grad
---> 95 self._update_grad()
96
97 # Hessian Evaluation
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py in _update_grad(self)
169 def _update_grad(self):
170 if not self.g_updated:
--> 171 self._update_grad_impl()
172 self.g_updated = True
173
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py in update_grad()
89 self._update_fun()
90 self.ngev += 1
---> 91 self.g = approx_derivative(fun_wrapped, self.x, f0=self.f,
92 **finite_diff_options)
93
~/anaconda3/lib/python3.8/site-packages/scipy/optimize/_numdiff.py in approx_derivative(fun, x0, method, rel_step, abs_step, f0, bounds, sparsity, as_linear_operator, args, kwargs)
386 f0 = np.atleast_1d(f0)
387 if f0.ndim > 1:
--> 388 raise ValueError("`f0` passed has more than 1 dimension.")
389
390 if np.any((x0 < lb) | (x0 > ub)):
ValueError: `f0` passed has more than 1 dimension.
So apparently the problem was with the dimensions of the arrays: somewhere along the way an array went from looking like [1,2,3] to [[1,2,3]], changing its shape from (3,) to (1,3). I tried to fix it by reshaping the output of gradr to (3,1), but that only made things worse. Instead, the solution was to remove the reshape in gradr and to reshape every beta to (3,) before it is operated on.
def r(beta):
    f=0
    beta=beta.reshape(3,)
    for i in range(n):
        f=f+ (1-y[i])* np.dot(X[i],beta) + np.log( 1+np.exp(-np.dot(X[i],beta) ))
    return np.array([f/n])

#gradient of r
def gradr(beta):
    f=0
    beta=beta.reshape(3,)
    for i in range(n):
        mu= h(np.dot(X[i],beta))
        f=f+ (mu-y[i])*X[i]
    return (f/n)
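For illustration only (a hedged sketch, not part of the original code), here is where the 2-D f0 that SciPy complained about came from: inside exactsearch, beta_0 has shape (3,) while the old reshaped gradient d had shape (3, 1), so beta_0 + alfa*d broadcast to a (3, 3) matrix and r() then returned a (1, 3) array instead of a length-1 array.

import numpy as np

beta_0 = np.array([1.0, 1.0, 1.0])       # shape (3,)
d = np.array([[0.1], [0.2], [0.3]])      # shape (3, 1), like the old reshaped gradr output
print((beta_0 + 1.0 * d).shape)          # (3, 3) -- broadcasting, not the (3,) vector intended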
What is the solution to the following error in TensorFlow?
ValueError: The two structures don't have the same sequence length.
Input structure has length 1, while shallow structure has length 2.
I tried tensorflow versions: 2.9.1 and 2.4.0.
A toy example to reproduce the error is given below.
import tensorflow as tf

d1 = tf.data.Dataset.range(10)
d1 = d1.map(lambda x: tf.cast([x], tf.float32))

def func1(x):
    y1 = 2.0 * x
    y2 = -3.0 * x
    return tuple([y1, y2])

d2 = d1.map(lambda x: tf.py_function(func1, [x], [tf.float32, tf.float32]))
d3 = d2.padded_batch(3, padded_shapes=(None,))

for x, y in d2.as_numpy_iterator():
    pass
The full error is:
ValueError Traceback (most recent call last)
~/Documents/pythonProject/tfProjects/asr/transformer/dataset.py in <module>
256 return tuple([y1, y2])
257 d2 = d1.map(lambda x: tf.py_function(func1, [x], [tf.float32, tf.float32]))
---> 258 d3 = d2.padded_batch(3, padded_shapes=(None,))
259 for x, y in d2.as_numpy_iterator():
260 pass
~/miniconda3/envs/jtf2/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py in padded_batch(self, batch_size, padded_shapes, padding_values, drop_remainder, name)
1887 padding_values,
1888 drop_remainder,
-> 1889 name=name)
1890
1891 def map(self,
~/miniconda3/envs/jtf2/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py in __init__(self, input_dataset, batch_size, padded_shapes, padding_values, drop_remainder, name)
5171
5172 input_shapes = get_legacy_output_shapes(input_dataset)
-> 5173 flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)
5174
5175 flat_padded_shapes_as_tensors = []
~/miniconda3/envs/jtf2/lib/python3.7/site-packages/tensorflow/python/data/util/nest.py in flatten_up_to(shallow_tree, input_tree)
377 `input_tree`.
378 """
--> 379 assert_shallow_structure(shallow_tree, input_tree)
380 return list(_yield_flat_up_to(shallow_tree, input_tree))
381
~/miniconda3/envs/jtf2/lib/python3.7/site-packages/tensorflow/python/data/util/nest.py in assert_shallow_structure(shallow_tree, input_tree, check_types)
290 if len(input_tree) != len(shallow_tree):
291 raise ValueError(
--> 292 "The two structures don't have the same sequence length. Input "
293 f"structure has length {len(input_tree)}, while shallow structure "
294 f"has length {len(shallow_tree)}.")
ValueError: The two structures don't have the same sequence length. Input structure has length 1, while shallow structure has length 2.
Each element of d2 is a tuple of two tensors (y1, y2), so padded_shapes must supply one shape per component. Passing the single shape (None,) gives a structure of length 1, which does not match the length-2 element structure. The following modification to the padded_shapes argument resolves the error.
import tensorflow as tf

d1 = tf.data.Dataset.range(10)
d1 = d1.map(lambda x: tf.cast([x], tf.float32))

def func1(x):
    y1 = 2.0 * x
    y2 = -3.0 * x
    return tuple([y1, y2])

d2 = d1.map(lambda x: tf.py_function(func1, [x], [tf.float32, tf.float32]))
d3 = d2.padded_batch(3, padded_shapes=([None], [None]))

for x, y in d2.as_numpy_iterator():
    pass
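As a quick sanity check (a sketch assuming TF 2.x eager execution), iterating the padded dataset d3 instead of d2 shows that each batch is a (y1, y2) pair, padded per component:

for y1, y2 in d3.as_numpy_iterator():
    print(y1.shape, y2.shape)  # e.g. (3, 1) (3, 1) for the full batches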
I have trained a deep Q-learning model using Chainer:
import copy
import time

import numpy as np

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import serializers

class Q_Network(chainer.Chain):

    def __init__(self, input_size, hidden_size, output_size):
        super(Q_Network, self).__init__(
            fc1=L.Linear(input_size, hidden_size),
            fc2=L.Linear(hidden_size, hidden_size),
            fc3=L.Linear(hidden_size, output_size)
        )

    def __call__(self, x):
        h = F.relu(self.fc1(x))
        h = F.relu(self.fc2(h))
        y = self.fc3(h)
        return y

    def reset(self):
        self.zerograds()
def train_dqn(env):

    Q = Q_Network(input_size=env.history_t + 1, hidden_size=100, output_size=3)
    Q_ast = copy.deepcopy(Q)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(Q)

    epoch_num = 50
    step_max = len(env.data) - 1
    memory_size = 200
    batch_size = 20
    # epsilon = 1.0
    epsilon = 0.9
    epsilon_decrease = 1e-3
    epsilon_min = 0.1
    start_reduce_epsilon = 200
    train_freq = 10
    update_q_freq = 20
    # gamma = 0.97
    gamma = 0.9
    show_log_freq = 5

    memory = []
    total_step = 0
    total_rewards = []
    total_losses = []

    start = time.time()
    for epoch in range(epoch_num):

        pobs = env.reset()
        step = 0
        done = False
        total_reward = 0
        total_loss = 0

        while not done and step < step_max:

            # select act
            pact = np.random.randint(3)
            if np.random.rand() > epsilon:
                pact = Q(np.array(pobs, dtype=np.float32).reshape(1, -1))
                pact = np.argmax(pact.data)

            # act
            obs, reward, done = env.step(pact)

            # add memory
            memory.append((pobs, pact, reward, obs, done))
            if len(memory) > memory_size:
                memory.pop(0)

            # train or update q
            if len(memory) == memory_size:
                if total_step % train_freq == 0:
                    shuffled_memory = np.random.permutation(memory)
                    memory_idx = range(len(shuffled_memory))
                    for i in memory_idx[::batch_size]:
                        batch = np.array(shuffled_memory[i:i + batch_size])
                        b_pobs = np.array(batch[:, 0].tolist(), dtype=np.float32).reshape(batch_size, -1)
                        b_pact = np.array(batch[:, 1].tolist(), dtype=np.int32)
                        b_reward = np.array(batch[:, 2].tolist(), dtype=np.int32)
                        b_obs = np.array(batch[:, 3].tolist(), dtype=np.float32).reshape(batch_size, -1)
                        b_done = np.array(batch[:, 4].tolist(), dtype=np.bool)

                        q = Q(b_pobs)
                        maxq = np.max(Q_ast(b_obs).data, axis=1)
                        target = copy.deepcopy(q.data)
                        for j in range(batch_size):
                            target[j, b_pact[j]] = b_reward[j] + gamma * maxq[j] * (not b_done[j])
                        Q.reset()
                        loss = F.mean_squared_error(q, target)
                        total_loss += loss.data
                        loss.backward()
                        optimizer.update()

                if total_step % update_q_freq == 0:
                    Q_ast = copy.deepcopy(Q)

            # epsilon
            if epsilon > epsilon_min and total_step > start_reduce_epsilon:
                epsilon -= epsilon_decrease

            # next step
            total_reward += reward
            pobs = obs
            step += 1
            total_step += 1

        total_rewards.append(total_reward)
        total_losses.append(total_loss)

        if (epoch + 1) % show_log_freq == 0:
            log_reward = sum(total_rewards[((epoch + 1) - show_log_freq):]) / show_log_freq
            log_loss = sum(total_losses[((epoch + 1) - show_log_freq):]) / show_log_freq
            elapsed_time = time.time() - start
            print('\t'.join(map(str, [epoch + 1, epsilon, total_step, log_reward, log_loss, elapsed_time])))
            start = time.time()

    return Q, total_losses, total_rewards


if __name__ == "__main__":
    Q, total_losses, total_rewards = train_dqn(Environment1(train))
    serializers.save_npz(r'C:\Users\willi\Desktop\dqn\dqn.model', Q)
After saving the model, I load it again and feed data into it to make a prediction.
Load the model:
model = Q_Network (input_size=91, hidden_size=100, output_size=3)
serializers.load_npz(r'C:\Users\willi\Desktop\dqn\dqn.model', model)
Feed one row of data:
data = pd.read_csv (r'C:\Users\willi\Downloads\spyv.csv')
The data looks like:
open high low close volume datetime
0 236.250 239.01 236.22 238.205 2327395 30600
1 238.205 240.47 238.00 239.920 1506096 30660
2 239.955 240.30 238.85 239.700 1357531 30720
3 239.690 243.33 239.66 241.650 1265604 30780
4 241.570 242.13 240.20 240.490 896000 30840
Now predict:
x = data.iloc[1].to_numpy()
y = model(x)
But the error says:
IndexError: tuple index out of range
The full error is:
IndexError Traceback (most recent call last)
<ipython-input-7-b745008aa965> in <module>
64
65 x = data.iloc[1].to_numpy()
---> 66 y = Q(x)
67
68
~\ddqn.ipynb in __call__(self, x)
~\Anaconda3\lib\site-packages\chainer\link.py in __call__(self, *args, **kwargs)
285 # forward is implemented in the child classes
286 forward = self.forward # type: ignore
--> 287 out = forward(*args, **kwargs)
288
289 # Call forward_postprocess hook
~\Anaconda3\lib\site-packages\chainer\links\connection\linear.py in forward(self, x, n_batch_axes)
181 in_size = utils.size_of_shape(x.shape[n_batch_axes:])
182 self._initialize_params(in_size)
--> 183 return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
~\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py in linear(x, W, b, n_batch_axes)
306 args = x, W, b
307
--> 308 y, = LinearFunction().apply(args)
309 if n_batch_axes > 1:
310 y = y.reshape(batch_shape + (-1,))
~\Anaconda3\lib\site-packages\chainer\function_node.py in apply(self, inputs)
305
306 if configuration.config.type_check:
--> 307 self._check_data_type_forward(in_data)
308
309 self.check_layout_forward(input_vars)
~\Anaconda3\lib\site-packages\chainer\function_node.py in _check_data_type_forward(self, in_data)
444 try:
445 with type_check.light_mode:
--> 446 self.check_type_forward(in_type)
447 return
448 except type_check.InvalidType:
~\Anaconda3\lib\site-packages\chainer\functions\connection\linear.py in check_type_forward(self, in_types)
27 x_type.ndim == 2,
28 w_type.ndim == 2,
---> 29 x_type.shape[1] == w_type.shape[1],
30 )
31 if type_check.eval(n_in) == 3:
IndexError: tuple index out of range
TypeError: incompatible array types are mixed in the forward input (LinearFunction).
Actual: <class 'pandas.core.frame.DataFrame'>, <class 'numpy.ndarray'>, <class 'numpy.ndarray'>
The TypeError says that your input is a pandas.core.frame.DataFrame while your model parameters are numpy.ndarray arrays.
You need to convert your pandas DataFrame to NumPy using .to_numpy().
You will probably face other issues with the format of the data and will need to manipulate it to match your training examples.
x = data.iloc[1].to_numpy()
y = model(x)
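Beyond the dtype, the IndexError in the traceback comes from the Linear layer's type check reading x_type.shape[1] on a 1-D array. A hedged sketch of the input format Chainer's L.Linear expects, assuming the data and model loaded above: a 2-D float32 array with a batch axis whose second dimension matches the input_size used at training time (91 here).

import numpy as np

x = data.iloc[1].to_numpy().astype(np.float32)  # one row, as float32
x = x.reshape(1, -1)                            # add the batch axis: shape (1, n_features)
# Note: the model was built with input_size=91, so the call below only
# succeeds once x actually provides 91 features per row.
y = model(x)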
I have a model with a custom activation. As a result,
model2 = keras.models.clone_model(model)
gives an error. I'm able to load saved models using the custom_objects keyword, but I see no such option on clone_model. Is there a way around it besides rebuilding the model and transferring the weights?
EDIT:
Here's example code (toy problem):
import tensorflow.keras as keras
import tensorflow.keras.backend as K
def myTanh(x):
    return K.tanh(x)
inp = keras.Input(shape=(10,10,1))
flat = keras.layers.Flatten()(inp)
out = keras.layers.Dense(20, activation=myTanh)(flat)
model = keras.Model(inp,out)
model.compile(optimizer=keras.optimizers.Adam(lr=0.001),loss='categorical_crossentropy')
model2 = keras.models.clone_model(model)
And the error dump:
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/models.py in clone_model(model, input_tensors)
269 return _clone_sequential_model(model, input_tensors=input_tensors)
270 else:
--> 271 return _clone_functional_model(model, input_tensors=input_tensors)
272
273
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/models.py in _clone_functional_model(model, input_tensors)
129 if layer not in layer_map:
130 # Clone layer.
--> 131 new_layer = layer.__class__.from_config(layer.get_config())
132 layer_map[layer] = new_layer
133 layer = new_layer
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in from_config(cls, config)
400 A layer instance.
401 """
--> 402 return cls(**config)
403
404 def compute_output_shape(self, input_shape):
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/layers/core.py in __init__(self, units, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, **kwargs)
920 activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
921 self.units = int(units)
--> 922 self.activation = activations.get(activation)
923 self.use_bias = use_bias
924 self.kernel_initializer = initializers.get(kernel_initializer)
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/activations.py in get(identifier)
209 if isinstance(identifier, six.string_types):
210 identifier = str(identifier)
--> 211 return deserialize(identifier)
212 elif callable(identifier):
213 return identifier
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/activations.py in deserialize(name, custom_objects)
200 module_objects=globals(),
201 custom_objects=custom_objects,
--> 202 printable_module_name='activation function')
203
204
~/.conda/envs/tf-gpu/lib/python3.6/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
210 if fn is None:
211 raise ValueError('Unknown ' + printable_module_name + ':' +
--> 212 function_name)
213 return fn
214 else:
ValueError: Unknown activation function:myTanh
I solved the issue by calling
keras.utils.get_custom_objects().update(custom_objects)
right after the definition of the additional objects that Keras must be aware of in order to properly clone the model:
import tensorflow as tf
import tensorflow.keras as keras

def lrelu(x, alpha=0.2):
    return tf.nn.relu(x) * (1 - alpha) + x * alpha

custom_objects = {
    'lrelu': lrelu,
}

keras.utils.get_custom_objects().update(custom_objects)
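Applied to the toy problem from the question (a sketch assuming the same myTanh and model defined above), registering the activation before cloning lets clone_model resolve 'myTanh' during deserialization:

keras.utils.get_custom_objects().update({'myTanh': myTanh})
model2 = keras.models.clone_model(model)  # no longer raises "Unknown activation function:myTanh"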
This is an open bug in Keras.
The suggested workaround is to use a Lambda layer instead of an Activation layer.
x = keras.layers.Lambda(my_custom_activation_function)(x)
This will work:
tf.keras.layers.Concatenate()([features['a'], features['b']])
While this:
tf.keras.layers.Concatenate()((features['a'], features['b']))
Results in:
TypeError: int() argument must be a string or a number, not 'TensorShapeV1'
Is that expected? If so, why does it matter what kind of sequence I pass?
Thanks,
Zach
EDIT (adding a code example):
import pandas as pd
import numpy as np
import tensorflow as tf

data = {
    'a': [1.0, 2.0, 3.0],
    'b': [0.1, 0.3, 0.2],
}

with tf.Session() as sess:
    ds = tf.data.Dataset.from_tensor_slices(data)
    ds = ds.batch(1)
    it = ds.make_one_shot_iterator()
    features = it.get_next()

    concat = tf.keras.layers.Concatenate()((features['a'], features['b']))

    try:
        while True:
            print(sess.run(concat))
    except tf.errors.OutOfRangeError:
        pass
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-135-0e1a45017941> in <module>()
6 features = it.get_next()
7
----> 8 concat = tf.keras.layers.Concatenate()((features['a'], features['b']))
9
10
google3/third_party/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
751 # the user has manually overwritten the build method do we need to
752 # build it.
--> 753 self.build(input_shapes)
754 # We must set self.built since user defined build functions are not
755 # constrained to set self.built.
google3/third_party/tensorflow/python/keras/utils/tf_utils.py in wrapper(instance, input_shape)
148 tuple(tensor_shape.TensorShape(x).as_list()) for x in input_shape]
149 else:
--> 150 input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
151 output_shape = fn(instance, input_shape)
152 if output_shape is not None:
google3/third_party/tensorflow/python/framework/tensor_shape.py in __init__(self, dims)
688 else:
689 # Got a list of dimensions
--> 690 self._dims = [as_dimension(d) for d in dims_iter]
691
692 #property
google3/third_party/tensorflow/python/framework/tensor_shape.py in as_dimension(value)
630 return value
631 else:
--> 632 return Dimension(value)
633
634
google3/third_party/tensorflow/python/framework/tensor_shape.py in __init__(self, value)
183 raise TypeError("Cannot convert %s to Dimension" % value)
184 else:
--> 185 self._value = int(value)
186 if (not isinstance(value, compat.bytes_or_text_types) and
187 self._value != value):
TypeError: int() argument must be a string or a number, not 'TensorShapeV1'
https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L329
A comment on the Concatenate class states that it requires a list.
This class calls the backend's concatenate function
https://github.com/keras-team/keras/blob/master/keras/backend/tensorflow_backend.py#L2041
which also states that it requires a list.
In TensorFlow, https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/python/ops/array_ops.py#L1034 also states that it requires a list of tensors. Why? I don't know. In that function the tensors (a variable called "values") are actually checked for being a list or a tuple, but somewhere along the way you still get an error. Your traceback shows where: when the Concatenate layer's build() receives a list of inputs, the shape-conversion wrapper converts each element's shape separately, whereas a tuple falls through to the else branch, where the whole tuple is treated as a single TensorShape and int() is called on its elements, hence the TypeError.
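For completeness, a minimal sketch of the EDIT example with the tuple swapped for a list, which is the input form the layer handles correctly (everything else in the snippet above unchanged):

concat = tf.keras.layers.Concatenate()([features['a'], features['b']])  # list, not tuple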