Using validation_split munges my shapes, why? - tensorflow

I have a working model, and I wish to begin using validation_split (≈0.1) to do my validation 'on the fly'. When I pass a validation_split other than 0.0, I get an error.
I have been tweaking the batch_size value that I pass to fit() as well as the one I pass to tf.keras.layers.Conv2D(), essentially keeping them proportional. No joy.
Here is how I make my model:
def make_convnet_model(flags, shape):
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (8, 8), strides=2, activation='relu', input_shape=shape, batch_size=flags.batch_size, name='conv2d_1'),
        tf.keras.layers.Conv2D(24, (4, 4), strides=1, activation='relu', name='conv2d_2'),
        tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Conv2D(16, (3, 3), strides=2, activation='sigmoid', input_shape=shape, batch_size=flags.batch_size, name='conv2d_3'),
        tf.keras.layers.Conv2D(8, (3, 3), strides=1, activation='sigmoid', name='conv2d_4'),
        tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='sigmoid', name='d3'),
        tf.keras.layers.Dense(5, activation='softmax', name='softmax_d4')
    ])
    return model
Here is how I call fit():
history = model.fit(x=X, y=Y, batch_size=flags.batch_size, epochs=flags.epochs, callbacks=[tensorboard,logger], verbose=flags.verbosity, validation_split=flags.validation_split)
Here is my reward, with some of the cruft trimmed out:
Namespace(***batch_size=20***, columns=320, csv_path='../csv/', data_path='f:/downloads/aptos2019-blindness-detection/', epochs=2,
gray=False, learning_rate=0.001, loss='mean_squared_error',
metric=['accuracy'], model='conv2d', rows=320,
test_path_fragment='test_images/',
train_path_fragment='train_images/', validation_split=0.1,
verbosity=2)
Tensorflow version:1.14.0
Processed data path:f:/downloads/aptos2019-blindness-detection/train_images/color_320x320/
***Train on 18 samples, validate on 2 samples***
Epoch 1/2
Traceback (most recent call last):
File "F:/projects/retinas/retina.py", line 212, in <module>
main(sys.argv)
File "F:/projects/retinas/retina.py", line 122, in main
history = model.fit(x=X, y=Y, batch_size=flags.batch_size, epochs=flags.epochs, callbacks=[tensorboard,logger],
verbose=flags.verbosity, validation_split=flags.validation_split)
File "C:\Users\WascallyWabbit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\training.py",
line 780, in fit
steps_name='steps_per_epoch')
File "C:\Users\WascallyWabbit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py",
line 363, in model_iteration
batch_outs = f(ins_batch)
File "C:\Users\WascallyWabbit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\backend.py",
line 3292, in call
run_metadata=self.run_metadata)
File "C:\Users\WascallyWabbit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py",
line 1458, in call
run_metadata_ptr)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [20,5] vs. [18,5]
[[{{node Adam/gradients/loss/softmax_d4_loss/SquaredDifference_grad/BroadcastGradientArgs}}]]

The problem turns out to have stemmed from unnecessarily specifying a batch_size in my call to Conv2D(). I now accept the default for that parameter, and it is working.
Not sure why. Don't really care :-|
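For anyone who does care, the likely mechanism: passing batch_size to a layer bakes a fixed batch dimension of 20 into the graph. With 20 samples and validation_split=0.1, fit() holds out 2 samples and trains on the remaining 18, so the one training batch has 18 rows and collides with the hard-coded 20, which is exactly the reported Incompatible shapes: [20,5] vs. [18,5]. A minimal sketch of the same model with the batch dimension left dynamic:

import tensorflow as tf

def make_convnet_model(flags, shape):
    # Leaving batch_size unset keeps the batch dimension dynamic (None),
    # so fit() can feed the 18-sample training split without a shape clash.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(32, (8, 8), strides=2, activation='relu', input_shape=shape, name='conv2d_1'),
        tf.keras.layers.Conv2D(24, (4, 4), strides=1, activation='relu', name='conv2d_2'),
        tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Conv2D(16, (3, 3), strides=2, activation='sigmoid', name='conv2d_3'),
        tf.keras.layers.Conv2D(8, (3, 3), strides=1, activation='sigmoid', name='conv2d_4'),
        tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='sigmoid', name='d3'),
        tf.keras.layers.Dense(5, activation='softmax', name='softmax_d4')
    ])
    return model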

Related

Problems compiling and fitting the model in tensorflow/keras

I'm trying to compile and fit a model, but this error is occurring:
ValueError: Shapes (None, 10, 10, 10) and (None, 10) are incompatible
code:
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=3, activation="relu", input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=3, activation="relu"))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(10, activation="softmax"))
# Compile the model
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam")
# Fit the model
model.fit(x=X_train, y=y_train, batch_size=32, epochs=10, validation_split=0.3)
Output:
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-a118e2826a10> in <module>()
3
4 # Fit the model
----> 5 model.fit( x=X_train, y=y_train, batch_size=32, epochs=10, validation_split = 0.3)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1790, in categorical_crossentropy
y_true, y_pred, from_logits=from_logits, axis=axis)
File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5083, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 10, 10, 10) and (None, 10) are incompatible
I'm using tensorflow 2.8.2.
How can I get it to work?
What am I doing wrong?
Edit:
The problem was that I ran this part of the program twice (a dumb mistake):
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
print("Shape of y_train:", y_train.shape)
print("One value of y_train:", y_train[0])
I restarted the environment, and the problem was fixed.
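A cheap way to make that cell safe to re-run (a sketch, assuming y_train and y_test start out as 1-D integer class ids as in the snippet above): only encode while the labels are still 1-D, so each extra execution is a no-op instead of wrapping another length-10 axis around the labels, which is where the (None, 10, 10, 10) target shape came from.

from tensorflow.keras.utils import to_categorical

# Only one-hot encode while the labels are still integer class ids (shape (n,));
# re-running the cell then leaves already-encoded labels untouched.
if y_train.ndim == 1:
    y_train = to_categorical(y_train, num_classes=10)
if y_test.ndim == 1:
    y_test = to_categorical(y_test, num_classes=10)
print("Shape of y_train:", y_train.shape)  # stays (n, 10) no matter how often this runs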

ValueError: Input 0 of layer "sequential_13" is incompatible with the layer: expected shape=(None, 21367, 9000), found shape=(None, 9000)

I don't know why this error keeps coming when I run the code below:
CNN.fit(X_train_vector, y_train, epochs=10)
My CNN code is this:
CNN = tf.keras.models.Sequential()
CNN.add(tf.keras.layers.Conv1D(120, kernel_size=3, padding='valid', activation='relu', input_shape = (21367, 9000)))
CNN.add(tf.keras.layers.MaxPooling1D(2))
CNN.add(tf.keras.layers.Dropout(0.2))
CNN.add(tf.keras.layers.Flatten())
CNN.add(tf.keras.layers.Dense(200, activation='relu'))
CNN.add(tf.keras.layers.Dense(20, activation='relu'))
CNN.add(tf.keras.layers.Dense(1, activation='softmax'))
My "X_train_vector" has a shape:
(21367, 9000)
My "y_train" has a shape:
(21367, 1)
The Error I am getting:
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-108-895976bf38cd> in <module>()
----> 1 CNN.fit(X_train_vector, y_train, epochs=10)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 0 of layer "sequential_13" is incompatible with the layer: expected shape=(None, 21367, 9000), found shape=(None, 9000)
I have tried several solutions, including changing my first line of CNN to this:
CNN.add(tf.keras.layers.Conv1D(120, kernel_size=3, padding='valid', activation='relu', input_shape = (9000)))
But running it says:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-109-dd8d734d0a9f> in <module>()
1 CNN = tf.keras.models.Sequential()
----> 2 CNN.add(tf.keras.layers.Conv1D(120, kernel_size=3, padding='valid', activation='relu', input_shape = (9000)))
3 CNN.add(tf.keras.layers.MaxPooling1D(2))
4 CNN.add(tf.keras.layers.Dropout(0.2))
5 CNN.add(tf.keras.layers.Flatten())
3 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py in __init__(self, trainable, name, dtype, dynamic, **kwargs)
441 else:
442 batch_size = None
--> 443 batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
444 self._batch_input_shape = batch_input_shape
445
TypeError: 'int' object is not iterable
Can anyone help me? I have been looking for a solution for two days; it should work the way I am trying it. Is there a mistake I am making? Please let me know.
Thanks in advance.
The input array should have the shape (None, shape_0, shape_1), where None represents the batch size and (shape_0, shape_1) represents the shape of the features. So, you should reshape your input array:
X_train_vector = X_train_vector.reshape(-1, 9000, 1)
You also don't really need to specify the batch size when building the model, so remove that and just use (9000, 1) as the input_shape. Try this:
CNN = tf.keras.models.Sequential()
CNN.add(tf.keras.layers.Conv1D(120, kernel_size=3, padding='valid', activation='relu', input_shape = (9000, 1)))
CNN.add(tf.keras.layers.MaxPooling1D(2))
CNN.add(tf.keras.layers.Dropout(0.2))
CNN.add(tf.keras.layers.Flatten())
CNN.add(tf.keras.layers.Dense(200, activation='relu'))
CNN.add(tf.keras.layers.Dense(20, activation='relu'))
CNN.add(tf.keras.layers.Dense(1, activation='softmax'))
This should solve the problem; the same error should not appear again.
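As an aside, the TypeError from the input_shape = (9000) attempt is pure Python: (9000) is just the integer 9000, not a tuple, which is why Keras's tuple(kwargs['input_shape']) call failed; a one-element tuple needs a trailing comma, (9000,). A quick sketch of the shapes involved (the zeros array is a hypothetical stand-in for X_train_vector):

import numpy as np

# (9000) is an int; (9000,) is a one-element tuple. The trailing comma matters.
assert isinstance((9000), int) and isinstance((9000,), tuple)

X = np.zeros((21367, 9000))  # stand-in for X_train_vector
X = X.reshape(-1, 9000, 1)   # add a channel axis for Conv1D
print(X.shape)               # (21367, 9000, 1); input_shape=(9000, 1) excludes the batch axis

One more thing worth checking: Dense(1, activation='softmax') always outputs 1.0, since softmax over a single unit is constant, so for a binary target Dense(1, activation='sigmoid') is the usual choice.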

Keras model errors

I'm new to machine learning and have been stuck on this error for a while now:
Traceback (most recent call last):
File "model1.py", line 77, in
model.fit(train_generator,
File "C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in call
result = self._call(*args, **kwds)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 840, in _call
return self._stateless_fn(*args, **kwds)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 2829, in call
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 1843, in _filtered_call
return self._call_flat(
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 1923, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 545, in call
outputs = execute.execute(
File "C:\Python38\lib\site-packages\tensorflow\python\eager\execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.NotFoundError: No algorithm worked!
[[node sequential/conv2d/Conv2D (defined at model1.py:77) ]] [Op:__inference_train_function_806]
I'm using CUDA 10.1/Cudnn 8.05, which works fine for the other convolution projects I've tried. Just switching to the basic VGG16 model here lets me run the program without any accuracy gains, so I'm not sure if the issue is with my GPU's libraries, a mistake in the tensor dimensions, or a failure to initialize something.
import tensorflow as tf
import tensorflow_datasets as tfds
# Helper libraries
import os
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers

batch_size = 10
train_dir = "C:/draw/imagenet-object-localization-challenge/ILSVRC/Data/CLS-LOC/train"
validation_dir = "C:/draw/imagenet-object-localization-challenge/ILSVRC/Data/CLS-LOC/val"

train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2,
    fill_mode='nearest')

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical')

model = tf.keras.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64))
model.add(layers.Dense(1000, activation='softmax'))

model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

num_epochs = 20
steps_per_epoch = 10000
val_steps = 10

model.fit(train_generator,
          epochs=num_epochs,
          steps_per_epoch=steps_per_epoch,
          validation_data=validation_generator,
          validation_steps=val_steps)
Thanks in advance for any advice provided in how to fix this error.
Your problem is that in flow_from_directory you need to specify the parameter subset. For the train_generator specify the subset as subset='training'. In the validation_generator specify it as subset='validation'.
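A sketch of the two generator calls with subset added; both read from train_dir, which is what validation_split=0.2 on the ImageDataGenerator expects. (Also worth checking: flow_from_directory yields 3-channel RGB images by default, while the model declares input_shape=(224, 224, 1); pass color_mode='grayscale' here or build the model for (224, 224, 3) so the convolution sees the channels it expects.)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training')       # the fraction kept for training (1 - validation_split)

validation_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation')     # the held-out 20%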

ValueError: Input 0 of layer cu_dnnlstm is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 175]

I am experimenting with CuDNNLSTMs and, I don't know why, even though I am following a tutorial on this, I get this weird error that I can understand but can't debug:
So I have a 4073 time-series * 175 features array, and I am trying to pass those 175 features to the Sequential model, one at a time, to a CuDNNLSTM layer, in order for the model to learn something from it.
"AlvoH" is the target of the RNN.
The code:
train_x, train_y = trainDF, trainDF["AlvoH"]
validation_x, validation_y = validationDF[:-Sequencia], validationDF["AlvoH"][:-Sequencia]
print(train_x.shape[1:])

model = Sequential()
model.add(CuDNNLSTM(512, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(3, activation="softmax"))

opt = tf.keras.optimizers.Adam(learning_rate=0.001, decay=1e-6)
model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

tensorboard = TensorBoard(log_dir=f'logs/{NAME}')
checkpoint = tf.keras.callbacks.ModelCheckpoint("models/{}.model".format("RNN_Final-{epoch:02d}"),
                                                monitor="val_acc",
                                                verbose=1,
                                                save_best_only=True,
                                                mode="max")

history = model.fit(train_x, train_y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(validation_x, validation_y),
                    callbacks=[tensorboard, checkpoint])
the error:
Traceback (most recent call last):
File "ml.py", line 64, in
model.add(CuDNNLSTM(512, input_shape=(train_x.shape[1:None]), return_sequences=True))
File "C:\Users\Anaconda3\lib\site-packages\tensorflow\python\training\tracking\base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\sequential.py", line 198, in add
layer(x)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow\python\keras\layers\recurrent.py", line 654, in call
return super(RNN, self).call(inputs, **kwargs)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py", line 886, in call
self.name)
File "C:\Users\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\input_spec.py", line 180, in assert_input_compatibility
str(x.shape.as_list()))
ValueError: Input 0 of layer cu_dnnlstm is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 175]
As far as I can understand, this tutorial was made before Tensorflow 2.0 and, having 2.0 installed, I noticed some things have changed, in particular for the CuDNNLSTM layer, which now has a different import path, so the problem may be there.
Is this a result of those 2.0 changes? I have tried everything, from passing train_x.shape to train_x.shape[1:] to train_x.shape[:1] (even though that shouldn't make sense), and so on, and I am feeling stuck.
Thanks for the answers, in advance!
Changes that I had to make in order for the code to train properly:
train_x, train_y = array(trainDF[trainDF.columns.tolist()[:-1]]), array(trainDF["AlvoH"])
validation_x, validation_y = array(validationDF[validationDF.columns.tolist()[:-1]][:-Sequencia]), array(validationDF["AlvoH"][:-Sequencia])

train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], 1)
train_y = train_y.reshape(train_y.shape[0], 1, 1)
validation_x = validation_x.reshape(validation_x.shape[0], validation_x.shape[1], 1)
validation_y = validation_y.reshape(validation_y.shape[0], 1, 1)

model = Sequential()
model.add(LSTM(1024, input_shape=(train_x.shape[1:]), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(3, activation="softmax"))

opt = tf.keras.optimizers.Adam(learning_rate=0.0001, decay=1e-8)
model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

tensorboard = TensorBoard(log_dir=f'logs/{NAME}')
filepath = "RNN_Final-{epoch:02d}"
checkpoint = ModelCheckpoint("TEMP/{}.model".format(filepath),
                             monitor="val_acc",
                             verbose=1,
                             save_best_only=True,
                             mode="max")

history = model.fit(train_x, train_y,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=(validation_x, validation_y),
                    callbacks=[tensorboard, checkpoint])
The first aspect was SOLVED: I had to convert the data to numpy arrays and make some changes, as suggested by https://machinelearningmastery.com/reshape-input-data-long-short-term-memory-networks-keras/
But now I have another issue which is confusing me, and it is related to this: if, at the end of training, I run:
print(len(model.layers[0].get_weights()[0][0]))
print(train_x.shape[1:])
I get:
4096
(174, 1)
which means, I think, that I have 4096 weights for the first LSTM layer, where I should have only 174. Am I right?
In tensorflow 2.x you don't have to use CuDNNLSTM; a plain LSTM layer will use the cuDNN implementation under the hood by default (when its arguments are cuDNN-compatible).
The input_shape=(train_x.shape[1:]) must be of rank 2, i.e. (timesteps, features), so change the input data to shape (4073, 175, 1) and try, e.g.:
model = Sequential()
model.add(LSTM(512, input_shape=(175 ,1), return_sequences=True))
model.add(Dropout(0.2))
model.add(BatchNormalization())
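Regarding the follow-up about the 4096 figure: that is not one weight per timestep. The first array returned by get_weights() is the input kernel, whose shape is (input_dim, 4 * units) because the weights of the four LSTM gates are stored side by side; with units=1024 that gives 4 * 1024 = 4096 columns, independent of the 174 timesteps. A quick sketch, using the 1024-unit model from the "Changes" snippet above:

# get_weights() on an LSTM layer returns [kernel, recurrent_kernel, bias]:
#   kernel:           (input_dim, 4 * units)  -> (1, 4096) for units=1024
#   recurrent_kernel: (units, 4 * units)      -> (1024, 4096)
#   bias:             (4 * units,)            -> (4096,)
kernel, recurrent_kernel, bias = model.layers[0].get_weights()
print(kernel.shape)  # (1, 4096): 4 gates * 1024 units
# len(kernel[0]) == 4096, which is exactly what the earlier print() reported.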

RNN giving error ValueError: An operation has `None` for gradient

My deep RNN model was working about a month ago. I left it as a different project took over. Now, coming back and trying to run training, I get an error.
Getting an error:
Traceback (most recent call last):
File "/home/matiss/.local/share/JetBrains/Toolbox/apps/PyCharm-P/ch-0/201.7223.92/plugins/python/helpers/pydev/_pydevd_bundle/pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "", line 1, in
File "/home/matiss/Documents/python_work/PycharmProjects/NectCleave/functions.py", line 358, in weighted_model
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1213, in fit
self._make_train_function()
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 314, in _make_train_function
training_updates = self.optimizer.get_updates(
File "/usr/local/lib/python3.8/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/keras/backend/tensorflow_backend.py", line 75, in symbolic_fn_wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers.py", line 504, in get_updates
grads = self.get_gradients(loss, params)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers.py", line 93, in get_gradients
raise ValueError('An operation has None for gradient. '
ValueError: An operation has None for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.
My model architecture:
def make_model(metrics='', output_bias=None, timesteps=None, features=None):
    from keras import regularizers
    if output_bias is not None:
        output_bias = Constant(output_bias)
    K.clear_session()
    model = Sequential()
    # First LSTM layer
    model.add(Bidirectional(LSTM(units=50, return_sequences=True, recurrent_dropout=0.1),
                            input_shape=(timesteps, features)))
    model.add(Dropout(0.5))
    # Second LSTM layer
    model.add(Bidirectional(LSTM(units=50, return_sequences=True)))
    model.add(Dropout(0.5))
    # Third LSTM layer
    model.add(Bidirectional(LSTM(units=50, return_sequences=True)))
    model.add(Dropout(0.5))
    # Fourth LSTM layer
    model.add(Bidirectional(LSTM(units=50, return_sequences=False)))
    model.add(Dropout(0.5))
    # First Dense layer
    model.add(Dense(units=128, kernel_initializer='he_normal', activation='relu'))
    model.add(Dropout(0.5))
    # Adding the output layer
    if output_bias is None:
        model.add(Dense(units=1, activation='sigmoid', kernel_regularizer=regularizers.l2(0.001)))
    else:
        model.add(Dense(units=1, activation='sigmoid',
                        bias_initializer=output_bias, kernel_regularizer=regularizers.l2(0.001)))
    # https://keras.io/api/losses/
    model.compile(optimizer=Adam(lr=1e-3), loss=BinaryCrossentropy(), metrics=metrics)
    return model
Please help. Why is this happening?
Okay, so after half a day of googling and checking stuff I could not find a solution.
Then I decided to just set up a new python virtual environment and install all the required packages, and boom: it works again.
I have no idea what the issue was or how it came about, but it works now.
Hope this saves some time for others with the same problem.