IndexError when trying to run PyTorch network - indexing

I'm trying to train my first CNN. I split the training images into train and validation data by randomly choosing indices and using Subset and DataLoader. The validation and training splits don't have any of the same indices, so that's not the problem. They also cover the entire dataset.
train = datasets.ImageFolder('train_images', transform=transform)
torch.manual_seed(37)
val_split = random.sample(range(len(img_sizes)), int(0.1 * len(img_sizes)))
train_split = [x for x in range(len(img_sizes)) if x not in val_split]
train_data = Subset(train, train_split)
val_data = Subset(train, val_split)
train_loader = DataLoader(train_data, batch_size = 10, shuffle = True)
val_loader = DataLoader(val_data, batch_size = 10, shuffle = False)
However, when I try to enumerate through the train_loader, I get this index out of range error:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_8652\2928585573.py in <module>
13
14 # Run the training batches
---> 15 for b, (X_train, y_train) in enumerate(train_loader):
16
17 # Apply the model
D:\dum\envs\pytorchenv\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
558 if self.num_workers == 0: # same-process loading
559 indices = next(self.sample_iter) # may raise StopIteration
--> 560 batch = self.collate_fn([self.dataset[i] for i in indices])
561 if self.pin_memory:
562 batch = _utils.pin_memory.pin_memory_batch(batch)
D:\dum\envs\pytorchenv\lib\site-packages\torch\utils\data\dataloader.py in <listcomp>(.0)
558 if self.num_workers == 0: # same-process loading
559 indices = next(self.sample_iter) # may raise StopIteration
--> 560 batch = self.collate_fn([self.dataset[i] for i in indices])
561 if self.pin_memory:
562 batch = _utils.pin_memory.pin_memory_batch(batch)
D:\dum\envs\pytorchenv\lib\site-packages\torch\utils\data\dataset.py in __getitem__(self, idx)
105
106 def __getitem__(self, idx):
--> 107 return self.dataset[self.indices[idx]]
108
109 def __len__(self):
D:\dum\envs\pytorchenv\lib\site-packages\torchvision\datasets\folder.py in __getitem__(self, index)
129 tuple: (sample, target) where target is class_index of the target class.
130 """
--> 131 path, target = self.samples[index]
132 sample = self.loader(path)
133 if self.transform is not None:
IndexError: list index out of range
Anyone know what the problem is?
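A likely culprit, assuming img_sizes was built from a different file listing than the ImageFolder: the split indices are drawn from range(len(img_sizes)) rather than from the dataset itself. If img_sizes has more entries than the dataset, some indices exceed len(train), and Subset raises exactly this IndexError once the loader reaches them. A minimal sketch of the same split keyed to the dataset's own length (assuming a 90/10 split of train is the intent):

train = datasets.ImageFolder('train_images', transform=transform)
torch.manual_seed(37)
# Draw validation indices from the dataset's own length so every
# index passed to Subset is guaranteed to be valid.
val_split = random.sample(range(len(train)), int(0.1 * len(train)))
train_split = [x for x in range(len(train)) if x not in val_split]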

Related

Tensorflow mixed_precision error `x` and `y` must have the same dtype, got tf.float16 != tf.float32

Adding the line mixed_precision.set_global_policy(policy="mixed_float16") gives an error. The error:
TypeError                                 Traceback (most recent call last)
in
      5 #mixed_precision.set_global_policy(policy="float32")
      6 input_shape = (224, 224, 3)
----> 7 base_model = tf.keras.applications.EfficientNetB0(include_top=False)
      8 base_model.trainable = False # freeze base model layers
      9

4 frames
/usr/local/lib/python3.7/dist-packages/keras/applications/efficientnet.py in EfficientNetB0(include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation, **kwargs)
    559         classes=classes,
    560         classifier_activation=classifier_activation,
--> 561         **kwargs)
    562
    563

/usr/local/lib/python3.7/dist-packages/keras/applications/efficientnet.py in EfficientNet(width_coefficient, depth_coefficient, default_size, dropout_rate, drop_connect_rate, depth_divisor, activation, blocks_args, model_name, include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation)
    332     # original implementation.
    333     # See https://github.com/tensorflow/tensorflow/issues/49930 for more details
--> 334     x = x / tf.math.sqrt(IMAGENET_STDDEV_RGB)
    335
    336     x = layers.ZeroPadding2D(

/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
    151     except Exception as e:
    152       filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153       raise e.with_traceback(filtered_tb) from None
    154     finally:
    155       del filtered_tb

/usr/local/lib/python3.7/dist-packages/keras/layers/core/tf_op_layer.py in handle(self, op, args, kwargs)
    105         isinstance(x, keras_tensor.KerasTensor)
    106         for x in tf.nest.flatten([args, kwargs])):
--> 107       return TFOpLambda(op)(*args, **kwargs)
    108     else:
    109       return self.NOT_SUPPORTED

/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

TypeError: Exception encountered when calling layer "tf.math.truediv_3" (type TFOpLambda).

`x` and `y` must have the same dtype, got tf.float16 != tf.float32.

Call arguments received by layer "tf.math.truediv_3" (type TFOpLambda):
  • x=tf.Tensor(shape=(None, None, None, 3), dtype=float16)
  • y=tf.Tensor(shape=(3,), dtype=float32)
  • name=None
This is the code:
from tensorflow.keras import layers
# Create base model
mixed_precision.set_global_policy(policy="mixed_float16")
input_shape = (224, 224, 3)
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False # freeze base model layers
# Create Functional model
inputs = layers.Input(shape=input_shape, name="input_layer")
# Note: EfficientNetBX models have rescaling built-in but if your model didn't you could have a layer like below
# x = layers.Rescaling(1./255)(x)
x = base_model(inputs, training=False) # set base_model to inference mode only
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
x = layers.Dense(len(class_names))(x) # want one output neuron per class
# Separate activation of output layer so we can output float32 activations
outputs = layers.Activation("softmax", dtype=tf.float32, name="softmax_float32")(x)
model = tf.keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy when labels are *not* one-hot
              optimizer=tf.keras.optimizers.Adam(),
              metrics=["accuracy"])
When I change this line to use float32 instead of mixed_float16, i.e. mixed_precision.set_global_policy(policy="float32"), the error goes away. I want to use mixed precision; how can I do it?
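One workaround, assuming the dtype clash comes from EfficientNet's built-in ImageNet normalization (the tensorflow/tensorflow#49930 bug referenced in the traceback, which was fixed in later TF/Keras releases, so upgrading is the cleaner option): build the base model while the global policy is still float32, then switch to mixed_float16 before building the head. A minimal sketch; note the backbone layers keep the float32 policy they were built under, so only the new head actually runs in mixed precision.

from tensorflow.keras import mixed_precision

# Build the backbone under float32 so its internal normalization
# does not mix float16 and float32 tensors.
mixed_precision.set_global_policy("float32")
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False

# Switch to mixed precision for the layers built from here on.
mixed_precision.set_global_policy("mixed_float16")
inputs = layers.Input(shape=(224, 224, 3), name="input_layer")
x = base_model(inputs, training=False)
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
x = layers.Dense(len(class_names))(x)
outputs = layers.Activation("softmax", dtype=tf.float32, name="softmax_float32")(x)
model = tf.keras.Model(inputs, outputs)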

How to fix "pop from empty list" error while using Keras tuner search method with TPU in google colab?

I was previously able to run the Keras Tuner search method on my model with the GPU runtime of Google Colab, but when I switch to the TPU runtime I get the following error. I believe the TPU runtime needs access to Google Cloud Storage for the checkpoint folder that Keras Tuner saves model checkpoints in, but I haven't figured out how to set that up. Please help me resolve this issue.
My code:
def post_se(hp):
    ip = Input(shape=(6, 128))
    x = Masking()(ip)
    x = LSTM(units=hp.Choice('lstm_1', values=[8, 16, 32, 64, 128, 256, 512]), return_sequences=True)(x)
    x = Dropout(hp.Choice(name='Dropout', values=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]))(x)
    x = LSTM(units=hp.Choice('lstm_2', values=[8, 16, 32, 64, 128, 256, 512]))(x)
    x = Dropout(hp.Choice(name='Dropout_2', values=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]))(x)
    y = Permute((2, 1))(ip)
    y = Conv1D(hp.Choice('conv_1_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_1_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)
    y = Conv1D(hp.Choice('conv_2_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_2_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)
    y = Conv1D(hp.Choice('conv_3_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_3_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)
    x = concatenate([x, y])
    # batch_size = hp.Choice('batch_size', values=[32, 64, 128, 256, 512, 1024, 2048, 4096])
    out = Dense(num_classes, activation='softmax')(x)
    model = Model(ip, out)
    if gpu:
        opt = keras.optimizers.Adam(learning_rate=0.001)
    if tpu:
        opt = keras.optimizers.Adam(learning_rate=8 * 0.001)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    # model.summary()
    return model
if gpu:
    tuner = kt.tuners.BayesianOptimization(post_se,
                                           objective='val_accuracy',
                                           max_trials=30,
                                           seed=42,
                                           project_name='Model_gpu')
    # Will stop training if the "val_loss" hasn't improved in 30 epochs.
    tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
if tpu:
    print("TPU")
    with strategy.scope():
        tuner = kt.tuners.BayesianOptimization(post_se,
                                               objective='val_accuracy',
                                               max_trials=30,
                                               seed=42,
                                               project_name='Model_tpu')
        # Will stop training if the "val_loss" hasn't improved in 30 epochs.
        tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
The error log:
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
/usr/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
10 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in resource_creator_scope(resource_type, resource_creator)
2957 resource_creator):
-> 2958 yield
2959
<ipython-input-15-24c1e1bb603d> in <module>()
17 # Will stop training if the "val_loss" hasn't improved in 30 epochs.
---> 18 tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
178 self.on_trial_begin(trial)
--> 179 results = self.run_trial(trial, *fit_args, **fit_kwargs)
180 # `results` is None indicates user updated oracle in `run_trial()`.
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/tuner.py in run_trial(self, trial, *args, **kwargs)
303 copied_kwargs["callbacks"] = callbacks
--> 304 obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
305
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/tuner.py in _build_and_fit_model(self, trial, *args, **kwargs)
233 model = self._try_build(hp)
--> 234 return self.hypermodel.fit(hp, model, *args, **kwargs)
235
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/hypermodel.py in fit(self, hp, model, *args, **kwargs)
136 """
--> 137 return model.fit(*args, **kwargs)
138
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _numpy(self)
1116 except core._NotOkStatusException as e: # pylint: disable=protected-access
-> 1117 raise core._status_to_exception(e) from None # pylint: disable=protected-access
1118
UnimplementedError: File system scheme '[local]' not implemented (file: './untitled_project/trial_78ed6883514d67dc6222064095c134cb/checkpoints/epoch_0/checkpoint_temp/part-00000-of-00001')
Encountered when executing an operation using EagerExecutor. This error cancels all future operations and poisons their output tensors.
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-15-24c1e1bb603d> in <module>()
16 seed=42)
17 # Will stop training if the "val_loss" hasn't improved in 30 epochs.
---> 18 tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py in __exit__(self, exception_type, exception_value, traceback)
454 "tf.distribute.set_strategy() out of `with` scope."),
455 e)
--> 456 _pop_per_thread_mode()
457
458
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribution_strategy_context.py in _pop_per_thread_mode()
64
65 def _pop_per_thread_mode():
---> 66 ops.get_default_graph()._distribution_strategy_stack.pop(-1) # pylint: disable=protected-access
67
68
IndexError: pop from empty list
For some extra info, I am attaching my code in this post.
This is your error:
UnimplementedError: File system scheme '[local]' not implemented (file: './untitled_project/trial_78ed6883514d67dc6222064095c134cb/checkpoints/epoch_0/checkpoint_temp/part-00000-of-00001')
See https://stackoverflow.com/a/62881833/14043558 for a solution.
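In short, Cloud TPUs cannot read or write the Colab VM's local disk, so the tuner's working directory (which defaults to ./untitled_project) must point at Google Cloud Storage. A minimal sketch, assuming you have a GCS bucket you can write to (the bucket path below is a placeholder):

with strategy.scope():
    tuner = kt.tuners.BayesianOptimization(post_se,
                                           objective='val_accuracy',
                                           max_trials=30,
                                           seed=42,
                                           directory='gs://your-bucket/keras_tuner',  # GCS path, placeholder
                                           project_name='Model_tpu')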

I am getting OOM while running a pre-trained BERT model on a new dataset of 20k tweets

I have a pre-trained model with an accuracy of 96% after 2 epochs, and I am trying to use that model on a new dataset of 20k tweets for sentiment analysis. While doing that, I get the error below.
I didn't face any issues while training the model on data of the same size, so I'm not sure why I get this while using the model for inference.
ResourceExhaustedError: OOM when allocating tensor with shape[1079190,768] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:ResourceGather]
Code:
from transformers import BertTokenizer, TFBertForSequenceClassification
from transformers import InputExample,InputFeatures
model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model.summary()
Model: "tf_bert_for_sequence_classification"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
bert (TFBertMainLayer) multiple 109482240
_________________________________________________________________
dropout_37 (Dropout) multiple 0
_________________________________________________________________
classifier (Dense) multiple 1538
=================================================================
Total params: 109,483,778
Trainable params: 109,483,778
Non-trainable params: 0
train = tf.keras.preprocessing.text_dataset_from_directory('aclImdb/train', batch_size=30000,
                                                           validation_split=0.2, subset='training', seed=123)
test = tf.keras.preprocessing.text_dataset_from_directory('aclImdb/train', batch_size=30000,
                                                          validation_split=0.2, subset='validation', seed=123)
Found 25000 files belonging to 2 classes.
Using 20000 files for training.
Found 25000 files belonging to 2 classes.
Using 5000 files for validation.
for data in train.take(1):
    train_feat = data[0].numpy()
    train_lab = data[1].numpy()

train = pd.DataFrame([train_feat, train_lab]).T
train.columns = ['DATA_COLUMN', 'LABEL_COLUMN']
train['DATA_COLUMN'] = train['DATA_COLUMN'].str.decode('utf-8')

for data in test.take(1):
    test_feat = data[0].numpy()
    test_lab = data[1].numpy()

test = pd.DataFrame([test_feat, test_lab]).T
test.columns = ['DATA_COLUMN', 'LABEL_COLUMN']
test['DATA_COLUMN'] = test['DATA_COLUMN'].str.decode('utf-8')
test.head()
def convert_data_to_examples(train, test, DATA_COLUMN, LABEL_COLUMN):
    train_InputExamples = train.apply(lambda x: InputExample(guid=None,  # Globally unique ID for bookkeeping, unused in this case
                                                             text_a=x[DATA_COLUMN],
                                                             text_b=None,
                                                             label=x[LABEL_COLUMN]), axis=1)
    validation_InputExamples = test.apply(lambda x: InputExample(guid=None,  # Globally unique ID for bookkeeping, unused in this case
                                                                 text_a=x[DATA_COLUMN],
                                                                 text_b=None,
                                                                 label=x[LABEL_COLUMN]), axis=1)
    return train_InputExamples, validation_InputExamples

train_InputExamples, validation_InputExamples = convert_data_to_examples(train, test, 'DATA_COLUMN', 'LABEL_COLUMN')
def convert_examples_to_tf_dataset(examples, tokenizer, max_length=128):
    features = []  # -> will hold InputFeatures to be converted later
    for e in examples:
        # Documentation is really strong for this method, so please take a look at it
        input_dict = tokenizer.encode_plus(
            e.text_a,
            add_special_tokens=True,
            max_length=max_length,  # truncates if len(s) > max_length
            return_token_type_ids=True,
            return_attention_mask=True,
            pad_to_max_length=True,  # pads to the right by default # CHECK THIS for pad_to_max_length
            truncation=True
        )
        input_ids, token_type_ids, attention_mask = (input_dict["input_ids"],
                                                     input_dict["token_type_ids"],
                                                     input_dict['attention_mask'])
        features.append(
            InputFeatures(
                input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=e.label
            )
        )

    def gen():
        for f in features:
            yield (
                {
                    "input_ids": f.input_ids,
                    "attention_mask": f.attention_mask,
                    "token_type_ids": f.token_type_ids,
                },
                f.label,
            )

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
        (
            {
                "input_ids": tf.TensorShape([None]),
                "attention_mask": tf.TensorShape([None]),
                "token_type_ids": tf.TensorShape([None]),
            },
            tf.TensorShape([]),
        ),
    )
DATA_COLUMN = 'DATA_COLUMN'
LABEL_COLUMN = 'LABEL_COLUMN'

# We can call the functions we created above with the following lines:
train_InputExamples, validation_InputExamples = convert_data_to_examples(train, test, DATA_COLUMN, LABEL_COLUMN)

train_data = convert_examples_to_tf_dataset(list(train_InputExamples), tokenizer)
train_data = train_data.shuffle(100).batch(32).repeat(2)

validation_data = convert_examples_to_tf_dataset(list(validation_InputExamples), tokenizer)
validation_data = validation_data.shuffle(100).batch(32)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')])

model.fit(train_data, epochs=2, validation_data=validation_data)
# This is my new data with 20k rows on which I want to run the pretrained model:
tweets_list = statement_df['sentiment'].tolist()

# This part of the code serves that purpose
tf_batch = tokenizer(tweets_list, max_length=128, padding=True, truncation=True, return_tensors='tf')
# print(tf_batch)
tf_outputs = model(tf_batch)  # this line throws the OOM error
tf_predictions = tf.nn.softmax(tf_outputs[0], axis=-1)
labels = ['Negative', 'Positive']
label = tf.argmax(tf_predictions, axis=1)
label = label.numpy()
for i in range(len(tweets_list)):
    print(tweets_list[i], ": \n", labels[label[i]])
Error:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/array_ops.py in gather_v2(params, indices, validate_indices, axis, batch_dims, name)
4830 name=name,
4831 axis=axis,
-> 4832 batch_dims=batch_dims)
4833
4834
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/array_ops.py in gather(***failed resolving arguments***)
4811 # TODO(apassos) find a less bad way of detecting resource variables
4812 # without introducing a circular dependency.
-> 4813 return params.sparse_read(indices, name=name)
4814 except AttributeError:
4815 return gen_array_ops.gather_v2(params, indices, axis, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py in sparse_read(self, indices, name)
701 variable_accessed(self)
702 value = gen_resource_variable_ops.resource_gather(
--> 703 self._handle, indices, dtype=self._dtype, name=name)
704
705 if self._dtype == dtypes.variant:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_resource_variable_ops.py in resource_gather(resource, indices, dtype, batch_dims, validate_indices, name)
547 return _result
548 except _core._NotOkStatusException as e:
--> 549 _ops.raise_from_not_ok_status(e, name)
550 except _core._FallbackException:
551 pass
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
6860 message = e.message + (" name: " + name if name is not None else "")
6861 # pylint: disable=protected-access
-> 6862 six.raise_from(core._status_to_exception(e.code, message), None)
6863 # pylint: enable=protected-access
6864
/usr/local/lib/python3.7/dist-packages/six.py in raise_from(value, from_value)
ResourceExhaustedError: OOM when allocating tensor with shape[1079190,768] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:ResourceGather]
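The allocated shape [1079190, 768] points at the embedding gather for every token of all 20k tweets at once: training used .batch(32), but here the whole tokenized list is passed to the model in a single call. A minimal sketch of chunked inference, assuming the same tokenizer and model as above (the batch size of 32 is an arbitrary choice):

import numpy as np

batch_size = 32  # assumption: small enough to fit on the GPU
pred_chunks = []
for start in range(0, len(tweets_list), batch_size):
    chunk = tweets_list[start:start + batch_size]
    tf_batch = tokenizer(chunk, max_length=128, padding=True, truncation=True, return_tensors='tf')
    tf_outputs = model(tf_batch)
    tf_predictions = tf.nn.softmax(tf_outputs[0], axis=-1)
    pred_chunks.append(tf.argmax(tf_predictions, axis=1).numpy())
label = np.concatenate(pred_chunks)  # one predicted class index per tweet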

`_UserObject` object has no attribute `call_and_return_conditional_losses`

I am trying to use multistep training: the prediction of the first neural network is used as input for the second one, so I need to load the first network and call its prediction while training the second. That is, I need the first network's graph/function so I can use it in the second network.
# Neural network class
class Linear(Model):
    def __init__(self, n_layers, activate="tanh", dtype="float32"):
        super(Linear, self).__init__()
        self.activation = activate
        self.title = title
        self.model = Sequential()
        self.n_layers = n_layers
        # self.model._set_inputs(inputs)
        num_layers = len(n_layers)
        for l in range(0, num_layers - 1):
            name = "layer_" + str(l)
            m = Dense(n_layers[l + 1], input_shape=(n_layers[l],), activation=self.activation, name=name, dtype=dtype)
            self.model.add(m)
        name = "layer_" + str(num_layers - 1)
        m = Dense(n_layers[-1], input_shape=(n_layers[-2],), name=name, dtype=dtype)
        self.model.add(m)

    # @tf.function
    def __call__(self, X):
        Y = self.model(X)
        # Pred = self.model.predict(X)
        return Y

    # Saving and loading
    def save(self, name):
        tf.saved_model.save(self.model_NN, 'saved_model/')

    def load(self, name):
        restored_saved_model = keras.models.load_model('saved_model/')
        return restored_saved_model
# I am training models one after another and using the prediction of the first
# model as input of the second, so I need to load and save.
def train(self, epoch, multistepping):
    for l in range(0, self.number_of_NN):
        print("making neural network object", l)
        self.NN_list.append(self.net)
    for l in range(0, self.number_of_NN):
        model = self.pde(self.NN_list[l], self.D, self.dt, self.q, self.cond_i, self.cond_b, self.lr, self.lr_schedule, self.dtype)
        print(" training model number ", l)
        model.train(epoch[l])
        model.save(l)
        model.title = l
        # model_old = copy.copy(model)

        # Bring it outside
        def function(xy):
            # load weights from previous model
            if l > 0:
                temp_model = model.load(l - 1)
            x_1 = xy[:, 0][:, None]
            y_1 = xy[:, 1][:, None]
            U = temp_model.predict(x_1, y_1)
            U = U[:, -1]
            return np.asarray(U)

        if multistepping == 1:
            self.cond_i.u_func = function
# The error message I get:
~/Desktop/V3/v3/v2/Ishrak/pde_d_Poisson_2D_v3.py in load(self, name)
131
132 def load(self,name):
--> 133 restored_saved_model=keras.models.load_model('saved_model/')
134 return restored_saved_model
135 #Have to check
~/anaconda3/envs/tf2.1/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/save.py in load_model(filepath, custom_objects, compile)
148 if isinstance(filepath, six.string_types):
149 loader_impl.parse_saved_model(filepath)
--> 150 return saved_model_load.load(filepath, compile)
151
152 raise IOError(
~/anaconda3/envs/tf2.1/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/load.py in load(path, compile)
87 # TODO(kathywu): Add saving/loading of optimizer, compiled losses and metrics.
88 # TODO(kathywu): Add code to load from objects that contain all endpoints
---> 89 model = tf_load.load_internal(path, loader_cls=KerasObjectLoader)
90
91 # pylint: disable=protected-access
~/anaconda3/envs/tf2.1/lib/python3.7/site-packages/tensorflow_core/python/saved_model/load.py in load_internal(export_dir, tags, loader_cls)
550 loader = loader_cls(object_graph_proto,
551 saved_model_proto,
--> 552 export_dir)
553 root = loader.get(0)
554 root.tensorflow_version = meta_graph_def.meta_info_def.tensorflow_version
~/anaconda3/envs/tf2.1/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/load.py in __init__(self, *args, **kwargs)
117 def __init__(self, *args, **kwargs):
118 super(KerasObjectLoader, self).__init__(*args, **kwargs)
--> 119 self._finalize()
120
121 def _finalize(self):
~/anaconda3/envs/tf2.1/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/saved_model/load.py in _finalize(self)
137 for node in self._nodes:
138 if isinstance(node, RevivedNetwork):
--> 139 call_fn = node.keras_api.call_and_return_conditional_losses
140 if call_fn.input_signature is None:
141 inputs = infer_inputs_from_restored_call_function(call_fn)
AttributeError: '_UserObject' object has no attribute 'call_and_return_conditional_losses'
How do I save and load a TensorFlow model in this scenario?
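A common cause of this AttributeError is mixing the two SavedModel APIs: the class saves with tf.saved_model.save() but loads with keras.models.load_model(), which expects the extra Keras metadata that only model.save() writes. A minimal sketch of paired save/load methods, assuming that mismatch is the cause here (note also that save() references self.model_NN while the class defines self.model, which looks like a separate bug):

def save(self, name):
    # Keras-native save; pairs with keras.models.load_model below.
    self.model.save('saved_model/')

def load(self, name):
    return keras.models.load_model('saved_model/')

# Alternatively, stay on the low-level API for both directions:
#   tf.saved_model.save(self.model, 'saved_model/')
#   restored = tf.saved_model.load('saved_model/')  # returns a plain object, not a Keras model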

Can anyone tell me what's wrong with my own CNN model in MXNet?

def acc(output, label):
    correct_preds = output.argmax(axis=1) == label.astype('float32')
    return correct_preds.mean().asscalar()

for epoch in range(10):
    train_loss, train_acc, valid_acc = 0., 0., 0.
    tic = time()
    for data, label in train_data:
        data = data.copyto(mx.cpu(0))
        label = label.copyto(mx.cpu(0))
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        trainer.step(batch_size)
        train_loss += loss.mean().asscalar()
        train_acc += acc(output, label)
When running this part I get the following error. My dataset is in Pascal VOC format.
ValueError
Traceback (most recent call last)
<ipython-input-7-9926ba7deb21> in <module>()
12 label = label.copyto(mx.cpu(0))
13 with autograd.record():
---> 14 output = net(data)
15 loss = softmax_cross_entropy(output, label)
16
/home/manasi/.local/lib/python2.7/site-packages/mxnet/gluon/block.pyc in __call__(self, *args)
539 hook(self, args)
540
--> 541 out = self.forward(*args)
542
543 for hook in self._forward_hooks.values():
/home/manasi/.local/lib/python2.7/site-packages/mxnet/gluon/nn/basic_layers.pyc in forward(self, x)
51 def forward(self, x):
52 for block in self._children.values():
---> 53 x = block(x)
54 return x
55
/home/manasi/.local/lib/python2.7/site-packages/mxnet/gluon/block.pyc in __call__(self, *args)
539 hook(self, args)
540
--> 541 out = self.forward(*args)
542
543 for hook in self._forward_hooks.values():
/home/manasi/.local/lib/python2.7/site-packages/mxnet/gluon/block.pyc in forward(self, x, *args)
911 params = {i: j.data(ctx) for i, j in self._reg_params.items()}
912 except DeferredInitializationError:
--> 913 self._deferred_infer_shape(x, *args)
914 for _, i in self.params.items():
915 i._finish_deferred_init()
/home/manasi/.local/lib/python2.7/site-packages/mxnet/gluon/block.pyc in _deferred_infer_shape(self, *args)
792             error_msg = "Deferred initialization failed because shape"\
793                 " cannot be inferred. {}".format(e)
--> 794 raise ValueError(error_msg)
795
796 def _call_cached_op(self, *args):
ValueError: Deferred initialization failed because shape cannot be inferred. Error in operator conv2_fwd: [10:56:15] src/operator/nn/convolution.cc:196: Check failed: dilated_ksize_x <= AddPad(dshape[3], param_.pad[1]) (5 vs. 3) kernel size exceed input
The "kernel size exceed input" error is usually seen when your input image is too small for the network. You either need to resize your input image, or change the network architecture to remove layers that reduce the spatial dimensions of the feature maps (e.g. pooling layers, or convolutions with stride), as in the sketch below.
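If resizing is the route taken, a minimal Gluon sketch (assuming an image dataset pipeline; train_dataset is a placeholder name, and the 224-pixel target is arbitrary, any size large enough for every conv/pool stage works):

from mxnet import gluon
from mxnet.gluon.data.vision import transforms

transform = transforms.Compose([
    transforms.Resize(224),   # upsample small images so kernels always fit
    transforms.ToTensor(),
])
train_data = gluon.data.DataLoader(
    train_dataset.transform_first(transform),  # apply transform to images only
    batch_size=batch_size, shuffle=True)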