Getting ValueError and TypeError while training model using resnet50 - tensorflow

I am working on medical image classification using a ResNet50 model. Whenever I try to flatten the layer output I get this error:
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
My code is below:
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from keras.layers import Input, Lambda, Dense, Flatten
from keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from keras.applications.resnet50 import ResNet50
from keras.models import Model
import keras
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = keras.layers.Flatten()(output)
restnet = Model(restnet.input, output=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
I also tried adding the output layer this way:
last_layer = pre_trained_model.get_layer('conv5_block3_out')
print('last layer output shape:', last_layer.output_shape)
last_output = last_layer.output
x = GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)
But I got this error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I am unable to understand either error. I checked the solution given here, but it didn't solve my problem.

You are mixing the tensorflow and keras libraries. It is recommended to use only tensorflow.keras.* instead of keras.*.
Here is the modified code:
from PIL import Image
import numpy as np
import tensorflow
from tensorflow.keras import layers
from tensorflow.keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
from tensorflow.keras import backend as K
import gc
from functools import partial
from tqdm import tqdm
from sklearn import metrics
from collections import Counter
import json
import itertools
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.preprocessing import image
from glob import glob
pre_trained_model = tensorflow.keras.applications.ResNet50(input_shape=(224,224,3), include_top=False, weights="imagenet")
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
restnet = ResNet50(include_top=False, weights='imagenet', input_shape=(224,224,3))
output = restnet.layers[-1].output
output = tensorflow.keras.layers.Flatten()(output)
restnet = tensorflow.keras.models.Model(restnet.input, outputs=output)
for layer in restnet.layers:
    layer.trainable = False
restnet.summary()
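The second snippet in the question fails for the same reason: layers imported from keras are applied to tensors coming from a tensorflow.keras model. A minimal sketch of that head built only from tensorflow.keras (pre_trained_model is the tf.keras ResNet50 defined above; the 3-class softmax output matches the question):
from tensorflow.keras import layers
from tensorflow.keras.models import Model

# output of the last convolutional block of the tf.keras ResNet50
last_output = pre_trained_model.get_layer('conv5_block3_out').output

# classification head built only from tensorflow.keras layers
x = layers.GlobalAveragePooling2D()(last_output)
x = layers.Dropout(0.5)(x)
x = layers.Dense(3, activation='softmax')(x)

model = Model(inputs=pre_trained_model.input, outputs=x)

# freeze the pre-trained backbone before compiling and training
for layer in pre_trained_model.layers:
    layer.trainable = False

model.summary()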

Related

Unable to Import Efficientnet in Colab

I am working in Colab to test some code. While importing models, it gives the error No module named 'efficientnet'.
I am sharing the code and error here.
# for accessing tabular data
import pandas as pd
import numpy as np
import os
os.chdir('/content/drive/My Drive/')
# adding classweight
from sklearn.utils import class_weight
# Evaluation Metric
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix, precision_score, recall_score
# for visualization
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from prettytable import PrettyTable
# backend
import keras
from keras import backend as K
import tensorflow as tf
from keras.callbacks import Callback
# for transfer learning
from tensorflow.keras.applications import VGG16, VGG19
from tensorflow.keras.applications import DenseNet121
from tensorflow.keras.applications import ResNet50, ResNet152
from tensorflow.keras.applications import InceptionV3
from efficientnet.keras import EfficientNetB0, EfficientNetB3, EfficientNetB4
from keras.applications import Xception
# for model architecture
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D, MaxPooling2D, Activation, Flatten
# for Tensorboard visualization
from keras.callbacks import TensorBoard
# for Data Augmentation
from keras.preprocessing.image import ImageDataGenerator
It should be,
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB3, EfficientNetB4
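Note that the EfficientNet models only ship with tf.keras from TensorFlow 2.3 onwards. If you would rather keep the standalone efficientnet package that the original import refers to, installing it in the Colab runtime first should also work (a sketch, assuming a notebook cell):
!pip install efficientnet
from efficientnet.keras import EfficientNetB0, EfficientNetB3, EfficientNetB4
# or, if the rest of the code uses tf.keras models and layers:
# from efficientnet.tfkeras import EfficientNetB0, EfficientNetB3, EfficientNetB4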

keras.utils.layer_utils and keras.utils.generic_utils equivalent in tf.keras

So I've been following this guide (https://towardsdatascience.com/faster-r-cnn-object-detection-implemented-by-keras-for-custom-data-from-googles-open-images-125f62b9141a) while trying to implement my custom dataset for object detection with Faster R-CNN. In the source code used to train the dataset, here is the list of all the Keras-related modules:
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.objectives import categorical_crossentropy
from keras.models import Model
from keras.utils import generic_utils
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers
Because I use the TensorFlow-specific implementation of the Keras API, I converted all of the above keras modules to tensorflow.keras. There are some minor adjustments, like this one:
from keras.engine.topology import get_source_inputs
to
from tensorflow.keras.utils import get_source_inputs
I've found the tensorflow.keras equivalent for most modules, except for these two lines:
from keras.utils import layer_utils
from keras.utils import generic_utils
I can't even find keras.utils documentation for those two modules at https://keras.io/api/utils/.
How do I convert those two modules to tensorflow.keras? I'm currently running TensorFlow version 2.1.0 locally. And if there's no way to import these modules, is there any workaround? I'm planning to train my data locally on my GPU.
In TensorFlow 2.x, you can use:
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import generic_utils
With the current version of TensorFlow (2.8.2), the following code is working fine for me, so the above imports can be replaced with this code:
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from tensorflow.keras.utils import get_source_inputs
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.models import Model
from tensorflow.python.keras.utils import generic_utils
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.keras import initializers, regularizers

Keras load_model with tf.keras.losses.LogCosh as custom_object

I want to load my saved model and retrain it. However, I'm having trouble setting the custom_objects parameter. The custom object is the tf.keras.losses.LogCosh loss. My code is the following:
import pandas as pd
import numpy as np
from keras.models import load_model
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ModelCheckpoint
import neptune.new as neptune
from tensorflow.keras.losses import LogCosh
from tensorflow.python.keras.utils import losses_utils
logcosh = LogCosh(reduction= losses_utils.ReductionV2.AUTO, name= 'logcosh')
model = load_model('my_model.hdf5',
                   custom_objects={'logcosh': LogCosh(reduction=losses_utils.ReductionV2.AUTO, name='logcosh')})
The error is the following
ValueError: Unknown metric function: {'class_name': 'LogCosh', 'config': {'reduction': 'auto', 'name': <tensorflow.python.keras.losses.LogCosh object at 0x7f8e00d29400>}}

cannot import name 'PlotLossesTensorFlowKeras' in jupyter notebook even though I have installed all dependencies

This problem appears in my Jupyter notebook.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import utils
import os
%matplotlib inline
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
#from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Sequential
from IPython.display import SVG, Image
from livelossplot import PlotLossesTensorFlowKeras
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
I have tried pip install livelossplot but it was no use.
Any help is appreciated.
I also faced the same issue. What worked for me was replacing that with this code:
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import utils
import os
%matplotlib inline
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from IPython.display import SVG, Image
#from livelossplot import PlotLossesTensorFlowKeras
from livelossplot import PlotLossesKeras
from livelossplot.keras import PlotLossesCallback
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
You may now proceed further.
I think we followed the same tutorial on Coursera. The problem is a version mismatch:
pip uninstall livelossplot
pip install livelossplot==0.5.2
Then use this
from livelossplot import PlotLossesKeras
instead of the old one.
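Either way, the imported object is an ordinary Keras callback; a minimal usage sketch (model, train_generator and validation_generator stand for whatever you already have in your notebook):
from livelossplot import PlotLossesKeras

# live loss/metric plots are redrawn in the notebook after every epoch
model.fit(train_generator,
          validation_data=validation_generator,
          epochs=10,
          callbacks=[PlotLossesKeras()])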

keras tensorflow load_weights fails

I am using Keras 1.2 with the TensorFlow 1.0.0 backend.
I have a function that loads a pre-calibrated model from JSON and then loads its weights from an HDF5 file.
def load():
    model = model_from_json(open(model_path).read())
    model.load_weights(model_weights_path)
This function, more precisely the call to load_weights, results in the following exception:
RuntimeError: The Session graph is empty. Add operations to the graph before calling run()
I wonder if that is due to these lines that I put at the beginning of my module to set the TensorFlow seed for reproducibility:
tf.set_random_seed(123) # To set Tensorflow seed
sess = tf.Session()
keras.backend.set_session(sess)
It seems Keras does not automatically associate the loaded model's graph with the session, hence failing to initialize the weights.
Any explanation and workaround to avoid the exception?
I am using pretty much the same code as you and it works for me.
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.models import model_from_json
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import ZeroPadding2D
from keras.engine.topology import Merge
from keras.layers import merge
from keras.optimizers import Adam
from keras import backend as K
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
import PIL
import inception
import tensorflow as tf
import keras
import glob
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
print("Loaded model from disk")
model.summary()
model.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
score = model.predict(transfer_values_test)
Indeed, it seems that Keras doesn't respect the session set by set_session when loading models.
Try forcing Keras to use a particular session via TensorFlow's context manager:
def load():
    with sess.as_default():
        model = model_from_json(open(model_path).read())
        model.load_weights(model_weights_path)
If Keras still complains, predefine a graph (graph=tf.Graph()) and force model.load_weights to use it by introducing an additional with statement:
def load():
    with graph.as_default():
        with sess.as_default():
            model = model_from_json(open(model_path).read())
            model.load_weights(model_weights_path)
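In both snippets, sess and graph are assumed to be the session and graph created at module import time; for completeness, a sketch of that setup for the TF 1.0 / Keras 1.2 combination in the question:
import tensorflow as tf
import keras

graph = tf.Graph()                  # graph the model will be loaded into
with graph.as_default():
    tf.set_random_seed(123)         # seed from the question, for reproducibility
    sess = tf.Session(graph=graph)
    keras.backend.set_session(sess)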