import numpy as np
from keras.applications.vgg19 import decode_predictions
from prettytable import PrettyTable
import time
from keras import backend as K
from tensorflow import keras
from tensorflow.python import keras
from keras import models, layers, Model, Input
import tensorflow as tf
model_2=keras.models.load_model('model_2.h5',compile=False)
model_2.summary()
predictions1= np.load('D:/predictions_result.npy')
def profiler(model, test_input):
    data_input = test_input
    for layer in model.layers:
        start = time.time()
        im_input = keras.layers.Input(batch_shape=model.get_layer(layer.name).get_input_shape_at(0))
        im_out = layer(im_input)
        new_model = keras.models.Model(inputs=im_input, outputs=im_out)
        data_input = new_model.predict(data_input)
        end = time.time() - start
        print(end)
    result = 1
profiler(model_2,predictions1)
tmp=np.zeros((1,224,224,64))
for i in range(0, 1):
    tmp[i, :, :, :] = predictions1[i, :]
predictions2 = model_2.predict(tmp)
label_vgg19 = decode_predictions(predictions2)
print ('label_vgg19 =', label_vgg19)
When I try to run the above code I get the error below. My question is how to remove the first layer of the model after loading it. I initially split the VGG model into sub-models and then load a sub-model. I have tried different approaches, but none works. Help is highly appreciated.
Traceback (most recent call last):
File "C:/Users/40227422/PycharmProjects/model_partititon/model_2_sock.py", line 42, in <module>
profiler(model_2,predictions1)
File "C:/Users/40227422/PycharmProjects/model_partititon/model_2_sock.py", line 28, in the
profiler
data_input = new_model.predict(data_input)
File "C:\Users\40227422\AppData\Local\Continuum\miniconda3\envs\tensorflow\lib\site-
packages\tensorflow\python\keras\engine\training_utils.py", line 332, in standardize_input_data
' but got array with shape ' + str(data_shape))
ValueError: Error when checking input: expected input_1 to have shape (224, 224, 3) but got array with shape (224, 224, 64)
When I try to use kerassurgeon to delete a layer using the code below, I get the error
ValueError: not enough values to unpack (expected 2, got 0)
from kerassurgeon import Surgeon
surgeon = Surgeon(model_2)
layer_1 = model_2.layers[0] # selecting the first layer
surgeon.add_job('delete_layer', layer_1)
new_model = surgeon.operate()
I was not able to recreate the error you are facing; maybe you can share reproducible code. Below are the options I tried to delete a layer, and both worked.
from kerassurgeon.operations import delete_layer
# delete layer_1 from a model
model = delete_layer(model_2, layer_1)
OR
# delete layer_1 from a model
from kerassurgeon import Surgeon
surgeon = Surgeon(model_2)
surgeon.add_job('delete_layer', layer_1)
new_model = surgeon.operate()
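If kerassurgeon still raises the error on your model, a plain-Keras alternative is to rebuild the model from the second layer onward on a fresh Input. This is a minimal sketch, assuming model_2 is a linear stack of layers (no branches) and that the remaining layers accept the (224, 224, 64) intermediate activations from the question:

from tensorflow import keras

# New input matching the intermediate activation shape, not the original image shape.
new_input = keras.layers.Input(shape=(224, 224, 64))
x = new_input
for layer in model_2.layers[1:]:  # skip the original first layer
    x = layer(x)
stripped_model = keras.models.Model(inputs=new_input, outputs=x)
stripped_model.summary()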
I am going to train my model quantization-aware. However, when I try, tensorflow_model_optimization cannot quantize the tf.reshape function and throws an error.
tensorflow version : '2.4.0-dev20200903'
python version : 3.6.9
the code:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
from tensorflow.keras.applications import VGG16
import tensorflow_model_optimization as tfmot
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
quantize_model = tfmot.quantization.keras.quantize_model
inputs = keras.Input(shape=(784,))
# img_inputs = keras.Input(shape=(32, 32, 3))
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
outputs = tf.reshape(outputs, [-1, 2, 5])
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
# keras.utils.plot_model(model, "my_first_model.png")
q_aware_model = quantize_model(model)
and the output:
Traceback (most recent call last):
File "<ipython-input-39-af601b78c010>", line 14, in <module>
q_aware_model = quantize_model(model)
File "/home/essys/.local/lib/python3.6/site-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py", line 137, in quantize_model
annotated_model = quantize_annotate_model(to_quantize)
File "/home/essys/.local/lib/python3.6/site-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py", line 210, in quantize_annotate_model
to_annotate, input_tensors=None, clone_function=_add_quant_wrapper)
...
File "/home/essys/anaconda3/envs/tf_gpu/lib/python3.6/site-packages/tensorflow/python/autograph/impl/api.py", line 667, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
TypeError: tf__call() got an unexpected keyword argument 'shape'
If somebody knows, please help.
The reason is that your layer is not yet supported for QAT at the moment. If you want to quantize it, you have to write the quantization yourself via quantize_annotate_layer, pass it through quantize_scope, and apply it to your model via quantize_apply, as described here: https://www.tensorflow.org/model_optimization/guide/quantization/training_comprehensive_guide?hl=en#quantize_custom_keras_layer
I have created a batch_norm_layer here as an example.
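A minimal sketch of that route, under two assumptions: the tf.reshape is re-expressed as a built-in keras.layers.Reshape, and it is annotated with a pass-through config so quantize_apply leaves it alone (NoOpQuantizeConfig is a name invented here for illustration):

import tensorflow_model_optimization as tfmot
from tensorflow import keras
from tensorflow.keras import layers

quant = tfmot.quantization.keras

class NoOpQuantizeConfig(quant.QuantizeConfig):
    # Pass-through config: quantize nothing inside the annotated layer.
    def get_weights_and_quantizers(self, layer):
        return []
    def get_activations_and_quantizers(self, layer):
        return []
    def set_quantize_weights(self, layer, quantize_weights):
        pass
    def set_quantize_activations(self, layer, quantize_activations):
        pass
    def get_output_quantizers(self, layer):
        return []
    def get_config(self):
        return {}

inputs = keras.Input(shape=(784,))
x = layers.Dense(64, activation="relu")(inputs)
x = layers.Dense(64, activation="relu")(x)
x = layers.Dense(10)(x)
# Express the reshape as a Keras layer and annotate it.
outputs = quant.quantize_annotate_layer(
    layers.Reshape((2, 5)), quantize_config=NoOpQuantizeConfig())(x)

annotated = quant.quantize_annotate_model(keras.Model(inputs, outputs))
with quant.quantize_scope({'NoOpQuantizeConfig': NoOpQuantizeConfig}):
    q_aware_model = quant.quantize_apply(annotated)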
TensorFlow 2.x support for QAT layers is not complete; please consider using TF 1.x by adding FakeQuant ops after operators.
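For the TF1-style suggestion above, the idea is roughly this (a sketch in graph mode; the min/max values are arbitrary placeholders):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=(None, 10))
# Insert a fake-quantization op right after the operator to be quantized.
y = tf.quantization.fake_quant_with_min_max_args(x, min=-6.0, max=6.0)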
I am getting this error while using Keras multi_gpu_model. The code runs fine if I eliminate this line. Also, with a CNN model it works fine; it's just that with a dense network it gives the error. Could you please help me solve this issue? Thanks.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM, BatchNormalization,Flatten
from keras.utils.vis_utils import model_to_dot
from keras.optimizers import adam
from keras.models import load_model
import pylab
from sklearn.model_selection import train_test_split
from keras.utils import multi_gpu_model
from scipy.io import wavfile
X=np.ones(10000)
y=np.zeros(100)
x_train=X
y_train=y
x_train=np.array(x_train)
y_train=np.array(y_train)
x_train.shape=(1,10000)
y_train.shape=(1,100)
model = Sequential()
model.add(Dense(500,activation = 'tanh'))
model.add(Dense(450, activation = 'tanh'))
model.add(Dense(412, activation = 'tanh'))
model.add(Dense(100, activation = 'tanh'))
opt = adam(lr=0.002, decay=1e-6)
model = multi_gpu_model(model, gpus=4)
model.compile(loss='mae', optimizer=opt, metrics=['accuracy'])
model.fit(x_train,y_train,epochs=50, batch_size = 40000)
Error: Traceback (most recent call last):
File "p.py", line 37, in <module>
model = multi_gpu_model(model, gpus=4)
File "/home/ENG/benipas1/anaconda3/envs/new/lib/python3.7/site-packages/keras/utils/multi_gpu_utils.py", line 203, in multi_gpu_model
for i in range(len(model.outputs)):
TypeError: object of type 'NoneType' has no len()
The problem is here:
model = Sequential()
model.add(Dense(500,activation = 'tanh'))
You are not giving an input shape to the first layer, so the outputs of the model are completely undefined and model.outputs is None. If you provide the input shape to the first layer, then the outputs are defined and it should work fine. You are probably providing the input shape to your CNN models and that is why it works:
model.add(Dense(500,activation = 'tanh', input_shape=(something,)))
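For completeness, a minimal sketch with the question's shapes (input vectors of length 10000), so that model.outputs is defined before multi_gpu_model is called:

from keras.models import Sequential
from keras.layers import Dense
from keras.utils import multi_gpu_model

model = Sequential()
model.add(Dense(500, activation='tanh', input_shape=(10000,)))  # input shape given
model.add(Dense(450, activation='tanh'))
model.add(Dense(412, activation='tanh'))
model.add(Dense(100, activation='tanh'))
print(model.outputs)  # now a list with one tensor, not None
parallel_model = multi_gpu_model(model, gpus=4)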
Following the tutorial at https://www.tensorflow.org/tutorials/images/hub_with_keras resulted in a file model.h5. Converting to TensorFlow.js with the command
tensorflowjs_converter --input_format keras ./model.h5 /tmp/jsmodel/
failed with
Exception: Error dumping weights, duplicate weight name Variable
Why is this and how can it be fixed?
MCVE
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
import numpy as np
data_root = tf.keras.utils.get_file(
    'flower_photos',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    untar=True)
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
IMAGE_SHAPE = (224, 224)
image_data = image_generator.flow_from_directory(str(data_root), target_size=IMAGE_SHAPE)
feature_extractor_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2" #@param {type:"string"}
feature_extractor_layer = hub.KerasLayer(feature_extractor_url,
                                         input_shape=(224, 224, 3))
for image_batch, label_batch in image_data:
    print("Image batch shape: ", image_batch.shape)
    print("Label batch shape: ", label_batch.shape)
    break
feature_extractor_layer.trainable = False
model = tf.keras.Sequential([
    feature_extractor_layer,
    layers.Dense(image_data.num_classes, activation='softmax')
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss='categorical_crossentropy',
    metrics=['acc'])
steps_per_epoch = np.ceil(image_data.samples / image_data.batch_size)
history = model.fit(image_data, epochs=2,
                    steps_per_epoch=steps_per_epoch)  # removed callback
model.save("/tmp/so_model.h5")
This fails with a
RuntimeError: Unable to create link (name already exists)
but the model is created. Calling tensorflowjs_converter --input_format keras /tmp/model.h5 /tmp/jsmodel on it fails with the above
Exception: Error dumping weights, duplicate weight name Variable
UPDATE: see also Retrain image detection with MobileNet
I am having trouble with a seemingly arbitrary bug when using Keras. I run into the error "NotFoundError: FeedInputs: unable to find feed output dense_3_target:0" when trying to build a model in Keras. The error seems to depend on the number of layers I put in the network (the bug appears when the number of layers is not equal to 4). Does anyone know what is going on here?
The code and error message:
import tensorflow as tf
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
tf.reset_default_graph()
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist", one_hot=True)
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
# Hyper Parameters
n_features = 784
n_classes = 10
learning_rate = 0.5
training_epochs = 2
model = Sequential()
model.add(Dense(units = 100, activation = 'relu', input_dim = n_features))
model.add(Dense(units = 50,activation = 'relu'))
model.add(Dense(50,activation = 'relu'))
model.add(Dense(units = n_classes, activation = 'softmax'))
# Step 3: Compile the Model
model.compile(optimizer='adam',loss='categorical_crossentropy')
## Step 4: Train the Model
history = model.fit(X_train,y_train,epochs=10,batch_size = 100,validation_data=(X_test,y_test))
===================================================================
File "<ipython-input-14-1076cda88cc6>", line 43, in <module>
history = model.fit(X_train,y_train,epochs=10,batch_size = 100,validation_data=(X_test,y_test))
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/engine/training.py", line 1037, in fit
validation_steps=validation_steps)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 199, in fit_loop
outs = f(ins_batch)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2666, in __call__
return self._call(inputs)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2635, in _call
session)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2587, in _make_callable
callable_fn = session._make_callable_from_options(callable_opts)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1480, in _make_callable_from_options
return BaseSession._Callable(self, callable_options)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1441, in __init__
session._session, options_ptr, status)
File "/Users/liyuan/anaconda2/envs/tensorflow35/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 519, in __exit__
c_api.TF_GetCode(self.status.status))
NotFoundError: FeedInputs: unable to find feed output dense_3_target:0
I am trying to create an image classifier that utilizes the pre-trained Inception-ResNet v2 model provided in the slim documentation.
Here is the code so far:
import tensorflow as tf
slim = tf.contrib.slim
from PIL import Image
from inception_resnet_v2 import *
import numpy as np
checkpoint_file = 'inception_resnet_v2_2016_08_30.ckpt'
sample_images = ['carrot.jpg']
input_tensor = tf.placeholder(tf.float32, shape=(None,299,299,3), name='input_image')
scaled_input_tensor = tf.scalar_mul((1.0/255), input_tensor)
scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)
variables_to_restore = slim.get_model_variables()
print(variables_to_restore)
init_fn = slim.assign_from_checkpoint_fn(
    checkpoint_file,
    slim.get_model_variables('InceptionResnetV2'))
sess = tf.Session()
init_fn(sess)
arg_scope = inception_resnet_v2_arg_scope()
with slim.arg_scope(arg_scope):
    logits, end_points = inception_resnet_v2(scaled_input_tensor, is_training=False)
for image in sample_images:
    im = Image.open(image).resize((299, 299))
    im = np.array(im)
    im = im.reshape(-1, 299, 299, 3)
    predict_values, logit_values = sess.run([end_points['Predictions'], logits], feed_dict={input_tensor: im})
    print(np.max(predict_values), np.max(logit_values))
    print(np.argmax(predict_values), np.argmax(logit_values))
The problem is I keep getting this error:
Traceback (most recent call last):
File "./classify.py", line 21, in <module>
slim.get_model_variables('InceptionResnetV2'))
File "/home/ubuntu/tensorflow/local/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/variables.py", line 584, in assign_from_checkpoint_fn
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
File "/home/ubuntu/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1040, in __init__
self.build()
File "/home/ubuntu/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1061, in build
raise ValueError("No variables to save")
ValueError: No variables to save
So it seems TF-Slim is unable to find any variables, and this is made clear when I call:
variables_to_restore = slim.get_model_variables()
print(variables_to_restore)
As it outputs an empty array.
How can I go about using the pre-trained model?
This happens because you haven't yet constructed the model in your graph, so there are no variables whose names start with "InceptionResnetV2" for the saver to capture and restore.
I believe you should put the model construction before calling slim.get_model_variables().
For instance:
with slim.arg_scope(arg_scope):
    logits, end_points = inception_resnet_v2(scaled_input_tensor, is_training=False)
variables_to_restore = slim.get_model_variables()
This way, the model's variables will be constructed and you should see that variables_to_restore is no longer empty.
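Putting it together with the question's own code, the reordered flow would look roughly like this (a sketch; all names are taken from the question):

# Build the graph first so the InceptionResnetV2 variables exist...
with slim.arg_scope(inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2(scaled_input_tensor, is_training=False)

# ...then collect them and restore from the checkpoint.
variables_to_restore = slim.get_model_variables('InceptionResnetV2')
init_fn = slim.assign_from_checkpoint_fn(checkpoint_file, variables_to_restore)

sess = tf.Session()
init_fn(sess)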
You need to manually add the model variables.
Try this
with slim.arg_scope(arg_scope):
    logits, end_points = inception_resnet_v2(scaled_input_tensor, is_training=False)
# Add the model variables manually.
for var in tf.global_variables(scope='inception_resnet_v2'):
    slim.add_model_variable(var)