How to convert Tensor objects into a NumPy array? - numpy

I have built and trained a CNN, and I want to get the weights of the first dense layer as a NumPy array. After training the model, I loaded it using this code:
f = Path("model_structure.json")
model_structure = f.read_text()
model_weights = model_from_json(model_structure)
model_weights.load_weights("model_weights.h5")
In order to get the weights of the first dense layer I used:
weights_tf = model_weights.layers[9].output
weights_tf has this value:
<tf.Tensor 'dense_1/Relu:0' shape=(?, 496) dtype=float32>
The question is, I want to convert weights_tf from a Tensor to a NumPy array, so I created a session and used the eval() function to do so, as shown below:
sess = tf.Session()
with sess.as_default():
    vector = weights_tf.eval()
but I'm getting this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_1_input' with dtype float and shape [?,180,180,3]
How can I solve it?
Here is the code of the CNN model:
# creating neural network
model = Sequential()
conv1_2d = model.add(Conv2D(180, (3, 3), padding='same', input_shape=(180, 180, 3), activation="relu")) #180 is the number of filters
conv2_2d = model.add(Conv2D(180, (3, 3), activation="relu"))
max_pool1 = model.add(MaxPooling2D(pool_size=(3, 3)))
drop_1 = model.add(Dropout(0.25))
conv3_2d =model.add(Conv2D(360, (3, 3), padding='same', activation="relu"))
conv4_2d =model.add(Conv2D(360, (3, 3), activation="relu"))
max_pool2 = model.add(MaxPooling2D(pool_size=(3, 3)))
drop_2 = model.add(Dropout(0.25))
flat = model.add(Flatten())
dense_1 = model.add(Dense(496, activation="relu"))
drop_3 = model.add(Dropout(0.5))
dense_2 = dense_layer = model.add(Dense(376, activation="softmax"))
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)
model.fit(
    train_data,
    train_label,
    batch_size=32,
    epochs=40,
    verbose=2,
    validation_split=0.1,
    shuffle=True)
# Save neural network structure
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)
# Save neural network's trained weights
model.save_weights("model_weights.h5")

Found the solution:
x = np.frombuffer(layer.convolution.weights.float16Value, dtype=np.float16)
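For a model loaded with model_from_json as above, there is also a more direct route worth noting: layers[9].output is the layer's symbolic activation tensor, which can only be evaluated by feeding an input image (hence the placeholder error), whereas get_weights() returns the stored parameters as plain NumPy arrays with no session or feed at all. A minimal sketch, assuming the dense layer sits at index 9 as in the post:

from pathlib import Path
from keras.models import model_from_json

# rebuild the trained model exactly as in the post
model = model_from_json(Path("model_structure.json").read_text())
model.load_weights("model_weights.h5")

# for a Dense layer, get_weights() returns [kernel, bias] as NumPy arrays
kernel, bias = model.layers[9].get_weights()
print(kernel.shape, bias.shape)  # e.g. (flattened_dim, 496) and (496,)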

Related

Training Multi Class Image classification model

I am trying to create a model using TensorFlow and Python. I get the data from a folder on my PC.
[Image: the folder structure]
[Image: an example from the data]
Almost all classes are about the same size [237 items, 223 items, 495 items, 387 items, 301 items].
That's how I load my data:
lables = {'Basic T-shrit':0, 'Bikini Bottom':1, 'Cargo Pants':2, 'Jeans':3, 'Oversize T-Shirt':4}
# Data
train_datagen = ImageDataGenerator(rescale=1/256)
train_generator = train_datagen.flow_from_directory(
    'Dataset',  # this is the source directory for training images
    target_size=(256, 256),  # all images will be resized to 256 x 256
    batch_size=batch_size,
    # specify the classes explicitly
    classes=lables,
    # since we use categorical_crossentropy loss, we need categorical labels
    class_mode='categorical')
That's a model I tried:
# Model
model = Sequential()
model.add(Conv2D(32, (3, 3), 1, activation='relu', input_shape=(image_width, image_height, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, 3, activation='relu'))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(categorys_size, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer="adam",
              metrics=['acc'])
Then I start the learning process:
model.fit_generator(train_generator,
                    steps_per_epoch=epoch_steps,
                    epochs=Epoch,
                    validation_data=train_generator)
But it's not working well: the model treats the oversize T-shirt and the normal T-shirt as the same class, and classifies any kind of pants as jeans.
[Image: model training results]
Then I tested this model:
model = tf.keras.models.Sequential([
    keras.layers.Conv2D(32, kernel_size=(5, 5), activation=tf.keras.activations.relu, input_shape=IMAGE_SHAPE),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.22),
    keras.layers.Conv2D(32, kernel_size=(5, 5), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.25),
    keras.layers.Conv2D(32, kernel_size=(4, 4), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.15),
    keras.layers.Conv2D(32, kernel_size=(3, 3), activation=tf.keras.activations.relu),
    keras.layers.AveragePooling2D(pool_size=(2, 2)),
    keras.layers.BatchNormalization(axis=1),
    keras.layers.Dropout(0.15),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation=tf.keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
    # keras.layers.Dropout(0.25),
    keras.layers.Dense(64, activation=tf.keras.activations.relu, kernel_regularizer=keras.regularizers.l2(0.001)),
    # keras.layers.Dropout(0.1),
    keras.layers.Dense(len(lables), activation=tf.keras.activations.softmax)])
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
Then I start learning:
# Start learning
checkpoint_path = "/chk/cp-{epoch:04d}.ckpt"
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1,
                                                 period=10)
model.fit(train,
          epochs=Epoch, callbacks=[cp_callback],
          validation_data=val, verbose=1)
And that's how I load the data:
data = tf.keras.utils.image_dataset_from_directory('Dataset', labels='inferred', image_size=(192, 192))
data_iterator = data.as_numpy_iterator()
batch = data_iterator.next()
fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(batch[0][:4]):
    ax[idx].imshow(img.astype(int))
    ax[idx].title.set_text(batch[1][idx])
# Scale data
data = data.map(lambda x, y: (x/192, y))
data.as_numpy_iterator().next()
train_size = int(len(data)*.7)
val_size = int(len(data)*.2)
test_size = int(len(data)*.1)
print(train_size)
train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size+val_size).take(test_size)
What am I doing wrong? Is the data I collected good enough for what I am trying to do? Am I missing something?
Thanks
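One thing worth checking in the tf.data pipeline above (my observation, not something the post confirms): image_dataset_from_directory shuffles by default and reshuffles on every pass, so take()/skip() splits drawn from it can leak samples between train, validation, and test; the pixel scaling also divides by 192 (the image size) where 255 (the maximum pixel value) is the usual normalizer. A minimal sketch of a split that stays fixed across epochs, with an illustrative seed:

import tensorflow as tf

# load in a deterministic order, then shuffle once with a fixed seed so
# take()/skip() always see the same batch order
data = tf.keras.utils.image_dataset_from_directory(
    'Dataset', labels='inferred', image_size=(192, 192), shuffle=False)
data = data.shuffle(len(data), seed=123, reshuffle_each_iteration=False)
data = data.map(lambda x, y: (x / 255.0, y))  # scale pixels to [0, 1]

train_size = int(len(data) * .7)
val_size = int(len(data) * .2)
train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size + val_size)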

Merge two sequential models on Keras for hybrid model

I want to combine two sequential models into a hybrid model (with Keras 2.6.0). The first model is a succession of dense layers applied to a set of 4 parameters, and the second is a succession of 2D convolutions applied to an image ((32, 32)). The goal is to predict a curve of 128 points.
My current model:
def get_model_v2(params_shape, img_shape):
    params_model = models.Sequential()
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1'))
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n2'))
    params_model.add(layers.Dense(256, name='Output'))
    img_model = models.Sequential()
    img_model.add(layers.Input(img_shape, name='InputLayer2'))
    img_model.add(layers.Conv2D(64, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Conv2D(16, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Flatten())
    concat = tf.keras.layers.concatenate([params_model, img_model])
    model = models.Sequential()
    model.add(layers.Input(concat, name='InputLayer3'))
    model.add(layers.Dense(256, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1'))
    model.add(layers.Dense(128, name='Output'))
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae', 'mse'])
    return model

model = get_model_v2((4,), (32, 32, 1))
My problem is that when I have to combine the two models, I don't know what to use; with this "concatenate" example I get an error: TypeError: 'NoneType' object is not subscriptable. I understand the problem, but I can't find another solution...
A few issues here:
You are not using params_shape for your params_model, so it comes out with an undefined shape.
As you understood, you can't concatenate models with a concatenation layer; it concatenates tensors.
The final model needs to go through the Functional API.
You have several layers with the same name; you cannot give two layers in the same model the same name.
Here is a corrected version (a quick usage check follows the code):
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.keras.regularizers as regularizers
import tensorflow as tf

def get_model_v2(params_shape, img_shape):
    params_model = models.Sequential()
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n1', input_shape=params_shape))
    params_model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n2'))
    params_model.add(layers.Dense(256, name='Output'))
    img_model = models.Sequential()
    img_model.add(layers.Input(img_shape, name='InputLayer2'))
    img_model.add(layers.Conv2D(64, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Conv2D(16, kernel_size=4, strides=2, padding="same"))
    img_model.add(layers.LeakyReLU(alpha=0.2))
    img_model.add(layers.Flatten())
    param_out = params_model.outputs[0]
    img_out = img_model.outputs[0]
    concat_out = tf.keras.layers.concatenate([param_out, img_out])
    full_dense_out = layers.Dense(256, kernel_regularizer=regularizers.l2(0.001), activation='relu', name='Dense_n3')(concat_out)
    final_out = layers.Dense(128, name='Output_final')(full_dense_out)
    model = models.Model(inputs=[params_model.inputs, img_model.inputs], outputs=final_out)
    model.summary()
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['mae', 'mse'])
    return model

model = get_model_v2((4,), (32, 32, 1))
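A quick smoke test of the combined model, calling predict on random arrays with the shapes from the post (batch size 8 is arbitrary):

import numpy as np

params_batch = np.random.rand(8, 4).astype("float32")       # 8 samples of 4 parameters
img_batch = np.random.rand(8, 32, 32, 1).astype("float32")  # 8 grayscale 32x32 images

pred = model.predict([params_batch, img_batch])
print(pred.shape)  # expected (8, 128): one 128-point curve per sample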

Merging tensors based on a key

I am dealing with a problem in which the network design requires merging the output of one part of the network with tabular input data (the other input) based on a key, and then training the network further on the merged data. It appears there is no way to merge two tensors based on a key, hence the idea of converting the tensor to NumPy, then to a pandas DataFrame, merging there, converting the merged data back to a tensor, and using it further in the network. Below is the code for it:
def build_convnet(shape=(112, 112, 1)):
    from keras.layers import Conv2D, BatchNormalization, MaxPool2D, GlobalMaxPool2D
    momentum = .9
    model = keras.Sequential()
    model.add(Conv2D(64, (3, 3), input_shape=shape,
                     padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    model.add(MaxPool2D())
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(512, (3, 3), padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=momentum))
    # flatten...
    model.add(GlobalMaxPool2D())
    return model

def action_model(shape=(3, 112, 112, 1)):
    from keras.layers import TimeDistributed, GRU, Dense, Dropout, Concatenate
    # create our convnet with (112, 112, 1) input shape
    convnet = build_convnet(shape[1:])
    # then create our final model
    model = keras.Sequential()
    # add the convnet with (3, 112, 112, 1) shape
    model.add(TimeDistributed(convnet, input_shape=shape))
    # here, you can also use GRU or LSTM
    model.add(GRU(64))
    # and finally, we make a decision network
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(4, activation='relu'))
    return model
# create the tab_data and cnn_gru models
tab_dt = keras.Input(shape=(trainX.shape[1],))
cnn_gru = action_model(X_train.shape[1:])
# converting tensor to numpy array and merging with tabular data on a key (Patient)
cnn_gru_np = cnn_gru.output.eval()
cnn_gru_pd = pd.DataFrame(cnn_gru_np, columns=["V1", "V2", "V3", "V4"])
cnn_gru_pd["Patient"] = train_p
tab_dt_np = tab_dt.eval()
tab_dt_pd = pd.DataFrame(tab_dt_np, columns=["Weeks", "Percent", "Age", "Sex_Male", "SmokingStatus_Ex-smoker", "SmokingStatus_Never smoked"])
tab_dt_pd["Patient"] = train_p.numpy()
combinedInput_pd = pd.merge(tab_dt_pd, cnn_gru_pd, on=["Patient"], how="left")
combinedInput_pd.drop(["Patient"], axis=1, inplace=True)
combinedInput_np = np.array(combinedInput_pd)
combinedInput = tf.convert_to_tensor(combinedInput_np)
# begin our regression head
x = Dense(8, activation="relu")(combinedInput)
x = Dense(1, activation="relu")(x)
model = Model(inputs=[tab_dt, cnn_gru.input], outputs=x)
I am getting the error below from the eval() call on the line cnn_gru_np = cnn_gru.output.eval():
ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`
Please help me figure out what is going wrong here.
The reason you're getting a ValueError is that the output of a Keras model isn't an eager tensor, and thus does not support eval like that.
Just try:
some_model = keras.Sequential([keras.layers.Dense(10, input_shape=(5,))])
print(type(some_model.output))
print(type(tf.zeros((2,))))
some_model.output.eval()
# <class 'tensorflow.python.framework.ops.Tensor'>
# <class 'tensorflow.python.framework.ops.EagerTensor'>
# ValueError
However, there is a bigger problem with your approach: there is no connected computation graph from your model's inputs to your model's outputs, because none of the pandas operations are TensorFlow ops. I.e., even if you were able to use eager tensors, you still wouldn't be able to train your model with automatic differentiation.
You're going to have to specify your entire model in TensorFlow, I'm afraid.
Maybe you could do the data processing before giving it as input to the model? Then you would only need split and concat ops to put everything together; a sketch of that idea follows.
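A minimal sketch of that last suggestion: do the key-based merge in pandas before training, then let a Concatenate layer join the two branches inside the graph, where gradients can flow. The names tab_df and y_train here are illustrative, not from the original post (which only shows train_p, trainX, and X_train):

import pandas as pd
from tensorflow import keras
from tensorflow.keras.layers import Concatenate, Dense

# align the tabular rows to the image-sequence order via the Patient key;
# this is plain pandas, done once, outside the computation graph
aligned_tab = (pd.DataFrame({"Patient": train_p})
                 .merge(tab_df, on="Patient", how="left")
                 .drop(columns="Patient")
                 .to_numpy())

tab_in = keras.Input(shape=(aligned_tab.shape[1],))
cnn_gru = action_model(X_train.shape[1:])

# the join is now a graph op, so automatic differentiation works end to end
merged = Concatenate()([tab_in, cnn_gru.output])
x = Dense(8, activation="relu")(merged)
out = Dense(1)(x)

model = keras.Model(inputs=[tab_in, cnn_gru.input], outputs=out)
model.compile(optimizer="adam", loss="mse")
model.fit([aligned_tab, X_train], y_train, epochs=10)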

Tensor name has no shape information ERROR when predicting with tensorflow serving

I'm using TensorFlow Serving to serve a SavedModel. I have two signatures: the first outputs the Keras model.output, and the second outputs post-processing of model.output. When I try a predict call with the second signature on TensorFlow Serving, it gives me the error { "error": "Tensor name: prediction has no shape information " }
This is the code that builds the SavedModel:
shape1 = 92
shape2 = 92
reg=0.000001
learning_rate=0.001
sess = tf.Session()
K.set_session(sess)
K._LEARNING_PHASE = tf.constant(0)
K.set_learning_phase(0)
#preprocessing
x_input = tf.placeholder(tf.string, name='x_input', shape=[None])
reshaped = tf.reshape(x_input, shape=[])
image = tf.image.decode_jpeg(reshaped, channels=3)
image2 = tf.expand_dims(image,0)
resized = tf.image.resize_images(image2, (92,92))
meaned = tf.math.subtract(resized, tf.constant(116.0))
normalized = tf.math.divide(meaned, tf.constant(66.0))
#keras model
model = tf.keras.Sequential()
model.add(InputLayer(input_tensor=normalized))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.1))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.3))
model.add(Conv2D(256, (3, 3), padding='same', activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu', kernel_regularizer=l2(reg)))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=tf.train.RMSPropOptimizer(learning_rate=learning_rate),
              metrics=['accuracy'])
#post processing to output label
pred = tf.gather_nd(model.output, (0,0))
label = tf.cond(pred > 0.5, lambda: tf.constant('Dog', shape=[]), lambda: tf.constant('Cat', shape=[]))
model.load_weights(r'./checkpoints/4.ckpt')
export_path = './saved_models/1'
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
model.load_weights(r'./checkpoints/4.ckpt')
if os.path.isdir(export_path):
    print('\nAlready saved a model, cleaning up\n')
    print(subprocess.run(['rm', '-r', export_path]))
#first signature(this works)
x_info = tf.saved_model.utils.build_tensor_info(x_input)
y_info = tf.saved_model.utils.build_tensor_info(model.output)
sigmoid_signature = build_signature_def(inputs={"image": x_info}, outputs={"prediction":y_info}, method_name='tensorflow/serving/predict')
#2nd signature(this doesn't work)
x_info = tf.saved_model.utils.build_tensor_info(x_input)
y_info = tf.saved_model.utils.build_tensor_info(label)
label_signature = build_signature_def(inputs={"image": x_info}, outputs={"prediction":y_info}, method_name='tensorflow/serving/predict')
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(sess=sess,
                                     tags=["serve"],
                                     signature_def_map={'sigmoid': sigmoid_signature, 'label': label_signature})
builder.save()
This is the code that calls TF Serving:
imgs = ['./Dog/' + img for img in imgs]
img = open('./Dog/3.jpg', 'rb').read()
img = base64.b64encode(img).decode('utf-8')
data = json.dumps(
    {"signature_name": "label",
     "instances": [
         {'image': {'b64': img}}
     ]}
)
json_response = requests.post('http://localhost:8501/v1/models/pet:predict', data=data)
print(json_response.text)
Instead of getting a response of {"predictions": "Dog"}, I am getting the error { "error": "Tensor name: prediction has no shape information " }
I managed to fix this: I used tf.reshape on what I wanted to output and passed that into the signature builder.
# post processing to output label
pred = tf.gather_nd(model.output, (0, 0))
label = tf.cond(pred > 0.5, lambda: tf.constant('Dog', shape=[]), lambda: tf.constant('Cat', shape=[]))
label_reshaped = tf.reshape(label, [-1])  # reshape to 1-D; tf.reshape does not accept None in the shape
...
# 2nd signature (now with shape information)
x_info = tf.saved_model.utils.build_tensor_info(x_input)
y_info = tf.saved_model.utils.build_tensor_info(label_reshaped)
label_signature = build_signature_def(inputs={"image": x_info}, outputs={"prediction": y_info}, method_name='tensorflow/serving/predict')
Reading the tensorflow serving documentation, you'll see that there are two ways to specify input tensors in your request, the row format (using instances like your example), and the column format (using inputs).
Since the row format requires that all inputs and outputs have the same 0th dimension, if you did not export the model with explicit output shape, you cannot use the row format.
Therefore, in your case (without having to re-export the model with explicit reshaping, as the other answer does), you can send this payload instead:
data = json.dumps(
    {
        "signature_name": "label",
        "inputs": {'image': {'b64': img}}
    }
)
On the other hand, keep in mind that if you do want to send multiple b64-encoded images, your best bet is the row format with multiple instances (for example, to run batch predict on multiple images); a sketch follows.
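For completeness, such a batch request might look like this, where img1 and img2 stand for base64-encoded strings prepared as above (note that the graph in the question reshapes its input down to a single scalar string, so it would need re-exporting before it can really serve more than one image per request):

data = json.dumps(
    {
        "signature_name": "label",
        "instances": [
            {"image": {"b64": img1}},
            {"image": {"b64": img2}},
        ]
    }
)
json_response = requests.post('http://localhost:8501/v1/models/pet:predict', data=data)
print(json_response.text)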

tensorflow core dumped after check failed

System information
I used Keras with the TensorFlow backend to train on batches of 300*300*3 RGB images.
Linux CentOS 7
TensorFlow installed from binary
TensorFlow version: 1.4/1.5
Python version: 3.6
Describe the problem
I used the following code to build the CNN model:
model = Sequential()
model.add(Convolution2D(32, (5, 5), activation='relu', input_shape=(3, height, width), data_format='channels_first'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=36, kernel_size=(5, 5), activation='relu'))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
batch_steps = get_training_data_batch()
model.fit_generator(training_data_generator(), steps_per_epoch=batch_steps, epochs=15, verbose=2)
model.summary()
x_test, y_test = get_testing_data()
score = model.evaluate(x_test, y_test, verbose=0)
print("score:", score[1])
batch_size = 2000
def training_data_generator():
    for benchmark_num in training_benchmark_num_list:
        x_data, y_data = get_feature_data(benchmark_num)
        samples_totals = x_data.shape[0]
        print("samples totals:", samples_totals)
        batch = samples_totals / batch_size + 1
        for x in range(0, int(batch), 1):
            x_sub_data = x_data[x*batch_size:(x+1)*batch_size]
            y_sub_data = y_data[x*batch_size:(x+1)*batch_size]
            x_train = x_sub_data.reshape(x_sub_data.shape[0], 3, height, width)
            x_train = x_train.astype('float32')
            x_train /= 255
            y_train = np_utils.to_categorical(y_sub_data, 2)
            yield (x_train, y_train)
The program shows this error before the core dump:
F tensorflow/core/kernels/maxpooling_op.cc:177]
Check failed: input_backprop_index >= in_start && input_backprop_index < in_end Invalid input backprop index: -1491167680, 2803712000, 2806515712
I traced the TensorFlow source code; it is a CHECK operation, and the backprop index should never be negative. I don't know TensorFlow well; why does such a problem appear? I think this check failure is the root cause of the core dump. Could you help me solve this problem?
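One reading of the numbers (an observation, not something the post confirms): the failing index -1491167680 plus 2^32 is 2803799616, which lands inside the reported valid range [2803712000, 2806515712), so the index has simply wrapped around a 32-bit integer. That happens when a single batch tensor holds more than 2^31 elements: with batch_size = 2000, the first conv layer's output is 2000 * 32 * 296 * 296, roughly 5.6 billion elements. A minimal sketch of the workaround under that assumption:

# assuming 32-bit index overflow in the pooling gradient: one sample's
# feature map is 32 * 296 * 296 = 2,803,712 elements, so the batch must
# stay below 2**31 / 2,803,712 (about 765 samples)
batch_size = 512  # down from 2000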