I am trying to take a simple Keras model with an Add operation, convert it to TFLite, and then compile it for the EdgeTPU.
Int8 quantization is required, but depending on the conversion parameters provided, it fails with either an unsupported operation (FlexAddV2), an unsupported data type (int32), or an error on AddV2 (Error code: ERROR_NEEDS_FLEX_OPS).
The model and conversion are relatively simple and straightforward:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import random

def representative_dataset():
    for _ in range(100):
        #data = random.randint(0, 1)
        #yield [data]
        data = np.random.rand(32)*2
        yield [data.astype(np.int8)]
input = keras.Input(shape=(32,), name="dummy_input", dtype=tf.int8)
output = tf.add(input, 1)
model = keras.Model(inputs=input, outputs=output)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS_INT8,  # enable int8 TFLite builtin ops.
    tf.lite.OpsSet.TFLITE_BUILTINS,       # enable float TFLite builtin ops.
    tf.lite.OpsSet.SELECT_TF_OPS          # enable TensorFlow (Flex) ops.
]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.int8 # or tf.uint8
converter.inference_output_type = tf.int8 # or tf.uint8
converter.experimental_new_quantizer = True # It will enable conversion and quantization of MLIR ops
converter.experimental_new_converter = False
tflite_quant_model = converter.convert()
Output from running the conversion:
Traceback (most recent call last):
File "/home/gsosnow/doc/gt2tf.py", line 27, in
tflite_quant_model = converter.convert()
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 929, in wrapper
return self._convert_and_export_metrics(convert_func, *args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 908, in _convert_and_export_metrics
result = convert_func(self, *args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1338, in convert
saved_model_convert_result = self._convert_as_saved_model()
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1320, in _convert_as_saved_model
return super(TFLiteKerasModelConverterV2,
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/lite.py", line 1131, in convert
result = _convert_graphdef(
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert_phase.py", line 212, in wrapper
raise converter_error from None # Re-throws the exception.
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert_phase.py", line 205, in wrapper
return func(*args, **kwargs)
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert.py", line 794, in convert_graphdef
data = convert(
File "/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/lite/python/convert.py", line 311, in convert
raise converter_error
tensorflow.lite.python.convert_phase.ConverterError: /home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/python/saved_model/save.py:1325:0: error: 'tf.AddV2' op is neither a custom op nor a flex op
:0: note: loc(fused["PartitionedCall:", "PartitionedCall"]): called from
/home/gsosnow/anaconda3/lib/python3.9/site-packages/tensorflow/python/saved_model/save.py:1325:0: note: Error code: ERROR_NEEDS_FLEX_OPS
:0: error: failed while converting: 'main':
Some ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select
TF Select ops: AddV2
Details:
tf.AddV2(tensor<?x32xi8>, tensor) -> (tensor<?x32xi8>) : {device = ""}
This was resolved here:
https://github.com/google-coral/edgetpu/issues/655
The key change is to build the Keras graph in float32 and let the converter insert the int8 quantization, with the representative dataset yielding float32 samples. Here is the Python conversion code that accomplishes this:
import tensorflow as tf
from tensorflow import keras
import numpy as np
import random

def representative_dataset():
    for _ in range(100):
        #data = random.randint(0, 1)
        #yield [data]
        data = np.random.rand(32)*2
        yield [data.astype(np.float32)]
input = keras.Input(shape=(32,), name="dummy_input", dtype=tf.float32)
output = tf.add(input, 1)
# output = tf.keras.layers.Add()([input, input])
model = keras.Model(inputs=input, outputs=output)
print(model.summary())
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.int8 # or tf.uint8
converter.inference_output_type = tf.int8 # or tf.uint8
tflite_quant_model = converter.convert()
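To confirm the conversion really produced int8 inputs and outputs before handing the model to the EdgeTPU compiler, here is a minimal sketch of a check using the standard tf.lite.Interpreter API on the flatbuffer just produced:

import tensorflow as tf

# Load the converted flatbuffer directly from memory.
interpreter = tf.lite.Interpreter(model_content=tflite_quant_model)
interpreter.allocate_tensors()
# Both should report numpy.int8 for a fully quantized model.
print(interpreter.get_input_details()[0]["dtype"])
print(interpreter.get_output_details()[0]["dtype"])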
Related
I'm trying to convert TensorFlow weights to TensorFlow Lite. First, I generated a TensorFlow SavedModel and then tried to convert it to .tflite. I am doing this in Google Colab, executing this code:
%cd /content/tensorflow-yolov4-tflite
!python convert_tflite.py --weights ./checkpoints/yolov4-tiny-pretflite-416 --output ./checkpoints/yolov4-tiny-416.tflite
and the result is:
`tensorflow.lite.python.convert_phase.ConverterError`: Variable constant folding is failed. Please consider using enabling `experimental_enable_resource_variables` flag in the TFLite converter object. For example, converter.experimental_enable_resource_variables = True
By the way, this is my convert_tflite.py file:
import tensorflow as tf
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
import cv2
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, decode
import core.utils as utils
import os
from core.config import cfg

flags.DEFINE_string('weights', './checkpoints/yolov4-416', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov4-416-fp32.tflite', 'path to output')
flags.DEFINE_integer('input_size', 416, 'input size')
flags.DEFINE_string('quantize_mode', 'float32', 'quantize mode (int8, float16, float32)')
flags.DEFINE_string('dataset', "/Volumes/Elements/data/coco_dataset/coco/5k.txt", 'path to dataset')

def representative_data_gen():
    fimage = open(FLAGS.dataset).read().split()
    for input_value in range(10):
        if os.path.exists(fimage[input_value]):
            original_image = cv2.imread(fimage[input_value])
            original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
            image_data = utils.image_preprocess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])
            img_in = image_data[np.newaxis, ...].astype(np.float32)
            print("calibration image {}".format(fimage[input_value]))
            yield [img_in]
        else:
            continue

def save_tflite():
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    if FLAGS.quantize_mode == 'float16':
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
    elif FLAGS.quantize_mode == 'int8':
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        # note: this second assignment overwrites the TFLITE_BUILTINS_INT8 setting above
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen
    tflite_model = converter.convert()
    open(FLAGS.output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))

def demo():
    interpreter = tf.lite.Interpreter(model_path=FLAGS.output)
    interpreter.allocate_tensors()
    logging.info('tflite model loaded')
    input_details = interpreter.get_input_details()
    print(input_details)
    output_details = interpreter.get_output_details()
    print(output_details)
    input_shape = input_details[0]['shape']
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
    print(output_data)

def main(_argv):
    save_tflite()
    demo()

if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        pass
I saved this file in the tensorflow-yolov4-tflite folder.
Does anyone know how to solve this problem? Thank you.
Add two lines before `tflite_model = converter.convert()` in the `save_tflite()` function, like this:
converter.experimental_enable_resource_variables = True
converter.experimental_new_converter = True
tflite_model = converter.convert()
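In context, the top of the patched `save_tflite()` would then look like this (a sketch; only the two flag assignments are new, everything else is unchanged from the script above):

def save_tflite():
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    # ... quantize_mode branches unchanged ...
    converter.experimental_enable_resource_variables = True
    converter.experimental_new_converter = True
    tflite_model = converter.convert()
    open(FLAGS.output, 'wb').write(tflite_model)
    logging.info("model saved to: {}".format(FLAGS.output))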
I have an exported frozen graph .pb file, which I converted to TFLite using
graph_def_file = "model.pb"
input_arrays = ["Placeholder"]
input_shape = {"Placeholder": [1024, 2048, 3]}
output_arrays = ["final_output"]
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays, input_shape)
tflite_model = converter.convert()
with open("my_model.tflite", "wb") as f:
f.write(tflite_model)
And I could not restore my model with
interpreter = tf.lite.Interpreter(model_path="my_model.tflite")
interpreter.resize_tensor_input(0, [1024, 2048, 3], strict=True)
interpreter.allocate_tensors()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-28-3cfaee7dc51c> in <module>
1 interpreter = tf.lite.Interpreter(model_path=model_path)
2 interpreter.resize_tensor_input(0, [1024, 2048, 3], strict=True)
----> 3 interpreter.allocate_tensors()
~/.local/lib/python3.9/site-packages/tensorflow/lite/python/interpreter.py in allocate_tensors(self)
512 def allocate_tensors(self):
513 self._ensure_safe()
--> 514 return self._interpreter.AllocateTensors()
515
516 def _safe_to_run(self):
RuntimeError: tensorflow/lite/kernels/conv.cc:349 input->dims->data[3] != filter->dims->data[3] (65 != 64). Node number 11 (CONV_2D) failed to prepare. Failed to apply the default TensorFlow Lite delegate indexed at 0.
However, I can successfully reload the frozen graph from the .pb file using
with tf.gfile.GFile('my_model.pb', "rb") as pb:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(pb.read())
with tf.Graph().as_default() as graph:
    tf.import_graph_def(
        graph_def,
        name="",
    )
and can also successfully generate results with
node_in = graph.get_tensor_by_name('Placeholder:0')
node_out = graph.get_tensor_by_name('final_output:0')
with tf.Session(graph=graph) as sess:  # Session()
    # sess.run(tf.global_variables_initializer())
    feed_dict = {node_in: input_img}
    pred = sess.run(node_out, feed_dict)
    print(pred)
    sess.close()
I've checked node 11 of the .tflite file in Netron, and everything seemed fine.
Node 11
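For reference, the same shape information can be dumped programmatically with the standard TFLite interpreter API (a sketch; it lists every tensor recorded in the file, including the CONV_2D input and filter shapes the error message compares):

import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="my_model.tflite")
# Print index, name, shape, and dtype of every tensor in the model.
for t in interpreter.get_tensor_details():
    print(t["index"], t["name"], t["shape"], t["dtype"])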
What could be the problem?
I am trying to adapt this PySyft federated learning code to my CSV data. The tutorial I am following is https://github.com/bt-s/Split-Learning-and-Federated-Learning/blob/master/src/federated_learning.py; it uses the torch FMNIST dataset, which is image data, and I am having difficulty customizing the code for my CSV data.
This is the error I am getting:
File "C:/user/python/PCA/federated_learning.py", line 175, in <module>
    train_loader = sy.FederatedDataLoader(train_set, transform=data_transformer.federate(workers), train=True,
                                          batch_size=args.batch_size, shuffle=True, **kwargs)
AttributeError: 'Compose' object has no attribute 'federate'
import torch
import syft as sy
from torchvision import transforms

# PySyft needs to be hooked to PyTorch to enable its features
hook = sy.TorchHook(torch)

# Define the workers
alfa = sy.VirtualWorker(hook, id="alfa")
bravo = sy.VirtualWorker(hook, id="bravo")
workers = (alfa, bravo)

device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}

# Specify the required data transformation
data_transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
import pandas as pd
print("Loading CSV...")
test_set = pd.read_csv("C:/user/python/PCA/data/test.csv", encoding = "UTF-8")
train_set = pd.read_csv("C:/user/python/PCA/data/train.csv", encoding = "UTF-8")
train_loader = sy.FederatedDataLoader(train_set, transform=data_transformer.federate(workers), train=True, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_set, transform=data_transformer, train=False, batch_size=args.batch_size, shuffle=True, **kwargs)
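The AttributeError above comes from calling .federate() on the transforms.Compose object; in PySyft it is the dataset that gets federated across workers, not the transform. A minimal sketch of that pattern for tensors loaded from CSV, assuming PySyft 0.2.x (the "label" column name is hypothetical):

import torch
import syft as sy

# Hypothetical layout: last column is the label, the rest are features.
features = torch.tensor(train_set.drop(columns=["label"]).values, dtype=torch.float32)
targets = torch.tensor(train_set["label"].values, dtype=torch.long)

# Wrap the tensors in a dataset and federate the *dataset*, not the transform.
federated_train_set = sy.BaseDataset(features, targets).federate(workers)
train_loader = sy.FederatedDataLoader(federated_train_set,
                                      batch_size=args.batch_size, shuffle=True, **kwargs)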
Here is the error: ValueError: Output tensors to a Model must be the output of a TensorFlow Layer (thus holding past layer metadata)
I am trying to train and deploy a multi-input Keras model with AWS SageMaker, but there seem to be some showstopper issues with the libraries involved, which expect a single input for Keras models.
I have three categorical input variables and one numeric variable. The target variable is also categorical. I have no test or validation data; I am only interested in getting the training to run without errors.
After data preparation I merged the arrays as follows and then stored them in S3:
input_train = np.column_stack((input_cat1, input_cat2, input_num, input_cat3))
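For completeness, the upload below implies the merged array was first written to data/training.npz with the keys that train.py later reads (a sketch; the target array name is an assumption from the asker's preparation step):

# Keys match the np.load(...)['train_input'] / ['train_output'] calls in train.py.
np.savez('data/training.npz', train_input=input_train, train_output=target)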
training_input_path = sage_maker_session.upload_data('data/training.npz', key_prefix=prefix + training_folder)
print(training_input_path)
s3://sagemaker-eu-central-1-xxxxxxxxxxxxx/user_tracking/training/training.npz
In the train.py script (the entry_point), I fetch the file from S3 again. The train.py script itself runs without problems when executed outside SageMaker.
%%writefile train.py
### import library ###
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--batch-size', type=int, default=50)
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
#parser.add_argument('--model-dir', type=str)
parser.add_argument('--training', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
#parser.add_argument('--training', type=str, default='data')
args, _ = parser.parse_known_args()
epochs = args.epochs
batch_size = args.batch_size
model_dir = args.model_dir
training_dir = args.training
input_train =np.load(os.path.join(training_dir, 'training.npz'))['train_input']
target =np.load(os.path.join(training_dir, 'training.npz'))['train_output']
input_cat1 = input_train[:,0].astype(np.int32)
input_cat2 = input_train[:,1].astype(np.int32)
input_cat3 = input_train[:,3:].astype(np.int32)
input_num = input_train[:,2].astype(np.float32)
n_steps = 2 # number of timesteps in each sample
num_unique_os = 5 #len(le_betriebsystem.classes_)+1
num_unique_browser = 10 #len(le_browser.classes_)+1
num_unique_actions = 210 #len(le_actionen.classes_)+1
#numeric Input
numerical_input = tf.keras.Input(shape=(1,), name='numeric_input')
#categorical Input
os_input = tf.keras.Input(shape=(1,), name='os_input')
browser_input = tf.keras.Input(shape=(1,), name='browser_input')
action_input= tf.keras.Input(shape=(max_seq_len,), name='action_input')
emb_os = tf.keras.layers.Embedding(num_unique_os, 32)(os_input)
emb_browser = tf.keras.layers.Embedding(num_unique_browser, 32)(browser_input)
emb_actions = tf.keras.layers.Embedding(num_unique_actions, 64)(action_input)
actions_repr = tf.keras.layers.LSTM(300, return_sequences=True)(emb_actions)
actions_repr = tf.keras.layers.LSTM(200)(emb_actions)
emb_os = tf.squeeze(emb_os, axis=1)
emb_browser = tf.squeeze(emb_browser, axis=1)
activity_repr = tf.keras.layers.Concatenate()([emb_os, emb_browser, actions_repr,
numerical_input])
x = tf.keras.layers.RepeatVector(n_steps)(activity_repr)
x = tf.keras.layers.LSTM(288, return_sequences=True)(x)
next_n_actions = tf.keras.layers.Dense(num_unique_actions-1, activation='softmax')(x)
model = tf.keras.Model(inputs=[numerical_input, os_input, browser_input, action_input], outputs =
next_n_actions)
model.summary()
model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
history = model.fit({'numeric_input': input_num,
'os_input': input_cat1,
'browser_input': input_cat2,
'action_input': input_cat3}, target, batch_size=50, epochs=130)
tf.saved_model.simple_save(
tf.keras.backend.get_session(),
os.path.join(model_dir, '1'),
inputs={'inputs': model.input},
outputs={t.name: t for t in model.outputs})
I received this:
Model Summary
Metric Tendency
When running the whole thing again on the SageMaker TensorFlow instance, the following error occurred:
Traceback (most recent call last):
  File "train.py", line 105, in <module>
    model = tf.keras.Model(inputs=[numerical_input, os_input, browser_input, action_input], outputs = next_n_actions)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 121, in __init__
    super(Model, self).__init__(*args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py", line 80, in __init__
    self._init_graph_network(*args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/checkpointable/base.py", line 474, in _method_wrapper
    method(self, *args, **kwargs)
  File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py", line 224, in _init_graph_network
    '(thus holding past layer metadata). Found: ' + str(x))
ValueError: Output tensors to a Model must be the output of a TensorFlow Layer (thus holding past layer metadata). Found: Tensor("dense/truediv:0", shape=(?, 2, 209), dtype=float32)
2021-03-08 21:52:04,761 sagemaker-containers ERROR ExecuteUserScriptError:
Command "/usr/bin/python train.py --batch-size 50 --epochs 150 --model_dir s3://sagemaker-eu-central-1-xxxxxxxxxxxxxxxxx/sagemaker-tensorflow-scriptmode
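In TF 1.x this error typically appears when raw TensorFlow ops are mixed into a functional Keras graph; here the tf.squeeze calls strip the Keras layer metadata from everything downstream. A sketch of the commonly suggested workaround, not a guaranteed fix, is to wrap those ops in a Lambda layer:

# Replace the raw ops
#   emb_os = tf.squeeze(emb_os, axis=1)
#   emb_browser = tf.squeeze(emb_browser, axis=1)
# with Lambda layers so the outputs keep Keras layer metadata:
emb_os = tf.keras.layers.Lambda(lambda t: tf.squeeze(t, axis=1))(emb_os)
emb_browser = tf.keras.layers.Lambda(lambda t: tf.squeeze(t, axis=1))(emb_browser)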
I used TensorFlow versions 2.0.4 and 1.15.4 with the conda_tensorflow_p36 and conda_tensorflow2_p36 kernels, respectively.
For more of the code: https://gitlab.com/patricksardin08/data-science/-/tree/master/
Please, I need your help. I'm here around the clock if anyone wants me to explain the question in more detail.
I'm deploying a text-matching TensorFlow program on Docker, following the official website. The installation steps and the test steps are all OK, and the server's running status is also OK, but the client has a problem.
Let me explain it in detail.
This is my model graph with four inputs:
This is my model server:
import os
import shutil
import tensorflow as tf
from preprocess import Word2Vec, TestQA, WebQA
from ABCNN import ABCNN
from utils import build_path
import numpy as np

Max_len = 40
d0 = 300

'''
Loads the saved BCNN model, injects additional layers for the input
transformation and exports the model into protobuf format
'''

# Command line arguments
tf.app.flags.DEFINE_string('checkpoint_dir', './models/',
                           "Directory where to read training checkpoints.")
tf.app.flags.DEFINE_string('output_dir', './models-export',
                           "Directory where to export the model.")
tf.app.flags.DEFINE_integer('model_version', 1,
                            "Version number of the model.")
FLAGS = tf.app.flags.FLAGS
def test(w, l2_reg, epoch, max_len, model_type, num_layers, data_type, classifier, num_classes):
    model_path = build_path("./models/", data_type, model_type, num_layers)
    model = ABCNN(s=max_len, w=w, l2_reg=l2_reg, model_type=model_type, num_classes=num_classes, num_layers=num_layers)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        print(model_path + "-" + str(12))
        saver.restore(sess, model_path + "-" + str(12))
        x1 = tf.placeholder(tf.float32, shape=[None, d0, max_len])
        x2 = tf.placeholder(tf.float32, shape=[None, d0, max_len])
        y = tf.placeholder(tf.int32, shape=[None])
        features = tf.placeholder(tf.float32, shape=[None, 4])  # num_features = 4
        export_path = os.path.join(
            tf.compat.as_bytes(FLAGS.output_dir),
            tf.compat.as_bytes(str(FLAGS.model_version)))
        if os.path.exists(export_path):
            shutil.rmtree(export_path)
        # create model builder
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        # create tensors info
        inputs = {
            "x1": tf.saved_model.utils.build_tensor_info(x1),
            "x2": tf.saved_model.utils.build_tensor_info(x2),
            "label": tf.saved_model.utils.build_tensor_info(y),
            "features": tf.saved_model.utils.build_tensor_info(features)
        }
        output = {"predict_score": tf.saved_model.utils.build_tensor_info(model.prediction)}
        # build prediction signature
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs=inputs,
                outputs=output,
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
            )
        )
        # save the model
        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_score': prediction_signature
            },
            legacy_init_op=legacy_init_op)
        builder.save()
        print("Successfully exported BCNN model version '{}' into '{}'".format(
            FLAGS.model_version, FLAGS.output_dir))
def main(_):
    # default parameters
    params = {
        "ws": 4,
        "l2_reg": 0.0004,
        "epoch": 20,
        "max_len": 40,
        "model_type": "BCNN",
        "num_layers": 2,
        "num_classes": 2,
        "data_type": "WebQA",
        "classifier": "LR",
        # "word2vec": Word2Vec()
    }
    test(w=int(params["ws"]), l2_reg=float(params["l2_reg"]), epoch=int(params["epoch"]),
         max_len=int(params["max_len"]), model_type=params["model_type"],
         num_layers=int(params["num_layers"]), data_type=params["data_type"],
         classifier=params["classifier"], num_classes=params["num_classes"])

if __name__ == '__main__':
    tf.app.run()
And this is my client:
import sys
sys.path.insert(0, "./")
# from tensorflow_serving_client.protos import predict_pb2, prediction_service_pb2_grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from grpc.beta import implementations
# import grpc
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import dtypes
import time
from preprocess import Word2Vec, MSRP, WikiQA, WebQA

im_name = "dir/files"

if __name__ == '__main__':
    test_data = WebQA(word2vec=Word2Vec(), max_len=40)
    test_data.open_file(mode="test")
    s1s, s2s, labels, features = test_data.only_batch()
    s1s = s1s[0]
    s2s = s2s[0]
    labels = labels[0]
    features = features[0]
    print(s1s.shape)
    print(s2s.shape)
    print(labels.shape)
    print(features.shape)
    start_time = time.time()

    channel = implementations.insecure_channel("localhost", 9000)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = "bcnn"
    request.model_spec.signature_name = "predict_score"
    request.inputs["x1"].CopyFrom(tf.contrib.util.make_tensor_proto(s1s, dtype=dtypes.float32))
    request.inputs["x2"].CopyFrom(tf.contrib.util.make_tensor_proto(s2s, dtype=dtypes.float32))
    request.inputs["label"].CopyFrom(tf.contrib.util.make_tensor_proto(labels, dtype=dtypes.int32))
    request.inputs["features"].CopyFrom(tf.contrib.util.make_tensor_proto(features, dtype=dtypes.float32))

    response = stub.Predict(request, 10.0)
    results = {}
    for key in response.outputs:
        tensor_proto = response.outputs[key]
        nd_array = tf.contrib.util.make_ndarray(tensor_proto)
        results[key] = nd_array
    print("cost %ss to predict: " % (time.time() - start_time))
    print(results["predict_score"])
And this is the error:
root@15bb1c2766e3:/ABCNN-master# python3 client.py
(1, 300, 40)
(1, 300, 40)
(1,)
(1, 4)
WARNING:tensorflow:From /usr/local/lib/python3.5/dist-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Use the retry module or similar alternatives.
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/grpc/beta/_client_adaptations.py", line 193, in _blocking_unary_unary
credentials=_credentials(protocol_options))
File "/usr/local/lib/python3.5/dist-packages/grpc/_channel.py", line 487, in __call__
return _end_unary_response_blocking(state, call, False, deadline)
File "/usr/local/lib/python3.5/dist-packages/grpc/_channel.py", line 437, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with (StatusCode.INVALID_ARGUMENT, You must feed a value for placeholder tensor 'x2' with dtype float and shape [?,300,40]
[[Node: x2 = Placeholder[dtype=DT_FLOAT, shape=[?,300,40], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]])>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "client.py", line 50, in <module>
response = stub.Predict(request, 10.0)
File "/usr/local/lib/python3.5/dist-packages/grpc/beta/_client_adaptations.py", line 309, in __call__
self._request_serializer, self._response_deserializer)
File "/usr/local/lib/python3.5/dist-packages/grpc/beta/_client_adaptations.py", line 195, in _blocking_unary_unary
raise _abortion_error(rpc_error_call)
grpc.framework.interfaces.face.face.AbortionError: AbortionError(code=StatusCode.INVALID_ARGUMENT, details="You must feed a value for placeholder tensor 'x2' with dtype float and shape [?,300,40]
[[Node: x2 = Placeholder[dtype=DT_FLOAT, shape=[?,300,40], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]")
ns.py", line 309, in __call__er# on3.5/dist-packages/grpc/beta/_client_adaptatio
I have printed the shape of features, and it is (1, 4), so why does it still say I have fed the wrong tensor? I can't figure it out.
Thanks in advance for any suggestions.
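One way to sanity-check what the exported signature actually binds to is to load the SavedModel back and print the signature (a sketch, assuming the export path used above). Note that the x1/x2/label/features placeholders in test() are created after saver.restore(), so they may not be the tensors model.prediction actually reads; the printout shows the exact tensor names (e.g. "x2:0" vs "x2_1:0") the server expects to be fed:

import tensorflow as tf

# Load the exported SavedModel and print which graph tensors the
# "predict_score" signature inputs and outputs are bound to.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], "./models-export/1")
    print(meta_graph.signature_def["predict_score"])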