I am trying to print the actual value of a tensor loaded from my custom tfds dataset, and I can't figure out how to do it. I am using TensorFlow 2, so sessions are no longer encouraged. I tried .numpy(), tf.print, and tf.executing_eagerly(), but nothing works: it either prints
just the tensor object, showing me the shape, or, in the case of .numpy(), it throws the error in the title.
I need the values of the tensors, and I need to bring them back to NumPy in order to debug the code.
This is how I create the dataset:
import glob
import os

import tensorflow as tf
import tensorflow_datasets as tfds

# IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, CLASSES and DATASETS_ROOT are defined elsewhere

class dt(tfds.core.GeneratorBasedBuilder):
    '''Dataset builder'''

    # Double-check before release
    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    def _info(self) -> tfds.core.DatasetInfo:
        '''Dataset metadata'''
        return tfds.core.DatasetInfo(
            builder=self,
            features=tfds.features.FeaturesDict({
                "id": tf.int64,
                "image": tfds.features.Image(shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), encoding_format='png'),
                "mask": tfds.features.Image(shape=(IMG_HEIGHT, IMG_WIDTH, 1), encoding_format='png'),
                "label": tfds.features.ClassLabel(names=CLASSES),
            }),
            supervised_keys=('image', 'mask')
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        '''Split generator for train and test splits'''
        path = DATASETS_ROOT
        return {
            "train": self._generate_examples(
                images_path=os.path.join(path, "train/rgb"),
                masks_path=os.path.join(path, "train/masks")
            ),
            "test": self._generate_examples(
                images_path=os.path.join(path, "test/rgb"),
                masks_path=os.path.join(path, "test/masks")
            )
        }

    def _generate_examples(self, images_path, masks_path):
        '''Generator of examples for each split'''
        # sorted() keeps image/mask pairs aligned; glob order is not guaranteed
        for i, (image, mask) in enumerate(zip(sorted(glob.glob(images_path + "/*.png")),
                                              sorted(glob.glob(masks_path + "/*.png")))):
            yield i, {
                "id": i,
                "image": image,
                "mask": mask,
                "label": CLASSES[3],
            }
This is how I try to extract the NumPy array:

def custom_load_X_Y(training=True):
    if training:
        dt, dt_info = tfds.load("dt", split="train", shuffle_files=True, as_supervised=True, with_info=True)
        print(f'EAGERLY {tf.executing_eagerly()}')
        print(f'Mode type {type(dt)}')
        tf.print(f"aaaaa {dt.numpy()} aaaaaa")  # this line raises the error
Console output:
(screenshot of the console output omitted)
You can print the image dataset by using tfds.show_examples()
import tensorflow_datasets as tfds
ds, ds_info = tfds.load('cifar10', split='train', with_info=True)
fig = tfds.show_examples(ds, ds_info)
Output: (example images omitted)
For more details please refer to this gist. Thank you.
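If the goal is to inspect raw values rather than plot them, note that tfds.load returns a tf.data.Dataset, which itself has no .numpy(); under eager execution you call .numpy() on the per-element tensors, or convert the pipeline with tfds.as_numpy. A minimal sketch, assuming the "dt" builder above is registered:

import tensorflow_datasets as tfds

ds = tfds.load("dt", split="train", as_supervised=True)
for image, mask in ds.take(1):
    # element tensors expose .numpy(); the dataset object does not
    print(image.numpy().shape, mask.numpy().shape)

# or convert the whole pipeline to NumPy iterables:
for image, mask in tfds.as_numpy(ds.take(1)):
    print(type(image))  # <class 'numpy.ndarray'>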
Related
I'm stuck on one line of code and have been stalled on a project all weekend as a result.
I am working on a project that uses BERT for sentence classification. I have successfully trained the model, and I can test the results using the example code from run_classifier.py.
I can export the model using this example code (which has been reposted repeatedly, so I believe that it's right for this model):
def export(self):
    def serving_input_fn():
        label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
        input_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_ids')
        input_mask = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_mask')
        segment_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='segment_ids')
        input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
            'label_ids': label_ids, 'input_ids': input_ids,
            'input_mask': input_mask, 'segment_ids': segment_ids})()
        return input_fn
    self.estimator._export_to_tpu = False
    self.estimator.export_savedmodel(self.output_dir, serving_input_fn)
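One small aside that may help here: in TF 1.x, export_savedmodel returns the path of the timestamped export directory, so it can be captured rather than reconstructed by hand. A sketch:

export_dir = self.estimator.export_savedmodel(self.output_dir, serving_input_fn)
# export_dir is a bytes path like b'<output_dir>/1589418540'
predict_fn = predictor.from_saved_model(export_dir.decode())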
I can also load the exported estimator (where the export function saves the exported model into a subdirectory labeled with a timestamp):
predict_fn = predictor.from_saved_model(self.output_dir + timestamp_number)
However, for the life of me, I cannot figure out what to provide to predict_fn as input for inference. Here is my best code at the moment:
def predict(self):
    input = 'Test input'
    guid = 'predict-0'
    text_a = tokenization.convert_to_unicode(input)
    label = self.label_list[0]
    examples = [InputExample(guid=guid, text_a=text_a, text_b=None, label=label)]
    features = convert_examples_to_features(examples, self.label_list,
                                            self.max_seq_length, self.tokenizer)
    predict_input_fn = input_fn_builder(features, self.max_seq_length, False)
    predict_fn = predictor.from_saved_model(self.output_dir + timestamp_number)
    result = predict_fn(predict_input_fn)  # this generates an error
    print(result)
It doesn't seem to matter what I provide to predict_fn: the examples array, the features array, the predict_input_fn function. Clearly, predict_fn wants a dictionary of some type - but every single thing that I've tried generates an exception due to a tensor mismatch or other errors that generally mean: bad input.
I presumed that the from_saved_model function wants the same sort of input as the model test function - apparently, that's not the case.
It seems that lots of people have asked this very question - "how do I use an exported BERT TensorFlow model for inference?" - and have gotten no answers:
Thread #1
Thread #2
Thread #3
Thread #4
Any help? Thanks in advance.
Thank you for this post. Your serving_input_fn was the piece I was missing! Your predict function needs to be changed to feed the features dict directly, rather than use the predict_input_fn:
def predict(sentences):
    labels = [0, 1]
    input_examples = [
        run_classifier.InputExample(
            guid="",
            text_a=x,
            text_b=None,
            label=0
        ) for x in sentences]  # here, guid "" and label 0 are just dummy values
    input_features = run_classifier.convert_examples_to_features(
        input_examples, labels, MAX_SEQ_LEN, tokenizer
    )
    # this is where pred_input_fn is replaced
    all_input_ids = []
    all_input_mask = []
    all_segment_ids = []
    all_label_ids = []
    for feature in input_features:
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_segment_ids.append(feature.segment_ids)
        all_label_ids.append(feature.label_id)
    pred_dict = {
        'input_ids': all_input_ids,
        'input_mask': all_input_mask,
        'segment_ids': all_segment_ids,
        'label_ids': all_label_ids
    }
    predict_fn = predictor.from_saved_model('../testing/1589418540')
    result = predict_fn(pred_dict)
    print(result)
pred_sentences = [
"That movie was absolutely awful",
"The acting was a bit lacking",
"The film was creative and surprising",
"Absolutely fantastic!",
]
predict(pred_sentences)
{'probabilities': array([[-0.3579178 , -1.2010787 ],
[-0.36648935, -1.1814401 ],
[-0.30407643, -1.3386648 ],
[-0.45970002, -0.9982413 ],
[-0.36113673, -1.1936386 ],
[-0.36672896, -1.1808994 ]], dtype=float32), 'labels': array([0, 0, 0, 0, 0, 0])}
However, the probabilities returned for sentences in pred_sentences do not match the probabilities I get using estimator.predict(predict_input_fn), where estimator is the fine-tuned model being used within the same (Python) session. For example, [-0.27276006, -1.4324446] using estimator vs. [-0.26713806, -1.4505868] using predictor.
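One note on reading these numbers: each row of the 'probabilities' array sums to 1 only after exponentiation, so these appear to be log-probabilities (several BERT fine-tuning notebooks return log_softmax under that key). A quick check:

import numpy as np

log_probs = np.array([[-0.3579178, -1.2010787],
                      [-0.36648935, -1.1814401]])
probs = np.exp(log_probs)
print(probs)              # rows like [0.699, 0.301], [0.693, 0.307]
print(probs.sum(axis=1))  # each row sums to ~1.0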
I am getting an error in plant disease detection using a ResNet50 deep learning model. Every time, it raises an error message in decode_predictions.
Error:
"`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, 1000)). Found array with shape: (1, 38)"
import numpy as np
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = ResNet50(weights='imagenet', include_top=False, classes=38)
try:
    model = load_model('/content/drive/My Drive/color/checkpoints/ResNet50_model_weights.h5')
    print("model loaded")
except Exception:
    print("model not loaded")

img_path = '/content/drive/My Drive/color/test/0/appleblackrot188.jpg'
img = image.load_img(img_path, target_size=(300, 300))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
You can try using the preprocessing function:

import numpy as np
import tensorflow as tf

# Using the Keras wrapper on TensorFlow (it should be the same with standalone Keras).
IMAGE = []  # from your image source; I did it from the camera
toPred = tf.keras.applications.resnet50.preprocess_input(
    np.expand_dims(tf.keras.preprocessing.image.img_to_array(IMAGE), axis=0))
Maybe that can help :)
decode_predictions works only for ImageNet (number of classes = 1000). For these 38 classes of plants, you have to write your own decode_predictions based on the ground-truth labels you've assigned to each plant.
First, you need an index JSON file, and then you create a new decode_predictions function.
For example:
take HAM10000, which has 7 classes; you need to split the data into one folder per class like this (folder screenshot omitted),
then make an index JSON file like this:
{
  "0": ["AKIEC", "akiec"],
  "1": ["BCC", "bcc"],
  "2": ["BKL", "bkl"],
  "3": ["DF", "df"],
  "4": ["MEL", "mel"],
  "5": ["NV", "nv"],
  "6": ["VASC", "vasc"]
}
Then try this code
def decode_predictions(preds, top=4, class_list_path='/content/SKIN_DATA/HAM10000_index.json'):
if len(preds.shape) != 2 or preds.shape[1] != 7: # your classes number
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
index_list = json.load(open(class_list_path))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(index_list[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
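A usage sketch under the assumptions above (a 7-class model whose outputs match the HAM10000 index file; preds comes from model.predict as earlier):

preds = model.predict(x)  # shape (1, 7) for the HAM10000 example
for name_upper, name_lower, prob in decode_predictions(preds, top=3)[0]:
    print(name_upper, prob)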
I'm creating a basic LinearClassifier in TensorFlow, but it seems that my input function returns the whole dataset at the first iteration, instead of just one example and its label.
My TFRecord has the following structure (obtained with print(tf.train.Example.FromString(example.SerializeToString()))):
features {
  feature {
    key: "attackType"
    value {
      int64_list {
        value: 0
        value: 0
        ...
  feature {
    key: "dst_ip_addr"
    value {
      bytes_list {
        value: "OPENSTACK_NET"
        value: "EXT_SERVER"
        ...
It seems the TFRecord file is well formatted. However, when I try to parse it with the following snippet:
def input_fn_train(repeat=10, batch_size=32):
    """
    Reads dataset from tfrecord, applies parser with map
    """
    # Read the TFRecord file
    dataset = tf.data.TFRecordDataset([processed_bucket + processed_key])
    # Map the parser over the dataset, and batch results by up to batch_size
    dataset = dataset.map(_decode)
    dataset = dataset.repeat(repeat)
    dataset = dataset.batch(batch_size)
    return dataset
def _decode(serialized_ex):
    features = {
        'src_ip_addr': tf.FixedLenFeature(src_ip_size, tf.string),
        'src_pt': tf.FixedLenFeature(src_pt_size, tf.int64),
        'dst_ip_addr': tf.FixedLenFeature(dst_ip_size, tf.string),
        'dst_pt': tf.FixedLenFeature(dst_pt_size, tf.int64),
        'proto': tf.FixedLenFeature(proto_size, tf.string),
        'packets': tf.FixedLenFeature(packets_size, tf.int64),
        'subnet': tf.FixedLenFeature(subnet_size, tf.int64),
        'attackType': tf.FixedLenFeature(attack_type_size, tf.int64)
    }
    parsed_features = tf.parse_single_example(serialized_ex, features)
    label = parsed_features.pop('attackType')
    return parsed_features, label
sess = tf.Session()
it = input_fn_train().make_one_shot_iterator()
print(sess.run(it.get_next()))
It shows that it.get_next() returns
({'dst_ip_addr': array([[b'OPENSTACK_NET', b'EXT_SERVER',...
This is incorrect since it yields an array of arrays! The result should be
array([b'OPENSTACK_NET',...
Any thoughts? I've been trying to change the shape parameter of FixedLenFeature, with no success.
OK, it seems it was the dataset.batch command that created this behavior (it stacks batch_size examples, adding a leading dimension). I removed it, and it works fine now!
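For reference, this is the documented behavior of batching rather than a bug: dataset.batch(n) stacks n parsed examples, so every feature gains a leading batch dimension. A minimal TF 1.x sketch:

import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4])
print(ds.output_shapes)           # () -- scalar elements
print(ds.batch(2).output_shapes)  # (?,) -- a leading batch dimension appears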
I'm new to TensorFlow and trying to use the Estimator API for some simple classification experiments. I have a sparse dataset in libsvm format. The following input function works for small datasets:
import tensorflow as tf

def libsvm_input_function(file):
    def input_function():
        indexes_raw = []
        indicators_raw = []
        values_raw = []
        labels_raw = []
        i = 0
        for line in open(file, "r"):
            data = line.split(" ")
            label = int(data[0])
            for fea in data[1:]:
                id, value = fea.split(":")
                indexes_raw.append([i, int(id)])
                indicators_raw.append(int(1))
                values_raw.append(float(value))
            labels_raw.append(label)
            i = i + 1
        indexes = tf.SparseTensor(indices=indexes_raw,
                                  values=indicators_raw,
                                  dense_shape=[i, num_features])
        values = tf.SparseTensor(indices=indexes_raw,
                                 values=values_raw,
                                 dense_shape=[i, num_features])
        labels = tf.constant(labels_raw, dtype=tf.int32)
        return {"indexes": indexes, "values": values}, labels
    return input_function
However, for a dataset of a few GB size I get the following error:
ValueError: Cannot create a tensor proto whose content is larger than 2GB.
How can I avoid this error? How should I write an input function to read medium-sized sparse datasets (in libsvm format)?
When using an Estimator with libsvm data input, you can create a dense index list and a dense value list, then use feature_column.categorical_column_with_identity and feature_column.weighted_categorical_column to create the feature columns, and finally pass the feature columns to the Estimator. If your input feature length is variable, you can use padded_batch to handle it.
Here is some code:
## here is the input_fn
def input_fn(data_dir, is_training, batch_size):
    def parse_csv(value):
        ## some processing here to create the feature_indices list,
        ## the feature_values list, and the labels
        return {"index": feature_indices, "value": feature_values}, labels

    dataset = tf.data.Dataset.from_tensor_slices(your_filenames)
    ds = dataset.flat_map(
        lambda f: tf.data.TextLineDataset(f).map(parse_csv)
    )
    ds = ds.padded_batch(batch_size, ds.output_shapes, padding_values=(
        {
            "index": tf.constant(-1, dtype=tf.int32),
            "value": tf.constant(0, dtype=tf.float32),
        },
        tf.constant(False, dtype=tf.bool)
    ))
    return ds.repeat().prefetch(batch_size)

## create the feature columns
def build_model_columns():
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='index', num_buckets=your_feature_dim)
    sparse_columns = tf.feature_column.weighted_categorical_column(
        categorical_column=categorical_column, weight_feature_key='value')
    dense_columns = tf.feature_column.embedding_column(sparse_columns, your_embedding_dim)
    return [sparse_columns], [dense_columns]

## once the feature columns are created, you can put them into an estimator,
## e.g. dense_columns into a DNN and sparse_columns into a linear model
## (see the sketch after this code)

## for exporting a savedmodel
def raw_serving_input_fn():
    feature_spec = {"index": tf.placeholder(shape=[None, None], dtype=tf.int32),
                    "value": tf.placeholder(shape=[None, None], dtype=tf.float32)}
    return tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
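To make that comment concrete, here is a hedged sketch of wiring the returned columns into a combined linear+DNN estimator (data_dir and the hidden_units are illustrative placeholders):

sparse_cols, dense_cols = build_model_columns()
model = tf.estimator.DNNLinearCombinedClassifier(
    linear_feature_columns=sparse_cols,   # sparse/weighted columns -> linear part
    dnn_feature_columns=dense_cols,       # embedding columns -> DNN part
    dnn_hidden_units=[128, 64])
model.train(input_fn=lambda: input_fn(data_dir, True, 32))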
Another way: you can create your own custom feature column, like this: _SparseArrayCategoricalColumn
I have been using tensorflow.contrib.libsvm. Here's an example (I am using eager execution with generators):
import os
import tensorflow as tf
import tensorflow.contrib.libsvm as libsvm

def all_libsvm_files(folder_path):
    for file in os.listdir(folder_path):
        if file.endswith(".libsvm"):
            yield os.path.join(folder_path, file)

def load_libsvm_dataset(path_to_folder):
    return tf.data.TextLineDataset(list(all_libsvm_files(path_to_folder)))

def libsvm_iterator(path_to_folder):
    dataset = load_libsvm_dataset(path_to_folder)
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    yield libsvm.decode_libsvm(tf.reshape(next_element, (1,)),
                               num_features=666,
                               dtype=tf.float32,
                               label_dtype=tf.float32)
libsvm_iterator gives you a feature-label pair back on each iteration, from multiple files inside a folder that you specify.
I pushed up some test data to gcloud for prediction as a binary tfrecord file. Running my script, I got the error ('No JSON object could be decoded', 162). What do you think I am doing wrong?
To push a prediction job to gcloud, I use this script:
REGION=us-east1
MODEL_NAME=mymodel
VERSION=v_hopt_22
INPUT_PATH=gs://mydb/test-data.tfr
OUTPUT_PATH=gs://mydb/prediction.tfr
JOB_NAME=pred_${MODEL_NAME}_${VERSION}_b
args=" --model "$MODEL_NAME
args+=" --version "$VERSION
args+=" --data-format=TF_RECORD"
args+=" --input-paths "$INPUT_PATH
args+=" --output-path "$OUTPUT_PATH
args+=" --region "$REGION
gcloud ml-engine jobs submit prediction $JOB_NAME $args
test-data.tfr has been generated from a numpy array, like so:
import numpy as np

filename = './Datasets/test-data.npz'
data = np.load(filename)
features = data['X']  # features[channel, example, feature]
np_features = np.swapaxes(features, 0, 1)  # features[example, channel, feature]

import tensorflow as tf
import nnscoring.data as D

def floats_feature(arr):
    return tf.train.Feature(float_list=tf.train.FloatList(value=arr.flatten().tolist()))

writer = tf.python_io.TFRecordWriter("./Datasets/test-data.tfr")
for i, np_example in enumerate(np_features):
    if i % 1000 == 0: print(i)
    tf_feature = {
        ch: floats_feature(x)
        for ch, x in zip(D.channels, np_example)
    }
    tf_features = tf.train.Features(feature=tf_feature)
    tf_example = tf.train.Example(features=tf_features)
    writer.write(tf_example.SerializeToString())
writer.close()
Update (following yxshi):
I define the following serving function
def tfrecord_serving_input_fn():
    import tensorflow as tf
    seq_length = int(dt * sr)
    examples = tf.placeholder(tf.string, shape=())
    feat_map = {
        channel: tf.FixedLenSequenceFeature(shape=(seq_length,),
                                            dtype=tf.float32, allow_missing=True)
        for channel in channels
    }
    parsed = tf.parse_single_example(examples, features=feat_map)
    features = {
        channel: tf.expand_dims(tensor, -1)
        for channel, tensor in parsed.iteritems()
    }
    from collections import namedtuple
    InputFnOps = namedtuple("InputFnOps", "features labels receiver_tensors")
    tf.contrib.learn.utils.input_fn_utils.InputFnOps = InputFnOps
    return InputFnOps(features=features, labels=None, receiver_tensors=examples)
    # InputFnOps = tf.contrib.learn.utils.input_fn_utils.InputFnOps
    # return InputFnOps(features, None, parsed)
    # Error: InputFnOps has no attribute receiver_tensors
…which I pass to generate_experiment_fn like so:
export_strategies = [
    saved_model_export_utils.make_export_strategy(
        tfrecord_serving_input_fn,
        exports_to_keep=1,
        default_output_alternative_key=None,
    )]
gen_exp_fn = generate_experiment_fn(
    train_steps_per_iteration=args.train_steps_per_iteration,
    train_steps=args.train_steps,
    export_strategies=export_strategies
)
(aside: note the dirty patch of InputFnOps)
It looks like the input is not correctly specified in the inference graph. To use tf_record as input data format, your inference graph must accept strings as the input placeholder. In your case, you should have something like below in your inference code:
examples = tf.placeholder(tf.string, name='input', shape=(None,))
with tf.name_scope('inputs'):
    # the feature map must hold parsing specs (not tf.train.Feature values);
    # here each channel is a fixed-length float vector, mirroring the writer above
    feature_map = {
        ch: tf.FixedLenFeature(shape=(seq_length,), dtype=tf.float32)
        for ch in D.channels
    }
    parsed = tf.parse_example(examples, features=feature_map)
    f1 = parsed['feature_name_1']
    f2 = parsed['feature_name_2']
    ...
A close example is here:
https://github.com/GoogleCloudPlatform/cloudml-samples/blob/master/flowers/trainer/model.py#L253
Hope it helps.
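A hedged way to sanity-check such a graph locally before submitting the gcloud job: feed a couple of serialized records straight into the string placeholder. Here output_tensor is a hypothetical stand-in for whatever prediction op the model defines, and the TFRecord path matches the writer above:

import tensorflow as tf

records = list(tf.python_io.tf_record_iterator("./Datasets/test-data.tfr"))[:2]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # output_tensor: placeholder name for the model's prediction op (hypothetical)
    result = sess.run(output_tensor, feed_dict={examples: records})
    print(result)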