Making predictions on the Iris dataset - tensorflow

I have basic classification code for the Iris dataset.
import tensorflow as tf
import pandas as pd
COLUMN_NAMES = [
    'SepalLength',
    'SepalWidth',
    'PetalLength',
    'PetalWidth',
    'Species'
]

# Import training dataset
training_dataset = pd.read_csv('iris_training.csv', names=COLUMN_NAMES, header=0)
train_x = training_dataset.iloc[:, 0:4]
train_y = training_dataset.iloc[:, 4]

# Import testing dataset
test_dataset = pd.read_csv('iris_test.csv', names=COLUMN_NAMES, header=0)
test_x = test_dataset.iloc[:, 0:4]
test_y = test_dataset.iloc[:, 4]

columns_feat = [
    tf.feature_column.numeric_column(key='SepalLength'),
    tf.feature_column.numeric_column(key='SepalWidth'),
    tf.feature_column.numeric_column(key='PetalLength'),
    tf.feature_column.numeric_column(key='PetalWidth')
]

classifier = tf.estimator.DNNClassifier(
    feature_columns=columns_feat,
    # Two hidden layers of 10 nodes each.
    hidden_units=[10, 10],
    # The model is classifying 3 classes
    n_classes=3)

def train_function(inputs, outputs, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((dict(inputs), outputs))
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()

# Train the Model.
classifier.train(
    input_fn=lambda: train_function(train_x, train_y, 100),
    steps=1000)

def evaluation_function(attributes, classes, batch_size):
    attributes = dict(attributes)
    if classes is None:
        inputs = attributes
    else:
        inputs = (attributes, classes)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    assert batch_size is not None, "batch_size must not be None"
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()

# Evaluate the model.
eval_result = classifier.evaluate(
    input_fn=lambda: evaluation_function(test_x, test_y, 100))
I can evaluate the result, but how can I make a prediction on my own data? Right now I only get console info about loss, epochs and accuracy. For example, suppose I have everything except the species: I want to supply my own sepal length and so on, and get a prediction of the species back into another variable. Do I have to create variables like pred_x or pred_y (pandas DataFrame) and then put them into eval_result?

Is that what you mean? For example: new_samples = np.array([[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32). If you want to make predictions on new data like this, then you can refer to this code: TensorFlow-Iris-Classification

Like all estimator classes, the DNNClassifier class has a predict method that makes real-world predictions. The documentation is here.
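As a minimal sketch building on the code above (predict_x and its values are just made-up samples for illustration), you can reuse the existing evaluation_function with classes=None and iterate over the generator that predict returns:

predict_x = {
    'SepalLength': [6.4, 5.8],
    'SepalWidth': [3.2, 3.1],
    'PetalLength': [4.5, 5.0],
    'PetalWidth': [1.5, 1.7],
}

# classes=None makes evaluation_function batch only the features.
predictions = classifier.predict(
    input_fn=lambda: evaluation_function(predict_x, None, batch_size=100))

for pred in predictions:
    class_id = pred['class_ids'][0]                # index of the predicted species
    probability = pred['probabilities'][class_id]  # confidence for that class
    print(class_id, probability)

Each element yielded by predict is a dict, so the predicted species ends up in an ordinary Python variable rather than in eval_result.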

Related

How to get the labels from tensorflow dataset

ds_test = tf.data.experimental.make_csv_dataset(
    file_pattern = "./dfj_test/part-*.csv.gz",
    batch_size=batch_size, num_epochs=1,
    #column_names=use_cols,
    label_name='label_id',
    #select_columns= select_cols,
    num_parallel_reads=30, compression_type='GZIP',
    shuffle_buffer_size=12800)
This is my test set during training. After completing the model, I want to zip the columns of predictions and labels for the df_test.
preds = model.predict(df_test)
Getting the predictions is quite simple, and they come back as a numpy array. However, I don't know how to get the corresponding labels from the df_test.
I want to zip(preds, labels) for further analysis.
Any hint? Thanks.
(tf version 2.3.1)
You can map each example to return the field you want
# load some exemplary data
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
dataset = tf.data.experimental.make_csv_dataset(train_file_path, batch_size=100, num_epochs=1)
# get field by unbatching
labels_iterator= dataset.unbatch().map(lambda x: x['survived']).as_numpy_iterator()
labels = np.array(list(labels_iterator))
# get field by concatenating batches
labels_iterator= dataset.map(lambda x: x['survived']).as_numpy_iterator()
labels = np.concatenate(list(labels_iterator))
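As a rough sketch applied to the question's ds_test (assuming shuffling is disabled, e.g. shuffle=False in make_csv_dataset, so that the order of the labels matches the order of the predictions), the same idea pairs the labels with preds:

# ds_test yields (features, label) pairs because label_name was set,
# so map over both arguments and keep only the label.
labels_iterator = ds_test.map(lambda features, label: label).as_numpy_iterator()
labels = np.concatenate(list(labels_iterator))

# Pair each prediction with its label for further analysis.
pairs = list(zip(preds, labels))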

Classification metrics can't handle a mix of multiclass and continuous-multioutput targets

I am running a BERT pretrained model on a multiclass dataset for text classification purposes. Since it is multiclass, I cannot figure out how to generate a classification report. The solutions I found were this and this. I understand that since it's a multiclass classification I have to one-hot-encode the test_y values (which I did):
test_y = to_categorical(np.asarray(test_y.factorize()[0]))
but when I do
from sklearn.metrics import classification_report
print(classification_report(test_y, y_pred, digits=8))
I still get this error:
88 if len(y_type) > 1:
89 raise ValueError("Classification metrics can't handle a mix of {0} "
---> 90 "and {1} targets".format(type_true, type_pred))
91
92 # We can't have more than one value on y_type => The set is no more needed
ValueError: Classification metrics can't handle a mix of multilabel-indicator and continuous-multioutput targets
Why?
And if I try to calculate accuracy_score, I get 0.0 accuracy (but my accuracy is around 60%):
from sklearn.metrics import accuracy_score
y_pred = np.argmax(y_pred, axis=1)
accuracy_score(test_y, y_pred)
>> 0.0
Why?
Details of the model are given below:
train_test_split
train, test, train_y, test_y = train_test_split(df['text'], df['label'],test_size = 0.3)
Model:
def build_model(bert_layer, max_len=512):
    input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
    input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
    segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")

    _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
    clf_output = sequence_output[:, 0, :]
    #out = Dense(1, activation='sigmoid')(clf_output)
    out = Dense(8, activation='sigmoid')(clf_output)

    model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
    model.compile(Adam(lr=2e-6), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Model.fit
train_history = model.fit(train_input, train_labels, validation_split=0.2, epochs=1,batch_size=16 )
model.predict
y_pred = model.predict(test_input)
Shape of parameters
print(type(y_pred))
print(y_pred.shape)
>> <class 'numpy.ndarray'>
>> (621,)
print(type(test_y)) #before running to_categorical
print(test_y.shape)
>> <class 'pandas.core.series.Series'>
>>(621,)
Well, your output layer is defined as out = Dense(1, activation='sigmoid')(clf_output), which means there is a single output node followed by a sigmoid activation. This is meant for training on a binary classification or regression objective, where the output value is a real number between 0 and 1. To turn that into binary labels, apply a threshold. This can be done with
threshold = 0.5  # this can be changed; for a simple example, let us use 0.5
y_pred = np.where(y_pred < threshold, 0, 1)
Or, if it is a multiclass problem, then change out = Dense(1, activation='sigmoid')(clf_output) to out = Dense(number_of_classes, activation='sigmoid')(clf_output)
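As a sketch of how the metrics then line up in the multiclass case (assuming test_y is the one-hot-encoded array from to_categorical and y_pred contains one score per class), convert both back to class indices before calling the sklearn metrics:

import numpy as np
from sklearn.metrics import accuracy_score, classification_report

y_pred_classes = np.argmax(y_pred, axis=1)  # predicted class index per sample
y_true_classes = np.argmax(test_y, axis=1)  # undo the one-hot encoding

print(accuracy_score(y_true_classes, y_pred_classes))
print(classification_report(y_true_classes, y_pred_classes, digits=8))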

How to use tensorflow feature_columns as input to a keras model

Tensorflow's feature_columns API is quite useful for non-numerical feature processing. However, the current API doc is more about using feature_columns with the tensorflow Estimator. Is there a way to use feature_columns for categorical feature representation and then build a model based on tf.keras?
The only reference I found is the following tutorial. It shows how to feed feature columns to a Keras Sequential model: Link
The code snippet is as follows:
from tensorflow.python.feature_column import feature_column_v2 as fc
feature_columns = [fc.embedding_column(ccv, dimension=3), ...]
feature_layer = fc.FeatureLayer(feature_columns)
model = tf.keras.Sequential([
    feature_layer,
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(64, activation=tf.nn.relu),
    tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
...
model.fit(dataset, steps_per_epoch=8) # dataset is created from tensorflow Dataset API
The question is how to use a custom model with the Keras functional model API. I tried the following, but it did not work (tensorflow version 1.12):
feature_layer = fc.FeatureLayer(feature_columns)
dense_features = feature_layer(features) # features is a dict of ndarrays in dataset
layer1 = tf.keras.layers.Dense(128, activation=tf.nn.relu)(dense_features)
layer2 = tf.keras.layers.Dense(64, activation=tf.nn.relu)(layer1)
output = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(layer2)
model = Model(inputs=dense_features, outputs=output)
The error log:
ValueError: Input tensors to a Model must come from `tf.layers.Input`. Received: Tensor("feature_layer/concat:0", shape=(4, 3), dtype=float32) (missing previous layer metadata).
I don't know how to transform feature columns into a Keras model's input.
The behavior you want can be achieved by combining tf.feature_column and the Keras functional API, although it is not really covered in the TF docs.
This works at least in TF 2.0.0-beta1, but may be changed or even simplified in further releases.
Please check out the issue in the TensorFlow github repository Unable to use FeatureColumn with Keras Functional API #27416. There you will find useful comments about tf.feature_column and the Keras functional API.
Since you ask about the general approach, I will just copy the example snippet from the link above. Update: the code below should work.
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
#!pip install tensorflow==2.0.0-alpha0
import tensorflow as tf
from tensorflow import feature_column
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
csv_file = tf.keras.utils.get_file('heart.csv', 'https://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
dataframe = pd.read_csv(csv_file, nrows = 10000)
dataframe.head()
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
# Define method to create tf.data dataset from Pandas Dataframe
# This worked with tf 2.0 but does not work with tf 2.2
def df_to_dataset_tf_2_0(dataframe, label_column, shuffle=True, batch_size=32):
    dataframe = dataframe.copy()
    #labels = dataframe.pop(label_column)
    labels = dataframe[label_column]
    ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(dataframe))
    ds = ds.batch(batch_size)
    return ds

def df_to_dataset(dataframe, label_column, shuffle=True, batch_size=32):
    dataframe = dataframe.copy()
    labels = dataframe.pop(label_column)
    #labels = dataframe[label_column]
    ds = tf.data.Dataset.from_tensor_slices((dataframe.to_dict(orient='list'), labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(dataframe))
    ds = ds.batch(batch_size)
    return ds
batch_size = 5 # A small batch sized is used for demonstration purposes
train_ds = df_to_dataset(train, label_column = 'target', batch_size=batch_size)
val_ds = df_to_dataset(val,label_column = 'target', shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, label_column = 'target', shuffle=False, batch_size=batch_size)
age = feature_column.numeric_column("age")
feature_columns = []
feature_layer_inputs = {}
# numeric cols
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
    feature_columns.append(feature_column.numeric_column(header))
    feature_layer_inputs[header] = tf.keras.Input(shape=(1,), name=header)
# bucketized cols
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35])
feature_columns.append(age_buckets)
# indicator cols
thal = feature_column.categorical_column_with_vocabulary_list(
    'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
feature_layer_inputs['thal'] = tf.keras.Input(shape=(1,), name='thal', dtype=tf.string)
# embedding cols
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# crossed cols
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
feature_layer_outputs = feature_layer(feature_layer_inputs)
x = layers.Dense(128, activation='relu')(feature_layer_outputs)
x = layers.Dense(64, activation='relu')(x)
baggage_pred = layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs=[v for v in feature_layer_inputs.values()], outputs=baggage_pred)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(train_ds)
If you use the tensorflow Dataset API, this code works well:
feature_layer = keras.layers.DenseFeatures(feature_columns=feature_columns)
train_dataset = train_dataset.map(lambda x, y: (feature_layer(x), y))
test_dataset = test_dataset.map(lambda x, y: (feature_layer(x), y))
model.fit(train_dataset, epochs=, steps_per_epoch=,  # all_data/batch_num =
          validation_data=test_dataset,
          validation_steps=)
tf.feature_column.input_layer
Use this function; its API doc has a sample.
You can transform feature_columns into a Tensor and then use it in a Model().
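A rough sketch of that idea in TF 1.x graph mode (here features is assumed to be the dict of feature tensors coming from a tf.data iterator, and the layer sizes are just for illustration):

# input_layer turns the dict of raw feature tensors plus feature_columns
# into a single dense tensor.
dense_features = tf.feature_column.input_layer(features, feature_columns)

# Ordinary layers can then be applied to that tensor.
x = tf.keras.layers.Dense(128, activation=tf.nn.relu)(dense_features)
output = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(x)

Note that this produces a plain tensor rather than a Keras Input, so by itself it does not avoid the Model(inputs=...) error shown above; it fits more naturally inside an Estimator model_fn or a manual graph/session setup.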
I have recently been reading this document for the TensorFlow 2.0 alpha version. It has examples using Keras together with the feature column API. Not sure if TF 2.0 is what you are going to use.

How to load MNIST via TensorFlow (including download)?

The TensorFlow documentation for MNIST recommends multiple different ways to load the MNIST dataset:
https://www.tensorflow.org/tutorials/layers
https://www.tensorflow.org/versions/r1.2/get_started/mnist/beginners
https://www.tensorflow.org/versions/r1.2/get_started/mnist/pros
All of the ways described in the documentation throw many deprecation warnings with TensorFlow 1.8.
The way I'm currently loading MNIST and creating batches for training:
class MNIST:
    def __init__(self, optimizer):
        ...
        self.mnist_dataset = input_data.read_data_sets("/tmp/data/", one_hot=True)
        self.test_data = self.mnist_dataset.test.images.reshape((-1, self.timesteps, self.num_input))
        self.test_label = self.mnist_dataset.test.labels
        ...

    def train_run(self, sess):
        batch_input, batch_output = self.mnist_dataset.train.next_batch(self.batch_size, shuffle=True)
        batch_input = batch_input.reshape((self.batch_size, self.timesteps, self.num_input))
        _, loss = sess.run(fetches=[self.train_step, self.loss], feed_dict={self.input_placeholder: batch_input, self.output_placeholder: batch_output})
        ...

    def test_run(self, sess):
        loss = sess.run(fetches=[self.loss], feed_dict={self.input_placeholder: self.test_data, self.output_placeholder: self.test_label})
        ...
How could I do exactly the same thing, just with the currently recommended way of doing this?
I couldn't find any documentation on this.
It seems to me that the new way is something along the lines of:
train, test = tf.keras.datasets.mnist.load_data()
self.mnist_train_ds = tf.data.Dataset.from_tensor_slices(train)
self.mnist_test_ds = tf.data.Dataset.from_tensor_slices(test)
But how can I use these datasets in my train_run and test_run methods?
An example of loading the MNIST dataset using TF dataset API:
Create a mnist dataset to load train, valid and test images:
You can create a dataset for numpy inputs, either using Dataset.from_tensor_slices or Dataset.from_generator. Dataset.from_tensor_slices adds the whole dataset to the computational graph, so we will use Dataset.from_generator instead.
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
def create_mnist_dataset(data, labels, batch_size):
    def gen():
        for image, label in zip(data, labels):
            yield image, label
    ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28, 28), ()))
    return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
A feedable iterator that can toggle between training and validation
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle, train_dataset.output_types, train_dataset.output_shapes)
image, label = iterator.get_next()
train_iterator = train_dataset.make_one_shot_iterator()
valid_iterator = valid_dataset.make_one_shot_iterator()
A sample run:
#A toy network
y = tf.layers.dense(tf.layers.flatten(image),1,activation=tf.nn.relu)
loss = tf.losses.mean_squared_error(tf.squeeze(y), label)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # The `Iterator.string_handle()` method returns a tensor that can be evaluated
    # and used to feed the `handle` placeholder.
    train_handle = sess.run(train_iterator.string_handle())
    valid_handle = sess.run(valid_iterator.string_handle())

    # Run training
    train_loss, train_img, train_label = sess.run([loss, image, label],
                                                  feed_dict={handle: train_handle})
    # train_img.shape = (10, 28, 28)

    # Run validation
    valid_pred, valid_img = sess.run([y, image],
                                     feed_dict={handle: valid_handle})
    # valid_img.shape = (20, 28, 28)

Tensorflow: Input pipeline with sparse data for the SVM estimator

Introduction:
I am trying to train the tensorflow svm estimator tensorflow.contrib.learn.python.learn.estimators.svm with sparse data. Sample usage with sparse data can be found in the GitHub repo at tensorflow/contrib/learn/python/learn/estimators/svm_test.py#L167 (I am not allowed to post more links, so here is the relative path).
The svm estimator expects the parameters example_id_column and feature_columns, where the feature columns should be derived from the class FeatureColumn, such as tf.contrib.layers.feature_column.sparse_column_with_hash_bucket. See the GitHub repo at tensorflow/contrib/learn/python/learn/estimators/svm.py#L85 and the documentation at tensorflow.org at python/contrib.layers#Feature_columns.
Question:
How do I have to set up my input pipeline to format sparse data in such a way that I can use one of the tf.contrib.layers feature_columns as input for the svm estimator?
What would a dense input function with many features look like?
Background
The data I use is the a1a dataset from the LIBSVM website. The dataset has 123 features (which would correspond to 123 feature_columns if the data were dense). I wrote a user op to read the data, similar to tf.decode_csv() but for the LIBSVM format. The op returns the labels as a dense tensor and the features as a sparse tensor. My input pipeline:
NUM_FEATURES = 123
batch_size = 200
# my op to parse the libsvm data
decode_libsvm_module = tf.load_op_library('./libsvm.so')
def input_pipeline(filename_queue, batch_size):
    with tf.name_scope('input'):
        reader = tf.TextLineReader(name="TextLineReader_")
        _, libsvm_row = reader.read(filename_queue, name="libsvm_row_")
        min_after_dequeue = 1000
        capacity = min_after_dequeue + 3 * batch_size
        batch = tf.train.shuffle_batch([libsvm_row], batch_size=batch_size,
                                       capacity=capacity,
                                       min_after_dequeue=min_after_dequeue,
                                       name="text_line_batch_")
        labels, sp_indices, sp_values, sp_shape = \
            decode_libsvm_module.decode_libsvm(records=batch,
                                               num_features=123,
                                               OUT_TYPE=tf.int64,
                                               name="Libsvm_decoded_")
        # Return the features as sparse tensor and the labels as dense
        return tf.SparseTensor(sp_indices, sp_values, sp_shape), labels
Here is an example batch with batch_size = 5.
def input_fn(dataset_name):
    maybe_download()
    filename_queue_train = tf.train.string_input_producer([dataset_name],
                                                          name="queue_t_")
    features, labels = input_pipeline(filename_queue_train, batch_size)
    return {
        'example_id': tf.as_string(tf.range(1, 123, 1, dtype=tf.int64)),
        'features': features
    }, labels
This is what I tried so far:
with tf.Session().as_default() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    feature_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'features', hash_bucket_size=1000, dtype=tf.int64)

    svm_classifier = svm.SVM(feature_columns=[feature_column],
                             example_id_column='example_id',
                             l1_regularization=0.0,
                             l2_regularization=1.0)

    svm_classifier.fit(input_fn=lambda: input_fn(TRAIN),
                       steps=30)
    accuracy = svm_classifier.evaluate(
        input_fn=lambda: input_fn(features, labels),
        steps=1)['accuracy']
    print(accuracy)

    coord.request_stop()
    coord.join(threads)
    sess.close()
Here's an example, with made-up data, that works for me in TensorFlow 1.1.0-rc2. I think my comment was misleading; you're best off converting the ~100 binary features to real-valued features (tf.sparse_tensor_to_dense) and using a real_valued_column, since sparse_column_with_integerized_feature is hiding most of the useful information from the SVM Estimator.
import tensorflow as tf
batch_size = 10
num_features = 123
num_examples = 100
def input_fn():
    example_ids = tf.random_uniform(
        [batch_size], maxval=num_examples, dtype=tf.int64)
    # Construct a SparseTensor with features
    dense_features = (example_ids[:, None]
                      + tf.range(num_features, dtype=tf.int64)[None, :]) % 2
    non_zeros = tf.where(tf.not_equal(dense_features, 0))
    sparse_features = tf.SparseTensor(
        indices=non_zeros,
        values=tf.gather_nd(dense_features, non_zeros),
        dense_shape=[batch_size, num_features])
    features = {
        'some_sparse_features': tf.sparse_tensor_to_dense(sparse_features),
        'example_id': tf.as_string(example_ids)}
    labels = tf.equal(dense_features[:, 0], 1)
    return features, labels
svm = tf.contrib.learn.SVM(
    example_id_column='example_id',
    feature_columns=[
        tf.contrib.layers.real_valued_column(
            'some_sparse_features')],
    l2_regularization=0.1, l1_regularization=0.5)

svm.fit(input_fn=input_fn, steps=1000)

positive_example = lambda: {
    'some_sparse_features': tf.sparse_tensor_to_dense(
        tf.SparseTensor([[0, 0]], [1], [1, num_features])),
    'example_id': ['a']}
print(svm.evaluate(input_fn=input_fn, steps=20))
print(next(svm.predict(input_fn=positive_example)))

negative_example = lambda: {
    'some_sparse_features': tf.sparse_tensor_to_dense(
        tf.SparseTensor([[0, 0]], [0], [1, num_features])),
    'example_id': ['b']}
print(next(svm.predict(input_fn=negative_example)))
Prints:
{'accuracy': 1.0, 'global_step': 1000, 'loss': 1.0645389e-06}
{'logits': array([ 0.01612902], dtype=float32), 'classes': 1}
{'logits': array([ 0.], dtype=float32), 'classes': 0}
Since TensorFlow 1.5.0 there has been a built-in function to read LIBSVM data;
refer to my answer here:
https://stackoverflow.com/a/56354308/3885491