I implemented a custom constraint and would like to let Keras handle the serialization and deserialization. However, I failed to run the commented lines below:
import tensorflow as tf
from tensorflow.keras import backend

#tf.keras.utils.register_keras_serializable(package='mypackage', name='UnitL1Norm')
class UnitL1Norm(tf.keras.constraints.Constraint):
    def __init__(self, axis=0):
        self.axis = axis

    def __call__(self, w):
        # backend.epsilon is a function and has to be called
        return w / (
            backend.epsilon() + tf.reduce_sum(
                backend.abs(w), axis=self.axis, keepdims=True
            )
        )

    def get_config(self):
        return {'axis': self.axis}
# ValueError: Unknown constraint: UnitL1Norm. Please ensure this object is passed to the `custom_objects` argument. See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.
#a = tf.keras.constraints.get(dict(class_name='mypackage.UnitL1Norm', config=dict(axis=1)))
a = tf.keras.layers.Dense(3, kernel_constraint=UnitL1Norm(1))
I have read through this section but still have no idea how to achieve my goal. Could anyone give me a hint? Thanks!
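For reference, my current understanding from the docs (a sketch I have not verified end to end) is that register_keras_serializable is meant to be applied as a class decorator, and that the registered lookup key becomes 'package>name':

@tf.keras.utils.register_keras_serializable(package='mypackage', name='UnitL1Norm')
class UnitL1Norm(tf.keras.constraints.Constraint):
    ...  # same body as above

# With the class registered, deserializing by the registered key should work:
a = tf.keras.constraints.get(
    dict(class_name='mypackage>UnitL1Norm', config=dict(axis=1)))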
Related
I have a dataset which I have to process so that it works with a convolutional neural network in PyTorch (I'm completely new to PyTorch). The data is stored in a dataframe with a column of pictures (28 x 28 ndarrays with int32 entries) and a column with the class labels. The pixels of the images only take the values +1 and -1 (since it is simulation data of a classical 2D Ising model). The dataframe looks like this.
I imported the following (a lot of this is not relevant for now, but I included everything for completeness; "data_loader" is a custom .py file):
import numpy as np
import matplotlib.pyplot as plt
import data_loader
import pandas as pd
import torch
import torchvision.transforms as T
from torchvision.utils import make_grid
from torch.nn import Module
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import MaxPool2d
from torch.nn import ReLU
from torch.nn import LogSoftmax
from torch import flatten
from sklearn.metrics import classification_report
import time as time
from torch.utils.data import DataLoader, Dataset
Then, I want to get this into the correct shape in order to make it usable for PyTorch. I do this by defining the following class:
class MetropolisDataset(Dataset):
    def __init__(self, data_frame, transform=None):
        self.data_frame = data_frame
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        label = self.data_frame['label'].iloc[idx]
        image = self.data_frame['image'].iloc[idx]
        image = np.array(image)
        if self.transform:
            image = self.transform(image)
        return (image, label)
I create instances of this class as follows:
train_set = MetropolisDataset(data_frame=df_train,
                              transform=T.Compose([T.ToPILImage(), T.ToTensor()]))
validation_set = MetropolisDataset(data_frame=df_validation,
                                   transform=T.Compose([T.ToPILImage(), T.ToTensor()]))
test_set = MetropolisDataset(data_frame=df_test,
                             transform=T.Compose([T.ToPILImage(), T.ToTensor()]))
The problem does not arise here yet, because I am able to read out and display images from these instances of the class defined above.
Then, as far as I found out, it is necessary to pass this through PyTorch's DataLoader, which I do as follows:
batch_size = 64
train_dl = DataLoader(train_set, batch_size, shuffle=True, num_workers=3, pin_memory=True)
validation_dl = DataLoader(validation_set, batch_size, shuffle=True, num_workers=3, pin_memory=True)
test_dl = DataLoader(test_set, batch_size, shuffle=True, num_workers=3, pin_memory=True)
However, when I try to use these DataLoader instances, simply nothing happens. I neither get an error, nor does the computation seem to get anywhere. I tried to run a CNN, but it does not seem to compute anything. Something else I tried was to show some sample images with the code provided by this article, but the same issue occurs. The sample code is:
def show_images(images, nmax=10):
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.set_xticks([]); ax.set_yticks([])
    ax.imshow(make_grid((images.detach()[:nmax]), nrow=8).permute(1, 2, 0))

def show_batch(dl, nmax=64):
    for images, labels in dl:  # the dataset yields (image, label) pairs
        show_images(images, nmax)
        break
show_batch(test_dl)
It seems that there is some error in the implementation of my MetropolisDataset class or with the DataLoader itself. How could this problem be solved?
As mentioned in the comments, the problem was partly solved by setting num_workers to zero, since I was working in a Jupyter notebook, as answered here. However, this left one further problem open: I got errors when I wanted to use the DataLoader to run a CNN. The issue was that my data consisted of int32 numbers instead of float32. I do not include further code, because this was related directly to my data; the issue was (as so often) merely a wrong datatype.
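In code, the two fixes amount to something like the following sketch (reusing the names from the question):

# Run the DataLoader single-process inside Jupyter to avoid the silent hang.
train_dl = DataLoader(train_set, batch_size, shuffle=True, num_workers=0, pin_memory=True)

# And inside MetropolisDataset.__getitem__, cast the int32 spins to float32
# so they match the CNN's float weights:
image = np.array(image, dtype=np.float32)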
I'm converting a CSV file into a TFRecords file like this:
File: ./dataset/csv/file.csv
feature_1, feature_2, output
1, 1, 1
2, 2, 2
3, 3, 3
import tensorflow as tf
import csv
import os

print(tf.__version__)

def create_csv_iterator(csv_file_path, skip_header):
    with tf.io.gfile.GFile(csv_file_path) as csv_file:
        reader = csv.reader(csv_file)
        if skip_header:  # Skip the header
            next(reader)
        for row in reader:
            yield row
def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def create_example(row):
    """Returns a tensorflow.Example protocol buffer object."""
    features = {}
    for feature_index, feature_name in enumerate(["feature_1", "feature_2", "output"]):
        feature_value = row[feature_index]
        features[feature_name] = _int64_feature(int(feature_value))
    return tf.train.Example(features=tf.train.Features(feature=features))
def create_tfrecords_file(input_csv_file):
    """Creates a TFRecords file for the given input data."""
    output_tfrecord_file = input_csv_file.replace("csv", "tfrecords")
    writer = tf.io.TFRecordWriter(output_tfrecord_file)

    print("Creating TFRecords file at", output_tfrecord_file, "...")
    for i, row in enumerate(create_csv_iterator(input_csv_file, skip_header=True)):
        if len(row) == 0:
            continue
        example = create_example(row)
        content = example.SerializeToString()
        writer.write(content)
    writer.close()
    print("Finished writing", output_tfrecord_file)

create_tfrecords_file("./dataset/csv/file.csv")
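As a quick sanity check (a short sketch, assuming the file written above), the records can be parsed back with plain tf.data:

feature_spec = {
    name: tf.io.FixedLenFeature([], tf.int64)
    for name in ["feature_1", "feature_2", "output"]
}
# .replace("csv", "tfrecords") rewrote both the directory and the extension,
# hence this path.
for serialized in tf.data.TFRecordDataset("./dataset/tfrecords/file.tfrecords"):
    print(tf.io.parse_single_example(serialized, feature_spec))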
Then I'll read the generated TFRecords files using the ImportExampleGen class:
import os
import absl
import tensorflow_model_analysis as tfma
tf.get_logger().propagate = False
from tfx import v1 as tfx
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip
context = InteractiveContext()
example_gen = tfx.components.ImportExampleGen(input_base="./dataset/tfrecords")
context.run(example_gen, enable_cache=True)
statistics_gen = tfx.components.StatisticsGen(
    examples=example_gen.outputs['examples'])
context.run(statistics_gen, enable_cache=True)

schema_gen = tfx.components.SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    infer_feature_shape=False)
context.run(schema_gen, enable_cache=True)
File: ./transform.py
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
        inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
        Map from string feature key to transformed feature operations.
    """
    print(inputs)
    return inputs
transform = tfx.components.Transform(
    examples=example_gen.outputs['examples'],
    schema=schema_gen.outputs['schema'],
    module_file=os.path.abspath("./transform.py"))
context.run(transform, enable_cache=True)
The print in the preprocessing_fn function shows that the inputs are SparseTensor objects. My question is: why? As far as I can tell, my dataset's samples are dense, so they should be Tensors instead. Am I doing something wrong?
For anyone else who might be struggling with the same issue, I found the culprit. It's the SchemaGen class. This is how I was instantiating its object:
schema_gen = tfx.components.SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    infer_feature_shape=False)
I don't know what the use case is for asking the SchemaGen class not to infer the shape of the features, but the tutorial I was following had it set to False, and I had just copied and pasted the same thing. Comparing with some other tutorials, I realized that it could be the reason why I was getting SparseTensors.
So, if you let SchemaGen infer the shape of your features, or if you load a hand-crafted schema in which you've set the shapes yourself, you'll get a Tensor in your preprocessing_fn. But if the shapes are not set, the features will be instances of SparseTensor.
For the sake of completeness, this is the fixed snippet:
schema_gen = tfx.components.SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    infer_feature_shape=True)
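For illustration, if you do end up with SparseTensors anyway, preprocessing_fn can densify them defensively along these lines (a sketch I have not run against this exact pipeline; with the schema fix above it should not be necessary):

import tensorflow as tf

def preprocessing_fn(inputs):
    outputs = {}
    for key, value in inputs.items():
        if isinstance(value, tf.SparseTensor):
            # Each example carries a single scalar here, so densifying is safe.
            value = tf.sparse.to_dense(value)
        outputs[key] = value
    return outputs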
I am trying to get a GLCM implementation running in a custom Keras layer in a reasonably fast time. So far, I took the _glcm_loop from the skimage implementation, reduced it to what I need, and put it into a basic layer like this:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras import layers
from skimage.feature import *
from numpy import array
from math import sin, cos
from time import time
import matplotlib.pyplot as plt
class GLCMLayer(keras.layers.Layer):
    def __init__(self, greylevels=32, angles=[0], distances=[1], name=None, **kwargs):
        self.greylevels = greylevels
        self.angles = angles
        self.distances = distances
        super(GLCMLayer, self).__init__(name=name, **kwargs)

    def _glcm_loop(self, image, distances, angles, levels, out):
        rows = image.shape[0]
        cols = image.shape[1]
        for a_idx in range(len(angles)):
            angle = angles[a_idx]
            for d_idx in range(len(distances)):
                distance = distances[d_idx]
                offset_row = round(sin(angle) * distance)
                offset_col = round(cos(angle) * distance)
                start_row = max(0, -offset_row)
                end_row = min(rows, rows - offset_row)
                start_col = max(0, -offset_col)
                end_col = min(cols, cols - offset_col)
                for r in range(start_row, end_row):
                    for c in range(start_col, end_col):
                        i = image[r, c]
                        row = r + offset_row
                        col = c + offset_col
                        j = image[row, col]
                        out[i, j, d_idx, a_idx] += 1

    def call(self, inputs):
        P = np.zeros((self.greylevels, self.greylevels, len(self.distances), len(self.angles)),
                     dtype=np.uint32, order='C')
        self._glcm_loop(inputs, self.distances, self.angles, self.greylevels, P)
        return P

    def get_config(self):
        # keys must match the __init__ arguments so the layer can round-trip
        config = {
            'angles': self.angles,
            'distances': self.distances,
            'greylevels': self.greylevels,
        }
        base_config = super(GLCMLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
My execution code looks like this:
def quant(img, greylevels):
    return array(img) // (256 // greylevels)

if __name__ == "__main__":
    source_file = "<some source file>"
    img_raw = image.load_img(source_file, target_size=(150, 150), color_mode="grayscale")
    img = quant(img_raw, 32)

    layer = GLCMLayer()
    start = time()
    aug = layer(img)
    tf.print(time() - start)
This is my first step: getting it to run as a preprocessing layer. The second step will be to modify it so it can also run as a hidden layer inside a model. That is why I haven't put it into a complete model yet; I feel there will be additional changes required when doing so.
For some reason, the execution time is about 15-20 seconds. Executing the code on the CPU without the layer takes about 0.0009 seconds. Obviously, something is going wrong here.
I am fairly new to tf and keras, so I fear I am missing something about how the framework is meant to be used. In order to resolve it, I read about (which doesn't mean I understood) the following:
do not use np-functions inside tensorflow, but tf-functions instead,
use tf.Variable,
use tf.Data,
unfolding is not possible in some way (whatever that means)
I tried a little here and there, but couldn't get any of it running, instead hitting various exceptions. So my questions are:
What is the correct way to use tf-functions in a GLCM to get the best performance on the GPU?
What do I need to take care of when using the layer in a complete model?
From that point on, I should hopefully be able to then implement the GLCM properties.
Any help is greatly appreciated.
(Disclaimer: I assume there is a lot of other stuff that is not optimal yet; if anything comes to mind, just add it.)
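For concreteness, this is the loop-free direction I am aiming for with a single distance/angle pair (a rough sketch using only tf ops, not yet validated against the skimage reference):

import math
import tensorflow as tf

def tf_glcm(image, levels=32, distance=1, angle=0.0):
    # image: 2-D integer tensor of grey levels in [0, levels).
    dr = int(round(math.sin(angle) * distance))
    dc = int(round(math.cos(angle) * distance))
    rows, cols = tf.shape(image)[0], tf.shape(image)[1]
    # Reference pixels and their offset neighbours, cropped so both stay in bounds.
    ref = tf.slice(image, [max(0, -dr), max(0, -dc)], [rows - abs(dr), cols - abs(dc)])
    nbr = tf.slice(image, [max(0, dr), max(0, dc)], [rows - abs(dr), cols - abs(dc)])
    ref = tf.cast(tf.reshape(ref, [-1]), tf.int32)
    nbr = tf.cast(tf.reshape(nbr, [-1]), tf.int32)
    # Encode each (i, j) pair as one index and histogram all pairs at once,
    # replacing the per-pixel Python loop with a single vectorised op.
    counts = tf.math.bincount(ref * levels + nbr, minlength=levels * levels)
    return tf.reshape(counts, [levels, levels])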
While doing GP regression in GPflow 2.0, I want to set hard bounds on the lengthscale (i.e. limit the lengthscale optimization range). Following this thread (Setting hyperparameter optimization bounds in GPflow 2.0), I constructed a TensorFlow Bijector chain (see the bounded_lengthscale function below). However, the bijector chain below does not prevent the model from optimizing outside the supposed bounds. What do I need to change to make the bounded_lengthscale function put hard bounds on the optimization?
Below is the MRE:
import gpflow
import numpy as np
from gpflow.utilities import print_summary
import tensorflow as tf
from tensorflow_probability import bijectors as tfb
# Noisy training data
noise = 0.3
X = np.arange(-3, 4, 1).reshape(-1, 1).astype('float64')
Y = (np.sin(X) + noise * np.random.randn(*X.shape)).reshape(-1,1)
def bounded_lengthscale(low, high, lengthscale):
    """Returns lengthscale Parameter with optimization bounds."""
    affine = tfb.AffineScalar(shift=low, scale=high - low)
    sigmoid = tfb.Sigmoid()
    logistic = tfb.Chain([affine, sigmoid])
    parameter = gpflow.Parameter(lengthscale, transform=logistic, dtype=tf.float32)
    parameter = tf.cast(parameter, dtype=tf.float64)
    return parameter
# build GPR model
k = gpflow.kernels.Matern52()
m = gpflow.models.GPR(data=(X, Y), kernel=k)
m.kernel.lengthscale.assign(bounded_lengthscale(0, 1, 0.5))
print_summary(m)
# train model
@tf.function(autograph=False)
def objective_closure():
    return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt_logs = opt.minimize(objective_closure, m.trainable_variables)
print_summary(m)
Thanks!
tfb.Sigmoid now accepts low and high parameters, as @Brian Patton predicted in a comment.
Therefore, the code can be simplified to:
from tensorflow_probability import bijectors as tfb

def bounded_lengthscale(low, high, lengthscale):
    """Make lengthscale tfp Parameter with optimization bounds."""
    sigmoid = tfb.Sigmoid(low, high)
    parameter = gpflow.Parameter(lengthscale, transform=sigmoid, dtype='float32')
    return parameter

m.kernel.lengthscale = bounded_lengthscale(0, 1, 0.5)
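As a quick check (a sketch reusing the question's model), the summary should now list the Sigmoid transform for the lengthscale, and optimisation should keep it inside (0, 1):

print_summary(m)  # the lengthscale row should now show the Sigmoid transform

opt = gpflow.optimizers.Scipy()
opt.minimize(lambda: -m.log_marginal_likelihood(), m.trainable_variables)
assert 0 < m.kernel.lengthscale.numpy() < 1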
In the MRE you assign a new value to a Parameter that already exists (and does not have the logistic transform). This value is the constrained-space value that the Parameter constructed with the logistic transform holds, but the transform isn't carried over. Instead, you need to replace the Parameter without the logistic transform with one that has the transform you want: m.kernel.lengthscale = bounded_lengthscale(0, 1, 0.5).
Note that the object you assign to the kernel.lengthscale attribute must be a Parameter instance; if you assign the return value of tf.cast(parameter), as in the MRE, it is equivalent to a constant and won't actually be optimised!
Simply removing the tf.cast in this question's MRE won't immediately work due to a float32/float64 mismatch. To fix that, the AffineScalar bijector needs to be in float64; it does not have a dtype argument, so instead cast the arguments to shift= and scale= to the required type:
def bounded_lengthscale(low, high, lengthscale):
    """Make lengthscale tfp Parameter with optimization bounds."""
    affine = tfb.AffineScalar(shift=tf.cast(low, tf.float64),
                              scale=tf.cast(high - low, tf.float64))
    sigmoid = tfb.Sigmoid()
    logistic = tfb.Chain([affine, sigmoid])
    parameter = gpflow.Parameter(lengthscale, transform=logistic, dtype=tf.float64)
    return parameter

m.kernel.lengthscale = bounded_lengthscale(0, 1, 0.5)
(GPflow should probably contain a helper function like this to make bounded parameter transforms easier to use - GPflow always appreciates people helping out, so if you want to turn this into a pull request, please do!)
I am trying to use tf.data.Dataset.map to port over this old code, because I get a deprecation warning.
Old code which reads a set of custom protos from a TFRecord file:
record_iterator = tf.python_io.tf_record_iterator(path=filename)

for record in record_iterator:
    example = MyProto()
    example.ParseFromString(record)
I am trying to use eager mode and map, but I get the error shown below.
def parse_proto(string):
    proto_object = MyProto()
    proto_object.ParseFromString(string)

raw_tf_dataset = tf.data.TFRecordDataset(dataset_paths)
parsed_protos = raw_tf_dataset.map(parse_proto)
This code works:
for raw_record in raw_tf_dataset:
    proto_object = MyProto()
    proto_object.ParseFromString(raw_record.numpy())
But the map gives me an error:
TypeError: a bytes-like object is required, not 'Tensor'
What is the right way to use the argument passed to the map function and treat it like a string?
You need to extract the string from the tensor and use it in the map function. Below are the steps to implement in the code to achieve this.
You have to wrap the map function with tf.py_function(parse_proto, [x], [tf.float32]). You can find more about tf.py_function here. In tf.py_function, the first argument is the map function, the second argument is the element to be passed to it, and the final argument is the list of return types.
Inside the map function you can recover a Python value with .numpy(): for a file path, bytes.decode(path.numpy()) gives you a str, while for a binary proto payload string.numpy() already gives the raw bytes that ParseFromString expects.
So modify your program as below,
parsed_protos = raw_tf_dataset.map(parse_proto)
to
parsed_protos = raw_tf_dataset.map(lambda x: tf.py_function(parse_proto, [x], [function return type]))
Also modify parse_proto as below,
def parse_proto(string):
    proto_object = MyProto()
    proto_object.ParseFromString(string)
to
def parse_proto(string):
    proto_object = MyProto()
    # string.numpy() yields the record's raw bytes, which is what
    # ParseFromString expects.
    proto_object.ParseFromString(string.numpy())
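Putting it together, a minimal sketch (here `values`, a repeated float field on MyProto, is hypothetical; the return-type list must match what your proto actually yields):

def parse_proto(string):
    proto_object = MyProto()
    proto_object.ParseFromString(string.numpy())
    # `values` is a hypothetical repeated float field on MyProto.
    return tf.constant(list(proto_object.values), dtype=tf.float32)

parsed_protos = raw_tf_dataset.map(
    lambda x: tf.py_function(parse_proto, [x], [tf.float32]))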
In the simple program below, we use tf.data.Dataset.list_files to read the path of the image. Then, in the map function, we read the image with load_img and apply tf.image.central_crop to crop the central part of the image.
Code -
%tensorflow_version 2.x
import tensorflow as tf
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array, array_to_img
from matplotlib import pyplot as plt
import numpy as np
def load_file_and_process(path):
    image = load_img(bytes.decode(path.numpy()), target_size=(224, 224))
    image = img_to_array(image)
    image = tf.image.central_crop(image, np.random.uniform(0.50, 1.00))
    return image

train_dataset = tf.data.Dataset.list_files('/content/bird.jpg')
train_dataset = train_dataset.map(lambda x: tf.py_function(load_file_and_process, [x], [tf.float32]))

for f in train_dataset:
    for l in f:
        image = np.array(array_to_img(l))
        plt.imshow(image)
Hope this answers your question. Happy Learning.