Add metadata to tensorflow serving api call - tensorflow-serving

Is it possible to add metadata to a tensorflow serving servable, such that this metadata is also populated in the response from the servable?
If I have a servable with the file structure:

```
my_servable/
    1541778457/
        variables/
        saved_model.pb
```

a prediction request currently returns, for example:
```
outputs {
  key: "classes"
  value {
    dtype: DT_STRING
    tensor_shape {
      dim {
        size: 8
      }
    }
    string_val: "a"
    string_val: "b"
    string_val: "c"
    string_val: "d"
    string_val: "e"
    string_val: "f"
    string_val: "g"
    string_val: "h"
  }
}
outputs {
  key: "scores"
  value {
    dtype: DT_FLOAT
    tensor_shape {
      dim {
        size: 1
      }
      dim {
        size: 8
      }
    }
    float_val: 1.212528104588273e-06
    float_val: 5.094948463124638e-08
    float_val: 0.0009737954242154956
    float_val: 0.9988483190536499
    float_val: 3.245145592245535e-07
    float_val: 0.00010837535955943167
    float_val: 4.101086960872635e-05
    float_val: 2.676981057447847e-05
  }
}
model_spec {
  name: "my_model"
  version {
    value: 1541778457
  }
  signature_name: "prediction"
}
```
If I have something like a git hash or another unique identifier for the code that generated this servable, like f6ca434910504532a0d50dfd12f22d4c, is it possible to get this data back in the response to the client?
Ideally something like:
```
outputs {
  key: "classes"
  value {
    dtype: DT_STRING
    tensor_shape {
      dim {
        size: 8
      }
    }
    string_val: "a"
    string_val: "b"
    string_val: "c"
    string_val: "d"
    string_val: "e"
    string_val: "f"
    string_val: "g"
    string_val: "h"
  }
}
outputs {
  key: "scores"
  value {
    dtype: DT_FLOAT
    tensor_shape {
      dim {
        size: 1
      }
      dim {
        size: 8
      }
    }
    float_val: 1.212528104588273e-06
    float_val: 5.094948463124638e-08
    float_val: 0.0009737954242154956
    float_val: 0.9988483190536499
    float_val: 3.245145592245535e-07
    float_val: 0.00010837535955943167
    float_val: 4.101086960872635e-05
    float_val: 2.676981057447847e-05
  }
}
model_spec {
  name: "my_model"
  version {
    value: 1541778457
  }
  hash {
    value: f6ca434910504532a0d50dfd12f22d4c
  }
  signature_name: "prediction"
}
```
I tried changing the directory from 1541778457 to the hash, but this gave:
```
W tensorflow_serving/sources/storage_path/file_system_storage_path_source.cc:268] No versions of servable default found under base path
```

You could approach this problem in a couple of ways. If you want the folder-name idea to work, remember that the folder name in this case is your model version, which must be an integer. You would therefore need to encode your hash as a decimal integer and decode it again when you receive the version back in the response.
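A minimal sketch of that round trip in plain Python (the truncation length is an assumption; TF Serving model versions are signed 64-bit integers, so a full 128-bit MD5-style digest does not fit):

```
# Hypothetical round trip between a hex hash and an integer version number.
# Only the first 15 hex digits (60 bits) of the hash are kept here, because
# a full 128-bit digest would overflow the 64-bit version field.
model_hash = "f6ca434910504532a0d50dfd12f22d4c"

version = int(model_hash[:15], 16)    # use this integer as the export folder name
recovered = format(version, "015x")   # decode the version from model_spec back to hex

assert recovered == model_hash[:15]
```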
A better solution, in my opinion, would be to change your model and add a constant tensor containing your hash, then add it to the model's signature_def. In Python that would look something like:
```
# create a constant tensor holding the hash
model_hash = tf.constant("f6ca434910504532a0d50dfd12f22d4c", tf.string, name="HASH")
# build the TensorInfo for it
hash_info = tf.saved_model.utils.build_tensor_info(model_hash)
# add hash_info to the outputs of your signature_def;
# you should then receive the hash in every prediction response
```
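Expanding that idea into a fuller sketch, here is how the export might look with the TF 1.x SavedModel builder; the stand-in model, tensor names, and export path are all hypothetical:

```
import tensorflow as tf  # assumes TF 1.x, matching the snippet above

export_dir = "my_servable/1541778457"  # hypothetical export path

with tf.Session(graph=tf.Graph()) as sess:
    # Stand-in model: one input, one variable, one output.
    x = tf.placeholder(tf.float32, shape=[None, 8], name="x")
    w = tf.Variable(tf.ones([8]), name="w")
    scores = tf.multiply(x, w, name="scores")

    # Constant tensor carrying the build hash.
    model_hash = tf.constant("f6ca434910504532a0d50dfd12f22d4c", name="HASH")

    sess.run(tf.global_variables_initializer())

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={"x": tf.saved_model.utils.build_tensor_info(x)},
        outputs={
            "scores": tf.saved_model.utils.build_tensor_info(scores),
            # Extra output: comes back alongside the scores on every Predict call.
            "hash": tf.saved_model.utils.build_tensor_info(model_hash),
        },
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
    )

    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    builder.add_meta_graph_and_variables(
        sess,
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={"prediction": signature},
    )
    builder.save()
```

The hash then appears as a regular outputs entry in the PredictResponse rather than inside model_spec, which sidesteps the integer-version restriction entirely.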

Related

Does naming of the ops impact the memory/compute performance of TensorFlow?

To make the question clear, let's use an example. Assume that we pass a huge tensor through a series of operations (reshape, transpose, etc.): is it more memory- or compute-efficient to keep reusing the same variable name, or does it not matter? See the two cases below:
Case 1: change name

```
x = Conv2d(...)
x_transposed = tf.transpose(x)
x_expanded = tf.expand_dims(x_transposed, -1)
x_reshaped = tf.reshape(x_expanded, [...])
```

Case 2: keep names

```
x = Conv2d(...)
x = tf.transpose(x)
x = tf.expand_dims(x, -1)
x = tf.reshape(x, [...])
```
By converting the two snippets into separate Python functions, wrapping them with tf.function to compile them into callable TensorFlow graphs, and printing the concrete graphs, it appears they are identical, which indicates that the variable names used make no difference when constructing the graph. The example below (tweaked slightly from the provided snippet) illustrates this:
```
import tensorflow as tf

def same_name():
    x = tf.convert_to_tensor([1, 2, 3], dtype=tf.float32)
    x = tf.transpose(x)
    x = tf.expand_dims(x, -1)
    x = tf.reshape(x, [3, 1])
    x = tf.nn.relu(x)

def diff_name():
    x = tf.convert_to_tensor([1, 2, 3], dtype=tf.float32)
    x_transposed = tf.transpose(x)
    x_expanded = tf.expand_dims(x_transposed, -1)
    x_reshaped = tf.reshape(x_expanded, [3, 1])
    x_relued = tf.nn.relu(x_reshaped)

if __name__ == "__main__":
    print(tf.function(same_name).get_concrete_function().graph.as_graph_def())
    print(tf.function(diff_name).get_concrete_function().graph.as_graph_def())
```
The output in both cases is:
```
node {
  name: "Const"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
          dim {
            size: 3
          }
        }
        tensor_content: "\000\000\200?\000\000\000@\000\000@@"
      }
    }
  }
}
node {
  name: "transpose/perm"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_INT32
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_INT32
        tensor_shape {
          dim {
            size: 1
          }
        }
        int_val: 0
      }
    }
  }
}
node {
  name: "transpose"
  op: "Transpose"
  input: "Const"
  input: "transpose/perm"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "Tperm"
    value {
      type: DT_INT32
    }
  }
}
node {
  name: "ExpandDims/dim"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_INT32
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_INT32
        tensor_shape {
        }
        int_val: -1
      }
    }
  }
}
node {
  name: "ExpandDims"
  op: "ExpandDims"
  input: "transpose"
  input: "ExpandDims/dim"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "Tdim"
    value {
      type: DT_INT32
    }
  }
}
node {
  name: "Reshape/shape"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_INT32
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_INT32
        tensor_shape {
          dim {
            size: 2
          }
        }
        tensor_content: "\003\000\000\000\001\000\000\000"
      }
    }
  }
}
node {
  name: "Reshape"
  op: "Reshape"
  input: "ExpandDims"
  input: "Reshape/shape"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "Tshape"
    value {
      type: DT_INT32
    }
  }
}
node {
  name: "Relu"
  op: "Relu"
  input: "Reshape"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
versions {
  producer: 440
}
```

Convert properties from a properties file into JSON in DataWeave 2.0

How can I convert properties from a properties file:

```
creditmaster.metadata.AverageFicoScore=700
creditmaster.a.b.c=xyz
```

into this JSON format, in a generic way?

```
{
  creditmasterMetaData: [
    {
      attributeKey: "AverageFicoScore",
      attributeValue: 700
    }
  ]
}
```
This script is generic in that the parts of the key don't matter: it groups by the first element (before the first dot) and takes the attribute name from after the last dot, ignoring everything in the middle:
```
%dw 2.3
output application/java
import * from dw::core::Strings
fun mapProperties(props) =
    entriesOf(props) // since Mule 4.3 / DW 2.3
        filter (substringAfter($.key, ".") startsWith "metadata.") // keep only keys containing .metadata.
        groupBy ((item, index) -> substringBefore(item.key, "."))
        mapObject ((value, key, index) ->
            (key): value map {
                attributeKey: substringAfterLast($.key, "."),
                attributeValue: if (isInteger($.value)) $.value as Number else $.value
            }
        )
---
mapProperties(payload)
```
Input file:

```
creditmaster.metadata.AverageFicoScore= 700
other.a.b= 123
creditmaster.a.b.c=xyz
something.metadata.another.maximum=456
creditmaster.metadata.different.minimum=500
```
Output (in JSON for clarity):

```
{
  "something": [
    {
      "attributeKey": "maximum",
      "attributeValue": "456"
    }
  ],
  "creditmaster": [
    {
      "attributeKey": "minimum",
      "attributeValue": "500"
    },
    {
      "attributeKey": "AverageFicoScore",
      "attributeValue": "700"
    }
  ]
}
```
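For comparison, a rough Python equivalent of the same grouping logic (a hypothetical helper, not part of the Mule flow; it mirrors the filter/group/rename steps above):

```
from collections import defaultdict

def map_properties(props):
    """Group by the first key segment, keep only keys whose second segment
    is 'metadata', and name each attribute by the last segment."""
    result = defaultdict(list)
    for key, value in props.items():
        parts = key.split(".")
        if len(parts) < 3 or parts[1] != "metadata":
            continue
        result[parts[0]].append({
            "attributeKey": parts[-1],
            # mirror the isInteger branch: convert purely numeric values
            "attributeValue": int(value) if value.strip().isdigit() else value,
        })
    return dict(result)

props = {
    "creditmaster.metadata.AverageFicoScore": "700",
    "creditmaster.a.b.c": "xyz",
    "something.metadata.another.maximum": "456",
}
print(map_properties(props))
# {'creditmaster': [{'attributeKey': 'AverageFicoScore', 'attributeValue': 700}],
#  'something': [{'attributeKey': 'maximum', 'attributeValue': 456}]}
```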
One alternative is using the pluck function, which lets you iterate over an object and receive its entries.
If you have this input:

```
{
  "creditmaster": {
    "metadata": {
      "AverageFicoScore": "700",
      "OtherData": "Some value"
    }
  }
}
```
with this transformation:

```
{
    creditmasterMetaData:
        payload.creditmaster.metadata pluck ((value, key, index) ->
            {
                attributeKey: key,
                attributeValue: value
            }
        )
}
```
you get this output:

```
{
  "creditmasterMetaData": [
    {
      "attributeKey": "AverageFicoScore",
      "attributeValue": "700"
    },
    {
      "attributeKey": "OtherData",
      "attributeValue": "Some value"
    }
  ]
}
```

Query inside a dictionary object with Cosmos?

I have a data structure like this:

```
{
  dictionary: {
    anotherThing: {
      id: "hello",
      type: "string"
    }
  }
}
```

and another document:

```
{
  dictionary: {
    a: {
      id: "123",
      type: "number"
    },
    random: "cba"
  }
}
```
Is there a query in cosmos I can do that allows me to iterate over the keys in the dictionary object to query inside the nested objects (i.e. where dictionary.x.type = "number")?
1. Use a UDF. However, UDF performance is worse and it can be expensive in request units.
2. Change your schema like this:

```
dictionary: [
  {
    name: "a",
    value: {
      id: "123",
      type: "number"
    }
  }
]
```
Then you can try this SQL:

```
SELECT c FROM c JOIN d IN c.dictionary WHERE d['value'].type = "number"
```
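If existing documents need to be migrated to that array shape, here is a minimal sketch of the conversion in Python (the document shape comes from the question; the helper itself is hypothetical):

```
def dict_to_array(doc):
    """Rewrite the 'dictionary' map into an array of {name, value} entries
    so Cosmos DB can JOIN over it."""
    entries = [
        {"name": key, "value": value}
        for key, value in doc.get("dictionary", {}).items()
    ]
    return {**doc, "dictionary": entries}

doc = {"dictionary": {"a": {"id": "123", "type": "number"}, "random": "cba"}}
print(dict_to_array(doc))
# {'dictionary': [{'name': 'a', 'value': {'id': '123', 'type': 'number'}},
#                 {'name': 'random', 'value': 'cba'}]}
```

Note that plain-string entries like random end up with a string value, which the type filter in the query above simply won't match.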

Generate line numbers in DataWeave 2.0

My requirement is to generate a line number for every record in the generated JSON message. The input message has arrays nested inside arrays, i.e. parent and child arrays.
Input message:

```
[
  {
    id: "1",
    Details: [
      {
        Name: "RAM",
        LastName: "Manohar",
        DOB: "20-10-1990",
        Report: [
          {
            DateOfJoin: "03-03-2019",
            Dept: "HR",
            BillCode: "acx-12s",
            EffectiveDate: "03-03-2019"
          },
          {
            DateOfJoin: "03-04-2019",
            Dept: "HR",
            BillCode: "abc-12s",
            EffectiveDate: "03-04-2019"
          },
          {
            Name: "Alex",
            LastName: "Ham",
            DOB: "20-11-1980",
            Report: [
              {
                DateOfJoin: "03-03-2019",
                Dept: "HR",
                BillCode: "acx-12s",
                EffectiveDate: "03-03-2019"
              },
              {
                DateOfJoin: "03-04-2019",
                Dept: "HR",
                BillCode: "abc-12s",
                EffectiveDate: "03-04-2019"
              }
            ]
          }
        ]
      },
      {
        id: "2",
        Details: [
          {
            Name: "Kiran",
            LastName: "Kurella",
            DOB: "20-10-1980",
            Report: [
              {
                DateOfJoin: "03-03-2019",
                Dept: "DC",
                BillCode: "acx-12s",
                EffectiveDate: "03-03-2019"
              },
              {
                DateOfJoin: "03-04-2019",
                Dept: "DC",
                BillCode: "abc-12s",
                EffectiveDate: "03-04-2019"
              },
              {
                Name: "Sunil",
                LastName: "Kumar",
                DOB: "20-11-1980",
                Report: [
                  {
                    DateOfJoin: "03-01-2019",
                    Dept: "DC",
                    BillCode: "acx-12s",
                    EffectiveDate: "03-03-2019"
                  },
                  {
                    DateOfJoin: "03-04-2019",
                    Dept: "DC",
                    BillCode: "abc-12s",
                    EffectiveDate: "03-04-2019"
                  }
                ]
              }
            ]
          }
        ]
      }
    ]
  }
]
```
Expected output:

```
[
  {
    LineNumber: 1,
    Dept: "HR",
    Name: "Ram"
  },
  {
    LineNumber: 2,
    Dept: "HR",
    Name: "Alex"
  },
  {
    LineNumber: 3,
    Dept: "HR",
    Name: "Kiran"
  },
  {
    LineNumber: 4,
    Dept: "HR",
    Name: "Sunil"
  }
]
```
The line number needs to be generated sequentially, irrespective of whether the record sits in the parent array or a sub-array. Any help on this will be much appreciated. I have logic that can generate the number using a Java function, but in that case I need to set a flow variable inside DataWeave so the Java function can be called recursively.
Use:

```
payload map {
    count: $$
}
```
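In DataWeave's map, $$ is the index of the current element, so this numbers the elements of a single array. The nested case in the question needs a recursive pass that carries a running counter across all levels; here is a rough sketch of that idea in Python (field names come from the question; the traversal itself is a hypothetical illustration, not DataWeave):

```
def number_people(records, counter=None, out=None):
    """Walk nested Details/Report arrays, giving every object that has a
    Name one sequential LineNumber, regardless of nesting depth."""
    counter = counter if counter is not None else [0]
    out = out if out is not None else []
    for rec in records:
        if "Name" in rec:
            counter[0] += 1
            depts = [r["Dept"] for r in rec.get("Report", []) if "Dept" in r]
            out.append({
                "LineNumber": counter[0],
                "Dept": depts[0] if depts else None,
                "Name": rec["Name"],
            })
        # Recurse into any nested arrays so deeper people are numbered too.
        for key in ("Details", "Report"):
            if isinstance(rec.get(key), list):
                number_people(rec[key], counter, out)
    return out
```

On the question's input this yields RAM, Alex, Kiran, and Sunil numbered 1 through 4 in document order. The same shape of recursion can be written as a recursive fun in DataWeave, avoiding flow variables and Java entirely.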

How to read a subsection from a Firebase database

I am using this code to access a Firebase Database, but I cannot figure out how to read the sub-array of strings (the JSON structure is below). The code I have returns the top-level items, but not the list of strings. Would someone be able to assist with this issue?
Here is my function to read from the DB:
```
func sizes(userId: String = Auth.auth().currentUser!.uid, success: @escaping ([Sizes]) -> ()) {
    let ref = Router.sizes.reference()
    let query = ref.queryOrdered(byChild: "name") //userId)
    query.observe(.value, with: { snapshot in
        var array = [Sizes]()
        for child in snapshot.children {
            if let size = Mapper<Sizes>().map(JSON: (child as! DataSnapshot).value as! [String : AnyObject]) {
                array.append(size)
            }
        }
        success(array)
    })
}
```
My Firebase JSON is as follows:
```
{
  "-SzCat_001": {
    "name": "Womans",
    "sizeCategories": {
      "name": "Pants",
      "sizeDescriptor": [
        "00",
        "0",
        "2",
        "4",
        "6",
        "8",
        "10",
        "12",
        "XL"
      ]
    }
  }
}
```
And this is what gets returned:

```
[0] = {
  name = "Womans"
  sizeCategories = 0 values {}
}
```
I am trying to figure out how to read the sizeCategories list of strings as a sub-array of Sizes. Here are my definitions of Sizes and SizeCategories:
```
struct Sizes: Mappable {
    var name: String = ""
    var sizeCategories = [SizeCategories]()

    init() {
    }

    init?(map: Map) {
    }

    mutating func mapping(map: Map) {
        name <- map["name"]
        sizeCategories <- map["sizeCategories"]
    }
}

struct SizeCategories: Mappable {
    var name: String = ""
    var sizeDescriptor = [String]()

    init() {
    }

    init?(map: Map) {
    }

    mutating func mapping(map: Map) {
        name <- map["name"]
        sizeDescriptor <- map["sizeDescriptor"]
    }
}
```
Thanks for any help!!!
You're jumping through a lot of hoops to read the data here. You could just use let allMyData = snapshot.value as! [String: AnyObject] and know that each internal value is also a [String: AnyObject]. But if you really want to destructure into something more typed with this mapping technique, have a look at your Sizes definition:

```
var sizeCategories = [SizeCategories]()
```

This says "sizeCategories is an array of SizeCategories type". But your data is not structured as an array; it is a dict:
"sizeCategories": {
"name": "Pants",
"sizeDescriptor": [
"00",
"0",
"2",
"4",
"6",
"8",
"10",
"12",
"XL"
]
}
You need to adjust your definition and mapping method for this field, for example by declaring sizeCategories as a single SizeCategories value rather than an array, so that it matches the dict in your data.