Casting problems after updating from Swift 4 to 4.1 - Alamofire

I've bumped into an odd issue: after updating to Swift 4.1, the cast from JSON to [[Float]] fails. Let me show the code real quick.
Alamofire.request(url, method: .get, parameters: nil, headers: headers())
    .responseJSON(queue: queue, completionHandler: { response in
        if let floatDoubleArray = response.result.value as? [[Float]] {
            // Do stuff
        }
    })
The above worked just fine in Swift 4.0, but now it fails. The response I get is as follows.
(
    <__NSArrayI 0x7fb9d02fd800>(
        0, 0.05, 0.051, 0.052, 0.051, 0.05, 0.05, 0.049, 0.048, 0.048, 0.047,
        0.047, 0.047, 0.046, 0.046, 0.047, 0.047, 0.047, 0.048, 0.048, 0.049,
        0.049, 0.05, 0.051, 0.051, 0, 0, 0, 0, 0, 0, 0
    ),
    <__NSArrayI 0x7fb9d02fde00>(
        0, 0.051, 0.051, 0.051, 0.05, 0.05, 0.049, 0.048, 0.048, 0.048, 0.047,
        0.047, 0.047, 0, 0, 0, 0, 0, 0, 0
    )
)
If I try to force-cast it to [[Float]], I get
"Unable to bridge NSNumber to Float"
Any ideas how to fix this? It all ran fine using Swift 4.0.

Can you try
if let floatDoubleArray = response.result.value as? [[NSNumber]] {
    let floatArr = floatDoubleArray.map { $0.map { $0.floatValue } }
}

ContentView not updating

I’m somewhat new to SwiftUI programming and probably making all kinds of beginner mistakes. That said, I’m trying to learn the language by cutting my teeth on a number of small programs. In this one, I can’t get the ContentView to update for numPlayers and numRows when returning from the Sheet, but the two Bool variables (showRowIndicators and showColumnIndicators) do update correctly in ContentView.
Any help would be appreciated! :)
import SwiftUI
struct Player: Identifiable, Hashable {
var id: Int = 0
var name: String = ""
}
struct ContentView: View {
@State var players = [
Player(id: 0, name: "Player1"),
Player(id: 1, name: "Player2"),
Player(id: 2, name: "Player3"),
Player(id: 3, name: "Player4"),
Player(id: 4, name: "Player5"),
Player(id: 5, name: "Player6"),
Player(id: 6, name: "Player7"),
Player(id: 7, name: "Player8"),
Player(id: 8, name: "Player9"),
Player(id: 9, name: "Player10"),
]
@State var numbers = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
#AppStorage("numRows") var numRows: Int = 10
#AppStorage("numPlayers") var numPlayers: Int = 4
#AppStorage("showRowIndicators") var showRowIndicators: Bool = false
#AppStorage("showColumnIndicators") var showColumnIndicators: Bool = false
#State var backgroundColor: Color = Color.green
#State var showSheet: Bool = false
var rowOddBColor: Color = Color.white.opacity(0.2)
var rowEvenBColor: Color = Color.green.opacity(0.2)
var maxRows = 20
let boxWidth = 65.0
let boxHeight = 22.0
let boxPadH = 2.0
let HSspace = 7.0
let boxBackColor = Color(#colorLiteral(red: 0.8993894458, green: 0.8753863573, blue: 0.7580057979, alpha: 1))
let boxForeColor = Color(#colorLiteral(red: 0, green: 0, blue: 0, alpha: 1))
let totalBackColor = Color(#colorLiteral(red: 0.8039215803, green: 0.8039215803, blue: 0.8039215803, alpha: 1))
let topScoreBackColor = Color(#colorLiteral(red: 1, green: 1, blue: 1, alpha: 1))
var winningPlayer: Int = 0
var isTopScore: Int = 0
var body: some View {
ZStack {
//background
backgroundColor
.ignoresSafeArea()
//content
DisplayColumnGuides(numPlayers: $numPlayers, showColumnIndicators: $showColumnIndicators, backgroundColor: $backgroundColor, HSspace: HSspace, boxWidth: boxWidth)
ScrollView(.vertical) {
VStack(spacing: 0) {
HStack {
Image(systemName: "gear")
.font(.largeTitle)
.frame(width: 50, alignment: .leading)
.onTapGesture {
showSheet.toggle()
}
Spacer()
}
DisplayPlayerNames(players: $players, numPlayers: $numPlayers, HSspace: HSspace, boxWidth: boxWidth, boxHeight: boxHeight)
let rowCount: Int = (numRows <= maxRows ? numRows : maxRows)
ForEach(0..<rowCount) { row in
ZStack {
Text("\(row+1)")
.font(.footnote)
.foregroundColor(.white)
.frame(maxWidth: .infinity, alignment: .leading)
HStack(spacing: HSspace) {
ForEach(0..<numPlayers) { column in
TextField("", value: self.$numbers[row][column], format: .number)
.foregroundColor(boxForeColor)
.font(.title3)
.multilineTextAlignment(.center)
.keyboardType(.numbersAndPunctuation)
.frame(width: boxWidth, height: boxHeight)
// .background(boxBackColor)
.cornerRadius(5.0)
// .border(.black, width: 0.2)
}
}
.frame(maxWidth: .infinity)
.frame(height: boxHeight+5)
.background(
showRowIndicators
? ( isNumberEven(number: row)
? rowEvenBColor
: rowOddBColor
)
: nil
)
}
}
VStack(spacing: 10) {
ZStack {
Text("Total")
// .bold()
.font(.footnote)
.foregroundColor(Color.blue)
.frame(maxWidth: .infinity, alignment: .leading)
.padding(.top, 5)
HStack(spacing: HSspace) {
ForEach(0..<numPlayers) { player in
let playerScore = addScore(playerNumber: player)
let topPlayer = getTopPlayer()
Text("\(playerScore)")
.font(.title3)
.foregroundColor(boxForeColor)
.bold()
.frame(width: boxWidth, height: boxHeight+5)
.background(topPlayer == player ? topScoreBackColor : totalBackColor)
// .border(.black)
.cornerRadius(10.0)
}
}
.padding(.top, 5)
}
}
Spacer()
}
}
.sheet(isPresented: $showSheet) {
OptionsView()
}
}
.onTapGesture {
//--- Hide Keyboard ---
UIApplication.shared.sendAction(#selector(UIResponder.resignFirstResponder), to: nil, from: nil, for: nil)
}
.gesture(
DragGesture(minimumDistance: 0, coordinateSpace: .local).onEnded({ gesture in
//--- Hide keyboard on swipe down ---
if gesture.translation.height > 0 {
UIApplication.shared.sendAction(#selector(UIResponder.resignFirstResponder), to: nil, from: nil, for: nil)
}
}))
}
func isNumberEven(number: Int) -> Bool {
return (number % 2 == 0)
}
func getTopPlayer() -> Int {
var topScore: Int = 0
var topPlayer: Int = -1
for p in 0..<players.count {
let tPlayer = addScore(playerNumber: p)
if tPlayer > topScore {
topScore = tPlayer
topPlayer = p
}
}
return topPlayer
}
func addScore(playerNumber: Int) -> Int {
let rowCount = maxRows
var playerTotal: Int = 0
for row in 0..<rowCount {
playerTotal += numbers[row][playerNumber]
}
return playerTotal
}
}
struct OptionsView: View {
@Environment(\.presentationMode) var presentationMode
@AppStorage("numRows") var numRows: Int?
@AppStorage("numPlayers") var numPlayers: Int = 4
@AppStorage("showRowIndicators") var showRowIndicators: Bool = false
@AppStorage("showColumnIndicators") var showColumnIndicators: Bool = false
var body: some View {
ZStack {
Color.brown.opacity(0.3)
VStack {
Text("Settings")
.font(.largeTitle)
.underline()
.padding(.vertical)
// Button(action: {
// presentationMode.wrappedValue.dismiss()
// }, label: {
// Image(systemName: "xmark")
// .foregroundColor(.red)
// .font(.title)
// .padding(10)
// })
// .frame(maxWidth: .infinity, alignment: .leading)
HStack {
Text("Enter # score rows (max 20)")
.frame(maxWidth: .infinity, alignment: .leading)
Spacer()
TextField("Enter # rows", value: $numRows, format: .number)
.keyboardType(.numberPad)
.multilineTextAlignment(.center)
.frame(width: 100)
.background(Color.white)
.cornerRadius(10.0)
}
.padding(.horizontal)
HStack {
Text("Enter # players (max 10)")
.frame(maxWidth: .infinity, alignment: .leading)
Spacer()
TextField("Enter # players", value: $numPlayers, format: .number)
.keyboardType(.numberPad)
.multilineTextAlignment(.center)
.frame(width: 100, alignment: .center)
.background(Color.white)
.cornerRadius(10.0)
}
.padding(.horizontal)
Toggle(isOn: $showRowIndicators, label: {
Text("Show row guides")
})
.padding(.horizontal)
.padding(.top, 20)
Toggle(isOn: $showColumnIndicators, label: {
Text("Show column guides")
})
.padding(.horizontal)
.padding(.top, 5)
Button(action: {
presentationMode.wrappedValue.dismiss()
}, label: {
Text("Close")
.foregroundColor(.blue)
.font(.title)
.frame(width: 100, height: 40.0)
.background(Color.white)
.cornerRadius(25)
.padding(10)
})
.padding(.top, 40)
Spacer()
}
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
// OptionsView()
}
}
struct DisplayColumnGuides: View {
@Binding var numPlayers: Int
@Binding var showColumnIndicators: Bool
@Binding var backgroundColor: Color
var HSspace: Double
var boxWidth: Double
var body: some View {
HStack(spacing: HSspace) {
ForEach(0..<numPlayers) { player in
Rectangle()
.fill(
showColumnIndicators
? Color.blue.opacity(0.2)
: backgroundColor
)
.frame(width: boxWidth, alignment: .center)
}
}
.padding(.vertical, 5)
}
}
struct DisplayPlayerNames: View {
@Binding var players: [Player]
@Binding var numPlayers: Int
var HSspace: Double
var boxWidth: Double
var boxHeight: Double
var body: some View {
HStack(spacing: HSspace) {
ForEach(0..<numPlayers) { player in
TextField("", text: self.$players[player].name)
// .underline()
.multilineTextAlignment(.center)
.frame(width: boxWidth, height: boxHeight, alignment: .center)
}
}
.padding(.vertical, 5)
}
}

is it possible to pad a numpy array with a tuple?

I'm trying to pad a numpy array with a tuple (the array itself contains only tuples). All I can find are examples of padding an array with 0s or 1s, which I can get to work, but that doesn't help me. Is it possible to pad with a tuple?
The crucial line is:
cells = np.pad(cells, pad_width=1, mode='constant', constant_values=material)
Replacing material, which is a 4-tuple, with a 0 works fine...but I really need it to be a tuple.
I get the error message:
operands could not be broadcast together with remapped shapes [original->remapped]: (4,) and requested shape (2,2)
Here is the code I am using, but using 0s and 1s instead:
import numpy as np
side_len = 3
a = [1 for x in range(9)]
a = np.array(a)
a = a.reshape(side_len,side_len)
a = np.pad(a, pad_width=1, mode='constant', constant_values=0)
The goal is, instead of a list of 1s, to pass a list of tuples, and instead of constant_values=0, to use constant_values=material, where material is an arbitrary 4-tuple.
A flat list of tuples is passed to this function (the function is not shown here), e.g.:
[(0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255), (0, 0, 255, 255)]
Which I convert to a numpy array using:
cells = np.array(cells, dtype='i,i,i,i').reshape(side_len,side_len)
Perhaps this is wonky, but the rest of my program just uses lists and doesn't need numpy. For this padding step, though, I was originally iterating over my flat list manually and doing the padding, which took forever as the list grew, so I thought I'd try numpy because it might be faster.
The solution was:
import numpy as np
side_len = 3
material = (0,0,0,255)
a = [(255,0,0,255) for x in range(9)]
a = np.array(a,dtype='i,i,i,i').reshape(side_len,side_len)
_material = np.array(material,dtype='i,i,i,i')
a = np.pad(a, pad_width=1, mode='constant', constant_values=_material)
a
array([[( 0, 0, 0, 255), ( 0, 0, 0, 255), ( 0, 0, 0, 255),
( 0, 0, 0, 255), ( 0, 0, 0, 255)],
[( 0, 0, 0, 255), (255, 0, 0, 255), (255, 0, 0, 255),
(255, 0, 0, 255), ( 0, 0, 0, 255)],
[( 0, 0, 0, 255), (255, 0, 0, 255), (255, 0, 0, 255),
(255, 0, 0, 255), ( 0, 0, 0, 255)],
[( 0, 0, 0, 255), (255, 0, 0, 255), (255, 0, 0, 255),
(255, 0, 0, 255), ( 0, 0, 0, 255)],
[( 0, 0, 0, 255), ( 0, 0, 0, 255), ( 0, 0, 0, 255),
( 0, 0, 0, 255), ( 0, 0, 0, 255)]],
dtype=[('f0', '<i4'), ('f1', '<i4'), ('f2', '<i4'), ('f3', '<i4')])
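An alternative, if every tuple has the same length, is to skip the structured dtype and keep a plain (rows, cols, 4) integer array, filling the one-cell border with the material tuple directly. A minimal sketch under that assumption (variable names are illustrative):
import numpy as np

side_len = 3
material = (0, 0, 0, 255)   # border RGBA
cell = (255, 0, 0, 255)     # interior RGBA

# Plain (rows, cols, 4) array instead of a structured dtype
a = np.empty((side_len, side_len, 4), dtype=int)
a[...] = cell               # broadcast the 4-tuple into every cell

# Build the padded grid: start with material everywhere, then drop the
# original cells into the middle
padded = np.empty((side_len + 2, side_len + 2, 4), dtype=int)
padded[...] = material
padded[1:-1, 1:-1] = a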

Why does model.predict in TensorFlow.js keep returning the same incorrect output regardless of the tensor given?

I'm trying to get a Keras model I converted to TensorFlow.js to work in React Native, but the model keeps giving bad responses. I did some digging and realized that the tensor I passed into model.predict is somehow being changed, causing it to give the same incorrect prediction. Any suggestions would be appreciated. I'm pretty much hard stuck. Code below:
import React, {useState, useEffect} from 'react';
import {View, Text, Button} from 'react-native';
import * as tf from '@tensorflow/tfjs';
import { bundleResourceIO } from '@tensorflow/tfjs-react-native';
import * as mobilenet from '@tensorflow-models/mobilenet';
function thing() {
const [model, setModel] = useState(null);
const [tensor, setTensor] = useState(null);
async function loadModel() {
const modelJson = require('./assets/model.json');
const weight = require('./assets/group1-shard1of1.bin');
const backend = await tf.ready();
const item = await tf.loadLayersModel(
bundleResourceIO(modelJson, weight)
);
const tfTensor = tf.tensor([[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]);
setModel(item);
setTensor(tfTensor);
}
useEffect(() => {
loadModel();
}, []);
async function test() {
if(tensor !== null && model !== null) {
const result = await model.predict(tensor);
console.log(result.dataSync())
}
}
return (
<View>
<Button
onPress={test}
title="click"
color="#841584"
accessibilityLabel="Learn more about this purple button"
/>
</View>
);
}
export default thing;
Just like changing a single pixel in an image doesn't change the image, changing one bit in an array doesn't significantly adjust the prediction.
I ran mobilenet on a black 224x224 image and it predicted class 819 (whatever that is). Then I changed the top-left pixel to white, re-ran mobilenet, and it still classified the image as class 819.
See example code here
Changing a single bit does not have a cascading effect like a hash function. Mobilenet, by its nature, is resilient to noise.
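For reference, the same experiment can be reproduced outside React Native with the Python Keras MobileNet; this is a sketch, not the asker's converted model, and the printed class index is simply whichever ImageNet label the black image happens to hit:
import numpy as np
import tensorflow as tf

model = tf.keras.applications.MobileNet(weights='imagenet')

# All-black 224x224 RGB image
black = np.zeros((1, 224, 224, 3), dtype=np.float32)
x = tf.keras.applications.mobilenet.preprocess_input(black.copy())
print(np.argmax(model.predict(x), axis=-1))    # some class index

# Flip the top-left pixel to white and classify again
one_pixel = black.copy()
one_pixel[0, 0, 0, :] = 255.0
x2 = tf.keras.applications.mobilenet.preprocess_input(one_pixel)
print(np.argmax(model.predict(x2), axis=-1))   # typically the same class index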

Python - Convert from Response Variable to Pandas Dataframe

I ran a LIWC analysis and it gave me the results below. I would like to turn the result into a pandas dataframe. If anyone can chip in, that would be wonderful.
Thanks in advance :)
Best,
David
resp = requests.post(url, auth=(api_key, api_secret), data=data)
resp1 = resp
print(resp.json())
{'plan_usage': {'call_limit': 1000, 'calls_made': 6, 'calls_remaining': 994, 'percent_used': 0.6, 'start_date': '2020-12-09T03:05:57.779556Z', 'end_date': '2020-12-23T03:05:57.779556Z'}, 'results': [{'response_id': 'd1382f42-5c28-4528-ab2e-81b80ba185e2', 'request_id': 'req-1', 'language': 'en', 'version': 'v1.0.0', 'summary': {'word_count': 57, 'words_per_sentence': 11.4, 'sentence_count': 5, 'six_plus_words': 0.2982456140350877, 'emojis': 0, 'emoticons': 0, 'hashtags': 0, 'urls': 0}, 'liwc': {'scores': {'analytical_thinking': 80.77394876079086, 'authentic': 38.8220872694557, 'clout': 50, 'emotional_tone': 97.58138119866139, 'dictionary_words': 0.8771929824561403, 'categories': {'achievement': 0, 'adjectives': 0.017543859649122806, 'adverbs': 0.03508771929824561, 'affect': 0.05263157894736842, 'affiliation': 0.017543859649122806, 'all_punctuation': 0.10526315789473684, 'anger_words': 0, 'anxiety_words': 0, 'apostrophes': 0, 'articles': 0.12280701754385964, 'assent': 0, 'auxiliary_verbs': 0.14035087719298245, 'biological_processes': 0, 'body': 0, 'causation': 0, 'certainty': 0, 'cognitive_processes': 0.05263157894736842, 'colons': 0, 'commas': 0.017543859649122806, 'comparisons': 0, 'conjunctions': 0.07017543859649122, 'dashes': 0, 'death': 0, 'differentiation': 0, 'discrepancies': 0.017543859649122806, 'drives': 0.03508771929824561, 'exclamations': 0, 'family': 0, 'feel': 0, 'female': 0, 'filler_words': 0, 'focus_future': 0, 'focus_past': 0, 'focus_present': 0.14035087719298245, 'friends': 0.017543859649122806, 'function_words': 0.543859649122807, 'health': 0, 'hear': 0, 'home': 0, 'i': 0.03508771929824561, 'impersonal_pronouns': 0.03508771929824561, 'informal_language': 0, 'ingestion': 0, 'insight': 0, 'interrogatives': 0.017543859649122806, 'leisure': 0.14035087719298245, 'male': 0, 'money': 0, 'motion': 0.05263157894736842, 'negations': 0, 'negative_emotion_words': 0, 'netspeak': 0, 'nonfluencies': 0, 'numbers': 0, 'other_grammar': 0.2807017543859649, 'other_punctuation': 0, 'parentheses': 0, 'perceptual_processes': 0.017543859649122806, 'periods': 0.08771929824561403, 'personal_concerns': 0.14035087719298245, 'personal_pronouns': 0.03508771929824561, 'positive_emotion_words': 0.05263157894736842, 'power': 0, 'prepositions': 0.10526315789473684, 'pronouns': 0.07017543859649122, 'quantifiers': 0.05263157894736842, 'question_marks': 0, 'quotes': 0, 'relativity': 0.17543859649122806, 'religion': 0, 'reward': 0.017543859649122806, 'risk': 0, 'sad_words': 0, 'see': 0.017543859649122806, 'semicolons': 0, 'sexual': 0, 'she_he': 0, 'social': 0.03508771929824561, 'space': 0.10526315789473684, 'swear_words': 0, 'tentative': 0.03508771929824561, 'they': 0, 'time': 0.017543859649122806, 'time_orientation': 0.14035087719298245, 'verbs': 0.19298245614035087, 'we': 0, 'work': 0, 'you': 0}}}, 'sallee': {'counts': {'emotions': {'admiration': 5, 'amusement': 0, 'anger': 0, 'boredom': 0, 'calmness': 0, 'curiosity': 0, 'desire': 0, 'disgust': 0, 'excitement': 0.375, 'fear': 0, 'gratitude': 2, 'joy': 6.375, 'love': 5, 'pain': 0, 'sadness': 0, 'surprise': 0}, 'goodfeel': 13.375, 'ambifeel': 0, 'badfeel': 0, 'emotionality': 13.375, 'sentiment': 13.375, 'non_emotion': None}, 'scores': {'emotions': {'admiration': 0.3333333333333333, 'amusement': 0, 'anger': 0, 'boredom': 0, 'calmness': 0, 'curiosity': 0, 'desire': 0, 'disgust': 0, 'excitement': 0.03614457831325301, 'fear': 0, 'gratitude': 0.16666666666666666, 'joy': 0.3893129770992366, 'love': 0.3333333333333333, 'pain': 0, 'sadness': 0, 'surprise': 0}, 
'goodfeel': 0.2015065913370998, 'ambifeel': 0, 'badfeel': 0, 'emotionality': 0.2015065913370998, 'sentiment': 0.6541600137038615, 'non_emotion': 0.7984934086629002}, 'emotion_word_count': 4}}]}
import pandas as pd

js = resp.json()
df = pd.json_normalize(js['results'][0])
df.columns
Index(['response_id', 'request_id', 'language', 'version',
'summary.word_count', 'summary.words_per_sentence',
'summary.sentence_count', 'summary.six_plus_words', 'summary.emojis',
'summary.emoticons',
...
'sallee.scores.emotions.pain', 'sallee.scores.emotions.sadness',
'sallee.scores.emotions.surprise', 'sallee.scores.goodfeel',
'sallee.scores.ambifeel', 'sallee.scores.badfeel',
'sallee.scores.emotionality', 'sallee.scores.sentiment',
'sallee.scores.non_emotion', 'sallee.emotion_word_count'],
dtype='object', length=150)
df.iloc[0]
response_id d1382f42-5c28-4528-ab2e-81b80ba185e2
request_id req-1
language en
version v1.0.0
summary.word_count 57
...
sallee.scores.badfeel 0
sallee.scores.emotionality 0.202
sallee.scores.sentiment 0.654
sallee.scores.non_emotion 0.798
sallee.emotion_word_count 4
Name: 0, Length: 150, dtype: object
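As a possible follow-up (reusing js and df from the snippet above, so those names are carried over from it): the plan_usage block can be flattened the same way, and DataFrame.filter picks out just the LIWC category columns.
import pandas as pd

usage_df = pd.json_normalize(js['plan_usage'])              # one-row usage dataframe
category_cols = df.filter(like='liwc.scores.categories')    # only the category scores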

Can't use deployed TF BERT model to get GCloud online predictions from SavedModel: "Bad Request" error

I trained a BERT model based on this notebook.
I exported it as a TF SavedModel this way:
def serving_input_fn():
    receiver_tensors = {
        "input_ids": tf.placeholder(dtype=tf.int32, shape=[1, MAX_SEQ_LENGTH])
    }
    features = {
        "input_ids": receiver_tensors['input_ids'],
        "input_mask": 1 - tf.cast(tf.equal(receiver_tensors['input_ids'], 0), dtype=tf.int32),
        "segment_ids": tf.zeros(dtype=tf.int32, shape=[1, MAX_SEQ_LENGTH]),
        "label_ids": tf.placeholder(tf.int32, [None], name='label_ids')
    }
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

estimator._export_to_tpu = False
estimator.export_saved_model("export", serving_input_fn)
Then if I try to use the saved model locally it works:
from tensorflow.contrib import predictor
predict_fn = predictor.from_saved_model("export/1575241274/")
print(predict_fn({
"input_ids": [[101, 10468, 99304, 11496, 171, 112, 10176, 22873, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
}))
# {'probabilities': array([[-0.01023898, -4.5866656 ]], dtype=float32), 'labels': 0}
Then I uploaded the SavedModel to a bucket and created a model and a model version on gcloud this way:
gcloud alpha ai-platform versions create v1gpu --model [...] --origin=[...] --python-version=3.5 --runtime-version=1.14 --accelerator=^:^count=1:type=nvidia-tesla-k80 --machine-type n1-highcpu-4
No issue there, the model is deployed and displayed as working in the console.
But if I try to get predictions, as such:
import googleapiclient.discovery
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/[project_name]/models/[model_name]/versions/v1gpu'
response = service.projects().predict(
name=name,
body={'instances': [{
"input_ids": [[101, 10468, 99304, 11496, 171, 112, 10176, 22873, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
}]}
).execute()
print(response["predictions"])
All I get is the following error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/googleapiclient/_helpers.py", line 130, in positional_wrapper
return wrapped(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/googleapiclient/http.py", line 851, in execute
raise HttpError(resp, content, uri=self.uri)
googleapiclient.errors.HttpError: <HttpError 400 when requesting https://ml.googleapis.com/v1/projects/[project_name]/models/[model_name]/versions/v1gpu:predict?alt=json returned "Bad Request">
I get the same error if I test the model from the gcloud console using the "Test your model with sample input data" feature.
Edit:
The saved_model has a tagset "serve" and signature_def "serving_default".
Output of "saved_model_cli show --dir 1575241274/ --tag_set serve --signature_def serving_default":
The given SavedModel SignatureDef contains the following input(s):
  inputs['input_ids'] tensor_info:
      dtype: DT_INT32
      shape: (1, 128)
      name: Placeholder:0
The given SavedModel SignatureDef contains the following output(s):
  outputs['labels'] tensor_info:
      dtype: DT_INT32
      shape: ()
      name: loss/Squeeze:0
  outputs['probabilities'] tensor_info:
      dtype: DT_FLOAT
      shape: (1, 2)
      name: loss/LogSoftmax:0
Method name is: tensorflow/serving/predict
The body of the request sent to the API has the form:
{"instances": [<instance 1>, <instance 2>, ...]}
As specified in the documentation, we need something like this:
{
  "instances": [
    <object>
    ...
  ]
}
In this case you have:
{
  "instances": [
    {
      "input_ids": [ <object> ]
    }
    ...
  ]
}
You need to drop the input_ids wrapper and pass the list directly as the instance:
{
"instances":
[
[101, 10468, 99304, 11496, 171, 112, 10176, 22873, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
}
Note: if you can show the saved_model_cli output, that would be great.
The gcloud local predict command is also a good option for testing.
It depends on the signature of the model. In my case I have the following signature (keeping just the input part):
The given SavedModel SignatureDef contains the following input(s):
  inputs['attention_mask'] tensor_info:
      dtype: DT_INT32
      shape: (-1, 128)
      name: serving_default_attention_mask:0
  inputs['input_ids'] tensor_info:
      dtype: DT_INT32
      shape: (-1, 128)
      name: serving_default_input_ids:0
  inputs['token_type_ids'] tensor_info:
      dtype: DT_INT32
      shape: (-1, 128)
      name: serving_default_token_type_ids:0
and I need to pass data in the following format (in this case 2 examples):
{'instances':
  [
    {'input_ids': [101, 143, 18267, 15470, 90395, ...],
     'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, .....],
     'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, .....]
    },
    {'input_ids': [101, 17664, 143, 30728, .........],
     'attention_mask': [1, 1, 1, 1, 1, 1, 1, .......],
     'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, ....]
    }
  ]
}
I am using this with a Keras model and TensorFlow 2.2.0.
I guess in your case you need (for 2 examples):
{'instances':
  [
    {'input_ids': [101, 143, 18267, 15470, 90395, ...]},
    {'input_ids': [101, 17664, 143, 30728, .........]}
  ]
}
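If that is the right shape, a sketch of the corrected call, reusing the googleapiclient code from the question, could look like the following; the token ids are the example sequence from the question padded with zeros to the 128-token input length, and whether the instance needs the input_ids key or just the bare list depends on the exported signature:
import googleapiclient.discovery

service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/[project_name]/models/[model_name]/versions/v1gpu'

# One instance: 10 real token ids padded with zeros to length 128
instance = {'input_ids': [101, 10468, 99304, 11496, 171, 112, 10176, 22873, 119, 102] + [0] * 118}

response = service.projects().predict(
    name=name,
    body={'instances': [instance]}
).execute()

print(response['predictions'])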