Rally.ui.chart.CumulativeFlowChart Error - rally

I am new to Node.js and Rally app development. I am trying to build a custom Rally cumulative flow chart, following the YouTube tutorials by Rally engineers and using rally-app-builder to build the app.
My custom app throws the error below when it runs.
sdk-debug.js:6582 [Deprecation] Synchronous XMLHttpRequest on the main thread is deprecated because of its detrimental effects to the end user's experience. For more help, check https://xhr.spec.whatwg.org/.
loadScriptFile # sdk-debug.js:6582
require # sdk-debug.js:6758
syncRequire # sdk-debug.js:6624
(anonymous) # sdk-debug.js:2303
instantiate # sdk-debug.js:5686
(anonymous) # sdk-debug.js:2303
_loadCumulativeFlowChart # App.js?_dc=0.7676518726557986:11
launch # App.js?_dc=0.7676518726557986:5
_applyDefaultSettingsAndLaunch # sdk-debug.js:155148
loadSettingsAndLaunch # sdk-debug.js:155054
initComponent # sdk-debug.js:155032
constructor # sdk-debug.js:30211
callParent # sdk-debug.js:4469
constructor # sdk-debug.js:34291
callParent # sdk-debug.js:4469
constructor # sdk-debug.js:144823
callParent # sdk-debug.js:4469
constructor # sdk-debug.js:154985
constructor # sdk-debug.js:5100
(anonymous) # VM302:3
instantiate # sdk-debug.js:5692
(anonymous) # sdk-debug.js:2303
_createApp # sdk-debug.js:225546
_launchAppInViewport # sdk-debug.js:225453
(anonymous) # sdk-debug.js:225410
_loadTimeboxScope # sdk-debug.js:225541
(anonymous) # sdk-debug.js:225409
(anonymous) # sdk-debug.js:225308
(anonymous) # sdk-debug.js:10091
fire # sdk-debug.js:10046
Ext.apply.readyEvent.readyEvent.fire # sdk-debug.js:10285
fireReadyEvent # sdk-debug.js:10363
onDocumentReady # sdk-debug.js:10381
fn # sdk-debug.js:6865
onReady # sdk-debug.js:6870
Ext.onReady # sdk-debug.js:11162
_callOnReadyFns # sdk-debug.js:225304
(anonymous) # sdk-debug.js:146417
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:2260
(anonymous) # sdk-debug.js:146417
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:146435
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:2260
(anonymous) # sdk-debug.js:146417
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:2260
(anonymous) # sdk-debug.js:146417
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:146423
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:146435
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
(anonymous) # sdk-debug.js:146435
notify # sdk-debug.js:146514
complete # sdk-debug.js:146495
resolve # sdk-debug.js:146359
callback # sdk-debug.js:223192
sdk-debug.js:6583 GET http://localhost:1337/Rally/ui/chart/CumulativeFlowChart.js?_dc=1512571924759 404 (Not Found)
loadScriptFile # sdk-debug.js:6583
(the rest of this stack trace is identical to the one above)
sdk-debug.js:147611 Error: success callback for Deferred transformed result of Deferred transformed result of Deferred threw: TypeError: c is not a constructor
at eval (eval at getInstantiator (sdk-debug.js:5720), <anonymous>:3:8)
at Object.instantiate (sdk-debug.js:5692)
at Object.create (sdk-debug.js:2303)
at constructor._loadCumulativeFlowChart (App.js?_dc=0.7676518726557986:11)
at constructor.launch (App.js?_dc=0.7676518726557986:5)
at constructor._applyDefaultSettingsAndLaunch (sdk-debug.js:155148)
at constructor.loadSettingsAndLaunch (sdk-debug.js:155054)
at constructor.initComponent (sdk-debug.js:155032)
at constructor (sdk-debug.js:30211)
at constructor.callParent (sdk-debug.js:4469)
Below is my source code
Ext.define('CustomApp', {
    extend: 'Rally.app.App',
    componentCls: 'app',

    launch: function () {
        this._loadCumulativeFlowChart();
    },

    _loadCumulativeFlowChart: function () {
        var endDate = Rally.util.DateTime.fromIsoString('2017-06-30T00:00:00Z');
        var startDate = Rally.util.DateTime.add(endDate, 'day', -5);

        var cumulativeFlowChart = Ext.create('Rally.ui.chart.CumulativeFlowChart', {
            storeConfig: {
                context: {
                    workspace: '/workspace/14370822888'
                },
                filters: [
                    {
                        property: '_ItemHierarchy', value: 6342428390
                    },
                    {
                        property: '_Type', value: 'HierarchicalRequirement'
                    },
                    {
                        property: 'Children', value: null
                    }
                ]
            },
            cumulativeFlowConfig: {
                timeZone: 'America/Denver',
                aggregationType: 'count',
                groupByFieldValues: ['Defined', 'In-Progress', 'Completed', 'Accepted'],
                startDate: startDate,
                endDate: endDate
            }
        });

        this.add(cumulativeFlowChart);
    }
});
Let me know what I am doing wrong here. Thanks in advance.

That error usually happens when you try to create something that doesn't exist, in this case probably the Rally.ui.chart.CumulativeFlowChart class. Is that a component you have created yourself? If so, is it listed in the javascript section of your config.json before App.js?
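For reference, a rally-app-builder config.json usually has a javascript array listing every file the app loads, in order, so a custom component file has to appear there before the App.js that uses it. A rough sketch (the file names, SDK version, and server below are placeholders, not taken from your app):

{
    "name": "CustomApp",
    "className": "CustomApp",
    "server": "https://rally1.rallydev.com",
    "sdk": "2.1",
    "javascript": [
        "CumulativeFlowChart.js",
        "App.js"
    ],
    "css": [
        "app.css"
    ]
}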

Related

Use `sentence-transformers` inside a TensorFlow Recommenders Keras model in SageMaker

I've been going crazy for a few days over a problem I thought was trivial. My end goal is to deploy to AWS SageMaker a TensorFlow model that takes a simple string as input, calculates its embedding using a pre-trained sentence-transformers model, and then uses TensorFlow Recommenders to suggest the k nearest neighbours among a collection of embeddings I have already calculated. I would like to do this entirely from the model, including the preprocessing (tokenization).
I got the predictions working with several different approaches in my notebook. The trouble starts when I try to save my model.
The problem seems to be that HF's AutoTokenizer needs a pure Python list of strings as input, so I hit a roadblock whenever I try to save my model with tf.saved_model.save, and trying to work around this with tf.py_function (using this approach) results in problems with SageMaker.
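To make that constraint concrete, here is a tiny illustrative sketch (not part of my actual code): the tokenizer is happy with a plain Python list of strings, but while saving, Keras traces the layer with a symbolic tf.string tensor that has no concrete values for the tokenizer to work on.

import tensorflow as tf
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-mpnet-base-v2")

# Fine: a plain Python list of strings.
print(tokenizer(["Test Text Query"], padding=True, truncation=True, return_tensors="tf"))

# Not fine: during tracing/saving, Keras passes a symbolic string tensor like this,
# with no concrete values for the tokenizer to decode and iterate over.
symbolic_input = tf.keras.Input(shape=(1,), dtype=tf.string)
# tokenizer(symbolic_input)  # roughly where the save blows up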
My approaches so far:
1. THE 'I THOUGHT IT WAS SO SIMPLE'
def text_to_startup_model(startups_ids: list, startup_vectors):
    import tensorflow as tf
    import tensorflow_recommenders as tfrs
    import numpy as np
    from random import randint
    from sentence_transformers import SentenceTransformer  # needed for the encode call below

    exported_model = tfrs.layers.factorized_top_k.BruteForce(SentenceTransformer("all-mpnet-base-v2").encode)
    exported_model.index(np.array(startup_vectors), np.array(startups_ids))

    # TESTS the model
    # for some reason this seems to be needed in order to save the model :/
    # https://github.com/tensorflow/recommenders/issues/131
    test = exported_model(['Test Text Query'])
    print(test)
    return exported_model


text_to_startup_model(search_db_ids, search_db_embeddings)
# --> WORKS PERFECTLY, AS I GET SOME SUGGESTIONS

tf.saved_model.save(text_to_startup_model(search_db_ids, search_db_embeddings), export_dir="/home/nicholas/test_model_save/1")
#TypeError Traceback (most recent call last)
# /home/nicholas/Documents/Dev/Rialto-predict-1/notebooks/t2s_different_approaches.ipynb Cell 5 in <cell line: 22>()
# 19 text_to_startup_model(search_db_ids, search_db_embeddings)
# 20 #--> WORKS PERFECTLY, AS I GET SOME SUGGESTIONS
# ---> 22 tf.saved_model.save(text_to_startup_model(search_db_ids, search_db_embeddings), export_dir="/home/nicholas/test_model_save/1")
# File ~/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/tensorflow/python/saved_model/save.py:1334, in save(obj, export_dir, signatures, options)
# 1332 # pylint: enable=line-too-long
# 1333 metrics.IncrementWriteApi(_SAVE_V2_LABEL)
# -> 1334 save_and_return_nodes(obj, export_dir, signatures, options)
# 1335 metrics.IncrementWrite(write_version="2")
#
# .........
#
#
# File ~/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/tensorflow/python/eager/def_function.py:677, in Function._defun_with_scope.<locals>.wrapped_fn(*args, **kwds)
# 673 with default_graph._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access
# 674 # __wrapped__ allows AutoGraph to swap in a converted function. We give
# 675 # the function a weak reference to itself to avoid a reference cycle.
# 676 with OptionalXlaContext(compile_with_xla):
# --> 677 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
# 678 return out
# File ~/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/tensorflow/python/framework/func_graph.py:1147, in func_graph_from_py_func.<locals>.autograph_handler(*args, **kwargs)
# 1145 except Exception as e: # pylint:disable=broad-except
# 1146 if hasattr(e, "ag_error_metadata"):
# -> 1147 raise e.ag_error_metadata.to_exception(e)
# 1148 else:
# 1149 raise
# TypeError: in user code:
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/keras/saving/saving_utils.py", line 138, in _wrapped_model *
# outputs = model(*args, **kwargs)
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler **
# raise e.with_traceback(filtered_tb) from None
# TypeError: Exception encountered when calling layer "brute_force_3" (type BruteForce).
# in user code:
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/tensorflow_recommenders/layers/factorized_top_k.py", line 567, in call *
# queries = self.query_model(queries)
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py", line 160, in encode *
# features = self.tokenize(sentences_batch)
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/sentence_transformers/SentenceTransformer.py", line 318, in tokenize *
# return self._first_module().tokenize(texts)
# File "/home/nicholas/Documents/Dev/Rialto-predict-1/venv/lib/python3.10/site-packages/sentence_transformers/models/Transformer.py", line 102, in tokenize *
# batch1.append(text_tuple[0])
# TypeError: 'NoneType' object is not subscriptable
# ...
# Call arguments received:
# • queries=['None']
# • k=None
2. THE tf.py_function
As far as I understand, the problem with the first approach is that it has no knowledge of the input type/value. This second approach, from Use `sentence-transformers` inside of a keras model, was supposedly going to work, as it uses tf.py_function to accept a list of strings as input without complaining.
def approach_2(startups_ids: list, startup_vectors):
    import tensorflow as tf
    import tensorflow_recommenders as tfrs
    import numpy as np
    from transformers import MPNetTokenizer, TFMPNetModel

    # Here it loads the specific pre-trained model we are using for Rialto
    tokenizer = MPNetTokenizer.from_pretrained(
        "sentence-transformers/all-mpnet-base-v2"
    )
    model = TFMPNetModel.from_pretrained(
        "sentence-transformers/all-mpnet-base-v2", from_pt=True
    )

    class SBert(tf.keras.layers.Layer):
        def __init__(self, tokenizer, model):
            super(SBert, self).__init__()
            self.tokenizer = tokenizer
            self.model = model

        def tf_encode(self, inputs):
            def encode(inputs):
                inputs = [x[0].decode("utf-8") for x in inputs.numpy()]
                outputs = self.tokenizer(
                    inputs, padding=True, truncation=True, return_tensors="tf"
                )
                return outputs["input_ids"], outputs["attention_mask"]

            return tf.py_function(
                func=encode, inp=[inputs], Tout=[tf.int32, tf.int32]
            )

        def process(self, i, a):
            def __call(i, a):
                model_output = self.model(
                    {"input_ids": i.numpy(), "attention_mask": a.numpy()}
                )
                return model_output[0]

            return tf.py_function(func=__call, inp=[i, a], Tout=[tf.float32])

        def mean_pooling(self, model_output, attention_mask):
            token_embeddings = tf.squeeze(tf.stack(model_output), axis=0)
            input_mask_expanded = tf.cast(
                tf.broadcast_to(
                    tf.expand_dims(attention_mask, -1), tf.shape(token_embeddings)
                ),
                tf.float32,
            )
            a = tf.math.reduce_sum(token_embeddings * input_mask_expanded, axis=1)
            b = tf.clip_by_value(
                tf.math.reduce_sum(input_mask_expanded, axis=1),
                1e-9,
                tf.float32.max,
            )
            embeddings = a / b
            embeddings, _ = tf.linalg.normalize(embeddings, 2, axis=1)
            return embeddings

        def call(self, inputs):
            input_ids, attention_mask = self.tf_encode(inputs)
            model_output = self.process(input_ids, attention_mask)
            embeddings = self.mean_pooling(model_output, attention_mask)
            return embeddings

    # Uses the keras-ified model in a Keras model
    sbert = SBert(tokenizer, model)
    inputs = tf.keras.layers.Input((1,), dtype=tf.string)
    outputs = sbert(inputs)
    model = tf.keras.Model(inputs, outputs)

    # Implements the model we just build for top KNN retrieval, from the pool of pre-calculated startups embeddings.
    exported_model = tfrs.layers.factorized_top_k.BruteForce(model)
    exported_model.index(np.array(startup_vectors), np.array(startups_ids))

    # TESTS the model
    # for some reason this seems to be needed in order to save the model :/
    # https://github.com/tensorflow/recommenders/issues/131
    print(exported_model(tf.constant(["'Test Text Query'"])))
    return exported_model


model_to_store_1 = approach_2(search_db_ids, search_db_embeddings)
tf.saved_model.save(model_to_store_1, export_dir="/home/nicholas/test_model_save/2")
# THIS ONE WORKS LIKE A CHARM, saving the model and everything. Deploy on sagemaker is successful.
# FAILS TO WORK ON SAGEMAKER. BELOW THE LOGS WHEN THE MODEL IS CALLED
# ModelError: An error occurred (ModelError) when calling the InvokeEndpoint operation: Received client error (400) from model with message "{
# "error": "No OpKernel was registered to support Op 'EagerPyFunc' used by {{node StatefulPartitionedCall/brute_force/model/s_bert/EagerPyFunc}} with these attrs: [is_async=false, Tin=[DT_STRING], _output_shapes=[<unknown>, <unknown>], Tout=[DT_INT32, DT_INT32], token=\"pyfunc_4\"]\nRegistered devices: [CPU]\nRegistered kernels:\n <no registered kernels>\n\n\t [[StatefulPartitionedCall/brute_force/model/s_bert/EagerPyFunc]]\n\t [[StatefulPartitionedCall]]"
# }". See https://us-west-2.console.aws.amazon.com/cloudwatch/home?region=us-west-2#logEventViewer:group=/aws/sagemaker/Endpoints/rialto-t2s-model-endpoint in account 634470116418 for more information
As you can see from the log, the problem seems to be with eager mode and py_functions. I googled around and found absolutely nothing on how to address this issue.
3. THE Classes approach
I've tried implementing something building upon this article, but I am running into issues similar to the first approach: when I go to save the model, the expected input clashes with the requirements of the tokenizer.
EDIT 1 - here is a Colab showcasing the approach: https://colab.research.google.com/drive/1gibFdEoHTs0hzD5yiXzLT_-asmilUoAQ?usp=sharing#scrollTo=TibAssWm3D5e
All of this journey triggered some questions:
Question 1: Is this even best practice? Should I serve my model the tokenized sentences as a tensor instead?
Question 2: How the hell do I make it work? :)

Chrome 67 and Dojo

1. Which version of Dojo works fine with Chrome 67 or higher?
2. Recently, after the Chrome upgrade, dojo.js does not load and throws the following errors:
dojo.js:15 Error: multipleDefine
at _f (dojo.js:15)
at _f3 (dojo.js:15)
at def (dojo.js:15)
at dojo_en-us.js:1
(anonymous) # dojo.js:15
(anonymous) # dojo.js:15
_9 # dojo.js:15
req.signal # dojo.js:15
_f3 # dojo.js:15
def # dojo.js:15
(anonymous) # dojo_en-us.js:1
dojo.js:15 Error: multipleDefine
at _f (dojo.js:15)
at _f3 (dojo.js:15)
at def (dojo.js:15)
at Grid.js.uncompressed.js:11
(anonymous) # dojo.js:15
(anonymous) # dojo.js:15
_9 # dojo.js:15
req.signal # dojo.js:15
_f3 # dojo.js:15
def # dojo.js:15
(anonymous) # Grid.js.uncompressed.js:11
dojo.js:15 Error: multipleDefine
at _f (dojo.js:15)
at _f3 (dojo.js:15)
at def (dojo.js:15)
at OnDemandList.js.uncompressed.js:70
(anonymous) # dojo.js:15
(anonymous) # dojo.js:15
_9 # dojo.js:15
req.signal # dojo.js:15
_f3 # dojo.js:15
def # dojo.js:15
(anonymous) # OnDemandList.js.uncompressed.js:70
dojo.js:15 Error: multipleDefine
at _f (dojo.js:15)
at _f3 (dojo.js:15)
at def (dojo.js:15)
at List.js.uncompressed.js:51
(anonymous) # dojo.js:15
(anonymous) # dojo.js:15
_9 # dojo.js:15
req.signal # dojo.js:15
_f3 # dojo.js:15
def # dojo.js:15
(anonymous) # List.js.uncompressed.js:51
VM185:1 Uncaught ReferenceError: normal is not defined
at eval (eval at f (css.js.uncompressed.js:1), <anonymous>:1:1)
at f (css.js.uncompressed.js:1)
at Object.load (css.js.uncompressed.js:1)
at _e4 (dojo.js:15)
at dojo.js:15
at _9 (dojo.js:15)
at _ce (dojo.js:15)
at _35 (dojo.js:15)
at _36 (dojo.js:15)
at dojo.js:15
We are using Dojo version 1.9.1.
Could anyone please advise?
It was working perfectly fine with previous versions of the Chrome browser, and we do not see any such error in IE with the same version of Dojo.
Thanks.
1. From my point of view, 1.9 should work. I use the latest version in my project, but I tried some basic things in a JSFiddle and had no problems. My Chrome version is 67.
2. Just an idea of where to look... As far as I can see, this error can happen when you have multiple define functions in one file (module). At least, I tried that and got the same error.
if(module.injected === arrived){
    signal(error, makeError("multipleDefine", module));
    return module;
}
This code is from dojo.js. You could place a breakpoint there and check what's wrong with the module.

Vue.js: some errors/warnings are quite difficult to debug

While running a Vue.js app, I get a warning in the console and I cannot determine where it is coming from in the source code, or at least which component raises it.
Here is the console output:
TypeError: Cannot read property 'length' of undefined
at _c.attrs.rules (eval at ./node_modules/vue-loader/lib/template-compiler/index.js?{"id":"data-v-012a975a","hasScoped":true,"optionsId":"0","buble":{"transforms":{}}}!./node_modules/vue-loader/lib/selector.js?type=template&index=0!./src/components/Contact_Form.vue (0.js:300), <anonymous>:252:41)
at VueComponent.validate (vuetify.js?dc48:8564)
at VueComponent.mounted (vuetify.js?dc48:8535)
at callHook (vue.runtime.esm.js?ff9b:2917)
at insert (vue.runtime.esm.js?ff9b:4154)
at Object.invoker [as insert] (vue.runtime.esm.js?ff9b:2019)
at invokeInsertHook (vue.runtime.esm.js?ff9b:5956)
at VueComponent.patch [as __patch__] (vue.runtime.esm.js?ff9b:6175)
at VueComponent.Vue._update (vue.runtime.esm.js?ff9b:2666)
at VueComponent.updateComponent (vue.runtime.esm.js?ff9b:2784)
logError # vue.runtime.esm.js?ff9b:1737
globalHandleError # vue.runtime.esm.js?ff9b:1728
handleError # vue.runtime.esm.js?ff9b:1717
callHook # vue.runtime.esm.js?ff9b:2919
insert # vue.runtime.esm.js?ff9b:4154
invoker # vue.runtime.esm.js?ff9b:2019
invokeInsertHook # vue.runtime.esm.js?ff9b:5956
patch # vue.runtime.esm.js?ff9b:6175
Vue._update # vue.runtime.esm.js?ff9b:2666
updateComponent # vue.runtime.esm.js?ff9b:2784
get # vue.runtime.esm.js?ff9b:3138
run # vue.runtime.esm.js?ff9b:3215
flushSchedulerQueue # vue.runtime.esm.js?ff9b:2977
(anonymous) # vue.runtime.esm.js?ff9b:1833
flushCallbacks # vue.runtime.esm.js?ff9b:1754
Promise.then (async)
microTimerFunc # vue.runtime.esm.js?ff9b:1802
nextTick # vue.runtime.esm.js?ff9b:1846
queueWatcher # vue.runtime.esm.js?ff9b:3064
update # vue.runtime.esm.js?ff9b:3205
notify # vue.runtime.esm.js?ff9b:693
reactiveSetter # vue.runtime.esm.js?ff9b:1010
(anonymous) # vue-router.esm.js?fe87:2508
(anonymous) # vue-router.esm.js?fe87:2507
updateRoute # vue-router.esm.js?fe87:1997
(anonymous) # vue-router.esm.js?fe87:1875
(anonymous) # vue-router.esm.js?fe87:1984
step # vue-router.esm.js?fe87:1714
step # vue-router.esm.js?fe87:1721
runQueue # vue-router.esm.js?fe87:1725
(anonymous) # vue-router.esm.js?fe87:1979
step # vue-router.esm.js?fe87:1714
(anonymous) # vue-router.esm.js?fe87:1718
(anonymous) # vue-router.esm.js?fe87:1964
(anonymous) # vue-router.esm.js?fe87:1757
(anonymous) # vue-router.esm.js?fe87:1833
Promise.then (async)
(anonymous) # vue-router.esm.js?fe87:1780
(anonymous) # vue-router.esm.js?fe87:1801
(anonymous) # vue-router.esm.js?fe87:1801
flatMapComponents # vue-router.esm.js?fe87:1800
(anonymous) # vue-router.esm.js?fe87:1736
iterator # vue-router.esm.js?fe87:1943
step # vue-router.esm.js?fe87:1717
step # vue-router.esm.js?fe87:1721
runQueue # vue-router.esm.js?fe87:1725
confirmTransition # vue-router.esm.js?fe87:1972
transitionTo # vue-router.esm.js?fe87:1874
init # vue-router.esm.js?fe87:2494
beforeCreate # vue-router.esm.js?fe87:540
callHook # vue.runtime.esm.js?ff9b:2917
Vue._init # vue.runtime.esm.js?ff9b:4622
Vue # vue.runtime.esm.js?ff9b:4725
(anonymous) # main.js?1c90:48
./src/main.js # app.js:1735
__webpack_require__ # app.js:708
fn # app.js:113
0 # app.js:1813
__webpack_require__ # app.js:708
(anonymous) # app.js:806
(anonymous) # app.js:809
It's from ./src/components/Contact_Form.vue. Look at the second line of the stack trace.

How to select parameters for tf.contrib.learn.DNNRegressor

I am doing a DNN regression analysis with TensorFlow's DNNRegressor, but I don't know how to adjust the parameters to get a good neural network model.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
                                          # hidden_units=[10, 128],  # loss:42.252525
                                          # hidden_units=[50, 320],  # 7.66
                                          # hidden_units=[50, 640],  # 22.162941
                                          # hidden_units=[100, 640],  # 5.249118
                                          # hidden_units=[100, 320],  # 6.54
                                          # hidden_units=[300, 640],  # 41.01174
                                          # hidden_units=[300, 896],  # 17.183
                                          # hidden_units=[50, 100, 640],  # 17.760363
                                          # hidden_units=[50, 320, 640],  # 16.38122
                                          # hidden_units=[50, 320, 128, 50],  # 52.36839
                                          # hidden_units=[640, 100],  # 53
                                          hidden_units=[100, 320, 640],  # 22.162941
                                          model_dir='./models/dnnregressor',
                                          weight_column_name=None,
                                          optimizer=None,
                                          activation_fn=tf.nn.relu,
                                          dropout=None,
                                          gradient_clip_norm=None,
                                          enable_centered_bias=False,
                                          config=config,
                                          feature_engineering_fn=None,
                                          label_dimension=4,
                                          embedding_lr_multipliers=None,
                                          input_layer_min_slice_size=None)
My dataset looks like this.
df = conv2_dataframe(CmdS_X=CmdS_X, CmdS_Y=CmdS_Y, CmdS_Z=CmdS_Z, CmdV=CmdV, halfV=halfV,
                     ActS_X=ActS_X, ActS_Y=ActS_Y, ActS_Z=ActS_Z, ActV=ActV)
labels = ['ActS_X', 'ActS_Y', 'ActS_Z', 'ActV']
dnnRegressor(df, labels)
shape: (12686, 9)
Data description diagram
There is no simple answer. It depends on your data characteristics and the exact problem; this is generally referred to as "hyper-parameter tuning".
If you want a short answer: do a proper training/validation/test dataset split and use whatever parameters give the best results on your validation data, keeping the test set for a final check.
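If it helps, below is a minimal sketch of that kind of search over hidden_units, reusing the feature_cols from your snippet. The candidate layouts are arbitrary, and train_input_fn / val_input_fn are hypothetical input functions you would write (for example with tf.estimator.inputs.pandas_input_fn) to feed the training and validation splits.

import tensorflow as tf

candidate_layouts = [[50, 320], [100, 320], [100, 640], [100, 320, 640]]

best_loss, best_layout = float("inf"), None
for layout in candidate_layouts:
    # One model directory per candidate so checkpoints don't get mixed up.
    regressor = tf.contrib.learn.DNNRegressor(
        feature_columns=feature_cols,
        hidden_units=layout,
        label_dimension=4,
        model_dir='./models/dnnregressor_' + '_'.join(map(str, layout)))
    regressor.fit(input_fn=train_input_fn, steps=2000)

    # Model selection happens on the validation split, not on the test split.
    metrics = regressor.evaluate(input_fn=val_input_fn)
    if metrics["loss"] < best_loss:
        best_loss, best_layout = metrics["loss"], layout

print("best hidden_units:", best_layout, "validation loss:", best_loss)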

How to export test values in TensorFlow

I'm using code similar to this as the main train/test dataset loader and this to run the model.
I can print the predictions in JSON, but I can't print the test values to see which prediction refers to which test example.
How can I do that? I would like to export the tested data.
Here is my code for importing the data:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("mes", [""]),
("marca", [""]),
("linha", [""]),
("grupo", [""]),
("capacidade", [0.0]),
("grade", [0.0]),
("custo", [0.0]),
("benef", [""]),
("desenvolvimento", [""]),
("leadtime", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def dataset(file_name="treino.csv", y_name="leadtime", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = file_name
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()),field_delim=';')
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = file_name
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="leadtime", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
And here is my code to train, evaluate, and predict:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression using the DNNRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import importar_dados # pylint: disable=g-bad-import-order
STEPS = 100
LT_NORM_FACTOR = 199
def my_dnn_regression_fn(features, labels, mode, params):
"""A model function implementing DNN regression for a custom Estimator."""
# Extract the input into a dense layer, according to the feature_columns.
top = tf.feature_column.input_layer(features, params["feature_columns"])
# Iterate over the "hidden_units" list of layer sizes, default is [20].
for units in params.get("hidden_units", [100]):
# Add a hidden layer, densely connected on top of the previous layer.
top = tf.layers.dense(inputs=top, units=units, activation=tf.nn.relu)
# Connect a linear output layer on top.
output_layer = tf.layers.dense(inputs=top, units=1)
# Reshape the output layer to a 1-dim Tensor to return predictions
predictions = tf.squeeze(output_layer, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
# In `PREDICT` mode we only need to return predictions.
return tf.estimator.EstimatorSpec(
mode=mode, predictions={"leadtime": predictions})
# Calculate loss using mean squared error
average_loss = tf.losses.mean_squared_error(labels, predictions)
# Pre-made estimators use the total_loss instead of the average,
# so report total_loss for compatibility.
batch_size = tf.shape(labels)[0]
total_loss = tf.to_float(batch_size) * average_loss
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = params.get("optimizer", tf.train.AdamOptimizer)
optimizer = optimizer(params.get("learning_rate", None))
train_op = optimizer.minimize(
loss=average_loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(
mode=mode, loss=total_loss, train_op=train_op)
# In evaluation mode we will calculate evaluation metrics.
assert mode == tf.estimator.ModeKeys.EVAL
# Calculate root mean squared error
rmse = tf.metrics.root_mean_squared_error(labels, predictions)
# Add the rmse to the collection of evaluation metrics.
eval_metrics = {"rmse": rmse}
return tf.estimator.EstimatorSpec(
mode=mode,
# Report sum of error for compatibility with pre-made estimators
loss=total_loss,
eval_metric_ops=eval_metrics)
def main(argv):
"""Builds, trains, and evaluates the model."""
assert len(argv) == 1
(train, test) = importar_dados.dataset()
# Switch the labels to units of thousands for better convergence.
def normalize_lt(features, labels):
return features, labels / LT_NORM_FACTOR
train = train.map(normalize_lt)
test = test.map(normalize_lt)
# Build the training input_fn.
def input_train():
return (
# Shuffling with a buffer larger than the data set ensures
# that the examples are well mixed.
train.shuffle(1000).batch(128)
# Repeat forever
.repeat().make_one_shot_iterator().get_next())
# Build the validation input_fn.
def input_test():
return (test.shuffle(1000).batch(128)
.make_one_shot_iterator().get_next())
# The first way assigns a unique weight to each category. To do this you must
# specify the category's vocabulary (values outside this specification will
# receive a weight of zero). Here we specify the vocabulary using a list of
# options. The vocabulary can also be specified with a vocabulary file (using
# `categorical_column_with_vocabulary_file`). For features covering a
# range of positive integers use `categorical_column_with_identity`.
marca_vocab = ["ANIMALE","FABULA","FARM","A.BRAND","F.Y.I","MAS ANIMALE"]
marca = tf.feature_column.categorical_column_with_vocabulary_list(
key="marca", vocabulary_list=marca_vocab)
mes_vocab = ["1","2","3","4","5","6","7","8","9","10","11","12"]
mes = tf.feature_column.categorical_column_with_vocabulary_list(
key="mes", vocabulary_list=mes_vocab)
linha_vocab = ["A+","SEDA","TRICOLINE","MALHA","JNS","SARJA","TECIDO","TECIDO PLANO","DESFILE ABRAND","ARTESANAL",
"TREND","NOITE","BB","JEANS","HANDMADE","ESI","ALFAIATARIA","PRO","COURO","EST","CONCEPT","OFF PREMIUM",
"ACESSORIOS","MOVE","NOITE CASUAL","TAT","RESORT","EMI","EMT","FITNESS","BALADA","HOME VESTUARIO",
"UNIFORME","BOT","VTL","TECIDO PLANO BASICO","HOM","PRAIA","INTIMATES","BTP","TRICOT","QUERO","EMB",
"ATL","BMA","SAPATO","PRINCESS","BLUE","BOLSA","ESB","TECIDO PLANO ELABORADO","NOVOS DESEJOS","FESTA",
"FANTASIA","MARKETING","ACE","TECIDO PLANO ESTAMPADO","ADMINISTRATIVO","FAN","TECIDO PLANO LISO","AGA",
"CDO","AGE","BIJOUX","COBRANDING","NEUTROS","ESM"]
linha = tf.feature_column.categorical_column_with_vocabulary_list(
key="linha", vocabulary_list=linha_vocab)
grupo_vocab = ["VESTIDOS","TOP","TOP NEUTRO","TOP ELABORADO","SHORT","BLUSA","TOP BASICO","BOTTOM BASICO","VESTIDO BASICO",
"BLUSA ESTAMPADA","BOTTOM","MACACAO","TOP FUN","OVERTOPS","VESTIDO ESTAMPADO","BOTTOM ESTAMPADO",
"BOTTOM ELABORADO","CALCAS","CAMISA","SAIAS","AGASALHO","CALCA ESTAMPADA","ACESSORIOS","DIVERSOS",
"CINTOS","BIQUINI","TOP TECIDO","BIQUINI/MAIO","VESTUARIO","OVERTOP ESTAMPADO","CALCINHA","BERMUDA",
"LINGERIE","MAIO","VESTIDOS ELABORADO","OUTROS","SAPATOS","BOLSAS","CAMISA ESTAMPADA","LENCO","CHAPEU",
"FANTASIA","OVERTOP PESADO","TOP LEVE","HOME","PRAIA","OVERTOP LEVE","OVERTOP ELABORADO","STREET","ESPECIAL",
"PIJAMA","CANGA","BRINCO","SOUTIEN","OVERTOP BASICO","UNDERWEAR"]
grupo = tf.feature_column.categorical_column_with_vocabulary_list(
key="grupo", vocabulary_list=grupo_vocab)
benef_vocab = ["S","N"]
benef = tf.feature_column.categorical_column_with_vocabulary_list(
key="benef", vocabulary_list=benef_vocab)
desenvolvimento_vocab = ["INT","EX"]
desenvolvimento = tf.feature_column.categorical_column_with_vocabulary_list(
key="desenvolvimento", vocabulary_list=desenvolvimento_vocab)
# make = tf.feature_column.categorical_column_with_hash_bucket(
# key="make", hash_bucket_size=50)
feature_columns = [
tf.feature_column.indicator_column(mes),
tf.feature_column.indicator_column(marca),
tf.feature_column.indicator_column(linha),
tf.feature_column.indicator_column(grupo),
tf.feature_column.numeric_column(key="capacidade"),
tf.feature_column.numeric_column(key="grade"),
tf.feature_column.numeric_column(key="custo"),
# Since this is a DNN model, convert categorical columns from sparse
# to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(benef),
tf.feature_column.indicator_column(desenvolvimento)#,
# Or use an `embedding_column` to create a trainable vector for each
# index.
# tf.feature_column.embedding_column(make, dimension=3),
]
# Build a custom Estimator, using the model_fn.
# `params` is passed through to the `model_fn`.
model = tf.estimator.Estimator(
model_fn=my_dnn_regression_fn,
params={
"feature_columns": feature_columns,
"learning_rate": 0.001,
"optimizer": tf.train.AdamOptimizer,
"hidden_units": [100,500,100]
},
model_dir="resultados")
# Train the model.
model.train(input_fn=input_train, steps=STEPS)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=input_test)
pred_result = model.predict(input_fn = input_test,
predict_keys=None,
hooks=None,
checkpoint_path=None)
sess = tf.Session()
# Print the Root Mean Square Error (RMSE).
print("\n" + 80 * "*")
print("\nRMS error for the test set: {:.0f} Dias"
.format(LT_NORM_FACTOR * eval_result["rmse"]))
#prediction_df = pd.DataFrame(list(pred_result))
#prediction_df.to_csv('prediction.csv')
print(list(pred_result))
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)