How to deploy a Column View artifact on Cloud Foundry (SQL)

I have a Column View in SAP HANA SQL that is used in a SQL procedure:
CREATE COLUMN VIEW "ABC_COLUMN_VIEW" TYPE hierarchy WITH PARAMETERS (
'hierarchyDefinition'='{
"MULTIPARENT":true,
"NODETYPE":"STRING",
"NODETYPE_INFERRED":"STRING",
"RUNTIMEOBJECTTYPE":"tree",
"SOURCEQUERY":"SELECT "A_COLUMN", "B_COLUMN" FROM "ABC_TABLE"",
"SOURCEQUERYSCHEMA":"ABC_SCHEMA",
"SOURCETYPE":"RECURSIVE"}')
and I'm trying to deploy it on Cloud Foundry, but I see that the .hdiconfig file doesn't have an entry for column views:
{
"plugin_version" : "2.0.0.0",
"file_suffixes" : {
"hdbsynonym" : {
"plugin_name" : "com.sap.hana.di.synonym"
},
"hdbsynonymconfig" : {
"plugin_name" : "com.sap.hana.di.synonym.config"
},
"hdbtable" : {
"plugin_name" : "com.sap.hana.di.table"
},
"hdbdropcreatetable" : {
"plugin_name" : "com.sap.hana.di.dropcreatetable"
},
"hdbvirtualtable" : {
"plugin_name" : "com.sap.hana.di.virtualtable"
},
"hdbvirtualtableconfig" : {
"plugin_name" : "com.sap.hana.di.virtualtable.config"
},
"hdbindex" : {
"plugin_name" : "com.sap.hana.di.index"
},
"hdbfulltextindex" : {
"plugin_name" : "com.sap.hana.di.fulltextindex"
},
"hdbconstraint" : {
"plugin_name" : "com.sap.hana.di.constraint"
},
"hdbtrigger" : {
"plugin_name" : "com.sap.hana.di.trigger"
},
"hdbstatistics" : {
"plugin_name" : "com.sap.hana.di.statistics"
},
"hdbview" : {
"plugin_name" : "com.sap.hana.di.view"
},
"hdbcalculationview" : {
"plugin_name" : "com.sap.hana.di.calculationview"
},
"hdbprojectionview" : {
"plugin_name" : "com.sap.hana.di.projectionview"
},
"hdbprojectionviewconfig" : {
"plugin_name" : "com.sap.hana.di.projectionview.config"
},
"hdbresultcache" : {
"plugin_name" : "com.sap.hana.di.resultcache"
},
"hdbcds" : {
"plugin_name" : "com.sap.hana.di.cds"
},
"hdbfunction" : {
"plugin_name" : "com.sap.hana.di.function"
},
"hdbvirtualfunction" : {
"plugin_name" : "com.sap.hana.di.virtualfunction"
},
"hdbvirtualfunctionconfig" : {
"plugin_name" : "com.sap.hana.di.virtualfunction.config"
},
"hdbhadoopmrjob" : {
"plugin_name" : "com.sap.hana.di.virtualfunctionpackage.hadoop"
},
"jar" : {
"plugin_name" : "com.sap.hana.di.virtualfunctionpackage.hadoop"
},
"hdbtabletype" : {
"plugin_name" : "com.sap.hana.di.tabletype"
},
"hdbprocedure" : {
"plugin_name" : "com.sap.hana.di.procedure"
},
"hdbvirtualprocedure" : {
"plugin_name" : "com.sap.hana.di.virtualprocedure"
},
"hdbvirtualprocedureconfig" : {
"plugin_name" : "com.sap.hana.di.virtualprocedure.config"
},
"hdbafllangprocedure" : {
"plugin_name" : "com.sap.hana.di.afllangprocedure"
},
"hdblibrary" : {
"plugin_name" : "com.sap.hana.di.library"
},
"hdbsequence" : {
"plugin_name" : "com.sap.hana.di.sequence"
},
"hdbrole" : {
"plugin_name" : "com.sap.hana.di.role"
},
"hdbroleconfig" : {
"plugin_name" : "com.sap.hana.di.role.config"
},
"hdbstructuredprivilege" : {
"plugin_name" : "com.sap.hana.di.structuredprivilege"
},
"hdbanalyticprivilege" : {
"plugin_name" : "com.sap.hana.di.analyticprivilege"
},
"hdbtabledata" : {
"plugin_name" : "com.sap.hana.di.tabledata"
},
"csv" : {
"plugin_name" : "com.sap.hana.di.tabledata.source"
},
"properties" : {
"plugin_name" : "com.sap.hana.di.tabledata.properties"
},
"tags" : {
"plugin_name" : "com.sap.hana.di.tabledata.properties"
},
"hdbgraphworkspace" : {
"plugin_name" : "com.sap.hana.di.graphworkspace"
},
"hdbflowgraph" : {
"plugin_name" : "com.sap.hana.di.flowgraph"
},
"hdbreptask" : {
"plugin_name" : "com.sap.hana.di.reptask"
},
"hdbsearchruleset" : {
"plugin_name" : "com.sap.hana.di.searchruleset"
},
"hdbtextconfig" : {
"plugin_name" : "com.sap.hana.di.textconfig"
},
"hdbtextdict" : {
"plugin_name" : "com.sap.hana.di.textdictionary"
},
"hdbtextrule" : {
"plugin_name" : "com.sap.hana.di.textrule"
},
"hdbtextinclude" : {
"plugin_name" : "com.sap.hana.di.textrule.include"
},
"hdbtextlexicon" : {
"plugin_name" : "com.sap.hana.di.textrule.lexicon"
},
"hdbtextminingconfig" : {
"plugin_name" : "com.sap.hana.di.textminingconfig"
},
"txt" : {
"plugin_name" : "com.sap.hana.di.copyonly"
}
}
}
So my question is: can I add a column view artifact somehow so that I can push to Cloud Foundry, or is there now another way to implement a hierarchy?

You're right, there isn't an HDI content type for column views, because these are runtime objects.
The column view you posted looks like it belongs to a hierarchy view, which is a kind of column view. In HANA 2 you can model hierarchies with SQL hierarchy functions (for example in the Web IDE), so that's what I'd recommend trying here.
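For illustration, here's a minimal sketch of that approach, assuming "A_COLUMN" holds the node key and "B_COLUMN" the parent key (swap them if the roles are reversed). A statement like this can live in a regular deployable HDI artifact, such as an .hdbview or .hdbprocedure:
-- sketch: SQL hierarchy function in place of the hierarchy column view
SELECT node_id,
       parent_id,
       hierarchy_rank,           -- generated columns describing the tree
       hierarchy_level
  FROM HIERARCHY (
         SOURCE (
           SELECT "A_COLUMN" AS node_id,    -- assumption: child key
                  "B_COLUMN" AS parent_id   -- assumption: parent key
             FROM "ABC_TABLE"
         )
         START WHERE parent_id IS NULL      -- roots of the recursive hierarchy
       );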

Related

Mapping ElasticSearch apache module field

I am new to ES and I am struggling with a small problem.
I integrated the Metricbeat Apache module with ES and it works fine.
The problem is that the Metricbeat Apache module reports Apache's web traffic in KB (field apache.status.total_kbytes), whereas I would like to create my own field named "apache.status.total_mbytes".
I am trying to create a new mapping via the Dev Console, using the following API commands:
PUT /metricbeat-7.2.0/_mapping
{
"settings":{
},
"mappings" : {
"apache.status.total_mbytes" : {
"full_name" : "apache.status.total_mbytes",
"mapping" : {
"total_mbytes" : {
"type" : "long"
}
}
}
}
}
Still ES returns the following error:
{
"error" : {
"root_cause" : [
{
"type" : "mapper_parsing_exception",
"reason" : "Root mapping definition has unsupported parameters: [settings : {}] [mappings : {apache.status.total_mbytes={mapping={total_mbytes={type=long}}, full_name=apache.status.total_mbytes}}]"
}
],
"type" : "mapper_parsing_exception",
"reason" : "Root mapping definition has unsupported parameters: [settings : {}] [mappings : {apache.status.total_mbytes={mapping={total_mbytes={type=long}}, full_name=apache.status.total_mbytes}}]"
},
"status" : 400
}
FYI, the following may shed some light:
GET /metricbeat-*/_mapping/field/apache.status.total_kbytes
Returns
{
"metricbeat-7.9.2-2020.10.06-000001" : {
"mappings" : {
"apache.status.total_kbytes" : {
"full_name" : "apache.status.total_kbytes",
"mapping" : {
"total_kbytes" : {
"type" : "long"
}
}
}
}
},
"metricbeat-7.2.0-2020.10.05-000001" : {
"mappings" : {
"apache.status.total_kbytes" : {
"full_name" : "apache.status.total_kbytes",
"mapping" : {
"total_kbytes" : {
"type" : "long"
}
}
}
}
}
}
What am I missing? Is the _mapping command wrong?
Thanks in advance.
A working example:
Create new index
PUT /metricbeat-7.2.0
{
"settings": {},
"mappings": {
"properties": {
"apache.status.total_kbytes": {
"type": "long"
}
}
}
}
Then GET metricbeat-7.2.0/_mapping/field/apache.status.total_kbytes will result in (same as your example):
{
"metricbeat-7.2.0" : {
"mappings" : {
"apache.status.total_kbytes" : {
"full_name" : "apache.status.total_kbytes",
"mapping" : {
"total_kbytes" : {
"type" : "long"
}
}
}
}
}
}
Now if you want to add a new field to an existing mapping, use the API this way:
Update an existing index
PUT /metricbeat-7.2.0/_mapping
{
"properties": {
"total_mbytes": {
"type": "long"
}
}
}
Then GET metricbeat-7.2.0/_mapping will show you the updated mapping:
{
"metricbeat-7.2.0" : {
"mappings" : {
"properties" : {
"apache" : {
"properties" : {
"status" : {
"properties" : {
"total_kbytes" : {
"type" : "long"
}
}
}
}
},
"total_mbytes" : {
"type" : "long"
}
}
}
}
}
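If the new field should sit under apache.status like the original (rather than at the root), the dotted path works as well; Elasticsearch expands it into the nested object structure:
PUT /metricbeat-7.2.0/_mapping
{
  "properties" : {
    "apache.status.total_mbytes" : {
      "type" : "long"
    }
  }
}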
Also, take a look at the Put Mapping API.
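One last note: the mapping only declares the new field; it does not populate it. If the goal is to derive apache.status.total_mbytes from the existing apache.status.total_kbytes at ingest time, one option is an ingest pipeline with a script processor. A sketch (the pipeline name is mine, and the pipeline would still need to be attached to the index via index.default_pipeline or to the Beat's output settings):
PUT _ingest/pipeline/apache-kbytes-to-mbytes
{
  "description" : "derive apache.status.total_mbytes from total_kbytes",
  "processors" : [
    {
      "script" : {
        "source" : "if (ctx.apache?.status?.total_kbytes != null) { ctx.apache.status.total_mbytes = ctx.apache.status.total_kbytes / 1024 }"
      }
    }
  ]
}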

Setting API Key required to true using serverless.template in AWS API Gateway

I am deploying an ASP.NET Core project on AWS Lambda and I am struggling with making the API key required.
Here is my JSON template:
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Transform" : "AWS::Serverless-2016-10-31",
"Description" : "An AWS Serverless Application that uses the ASP.NET Core framework running in Amazon Lambda.",
"Parameters" : {
"ShouldCreateBucket" : {
"Type" : "String",
"AllowedValues" : ["true", "false"],
"Description" : "If true then the S3 bucket that will be proxied will be created with the CloudFormation stack."
},
"BucketName" : {
"Type" : "String",
"Description" : "Name of S3 bucket that will be proxied. If left blank a name will be generated.",
"MinLength" : "0"
}
},
"Conditions" : {
"CreateS3Bucket" : {"Fn::Equals" : [{"Ref" : "ShouldCreateBucket"}, "true"]},
"BucketNameGenerated" : {"Fn::Equals" : [{"Ref" : "BucketName"}, ""]}
},
"Resources" : {
"AspNetCoreFunction" : {
"Type" : "AWS::Serverless::Function",
"Properties": {
"Handler": "SmartClockAPI::SmartClockAPI.LambdaEntryPoint::FunctionHandlerAsync",
"Runtime": "dotnetcore2.1",
"CodeUri": "",
"MemorySize": 256,
"Timeout": 30,
"Role": null,
"Policies": [ "AWSLambdaFullAccess","AmazonCognitoPowerUser","AmazonAPIGatewayAdministrator"],
"Environment" : {
"Variables" : {
"AppS3Bucket" : { "Fn::If" : ["CreateS3Bucket", {"Ref":"Bucket"}, { "Ref" : "BucketName" } ] }
}
},
"Events": {
"PutResource": {
"Type": "Api",
"Properties": {
"Path": "/{proxy+}",
"Method": "ANY"
}
}
}
}
},
"BasicUsagePlan" : {
"Type" : "AWS::ApiGateway::UsagePlan",
"Properties" : {
"UsagePlanName" : "Basic plan",
"Quota" : {
"Limit" : 100,
"Period" : "MONTH"
},
"Throttle" : {
"RateLimit" : 10,
"BurstLimit" : 10
}
}
},
"Bucket" : {
"Type" : "AWS::S3::Bucket",
"Condition" : "CreateS3Bucket",
"Properties" : {
"BucketName" : { "Fn::If" : ["BucketNameGenerated", {"Ref" : "AWS::NoValue" }, { "Ref" : "BucketName" } ] }
}
}
},
"Outputs" : {
"ApiURL" : {
"Description" : "API endpoint URL for Prod environment",
"Value" : { "Fn::Sub" : "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/" }
},
"S3ProxyBucket" : {
"Value" : { "Fn::If" : ["CreateS3Bucket", {"Ref":"Bucket"}, { "Ref" : "BucketName" } ] }
}
}
}
What I am trying to achieve is setting this value to true from the JSON template.
I was expecting some extra property for the proxy event where I can specify this value.
Any ideas?
Use AWS::ApiGateway::UsagePlanKey
{
"Type" : "AWS::ApiGateway::UsagePlanKey",
"Properties" : {
"KeyId" : String,
"KeyType" : String,
"UsagePlanId" : String
}
}
Add a DependsOn to the ApiKey, UsagePlan, and UsagePlanKey resources to ensure they are created in the correct order. This is a nice working example.
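For this template, the wiring could look like the sketch below. The resource names ApiKey and BasicUsagePlanKey are mine; ServerlessRestApi and Prod are the implicit REST API and stage that SAM generates for the Api event, as already referenced in your Outputs. You would also attach the plan to the deployed stage by adding "ApiStages" : [ { "ApiId" : { "Ref" : "ServerlessRestApi" }, "Stage" : "Prod" } ] to BasicUsagePlan:
"ApiKey" : {
  "Type" : "AWS::ApiGateway::ApiKey",
  "Properties" : {
    "Enabled" : true,
    "StageKeys" : [
      { "RestApiId" : { "Ref" : "ServerlessRestApi" }, "StageName" : "Prod" }
    ]
  }
},
"BasicUsagePlanKey" : {
  "Type" : "AWS::ApiGateway::UsagePlanKey",
  "DependsOn" : [ "ApiKey", "BasicUsagePlan" ],
  "Properties" : {
    "KeyId" : { "Ref" : "ApiKey" },
    "KeyType" : "API_KEY",
    "UsagePlanId" : { "Ref" : "BasicUsagePlan" }
  }
}
Newer SAM versions also let you require the key per method directly on the event, via "Auth" : { "ApiKeyRequired" : true } inside the Api event's Properties.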

Elasticsearch implementation of Exists and Not Exists

Select COUNT(distinct name)
From index1
Where date between X and y
And name in (Select name
From index1
Where date between p and s)
What is the equivalent query in Elasticsearch?
The Filter Aggregation may be the answer.
Something like this (note: this example uses the filtered query, which was deprecated in Elasticsearch 2.0 and removed in 5.0; a modern variant follows the example):
{
"size" : 0,
"query" : {
"filtered" : {
"query" : {
"match_all" : { }
},
"filter" : {
"range" : {
"date" : {
"from" : "2015-03-10T21:51:47.703-04:00",
"to" : "2015-03-20T21:51:47.727-04:00",
"include_lower" : true,
"include_upper" : true
}
}
}
}
},
"aggregations" : {
"names1" : {
"filter" : {
"range" : {
"date" : {
"from" : "2015-02-28T21:51:47.733-05:00",
"to" : "2015-03-20T21:51:47.734-04:00",
"include_lower" : true,
"include_upper" : true
}
}
},
"aggregations" : {
"names2" : {
"terms" : {
"field" : "name"
}
}
}
}
}
}
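On Elasticsearch 5.x and later, the same idea can be expressed with a bool query; a cardinality aggregation mirrors the COUNT(distinct name) part. A sketch (note that cardinality is approximate, and the dates and field names are taken from the example above):
{
  "size" : 0,
  "query" : {
    "bool" : {
      "filter" : {
        "range" : {
          "date" : {
            "gte" : "2015-03-10T21:51:47.703-04:00",
            "lte" : "2015-03-20T21:51:47.727-04:00"
          }
        }
      }
    }
  },
  "aggregations" : {
    "names1" : {
      "filter" : {
        "range" : {
          "date" : {
            "gte" : "2015-02-28T21:51:47.733-05:00",
            "lte" : "2015-03-20T21:51:47.734-04:00"
          }
        }
      },
      "aggregations" : {
        "distinct_names" : {
          "cardinality" : {
            "field" : "name"
          }
        }
      }
    }
  }
}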

SQL WHERE clause equivalent for Elasticsearch

I am trying to create aggregated results in Elasticsearch, but the filter option is not working for me.
I can aggregate data without a filter, e.g. the equivalent of
select name , material ,sum(price)
from products group by name , material
curl -XGET 'http://localhost:9200/products/_search?pretty=true' -d'
{
"aggs" : {
"product" : {
"terms" : {
"field" : "name"
},
"aggs" : {
"material" : {
"terms" : {
"field" : "material"
},
"aggs" : {
"sum_price" : {
"sum" : {
"field" : "price"
}
}
}
}
}
}
},
"size" : 0
}'
but I am having trouble writing the equivalent DSL query for:
select name , material ,sum(price)
from products
where material = "wood"
group by name , material
Should be something like this (this uses the pre-5.0 filtered query; on newer versions use a bool query with a filter clause, as in the next answer):
{
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"term": {
"material": "wood"
}
}
}
},
"aggs" : {
"product" : {
"terms" : {
"field" : "name"
},
"aggs" : {
"material" : {
"terms" : {
"field" : "material"
},
"aggs" : {
"sum_price" : {
"sum" : {
"field" : "price"
}
}
}
}
}
}
},
"size" : 0
}
Use a filter if you know the exact value and do not need full-text matching or scoring; otherwise use a match query instead of the filtered query.
You can use a match query:
{
"query": {
"bool": {
"must": [
{
"match": {
"material": "wood"
}
}
],
"filter": [
{
"match_all": {}
}
]
}
},
"aggs" : {
"product" : {
"terms" : {
"field" : "name"
},
"aggs" : {
"material" : {
"terms" : {
"field" : "material"
},
"aggs" : {
"sum_price" : {
"sum" : {
"field" : "price"
}
}
}
}
}
}
},
"size" : 0
}

How does MongoDB index arrays?

In MongoDB, if I were to store an array (say ["red", "blue"]) in a field "color", does it index "red" and "blue" so I could query for "red", for example, or does it make {"red", "blue"} a composite index?
When it comes to indexing arrays, MongoDB indexes each value of the array so you can query for individual items, such as "red." For example:
> db.col1.save({'colors': ['red','blue']})
> db.col1.createIndex({'colors':1})
> db.col1.find({'colors': 'red'})
{ "_id" : ObjectId("4ccc78f97cf9bdc2a2e54ee9"), "colors" : [ "red", "blue" ] }
> db.col1.find({'colors': 'blue'})
{ "_id" : ObjectId("4ccc78f97cf9bdc2a2e54ee9"), "colors" : [ "red", "blue" ] }
For more information, check out MongoDB's documentation on Multikeys: http://www.mongodb.org/display/DOCS/Multikeys
You can simply test index usage by appending .explain() to your query:
> db.col1.save({'colors': ['red','blue']})
# without index
> db.col1.find({'colors': 'red'}).explain()
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "protrain.col1",
"indexFilterSet" : false,
"parsedQuery" : {
"colors" : {
"$eq" : "red"
}
},
"winningPlan" : {
"stage" : "COLLSCAN", <--- simple column scan
"filter" : {
"colors" : {
"$eq" : "red"
}
},
"direction" : "forward"
},
"rejectedPlans" : [ ]
},
"serverInfo" : {
"host" : "bee34f15fe28",
"port" : 27017,
"version" : "3.4.4",
"gitVersion" : "888390515874a9debd1b6c5d36559ca86b44babd"
},
"ok" : 1
}
# query with index
> db.col1.createIndex( { "colors":1 } )
> db.col1.find({'colors': 'red'}).explain()
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "protrain.col1",
"indexFilterSet" : false,
"parsedQuery" : {
"colors" : {
"$eq" : "red"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN", <!---- INDEX HAS BEEN USED
"keyPattern" : {
"colors" : 1
},
"indexName" : "colors_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"colors" : [
"colors"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"colors" : [
"[\"red\", \"red\"]"
]
}
}
},
"rejectedPlans" : [ ]
},
"serverInfo" : {
"host" : "bee34f15fe28",
"port" : 27017,
"version" : "3.4.4",
"gitVersion" : "888390515874a9debd1b6c5d36559ca86b44babd"
},
"ok" : 1
}
If your documents have a fixed array structure, you can also use the array position to index fields inside arrays:
{
'_id': 'BB167E2D61909E848EBC96C7B33251AC',
'hist': {
'map': {
'10': 1
}
},
'wayPoints': [{
'bhf_name': 'Zinsgutstr.(Berlin)',
'ext_no': 900180542,
'lat': 52.435158,
'lon': 13.559086,
'puic': 86,
'time': {
'dateTime': '2018-01-10T09: 38: 00',
'offset': {
'totalSeconds': 3600
}
},
'train_name': 'Bus162'
},
{
'bhf_name': 'SAdlershof(Berlin)',
'ext_no': 900193002,
'lat': 52.435104,
'lon': 13.54055,
'puic': 86,
'time': {
'dateTime': '2018-01-10T09: 44: 00',
'offset': {
'totalSeconds': 3600
}
},
'train_name': 'Bus162'
}]
}
db.col.createIndex( { "wayPoints.0.ext_no":1 } )
db.col.createIndex( { "wayPoints.0.train_name":1 } )
db.col.createIndex( { "wayPoints.1.ext_no":1 } )
db.col.createIndex( { "wayPoints.1.train_name":1 } )
> db.col.find(
... {
... "wayPoints.ext_no": 900180542
... }
... ,
... {
... "wayPoints.ext_no":1,
... "wayPoints.train_name":1,
... "wayPoints.time":1
... }
... ).explain()
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "db.col",
"indexFilterSet" : false,
"parsedQuery" : {
"wayPoints.ext_no" : {
"$eq" : 900180542
}
},
"winningPlan" : {
"stage" : "PROJECTION",
"transformBy" : {
"wayPoints.ext_no" : 1,
"wayPoints.train_name" : 1,
"wayPoints.time" : 1
},
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"wayPoints.ext_no" : 1
},
"indexName" : "wayPoints.ext_no_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"wayPoints.ext_no" : [
"wayPoints"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"wayPoints.ext_no" : [
"[900180542.0, 900180542.0]"
]
}
}
}
},
"rejectedPlans" : [ ]
},
"serverInfo" : {
"host" : "bee34f15fe28",
"port" : 27017,
"version" : "3.4.4",
"gitVersion" : "888390515874a9debd1b6c5d36559ca86b44babd"
},
"ok" : 1
}