Elasticsearch: POST bulk to the X-Pack role API

I have an Elasticsearch cluster with X-Pack enabled.
I'd like to back up all of the X-Pack roles that have been created:
GET _xpack/security/role
This returns a big JSON document, for example:
{
"kibana_dashboard_only_user": {
"cluster": [],
"indices": [
{
"names": [
".kibana*"
],
"privileges": [
"read",
"view_index_metadata"
]
}
],
"run_as": [],
"metadata": {
"_reserved": true
},
"transient_metadata": {
"enabled": true
}
},
"watcher_admin": {
"cluster": [
"manage_watcher"
],
"indices": [
{
"names": [
".watches",
".triggered_watches",
".watcher-history-*"
],
"privileges": [
"read"
]
}
],
"run_as": [],
"metadata": {
"_reserved": true
},
"transient_metadata": {
"enabled": true
}
},
....
}
Now I'd like to put these roles back into the cluster (or into another one). I cannot simply PUT the JSON to _xpack/security/role. If I understand correctly, I have to use the bulk API:
$ curl --user elastic:password https://elastic:9200/_xpack/security/_bulk?pretty -XPOST -H 'Content-Type: application/json' -d '
{"index":{"_index": "_xpack/security/role"}}
{"ROOOOLE" : {"cluster" : [ ],"indices" : [{"names" : [".kibana*"],"privileges" : ["read","view_index_metadata"]}],"run_as" : [ ],"metadata" : {"_reserved" : true},"transient_metadata" : {"enabled" : true}}}
'
But I get an error:
{
"took" : 3,
"errors" : true,
"items" : [
{
"index" : {
"_index" : "_xpack/security/role",
"_type" : "security",
"_id" : null,
"status" : 400,
"error" : {
"type" : "invalid_index_name_exception",
"reason" : "Invalid index name [_xpack/security/role], must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?]",
"index_uuid" : "_na_",
"index" : "_xpack/security/role"
}
}
}
]
}
Is there a way to do this easily? Or do I have to parse the JSON and PUT each role one by one to:
_xpack/security/role/rolexxx
_xpack/security/role/roleyyy
...
More generally, is there a way to dump all the data of an index (a config index) and then upload it back, or load it into another cluster?
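For reference, a minimal sketch of that role-by-role approach using jq to split the GET output (the file name roles.json and the credentials are placeholders; reserved roles such as kibana_dashboard_only_user are built in and a PUT for them will be rejected):

# Dump all roles to a file
curl -s --user elastic:password "https://elastic:9200/_xpack/security/role" > roles.json

# PUT each role back one by one under its own name
for role in $(jq -r 'keys[]' roles.json); do
  jq --arg r "$role" '.[$r]' roles.json | \
    curl -s --user elastic:password -XPUT \
      "https://elastic:9200/_xpack/security/role/$role" \
      -H 'Content-Type: application/json' -d @-
done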

Related

How do I change MongoDB JSON data to an array

I need to update a MongoDB field containing an array of objects, so that each JSON object is replaced by an array.
I have something like this in MongoDB:
"designSectionContents" : [
{
"_id" : "5bae17ecbd7595540145ec98",
"type" : "subSection",
"columns" : [
{
"0" : {
"itemId" : "5b7465980783d9a37058f160",
"type" : "field"
}
},
{
"0" : {
"itemId" : "5b7465630783d9a37058f15c",
"type" : "field"
}
},
{
"0" : {
"itemId" : "5b7465810783d9a37058f15e",
"type" : "field"
}
}
],
"subSectionContentLayout" : {
"labelPlacement" : "Top",
"columns" : 3
}
}
]
I want to change the above snippet to below in MongoDB
"designSectionContents" : [
{
"_id" : ObjectId("5bae17ecbd7595540145ec98"),
"type" : "subSection",
"columns" : [
[
{
"itemId" : "5b7465980783d9a37058f160",
"type" : "field"
}
],
[
{
"itemId" : "5b7465630783d9a37058f15c",
"type" : "field"
}
],
[
{
"itemId" : "5b7465810783d9a37058f15e",
"type" : "field"
}
]
]
}
]
The opening and closing curly braces have to be changed to an array.
This should work:
db.collection.aggregate([
{
"$project": {
"designSectionContents": {
"$map": {
"input": "$designSectionContents",
"as": "designSectionContent",
"in": {
"_id": "$$designSectionContent._id",
"type": "$$designSectionContent.type",
"columns": {
"$map": {
"input": "$$designSectionContent.columns",
"as": "inp",
"in": [
"$$inp.0"
]
}
}
}
}
}
}
}
]);
Here's the working link.
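If the goal is to persist the reshaped documents rather than just view them, one option (a sketch, assuming MongoDB 4.2+ and the same collection name as above) is to append a $merge stage so the projected designSectionContents overwrites the stored one:

db.collection.aggregate([
  {
    "$project": {
      "designSectionContents": {
        "$map": {
          "input": "$designSectionContents",
          "as": "designSectionContent",
          "in": {
            "_id": "$$designSectionContent._id",
            "type": "$$designSectionContent.type",
            "columns": {
              "$map": {
                "input": "$$designSectionContent.columns",
                "as": "inp",
                "in": [ "$$inp.0" ]
              }
            }
          }
        }
      }
    }
  },
  {
    // $project keeps _id by default, so $merge matches each source document
    // on it and overwrites only its designSectionContents field
    "$merge": {
      "into": "collection",
      "on": "_id",
      "whenMatched": "merge",
      "whenNotMatched": "discard"
    }
  }
]);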

mapper_parsing_exception in Elasticsearch (Reason: No type specified for field [X])

I wanted to provide explicit mappings for the fields in my document, so I defined a mapping for my index demo. It looks like this:
PUT /demo
{
"mappings": {
"properties": {
"X" : {
"X" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
},
"Sub_X" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
}
}
}
}
}
After running the query, I get this error:
{
"error" : {
"root_cause" : [
{
"type" : "mapper_parsing_exception",
"reason" : "No type specified for field [X]"
}
],
"type" : "mapper_parsing_exception",
"reason" : "Failed to parse mapping [_doc]: No type specified for field [X]",
"caused_by" : {
"type" : "mapper_parsing_exception",
"reason" : "No type specified for field [X]"
}
},
"status" : 400
}
The field X in the JSON document looks like this:
"X" : {
"X" : [
"a"
],
"Sub_X" : [
[
"b"
]
]
},
Please help me out with this Elasticsearch mapper_parsing_exception error.
What you have is called the nested data type: you have X, which in turn contains X and Sub_X.
Mapping:
{
"properties": {
"X": {
"type": "nested"
}
}
}
Data:
{
"X": {
"X": [
"a"
],
"Sub_X": [
[
"b"
]
]
}
}
Query:
{
"query": {
"nested": {
"path": "X",
"query": {
"bool": {
"must": [
{ "match": { "X.X": "a" }},
{ "match": { "X.Sub_X": "b" }}
]
}
}
}
}
}
It outputs the document.
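For reference, a minimal end-to-end sketch with curl (Elasticsearch 7.x syntax; the host localhost:9200 and reusing the index name demo are assumptions):

# Recreate the index with the nested mapping
curl -XPUT "localhost:9200/demo" -H 'Content-Type: application/json' -d'
{ "mappings": { "properties": { "X": { "type": "nested" } } } }'

# Index the document from the question and make it searchable immediately
curl -XPOST "localhost:9200/demo/_doc?refresh=true" -H 'Content-Type: application/json' -d'
{ "X": { "X": [ "a" ], "Sub_X": [ [ "b" ] ] } }'

# Run the nested query
curl -XGET "localhost:9200/demo/_search" -H 'Content-Type: application/json' -d'
{ "query": { "nested": { "path": "X", "query": { "bool": { "must": [
  { "match": { "X.X": "a" } },
  { "match": { "X.Sub_X": "b" } }
] } } } } }'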

How to load nested JSON data into a single column in Druid

I am trying to load nested JSON data into Apache Druid:
Data-->
{
"a": "a_data",
"b": "b_data",
"c_blob_Column": {"aaaa"{"k":"sample"{"c":"sample2"}}}}
Spec -->
{ "type" : "kafka", "dataSchema" : { "dataSource" : "blob", "parser" : { "type" : "string", "parseSpec" : { "format" : "json", "dimensionsSpec" : { "dimensions" : [ "a", "b", "c_blob_Column"
]
},
"timestampSpec": {
"column": "timestamp",
"format": "iso"
}
}
},
"metricsSpec" : [],
"granularitySpec" : {
"type" : "uniform",
"segmentGranularity" : "DAY",
"queryGranularity" : "none",
"rollup" : false
}
},
"ioConfig" : {
"topic":"blob_topic",
"consumerProperties":{
"bootstrap.servers":"<local server>"
},
"appendToExisting" : false,
"useEarliestOffset": true,
"taskDuration": "PT15M"
},
"tuningConfig" : {
"type" : "kafka",
"maxRowsPerSegment" : 5000000,
"maxRowsInMemory" : 25000
}
}
Output columns-->
a,b,c_blob_Column,__time
I am able to load the data, but the issue is that in the column c_blob_Column the data does not come through as JSON. Could someone please help me figure out how to load the JSON blob data?
You can use a jq expression in the flattenSpec:
"flattenSpec": {
"fields": [
{
"type": "jq",
"name": "c_blob_Column",
"expr": ".c_blob_Column | tojson"
}
]
}
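For context, this flattenSpec sits inside the parseSpec of the spec above, next to dimensionsSpec and timestampSpec; a rough sketch (useFieldDiscovery keeps the plain root fields a and b available):

"parseSpec" : {
  "format" : "json",
  "flattenSpec" : {
    "useFieldDiscovery" : true,
    "fields" : [
      { "type" : "jq", "name" : "c_blob_Column", "expr" : ".c_blob_Column | tojson" }
    ]
  },
  "dimensionsSpec" : { "dimensions" : [ "a", "b", "c_blob_Column" ] },
  "timestampSpec" : { "column" : "timestamp", "format" : "iso" }
}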

Elasticsearch Not Returning Document By Field Name

Elasticsearch newb here. I seem to be having an issue selecting documents by a certain field. It feels like a corrupt index to me, but I'm not sure.
Here is a document that I can retrieve, and get the fields event.type and event.accountId:
$ curl -XGET 'http://127.0.0.1:9200/events-2015.04.08/event/AUyYpkl-r99VdGrSLpIX?pretty=1&fields=event.type,event.accountId'
{
"_index" : "events-2015.04.08",
"_type" : "event",
"_id" : "AUyYpkl-r99VdGrSLpIX",
"_version" : 1,
"found" : true,
"fields" : {
"event.type" : [ "USER_LOGIN" ],
"event.accountId" : [ 10399 ]
}
}
Notice the event.type: USER_LOGIN. Now I want to find all documents that have this field/value combination:
curl -XGET 'http://127.0.0.1:9200/events-2015.04.08/_search?q=event.type:USER_LOGIN&pretty=1'
{
"took" : 2,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 0,
"max_score" : null,
"hits" : [ ]
}
}
No results. I can find the document by event.accountId though:
$ curl -XGET 'http://127.0.0.1:9200/events-2015.04.08/_search?q=event.accountId:10399&pretty=1'
{
"took" : 2,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 2,
"max_score" : 1.0,
"hits" : [ {
"_index" : "events-2015.04.08",
"_type" : "event",
"_id" : "AUyYpkjCr99VdGrSLpIW",
"_score" : 1.0,
"_source": {...}
}, {
"_index" : "events-2015.04.08",
"_type" : "event",
"_id" : "AUyYpkl-r99VdGrSLpIX", # <-- This is the doc I want
"_score" : 1.0,
"_source": {...}
} ]
}
}
So is this field corrupt or something? How do I check? I expect to be able to find this document by event.type.
UPDATE
The document is being indexed via the SQS input plugin for Logstash. Here is the relevant part of logstash.conf:
input {
sqs {
queue => "the_queue"
region => "us-west-2"
type => "event"
}
}
filter {
json {
source => "Message"
target => "event"
remove_field => [ "Message" ]
}
mutate {
rename => { "Type" => "EventType" }
}
date {
match => [ "Timestamp", "ISO8601" ]
}
}
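One way to check what actually got indexed (a sketch against the index above) is to look at the mapping that was created for event.type and at how the value is analyzed:

# Show the mapping that was actually created for the index
curl -XGET 'http://127.0.0.1:9200/events-2015.04.08/_mapping?pretty=1'

# Show which terms the analyzer produces for the stored value
curl -XGET 'http://127.0.0.1:9200/events-2015.04.08/_analyze?pretty=1' -d 'USER_LOGIN'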

How can I access protected S3 files in a CFN script?

I am trying to retrieve a file in my CloudFormation script. If I make the file publicly available, it works fine. If the file is private, the CFN script fails, but with a 404 error in /var/log/, whereas trying to retrieve the file via wget results in the expected 403 error.
How can I retrieve private files from S3?
My file clause looks like:
"files" : {
"/etc/httpd/conf/httpd.conf" : {
"source" : "https://s3.amazonaws.com/myConfigBucket/httpd.conf"
}
},
I added an authentication clause and appropriate parameter:
"Parameters" : {
"BucketRole" : {
"Description" : "S3 role for access to bucket",
"Type" : "String",
"Default" : "S3Access",
"ConstraintDescription" : "Must be a valid IAM Role"
}
}
"AWS::CloudFormation::Authentication": {
"default" : {
"type": "s3",
"buckets": [ "myConfigBucket" ],
"roleName": { "Ref" : "BucketRole" }
}
},
My IAM Role looks like:
{
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*"
],
"Resource": "*"
}
]
}
The solution is to add an IamInstanceProfile property to the instance resource:
"Parameters" : {
...
"RoleName" : {
"Description" : "IAM Role for access to S3",
"Type" : "String",
"Default" : "DefaultRoleName",
"ConstraintDescription" : "Must be a valid IAM Role"
}
},
"Resources" : {
"InstanceName" : {
"Type" : "AWS::EC2::Instance",
"Properties" : {
"ImageId" : { "Fn::FindInMap" : [ "RegionMap", { "Ref" : "AWS::Region" }, "64"] },
"InstanceType" : { "Ref" : "InstanceType" },
"SecurityGroups" : [ {"Ref" : "SecurityGroup"} ],
"IamInstanceProfile" : { "Ref" : "RoleName" },
"KeyName" : { "Ref" : "KeyName" }
}
},
...
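Note that IamInstanceProfile expects an instance profile rather than a raw role name; if the role was created in the IAM console, an instance profile with the same name usually already exists. Otherwise, a minimal sketch of the extra resource (the logical name InstanceProfile is just a placeholder) would be:

"InstanceProfile" : {
  "Type" : "AWS::IAM::InstanceProfile",
  "Properties" : {
    "Path" : "/",
    "Roles" : [ { "Ref" : "RoleName" } ]
  }
}

and the instance would then use "IamInstanceProfile" : { "Ref" : "InstanceProfile" }.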