How to search Host Groups in Zabbix API

I want to list all Host Groups that match some search criteria.
I've tried this:
data = '{"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"search": {
"name": [
"' + group_name + '"
]
},
},
"id":' + str(msg_id) + ',
"auth": "' + auth + '"
}'
But that is not correct syntax. (Comparing it with the working version at the end, the only difference is the trailing comma after the "search" block.)
I also tried this:
data = '{"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"filter": {
"name": [
"' + group_name + '"
]
},
},
"id":' + str(msg_id) + ',
"auth": "' + auth + '"
}'
This one works, but it only matches the group name exactly, so it always returns 1 or 0 matches.
I also tried adding the "searchWildcardsEnabled" option to this last query, but it made no difference in the result (it still didn't produce multiple groups as output).

I've found the correct way. I'll post it here in case anyone else needs it later.
data = '{"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"search": {
"name": [
"' + group_name + '"
]
}
},
"id":' + str(msg_id) + ',
"auth": "' + auth + '"
}'
You don't need to specify the wildcard option; it's the default. You also don't need to put % characters inside your query.
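As an aside, the request is less error-prone if you build the payload as a Python dict and serialize it with json.dumps instead of concatenating strings. A minimal sketch, assuming the requests library and a placeholder endpoint URL (group_name, msg_id, and auth stand for your own values):

import json
import requests

url = "http://zabbix.example.com/api_jsonrpc.php"  # placeholder endpoint

# Building a dict and serializing it avoids the trailing-comma and
# quoting mistakes that hand-built JSON strings invite.
payload = {
    "jsonrpc": "2.0",
    "method": "hostgroup.get",
    "params": {
        "output": "extend",
        "search": {
            "name": [group_name]  # substring match, wildcards on by default
        }
    },
    "id": msg_id,
    "auth": auth,
}

response = requests.post(url, data=json.dumps(payload),
                         headers={"Content-Type": "application/json-rpc"})
print(response.json())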


Create parent-children query from many-to-many relationship

Good day,
I'm struggling to create a query in Postgres that returns a many-to-many relationship in parent-children form.
There are two tables, projects and services. They are connected through a third table, project_service. (The original post showed screenshots of all three tables.)
The query
SELECT projects.id_project, projects.title, projects.level,
       services.id_service, services.service
FROM projects, services, project_service
WHERE projects.id_project = project_service.id_project
  AND services.id_service = project_service.id_service
returns the flat join result (shown as a screenshot in the original post).
How can I write a query to get this result in parent-children form, like this:
[
    { "id": "1000", "level": "Projects", "title": "Project 1", "children": [
        { "id": "2000", "service": "Service 1" },
        { "id": "2001", "service": "Service 2" },
        { "id": "2002", "service": "Service 3" },
        { "id": "2003", "service": "Service 4" }
    ]},
    { "id": "1001", "level": "Projects", "title": "Project 2", "children": [
        { "id": "2004", "service": "Service 5" }
    ]}
]
Thank you for your help.
Is this what you mean?
with svc_json as (
    -- one JSONB object per service row
    select id_service, to_jsonb(services) as svc
    from services
), together as (
    -- attach each project's aggregated services as "children"
    select p.*, jsonb_agg(s.svc) as children
    from project_service ps
    join svc_json s on s.id_service = ps.id_service
    join projects p on p.id_project = ps.id_project
    group by p.id_project, p.title, p.level
)
select jsonb_pretty(jsonb_agg(to_jsonb(together)))
from together;
jsonb_pretty
-----------------------------------------
[ +
{ +
"level": "Projects", +
"title": "Project 1", +
"children": [ +
{ +
"service": "Service 1",+
"id_service": 2000 +
}, +
{ +
"service": "Service 2",+
"id_service": 2001 +
}, +
{ +
"service": "Service 3",+
"id_service": 2002 +
}, +
{ +
"service": "Service 4",+
"id_service": 2003 +
} +
], +
"id_project": 1000 +
}, +
{ +
"level": "Projects", +
"title": "Project 2", +
"children": [ +
{ +
"service": "Service 5",+
"id_service": 2004 +
} +
], +
"id_project": 1001 +
} +
]
(1 row)
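If you'd rather shape the parent-children structure in application code than in SQL, the same grouping over the flat join result takes only a few lines. A Python sketch, with rows mimicking a few of the joined rows from the question (in practice they would come from your database driver):

import json
from itertools import groupby
from operator import itemgetter

# Flat rows mimicking the join result from the question;
# groupby() requires them to be sorted by the grouping key.
rows = [
    {"id_project": 1000, "title": "Project 1", "level": "Projects", "id_service": 2000, "service": "Service 1"},
    {"id_project": 1000, "title": "Project 1", "level": "Projects", "id_service": 2001, "service": "Service 2"},
    {"id_project": 1001, "title": "Project 2", "level": "Projects", "id_service": 2004, "service": "Service 5"},
]

projects = []
for (id_project, title, level), group in groupby(rows, key=itemgetter("id_project", "title", "level")):
    projects.append({
        "id": str(id_project),
        "level": level,
        "title": title,
        "children": [{"id": str(r["id_service"]), "service": r["service"]} for r in group],
    })

print(json.dumps(projects, indent=2))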

Query the output and get the latest file name

Below is the Kusto query:
ADFActivityRun
| where PipelineName contains "MyPipeline"
| where OperationName == "Failed"
| order by TimeGenerated desc
| take 1
The Output column gives the result below:
"{
""name"": ""varFileNames"",
""value"": [
{
""name"": ""Z400_EEE_20191110_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_CCC_20191119_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_DDD_20191121_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_EEE_20191122_ERR.txt"",
""type"": ""File""
},
{
""name"": ""Z400_AAA_20191202_ERR.txt"",
""type"": ""File""
}
]
}"
The file names have yyyymmdd in them. I want to get only the latest text file name; in the above case, Z400_AAA_20191202_ERR.txt.
The intent is to send an alert saying "The above error file is available, please check this file".
You could use mv-apply to achieve that. For example:
print d = dynamic({
    "name": "varFileNames",
    "value": [
        { "name": "Z400_EEE_20191110_ERR.txt", "type": "File" },
        { "name": "Z400_CCC_20191119_ERR.txt", "type": "File" },
        { "name": "Z400_DDD_20191121_ERR.txt", "type": "File" },
        { "name": "Z400_EEE_20191122_ERR.txt", "type": "File" },
        { "name": "Z400_AAA_20191202_ERR.txt", "type": "File" }
    ]
})
| mv-apply value = d.value on (
    parse value.name with * "_" * "_" dt:datetime "_" *
    | top 1 by dt desc
    | project name = value.name
)
| project name
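The same extract-and-compare logic is easy to verify outside of Kusto. A small Python sketch over the file list from the question, pulling the yyyymmdd token out of each name and keeping the newest:

from datetime import datetime

file_names = [
    "Z400_EEE_20191110_ERR.txt",
    "Z400_CCC_20191119_ERR.txt",
    "Z400_DDD_20191121_ERR.txt",
    "Z400_EEE_20191122_ERR.txt",
    "Z400_AAA_20191202_ERR.txt",
]

# The third underscore-separated token is the yyyymmdd date.
latest = max(file_names,
             key=lambda n: datetime.strptime(n.split("_")[2], "%Y%m%d"))
print(latest)  # Z400_AAA_20191202_ERR.txt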

SSMS Tabular Model: create multiple partitions via one single XMLA script

I need to create a considerable number of partitions using XMLA scripts run through SSMS.
Is there a way to combine the creation of multiple partitions into one single script?
In the example below, I need to execute the first script, and only when it finishes can I open the other script and execute it as well. Very time-consuming.
How can I restructure the code to run it in only one execution?
Script 1:
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE1", "partition": "Partition"
},
"partition": {
"name": "Process_OLD", "dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",", " MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],", "\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] < 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
Script 2:
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE2", "partition": "Partition"
},
"partition": {
"name": "Process_NEW", "dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",", " MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],", "\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] >= 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
You can put a sequence command around it:
{
"sequence": {
"operations": [
{
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE1",
"partition": "Partition"
},
"partition": {
"name": "Process_OLD",
"dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",",
" MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],",
"\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] < 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
},
{
"createOrReplace": {
"object": {
"database": "MYDB",
"table": "MYTABLE2",
"partition": "Partition"
},
"partition": {
"name": "Process_NEW",
"dataView": "full",
"source": {
"type": "m",
"expression": [
"let",
" Source = #\"mySQL/MY_SCHEMA\",",
" MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
" AllData = MY_SCHEMA{[Name=\"MYTABLE1\"]}[Data],",
"\t#\"Filtered Rows\" = Table.SelectRows(AllData, each [DATE] >= 20170101)",
"in",
" #\"Filtered Rows\""
]
}
}
}
}
]
}
}
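Since the question mentions a considerable number of partitions, you may not want to hand-edit that sequence at all. A sketch in Python that generates the whole script from a list of partition definitions; the list entries are illustrative, and it assumes each partition's M expression reads from its own table (the pasted Script 2 referenced MYTABLE1 in its expression, which may or may not have been intended):

import json

# Illustrative partition definitions -- extend this list as needed.
partitions = [
    {"table": "MYTABLE1", "name": "Process_OLD", "predicate": "[DATE] < 20170101"},
    {"table": "MYTABLE2", "name": "Process_NEW", "predicate": "[DATE] >= 20170101"},
]

def create_or_replace(p):
    # Build one createOrReplace operation for a partition definition.
    return {
        "createOrReplace": {
            "object": {
                "database": "MYDB",
                "table": p["table"],
                "partition": "Partition",
            },
            "partition": {
                "name": p["name"],
                "dataView": "full",
                "source": {
                    "type": "m",
                    "expression": [
                        "let",
                        " Source = #\"mySQL/MY_SCHEMA\",",
                        " MY_SCHEMA= Source{[Schema=\"MY_SCHEMA\"]}[Data],",
                        " AllData = MY_SCHEMA{[Name=\"" + p["table"] + "\"]}[Data],",
                        "\t#\"Filtered Rows\" = Table.SelectRows(AllData, each " + p["predicate"] + ")",
                        "in",
                        " #\"Filtered Rows\"",
                    ],
                },
            },
        }
    }

script = {"sequence": {"operations": [create_or_replace(p) for p in partitions]}}
print(json.dumps(script, indent=2))  # paste the output into SSMS and run once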

Is there any way to insert the nested JSON data below into SQL Server?

I am working with nested JSON data that I need to load into SQL Server 2012. The nested JSON contains two arrays, one named columns and another named rows, and I need to match each value in rows to its column. Please see the structure below:
{
    "tables": [
        {
            "name": "PrimaryResult",
            "columns": [
                {
                    "name": "timestamp",
                    "type": "datetime"
                },
                {
                    "name": "id",
                    "type": "string"
                },
                {
                    "name": "name",
                    "type": "string"
                },
                {
                    "name": "url",
                    "type": "string"
                },
                {
                    "name": "duration",
                    "type": "real"
                }
            ],
            "rows": [
                [
                    "2019-04-08T13:09:52.871Z",
                    "244",
                    "Internal",
                    "https://google.com",
                    1245
                ]
            ]
        }
    ]
}
Result:
timestamp    id    name      url                  duration
2019-04-08   244   Internal  https://google.com   1245
Here, in SQL Server, it should take the column names from columns and the value for each column from rows.
Assuming that you store the JSON in a file named json.txt:
import json

with open('json.txt') as f:
    data = json.load(f)

tableName = data['tables'][0]['name']
sqlCreateTable = 'CREATE TABLE ' + tableName + ' (\n'
sqlInsertInto = 'INSERT INTO ' + tableName + ' ('
for column in data['tables'][0]['columns']:
    # Note: JSON types such as "string" are not valid SQL Server types;
    # map them (e.g. to nvarchar) before actually running the DDL.
    sqlCreateTable += column['name'] + ' ' + column['type'] + ',\n'
    sqlInsertInto += column['name'] + ', '
sqlCreateTable = sqlCreateTable[:-2] + '\n);'
sqlInsertInto = sqlInsertInto[:-2] + ')\nVALUES ('
for value in data['tables'][0]['rows'][0]:
    # Quote string values (and escape embedded quotes) so the INSERT is valid T-SQL.
    if isinstance(value, str):
        sqlInsertInto += "'" + value.replace("'", "''") + "', "
    else:
        sqlInsertInto += str(value) + ', '
sqlInsertInto = sqlInsertInto[:-2] + ');'
print(sqlCreateTable)
print(sqlInsertInto)
Output for create table:
CREATE TABLE PrimaryResult (
timestamp datetime,
id string,
name string,
url string,
duration real
);
Output for insert into table:
INSERT INTO PrimaryResult (timestamp, id, name, url, duration)
VALUES ('2019-04-08T13:09:52.871Z', '244', 'Internal', 'https://google.com', 1245);
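If you want to execute the generated statements directly rather than just print them, a sketch using pyodbc (the connection string is a placeholder; adjust the driver, server, and credentials for your environment):

import pyodbc

# Placeholder connection details -- substitute your own.
conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=myserver;DATABASE=mydb;UID=user;PWD=password"
)
cursor = conn.cursor()
cursor.execute(sqlCreateTable)  # the generated CREATE TABLE from above
cursor.execute(sqlInsertInto)   # the generated INSERT from above
conn.commit()
conn.close()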

Join Strings in Query output

I have this query that I'd like to convert to MongoDB. Is it possible to do this without resorting to JavaScript?
SELECT FirstName, Lastname, FirstName + " " + LastName AS FullName FROM Users
Also, I am using the most recent version of MongoDB.
The only things that can "alter" the shape of a returned document are .aggregate() and .mapReduce(). In this case, .aggregate() is the better fit, using $project with the $concat operator:
db.users.aggregate([
{ "$project": {
"FirstName": 1,
"Lastname": 1,
"FullName": { "$concat": [ "$FirstName", " ", "$Lastname" ] }
}}
])
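If you are driving MongoDB from Python, the same pipeline works unchanged through PyMongo. A sketch, assuming a local mongod and a users collection in a placeholder database named test:

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed local instance
db = client["test"]  # placeholder database name

pipeline = [
    {"$project": {
        "FirstName": 1,
        "Lastname": 1,
        # $concat joins the fields with a space, like FirstName + " " + LastName in SQL
        "FullName": {"$concat": ["$FirstName", " ", "$Lastname"]},
    }}
]

for doc in db.users.aggregate(pipeline):
    print(doc.get("FullName"))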