Create nested json in Snowflake - sql

I am trying to create a nested JSON in Snowflake and have narrowed the query down to the one below, where I have nested it on id. However, I want the nesting to also apply to the inner layer, and I am finding it hard to get the right query for it.
WITH subquery AS (
    SELECT id, placeId, actionId, resultValue
    FROM my_table
)
SELECT id,
       '{"resultValues": {' ||
       listagg('"' || placeId || '": {"' || actionId || '": ' || resultValue || '}', ',')
           within group (order by placeId) ||
       '}}' AS nested_json
FROM subquery
GROUP BY id;
Below is how the current result looks for each id, and the shape I am trying to get to: actionId1 and actionId2 grouped under placeId1 and placeId2. How do I get this done? Any ideas would be appreciated.
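Borrowing the sample values from the answer below (so the numbers are just placeholders), the current string per id comes out as
{"resultValues": {"placeId1": {"actionId1": 1.1}, "placeId1": {"actionId2": 1.2}, "placeId2": {"actionId1": 1.3}, "placeId2": {"actionId2": 1.4}}}
whereas the goal is
{"resultValues": {"placeId1": {"actionId1": 1.1, "actionId2": 1.2}, "placeId2": {"actionId1": 1.3, "actionId2": 1.4}}}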

Meet FLATTEN() and LATERAL; they like to hang out with OBJECT_AGG(), who needs his own space via CTEs.
WITH CTE AS (
    SELECT
        parse_json(
            '{ "resultValues": [
                { "placeId1": { "actionId1": 1.1 } }, { "placeId1": { "actionId2": 1.2 } },
                { "placeId2": { "actionId1": 1.3 } }, { "placeId2": { "actionId2": 1.4 } } ] }'
        ) VOLIA
),
CTE2 AS (
    SELECT DISTINCT
        KIAORA.PATH KIAORA,
        TE_REO.PATH TE_REO,
        OBJECT_AGG(MAORI.PATH, MAORI.VALUE) OVER (PARTITION BY TE_REO.PATH) MAORI
    FROM
        CTE,
        LATERAL FLATTEN(INPUT => VOLIA) KIAORA,
        LATERAL FLATTEN(KIAORA.VALUE) HELLO,
        LATERAL FLATTEN(HELLO.VALUE) TE_REO,
        LATERAL FLATTEN(INPUT => TE_REO.VALUE) MAORI
)
SELECT DISTINCT
    OBJECT_CONSTRUCT(
        KIAORA,
        ARRAY_CONSTRUCT(
            OBJECT_AGG(TE_REO, MAORI) OVER (PARTITION BY KIAORA)
        )
    ) ANSWER,
    VOLIA
FROM
    CTE2, CTE
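If you would rather build the nested object straight from my_table instead of re-parsing a string, a minimal OBJECT_AGG sketch along the same lines (assuming the id, placeId, actionId and resultValue columns from the question) could look like:
WITH per_place AS (
    SELECT id, placeId,
           -- one object per place: {"actionId1": ..., "actionId2": ...}
           OBJECT_AGG(actionId, TO_VARIANT(resultValue)) AS actions
    FROM my_table
    GROUP BY id, placeId
)
SELECT id,
       -- wrap the per-place objects under a single "resultValues" key
       OBJECT_CONSTRUCT('resultValues', OBJECT_AGG(placeId, TO_VARIANT(actions))) AS nested_json
FROM per_place
GROUP BY id;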

Related

Snowflake get_path() or flatten() array query - to find latest key:value

I have a column 'amp' in a table 'EXAMPLE'. Column 'amp' is an array which looks like this:
[{
"list": [{
"element": {
"x_id": "12356789XXX",
"y_id": "12356789XXX38998",
}
},
{
"element": {
"x_id": "5677888356789XXX",
"y_id": "1XXX387688",
}
}]
}]
How should I query using get_path() or flatten() to extract the latest x_id and y_id values (or is there another alternative)?
In this example there are only 2 elements, but there could be anywhere from 1 to 6000 elements containing x_id and y_id.
Help much appreciated!
Someone may have a more elegant way than this, but you can use a CTE. In the first table expression, grab the max index of the array. In the second part, grab the values you need.
set json = '[{"list": [{"element": {"x_id": "12356789XXX","y_id": "12356789XXX38998"}},{"element": {"x_id": "5677888356789XXX","y_id": "1XXX387688"}}]}]';
create temp table foo(v variant);
insert into foo select parse_json($json);
with
MAX_INDEX(M) as
(
select max("INDEX") MAX_INDEX
from foo, lateral flatten(v, recursive => true)
),
VALS(V, P, K) as
(
select "VALUE", "PATH", "KEY"
from foo, lateral flatten(v, recursive => true)
)
select k as "KEY", V::string as VALUE from vals, max_index
where VALS.P = '[0].list[' || max_index.m || '].element.x_id' or
VALS.P = '[0].list[' || max_index.m || '].element.y_id'
;
Assuming that the outer array ALWAYS contains a single dictionary element, you could use this:
SELECT amp[0]:"list"[ARRAY_SIZE(amp[0]:"list")-1]:"element":"x_id"::VARCHAR AS x_id
,amp[0]:"list"[ARRAY_SIZE(amp[0]:"list")-1]:"element":"y_id"::VARCHAR AS y_id
FROM T
;
Or if you prefer a bit more modularity/readability, you could use this:
WITH CTE1 AS (
SELECT amp[0]:"list" AS _ARRAY
FROM T
)
,CTE2 AS (
SELECT _ARRAY[ARRAY_SIZE(_ARRAY)-1]:"element" AS _DICT
FROM CTE1
)
SELECT _DICT:"x_id"::VARCHAR AS x_id
,_DICT:"y_id"::VARCHAR AS y_id
FROM CTE2
;
Note: I have not used FLATTEN here because I did not see a good reason to use it.
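For completeness, a FLATTEN-based sketch of the same idea (still assuming the table T and column amp from above, and taking the last list element by its array index) might look like:
SELECT f.value:"element":"x_id"::varchar AS x_id,
       f.value:"element":"y_id"::varchar AS y_id
FROM T,
     LATERAL FLATTEN(input => amp[0]:"list") f
-- keep only the element with the highest array index per source row (f.seq identifies the source row)
QUALIFY ROW_NUMBER() OVER (PARTITION BY f.seq ORDER BY f.index DESC) = 1;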

BigQuery JSON EXTRACT

[
{
"key":"expiry_date",
"type":"date",
"label":"Expiry Date",
"required":false,
"default_value":"2029-12-15"
},
{
"key":"brand",
"type":"text",
"label":"Brand",
"required":false,
"default_value":"clipsal"
}
]
Is there a way that I could extract the default_value of "expiry_date" in the nested JSON above? The data is under a column called attributes.
Have you tried any of the JSON functions described in the BigQuery documentation? Maybe they can help.
Also, if the first element of the JSON array will always be what you want, you could use something like:
WITH test_table AS (
SELECT "[{\"key\":\"expiry_date\",\"type\":\"date\",\"label\":\"Expiry Date\",\"required\":false,\"default_value\":\"2029-12-15\"},{\"key\":\"brand\",\"type\":\"text\",\"label\":\"Brand\",\"required\":false,\"default_value\":\"clipsal\"}]" AS json_text_field
)
SELECT JSON_EXTRACT(json_text_field, '$[0].default_value') FROM test_table
If the key is not always in the first element, you could use this instead:
WITH test_table AS (
SELECT "[{\"key\":\"expiry_date\",\"type\":\"date\",\"label\":\"Expiry Date\",\"required\":false,\"default_value\":\"2029-12-15\"},{\"key\":\"brand\",\"type\":\"text\",\"label\":\"Brand\",\"required\":false,\"default_value\":\"clipsal\"}]" AS json_text_field
)
SELECT value FROM (
    SELECT
        JSON_EXTRACT(json_value, '$.key') AS id,
        JSON_EXTRACT(json_value, '$.default_value') AS value
    FROM test_table, UNNEST(JSON_EXTRACT_ARRAY(json_text_field, '$')) AS json_value
) WHERE id = '"expiry_date"'
Below is for BigQuery Standard SQL
#standardSQL
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value') AS default_value
FROM `project.dataset.table`,
UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
You can test the above with sample / dummy data from your question, as in the example below
#standardSQL
WITH `project.dataset.table` AS (
SELECT '''
[
{
"key":"expiry_date",
"type":"date",
"label":"Expiry Date",
"required":false,
"default_value":"2029-12-15"
},
{
"key":"brand",
"type":"text",
"label":"Brand",
"required":false,
"default_value":"clipsal"
}
]
''' json
)
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value') AS default_value
FROM `project.dataset.table`,
UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
with output
Row default_value
1 2029-12-15
Depending on your real use case, you can consider the variation below:
#standardSQL
SELECT *,
(
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value')
FROM UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
) AS default_value
FROM `project.dataset.table`

sql query to select multiple items in sorted order

I am writing a POST API in C# to select some values in Azure Cosmos DB and am using direct SQL queries.
The aim is to get the highest value for each id from the request.
request body:
[
{
"userid":"1"
},
{
"userid":"4"
}
]
Db looks like:
{
"userid":"1",
"value":"10",
"Date":"10-9-19"
}
{
"userid":"1",
"value":"20",
"Date":"11-8-19"
}
{
"userid":"4",
"value":"30",
"Date":"10-9-19"
}
{
"userid":"4",
"value":"40",
"Date":"11-9-19"
}
Expected output:
[
{
"userid":"4",
"value":"40",
"Date":"11-9-19"
},
{
"userid":"1",
"value":"20",
"Date":"11-8-19"
}
]
I tried to get the ids into an array and then used the 'IN' operator, but it would be helpful and appreciated if there were a simpler query.
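For reference, the IN-based filter presumably looked something like this in the Cosmos DB SQL API (c being the usual container alias):
SELECT c.userid, c.value, c.Date
FROM c
WHERE c.userid IN ("1", "4")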
Try the following to get the results. As per your data, this will work.
SELECT userid,
MAX(value) value,
MAX(Date) Date
FROM YourTable
GROUP BY userid
ORDER BY userid
If you want the related date for the MAX(value), then try this:
SELECT Y.userid, Y.Value, Y.Date
FROM YourTable Y
JOIN
(
SELECT userid,
MAX(value) value
FROM YourTable
GROUP BY userid
)D ON D.userid = Y.userid AND D.value = Y.value
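If two rows can share the same MAX(value) for a userid, the join above returns both of them; a ROW_NUMBER() variant avoids that in standard SQL (the Cosmos DB SQL API may not support window functions, so treat this as a sketch):
SELECT userid, value, Date
FROM (
    -- rank each user's rows by value, highest first
    SELECT userid, value, Date,
           ROW_NUMBER() OVER (PARTITION BY userid ORDER BY value DESC) AS rn
    FROM YourTable
) ranked
WHERE rn = 1
ORDER BY userid DESC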

MDX query to join two different dimension members

I have the below SELECT statement, which I want to convert into cube members and calculate the sum.
SELECT
    NON EMPTY { [Measures].[volume] } ON COLUMNS,
    NON EMPTY { ( [par_Account].[Account].[Account].ALLMEMBERS * [Product].[Hierarchy].[Local product var].ALLMEMBERS ) } ON ROWS
FROM (
    SELECT ( { [Product].[Hierarchy].[local product Group].&[-33554010354150679].&[-952789350662854159].&[8639428195894987853] } ) ON COLUMNS
    FROM (
        SELECT ( { [Time].[Financial Period].&[-8540082585673218205] } ) ON COLUMNS
        FROM (
            SELECT ( { [Market].[Market].&[-3381499019102906042] } ) ON COLUMNS
            FROM [cube]
        )
    )
)
WHERE ( [Market].[Market].&[-3381499019102906042], [Time].[Financial Period].&[-8540082585673218205] )
Please help me with this.
I guess this is what you are looking for. However, it's always nice to explain what your problem is, or perhaps provide a screenshot.
SELECT
NON EMPTY { [Measures].[volume] } ON COLUMNS,
NON EMPTY {
(
[Market].[Market].&[-3381499019102906042] *
[Time].[Financial Period].&[-8540082585673218205] *
{ [Product].[Hierarchy].[local product Group].&[-33554010354150679].&[-952789350662854159].&[8639428195894987853] } *
[par_Account].[Account].[Account].ALLMEMBERS *
[Product].[Hierarchy].[Local product var].ALLMEMBERS
)
} ON ROWS
FROM [cube]

Sorting results in recursive query

I have a basic CATEGORIES-like table consisting of fields like a primary key, a parent_id, a title, and a sorting integer.
I am able to retrieve the results using a recursive CTE and convert them to a JSON array, but besides grouping by parent_id I also want to fetch them ordered by the sorting value.
So far:
with recursive parents as
(
select n.boat_type_id, n.title, '{}'::int[] as parents, 0 as level
from boat_types n
where n.parent_id is NULL
union all
select n.boat_type_id, n.title, parents || n.parent_id, level+1
from parents p
join boat_types n on n.parent_id = p.boat_type_id
where not n.boat_type_id = any(parents)
),
children as
(
select n.parent_id, json_agg(jsonb_build_object('title', n.title->>'en'))::jsonb as js
from parents tree
join boat_types n using(boat_type_id)
where level > 0 and not boat_type_id = any(parents)
group by n.parent_id
union all
select n.parent_id, jsonb_build_object('category', n.title->>'en') || jsonb_build_object('subcategories', js) as js
from children tree
join boat_types n on n.boat_type_id = tree.parent_id
)
select jsonb_agg(js) as categories
from children
where parent_id is null
The above gives me the result set and the structure I want, but how can I make it follow the sorting value for both the nodes and the leaves?
Sample response:
[
{
"sorting":0,
"category":"Motor",
"subcategories":[
{
"title":"Motor Yacht",
"sorting":2
},
{
"title":"Mega Yacht",
"sorting":1
}
]
},
{
"sorting":1,
"category":"Sailing",
"subcategories":[
{
"title":"Sailing Yacht",
"sorting":2
},
{
"title":"Cruiser Racer",
"sorting":1
}
]
},
{
"sorting":2,
"category":"Catamaran",
"subcategories":[
{
"title":"Catamaran",
"sorting":2
},
{
"title":"Trimaran",
"sorting":1
}
]
},
{
"sorting":3,
"category":"Other",
"subcategories":[
{
"title":"Other",
"sorting":2
},
{
"title":"Airboat",
"sorting":1
}
]
}
]
I have tried aggregating the sorting values into ARRAY fields and sorting by them, but it doesn't work.
You can use the order by clause in the json_agg() aggregate:
...
children as
(
select
n.parent_id,
json_agg(jsonb_build_object('title', n.title->>'en', 'sorting', n.sorting) order by n.sorting)::jsonb as js
from parents tree
join boat_types n using(boat_type_id)
where level > 0 and not boat_type_id = any(parents)
group by n.parent_id
union all
select
n.parent_id,
jsonb_build_object('category', n.title->>'en', 'sorting', n.sorting) || jsonb_build_object('subcategories', js) as js
from children tree
join boat_types n on n.boat_type_id = tree.parent_id
)
...
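Since the 'sorting' key is now carried through in js, the outer aggregate can be ordered the same way if the top-level categories should also follow it (a small tweak to the final select, under the same assumptions):
-- order the top-level categories by their own sorting value
select jsonb_agg(js order by (js->>'sorting')::int) as categories
from children
where parent_id is null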