[
{
"key":"expiry_date",
"type":"date",
"label":"Expiry Date",
"required":false,
"default_value":"2029-12-15"
},
{
"key":"brand",
"type":"text",
"label":"Brand",
"required":false,
"default_value":"clipsal"
}
]
Is there a way that I could extract the default_value of "expiry_date" in the nested JSON above? The data is under a column called attributes.
Have you tried any of the JSON functions described in the BigQuery documentation? Maybe one of them can help.
Also, if the first element of the json array will be always what you want, you could use something like:
-- Inline the sample payload so the query is self-contained.
WITH test_table AS (
  SELECT "[{\"key\":\"expiry_date\",\"type\":\"date\",\"label\":\"Expiry Date\",\"required\":false,\"default_value\":\"2029-12-15\"},{\"key\":\"brand\",\"type\":\"text\",\"label\":\"Brand\",\"required\":false,\"default_value\":\"clipsal\"}]" AS json_text_field
)
-- '$[0]' addresses the first element of the JSON array directly.
SELECT JSON_EXTRACT(json_text_field, '$[0].default_value')
FROM test_table
If the key is not always in the first element, you could use this instead:
WITH test_table AS (
  SELECT "[{\"key\":\"expiry_date\",\"type\":\"date\",\"label\":\"Expiry Date\",\"required\":false,\"default_value\":\"2029-12-15\"},{\"key\":\"brand\",\"type\":\"text\",\"label\":\"Brand\",\"required\":false,\"default_value\":\"clipsal\"}]" AS json_text_field
)
-- Fix: extract from each unnested element (json_value), not from the whole
-- array text (json_text_field) -- '$.key' applied to the array text yields
-- NULL, so the original WHERE clause matched no rows.
-- JSON_EXTRACT_SCALAR also strips the surrounding JSON quotes, so the key
-- can be compared as a plain string instead of '"expiry_date"'.
SELECT value FROM (
  SELECT JSON_EXTRACT_SCALAR(json_value, '$.key') AS id,
         JSON_EXTRACT_SCALAR(json_value, '$.default_value') AS value
  FROM test_table, UNNEST(JSON_EXTRACT_ARRAY(json_text_field, '$')) AS json_value
) WHERE id = 'expiry_date'
Below is for BigQuery Standard SQL
#standardSQL
-- Generic form: unnest the JSON array and keep only the element whose
-- "key" is 'expiry_date'. JSON_EXTRACT_SCALAR strips the JSON quoting,
-- so both the comparison and the output are plain strings.
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value') AS default_value
FROM `project.dataset.table`,
UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
You can test the above with the sample/dummy data from your question, as in the example below.
#standardSQL
-- Self-contained demo: the CTE stands in for `project.dataset.table`,
-- holding the question's sample payload as a triple-quoted JSON string.
WITH `project.dataset.table` AS (
SELECT '''
[
{
"key":"expiry_date",
"type":"date",
"label":"Expiry Date",
"required":false,
"default_value":"2029-12-15"
},
{
"key":"brand",
"type":"text",
"label":"Brand",
"required":false,
"default_value":"clipsal"
}
]
''' json
)
-- Same query as above, run against the dummy table.
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value') AS default_value
FROM `project.dataset.table`,
UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
with output
Row default_value
1 2029-12-15
Depends on your real use case - you can consider below variation
#standardSQL
-- Variation: keep every source row and attach the expiry_date default
-- value as an extra column via a correlated scalar subquery over the
-- row's own JSON array.
SELECT *,
(
SELECT JSON_EXTRACT_SCALAR(el, '$.default_value')
FROM UNNEST(JSON_EXTRACT_ARRAY(json)) el
WHERE JSON_EXTRACT_SCALAR(el, '$.key') = 'expiry_date'
) AS default_value
FROM `project.dataset.table`
Related
I am trying to create a nested json in Snowflake and have narrowed down the query like below where I have nested it on id. However, I want the nested json to also apply to the inner layer and I am finding it hard to get the right query for it.
-- The asker's attempt: builds the nested JSON by string concatenation
-- (LISTAGG). Each placeId is emitted as its own one-key object instead of
-- grouping all actionIds under a single placeId key -- which is exactly
-- the problem described in the question.
WITH subquery AS (
SELECT id, placeId, actionId, resultValue
FROM my_table
)
SELECT id,
'{"resultValues": {' || listagg('"' || placeId || '": {"' || actionId || '": ' || resultValue || '}', ',') within group (order by placeId) || '}}' as nested_json
FROM subquery
GROUP BY id;
Below is how the current result is looking like for each id.
I am trying to get the actionId1 and actionId2 grouped under the placeId1 and placeId2 so that it looks like below. How do I get this done? Any ideas would be appreciated.
Meet FLATTEN() and LATERAL — they like to hang out with OBJECT_AGG(), which needs its own space via CTEs.
-- CTE: the sample document, parsed into a VARIANT (aliased VOLIA).
WITH CTE AS (
SELECT
parse_json(
' { "resultValues": [
{ "placeId1": { "actionId1": 1.1 } }, { "placeId1": { "actionId2": 1.2 } },
{ "placeId2": { "actionId1": 1.3 } }, { "placeId2":{ "actionId2": 1.4} } ] }'
) VOLIA
),
-- CTE2: flatten four levels deep (top key -> array -> placeId object ->
-- actionId leaves) and re-aggregate the actionId/value pairs per placeId.
CTE2 AS (
SELECT
-- KIAORA = top-level key path ("resultValues"),
-- TE_REO = placeId path, MAORI = aggregated {actionId: value} object.
DISTINCT KIAORA.PATH KIAORA,
TE_REO.PATH TE_REO,
OBJECT_AGG(MAORI.PATH, MAORI.VALUE) OVER (PARTITION BY TE_REO.PATH) MAORI
FROM
CTE,
LATERAL FLATTEN(INPUT => VOLIA) KIAORA,
LATERAL FLATTEN(KIAORA.VALUE) HELLO,
LATERAL FLATTEN(HELLO.VALUE) TE_REO,
LATERAL FLATTEN (INPUT => TE_REO.VALUE) MAORI
)
-- Rebuild the desired shape: one object per top-level key, wrapping an
-- array that groups all actionIds under each placeId.
SELECT
DISTINCT OBJECT_CONSTRUCT(
KIAORA,
ARRAY_CONSTRUCT(
OBJECT_AGG(TE_REO, MAORI) OVER (PARTITION BY KIAORA)
)
) ANSWER,
VOLIA
FROM
CTE2, CTE
after starting from:
I created this simple table in SQL:
-- Test fixture: daily assignments of a name to a leader.
CREATE TABLE testTable (
    date   DATE NOT NULL,  -- NOTE: "date" is a reserved word in many engines
    leader VARCHAR(20),
    name   VARCHAR(20)
);

INSERT INTO testTable (date, leader, name)
VALUES
    ('2021-01-01', 'KIM',   'Anders'),
    ('2021-01-02', 'KIM',   'Annika'),
    ('2021-01-03', 'KIM',   'Anna'),
    ('2021-01-04', 'KIM',   'Anna'),
    ('2021-01-03', 'KIM',   'Annika'),
    ('2021-01-01', 'JOHAN', 'Sara'),
    ('2021-01-02', 'JOHAN', 'Sara'),
    ('2021-01-03', 'JOHAN', 'Sara');
I am trying to get an equivalent solution to the following code in a DAX measure, if possible.
-- For every name that appears at least twice, show its leader and row count.
SELECT
    MAX(leader),
    name,
    COUNT(name)
FROM testTable
GROUP BY name
HAVING COUNT(name) >= 2
The result that I'm looking for is:
Leader
Measure
KIM
2
JOHAN
1
Think about HAVING as a filter that happens after a grouping. So something like
Measure =
// Count the names whose per-name row count is at least 2.
COUNTROWS (
    FILTER (
        SUMMARIZECOLUMNS ( 'Table'[Name], "Count", COUNT ( 'Table'[Name] ) ),
        [Count] >= 2
    )
)
And here's a simple way to present test data for DAX questions, entirely in DAX:
testTable = SELECTCOLUMNS
(
// In-line table constructor: each tuple is one row of (date, leader, name).
// The { } constructor names its columns [Value1], [Value2], [Value3];
// SELECTCOLUMNS renames them to the final column names below.
{
(date(2021,01,01),"KIM","Anders")
,(date(2021,01,02),"KIM","Annika")
,(date(2021,01,03),"KIM","Anna")
,(date(2021,01,04),"KIM","Anna")
,(date(2021,01,03),"KIM","Annika")
,(date(2021,01,01),"JOHAN","Sara")
,(date(2021,01,02),"JOHAN","Sara")
,(date(2021,01,03),"JOHAN","Sara")
}, "date", [Value1]
, "leader", [Value2]
, "name", [Value3]
)
This is much easier way to reproduce a scenario than creating a table in SQL Server, and loading it through Power Query, or using the "Enter Data" form in PowerBI which creates the table in Power Query.
Edit: after adding the desired result to the question, the answer changes like follows
A possible solution is to implement a measure that counts the number of names that appear more than once for the selected leader
# Names ge 2 =
// For the leader(s) in the current filter context, count the distinct
// names that occur more than once. ALLEXCEPT keeps only the name and
// leader filters while re-counting the rows for each name.
COUNTROWS (
FILTER (
VALUES ( Test[name] ),
CALCULATE ( COUNTROWS ( Test ), ALLEXCEPT ( Test, Test[name], Test[leader] ) ) > 1
)
)
here is a working example on dax.do
DEFINE
// Inline fixture table matching the question's SQL test data.
TABLE Test =
DATATABLE (
"date", DATETIME,
"leader", STRING,
"name", STRING,
{
{ "2021-01-01", "KIM", "Anders" },
{ "2021-01-02", "KIM", "Annika" },
{ "2021-01-03", "KIM", "Anna" },
{ "2021-01-04", "KIM", "Anna" },
{ "2021-01-03", "KIM", "Annika" },
{ "2021-01-01", "JOHAN", "Sara" },
{ "2021-01-02", "JOHAN", "Sara" },
{ "2021-01-03", "JOHAN", "Sara" }
}
)
// Count of names appearing more than once for the leader in context.
MEASURE Test[# Names ge 2] =
COUNTROWS (
FILTER (
VALUES ( Test[name] ),
CALCULATE ( COUNTROWS ( Test ), ALLEXCEPT ( Test, Test[name], Test[leader] ) ) > 1
)
)
// Evaluate the measure once per leader.
EVALUATE
SUMMARIZECOLUMNS (
Test[leader],
"# Names ge 2", [# Names ge 2]
)
and the resulting output
I've left the measure of my previous answer on the original dax.do, that returned this output
I have a column 'amp' in a table 'EXAMPLE'. Column 'amp' is an array which looks like this:
[{
"list": [{
"element": {
"x_id": "12356789XXX",
"y_id": "12356789XXX38998"
}
},
{
"element": {
"x_id": "5677888356789XXX",
"y_id": "1XXX387688"
}
}]
}]
How should I query using get_path() or flatten() to extract the latest x_id and y_id value (or other alternative)
In this example it is only 2 elements, but there could 1 to 6000 elements containing x_id and y_id.
Help much appreciated!
Someone may have a more elegant way than this, but you can use a CTE. In the first table expression, grab the max of the array. In the second part, grab the values you need.
-- Sample payload as a session variable.
-- NOTE(review): the literal has a trailing comma after the second "y_id"
-- entry -- confirm PARSE_JSON accepts it; strict JSON would reject it.
set json = '[{"list": [{"element": {"x_id": "12356789XXX","y_id": "12356789XXX38998"}},{"element": {"x_id": "5677888356789XXX","y_id": "1XXX387688",}}]}]';
create temp table foo(v variant);
insert into foo select parse_json($json);
with
-- MAX_INDEX: the largest array index found anywhere in the recursive
-- flatten -- used as "the latest element" of the list.
MAX_INDEX(M) as
(
select max("INDEX") MAX_INDEX
from foo, lateral flatten(v, recursive => true)
),
-- VALS: every (value, path, key) triple in the document.
VALS(V, P, K) as
(
select "VALUE", "PATH", "KEY"
from foo, lateral flatten(v, recursive => true)
)
-- Keep only the x_id / y_id leaves that live under the last list element.
select k as "KEY", V::string as VALUE from vals, max_index
where VALS.P = '[0].list[' || max_index.m || '].element.x_id' or
VALS.P = '[0].list[' || max_index.m || '].element.y_id'
;
Assuming that the outer array ALWAYS contains a single dictionary element, you could use this:
-- Take the last "element" of the (single) outer object's "list" array and
-- project its two ids as plain strings.
SELECT
    amp[0]:"list"[ARRAY_SIZE(amp[0]:"list") - 1]:"element":"x_id"::VARCHAR AS x_id,
    amp[0]:"list"[ARRAY_SIZE(amp[0]:"list") - 1]:"element":"y_id"::VARCHAR AS y_id
FROM T;
Or if you prefer a bit more modularity/readability, you could use this:
-- Same result, broken into named steps for readability.
WITH list_array AS (
    -- The "list" array inside the single outer object.
    SELECT amp[0]:"list" AS _ARRAY
    FROM T
),
last_element AS (
    -- The "element" object of the array's final entry.
    SELECT _ARRAY[ARRAY_SIZE(_ARRAY) - 1]:"element" AS _DICT
    FROM list_array
)
SELECT
    _DICT:"x_id"::VARCHAR AS x_id,
    _DICT:"y_id"::VARCHAR AS y_id
FROM last_element;
Note: I have not used FLATTEN here because I did not see a good reason to use it.
The two queries below each return a separate JSON document; when I merge the data, the result is invalid JSON.
-- Build the Agriculture_Expenses JSON into the AFL_JSON variable.
SELECT JSON_OBJECT('Agriculture_Expenses' value
JSON_ARRAYAGG(JSON_OBJECT('Acreage12' value acreage,
'Farmer_projected' value
sellingprice,
'Projected_Expense' value attr1,
'Pattern1' value crop
)
-- JSON_OBJECT('ITEM' VALUE .50)
))
INTO AFL_JSON
FROM AGRI_INCOME
WHERE ATTR2 = 'AGRICULTURE_INCOME';
-- Build the Cash_Flow_Form JSON -- NOTE(review): this overwrites AFL_JSON
-- from the previous SELECT INTO rather than merging with it.
SELECT JSON_OBJECT('Cash_Flow_Form' value
JSON_ARRAYAGG(JSON_OBJECT('Agri_Coapp1' value rec.Coapp1,
'Agri_Coapp2' value rec.Coapp2,
'Agri_Coapp3' value rec.Coapp3,
'Lease_Land' value
rec.lease_land,
'Total_Land' value '',
'Land_under_cultivation1' value
rec.Land_under_cultivation,
'App_agr_lan' value
rec.Land_Holding)
))
INTO AFL_JSON
FROM dual;
-- NOTE(review): prints AFL_JSON_DATA but the variable above is AFL_JSON --
-- likely a typo in the question; verify the variable name.
DBMS_OUTPUT.PUT_LINE(AFL_JSON_DATA);
The two queries above return data in the format below; an extra { appears when I merge the two JSON objects. How can I get the data in the format below?
{
"Cash_Flow_Form": [
{
"Agri_Coapp1": "1",
"Agri_Coapp2": "2",
"Agri_Coapp3": null,
"Lease_Land": "2",
"Total_Land": null,
"Land_under_cultivation1": "4",
"App_agr_lan": "5"
}
],
"Agriculture_Expenses": [
{
"Acreage12": "2",
"Farmer_projected": "2500",
"Projected_Expense": "81400",
"Pattern1": "Khariff"
}
]
}
You can use JSON_MERGEPATCH() function within a SELECT statement CROSS JOINing those JSON_OBJECTs
-- AGRI_INCOME_JS: the Agriculture_Expenses document as one JSON object.
-- NOTE(review): column is spelled "accreage" here but "acreage" in the
-- question's query -- verify against the real table.
WITH AGRI_INCOME_JS AS
(
SELECT JSON_OBJECT( 'Agriculture_Expenses' value
JSON_ARRAYAGG(JSON_OBJECT ('Acreage12' value accreage,
'Farmer_projected' value sellingprice,
'Projected_Expense' value attr1,
'Pattern1' value crop
)
)
) AS JS2
FROM AGRI_INCOME
WHERE ATTR2 = 'AGRICULTURE_INCOME'
-- CASH_FLOW_JS: the Cash_Flow_Form document as one JSON object.
), CASH_FLOW_JS AS
(
SELECT JSON_OBJECT('Cash_Flow_Form' value
JSON_ARRAYAGG(JSON_OBJECT('Agri_Coapp1' value coapp1,
'Agri_Coapp2' value coapp2,
'Agri_Coapp3' value coapp3,
'Lease_Land' value lease_land,
'Total_Land' value Total_Land,
'Land_under_cultivation1' value Land_under_cultivation,
'App_agr_lan' value Land_Holding
)
)
) AS JS1
FROM CASH_FLOW
)
-- Merge the two single-row documents into one object (Oracle 18c+).
SELECT JSON_MERGEPATCH(JS1,JS2) AS "Result JSON"
FROM CASH_FLOW_JS
CROSS JOIN AGRI_INCOME_JS
The above query works for Oracle 18c and later. If your DB version is 12c, then use:
-- Same two single-object documents as the 18c version above.
WITH AGRI_INCOME_JS AS
(
SELECT JSON_OBJECT( 'Agriculture_Expenses' value
JSON_ARRAYAGG(JSON_OBJECT ('Acreage12' value accreage,
'Farmer_projected' value sellingprice,
'Projected_Expense' value attr1,
'Pattern1' value crop
)
)
) AS JS2
FROM AGRI_INCOME
WHERE ATTR2 = 'AGRICULTURE_INCOME'
), CASH_FLOW_JS AS
(
SELECT JSON_OBJECT('Cash_Flow_Form' value
JSON_ARRAYAGG(JSON_OBJECT('Agri_Coapp1' value coapp1,
'Agri_Coapp2' value coapp2,
'Agri_Coapp3' value coapp3,
'Lease_Land' value lease_land,
'Total_Land' value Total_Land,
'Land_under_cultivation1' value Land_under_cultivation,
'App_agr_lan' value Land_Holding
)
)
) AS JS1
FROM CASH_FLOW
-- Stack both documents into one column so they can be aggregated.
), JS(JS) AS
( SELECT JS1 FROM CASH_FLOW_JS
UNION ALL
SELECT JS2 FROM AGRI_INCOME_JS)
-- 12c has no JSON_MERGEPATCH, so splice the two objects together by
-- string surgery on the JSON_ARRAYAGG output.
-- NOTE(review): this REPLACE chain is fragile -- it will corrupt the
-- result if the data itself contains any of the matched character
-- sequences; verify against real data.
SELECT REPLACE(REPLACE(REPLACE(REPLACE(JSON_ARRAYAGG(JS),'\'),'["'),'"]'),'}","{',',')
AS "Result JSON"
FROM JS
I am writing a post api in c# to select some values in Azure Cosmos db and is using direct sql queries.
The aim to get the highest value against each id from the request.
request body:
[
{
"userid":"1"
},
{
"userid":"4"
}
]
Db looks like:
{
"userid":"1",
"value":"10",
"Date":"10-9-19"
}
{
"userid":"1",
"value":"20",
"Date":"11-8-19"
}
{
"userid":"4",
"value":"30",
"Date":"10-9-19"
}
{
"userid":"4",
"value":"40",
"Date":"11-9-19"
}
Expected output:
[
{
"userid":"4",
"value":"40",
"Date":"11-9-19"
},
{
"userid":"1",
"value":"20",
"Date":"11-8-19"
}
]
I tried to get the ids into an array and then used the 'IN' operator, but a simpler query would be helpful and appreciated.
try the following to get the results.
As per your data, this will work.
-- One row per userid with its maximum value and maximum Date.
-- CAUTION: MAX(value) and MAX(Date) are computed independently, so the
-- returned Date is not necessarily the date of the max value; both are
-- strings here, so MAX compares lexicographically.
SELECT userid,
MAX(value) value,
MAX(Date) Date
FROM YourTable
GROUP BY userid
ORDER BY userid
If you want related date for the MAX(Value), then try this.
-- Joins each row back to its per-user maximum value, so the Date shown
-- belongs to the row holding that maximum.
-- NOTE(review): if a user has two rows tied on the max value, both rows
-- are returned; value is a string, so MAX is lexicographic -- confirm
-- that matches the data ("9" > "10" as strings).
SELECT Y.userid, Y.Value, Y.Date
FROM YourTable Y
JOIN
(
SELECT userid,
MAX(value) value
FROM YourTable
GROUP BY userid
)D ON D.userid = Y.userid AND D.value = Y.value