How to set more than a filter over JSON data using JSON_VALUE? - sql

I'm just trying to set a query to get data from a collection of object JSON:
-- Setup from the question.
-- NOTE(review): the literal below contains two top-level JSON objects back
-- to back, which is NOT a valid JSON document -- the answer below wraps
-- them in an array to fix this.
create table test (LINE_SPECS nvarchar(max));
insert into test values (N'
{
"lineName":"GHjr",
"pipeDiameter":"12",
"pipeLength":"52000",
"pressure":"15",
"volume":"107"
},
{
"lineName":"Ks3R",
"pipeDiameter":"9",
"pipeLength":"40000",
"pressure":"15",
"volume":"80"
}
');
Now, getting the lineName of the first object (lineName: "GHjr") succeeds:
-- Works for the first object: per the question, JSON_VALUE finds "GHjr"
-- here -- presumably because it only ever reads the first object of the
-- (invalid) document; see the answer below.
select
JSON_VALUE(LINE_SPECS, '$.lineName') as line_name
, JSON_VALUE(LINE_SPECS, '$.pipeDiameter') as diameter
from test
WHERE JSON_VALUE(LINE_SPECS, '$.lineName') = 'GHjr'
;
but that is not possible when I try to get the second one, "Ks3R":
-- Fails for the second object: JSON_VALUE('$.lineName') never reaches
-- "Ks3R", so the WHERE clause matches no rows.
select
JSON_VALUE(LINE_SPECS, '$.lineName') as line_name
, JSON_VALUE(LINE_SPECS, '$.pipeDiameter') as diameter
from test
WHERE JSON_VALUE(LINE_SPECS, '$.lineName') = 'Ks3R'
How can I do that ?
Thanks.

First, your JSON data isn't valid; it should be an array.
It should look like this:
-- Corrected setup: the two objects are wrapped in [ ... ] so the column
-- holds a single valid JSON document (an array of objects).
create table test (LINE_SPECS nvarchar(max));
insert into test values (N'
[
{
"lineName":"GHjr",
"pipeDiameter":"12",
"pipeLength":"52000",
"pressure":"15",
"volume":"107"
},
{
"lineName":"Ks3R",
"pipeDiameter":"9",
"pipeLength":"40000",
"pressure":"15",
"volume":"80"
}
]');
You can try to use OPENJSON with CROSS APPLY to parse the JSON and achieve this:
-- OPENJSON shreds each array element into its own row; the WITH clause
-- maps JSON properties onto typed columns. Because every element becomes
-- a row, the WHERE clause can now filter on any element, not just the first.
select
t2.*
from test t1
CROSS APPLY
OPENJSON(t1.LINE_SPECS)
WITH
(
line_name varchar(MAX) N'$.lineName',
diameter varchar(MAX) N'$.pipeDiameter'
) AS t2
WHERE line_name = 'Ks3R'
sqlfiddle

Related

Query key values in a json column

I have a table "jobs" with one of the columns called "check_list" ( varchar(max) that has JSON values, an example value would be
{
"items":[
{
"name":"machine 1",
"state":"",
"comment":"",
"isReleaseToProductionCheck":true,
"mnachine_id":10
},
{
"name":"machine 2",
"state":"",
"comment":"",
"isReleaseToProductionCheck":true,
"machine_id":12
}
]
}
Now how would I write a SQL query to only return the rows where the column "check_list" has items[machine_id] = 12
In the end, after some trial and error, this was the solution that worked for me. I had to add the ISJSON check because some of the older data was invalid JSON.
-- Return jobs whose check_list JSON contains an item with machine_id = 12.
WITH jobs (id, workorder, selectedMachine) AS(
SELECT
[id],
[workorder],
-- Scalar subquery: shred the "items" array and pick the machine_id 12
-- entry (NULL when absent).
-- NOTE(review): this raises an error if more than one item in a single
-- row matches -- confirm machine_id is unique within a check_list.
(
select
*
from
openjson(check_list, '$.items') with (machine_id int '$.machine_id')
where
machine_id = 12
) as selectedMachine
FROM
engineering_job_schedule
WHERE
-- Guard: some legacy rows hold invalid JSON, which would break OPENJSON.
ISJSON(check_list) > 0
)
Select
*
from
jobs
where
-- Keep only rows where the subquery found a matching item.
selectedMachine = 12

Migrating data from jsonb to integer[] SQL

I have jsonb field(data) in Postgresql with a structure like:
{ "id" => { "some_key" => [1, 2, 3] } }
I need to migrate the value to a different field.
t.jsonb "data"
t.integer "portals", default: [], array: true
When I'm trying to do like this:
-- Fails as described: data -> '1' ->> 'portals' yields text, so
-- ARRAY[...] builds a text[], which cannot be assigned to the
-- integer[] column "portals" without a cast.
UPDATE table_name
SET portals = ARRAY[data -> '1' ->> 'portals']
WHERE id = 287766
It raises an error:
Caused by PG::DatatypeMismatch: ERROR: column "portals" is of type integer[] but expression is of type text[]
Here is one way to do it. But if you search the site, as you should have done, you will find more.
Schema
-- Example schema: t holds the jsonb source, i holds the integer[]
-- migration target.
create table t (
data jsonb
);
insert into t values ('{"1" : { "k1" : [1,2,3,5]} }');
insert into t values ('{"2" : { "k2" : [4,5,6,7]} }');
create table i (
id int,
v int[]
); -- terminating semicolon was missing in the original snippet
Some tests
-- Quick probe: the ? operator tests whether the top-level jsonb object
-- has the key '1'; -> keeps the result as jsonb.
select data -> '1' -> 'k1'
from t
where data ? '1'
;
insert into i values(1,ARRAY[1,2,3]);
-- ->> extracts the array as text, e.g. '[1,2,3,5]'; swapping the square
-- brackets for braces yields '{1,2,3,5}', a valid int[] literal to cast.
update i
set v = (select replace(replace(data -> '1' ->> 'k1', '[', '{'), ']', '}')::int[] from t where data ? '1')
where id = 1;
select * from i;
The above gets the array as text, as you did. After that, some text replacements are enough to cast the text to an integer-array literal.
DB Fiddle

Simple Having Clause created with Power BI measure

I created this simple table in SQL:
-- Test fixture: one row per (date, leader, name) observation.
create table testTable (
date date not null,
leader varchar(20),
name varchar(20)
)
insert into testTable
values
('2021-01-01', 'KIM', 'Anders'),
('2021-01-02', 'KIM', 'Annika'),
('2021-01-03', 'KIM', 'Anna'),
('2021-01-04', 'KIM', 'Anna'),
('2021-01-03', 'KIM', 'Annika'),
('2021-01-01', 'JOHAN', 'Sara'),
('2021-01-02', 'JOHAN', 'Sara'),
('2021-01-03', 'JOHAN', 'Sara')
I am trying to get an equivalent solution to the following code in a DAX measure, if possible.
-- Names seen at least twice; max(leader) is used only because leader is
-- not part of the GROUP BY and must be aggregated to be selected.
select max(leader), name, count(name)
from testTable
group by name
having count(name) >= 2
The result that I'm looking for is:
Leader
Measure
KIM
2
JOHAN
1
Think about HAVING as a filter that happens after a grouping. So something like
-- HAVING analogue in DAX: group by name, keep groups whose count >= 2,
-- then count the surviving groups.
Measure = COUNTROWS(filter(SUMMARIZECOLUMNS('Table'[Name],"Count",count('Table'[Name])), [Count]>=2))
And here's a simple way to present test data for DAX questions, entirely in DAX:
-- Inline test data built entirely in DAX: a table constructor of tuples,
-- with SELECTCOLUMNS renaming the default Value1/Value2/Value3 columns.
testTable = SELECTCOLUMNS
(
{
(date(2021,01,01),"KIM","Anders")
,(date(2021,01,02),"KIM","Annika")
,(date(2021,01,03),"KIM","Anna")
,(date(2021,01,04),"KIM","Anna")
,(date(2021,01,03),"KIM","Annika")
,(date(2021,01,01),"JOHAN","Sara")
,(date(2021,01,02),"JOHAN","Sara")
,(date(2021,01,03),"JOHAN","Sara")
}, "date", [Value1]
, "leader", [Value2]
, "name", [Value3]
)
This is a much easier way to reproduce a scenario than creating a table in SQL Server and loading it through Power Query, or using the "Enter Data" form in Power BI, which creates the table in Power Query.
Edit: after adding the desired result to the question, the answer changes like follows
A possible solution is to implement a measure that counts the number of names that appear more than once for the selected leader
-- Counts the distinct names that appear more than once for the leader(s)
-- in the current filter context.
# Names ge 2 =
COUNTROWS (
FILTER (
VALUES ( Test[name] ),
-- For each candidate name, re-count rows while keeping only the name
-- and leader filters (ALLEXCEPT removes every other filter).
CALCULATE ( COUNTROWS ( Test ), ALLEXCEPT ( Test, Test[name], Test[leader] ) ) > 1
)
)
here is a working example on dax.do
-- Self-contained repro for dax.do: inline fixture, measure, and query.
DEFINE
-- Inline fixture equivalent to the SQL testTable.
TABLE Test =
DATATABLE (
"date", DATETIME,
"leader", STRING,
"name", STRING,
{
{ "2021-01-01", "KIM", "Anders" },
{ "2021-01-02", "KIM", "Annika" },
{ "2021-01-03", "KIM", "Anna" },
{ "2021-01-04", "KIM", "Anna" },
{ "2021-01-03", "KIM", "Annika" },
{ "2021-01-01", "JOHAN", "Sara" },
{ "2021-01-02", "JOHAN", "Sara" },
{ "2021-01-03", "JOHAN", "Sara" }
}
)
-- Same measure as above: names appearing more than once per leader.
MEASURE Test[# Names ge 2] =
COUNTROWS (
FILTER (
VALUES ( Test[name] ),
CALCULATE ( COUNTROWS ( Test ), ALLEXCEPT ( Test, Test[name], Test[leader] ) ) > 1
)
)
-- Evaluate the measure once per leader.
EVALUATE
SUMMARIZECOLUMNS (
Test[leader],
"# Names ge 2", [# Names ge 2]
)
and the resulting output
I've left the measure of my previous answer on the original dax.do, that returned this output

Snowflake get_path() or flatten() array query - to find latest key:value

I have a column 'amp' in a table 'EXAMPLE'. Column 'amp' is an array which looks like this:
[{
"list": [{
"element": {
"x_id": "12356789XXX",
"y_id": "12356789XXX38998",
}
},
{
"element": {
"x_id": "5677888356789XXX",
"y_id": "1XXX387688",
}
}]
}]
How should I query using get_path() or flatten() to extract the latest x_id and y_id value (or other alternative)
In this example there are only 2 elements, but there could be anywhere from 1 to 6,000 elements containing x_id and y_id.
Help much appreciated!
Someone may have a more elegant way than this, but you can use a CTE. In the first table expression, grab the max of the array. In the second part, grab the values you need.
-- Sample data loaded into a VARIANT column.
-- Fix: the original literal had a trailing comma after the second "y_id"
-- value ("1XXX387688",}), which is not valid JSON.
set json = '[{"list": [{"element": {"x_id": "12356789XXX","y_id": "12356789XXX38998"}},{"element": {"x_id": "5677888356789XXX","y_id": "1XXX387688"}}]}]';
create temp table foo(v variant);
insert into foo select parse_json($json);
with
-- Highest INDEX produced by a recursive flatten of the whole document;
-- for this shape it corresponds to the last "list" element.
MAX_INDEX(M) as
(
select max("INDEX") MAX_INDEX
from foo, lateral flatten(v, recursive => true)
),
-- Every value in the document, together with its path and key.
VALS(V, P, K) as
(
select "VALUE", "PATH", "KEY"
from foo, lateral flatten(v, recursive => true)
)
-- Keep only the x_id/y_id values that live under the last list element.
select k as "KEY", V::string as VALUE from vals, max_index
where VALS.P = '[0].list[' || max_index.m || '].element.x_id' or
VALS.P = '[0].list[' || max_index.m || '].element.y_id'
;
Assuming that the outer array ALWAYS contains a single dictionary element, you could use this:
-- Index the last element of amp[0]:"list" directly via ARRAY_SIZE(...)-1;
-- assumes the outer array always contains exactly one object.
SELECT amp[0]:"list"[ARRAY_SIZE(amp[0]:"list")-1]:"element":"x_id"::VARCHAR AS x_id
,amp[0]:"list"[ARRAY_SIZE(amp[0]:"list")-1]:"element":"y_id"::VARCHAR AS y_id
FROM T
;
Or if you prefer a bit more modularity/readability, you could use this:
-- Same query, split into named steps for readability.
WITH CTE1 AS (
-- The inner "list" array.
SELECT amp[0]:"list" AS _ARRAY
FROM T
)
,CTE2 AS (
-- The "element" object of the array's last entry.
SELECT _ARRAY[ARRAY_SIZE(_ARRAY)-1]:"element" AS _DICT
FROM CTE1
)
SELECT _DICT:"x_id"::VARCHAR AS x_id
,_DICT:"y_id"::VARCHAR AS y_id
FROM CTE2
;
Note: I have not used FLATTEN here because I did not see a good reason to use it.

how to create a multi-statement table valued function

I have a SQL function and I want to return a table, but I'm getting an error:
A RETURN statement with a return value cannot be used in this context.
Here is my query:
-- Original (failing) attempt from the question, with T-SQL's @ sigils
-- restored -- the transcript had mangled every @ into # (compare the
-- official syntax grammar quoted below, which uses @parameter_name).
-- This errors because "RETURNS table" (with no inline definition)
-- declares an INLINE table-valued function, which cannot have a
-- BEGIN...END body or a "RETURN <variable>" statement.
ALTER FUNCTION [dbo].[Fnc_GetParentCategories]-- 21
(
@catId int
)
Returns table
As
Begin
Declare @Table table(Id int identity(1,1),Category_Id int,ParentId int);
declare @cid int; -- NOTE(review): declared but never used
WITH x AS (
SELECT a.Category_Id, a.ParentId
FROM t_Category a
WHERE a.Category_Id=@CatId -- enter dead node walking here
UNION ALL
SELECT b.Category_Id, b.ParentId
FROM t_Category b
JOIN x ON x.Category_Id =b.ParentId
)
insert into @Table select * from x;
return @Table
end
And the error is:
A RETURN statement with a return value cannot be used in this context
Your query is incorrect:
-- Corrected multi-statement TVF: the table definition moves into the
-- RETURNS clause, and the final RETURN takes no value.
-- (@ sigils restored; the transcript had mangled them into #.)
ALTER FUNCTION [dbo].[Fnc_GetParentCategories]-- 21
(
@catId int
)
Returns @Table table
(
Id int identity(1,1),
Category_Id int,
ParentId int
)
AS
Begin
-- Recursive CTE: start from @CatId and walk up through ParentId links.
WITH x AS
(
SELECT a.Category_Id, a.ParentId
FROM t_Category a
WHERE a.Category_Id=@CatId -- enter dead node walking here
UNION ALL
SELECT b.Category_Id, b.ParentId
FROM t_Category b
JOIN x ON x.Category_Id =b.ParentId
)
insert into @Table select * from x;
return
end
You declare your table definition directly in the RETURNS clause, just before AS:
--Transact-SQL Multistatement Table-valued Function Syntax
CREATE FUNCTION [ schema_name. ] function_name
( [ { @parameter_name [ AS ] [ type_schema_name. ] parameter_data_type
[ = default ] [READONLY] }
[ ,...n ]
]
)
RETURNS @return_variable TABLE <table_type_definition>
[ WITH <function_option> [ ,...n ] ]
[ AS ]
BEGIN
function_body
RETURN
END
[ ; ]
Your modified function
-- The modified function, matching the official multi-statement TVF
-- syntax: return-table variable declared in RETURNS, bare RETURN at end.
-- (@ sigils restored; the transcript had mangled them into #.)
ALTER FUNCTION [dbo].[Fnc_GetParentCategories]-- 21
(
@catId int
)
RETURNS @Table TABLE(Id int identity(1,1),Category_Id int,ParentId int)
As
BEGIN
-- Recursive CTE: start from @CatId and walk up through ParentId links.
WITH x AS
(
SELECT a.Category_Id, a.ParentId
FROM t_Category a
WHERE a.Category_Id = @CatId -- enter dead node walking here
UNION ALL
SELECT b.Category_Id, b.ParentId
FROM t_Category b
JOIN x ON x.Category_Id = b.ParentId
)
INSERT @Table select * from x;
RETURN
END