I have a table with data like this:
Id | Name | Phone | OtherField
---+------+-------+-----------
 1 | ABC  | 12344 | NULL
 2 | XYZ  | NULL  | NULL
I want a SQL query to transform it like this
[
    {
        "ID": 1,
        "Name": "ABC",
        "Phone": [
            { "Home": "12344" }
        ],
        "OtherFields": null
    },
    {
        "ID": 2,
        "Name": "XYZ",
        "OtherFields": null
    }
]
I know about INCLUDE_NULL_VALUES, but it includes all the empty fields. I want to include all the other fields except Phone.
I have edited my answer as you have changed your original request.
I don't believe you can have it both ways, keeping some NULLs and not others. The best way I can think of at the moment is to use ISNULL on columns you must keep.
For example:
DECLARE @Table TABLE ( Id INT, Name VARCHAR(10), Phone VARCHAR(10), OtherField VARCHAR(10) );
INSERT INTO @Table ( Id, Name, Phone ) VALUES
( 1, 'ABC', '12344' ), ( 2, 'XYZ', NULL );
SELECT
Id, Name,
JSON_QUERY ( CASE
WHEN t.Phone IS NOT NULL THEN x.Phone
ELSE NULL
END ) AS Phone, -- JSON_QUERY() returns the nested JSON as-is instead of escaping it as a string
ISNULL( OtherField, '' ) AS OtherFields
FROM @Table t
CROSS APPLY (
SELECT ( SELECT Phone AS Home FOR JSON PATH ) AS Phone
) x
FOR JSON PATH;
Returns
[{
"Id": 1,
"Name": "ABC",
"Phone": [{
"Home": "12344"
}],
"OtherFields": ""
}, {
"Id": 2,
"Name": "XYZ",
"OtherFields": ""
}]
Update:
The original question was edited, and I don't think you can generate the expected output using a single FOR JSON with INCLUDE_NULL_VALUES, because the table now has more than one column with NULL values (OtherField in the example).
As a possible solution you may try a mixed approach (using FOR JSON and STRING_AGG()) to build the final JSON output and keep the NULL values for all columns except Phone:
CREATE TABLE Data (
Id int,
Name varchar(100),
Phone varchar(100),
OtherField varchar(1)
);
INSERT INTO Data (Id, Name, Phone, OtherField)
VALUES
(1, 'ABC', '12344', NULL),
(2, 'ABC', NULL, NULL),
(3, 'ABC', NULL, NULL)
Statement:
SELECT CONCAT(
'[',
(
SELECT STRING_AGG(j.Json, ',')
FROM Data d
CROSS APPLY (
SELECT CASE
WHEN Phone IS NOT NULL THEN (
SELECT Id, Name, (SELECT Phone AS Home FOR JSON PATH) AS Phone, OtherField
FOR JSON PATH, INCLUDE_NULL_VALUES, WITHOUT_ARRAY_WRAPPER
)
ELSE (
SELECT Id, Name, OtherField
FOR JSON PATH, INCLUDE_NULL_VALUES, WITHOUT_ARRAY_WRAPPER
)
END
) j (Json)
),
']'
)
Result:
[
{"Id":1,"Name":"ABC","Phone":[{"Home":"12344"}],"OtherField":null},
{"Id":2,"Name":"ABC","OtherField":null},
{"Id":3,"Name":"ABC","OtherField":null}
]
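Note that STRING_AGG() requires SQL Server 2017+, while FOR JSON itself only needs 2016. If you are stuck on 2016, the string aggregation could be swapped for the older FOR XML PATH('') concatenation trick; a minimal sketch of that substitution (same CASE expression, different aggregation):
SELECT CONCAT(
    '[',
    -- STUFF() strips the leading comma; TYPE + .value() avoids XML entity escaping
    STUFF((
        SELECT ',' + j.Json
        FROM Data d
        CROSS APPLY (
            SELECT CASE
                WHEN Phone IS NOT NULL THEN (
                    SELECT Id, Name, (SELECT Phone AS Home FOR JSON PATH) AS Phone, OtherField
                    FOR JSON PATH, INCLUDE_NULL_VALUES, WITHOUT_ARRAY_WRAPPER
                )
                ELSE (
                    SELECT Id, Name, OtherField
                    FOR JSON PATH, INCLUDE_NULL_VALUES, WITHOUT_ARRAY_WRAPPER
                )
            END
        ) j (Json)
        FOR XML PATH(''), TYPE
    ).value('.', 'nvarchar(max)'), 1, 1, ''),
    ']'
)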
Original answer:
You may try the following statement:
Table:
CREATE TABLE Data (
Id int,
Name varchar(100),
Phone varchar(100)
);
INSERT INTO Data (Id, Name, Phone)
VALUES
(1, 'ABC', '12344'),
(2, 'ABC', NULL )
Statement:
SELECT
Id,
Name,
JSON_QUERY(CASE WHEN Phone IS NOT NULL THEN (SELECT Phone AS Home FOR JSON PATH) END) AS Phone
FROM Data
FOR JSON PATH
Result:
[
{"Id":1,"Name":"ABC","Phone":[{"Home":"12344"}]},
{"Id":2,"Name":"ABC"}
]
I want to check that the Projects column has the same value for all rows with the same PartNo and PartName. The Projects column's data type is character varying[].
For example:
PartNo | PartName | Projects
-------+----------+---------
 1     | 3        | 6;5
 1     | 3        |
 1     | 3        |
 3     | 2        | 5;5
In this case, Projects has different values (6;5 and empty) for the same PartNo (1) and PartName (3).
This is my query, but it does not work when the character varying[] in the projects column is empty!
SELECT COUNT(*) from (
select c.partno, c.partname
FROM unnest(items) as c
GROUP BY c.partno, c.partname
HAVING COUNT(distinct c.projects) > 1) as xxx
INTO errCount;
IF errCount > 0 THEN
RETURN QUERY
SELECT 0 as status, format('Projects value should be the same for all Codes of the Part No %s and Name %s',c.partno,c.partname) as message
FROM unnest(items) as c
GROUP BY c.partno, c.partname
HAVING COUNT(distinct c.projects) > 1
;
RETURN;
END IF;
In the case of two different values in projects (with no empty array), it works.
You can use a query like this, with the coalesce function to convert NULL into array[null]:
WITH tt AS (
    SELECT
        partno,
        partname,
        COALESCE(project, ARRAY[NULL]) AS pro
    FROM tab1
)
SELECT *, COUNT(pro) AS num
FROM tt
GROUP BY partno, partname, pro
To create the test table:
CREATE TABLE "tab1" (
"pk" serial4 primary key,
"partno" int4,
"partname" int4,
"project" varchar[]
);
INSERT INTO "tab1" (partno,partname,project) VALUES ( 1, 3, '{6,5}');
INSERT INTO "tab1" (partno,partname,project) VALUES ( 1, 3, NULL);
INSERT INTO "tab1" (partno,partname,project) VALUES ( 1, 3, NULL);
INSERT INTO "tab1" (partno,partname,project) VALUES ( 3, 2, '{5,5}');
Looking for a non-fancy, easily debuggable solution that a junior developer can follow...
In SQL Server 2008 R2, I have to update data from the #data table to the #tests table in the desired format. I am not sure how I would achieve this result using a T-SQL query.
NOTE: the temp tables have only 3 columns per set for sample purposes, but the real tables have more than 50 columns for each set.
Here is what my tables look like:
IF OBJECT_ID('tempdb..#tests') IS NOT NULL
DROP TABLE #tests
GO
CREATE TABLE #tests
(
id int,
FirstName varchar(100),
LastName varchar(100),
UniueNumber varchar(100)
)
IF OBJECT_ID('tempdb..#data') IS NOT NULL
DROP TABLE #data
GO
CREATE TABLE #data
(
id int,
FirstName1 varchar(100),
LastName1 varchar(100),
UniueNumber1 varchar(100),
FirstName2 varchar(100),
LastName2 varchar(100),
UniueNumber2 varchar(100),
FirstName3 varchar(100),
LastName3 varchar(100),
UniueNumber3 varchar(100),
FirstName4 varchar(100),
LastName4 varchar(100),
UniueNumber4 varchar(100),
FirstName5 varchar(100),
LastName5 varchar(100),
UniueNumber5 varchar(100),
FirstName6 varchar(100),
LastName6 varchar(100),
UniueNumber6 varchar(100),
FirstName7 varchar(100),
LastName7 varchar(100),
UniueNumber7 varchar(100)
)
INSERT INTO #data
VALUES (111, 'Tom', 'M', '12345', 'Sam', 'M', '65432', 'Chris', 'PATT', '54656', 'Sean', 'Meyer', '865554', 'Mike', 'Max', '999999', 'Tee', 'itc', '656546444', 'Mickey', 'Mul', '65443231')
INSERT INTO #data
VALUES (222, 'Kurr', 'P', '22222', 'Yammy', 'G', '33333', 'Saras', 'pi', '55555', 'Man', 'Shey', '666666', 'Max', 'Dopit', '66666678', '', '', '', '', '', '')
INSERT INTO #data
VALUES (333, 'Mia', 'K', '625344', 'Tee', 'TE', '777766', 'david', 'mot', '4444444', 'Jeff', 'August', '5666666', 'Mylee', 'Max', '0000000', '', '', '', 'Amy', 'Marr', '55543444')
SELECT *
FROM #data
I want to insert/update data in the #tests table from the #data table.
Insert rows into #tests when the id and UniqueNumber combination from #data does not exist there; if the combination does exist, update #tests from #data.
This is desired output into #tests table
Here is an option that will dynamically UNPIVOT your data without using Dynamic SQL
To be clear: a static UNPIVOT would be more performant, but this way you don't have to enumerate the 50 columns.
This is assuming your columns end with a NUMERIC i.e. FirstName##
Example
Select ID
,FirstName
,LastName
,UniueNumber -- You could use SSN = UniueNumber
From (
SELECT A.ID
,Grp
,Col = replace([Key],Grp,'')
,Value
FROM #data A
Cross Apply (
Select [Key]
,Value
,Grp = substring([Key],patindex('%[0-9]%',[Key]),25)
From OpenJson( (Select A.* For JSON Path,Without_Array_Wrapper ) )
) B
) src
Pivot ( max(Value) for Col in ([FirstName],[LastName],[UniueNumber]) ) pvt
Order By ID,Grp
Results
Update: XML version
Select ID
,FirstName
,LastName
,UniueNumber
From (
SELECT A.ID
,Grp = substring(Item,patindex('%[0-9]%',Item),50)
,Col = replace(Item,substring(Item,patindex('%[0-9]%',Item),50),'')
,Value
FROM #data A
Cross Apply ( values (convert(xml,(Select A.* for XML RAW)))) B(XData)
Cross Apply (
Select Item = xAttr.value('local-name(.)', 'varchar(100)')
,Value = xAttr.value('.','varchar(max)')
From B.XData.nodes('//#*') xNode(xAttr)
) C
Where Item not in ('ID')
) src
Pivot ( max(Value) for Col in (FirstName,LastName,UniueNumber) ) pvt
Order By ID,Grp
One way is to query each group of columns separately and UNION the results
SELECT
id,
FirstName1 as FirstName,
LastName1 as LastName,
UniueNumber1 AS SSN
FROM #data
UNION
SELECT
id,
FirstName2 as FirstName,
LastName2 as LastName,
UniueNumber2 AS SSN
FROM #data
UNION
...
There's not a way to cleanly "loop through" the 7 groups of columns - you'll spend more time building a loop to create the query dynamically than just copying and pasting the query 6 times and changing the number.
Of course, it's best to avoid the type of structure you have in #data now if at all possible.
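As for the insert-or-update half of the question: MERGE is available in SQL Server 2008 R2 and can apply the upsert in a single statement once the data is unpivoted. A rough sketch (only the first column group is shown in the source; extend it with the remaining groups as in the UNION query above):
MERGE #tests AS tgt
USING (
    SELECT id, FirstName1 AS FirstName, LastName1 AS LastName, UniueNumber1 AS UniueNumber
    FROM #data
    -- UNION ALL the remaining six column groups here
) AS src
    ON tgt.id = src.id AND tgt.UniueNumber = src.UniueNumber
WHEN MATCHED THEN
    UPDATE SET tgt.FirstName = src.FirstName, tgt.LastName = src.LastName
WHEN NOT MATCHED THEN
    INSERT (id, FirstName, LastName, UniueNumber)
    VALUES (src.id, src.FirstName, src.LastName, src.UniueNumber);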
I have a table called foo, like this:
create table foo
(
Id Int,
Description Nvarchar(Max)
)
I have a JSON array in Description, like this:
[
{
"name":"a",
"date":"2021-03-01"
},
{
"name":"b",
"date":"2021-03-02"
}
]
I want to sort the foo table by the max date in the JSON list - something like this:
select *
from foo
order by json_value(Description, '$.date')
but this query does not work.
How can I fix this?
One possible approach is to parse the stored JSON with OPENJSON() and find the max date:
Table:
CREATE TABLE foo (
Id Int,
Description Nvarchar(Max)
)
INSERT INTO foo (Id, Description)
VALUES
(1, N'[{"name":"a","date":"2021-03-01"},{"name":"b","date":"2021-03-02"}]'),
(2, N'[{"name":"a","date":"2021-03-11"},{"name":"b","date":"2021-03-12"}]')
Statement:
SELECT Id, Description
FROM (
SELECT *
FROM foo f
OUTER APPLY (
SELECT MAX([date]) AS [Date]
FROM OPENJSON(f.Description) WITH ([date] nvarchar(10) '$.date')
) j
) t
ORDER BY TRY_CONVERT(date, [Date], 23)
Result:
Id Description
1 [{"name":"a","date":"2021-03-01"},{"name":"b","date":"2021-03-02"}]
2 [{"name":"a","date":"2021-03-11"},{"name":"b","date":"2021-03-12"}]
I have a table that looks as follows:
I need to unpivot the Rating and the Comments as follows:
What is the best way to do this in Snowflake?
Note: there are some cells in the comment columns that are NULL
Adding details:
create or replace table reviews(name varchar(50), acting_rating int, acting_comments text, comedy_rating int, comedy_comments text);
insert into reviews values
('abc', 4, NULL, 1, 'NO'),
('xyz', 3, 'some', 1, 'haha'),
('lmn', 1, 'what', 4, NULL);
select * from reviews;
select name, skill, skill_rating, comments
from reviews
unpivot(skill_rating for skill in (acting_rating, comedy_rating))
unpivot(comments for skill_comments in (acting_comments,comedy_comments))
--Following where clause is added to filter the irrelevant comments due to multiple unpivots
where substr(skill,1,position('_',skill)-1) = substr(skill_comments,1,position('_',skill_comments)-1)
order by name;
will produce the desired results, but with data that has NULLs, the unpivoted rows containing NULLs go missing from the output:
NAME SKILL SKILL_RATING COMMENTS
abc COMEDY_RATING 1 NO
lmn ACTING_RATING 1 what
xyz ACTING_RATING 3 some
xyz COMEDY_RATING 1 haha
If all you need is a solution for the exact table specified in the question, you can do it manually with a set of UNION ALLs:
select NAME
, 'ACTING_RATING' as SKILL, ACTING_RATING as SKILL_RATING, ACTING_COMMENTS as SKILL_COMMENTS
from DATA
union all
select NAME
, 'COMEDY_RATING', COMEDY_RATING, COMEDY_COMMENTS
from DATA
union all
select NAME
, 'MUSICAL_PERFORMANCE_RATING', MUSICAL_PERFORMANCE_RATING, MUSICAL_PERFORMANCE_COMMENTS
from DATA
This is a basic script and should give the desired output
create or replace table reviews(name varchar(50), acting_rating int, acting_comments text, comedy_rating int, comedy_comments text);
insert into reviews values
('abc', 4, 'something', 1, 'NO'),
('xyz', 3, 'some', 1, 'haha'),
('lmn', 1, 'what', 4, 'hahaha');
select * from reviews;
select name, skill, skill_rating, comments
from reviews
unpivot(skill_rating for skill in (acting_rating, comedy_rating))
unpivot(comments for skill_comments in (acting_comments,comedy_comments))
--Following where clause is added to filter the irrelevant comments due to multiple unpivots
where substr(skill,1,position('_',skill)-1) = substr(skill_comments,1,position('_',skill_comments)-1)
order by name;
If the goal is to store the unpivoted result as a table, then INSERT ALL could be used to unpivot multiple columns at once:
Setup:
create or replace table reviews(
name varchar(50), acting_rating int,
acting_comments text, comedy_rating int, comedy_comments text);
insert into reviews values
('abc', 4, NULL, 1, 'NO'),
('xyz', 3, 'some', 1, 'haha'),
('lmn', 1, 'what', 4, NULL);
select * from reviews;
Query:
CREATE OR REPLACE TABLE reviews_transposed(
name VARCHAR(50)
,skill TEXT
,skill_rating INT
,skill_comments TEXT
);
INSERT ALL
INTO reviews_transposed(name, skill, skill_rating, skill_comments)
VALUES (name, 'ACTING_RATING', acting_rating, acting_comments)
INTO reviews_transposed(name, skill, skill_rating, skill_comments)
VALUES (name, 'COMEDY_RATING', comedy_rating, comedy_comments)
SELECT *
FROM reviews;
SELECT *
FROM reviews_transposed;
Before:
After:
This approach has one significant advantage over the UNION ALL approach proposed by Felippe when saving into a table: the number of table scans (and thus partitions read) grows with each UNION ALL, whereas INSERT ALL scans the source table only once.
INSERT INTO reviews_transposed
select NAME
, 'ACTING_RATING' as SKILL, ACTING_RATING as SKILL_RATING, ACTING_COMMENTS as SKILL_COMMENTS
from reviews
union all
select NAME
, 'COMEDY_RATING', COMEDY_RATING, COMEDY_COMMENTS
from reviews;
vs the INSERT ALL statement shown above.
Back in my T-SQL days I'd just use a CROSS APPLY. The nearest equivalent in Snowflake would be something like:
create or replace TEMPORARY table reviews(name varchar(50), acting_rating int, acting_comments text, comedy_rating int, comedy_comments text);
insert into reviews values
('abc', 4, NULL, 1, 'NO'),
('xyz', 3, 'some', 1, 'haha'),
('lmn', 1, 'what', 4, NULL);
SELECT R.NAME
,P.VALUE:SKILL::VARCHAR(100) AS SKILL
,P.VALUE:RATING::NUMBER AS RATING
,P.VALUE:COMMENTS::VARCHAR(1000) AS COMMENTS
FROM reviews R
,TABLE(FLATTEN(INPUT => ARRAY_CONSTRUCT(
OBJECT_CONSTRUCT('SKILL','COMEDY','RATING',R.COMEDY_RATING,'COMMENTS',R.COMEDY_COMMENTS),
OBJECT_CONSTRUCT('SKILL','ACTING','RATING',R.ACTING_RATING,'COMMENTS',R.ACTING_COMMENTS)
)
)) AS P;
This only hits the source table once and preserves NULLs.
ResultSet
I've had the same problem. Here is my solution for unpivoting by two categories AND keeping NULLs:
First, replace the NULLs with some string, for example 'NULL'.
Then break the two unpivots into two separate CTEs and create a common category column ('skill' in your case) to join them on again later.
Lastly, join the two CTEs by name and skill category, and replace the 'NULL' string with an actual NULL:
create or replace table reviews(name varchar(50), acting_rating int, acting_comments text, comedy_rating int, comedy_comments text);
insert into reviews values
('abc', 4, 'something', 1, 'NO'),
('xyz', 3, 'some', 1, 'haha'),
('lmn', 1, 'what', 4, 'hahaha');
WITH base AS (SELECT name
, acting_rating
, IFNULL(acting_comments, 'NULL') AS acting_comments
, comedy_rating
, IFNULL(comedy_comments, 'NULL') AS comedy_comments
FROM reviews
)
, skill_rating AS (SELECT name
, REPLACE(skill, '_RATING', '') AS skill
, skill_rating
FROM base
UNPIVOT (skill_rating FOR skill IN (acting_rating, comedy_rating))
)
, comments AS (SELECT name
, REPLACE(skill_comments, '_COMMENTS', '') AS skill
, comments
FROM base
UNPIVOT (comments FOR skill_comments IN (acting_comments,comedy_comments))
)
SELECT s.name
, s.skill
, s.skill_rating
, NULLIF(c.comments, 'NULL') AS comments
FROM skill_rating AS s
JOIN comments AS c
ON s.name = c.name
AND s.skill = c.skill
ORDER BY name;
The result:
name skill skill_rating comments
abc ACTING 4 <null>
abc COMEDY 1 NO
lmn ACTING 1 what
lmn COMEDY 4 <null>
xyz ACTING 3 some
xyz COMEDY 1 haha
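As an aside: Snowflake's UNPIVOT syntax also documents an INCLUDE NULLS modifier (a relatively recent addition, so verify it is available on your account before relying on it). If it is, the original double-unpivot query may keep the NULL rows without any workaround:
select name, skill, skill_rating, comments
from reviews
unpivot include nulls (skill_rating for skill in (acting_rating, comedy_rating))
unpivot include nulls (comments for skill_comments in (acting_comments, comedy_comments))
where substr(skill,1,position('_',skill)-1) = substr(skill_comments,1,position('_',skill_comments)-1)
order by name;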
Having a simple database structure:
PERSON_ID FIRST_NAME LAST_NAME
1 John Doe
2 John Doe
3 Peter Jackson
I need to construct a single-row output with a JSON array structure containing unique data, filtered by the first_name, last_name criteria.
Expected outcome:
[{
"firstname": "John",
"lastname": "Doe"
},
{
"firstname": "Peter",
"lastname": "Jackson"
}]
Using GROUP BY at the array level results in two rows:
SELECT json_array(
json_object( key 'firstname' VALUE t.first_name,
key 'lastname' VALUE t.last_name)
) RESPONSEJSON
FROM TESTDATA t
GROUP BY t.first_name, t.last_name
RESPONSEJSON
1 [{"firstname":"Peter","lastname":"Jackson"}]
2 [{"firstname":"John","lastname":"Doe"}]
Use a subquery to create distinct objects. Then aggregate them together in the array:
create table t (
person_id int, first_name varchar2(10), last_name varchar2(10)
);
insert into t values (1, 'John', 'Doe' );
insert into t values (2, 'John', 'Doe' );
insert into t values (3, 'Peter', 'Jackson' );
commit;
with jobjs as (
select distinct json_object(
key 'firstname' VALUE t.first_name,
key 'lastname' VALUE t.last_name
) responsejson
from t
)
SELECT json_arrayagg ( responsejson )
FROM jobjs;
[
{"firstname":"John","lastname":"Doe"},
{"firstname":"Peter","lastname":"Jackson"}
]
There's a bug in 12.2 which gives wrong results when using distinct as above. You can get around this with group by instead:
with jobjs as (
select json_object(
key 'firstname' VALUE t.first_name,
key 'lastname' VALUE t.last_name
) responsejson
from t
group by t.first_name, t.last_name
)
SELECT json_arrayagg ( responsejson )
FROM jobjs;
[
{"firstname":"Peter","lastname":"Jackson"},
{"firstname":"John","lastname":"Doe"}
]