How to insert JSON into an existing table without specifying column names? - sql

Given a table with two columns, named Id and Name, how can I insert the following JSON as rows, but without explicitly specifying the column names of the table (i.e. without WITH (Id ..., Name ...))?
[
{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One",
...
},
{
"Name": "All is one"
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99,
...
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21]
"Surprise": "This one does not have a Name value!!! 😱"
...
},
...
]
The question is basically how to make SQL match each key name to its corresponding column name, ignoring JSON values whose keys have no matching column, resulting in the following table (for the above JSON example):
Id                                   | Name
-------------------------------------+-----------
f08af9c2-8e67-4a7f-9413-1afffa2de96b | The One
9bbb094b-aa64-4c36-90a2-50e10f91c6a3 | All is one
af9d22d8-1e46-4d57-8179-75f094d2efa1 | NULL
...                                  | ...

Not sure why you want this, but you can also do this:
INSERT INTO YourTable (Id, Name)
SELECT JSON_VALUE(x.value, '$.Id'), JSON_VALUE(x.value, '$.Name')
FROM OPENJSON('[{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn''t have a Name value!!! 😱"
}]') x
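Incidentally, the NULL Name for the third object comes for free: JSON_VALUE simply returns NULL when the path does not exist (in the default lax mode). A one-line check, assuming nothing beyond the built-in function:
SELECT JSON_VALUE('{"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1"}', '$.Name')   -- returns NULL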
EDIT: dynamic version.
It has many caveats, though: your column names must exactly match the case used in the JSON, and it's very long-winded, but if that's what floats your vessel...
declare @json nvarchar(max) = '[{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn''t have a Name value!!! 😱"
}]'
create table YourTable (Id nvarchar(100), Name NVARCHAR(MAX))
create table #cols (name sysname, row_id int identity)
insert into #cols (name)
select STRING_ESCAPE(name, 'json')
from sys.columns sc
where sc.object_id = OBJECT_ID('YourTable')
DECLARE @sql nvarchar(max)
set @sql = N'
INSERT INTO YourTable ([COLS])
SELECT [JSONS]
FROM OPENJSON(@json) x'
-- build the quoted column list from the table's metadata
SET @sql = REPLACE(@sql, '[COLS]',
STUFF((select ',' + QUOTENAME(name) from #cols order by row_id for xml Path(''), type).value('.', 'nvarchar(max)'), 1, 1, ''))
-- build one JSON_VALUE(...) expression per column, using the (JSON-escaped) column name as the key
set @sql = replace(@sql, '[JSONS]',
stuff((SELECT ', JSON_VALUE(x.value, ''$."' + REPLACE(name,'''', '''''') + '"'')'
from #cols order by row_id for xml path(''), type).value('.', 'nvarchar(max)'), 1, 1, ''))
exec sp_executesql @sql, N'@json nvarchar(max)', @json = @json
select * FROM YourTable

You can use the OPENJSON function in SQL Server to parse the JSON data and then use INSERT INTO to insert the data into the table.
INSERT INTO YourTable (Id, Name)
SELECT *
FROM OPENJSON('[
{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn\'t have a Name value!!! 😱"
}
]') WITH (Id nvarchar(50) '$.Id', Name nvarchar(50) '$.Name')
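As a side note, the explicit '$.Id' / '$.Name' paths are optional when the column names already match the JSON keys (matching is case-sensitive), so the schema clause can be shortened. A sketch, assuming the same JSON array is held in a variable @json:
INSERT INTO YourTable (Id, Name)
SELECT Id, Name
FROM OPENJSON(@json)
WITH (Id nvarchar(50), Name nvarchar(50))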


Format SQL output to custom JSON

I have this very simple table with this data:
CREATE TABLE #Prices
(
ProductId int,
SizeId int,
Price int,
Date date
)
INSERT INTO #Prices
VALUES (1, 1, 100, '2020-01-01'),
(1, 1, 120, '2020-02-01'),
(1, 1, 130, '2020-03-01'),
(1, 2, 100, '2020-01-01'),
(1, 2, 100, '2020-02-01'),
(2, 1, 100, '2020-01-01'),
(2, 1, 120, '2020-02-01'),
(2, 1, 130, '2020-03-01'),
(2, 2, 100, '2020-01-01'),
(2, 2, 100, '2020-02-01')
I would like to format the output to be something like this:
{
"Products": [
{
"Product": 2,
"UnitSizes": [
{
"SizeId": 1,
"PerDate": [
{
"Date": "2020-01-02",
"Price": 870.0
},
{
"Date": "2021-04-29",
"Price": 900.0
}
]
},
{
"SizeId": 2,
"PerDate": [
{
"Date": "2020-01-02",
"Price": 435.0
},
{
"Date": "2021-04-29",
"Price": 450.0
}
]
}
]
},
{
"Product": 4,
"UnitSizes": [
{
"SizeId": 1,
"PerDate": [
{
"Date": "2020-01-02",
"Price": 900.0
}
]
}
]
}
]
}
I almost have it, but I don't know how to get the array inside 'PerDate'. This is what I have:
SELECT
ProductId AS [Product],
SizeId AS 'Sizes.SizeId',
date AS 'Sizes.PerDate.Date',
price AS 'Sizes.PerDate.Price'
FROM
#Prices
ORDER BY
ProductId, [Sizes.SizeId], Date
FOR JSON PATH, ROOT('Products')
I have tried FOR JSON AUTO with no luck, and I've tried JSON_QUERY(), but I was not able to achieve the result I want.
Any help will be much appreciated.
Thanks
Unfortunately, SQL Server does not have the JSON_AGG function, which means you would normally need to use a number of correlated subqueries and keep on rescanning the base table.
However, we can simulate it by using STRING_AGG against single JSON objects generated in an APPLY. This means that we only scan the base table once.
Use of JSON_QUERY with no path prevents double-escaping.
WITH PerDate AS (
SELECT
p.ProductId,
p.SizeId,
PerDate = '[' + STRING_AGG(j.PerDate, ',') WITHIN GROUP (ORDER BY p.Date) + ']'
FROM #Prices AS p
CROSS APPLY ( -- This produces multiple rows of single JSON objects
SELECT p.Date, p.Price
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER
) j(PerDate)
GROUP BY
p.ProductId,
p.SizeId
),
UnitSizes AS (
SELECT
p.ProductId,
UnitSizes = '[' + STRING_AGG(j.UnitSizes, ',') WITHIN GROUP (ORDER BY p.SizeId) + ']'
FROM PerDate p
CROSS APPLY (
SELECT p.SizeId, PerDate = JSON_QUERY(p.PerDate)
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER
) j(UnitSizes)
GROUP BY
p.ProductId
)
SELECT
Product = p.ProductId,
UnitSizes = JSON_QUERY(p.UnitSizes)
FROM UnitSizes p
ORDER BY p.ProductId
FOR JSON PATH, ROOT('Products');
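To see why the JSON_QUERY wrapper matters, here is a minimal illustration of my own (not part of the original answer) comparing the two behaviours:
DECLARE @fragment nvarchar(max) = N'[{"Date":"2020-01-01","Price":100}]'

-- Without JSON_QUERY, FOR JSON treats the variable as plain text and escapes it:
SELECT PerDate = @fragment FOR JSON PATH
-- [{"PerDate":"[{\"Date\":\"2020-01-01\",\"Price\":100}]"}]

-- With JSON_QUERY (no path), the fragment is embedded as real JSON:
SELECT PerDate = JSON_QUERY(@fragment) FOR JSON PATH
-- [{"PerDate":[{"Date":"2020-01-01","Price":100}]}]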
This is one way of doing it
DROP TABLE IF EXISTS #Prices
CREATE TABLE #Prices
(
ProductId INT,
SizeId INT,
Price INT,
Date DATE
)
-- SQL Prompt formatting off
INSERT INTO #Prices
VALUES (1, 1, 100, '2020-01-01'),
(1, 1, 120, '2020-02-01'),
(1, 1, 130, '2020-03-01'),
(1, 2, 100, '2020-01-01'),
(1, 2, 100, '2020-02-01'),
(2, 1, 100, '2020-01-01'),
(2, 1, 120, '2020-02-01'),
(2, 1, 130, '2020-03-01'),
(2, 2, 100, '2020-01-01'),
(2, 2, 100, '2020-02-01')
-- SQL Prompt formatting on
SELECT m.ProductId AS Product,
(
SELECT s.SizeId,
(
SELECT p.Date,
p.Price
FROM #Prices AS p
WHERE p.SizeId = s.SizeId
GROUP BY p.Date,
p.Price
ORDER BY p.Date
FOR JSON PATH
) AS PerDate
FROM #Prices AS s
WHERE s.ProductId = m.ProductId
GROUP BY s.SizeId
ORDER BY s.SizeId
FOR JSON PATH
) AS UnitSizes
FROM #Prices AS m
GROUP BY m.ProductId
ORDER BY m.ProductId
FOR JSON PATH, ROOT('Products')
Output:
{
"Products":
[
{
"Product": 1,
"UnitSizes":
[
{
"SizeId": 1,
"PerDate":
[
{
"Date": "2020-01-01",
"Price": 100
},
{
"Date": "2020-02-01",
"Price": 120
},
{
"Date": "2020-03-01",
"Price": 130
}
]
},
{
"SizeId": 2,
"PerDate":
[
{
"Date": "2020-01-01",
"Price": 100
},
{
"Date": "2020-02-01",
"Price": 100
}
]
}
]
},
{
"Product": 2,
"UnitSizes":
[
{
"SizeId": 1,
"PerDate":
[
{
"Date": "2020-01-01",
"Price": 100
},
{
"Date": "2020-02-01",
"Price": 120
},
{
"Date": "2020-03-01",
"Price": 130
}
]
},
{
"SizeId": 2,
"PerDate":
[
{
"Date": "2020-01-01",
"Price": 100
},
{
"Date": "2020-02-01",
"Price": 100
}
]
}
]
}
]
}
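If you want to compare the two approaches on a larger table, a quick hedged check (assuming nothing beyond standard session settings) is to look at the scan counts: roughly speaking, the correlated-subquery version reads #Prices several times, while the STRING_AGG/APPLY version reads it once.
SET STATISTICS IO ON
-- run either query from the answers above here and compare the reported
-- scan counts / logical reads for #Prices
SET STATISTICS IO OFF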

Query group by a column and return JSON

I have a table as below:
id | mid | handphone   | coupono | status
---+-----+-------------+---------+-------
1  | 1   | 0811111111  | 1       | 1
2  | 1   | 08222222222 | 2       | 1
3  | 1   | 08222222222 | 3       | 1
4  | 1   | 08222222222 | 4       | 1
5  | 1   | 08111111111 | 5       | 1
6  | 2   | 08333333333 | 6       | 1
7  | 2   | 08333333333 | 7       | 1
8  | 2   | 08444444444 | 8       | 1
I want to query the table using a WHERE clause on the mid column, with the coupon numbers listed per handphone number. How do I write that query?
The result that I want is:
{
"08111111111": [{
"Id": 1,
"CouponNo": 1,
"Status": 1
}, {
"Id": 5,
"CouponNo": 5,
"Status": 1
}],
"08222222222": [{
"Id": 2,
"CouponNo": 2,
"Status": 1
}, {
"Id": 3,
"CouponNo": 3,
"Status": 1
}, {
"Id": 4,
"CouponNo": 4,
"Status": 1
}]
}
Requiring Handphone to be object keys in your JSON makes it difficult to produce from SQL and probably won't scale well on the receiving side either as you add more data over time.
Here is some pivot-based SQL that will produce your desired JSON...
create table dbo.PivotJsonStuff (
Id int,
[Mid] int,
Handphone varchar(11),
CouponNo int,
Status int
);
insert dbo.PivotJsonStuff (Id, [Mid], Handphone, CouponNo, Status)
values
(1, 1, '08111111111', 1, 1),
(2, 1, '08222222222', 2, 1),
(3, 1, '08222222222', 3, 1),
(4, 1, '08222222222', 4, 1),
(5, 1, '08111111111', 5, 1),
(6, 2, '08333333333', 6, 1),
(7, 2, '08333333333', 7, 1),
(8, 2, '08444444444', 8, 1);
select
[08111111111] = json_query([08111111111]),
[08222222222] = json_query([08222222222])
from (
select Handphone,
[JSON] = (
select PJS2.Id, PJS2.CouponNo, PJS2.Status
from dbo.PivotJsonStuff PJS2
where PJS2.Handphone = PJS1.Handphone
for json path
)
from dbo.PivotJsonStuff PJS1
group by Handphone
) src
pivot (max([JSON]) for Handphone in ([08111111111], [08222222222])) pvt
for json path, without_array_wrapper;
{
"08111111111": [
{
"Id": 1,
"CouponNo": 1,
"Status": 1
},
{
"Id": 5,
"CouponNo": 5,
"Status": 1
}
],
"08222222222": [
{
"Id": 2,
"CouponNo": 2,
"Status": 1
},
{
"Id": 3,
"CouponNo": 3,
"Status": 1
},
{
"Id": 4,
"CouponNo": 4,
"Status": 1
}
]
}
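If the set of handphone numbers isn't fixed, a hedged alternative of my own (not from the original answer; assumes SQL Server 2017+ for STRING_AGG) is to build the keyed object by string concatenation instead of PIVOT:
SELECT N'{' + STRING_AGG(
           N'"' + STRING_ESCAPE(Handphone, 'json') + N'":' + PerPhone,
           N',') WITHIN GROUP (ORDER BY Handphone) + N'}' AS HandphoneJson
FROM (
    SELECT Handphone,
           PerPhone = (
               SELECT PJS2.Id, PJS2.CouponNo, PJS2.Status
               FROM dbo.PivotJsonStuff AS PJS2
               WHERE PJS2.Handphone = PJS1.Handphone
               FOR JSON PATH
           )
    FROM dbo.PivotJsonStuff AS PJS1   -- add WHERE [Mid] = ... before the GROUP BY if you need that filter
    GROUP BY Handphone
) AS src
The trade-off is that the outer object is assembled as a string, so it is only as valid as the pieces being concatenated; ISJSON() can be used as a sanity check on the result.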

Edit fields of a jsonb array in postgresql

I have the following jsonb in db:
[
{
"state": 2,
"activity": "EJECUCIÓN",
"final_date": "2020-02-24",
"activity_id": 1,
"current_days": -7,
"initial_date": "2020-02-24",
},
{
"state": 2,
"activity": "REVISIÓN",
"final_date": "2020-02-25",
"activity_id": 2,
"current_days": 0,
"initial_date": "2020-02-25",
},
{
"state": 2,
"activity": "RECEPCIÓN",
"final_date": "2020-02-27",
"activity_id": 4,
"current_days": 0,
"initial_date": "2020-02-27"
} ]
I run the following query to update the current_days field:
WITH activity_state as (
    SELECT taex_id,
           ('{'||index-1||',current_days}')::text[] as pathe,
           ((task_activity->>'final_date')::date - current_date) as current_days,
           task_activity->'state' as state,
           task_activity->>'final_date' as final_date,
           task_activity->>'current_days' as curren
    FROM task_executions,
         jsonb_array_elements(taex_activitygraph) with ordinality arr(task_activity, index)
    WHERE task_activity->>'state' = '2'
)
update task_executions
SET taex_activitygraph = jsonb_set(taex_activitygraph, activity_state.pathe, to_jsonb(current_days), true)
FROM activity_state
WHERE task_executions.taex_id = activity_state.taex_id
  AND activity_state.state = '2'
But that query only updates the first element of the JSON array; the others are not changed, even though the first part of the query:
( SELECT taex_id,
         ('{'||index-1||',current_days}')::text[] as pathe,
         ((task_activity->>'final_date')::date - current_date) as current_days,
         task_activity->'state' as state,
         task_activity->>'final_date' as final_date,
         task_activity->>'current_days' as curren
  FROM task_executions,
       jsonb_array_elements(taex_activitygraph) with ordinality arr(task_activity, index)
  WHERE task_activity->>'state' = '2' )
returns all the elements of the array that should be updated. But the second part, which is where they are supposed to be updated:
update task_executions
SET taex_activitygraph = jsonb_set(taex_activitygraph, activity_state.pathe, to_jsonb(current_days), true)
FROM activity_state
WHERE task_executions.taex_id = activity_state.taex_id
  AND activity_state.state = '2'
only updates the first item.
Assuming this structure and data:
postgres=# \d task_executions
Table "public.task_executions"
Column | Type | Collation | Nullable | Default
--------------------+-------+-----------+----------+---------
task_activitygraph | jsonb | | |
postgres=# SELECT jsonb_pretty(task_activitygraph) FROM task_executions ;
jsonb_pretty
--------------------------------------
[ +
{ +
"state": 2, +
"activity": "EJECUCIÓN", +
"final_date": "2020-02-24", +
"activity_id": 1, +
"current_days": -7, +
"initial_date": "2020-02-24"+
}, +
{ +
"state": 2, +
"activity": "REVISIÓN", +
"final_date": "2020-02-25", +
"activity_id": 2, +
"current_days": 0, +
"initial_date": "2020-02-25"+
} +
]
(1 row)
... this UPDATE should work (the original statement only changed one element because UPDATE ... FROM applies at most one matching row from the CTE to each target row, so only one of the jsonb_set calls takes effect; rebuilding the whole array with jsonb_agg avoids that):
postgres=# UPDATE task_executions
SET task_activitygraph = (
SELECT jsonb_agg(
CASE WHEN elem->>'state' = '2'
THEN
jsonb_set(
elem,
'{current_days}',
to_jsonb((elem->>'final_date')::date - current_date)
)
ELSE
elem
END
)
FROM jsonb_array_elements(task_activitygraph) AS a(elem)
);
UPDATE 1
Documentation: https://www.postgresql.org/docs/9.5/functions-json.html
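If you would rather not rewrite rows that don't need it, a hedged variant of my own (same demo table as above, not from the original answer) adds a containment test so only rows holding a state = 2 element are touched:
UPDATE task_executions
SET task_activitygraph = (
    SELECT jsonb_agg(
               CASE WHEN elem->>'state' = '2'
                    THEN jsonb_set(elem, '{current_days}',
                                   to_jsonb((elem->>'final_date')::date - current_date))
                    ELSE elem
               END)
    FROM jsonb_array_elements(task_activitygraph) AS a(elem)
)
WHERE task_activitygraph @> '[{"state": 2}]';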
Side note: in transactional databases (where you have many concurrent clients, and processing speed and storage efficiency matter), and if your objects have a fixed structure, DO NOT STORE your data as JSON. Use a relational data model instead.

Append Multiple Objects into Existing Array Using JSON_MODIFY

The problem is appending a new JSON array to an existing JSON array:
Suppose I have the following JSON Array
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}]
How do I append [{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}] to it using JSON_MODIFY?
The resulting value for the updated column should be:
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}, {"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]
I don't think that you can merge two JSON arrays with one JSON_MODIFY() call, but the following statement (using JSON_MODIFY()) is a possible solution:
Statement:
DECLARE @json NVARCHAR(500)='[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}]'
DECLARE @new NVARCHAR(500)='[{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]'
SELECT @json = JSON_MODIFY(
@json,
'append $',
JSON_QUERY([value])
)
FROM OPENJSON(@new)
SELECT @json
Result:
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"},{"id": 3, "data": "Three"},{"id": 4, "data": "Four"}]
You can use "JSON_MODIFY" function and append key to modify JSON object like below:
For merging two JSON arrays:
DECLARE @json1 NVARCHAR(500)='[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}]';
DECLARE @json2 NVARCHAR(500)='[{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]';
SELECT t.id, t.[data]
FROM
(
SELECT * FROM OPENJSON(@json1) WITH(id int,[data] NVARCHAR(MAX))
UNION ALL
SELECT * FROM OPENJSON(@json2) WITH(id int,[data] NVARCHAR(MAX))
) t
FOR JSON PATH;
For appending individual JSON objects:
DECLARE @info NVARCHAR(500)='[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}]';
PRINT @info;
SET @info = JSON_MODIFY(@info, 'append $', JSON_QUERY('{"id": 3, "data": "Three"}'))
SET @info = JSON_MODIFY(@info, 'append $', JSON_QUERY('{"id": 4, "data": "Four"}'))
PRINT @info;
Workaround I found for my project
I have two tables, t1 and t2, which are identical in structure. Table t1 keeps records of supplier certificates. Table t2 receives, via an API, new certificates obtained by the supplier, so table t1 shall be updated with the new certificates from table t2. Certificate data is stored in a JSON array of objects, similar to the example of the topic starter.
Task
The JSON array in t1 column JSON_t1 shall be appended with the JSON array from t2 column JSON_t2. Here's the structure, simplified for example purposes:
Table "t1"
recordId
JSON_t1
1
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"}]
Table "t2"
recordId
JSON_t2
1
[{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]
RESULT
appended t1.JSON_t1
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"},{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]
SQL method
SELECT
t1.JSON_t1,
t2.JSON_t2,
concat('[', replace(replace(json_modify(t1.JSON_t1, 'append $', json_query(t2.JSON_t2)), '[', ''), ']', ''), ']') as "appended t1.JSON_t1"
FROM t1
INNER JOIN t2 ON t1.recordId = t2.recordId
Method explained
JSON_t2 is converted to JSON format with json_query(t2.JSON_t2) to avoid escaping of characters
JSON_t1 is appended with JSON_t2 using json_modify(t1.JSON_t1, 'append $', json_query(t2.JSON_t2)), which produces the following output: [{"id": 1, "data": "One"}, {"id": 2, "data": "Two"},[{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}]]. Note the extra square brackets around the appended array, which must be fixed because this is not a correct final array of objects.
The result is then stripped of all square brackets with replace applied twice, once for "[" and once for "]": replace(replace(json_modify(t1.JSON_t1, 'append $', json_query(t2.JSON_t2)), '[', ''), ']', '')
Finally, one pair of square brackets is added at the start and end to make a valid JSON array: concat('[', replace(replace(json_modify(t1.JSON_t1, 'append $', json_query(t2.JSON_t2)), '[', ''), ']', ''), ']')
You can test if the final JSON is valid with ISJSON()
Points to note
If you don't use json_query, you get the following result, with the appended array embedded as an escaped string:
[{"id": 1, "data": "One"}, {"id": 2, "data": "Two"},"[{\"id\": 3, \"data\": \"Three\"}, {\"id\": 4, \"data\": \"Four\"}]"]
I tried stripping the square brackets only from JSON_t2 and using json_modify like this: json_modify(t1.JSON_t1, 'append $', json_query('{"id": 3, "data": "Three"}, {"id": 4, "data": "Four"}')), but this results in appending only the first item from JSON_t2, like this: [{"id": 1, "data": "One"}, {"id": 2, "data": "Two"},{"id": 3, "data": "Three"}]
NB: if you have nested arrays, this method is not suitable. In my case it works well, since I have a simple array of objects (certificates with various key/value pairs such as validity, type, issue date, etc.).
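For completeness, a set-based alternative sketch of my own (not the poster's method, and it shares the same flat-array limitation because the element shape must be known): shred both arrays per record with OPENJSON and rebuild with FOR JSON instead of doing string surgery on the brackets.
SELECT t1.recordId,
       (
           SELECT x.id, x.[data]
           FROM (
               SELECT * FROM OPENJSON(t1.JSON_t1) WITH (id int, [data] nvarchar(max))
               UNION ALL
               SELECT * FROM OPENJSON(t2.JSON_t2) WITH (id int, [data] nvarchar(max))
           ) AS x
           FOR JSON PATH
       ) AS [appended t1.JSON_t1]
FROM t1
INNER JOIN t2 ON t2.recordId = t1.recordId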

Postgres Build Complex JSON Object from Wide Column Like Design to Key Value

I could really use some help here before my mind explodes...
Given the following data structure:
SELECT * FROM (VALUES (1, 1, 1, 1), (2, 2, 2, 2)) AS t(day, apple, banana, orange);
day | apple | banana | orange
-----+-------+--------+--------
1 | 1 | 1 | 1
2 | 2 | 2 | 2
I want to construct a JSON object which looks like the following:
{
"data": [
{
"day": 1,
"fruits": [
{
"key": "apple",
"value": 1
},
{
"key": "banana",
"value": 1
},
{
"key": "orange",
"value": 1
}
]
}
]
}
Maybe I am not so far away from my goal:
SELECT json_build_object(
'data', json_agg(
json_build_object(
'day', t.day,
'fruits', t)
)
) FROM (VALUES (1, 1, 1, 1), (2, 2, 2, 2)) AS t(day, apple, banana, orange);
Results in:
{
"data": [
{
"day": 1,
"fruits": {
"day": 1,
"apple": 1,
"banana": 1,
"orange": 1
}
}
]
}
I know that there is json_each which may do the trick. But I am struggling to apply it to the query.
Edit:
This is my updated query which, I guess, is pretty close. I have dropped the idea of solving it with json_each. Now I only have to return an array of fruits instead of appending to the fruits object:
SELECT json_build_object(
'data', json_agg(
json_build_object(
'day', t.day,
'fruits', json_build_object(
'key', 'apple',
'value', t.apple,
'key', 'banana',
'value', t.banana,
'key', 'orange',
'value', t.orange
)
)
)
) FROM (VALUES (1, 1, 1, 1), (2, 2, 2, 2)) AS t(day, apple, banana, orange);
Would I need to add a subquery to prevent a nested aggregate function?
Use the function jsonb_each() to get pairs (key, value), so you do not have to know the number of columns and their names to get a proper output:
select jsonb_build_object('data', jsonb_agg(to_jsonb(s) order by day))
from (
select day, jsonb_agg(jsonb_build_object('key', key, 'value', value)) as fruits
from (
values (1, 1, 1, 1), (2, 2, 2, 2)
) as t(day, apple, banana, orange),
jsonb_each(to_jsonb(t)- 'day')
group by 1
) s;
The above query gives this object:
{
"data": [
{
"day": 1,
"fruits": [
{
"key": "apple",
"value": 1
},
{
"key": "banana",
"value": 1
},
{
"key": "orange",
"value": 1
}
]
},
{
"day": 2,
"fruits": [
{
"key": "apple",
"value": 2
},
{
"key": "banana",
"value": 2
},
{
"key": "orange",
"value": 2
}
]
}
]
}
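To make the building blocks more tangible, here is a small illustration of my own (not part of the original answer): to_jsonb(t) turns the whole row into an object, - 'day' removes that key, and jsonb_each() expands what is left into (key, value) rows.
select key, value
from (values (1, 1, 1, 1)) as t(day, apple, banana, orange),
     jsonb_each(to_jsonb(t) - 'day');

  key   | value
--------+-------
 apple  | 1
 banana | 1
 orange | 1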