Db2 nested JSON - sql

I am trying to use Db2 JSON capabilities and in particular nested tables.
-- Db2: store each JSON document in binary (BSON) form in a VARBINARY column.
CREATE TABLE JSON.TEST1 (COL1 VARBINARY(2000));
-- JSON_TO_BSON converts the JSON text to BSON; the document holds an order
-- with a nested 'items.item' array of two line items.
INSERT INTO JSON.TEST1 (COL1) VALUES (JSON_TO_BSON(
'{"id" : 103,
"orderDate": "2014-06-20",
"items": {
"item": [ { "partNum": "872-AA",
"productName": "Lawnmower",
"quantity": 1,
"USPrice": 749.99
},
{ "partNum": "837-CM",
"productName": "Digital Camera",
"quantity": 2,
"USPrice": 199.99
}
]
}
}'
));
This works fine, however obviously items in the array are hardcoded references.
-- Works, but each array element is addressed by a hardcoded index
-- ('item[0]', 'item[1]'), so the query cannot adapt to arrays of other sizes.
SELECT id
,orderDate
,product1
,product2
FROM json.TEST1 AS js,
JSON_TABLE
(js.COL1, 'strict $'
COLUMNS( id INTEGER PATH '$.id'
,orderDate DATE PATH '$.orderDate'
,product1 VARCHAR(32) PATH '$.items.item[0].productName'
,product2 VARCHAR(32) PATH '$.items.item[1].productName'
)
ERROR ON ERROR) AS t
;
The following is what I am trying to get working:
-- NOTE(review): this uses the SQL/JSON standard NESTED COLUMNS clause; the
-- SQL0104N error quoted below shows Db2's JSON_TABLE parser rejects it, so
-- the array cannot be unnested this way (see the RCTE workaround further down).
SELECT id
,orderDate
,productName
FROM json.TEST1 AS js,
JSON_TABLE
(js.COL1, '$'
COLUMNS( id INTEGER PATH '$.id'
,orderDate DATE PATH '$.orderDate'
,NESTED 'lax $.items.item[]'
COLUMNS (
"productName" VARCHAR(32)
)
)
) as t;
For reference, this is the error I am receiving:
1) [Code: -104, SQL State: 42601] An unexpected token "'lax $.items.item[]'
COLUMNS (
" was found following ",NESTED". Expected tokens may include: "<space>".. SQLCODE=-104, SQLSTATE=42601, DRIVER=4.26.14
2) [Code: -727, SQL State: 56098] An error occurred during implicit system action type "2". Information returned for the error includes SQLCODE "-104", SQLSTATE "42601" and message tokens "'lax $.items.item[]'
COLUMNS (
|,N".. SQLCODE=-727, SQLSTATE=56098, DRIVER=4.26.14

Unfortunately, you must unnest JSON arrays on your own, for example, with Recursive Common Table Expression (RCTE):
-- A table with JSON documents
WITH TAB (DOC_ID, DOC) AS
(
VALUES
(
1,
'{"id" : 103,
"orderDate": "2014-06-20",
"items": {
"item": [ { "partNum": "872-AA",
"productName": "Lawnmower",
"quantity": 1,
"USPrice": 749.99
},
{ "partNum": "837-CM",
"productName": "Digital Camera",
"quantity": 2,
"USPrice": 199.99
}
]
}
}'
)
)
-- get a JSON array only for each record, re-wrapped as {"items": [...]}
-- so the RCTE below can use fixed '$.items[n]' paths
, ITEMS_ARRAY (DOC_ID, ITEMS) AS
(
SELECT DOC_ID, JSON_OBJECT(KEY 'items' VALUE JSON_QUERY(DOC, '$.items.item') FORMAT JSON)
FROM TAB
)
-- Use RCTE to unnest it: one row per array element, stepping the index
-- until JSON_EXISTS reports no further element
, ITEMS (DOC_ID, INDEX, ITEM) AS
(
SELECT DOC_ID, 0, JSON_QUERY(ITEMS, '$.items[0]')
FROM ITEMS_ARRAY
WHERE JSON_EXISTS(ITEMS, '$.items[0]')
UNION ALL
SELECT I.DOC_ID, I.INDEX+1, JSON_QUERY(A.ITEMS, '$.items['|| TRIM(I.INDEX+1) ||']')
FROM ITEMS I, ITEMS_ARRAY A
WHERE I.DOC_ID = A.DOC_ID AND JSON_EXISTS(A.ITEMS, '$.items['|| TRIM(I.INDEX+1) ||']')
)
SELECT D.*, IT.*
--, I.*
FROM TAB T
JOIN ITEMS I ON I.DOC_ID = T.DOC_ID
-- array element to row
CROSS JOIN JSON_TABLE
(
I.ITEM, 'strict $' COLUMNS
(
PARTNUM VARCHAR(20) PATH '$.partNum'
, PRODUCTNAME VARCHAR(20) PATH '$.productName'
, QUANTITY INT PATH '$.quantity'
, USPRICE DECFLOAT PATH '$.USPrice'
) ERROR ON ERROR
) IT
-- other elements of original JSON to row
CROSS JOIN JSON_TABLE
(
T.DOC, 'strict $' COLUMNS
(
ID INT PATH '$.id'
, ORDERDATE DATE PATH '$.orderDate'
) ERROR ON ERROR
) D
;
The result is:
|ID |ORDERDATE |PARTNUM|PRODUCTNAME  |QUANTITY|USPRICE|
|---|----------|-------|--------------|--------|-------|
|103|2014-06-20|872-AA |Lawnmower |1 |749.99 |
|103|2014-06-20|837-CM |Digital Camera|2 |199.99 |
db<>fiddle example.
Update
It's convenient to create a generic function suitable for any JSON array:
-- WITH A GENERIC TABLE FUNCTION
-- Unnests an arbitrary JSON array: P_PATH points at an array inside P_DOC;
-- returns one row per element with its 0-based INDEX and the element text.
CREATE OR REPLACE FUNCTION UNNEST_JSON (P_DOC CLOB(1M), P_PATH VARCHAR(128))
RETURNS TABLE
(
INDEX INT
, ITEM CLOB(1M)
)
RETURN
-- re-wrap the target array as {"items": [...]} so fixed paths work below
WITH ITEMS_ARRAY (ITEMS) AS
(
VALUES JSON_OBJECT(KEY 'items' VALUE JSON_QUERY(P_DOC, P_PATH) FORMAT JSON)
)
-- recursive walk: advance the index while an element exists at that position
, ITEMS (INDEX, ITEM) AS
(
SELECT 0, JSON_QUERY(ITEMS, '$.items[0]')
FROM ITEMS_ARRAY
WHERE JSON_EXISTS(ITEMS, '$.items[0]')
UNION ALL
SELECT I.INDEX+1, JSON_QUERY(A.ITEMS, '$.items['|| TRIM(I.INDEX+1) ||']')
FROM ITEMS I, ITEMS_ARRAY A
WHERE JSON_EXISTS(A.ITEMS, '$.items['|| TRIM(I.INDEX+1) ||']')
)
SELECT INDEX, ITEM
FROM ITEMS
#
Such a generic function simplifies the solution:
-- Two sample documents; UNNEST_JSON turns each document's item array into rows
WITH TAB (DOC_ID, DOC) AS
(
VALUES
(
1,
'{"id" : 103,
"orderDate": "2014-06-20",
"items": {
"item": [ { "partNum": "872-AA",
"productName": "Lawnmower",
"quantity": 1,
"USPrice": 749.99
},
{ "partNum": "837-CM",
"productName": "Digital Camera",
"quantity": 2,
"USPrice": 199.99
}
]
}
}'
)
,
(
2,
'{"id" : 203,
"orderDate": "2014-06-20",
"items": {
"item": [ { "partNum": "002-AA",
"productName": "Lawnmower",
"quantity": 10,
"USPrice": 749.99
},
{ "partNum": "002-BB",
"productName": "Digital Camera",
"quantity": 20,
"USPrice": 199.99
}
]
}
}'
)
)
SELECT T.DOC_ID, A.INDEX, D.*, IT.*
FROM
TAB T
-- unnesting (lateral correlation: A depends on T.DOC)
, TABLE(UNNEST_JSON(T.DOC, '$.items.item')) A
-- array element to row
, JSON_TABLE
(
A.ITEM, 'strict $' COLUMNS
(
PARTNUM VARCHAR(20) PATH '$.partNum'
, PRODUCTNAME VARCHAR(20) PATH '$.productName'
, QUANTITY INT PATH '$.quantity'
, USPRICE DECFLOAT PATH '$.USPrice'
) ERROR ON ERROR
) IT
-- other elements of original JSON to row
, JSON_TABLE
(
T.DOC, 'strict $' COLUMNS
(
ID INT PATH '$.id'
, ORDERDATE DATE PATH '$.orderDate'
) ERROR ON ERROR
) D;
The result is:
|DOC_ID|INDEX|ID |ORDERDATE |PARTNUM|PRODUCTNAME  |QUANTITY|USPRICE|
|------|-----|---|----------|-------|--------------|--------|-------|
|1 |0 |103|2014-06-20|872-AA |Lawnmower |1 |749.990|
|1 |1 |103|2014-06-20|837-CM |Digital Camera|2 |199.990|
|2 |0 |203|2014-06-20|002-AA |Lawnmower |10 |749.990|
|2 |1 |203|2014-06-20|002-BB |Digital Camera|20 |199.990|

A couple of UDFs with the same functionality which should work faster, since they don't use RCTE.
An example of their use is in another, older answer of mine here.
-- Uses XML, should work in all environments
-- Same contract as UNNEST_JSON, but avoids the RCTE: a WHILE loop collects the
-- array elements into an XML sequence, then XMLTABLE turns it back into rows.
CREATE OR REPLACE FUNCTION UNNEST_JSON2 (P_DOC CLOB(1M), P_PATH VARCHAR(128))
RETURNS TABLE
(
INDEX INT
, ITEM CLOB (1M)
)
DETERMINISTIC
NO EXTERNAL ACTION
BEGIN ATOMIC
DECLARE L_IDX INT DEFAULT 0;
DECLARE L_XML XML;
L1:
-- loop until no element exists at position L_IDX
WHILE TRUE DO
IF NOT JSON_EXISTS (P_DOC, P_PATH || '[' || L_IDX || ']') THEN LEAVE L1; END IF;
-- append one <A>element-text</A> node and advance the index atomically
SET (L_XML, L_IDX) =
(
XMLCONCAT (L_XML, XMLELEMENT (NAME "A", JSON_QUERY (P_DOC, P_PATH || '[' || L_IDX || ']')))
, L_IDX + 1
);
END WHILE L1;
RETURN
-- SEQ is 1-based, so subtract 1 to report a 0-based array index
SELECT SEQ - 1, T.ITEM
FROM XMLTABLE
(
'$D' PASSING L_XML AS "D"
COLUMNS
SEQ FOR ORDINALITY
, ITEM CLOB (1M) PATH '.'
) T
WHERE L_XML IS NOT NULL;
END
#
-- Doesn't work in DPF environment, but should be the fastest one
-- Pipelined variant: PIPE emits each (index, element) row directly,
-- with no intermediate XML or recursion.
CREATE OR REPLACE FUNCTION UNNEST_JSON3 (P_DOC CLOB(1M), P_PATH VARCHAR(128))
RETURNS TABLE
(
INDEX INT
, ITEM CLOB (1M)
)
DETERMINISTIC
NO EXTERNAL ACTION
BEGIN
DECLARE L_IDX INT DEFAULT 0;
L1:
-- loop until no element exists at position L_IDX
WHILE TRUE DO
IF NOT JSON_EXISTS (P_DOC, P_PATH || '[' || L_IDX || ']') THEN LEAVE L1; END IF;
PIPE (L_IDX, JSON_QUERY (P_DOC, P_PATH || '[' || L_IDX || ']'));
SET L_IDX = L_IDX + 1;
END WHILE L1;
RETURN;
END
#

Related

PostgreSQL regexp_replace square brackets to other format

I have this column text in a table which contains following string
{
"person": {
"id": "b01d9bf1-998f-4fa8-879a-0f8d0de4b626",
"creationDate": [
2022,
1,
22
],
"modificationDate": [
2022,
1,
27
]
}
}
I have the following regexp_matches query:
select regexp_matches('"creationDate": [2022,1,22], "modificationDate": [2022,1,27],', '\[(.[^)]+)\]', 'g')
but I need to replace
"creationDate": [2022,1,22], "modificationDate": [2022,1,27],
to
"creationDate": "2022-01-22", "modificationDate": "2022-01-27",
I'm not very good working with regular expressions. Also the difficulty is in adding a leading zero to the month as you can see.
Regex-based
A nested regex replacement does the trick:
-- Rewrite [Y,M,D] arrays as "Y-M-D" strings, then zero-pad single digits.
-- The original only padded the month ('-(\d)-'); a single-digit day is
-- followed by '"', not '-', so a third pass is needed for it.
select regexp_replace(
         regexp_replace(
           regexp_replace(
             '"creationDate": [2022,1,22], "modificationDate": [2022,1,27],'
           , '\[(\d+),(\d+),(\d+)\]'
           , '"\1-\2-\3"'
           , 'g'
           )
         , '-(\d)-'   -- pad a single-digit month
         , '-0\1-'
         , 'g')
       , '-(\d)"'     -- pad a single-digit day
       , '-0\1"'
       , 'g');
The outer replacement only fires if the month is represented by a single digit.
JSON-based
Dwelling on the comment by @a_horse_with_no_name, the following query uses JSON operators:
-- One output row per property: the key, plus the [y,m,d] array value
-- formatted as an ISO-like date string with zero-padded month and day.
SELECT kv.key
     , (kv.value ->> 0)
       || '-' || LPAD(kv.value ->> 1, 2, '0')
       || '-' || LPAD(kv.value ->> 2, 2, '0') AS mydate
FROM json_each('{"creationDate": [2022,1,22], "modificationDate": [2022,1,27] }'::json) AS kv
;
The query builds a set of records from a JSON object consisting of a key (the JSON property name) and a value of the native JSON datatype, which happens to be an array. The array elements are extracted, padded with leading zeros where appropriate and concatenated.
See the Postgresql docs for JSON operators and functions for more info.
Full-fledged example
Query to produce a recordset of persons containing their id plus the creation and modification date based on a json array of objects as given in the question.
-- Flatten a jsonb array of {"person": {...}} wrappers into records
-- (id + the two [y,m,d] date arrays), then format each array as a
-- zero-padded date string.
select id
, ("creationDate" ->> 0) || '-' || LPAD("creationDate" ->> 1, 2, '0') || '-' || LPAD("creationDate" ->> 2, 2, '0') creation_date
, ("modificationDate" ->> 0) || '-' || LPAD("modificationDate" ->> 1, 2, '0') || '-' || LPAD("modificationDate" ->> 2, 2, '0') modification_date
from jsonb_to_recordset (
(
-- collect every $.person object into a single jsonb array
select jsonb_path_query_array ( orig.j, '$.person' ) part
from (
select '[
{ "person": { "id": "b01d9bf1-998f-4fa8-879a-0f8d0de4b626", "creationDate": [2022,1,22], "modificationDate": [2022,1,27] } }
, { "person": { "id": "deadcafe-998f-4fa8-879a-0f8d0de4b626", "creationDate": [2000,1,1], "modificationDate": [2000,12,31] } }
]'::jsonb j
) orig
)
) as x( id varchar(50), "creationDate" json, "modificationDate" json )
;
Available live here (dbfiddle.co.uk).

Extract a JSON array items to a single column separated with comma

Struggling to find an answer to the below JSON problem. I would like to display the entire SKU/Quantity list in the "shipmentItems" array to their respective column with a comma-separated value. My example below only allows me to display the first SKU/quantity from the array but, my goal is to get all listed in the columns with comma-separated.
JSON example:
{"shipments": [
{
"shipmentId": 100003768,
"orderNumber": "9648219086",
"shipDate": "2021-10-28",
"serviceCode": "ups_ground",
"shipmentItems": [
{
"orderItemId": 1464643208,
"lineItemKey": "10322938560608",
"sku": "SCPXTSS-BAG-06",
"name": "SCOOP-PLATE-06 (1000ml)",
"weight": {
"value": 0,
"units": "ounces",
"WeightUnits": 1
},
"quantity": 1,
"unitPrice": 0,
"taxAmount": null
},
{
"orderItemId": 1464643207,
"lineItemKey": "10322938527840",
"sku": "SCPZRTS-TRAY-01",
"name": "Beef: Tray 3 (Fill 004)<br>",
"weight": {
"value": 60,
"units": "ounces",
"WeightUnits": 1
},
"quantity": 1,
"unitPrice": 102.72,
"taxAmount": null
}
],
"labelData": null,
"formData": null
}
]
}
SQL query I'm using:
-- Load the JSON file into a variable (the scraped text showed '@' as '#';
-- 'DECLARE #JSON' is not valid T-SQL, so the variable markers are restored).
DECLARE @JSON varchar(max)
SELECT @JSON = BulkColumn
FROM OPENROWSET (BULK 'C:\Users\XPS-LT\json\today\shipments_20211031.json', SINGLE_CLOB)
IMPORT
-- NOTE(review): the paths below read only element [0] of shipmentItems,
-- which is why only the first SKU/quantity shows up.
SELECT *
FROM OPENJSON (@JSON, '$.shipments')
WITH
(
[shipmentId] bigint,
[orderNumber] nvarchar(60),
[shipDate] date,
[serviceCode] nvarchar(30),
[sku] nvarchar(MAX) N'$.shipmentItems[0].sku',
[quantity] int N'$.shipmentItems[0].quantity'
)
;
The "shipmentItems" part of the input JSON is an array, so you need an AS JSON clause in the first explicit schema and an additional OPENJSON() call:
-- Variable markers restored from the scrape artifact ('#' -> '@').
DECLARE @json nvarchar(max)
...
SELECT
j.[shipmentId], j.[orderNumber], j.[shipDate], j.[serviceCode],
a.[sku], a.[quantity]
FROM OPENJSON (@json, '$.shipments') WITH (
[shipmentId] bigint,
[orderNumber] nvarchar(60),
[shipDate] date,
[serviceCode] nvarchar(30),
[shipmentItems] nvarchar(max) AS JSON  -- keep the nested array as raw JSON
) j
OUTER APPLY (
-- re-open each shipment's item array and fold it into comma-separated lists
SELECT
STRING_AGG([sku], ',') WITHIN GROUP (ORDER BY [orderItemId]),
STRING_AGG([quantity], ',') WITHIN GROUP (ORDER BY [orderItemId])
FROM OPENJSON (j.shipmentItems) WITH (
[orderItemId] int '$.orderItemId',
[sku] nvarchar(max) '$.sku',
[quantity] int N'$.quantity'
)
) a ([sku], [quantity])
Result:
shipmentId orderNumber shipDate serviceCode sku quantity
100003768 9648219086 2021-10-28 ups_ground SCPZRTS-TRAY-01,SCPXTSS-BAG-06 1,1

Create json key value from table column name and data

Is it possible to create JSON key value from a table SELECT statement, where column name as key and the column value as value
-- Table variable (restored '@' from the scraped '#'; 'declare #T table' is invalid T-SQL)
declare @T table(Id int, ItemName varchar(10), CategoryId int, ItemDate date)
insert into @T
values(1,'ABC',100, '1/1/2020')
to return something as below
{
"id": 1,
"table": "tableName",
"data": [{
"key": "ItemName",
"value": "ABC"
},
{
"key": "CategoryId",
"value": "100"
},
{
"key": "ItemDate",
"value": "1/1/2020"
}
]
}
I have looked at selecting as JSON but stuck here
-- Restored '@T' from the scraped '#T'
select *
from @T
for json auto
You may try to use VALUES table value constructor and FOR JSON AUTO. As is mentioned in the documentation, when ... you specify the AUTO option, the format of the JSON output is automatically determined based on the order of columns in the SELECT list and their source tables.
Table:
CREATE TABLE Tbl (
Id int,
ItemName varchar(10),
CategoryId int,
ItemDate date
)
-- explicit column list keeps the INSERT valid even if the table gains columns
INSERT INTO Tbl (Id, ItemName, CategoryId, ItemDate)
VALUES
(1, 'ABC', 100, '1/1/2020'),
(2, 'DEF', 200, '2/2/2020')
Statement:
-- CROSS APPLY (VALUES ...) unpivots each row into (key, value) pairs;
-- FOR JSON AUTO then nests the "Data" rowset under each Tbl row
-- (the alias names become the JSON property names).
SELECT t.Id, data.[key], data.[value]
FROM Tbl t
CROSS APPLY (VALUES
('ItemName', CONVERT(varchar(max), ItemName)),
('CategoryId', CONVERT(varchar(max), CategoryId)),
('ItemDate', CONVERT(varchar(max), ItemDate))
) Data ([key], [value])
FOR JSON AUTO
Result:
[
{
"Id":1,
"Data":[
{"key":"ItemName", "value":"ABC"},
{"key":"CategoryId","value":"100"},
{"key":"ItemDate","value":"2020-01-01"}
]
},
{
"Id":2,
"Data":[
{"key":"ItemName", "value":"DEF"},
{"key":"CategoryId", "value":"200"},
{"key":"ItemDate", "value":"2020-02-02"}
]
}
]
As an additional option you may try to build the inner JSON for each row:
-- Alternative: build the "Data" array per row with a correlated subquery
-- (FOR JSON PATH), then wrap the whole result with FOR JSON AUTO.
SELECT
Id,
(
SELECT [key], [value]
FROM (VALUES
('ItemName', CONVERT(varchar(max), ItemName)),
('CategoryId', CONVERT(varchar(max), CategoryId)),
('ItemDate', CONVERT(varchar(max), ItemDate))
) v ([key], [value])
FOR JSON PATH
) AS Data
FROM Tbl
FOR JSON AUTO

SQL Query to get the json array values by comma separated

I have the below Json object. I need to get the task names by comma (,) separated.
{
"Model": [
{
"ModelName": "Test Model",
"Object": [
{
"ID": 1,
"Name": "ABC",
"Task" : [
{
TaskID : 1222,
Name: "TaskA"
},
{
TaskID : 154,
Name: "TaskB"
}
]
},
{
"ID": 11,
"Name": "ABCD",
"Task" : [
{
TaskID : 222,
Name: "TaskX"
},
{
TaskID : 234,
Name: "TaskY"
}
]
},
]
}]}
The expected Output should be in the below table. I need the task names should be comma separated.
ModelName ObjectID ObjectName TaskName
Test Model 1 ABC TaskA, TaskB
Test Model 11 ABCD TaskX, TaskY
I tried the below query. But I don't know how to group Task names.
-- NOTE(review): the Task objects have a 'Name' property, not 'TaskName', so
-- the '$.TaskName' path below presumably yields NULL — and the task names are
-- not yet grouped per object (the question the answers below address).
SELECT S1.ModelName,
S2.ID AS ObjectID,
S2.Name AS ObjectName,
S3.TaskName
FROM TableA
CROSS APPLY OPENJSON(JsonData)
WITH (Model NVARCHAR(MAX) '$.Model[0]' AS JSON) S1
CROSS APPLY OPENJSON (S1.Model)
WITH (Object NVARCHAR(MAX) '$.Object' AS JSON,
ID INT '$.ID',
Name NVARCHAR(250) '$.Name') S2
CROSS APPLY OPENJSON (S2.Object)
WITH (Task NVARCHAR(MAX) '$.Task' AS JSON ,
TaskName NVARCHAR(MAX) '$.TaskName') S3
Corrected the json to a valid format and here is the SQL query for your desired output. The below query compiles and runs well in Oracle Compiler
-- Oracle syntax: NESTED PATH unnests the Object array, and
-- FORMAT JSON WITH WRAPPER returns all task names of one object
-- as a single JSON array value in L_TASK_NAME.
SELECT * FROM JSON_TABLE(
'{"Model":[{"ModelName":"Test Model","Object":[{"ID":1,"Name":"ABC","Task":[{"TaskID":1222,"Name":"TaskA"},{"TaskID":154,"Name":"TaskB"}]},{"ID":11,"Name":"ABCD","Task":[{"TaskID":222,"Name":"TaskX"},{"TaskID":234,"Name":"TaskY"}]}]}]}',
'$.Model[*]'
COLUMNS (
-- L_MODEL_ROWNUM FOR ORDINALITY,
L_MODEL_NAME VARCHAR2(10) PATH '$.ModelName',
NESTED PATH '$.Object[*]' COLUMNS (
--L_OBJECT_ROWNUM FOR ORDINALITY,
L_OBJECT_ID NUMBER PATH '$.ID',
L_OBJECT_NAME VARCHAR2(10) PATH '$.Name',
L_TASK_NAME VARCHAR2(100) FORMAT JSON WITH WRAPPER PATH '$.Task[*].Name'
)
)
)
Here is the query which runs on SQL Server 2019,
-- Variable markers restored from the scrape artifact ('#' -> '@').
DECLARE @JSONDATA NVARCHAR(MAX);
SET
@JSONDATA = N'{"Model":[{"ModelName":"Test Model","Object":[{"ID":1,"Name":"ABC","Task":[{"TaskID":1222,"Name":"TaskA"},{"TaskID":154,"Name":"TaskB"}]},{"ID":11,"Name":"ABCD","Task":[{"TaskID":222,"Name":"TaskX"},{"TaskID":234,"Name":"TaskY"}]}]}]}';
-- TASK: one row per (model, object, task name), peeling one JSON level
-- per CROSS APPLY OPENJSON
WITH TASK AS
(
SELECT
MODEL,
ID,
NAME,
TASK_NAME
FROM
OPENJSON(@JSONDATA)
WITH (MODELS NVARCHAR(MAX) '$.Model' AS JSON)
CROSS APPLY OPENJSON(MODELS)
WITH (MODEL NVARCHAR(80) '$.ModelName', OBJECTS NVARCHAR(MAX) '$.Object' AS JSON)
CROSS APPLY OPENJSON(OBJECTS)
WITH (ID INT '$.ID', NAME NVARCHAR(250) '$.Name', TASKS NVARCHAR(MAX) '$.Task' AS JSON)
CROSS APPLY OPENJSON(TASKS)
WITH (TASK_NAME NVARCHAR(80) '$.Name')
)
SELECT DISTINCT
MODEL AS MODELNAME,
ID AS OBJECTID,
NAME AS OBJECTNAME,
-- pre-STRING_AGG comma-list idiom: correlated FOR XML PATH + STUFF
STUFF((
SELECT
',' + [TASK_NAME]
FROM
TASK T1
WHERE
T1.[ID] = T2.[ID] FOR XML PATH('')), 1, 1, '') AS TASKNAME
FROM
TASK T2;

Postgres LTREE display by grouping parents and children

I have a sample Ltree Structure than I want to return it as JSON structure.
I've tried searching on stack overflow but the results are giving wrong responses.
-- Each node stores its full materialized path (e.g. '1.4.7') as an ltree;
-- the path labels are the node ids.
create table node
(
id integer not null,
name varchar(255),
path ltree not null
);
and I have these data
-- Three root categories, each with one child and one grandchild
-- (the ltree path encodes the ancestry: '1.4.7' = Residential > Res type 1 > Residential 2)
INSERT INTO node (id, name, path) VALUES
    (1, 'Residential',   '1'),
    (2, 'Commercial',    '2'),
    (3, 'Industrial',    '3'),
    (4, 'Res type 1',    '1.4'),
    (5, 'Comm type 1',   '2.5'),
    (6, 'Industrial 1',  '3.6'),
    (7, 'Residential 2', '1.4.7'),
    (8, 'Commercial 2',  '2.5.8'),
    (9, 'Industrial 2',  '3.6.9');
and this is what I want to collect with my query
[
{
"name": "Residentioal",
"children": [
{
"name": "Res type 1",
"children": [
{
"name": "Residential 2",
"children": []
}
]
}
]
},
{
"name": "Commercial",
"children": [
{
"name": "Comm type 1",
"children": [
{
"name": "Commercial 2",
"children": []
}
]
}
]
},
{
"name": "Industrial",
"children": [
{
"name": "Industrial 1",
"children": [
{
"name": "Industrial 2",
"children": []
}
]
}
]
}
]
I tried recursive with .. but it keeps looping through without returning proper value.
You need two parts, the recursion one and additionally a function. I explained this already here, here and here, so please have a look there for further explanations.
demo:db<>fiddle
Recursion
-- Walks the ltree level by level. For every node it builds the JSON object
-- {"name": ..., "children": []} plus the jsonb path (jsonpath) at which that
-- object must be inserted into the final nested array; jsonb_insert consumes
-- these rows in the nested_json() function further down.
WITH RECURSIVE cte AS (
SELECT
id,
name,
path,
json_build_object('name', name, 'children', ARRAY[]::text[]) AS jsonobject,
ARRAY[]::text[] || (row_number() OVER () - 1)::text as jsonpath,
0 as depth
FROM node
WHERE path = subpath(path, 0, 1) --parents
UNION ALL
SELECT
n.id,
n.name,
n.path,
json_build_object('name', n.name, 'children', ARRAY[]::text[]),
-- extend the parent's path with 'children' plus the child's 0-based position
jsonpath || '{children}' || (row_number() OVER (PARTITION BY subpath(n.path, depth, 1)::text ORDER BY subpath(n.path, depth + 1, 1)::text::int) - 1)::text,
c.depth + 1
FROM
node n
JOIN cte c
-- join each node to its parent one ltree level up
ON c.id = subpath(n.path, depth, 1)::text::int
AND nlevel(n.path) = depth + 2 AND subpath(n.path, depth + 1, 1)::text::int = n.id
)
SELECT * FROM cte
The function
-- Assembles the nested JSON: iterates the recursion's rows in order and
-- inserts each node's jsonobject at its precomputed jsonpath.
-- NOTE: not runnable as-is — the CTE from above must be pasted into the FOR loop.
CREATE OR REPLACE FUNCTION nested_json() RETURNS jsonb AS $$
DECLARE
_json_output jsonb;
_temprow record;
BEGIN
-- start from an empty top-level array
_json_output := '[]'::jsonb;
FOR _temprow IN
-- <Add the CTE from above here>
LOOP
SELECT
jsonb_insert(
_json_output,
_temprow.jsonpath,
_temprow.jsonobject
)
INTO _json_output;
END LOOP;
RETURN _json_output;
END;
$$ LANGUAGE plpgsql;
Please notice: The ltree structure is not a really good choice for this use case because you need to calculate the subpaths again and again. A simple reference to the parent would be more helpful and faster.
Edit: The db<>fiddle admin is great and installed the ltree extension, so there is a new fiddle