Update all element of an array in json postgresql - sql

In my table I have a jsonb type column.
Example jsonb data:
{
"id": "1",
"customer":[{"id": "1", "isPosted": "false"},{"id": "2","isPosted": "false"}]
}
Is it possible to update all element named isPosted to 'true'?

You can use a quick-and-dirty hack here – replace the value in the text representation and cast the result back to jsonb:
t=# with c(j) as (values('{"id": "1", "customer":[{"id": "1", "isPosted": "false"},{"id": "2","isPosted": "false"}]} '::jsonb))
select *,replace(j::text,'"isPosted": "false"','"isPosted": "true"')::jsonb from c;
-[ RECORD 1 ]------------------------------------------------------------------------------------------
j | {"id": "1", "customer": [{"id": "1", "isPosted": "false"}, {"id": "2", "isPosted": "false"}]}
replace | {"id": "1", "customer": [{"id": "1", "isPosted": "true"}, {"id": "2", "isPosted": "true"}]}
finally you can do it the right way:
t=# with c(j) as (values('{"id": "1", "customer":[{"id": "1", "isPosted": "false"},{"id": "2","isPosted": "false"}]} '::jsonb))
, n as (select jsonb_set(e,'{isPosted}'::text[],'true'),j from c, jsonb_array_elements(j->'customer') with ordinality a (e,o))
select jsonb_set(j,'{customer}'::text[],jsonb_agg(jsonb_set)) from n group by j;
jsonb_set
-----------------------------------------------------------------------------------------
{"id": "1", "customer": [{"id": "1", "isPosted": true}, {"id": "2", "isPosted": true}]}
(1 row)

Related

How to update each json object in json array?

I have a simple table, table_b:
id (integer)
data (json)
text (text)
1
{}
yes
2
{}
no
Json look like
{"types": [{"key": "first_event", "value": false}, {"key": "second_event", "value": false}, {"key": "third_event", "value": false}...]}
I just want to modify data and add to each json object in array ["test1", "test2"] to look like this :
{"types": [{"key": "first_event", "value": false, "can":["test1", "test2"] }, {"key": "second_event", "value": false , "can":["test1", "test2"]}, {"key": "third_event", "value": false , "can":["test1", "test2"]}...]}
of course, only where text is "yes".
I have tried:
UPDATE table_b
SET data = jsonb_set(data , '{types,can}', '["test1", "test2"]'::jsonb, true)
where text like 'yes';
But it does not work. How can I loop over table_b and over the data column?
The elements of a JSON array can be modified individually: each element is addressed by its index inside the path passed to jsonb_set.
Using this sample data
-- Sample data reproducing the question's table.
-- NOTE: the question's column "text" is renamed to "txt" here because
-- TEXT is a type name / reserved word in several SQL dialects.
create table table_b as
select * from (values
(1,'{"types": [{"key": "first_event", "value": false}, {"key": "second_event", "value": false}, {"key": "third_event", "value": false}]}'::jsonb,'yes'),
(2,'{}'::jsonb,'no'),
(3,'{"types": [{"key": "first_event", "value": false}]}'::jsonb,'yes')
) table_b(id,data,txt);  -- terminator added: the original statement was unterminated
This query updates each array element, keeping the order in the ORDINALITY column
-- Demo step: explode data->'types' into one row per array element.
-- WITH ORDINALITY exposes each element's 1-based position (index) so the
-- original array order can be restored later; jsonb_set adds the "can"
-- key to every extracted object.
with table_b2 as (
select
id,item, index,
jsonb_set(item,('{"can"}')::TEXT[],'["test1", "test2"]') new_item
from table_b cross join
jsonb_array_elements(data -> 'types') WITH ORDINALITY arr(item, index)
where txt = 'yes')
select * from table_b2
|
id|item |index|new_item |
--+---------------------------------------+-----+------------------------------------------------------------------+
1|{"key": "first_event", "value": false} | 1|{"can": ["test1", "test2"], "key": "first_event", "value": false} |
1|{"key": "second_event", "value": false}| 2|{"can": ["test1", "test2"], "key": "second_event", "value": false}|
1|{"key": "third_event", "value": false} | 3|{"can": ["test1", "test2"], "key": "third_event", "value": false} |
3|{"key": "first_event", "value": false} | 1|{"can": ["test1", "test2"], "key": "first_event", "value": false} |
The next query concatenates the updated elements back to the array (keeping the right order) and performs a simple update
-- Step 1: explode data->'types' into one row per element and add the
-- "can" key to each object (WITH ORDINALITY preserves element order).
-- The original version also built an unused '{types,<i>,"can"}' path
-- column here; it has been removed.
with table_b2 as (
select
id, item, index,
jsonb_set(item, ('{"can"}')::TEXT[], '["test1", "test2"]') new_item
from table_b cross join
jsonb_array_elements(data -> 'types') WITH ORDINALITY arr(item, index)
where txt = 'yes'),
-- Step 2: re-aggregate the modified elements into one array per row,
-- keeping the original order via the ordinality index.
table_b3 as (
select
id,
jsonb_agg(new_item order by index) new_data
from table_b2
group by id)
update table_b t
set data = table_b3.new_data
from table_b3
where t.id = table_b3.id;  -- terminator added: the UPDATE and the SELECT below are separate statements
-- NOTE(review): as the sample output below shows, this stores the bare
-- array and drops the {"types": ...} wrapper; use
-- jsonb_set(data, '{types}', table_b3.new_data) in SET to keep it.
select * from table_b order by id;
id|data |txt|
--+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+---+
1|[{"can": ["test1", "test2"], "key": "first_event", "value": false}, {"can": ["test1", "test2"], "key": "second_event", "value": false}, {"can": ["test1", "test2"], "key": "third_event", "value": false}]|yes|
2|{} |no |
3|[{"can": ["test1", "test2"], "key": "first_event", "value": false}] |yes|

extract nested values as a list

I have a table with a column "TAGS". Each row in this column has a bunch of dictionaries separated by commas. It looks like this:
{
"id": "334",
"name": "A",
"synonyms": "tul",
"path": [
"179",
"1689",
]
},
{
"id": "8999",
"name": "B",
"synonyms": "hh",
"path": [
"1098",
"167",
]
}
I want to create a new column "NAMES" that contains a list of all names. For example this:
NAMES
["A", "B"]
Select * from TAGS_TABLE
How can I do this?
Well your data is "almost" JSON, so if we convert it to json, we can then parse it, and flatten it:
-- Snowflake: the raw text is a comma-separated list of objects, i.e.
-- "almost" JSON.  Wrapping each row in '[' ... ']' turns it into a valid
-- JSON array that PARSE_JSON can ingest.
with data as (
select parse_json('['||column1||']') as json from values
('{
"id": "334",
"name": "A",
"synonyms": "tul",
"path": [
"179",
"1689",
]
},
{
"id": "8999",
"name": "B",
"synonyms": "hh",
"path": [
"1098",
"167",
]
}'),
('{
"id": "334",
"name": "C",
"synonyms": "tul",
"path": [
"179",
"1689",
]
},
{
"id": "8999",
"name": "D",
"synonyms": "hh",
"path": [
"1098",
"167",
]
}')
)
-- FLATTEN explodes each array into one row per object; array_agg then
-- rebuilds a single list of names per source row (f.seq identifies the
-- input row, f.index the element's position within its array).
select array_agg(f.value:name) within group (order by f.index) as output
from data d,
lateral flatten(input=>d.json) f
group by f.seq
order by f.seq
gives:
OUTPUT
[ "A", "B" ]
[ "C", "D" ]
REGEXP_SUBSTR_ALL
As already given to you in your other question...
-- Alternative: skip JSON parsing entirely and pull every "name" value
-- straight out of the raw text; the 'e' flag makes REGEXP_SUBSTR_ALL
-- return capture group 1 (the value between the quotes) for each match.
select regexp_substr_all(column1, '"name"\\s*:\\s*"([^"]*)"',1,1,'e') as answer
from data;

Query data from a text file and get a JSON column

Using SQL Server 2019 Express Edition.
I have a text file like this:
/type/author /authors/OL1002354A 2 2008-08-20T18:07:53.62084 {"name": "Don L. Brigham", "personal_name": "Don L. Brigham", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:07:53.62084"}, "key": "/authors/OL1002354A", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL100246A 1 2008-04-01T03:28:50.625462 {"name": "Talib Samat.", "personal_name": "Talib Samat.", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL100246A", "type": {"key": "/type/author"}, "revision": 1}
/type/author /authors/OL1002700A 1 2008-04-01T03:28:50.625462 {"name": "Bengt E. Gustafsson Symposium (5th 1988 Stockholm, Sweden)", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL1002700A", "type": {"key": "/type/author"}, "revision": 1}
/type/author /authors/OL1002807A 2 2008-08-20T18:12:02.683498 {"name": "Ary J. Lamme", "personal_name": "Ary J. Lamme", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:12:02.683498"}, "key": "/authors/OL1002807A", "birth_date": "1940", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL1002994A 5 2012-03-03T06:50:39.836886 {"name": "R. Baxter Miller", "personal_name": "R. Baxter Miller", "created": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "photos": [7075806, 6974916], "last_modified": {"type": "/type/datetime", "value": "2012-03-03T06:50:39.836886"}, "latest_revision": 5, "key": "/authors/OL1002994A", "type": {"key": "/type/author"}, "revision": 5}
/type/author /authors/OL100301A 1 2008-04-01T03:28:50.625462 {"name": "Ghazali Basri.", "personal_name": "Ghazali Basri.", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL100301A", "type": {"key": "/type/author"}, "revision": 1}
/type/author /authors/OL1003201A 2 2008-08-20T18:14:55.775993 {"name": "Robert Smaus", "personal_name": "Robert Smaus", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:14:55.775993"}, "key": "/authors/OL1003201A", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL1003202A 2 2008-08-20T18:14:56.005766 {"name": "Richard Mark Friedhoff", "personal_name": "Richard Mark Friedhoff", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:14:56.005766"}, "key": "/authors/OL1003202A", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL1003235A 1 2008-04-01T03:28:50.625462 {"name": "Hunbatz Men", "personal_name": "Hunbatz Men", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL1003235A", "birth_date": "1941", "type": {"key": "/type/author"}, "revision": 1}
/type/author /authors/OL1003719A 1 2008-04-01T03:28:50.625462 {"name": "NATO Advanced Research Workshop on Ras Oncogenes (1988 Athens, Greece)", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL1003719A", "type": {"key": "/type/author"}, "revision": 1}
/type/author /authors/OL1003744A 2 2008-08-20T18:20:16.351762 {"name": "Jeanne Thieme", "personal_name": "Jeanne Thieme", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:20:16.351762"}, "key": "/authors/OL1003744A", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL1003901A 2 2008-08-20T18:21:31.331678 {"name": "Kiiti Morita", "personal_name": "Kiiti Morita", "last_modified": {"type": "/type/datetime", "value": "2008-08-20T18:21:31.331678"}, "key": "/authors/OL1003901A", "birth_date": "1915", "type": {"key": "/type/author"}, "revision": 2}
/type/author /authors/OL1004047A 1 2008-04-01T03:28:50.625462 {"name": "Murphy, William M.", "personal_name": "Murphy, William M.", "last_modified": {"type": "/type/datetime", "value": "2008-04-01T03:28:50.625462"}, "key": "/authors/OL1004047A", "birth_date": "1942", "type": {"key": "/type/author"}, "revision": 1}
The columns are delimited by tabulation and rows by line feed.
I need to get the data inside the 4th column that is a JSON structure. For example I need the value of all "name" attributes.
I've imported the data using SSIS into a table and then I can CROSS APPLY OPENJSON(json_column) just fine to get the keys and values. But I was wondering if that couldn't be done with SQL/TSQL alone, using OPENROWSET directly and working with just the column that is formatted in JSON. Tried using OPENROWSET with CROSS APPLY OPENJSON(BulkColumn) but cannot be done since the rest of the columns aren't JSON formatted.
Any idea on how to avoid this error or a different approach?
You can use BULK INSERT to get the file into a temp-table and get it parsed as Tab-delimited file. Then using OPENJSON to get the JSON-data. The following worked for me:
DROP TABLE IF EXISTS #Temp;
-- Staging table: one column per tab-delimited field of the input file;
-- the 5th field carries the raw JSON document.
CREATE TABLE #Temp (
/* Just some random column names*/
Author NVARCHAR(100),
AuthorPath NVARCHAR(100),
IntValue INT,
Created DATETIME2(3),
JsonData NVARCHAR(MAX)
);
-- Load the tab-delimited / newline-terminated file.
-- NOTE(review): adjust the file path to your environment.
BULK INSERT #Temp
FROM 'C:\Users\andre\Documents\temp\test.txt'
WITH (
FIELDTERMINATOR = '\t', --Tab delimited
ROWTERMINATOR = '\n' --New-line character for row termination
)
-- Shred the JSON column: OPENJSON with a WITH clause projects $.name as
-- a typed relational column for every staged row.
SELECT
Temp.*,
JsonData.[name]
FROM #Temp Temp
CROSS APPLY OPENJSON(Temp.JsonData,'$')
WITH(
[name] NVARCHAR(200) '$.name'
) JsonData

PostgreSQL (v9.6) query that filters JSON array elements by key/value

We have a jsonb column with data of the type:
"basket": {
"total": 6,
"items": [
{ "type": "A", "name": "A", "price": 1 },
{ "type": "A", "name": "B", "price": 2 },
{ "type": "C", "name": "C", "price": 3 },
]
}
We need to construct few queries that will filter specific elements of the items[] array for SELECT and SUM.
We have PG v9.6 so using jsonb_path_query_array didn't work.
Using basket->'items' #> '{"type":"A"}' works to find all entries that has type-A.
But how do we get subquery to
select only basket items of type-A
sum of prices of items of type-A
Thank you!
This will select the required items:
-- Unnest basket->items with jsonb_array_elements (works on PG 9.6, where
-- jsonb_path_query_array is unavailable) and keep only the elements whose
-- "type" key equals 'A'.
select * from jsonb_array_elements('{"basket":
{
"total": 6,
"items": [
{ "type": "A", "name": "A", "price": 1 },
{ "type": "A", "name": "B", "price": 2 },
{ "type": "C", "name": "C", "price": 3 }
]
}}'::jsonb#>'{basket,items}') e(it)
where it->>'type' = 'A';
and this the sum of prices:
-- Same unnesting, but ->> yields text, so "price" is cast to numeric
-- before summing over the type-'A' elements only.
select sum(cast(it->>'price' as numeric)) from jsonb_array_elements('{"basket":
{
"total": 6,
"items": [
{ "type": "A", "name": "A", "price": 1 },
{ "type": "A", "name": "B", "price": 2 },
{ "type": "C", "name": "C", "price": 3 }
]
}}'::jsonb#>'{basket,items}') e(it)
where it->>'type' = 'A';

array of json object

database screenshot [
{
"id": "901651",
"supplier_id": "180",
"price": "18.99",
"product_id": "books",
"name": "bookmate",
"quantity": "1"
},
{
"id": "1423326",
"supplier_id": "180",
"price": "53.99",
"product_id": "books",
"name": "classmate",
"quantity": "5"
}
]
"
[{"id":"3811088","supplier_id":"2609","price":"22.99","product_id":"book","name":"classmate","quantity":"10"}]"
I have my purchased-book details stored in an array of JSON objects in a field named items in table purchase_list. Each row corresponds to one order, and the field may contain one or many book entries; there are multiple orders like this. How can I get the total quantity purchased of each type of book, using only a PostgreSQL query (to generate a Jasper report)? For example: classmate: 15, bookmate: 1.
You can unnest the array and aggregate it:
t=# with c(j) as (values('[
{
"id": "901651",
"supplier_id": "180",
"price": "18.99",
"product_id": "books",
"name": "bookmate",
"quantity": "1"
},
{
"id": "1423326",
"supplier_id": "180",
"price": "53.99",
"product_id": "books",
"name": "classmate",
"quantity": "5"
}
,{"id":"3811088","supplier_id":"2609","price":"22.99","product_id":"book","name":"classmate","quantity":"10"}]'::jsonb))
, agg as (select jsonb_array_elements(j) jb from c)
, mid as (select format('"%s":"%s"',jb->>'name',sum((jb->>'quantity')::int)) from agg group by jb->>'name')
select format('{%s}',string_agg(format,','))::jsonb from mid;
format
--------------------------------------
{"bookmate": "1", "classmate": "15"}
(1 row)
looks ugly, but gives the idea