PostgreSQL Output to JSON format

I have data that needs to be returned in JSON format. I am using the following query:
WITH cte AS (
SELECT 'docker' AS type, '564df5a6sdf4654f6da4sf56a' AS id, 1 AS segment, 1 AS value
UNION ALL
SELECT 'docker' AS type, '564df5a6sdf4654f6da4sf56a' AS id, 2 AS segment, 100 AS value
)
SELECT type
, id
, json_agg(json_build_object(segment, value)) AS json_result
FROM cte
GROUP BY type
, id
The result for the json_result column is: [{"1" : 1}, {"2" : 100}]
But the desired result is: {"1" : 1, "2" : 100}
How can I adjust the query so it returns the desired output?

Use json_object_agg():
SELECT type
, id
, json_object_agg(segment, value) AS json_result
FROM cte
GROUP BY type
, id
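Putting the two together, the complete query and the output it should produce:
WITH cte AS (
SELECT 'docker' AS type, '564df5a6sdf4654f6da4sf56a' AS id, 1 AS segment, 1 AS value
UNION ALL
SELECT 'docker' AS type, '564df5a6sdf4654f6da4sf56a' AS id, 2 AS segment, 100 AS value
)
SELECT type
, id
, json_object_agg(segment, value) AS json_result
FROM cte
GROUP BY type
, id;
-- json_result: {"1" : 1, "2" : 100}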


Oracle JSON output group by key

I generate SQL statements dynamically from an input file, and I want the output in JSON format, grouped by a key that I provide in an alias in the SELECT statement.
The input file comes from another system and it looks like this:
'abc' AS [x.test1],
'cde' AS [y.test2],
'fgh' AS [y.test3]
In SQL Server I have a working query like this:
SELECT
(SELECT
'abc' AS [x.test1],
'cde' AS [y.test2],
'fgh' AS [y.test3]
FROM "dbo"."TEST"
FOR JSON PATH,
WITHOUT_ARRAY_WRAPPER
) AS RESULT
It returns this output, grouped by key, which is exactly what I want:
{"x":{"test1":"abc"},"y":{"test2":"cde","test3":"fgh"}}
I want to achieve the same output with Oracle.
Currently I have got this far:
SELECT
(
SELECT json_object(
KEY '[x.test1]' IS 'abc',
KEY '[y.test2]' IS 'cde',
KEY '[y.test3]' IS 'fgh'
)
FROM test
)
AS RESULT from DUAL;
The problem is that this doesn't group my output by key:
{"[x.test1]":"abc","[y.test2]":"cde","[y.test3]":"fgh"}
You could nest json_object() calls:
SELECT json_object(
KEY 'x' IS json_object(
KEY 'test1' IS 'abc'
),
KEY 'y' IS json_object(
KEY 'test2' IS 'cde',
KEY 'test3' IS 'fgh'
)
)
AS RESULT from DUAL;
RESULT
{"x":{"test1":"abc"},"y":{"test2":"cde","test3":"fgh"}}
fiddle
Or, since you refer to grouping: if your data is really coming from tables, you could use json_objectagg() over the table data, with something like:
select json_object(
'x' value json_object(x.j_key value x.j_value),
'y' value json_objectagg(y.j_key, y.j_value)
) as result
from x
left join y on y.id = x.id
group by x.id, x.j_key, x.j_value
RESULT
{"x":{"test1":"abc"},"y":{"test2":"cde","test3":"fgh"}}
fiddle
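The x and y tables aren't shown in the question, so here is a minimal guess at the sample data behind that query (the table and column names come from the query itself; the exact DDL is an assumption):
-- hypothetical sample tables: x holds the single x.* attribute, y holds the y.* attributes
CREATE TABLE x (id NUMBER, j_key VARCHAR2(30), j_value VARCHAR2(100));
CREATE TABLE y (id NUMBER, j_key VARCHAR2(30), j_value VARCHAR2(100));
INSERT INTO x VALUES (1, 'test1', 'abc');
INSERT INTO y VALUES (1, 'test2', 'cde');
INSERT INTO y VALUES (1, 'test3', 'fgh');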
And if you need to parse the raw input-file string itself, you can pull it apart with regular expressions and rebuild the grouped JSON from the pieces:
WITH data (expr) AS (
-- the raw input-file line as a single string
SELECT q'~'abc' AS [x.test1],'cde' AS [y.test2],'fgh' AS [y.test3]~' FROM DUAL
),
rdata(expr) AS (
-- one row per comma-separated expression
SELECT regexp_substr(expr,'[^,]+',1,LEVEL) AS expr
FROM data
CONNECT BY regexp_substr(expr,'[^,]+',1,LEVEL) IS NOT NULL
),
exprs AS (
-- extract the quoted literal (val) and the bracketed alias path from each expression
SELECT expr, regexp_substr(expr, q'~'(.*)'~', 1, 1, 'i', 1) as val,
regexp_substr(expr, q'~\[(.*)\]~', 1, 1, 'i', 1) as path
FROM rdata
),
spaths AS (
-- split each dotted path into levels: lvl 1 is the group key (x/y), lvl 2 the attribute name
SELECT e.*, LEVEL as lvl, regexp_substr(path,'[^\.]+',1,LEVEL) as pitem
FROM exprs e
CONNECT BY regexp_substr(path,'[^\.]+',1,LEVEL) IS NOT NULL AND prior val = val AND PRIOR sys_guid() IS NOT NULL
)
-- pair each group key with its attributes and aggregate per group
SELECT json_object(
s.pitem VALUE json_objectagg(
p.pitem VALUE p.val
)
) AS js
FROM spaths s
JOIN spaths p ON s.val = p.val AND p.lvl = 2
WHERE s.lvl = 1
GROUP BY s.pitem
;
JS
{"x":{"test1":"abc"}}
{"y":{"test2":"cde","test3":"fgh"}}

How to convert a JSON array to multiple columns in Hive

example:
There is a JSON array column (type: string) in a Hive table like:
"[{"field":"name", "value":"alice"}, {"field":"age", "value":"14"}......]"
How can I convert it into:
name age
alice 14
using Hive SQL?
I've tried lateral view explode but it's not working.
Thanks a lot!
This is a working example of how it can be parsed in Hive. Customize it yourself and debug on real data; see the comments in the code:
with your_table as (
select stack(1,
1,
'[{"field":"name", "value":"alice"}, {"field":"age", "value":"14"}, {"field":"something_else", "value":"somevalue"}]'
) as (id,str) --one row table with id and string with json. Use your table instead of this example
)
select id,
max(case when field_map['field'] = 'name' then field_map['value'] end) as name,
max(case when field_map['field'] = 'age' then field_map['value'] end) as age --do the same for all fields
from
(
select t.id,
t.str as original_string,
str_to_map(regexp_replace(regexp_replace(trim(a.field),', +',','),'\\{|\\}|"','')) field_map --remove extra characters and convert to map
from your_table t
lateral view outer explode(split(regexp_replace(regexp_replace(str,'\\[|\\]',''),'\\},','}|'),'\\|')) a as field --remove [], replace "}," with '}|" and explode
) s
group by id --aggregate in single row
;
Result:
OK
id name age
1 alice 14
One more approach using get_json_object:
with your_table as (
select stack(1,
1,
'[{"field":"name", "value":"alice"}, {"field":"age", "value":"14"}, {"field":"something_else", "value":"somevalue"}]'
) as (id,str) --one row table with id and string with json. Use your table instead of this example
)
select id,
max(case when field = 'name' then value end) as name,
max(case when field = 'age' then value end) as age --do the same for all fields
from
(
select t.id,
get_json_object(trim(a.field),'$.field') field,
get_json_object(trim(a.field),'$.value') value
from your_table t
lateral view outer explode(split(regexp_replace(regexp_replace(str,'\\[|\\]',''),'\\},','}|'),'\\|')) a as field --remove [], replace "}," with '}|" and explode
) s
group by id --aggregate in single row
;
Result:
OK
id name age
1 alice 14

How to parse JSON in Standard SQL BigQuery?

After streaming some json data into BQ, we have a record that looks like:
"{\"Type\": \"Some_type\", \"Identification\": {\"Name\": \"First Last\"}}"
How would I extract the type from this? E.g. I would like to get Some_type.
I tried all the combinations shown in https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions without success. In particular, I thought:
SELECT JSON_EXTRACT_SCALAR(raw_json , "$[\"Type\"]") as parsed_type FROM `table` LIMIT 1000
is what I need. However, I get:
Invalid token in JSONPath at: ["Type"]
The example below is for BigQuery Standard SQL:
#standardSQL
WITH `project.dataset.table` AS (
SELECT 1 id, "{\"Type\": \"Some_type\", \"Identification\": {\"Name\": \"First Last\"}}" raw_json UNION ALL
SELECT 2, '{"Type": "Some_type", "Identification": {"Name": "First Last"}}'
)
SELECT id, JSON_EXTRACT_SCALAR(raw_json , "$.Type") AS parsed_type
FROM `project.dataset.table`
with the result:
Row id parsed_type
1 1 Some_type
2 2 Some_type
See the updated example below - take a look at the third record, which I think mimics your case:
#standardSQL
WITH `project.dataset.table` AS (
SELECT 1 id, "{\"Type\": \"Some_type\", \"Identification\": {\"Name\": \"First Last\"}}" raw_json UNION ALL
SELECT 2, '''{"Type": "Some_type", "Identification": {"Name": "First Last"}}''' UNION ALL
SELECT 3, '''"{\"Type\": \"null1\"}"'''
)
SELECT id,
JSON_EXTRACT_SCALAR(REGEXP_REPLACE(raw_json, r'^"|"$', '') , "$.Type") AS parsed_type
FROM `project.dataset.table`
with the result:
Row id parsed_type
1 1 Some_type
2 2 Some_type
3 3 null1
Note: I use null1 instead of null so you can easily see that it is not a NULL but rather the string null1.
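The same approach also works for the nested key; a minimal sketch (reusing the first example's row) that extracts both Type and Identification.Name:
#standardSQL
WITH `project.dataset.table` AS (
SELECT 1 id, "{\"Type\": \"Some_type\", \"Identification\": {\"Name\": \"First Last\"}}" raw_json
)
SELECT id,
JSON_EXTRACT_SCALAR(raw_json, "$.Type") AS parsed_type,
JSON_EXTRACT_SCALAR(raw_json, "$.Identification.Name") AS parsed_name
FROM `project.dataset.table`
-- parsed_type = Some_type, parsed_name = First Last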

Extract the second matching substring using PostgreSQL

I use the query below to extract a value from a column that stores JSON objects.
The issue is that it only pulls the first value matching the regex inside SUBSTRING, which is -$4,000.00. Is there a parameter I can pass to SUBSTRING so it also pulls the value -$1,990.00 into another column?
SELECT attribute_actions_text
, SUBSTRING(attribute_actions_text FROM '"Member [Dd]iscount:":"(.+?)"') AS column_1
, '' AS column_2
FROM (
VALUES
('[{"Member Discount:":"-$4,000.00"},{"Member discount:":"-$1,990.00"}]')
, (NULL)
) ls(attribute_actions_text)
Desired result :
column_1 column_2
-$4,000.00 -$1,990.00
Try this:
WITH data(id,attribute_actions_text) as (
VALUES
(1,'[{"Member Discount:":"-$4,000.00"},{"Member Discount:":"-$1,990.00"}]')
, (2,'[{"Member Discount:":"-$4,200.00"},{"Member Discount:":"-$1,890.00"}]')
, (3,NULL)
), match as (
SELECT
id,
m,
ROW_NUMBER()
OVER (PARTITION BY id) AS r
FROM data, regexp_matches(data.attribute_actions_text, '"Member [Dd]iscount:":"(.+?)"', 'g') AS m
)
SELECT
id
,(select m from match where id = d.id AND r=1) as col1
,(select m from match where id = d.id AND r=2) as col2
FROM data d
Result
1,"{-$4,000.00}","{-$1,990.00}"
2,"{-$4,200.00}","{-$1,890.00}"
3,NULL,NULL
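Note that regexp_matches() returns a text[] array, which is why the values above come back wrapped in braces. A sketch of the same query that indexes into the first capture group to get the bare strings (only the final SELECT changes; as in the answer above, ROW_NUMBER() has no ORDER BY and relies on the order the matches are produced in):
WITH data(id,attribute_actions_text) as (
VALUES
(1,'[{"Member Discount:":"-$4,000.00"},{"Member Discount:":"-$1,990.00"}]')
, (2,NULL)
), match as (
SELECT
id,
m,
ROW_NUMBER()
OVER (PARTITION BY id) AS r
FROM data, regexp_matches(data.attribute_actions_text, '"Member [Dd]iscount:":"(.+?)"', 'g') AS m
)
SELECT
id
,(select m[1] from match where id = d.id AND r=1) as col1
,(select m[1] from match where id = d.id AND r=2) as col2
FROM data d
-- 1, -$4,000.00, -$1,990.00
-- 2, NULL, NULL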

How can I replace a column value with a name in an SQL query?

I am running this query:
SELECT type, COUNT(type) FROM "table" where id = 8 GROUP BY type;
With this result:
6 3814
8 341
5 328
I'd like to have something like this, where I can specify names:
Arbitrary Name 3814
Other name 341
Test Name 328
Instead of the type column listing 6, how can I get it to have a custom name like Test Column 6, or Fun Column 5? I'm using postgres.
SELECT
CASE WHEN type = 6 THEN 'SOME TEXT' ELSE 'OTHER TEXT' END AS type_name,
COUNT(type)
FROM "table" WHERE id = 8 GROUP BY type;
1st) If you have a lot of distinct "type" values in your records, then you should create a new table to store their information (typeid, name, detail) and join to it:
Example
SELECT typegroup.name, COUNT("table".type)
FROM "table" JOIN typegroup ON typegroup.typeid = "table".type
WHERE "table".id = 8
GROUP BY typegroup.name;
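For reference, a minimal sketch of the lookup table this assumes (typegroup and its columns are hypothetical names; the labels are taken from the question):
CREATE TABLE typegroup (typeid integer PRIMARY KEY, name text NOT NULL, detail text);
INSERT INTO typegroup (typeid, name) VALUES
(6, 'Arbitrary Name'),
(8, 'Other name'),
(5, 'Test Name');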
2nd) If you don't have a lot of distinct "type" values in your records, then you can map them inline with a CASE expression:
SELECT
CASE type WHEN 6 THEN 'Arbitrary Name'
          WHEN 8 THEN 'Other name'
          WHEN 5 THEN 'Test Name' END AS type_name,
COUNT(type)
FROM "table"
WHERE id = 8
GROUP BY type;