PL/SQL comma-delimited to JSON array conversion - sql

I have values in one column, delimited by commas:
Col1
a,b,c,d
I want to convert this into a JSON array. I know the JSON_ARRAY function is available from Oracle 12.2 onwards, but JSON_ARRAY turns multiple columns into an array, whereas my values are in a single column.
Desired output: ["a","b","c","d"]

You can use the JSON_ARRAYAGG() function instead of JSON_ARRAY(), without any PL/SQL, after splitting the comma-separated values into rows, such as:
WITH t(id, col1) AS
(
 SELECT 1, 'a,b,c,d' FROM dual UNION ALL
 SELECT 2, 'd,e,f,g,h,i' FROM dual
), t2 AS
(
 SELECT REGEXP_SUBSTR(col1, '[^,]+', 1, level) AS col, id
   FROM t
 CONNECT BY level <= REGEXP_COUNT(col1, ',') + 1
        AND PRIOR SYS_GUID() IS NOT NULL
        AND PRIOR col1 = col1
)
SELECT id, JSON_ARRAYAGG(col ORDER BY col RETURNING VARCHAR2(100)) AS "JSON value"
  FROM t2
 GROUP BY id
Demo
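For the sample rows above, this should produce output along these lines:

ID | JSON value
---+--------------------------
 1 | ["a","b","c","d"]
 2 | ["d","e","f","g","h","i"]

Note that ORDER BY col sorts the elements alphabetically rather than preserving their original position in the string.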

Just use replace:
SELECT '["'
||REPLACE(REPLACE(col1,'"','\"'),',','","')
||'"]' AS json_value
FROM table_name;
Or, in PL/SQL:
DECLARE
 col1 VARCHAR2(50) := 'a,b,c,d';
 json VARCHAR2(50);
BEGIN
 json := '["' || REPLACE(REPLACE(col1, '"', '\"'), ',', '","') || '"]';
 DBMS_OUTPUT.PUT_LINE(json);
END;
/
db<>fiddle here

Related

How can I get the value along with the key from JSON_DATAGUIDE (dynamically)?

JSON_DATAGUIDE gives me only keys, not values, like "$.a". How can I get key-value pairs, as in the example below?
select json_dataguide('{a:100, b:200, c:300}')
from dual;
JSON_DATAGUIDE('{A:100,B:200,C:300}')
--------------------------------------------------------------------------------
[{"o:path":"$.a","type":"number","o:length":4},{"o:path":"$.b","type":"number","
o:length":4},{"o:path":"$.c","type":"number","o:length":4}]
I need it like this, as a table:
Key | Value
----+------
a   | 100
b   | 200
c   | 300
I want to do this without DECLARE, BEGIN, etc., using only built-in functions such as json_table and json_dataguide. I don't want to declare a function or anything like that.
An alternative would be
declare
 j JSON_OBJECT_T;
 k JSON_KEY_LIST;
 CURSOR c_json IS
  SELECT '{a:100, b:200, c:300}' as myJsonCol from dual;
begin
 FOR rec IN c_json
 LOOP
  j := JSON_OBJECT_T.parse(rec.myJsonCol);
  k := j.get_keys;
  dbms_output.put_line('KEY VAL');
  FOR i IN 1 .. k.COUNT -- the loop index is declared implicitly by the FOR loop
  LOOP
   dbms_output.put_line(k(i) || ' ' || j.get_number(k(i)));
  END LOOP;
 END LOOP;
END;
/
Demo: db<>fiddle
Then, if you want, you can store the result in a SYS_REFCURSOR, or even create a table function, as sketched below.
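A minimal sketch of that table-function idea (the type and function names here are hypothetical; it assumes Oracle 12.2+ for the JSON_OBJECT_T API):

CREATE TYPE kv_row AS OBJECT (kname VARCHAR2(100), kval NUMBER);
/
CREATE TYPE kv_tab AS TABLE OF kv_row;
/
CREATE OR REPLACE FUNCTION json_to_kv(p_json IN VARCHAR2)
RETURN kv_tab PIPELINED
IS
 j JSON_OBJECT_T := JSON_OBJECT_T.parse(p_json);
 k JSON_KEY_LIST := j.get_keys;
BEGIN
 FOR i IN 1 .. k.COUNT LOOP
  PIPE ROW (kv_row(k(i), j.get_number(k(i)))); -- one row per key/value pair
 END LOOP;
 RETURN;
END;
/
-- usage: SELECT * FROM TABLE(json_to_kv('{a:100, b:200, c:300}'));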
In later Oracle versions, you can include functions in a sub-query factoring (WITH) clause of a SELECT statement. Then, you can use this answer:
WITH FUNCTION get_key(
 pos  IN PLS_INTEGER,
 json IN CLOB
) RETURN VARCHAR2
AS
 doc_keys JSON_KEY_LIST;
BEGIN
 doc_keys := JSON_OBJECT_T.PARSE( json ).GET_KEYS;
 RETURN doc_keys( pos );
END get_key;
SELECT get_key( j.pos, t.value ) AS key,
       j.value
FROM   table_name t
       CROSS APPLY JSON_TABLE(
         t.value,
         '$.*'
         COLUMNS (
           pos   FOR ORDINALITY,
           value PATH '$'
         )
       ) j;
Which, for the sample data:
CREATE TABLE table_name ( value VARCHAR2(4000) CHECK (value IS JSON) );
INSERT INTO table_name (value) VALUES ('{a:100, b:200, c:300}');
Outputs:
KEY | VALUE
----+------
a   | 100
b   | 200
c   | 300
"Only built-in functions, for example json_table, json_dataguide"
You are going to struggle with those limitations, because:
JSON_QUERY only allows a literal value for the path; you cannot pass a dynamic path.
JSON_TABLE does appear to allow dynamic paths in the COLUMNS clause, but does not return a value for those dynamic paths.
For example:
SELECT t.value AS json,
       SUBSTR(p.path, 3) AS key,
       JSON_QUERY(t.value, p.path) AS value
FROM   table_name t
       CROSS JOIN LATERAL (
         SELECT JSON_DATAGUIDE(t.value) AS data
         FROM   DUAL
       ) d
       CROSS JOIN LATERAL (
         SELECT path
         FROM   JSON_TABLE(
                  d.data,
                  '$[*]'
                  COLUMNS (
                    path VARCHAR2(20) PATH '$."o:path"'
                  )
                )
       ) p;
Outputs:
ORA-40454: path expression not a literal
and:
SELECT t.value AS json,
       SUBSTR(p.path, 3) AS key,
       v.val AS value
FROM   table_name t
       CROSS JOIN LATERAL (
         SELECT JSON_DATAGUIDE(t.value) AS data
         FROM   DUAL
       ) d
       CROSS JOIN LATERAL (
         SELECT path
         FROM   JSON_TABLE(
                  d.data,
                  '$[*]'
                  COLUMNS (
                    path VARCHAR2(20) PATH '$."o:path"'
                  )
                )
       ) p
       CROSS JOIN LATERAL (
         SELECT val
         FROM   JSON_TABLE(
                  t.value,
                  '$'
                  COLUMNS (
                    val VARCHAR2(20) PATH p.path
                  )
                )
       ) v;
Outputs:
JSON                        | KEY | VALUE
----------------------------+-----+-------
{"a":100, "b":200, "c":300} | a   | <null>
{"a":100, "b":200, "c":300} | b   | <null>
{"a":100, "b":200, "c":300} | c   | <null>
Although the query runs, it does not dynamically get the value. (Note: the query would work if you used a literal path instead of a dynamic path.)
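To illustrate that note, a minimal example with a literal path, which does return the value:

SELECT v.val
FROM   table_name t,
       JSON_TABLE(
         t.value,
         '$'
         COLUMNS ( val VARCHAR2(20) PATH '$.a' )
       ) v;

VAL
---
100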
db<>fiddle here

ORA-00932: inconsistent datatypes: expected - got CLOB - while using clob in regexp_substr function

I am trying to use a CLOB variable in the REGEXP_SUBSTR function, as below:
UPDATE TableName
SET Tab_DATE = SYSDATE
WHERE Tab_ID IN (
select regexp_substr(clob_variable,'[^~]+', 1, level) from dual
connect by regexp_substr(clob_variable, '[^~]+', 1, level) is not null
)
clob_variable is of type CLOB and contains ~-separated ids.
While executing the update statement I get the error below:
ORA-00932: inconsistent datatypes: expected - got CLOB
Can we use a CLOB with regular expressions? If not, is there any way to convert CLOB values to tabular format?
The data should be converted from CLOB to a string (CHAR/VARCHAR2, or a numeric type, treating the Tab_ID column as an INTEGER in this case), such as:
UPDATE TableName
SET Tab_DATE = SYSDATE
WHERE Tab_ID IN
(
 SELECT TO_NUMBER(REGEXP_SUBSTR(clb, '[^~]+', 1, level))
   FROM (SELECT clb
           FROM t -- the other table with the CLOB column
        CONNECT BY level <= CEIL(DBMS_LOB.GETLENGTH(clb) / 4000))
 CONNECT BY level <= REGEXP_COUNT(clb, '~') + 1
        AND PRIOR SYS_GUID() IS NOT NULL
)
Try this:
UPDATE TableName
SET Tab_DATE = SYSDATE
WHERE Tab_ID IN (
 -- note: the pattern '[^~]+~' assumes the list ends with a trailing ~;
 -- without one, the last id would be missed
 select replace(dbms_lob.substr(regexp_substr(clob_variable, '[^~]+~', 1, level)), '~', '')
 from dual
 connect by dbms_lob.compare(regexp_substr(clob_variable, '[^~]+~', 1, level), empty_clob()) != 0
)
REGEXP_SUBSTR returns a CLOB when its first argument is a CLOB, but you can't compare CLOB content with anything using IN or =. So you need to convert the returned CLOBs to the data type of your TAB_ID.
So if your TAB_ID is a NUMBER, it should be:
UPDATE TableName
SET Tab_DATE = SYSDATE
WHERE Tab_ID IN (
 select to_number(to_char(regexp_substr(clob_variable, '[^~]+', 1, level)))
 from dual
 connect by level <= regexp_count(clob_variable, '[^~]+')
)
and if your TAB_ID is VARCHAR2 or CHAR:
UPDATE TableName
SET Tab_DATE = SYSDATE
WHERE Tab_ID IN (
 select to_char(regexp_substr(clob_variable, '[^~]+', 1, level))
 from dual
 connect by level <= regexp_count(clob_variable, '[^~]+')
)
Update:
It's much better to use collections for such things, instead of concatenated strings. Just create your own collection type, for example:
create or replace type numbers as table of number;
/
And bind your list of numbers as a collection, so your query will look like this:
select * from tablename where id in (select * from table(:numbers))
Example of using collections in queries:
SQL> select * from table(numbers(1,2,3,4));
COLUMN_VALUE
------------
1
2
3
4
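For completeness, a minimal sketch of using such a collection from PL/SQL for the original update (assuming Tab_ID is numeric; the sample values are placeholders):

DECLARE
 ids numbers := numbers(101, 102, 103); -- 'numbers' is the type created above
BEGIN
 UPDATE TableName
 SET Tab_DATE = SYSDATE
 WHERE Tab_ID IN (SELECT column_value FROM TABLE(ids));
END;
/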

Postgres: array value must start with "{" or dimension information

I want to make an array and put two ids into it, but I get an error:
array value must start with "{" or dimension information
ids_list character varying[] := ' || (SELECT COALESCE(quote_literal((array_agg(DISTINCT house_guid)) || ''',''' || quote_literal(array_agg(DISTINCT guid))), 'NULL') FROM tb) || ';
Use the array_agg function:
with t1 as
(
 select * from
 (
  select 'test_SQL_01' as ID
  union
  select 'test_SQL_02_PQR_01'
  union
  select 'test_SQL_03_055'
  union
  select 'test_SQL_04_ABC_99'
 ) as t
)
select array_agg(ID) from t1
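For those four sample values this returns a single array (element order is not guaranteed without an ORDER BY inside array_agg):

{test_SQL_01,test_SQL_02_PQR_01,test_SQL_03_055,test_SQL_04_ABC_99}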
You seem to be using this inside a PL/pgSQL function. You should be using SELECT ... INTO variable FROM... instead:
declare
 ids_list character varying[];
begin
 .....
 select array_agg(id)
   into ids_list
 from (
  select house_guid
  from tab
  union
  select guid
  from tab
 ) t;
 .... work with the ids_list variable
end;
The UNION will automatically remove all duplicates (as you tried to do with DISTINCT).

PostgreSQL: Get values of a record as multiple rows

Using PostgreSQL 9.3, I'm creating a JasperReports template to make a PDF report. I want to create reports of different tables, with multiple columns, all with the same template. A solution could be to get the values of a record as column name/value pairs per id.
By example, if I had a table like:
id | Column1 | Column2 | Column3
-------------------------------------------------
1 | Register1C1 | Register1C2 | Register1C3
I would like to get the record as:
Id | ColumnName | Value
-----------------------------
1 | Column1 | Register1C1
1 | Column2 | Register1C2
1 | Column3 | Register1C3
The data type of the value columns can vary!
Is it possible? How can I do this?
If all your columns share the same data type and order of rows does not have to be enforced:
SELECT t.id, v.*
FROM tbl t, LATERAL (
VALUES
('col1', col1)
, ('col2', col2)
, ('col3', col3)
-- etc.
) v(col, val);
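For the sample row above, this returns something like:

 id | col  | val
----+------+-------------
  1 | col1 | Register1C1
  1 | col2 | Register1C2
  1 | col3 | Register1C3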
About LATERAL (requires Postgres 9.3 or later):
What is the difference between LATERAL and a subquery in PostgreSQL?
Combining it with a VALUES expression:
Crosstab transpose query request
SELECT DISTINCT on multiple columns
For varying data types, the common denominator would be text, since every type can be cast to text. Plus, order enforced:
SELECT t.id, v.col, v.val
FROM tbl t, LATERAL (
VALUES
(1, 'col1', col1::text)
, (2, 'col2', col2::text)
, (3, 'col3', col3::text)
-- etc.
) v(rank, col, val)
ORDER BY t.id, v.rank;
In Postgres 9.4 or later use the new unnest() for multiple arrays:
SELECT t.id, v.*
FROM tbl t, unnest('{col1,col2,col3}'::text[]
, ARRAY[col1,col2,col3]) v(col, val);
-- , ARRAY[col1::text,col2::text,col3::text]) v(col, val);
The commented-out alternative is for varying data types.
Full automation for Postgres 9.4:
The query above is convenient to automate for a dynamic set of columns:
CREATE OR REPLACE FUNCTION f_transpose (_tbl regclass, VARIADIC _cols text[])
 RETURNS TABLE (id int, col text, val text) AS
$func$
BEGIN
 RETURN QUERY EXECUTE format(
    'SELECT t.id, v.* FROM %s t, unnest($1, ARRAY[%s]) v'
  , _tbl, array_to_string(_cols, '::text,') || '::text')
 -- , _tbl, array_to_string(_cols, ',')) -- simple alternative for only text
 USING _cols;
END
$func$ LANGUAGE plpgsql;
Call - with table name and any number of column names, any data types:
SELECT * FROM f_transpose('table_name', 'column1', 'column2', 'column3');
Weakness: the list of column names is not safe against SQL injection. You could gather column names from pg_attribute instead. Example:
How to perform the same aggregation on every column, without listing the columns?
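For example, a catalog query along these lines (a sketch; it skips dropped and system columns) could supply the column list safely:

SELECT attname
FROM   pg_attribute
WHERE  attrelid = 'table_name'::regclass
AND    attnum > 0
AND    NOT attisdropped
AND    attname <> 'id';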
SELECT id
,unnest(string_to_array('col1,col2,col3', ',')) col_name
,unnest(string_to_array(col1 || ',' || col2 || ',' || col3, ',')) val
FROM t
Try the following method. My sample table name is t; to get the column names you can use this query:
select string_agg(column_name,',') cols from information_schema.columns where
table_name='t' and column_name<>'id'
This query selects all columns in your table except the id column. If you want to specify a schema name, add table_schema='your_schema_name' to the where clause.
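For example, if t had the columns id, col1, col2 and col3 (an assumption for illustration), this would return:

cols
--------------
col1,col2,col3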
To create the select query dynamically:
SELECT 'select id,unnest(string_to_array(''' || cols || ''','','')) col_name,unnest(string_to_array(' || cols1 || ','','')) val from t'
FROM (
SELECT string_agg(column_name, ',') cols -- here we'll get all the columns in table t
,string_agg(column_name, '||'',''||') cols1
FROM information_schema.columns
WHERE table_name = 't'
AND column_name <> 'id'
) tb;
The following PL/pgSQL function dynamically creates the statement SELECT id,unnest(string_to_array('....')) col_name,unnest(string_to_array(.., ',')) val FROM t and executes it.
CREATE OR REPLACE FUNCTION fn ()
RETURNS TABLE (
 id INT
 ,columname TEXT
 ,columnvalues TEXT
) AS $$
DECLARE qry TEXT;
BEGIN
 SELECT 'select id,unnest(string_to_array(''' || cols || ''','','')) col_name,unnest(string_to_array(' || cols1 || ','','')) val from t'
 INTO qry
 FROM (
  SELECT string_agg(column_name, ',') cols
   ,string_agg(column_name, '||'',''||') cols1
  FROM information_schema.columns
  WHERE table_name = 't'
   AND column_name <> 'id'
 ) tb;
 RETURN QUERY EXECUTE qry; -- qry contains no % placeholders, so wrapping it in format() is unnecessary
END; $$
LANGUAGE plpgsql;
Call this function like select * from fn()

Oracle - PL/SQL: selecting from a SYS_REFCURSOR

I have a function that returns a SYS_REFCURSOR with a single row but multiple columns. What I'm looking to do is write a SQL query with nested sub-queries that use the column values returned in the SYS_REFCURSOR. Alternative ideas, such as types, would be appreciated. The code below was written on the fly and hasn't been validated for syntax.
--Oracle function
CREATE FUNCTION DummyFunction(dummyValue IN NUMBER) RETURN SYS_REFCURSOR
IS
 RETURN_DATA SYS_REFCURSOR;
BEGIN
 OPEN RETURN_DATA FOR
  SELECT
   TO_CHAR(dummyValue) || 'A' AS ColumnA
   ,TO_CHAR(dummyValue) || 'B' AS ColumnB
  FROM
   DUAL;
 RETURN RETURN_DATA;
END;
--sample query with sub-queries; does not work
SELECT
SELECT ColumnA FROM DummyFunction(1) FROM DUAL AS ColumnA
,SELECT ColumnB FROM DummyFunction(1) FROM DUAL AS ColumnB
FROM
DUAL;
A SYS_REFCURSOR won't work for the intended use - you need to create an Oracle object TYPE, plus a collection of it, so the result can be queried with TABLE():
CREATE TYPE your_type AS OBJECT (
 ColumnA VARCHAR2(100),
 ColumnB VARCHAR2(100)
);
/
CREATE TYPE your_type_tab AS TABLE OF your_type;
/
Update your function to build and return the collection:
CREATE OR REPLACE FUNCTION DummyFunction(dummyValue IN NUMBER)
RETURN your_type_tab
IS
BEGIN
 RETURN your_type_tab(
         your_type(TO_CHAR(dummyValue) || 'A',
                   TO_CHAR(dummyValue) || 'B'));
END;
/
Then you can use:
SELECT (SELECT ColumnA FROM table(DummyFunction(1))) AS ColumnA,
(SELECT ColumnB FROM table(DummyFunction(1))) AS ColumnB
FROM DUAL
The example is overcomplicated - all you need to use is:
SELECT x.columna,
x.columnb
FROM table(DummyFunction(1)) x
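which, for DummyFunction(1), should return:

COLUMNA | COLUMNB
--------+--------
1A      | 1B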