How to iterate over multiple keys using JSON_VALUE - sql

I have JSON stored in the database, that looks like this:
{
"EmpName": "John Doe",
"Department": "IT",
"Address-1": "101, Sector 1, NY",
"Address-2": "102, Sector 2, SC",
"Address-3": "103, Sector 3, NY",
"Address-4": "104, Sector 4, NY",
"Salary": 100000
}
I am trying to check if the json has the address "102, Sector 2, SC" in it or not using JSON_VALUE.
But there can be multiple addresses for a single JSON object, which are indexed like Address-1/2/3/4
Here is what I am trying to do:
select *
from emp
where JSON_VALUE(emp.index_data, '$."Address-[*]"') = '102, Sector 2, SC'
I know we cannot have that '[*]' in the key, but is there any way we can achieve this?

Try this.
-- Flatten the four possible address keys into columns, then filter on any of them.
-- NOTE: a key containing a hyphen must be quoted inside the JSON path
-- ('$."Address-1"'); the unquoted form '$.Address-1' fails with
-- "JSON path is not properly formatted. Unexpected character '-'".
SELECT *
FROM emp
CROSS APPLY OPENJSON(emp.index_data)
WITH (
    varAddressLine1 nvarchar(MAX) '$."Address-1"',
    varAddressLine2 nvarchar(MAX) '$."Address-2"',
    varAddressLine3 nvarchar(MAX) '$."Address-3"',
    varAddressLine4 nvarchar(MAX) '$."Address-4"'
)
WHERE varAddressLine1 = '102, Sector 2, SC'
   OR varAddressLine2 = '102, Sector 2, SC'
   OR varAddressLine3 = '102, Sector 2, SC'
   OR varAddressLine4 = '102, Sector 2, SC'

An option if you don't know the exact column names, is to use OPENJSON without a schema:
-- Shred each JSON object into key/value pairs and keep the row when any
-- Address-* key carries the target value. Works for any number of addresses.
SELECT *
FROM emp
WHERE EXISTS (
    SELECT 1
    FROM OPENJSON(emp.index_data) AS kv
    WHERE kv.[key] LIKE 'Address-%'
      AND kv.[value] = '102, Sector 2, SC'
)

Related

How to insert JSON to an existing table without specifying column names?

To a table with two columns, named Id and Name, how can I insert the following json as rows, but without explicitly specifying the column names of the table (i.e. without WITH (Id ..., Name ...)?
[
{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One",
...
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99,
...
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one does not have a Name value!!! 😱"
...
},
...
]
The question is basically how to make SQL match the key-name to its suitable column name, ignoring json values with keys that do not have suitable column names, resulting with the following table (for the above json example):
Id
Name
f08af9c2-8e67-4a7f-9413-1afffa2de96b
The One
9bbb094b-aa64-4c36-90a2-50e10f91c6a3
All is one
af9d22d8-1e46-4d57-8179-75f094d2efa1
NULL
...
...
Not sure why you want this, but you can also do this:
-- Pull just the Id/Name pair out of each array element;
-- JSON_VALUE returns NULL when the key is absent (e.g. the third object).
INSERT INTO YourTable (Id, Name)
SELECT
    JSON_VALUE(j.value, '$.Id'),
    JSON_VALUE(j.value, '$.Name')
FROM OPENJSON('[{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn''t have a Name value!!! 😱"
}]') AS j
EDIT dynamic version.
It has many caveats though, your column must match exactly the case in the json, it's very longwinded but if that's what floats your vessel...
-- Dynamic version: derives the column list from the target table's metadata
-- so the INSERT and JSON_VALUE expressions are generated, not hard-coded.
-- NOTE: T-SQL variables use '@' (the '#' prefix is only valid for temp
-- tables such as #cols below); 'declare #json' is a syntax error.
DECLARE @json nvarchar(max) = '[{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn''t have a Name value!!! 😱"
}]'
CREATE TABLE YourTable (Id nvarchar(100), Name nvarchar(MAX))
-- Capture the target table's column names, JSON-escaped for use in paths.
CREATE TABLE #cols (name sysname, row_id int IDENTITY)
INSERT INTO #cols (name)
SELECT STRING_ESCAPE(name, 'json')
FROM sys.columns sc
WHERE sc.object_id = OBJECT_ID('YourTable')
DECLARE @sql nvarchar(max)
SET @sql = N'
INSERT INTO YourTable ([COLS])
SELECT [JSONS]
FROM OPENJSON(@json) x'
-- Comma-separated, quoted column list for the INSERT target.
SET @sql = REPLACE(@sql, '[COLS]',
    STUFF((SELECT ',' + QUOTENAME(name) FROM #cols ORDER BY row_id FOR XML PATH(''), TYPE).value('.', 'nvarchar(max)'), 1, 1, ''))
-- Matching JSON_VALUE() expression per column; quotes are doubled so the
-- generated path survives inside the dynamic string.
SET @sql = REPLACE(@sql, '[JSONS]',
    STUFF((SELECT ', JSON_VALUE(x.value, ''$."' + REPLACE(name, '''', '''''') + '"'')'
           FROM #cols ORDER BY row_id FOR XML PATH(''), TYPE).value('.', 'nvarchar(max)'), 1, 1, ''))
-- Pass the JSON in as a proper parameter rather than concatenating it.
EXEC sp_executesql @sql, N'@json nvarchar(max)', @json = @json
SELECT * FROM YourTable
You can use the OPENJSON function in SQL Server to parse the JSON data and then use INSERT INTO to insert the data into the table.
-- OPENJSON ... WITH projects exactly the mapped columns (Id, Name), selected
-- explicitly below. NOTE: T-SQL escapes a quote inside a string literal by
-- doubling it (''); the backslash escape \' is invalid and breaks the statement.
INSERT INTO YourTable (Id, Name)
SELECT Id, Name
FROM OPENJSON('[
{
"Id": "f08af9c2-8e67-4a7f-9413-1afffa2de96b",
"SomeOtherKey": " ... ",
"Name": "The One"
},
{
"Name": "All is one",
"Id": "9bbb094b-aa64-4c36-90a2-50e10f91c6a3",
"Whatever": 99
},
{
"Id": "af9d22d8-1e46-4d57-8179-75f094d2efa1",
"SomeArrayWhyNot": [0, 1, 1, 2, 3, 5, 8, 13, 21],
"Surprise": "This one doesn''t have a Name value!!! 😱"
}
]') WITH (Id nvarchar(50) '$.Id', Name nvarchar(50) '$.Name')

How can I put SELECT result set to SELECT column?

I have two tables: Job(ID,Name, etc.) and Address(ID, Job_ID, Name etc). I want to get result like this:
[
{
"Job_ID": 1,
"JobName": "Test",
"Addresses": [
{
"ID": 1,
"Name": "King street"
},
{
"ID": 2,
"Name": "Queen's street"
}
]
}
]
My current query that gets only one address for a job looks like this:
SELECT TOP 100
JO.ID,
JO.Closed as Deleted,
JO.Number as JobNumber,
JO.Name as JobName,
Convert(date, JO.Start_Date) as Start_Date,
JO.Job_Status_ID as Status,
A.ID as Address_ID,
A.Name as Name,
A.Number as Number,
A.Sort_Name as Sort_Name,
A.Address_1 as Address_1,
A.Address_2 as Address_2,
A.ZipCode as ZIP,
A.E_Mail_Address as Email,
A.Web_Site_URL as Web_Site_URL,
A.TAXRATE as Tax_Rate,
A.State
FROM Job JO
INNER JOIN Address A ON A.Job_Id = JO.ID
Is it possible without pivot table(Address_ID, Job_ID)?
You can use FOR JSON to convert you results to JSON. This gives the result you are looking for:
-- Sample fixtures.
CREATE TABLE #Job (ID INT NOT NULL, Name VARCHAR(50));
INSERT #Job (ID, Name)
VALUES (1, 'Job 1'), (2, 'Job 2');
CREATE TABLE #Address (ID INT NOT NULL, JobID INT NOT NULL, Name VARCHAR(50));
INSERT #Address (ID, JobID, Name)
VALUES (1, 1, 'King street'), (2, 1, 'Queen''s street'), (3, 2, 'Address 3'), (4, 2, 'Address 4');
-- One JSON object per job; the correlated subquery (itself FOR JSON)
-- nests that job's addresses as an array under "Addresses".
SELECT
    JobID = job.ID,
    JobName = job.Name,
    Addresses = (
        SELECT addr.ID, addr.Name
        FROM #Address AS addr
        WHERE addr.JobID = job.ID
        FOR JSON AUTO
    )
FROM #Job AS job
FOR JSON AUTO;

Only extract json if field not null

I want to extract a key value from a (nullable) JSONB field. If the field is NULL, I want the record still present in my result set, but with a null field.
customer table:
id, name, phone_num, address
1, "john", 983, [ {"street":"23, johnson ave", "city":"Los Angeles", "state":"California", "current":true}, {"street":"12, marigold drive", "city":"Davis", "state":"California", "current":false}]
2, "jane", 9389, null
3, "sally", 352, [ {"street":"90, park ave", "city":"Los Angeles", "state":"California", "current":true} ]
Current PostgreSQL query:
select id, name, phone_num, items.city
from customer,
jsonb_to_recordset(customer.address) as items(city text, current bool)
where items.current=true
It returns:
id, name, phone_num, city
1, "john", 983, "Los Angeles"
3, "sally", 352, "Los Angeles"
Required Output:
id, name, phone_num, city
1, "john", 983, "Los Angeles"
2, "jane", 9389, null
3, "sally", 352, "Los Angeles"
How do I achieve the above output?
Use a left join lateral instead of an implicit lateral join:
-- LEFT JOIN LATERAL keeps customers whose address is NULL (or has no
-- current entry), yielding a NULL city instead of dropping the row;
-- an implicit comma join would behave like an inner join and lose them.
-- NOTE: the column definition list needs real PostgreSQL types —
-- "text", not "str" (which does not exist and raises an error).
select c.id, c.name, c.phone_num, i.city
from customer c
left join lateral jsonb_to_recordset(c.address) as i(city text, current bool)
on i.current = true

Select within Structs within Arrays in SQL

I'm trying to find rows with N count of identifier A AND M count of identifier B in an array of structs within a Google BigQuery table, using the new Standard SQL. The data in the table (simplified) where each row looks a bit like this:
{
"Session": "abc123",
"Information": [
{
"Identifier": "A",
"Count": 1
},
{
"Identifier": "B",
"Count": 2
},
{
"Identifier": "C",
"Count": 3
}
...
]
}
I've been struggling to work with the struct in an array. Any way I can do that?
Below is for BigQuery Standard SQL
#standardSQL
-- Keep sessions whose information array contains BOTH ('a', 5) AND ('b', 10):
-- count the matching struct elements and require exactly two.
SELECT *
FROM `project.dataset.table`
WHERE (
  SELECT COUNT(*)
  FROM UNNEST(information) AS pair
  WHERE pair IN (('a', 5), ('b', 10))
) = 2
If to apply to dummy data as in below example
#standardSQL
-- Demo with inline dummy data: only session abc456 contains both target pairs.
WITH `project.dataset.table` AS (
  SELECT 'abc123' session, [STRUCT('a' AS identifier, 1 AS `count`), ('b', 2), ('c', 3)] information UNION ALL
  SELECT 'abc456', [('a', 5), ('b', 10), ('c', 20)]
)
SELECT *
FROM `project.dataset.table`
WHERE (
  SELECT COUNT(*)
  FROM UNNEST(information) AS pair
  WHERE pair IN (('a', 5), ('b', 10))
) = 2
result is
Row session information.identifier information.count
1 abc456 a 5
b 10
c 20

In PostgreSQL, what's the best way to select an object from a JSONB array?

Right now, I have an an array that I'm able to select off a table.
[{"_id": 1, "count": 3},{"_id": 2, "count": 14},{"_id": 3, "count": 5}]
From this, I only need the count for a particular _id. For example, I need the count for
_id: 3
I've read the documentation but I haven't been able to figure out the correct way to get the object.
-- Expand the array and keep the element containing {"_id": 3}.
-- The containment operator is @> ; '#>' is path extraction (takes a text[]
-- path, returns jsonb) and would not even type-check against a jsonb
-- right-hand operand, let alone act as a boolean predicate.
WITH test_array(data) AS ( VALUES
('[
    {"_id": 1, "count": 3},
    {"_id": 2, "count": 14},
    {"_id": 3, "count": 5}
]'::JSONB)
)
SELECT val->>'count' AS result
FROM
    test_array ta,
    jsonb_array_elements(ta.data) val
WHERE val @> '{"_id":3}'::JSONB;
Result:
result
--------
5
(1 row)