We have the following table
WITH fake_data(columnA, columnB, columnC) as (
select * from values
(1, 'hello1', 'world18'),
(1, 'hello2', 'world27'),
(2, 'hello9', 'world36'),
(3, NULL, 'world35'),
(10, 'hello13', 'world5')
)
We convert the entire table into a single column that has a JSON-like structure
CREATE OR REPLACE TEMPORARY TABLE LISTE_JSON (V variant)
AS
WITH COLONNE_KEY
AS (
SELECT
ROW_NUMBER () OVER (ORDER BY columnA DESC) KEY_AUTO
,A.*
FROM fake_data A
),
COLONNE_OBJECT
AS (
SELECT
object_agg(
TO_CHAR(KEY_AUTO ) ,
object_construct(
'columnA', IFNULL(columnA,''),
'columnB', IFNULL(columnB,''),
'columnC', IFNULL(columnC,'')
)
)AS COLONNE_OBJECT
FROM COLONNE_KEY
)
SELECT *
FROM COLONNE_OBJECT;
So far everything is going well.
Now how do I read the variant column through a SELECT and see it as a table, as it was at the beginning?
Ex:
SELECT *
FROM LISTE_JSON
COLUMNA COLUMNB COLUMNC
1 hello1 world18
1 hello2 world27
2 hello9 world36
3 '' world35
10 hello13 world5
You can either use PIVOT to pull the values out into columns, or you can hand-roll the pivot via GROUP BY:
SELECT
columna
,max(iff(columnb='hello1', columnc, null)) as hello1
,max(iff(columnb='hello2', columnc, null)) as hello2
,max(iff(columnb='hello3', columnc, null)) as hello3
from fake_data
group by 1 order by 1;
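For the PIVOT route, a minimal sketch against the same fake_data CTE (assuming MAX as the aggregate over columnc and a hard-coded list of columnb values) would be:
select *
from fake_data
  -- one output column per hard-coded columnb value
  pivot (max(columnc) for columnb in ('hello1', 'hello2', 'hello9', 'hello13'))
order by columna;
The downside of PIVOT is that the value list has to be known up front, which is why the GROUP BY form is often easier to hand-roll.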
So let's start with working "example code"
WITH fake_data(columnA, columnB, columnC) as (
select * from values
(1, 'hello1', 'world18'),
(1, 'hello2', 'world27'),
(2, 'hello9', 'world36'),
(3, NULL, 'world35'),
(10, 'hello13', 'world5')
), COLONNE_KEY AS (
SELECT
ROW_NUMBER () OVER (ORDER BY columnA DESC) KEY_AUTO
,A.*
FROM fake_data A
), COLONNE_OBJECT AS (
SELECT
object_agg( KEY_AUTO::text ,
object_construct('columnA', IFNULL(columnA::text,''),
'columnB', IFNULL(columnB::text,''),
'columnC', IFNULL(columnC::text,'')
)
)AS COLONNE_OBJECT
FROM COLONNE_KEY
)
SELECT *
FROM COLONNE_OBJECT;
gives:
COLONNE_OBJECT
{ "1": { "columnA": "10", "columnB": "hello13", "columnC": "world5" }, "2": { "columnA": "3", "columnB": "", "columnC": "world35" }, "3": { "columnA": "2", "columnB": "hello9", "columnC": "world36" }, "4": { "columnA": "1", "columnB": "hello1", "columnC": "world18" }, "5": { "columnA": "1", "columnB": "hello2", "columnC": "world27" } }
which you would like to get back into its original table form
thus
SELECT
f.value:"columnA"::number as columna,
f.value:"columnB"::text as columnb,
f.value:"columnC"::text as columnc
FROM COLONNE_OBJECT, table(flatten(input=>colonne_object)) f;
gives you back
COLUMNA  COLUMNB         COLUMNC
10       hello13         world5
3        <empty string>  world35
2        hello9          world36
1        hello1          world18
1        hello2          world27
and the empty string can be converted back to NULL via
nullif(f.value:"columnB"::text,'') as columnb,
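Putting that together against the LISTE_JSON table created above (whose single column is V), a minimal round-trip query would be:
SELECT
    f.value:"columnA"::number           as columna,
    nullif(f.value:"columnB"::text, '') as columnb,
    nullif(f.value:"columnC"::text, '') as columnc
-- flatten the outer object; each f.value is one of the original rows
FROM LISTE_JSON, table(flatten(input => V)) f;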
In SQL Server 2017, I have a table as follows:
CREATE TABLE #Data
(
Code VARCHAR (2)
, RegionCode VARCHAR (10)
, Prop INT
, Val VARCHAR (200)
, PropF VARCHAR (50)
, PropFD VARCHAR (200)
)
INSERT INTO #Data
(
Code, RegionCode, Prop, Val, PropF, PropFD
)
VALUES
('AD', 'DLSO324', 1, 'Abcdefg', 'SD', 'SomeDescription')
, ('AD', 'DLSO324', 2, 'sdfadf', 'SA', 'SomethingA')
, ('AD', 'DLSO324', 3, 'gfdsdfg', 'SB', 'SomethingB')
, ('AD', 'DLSO324', 4, 'r43df', 'SC', 'SomethingC')
, ('AD', 'DLSO324', 5, 'GHD-123', 'SD2', 'SomethingD')
, ('AD', 'DLSO324', 6, '2013-03-42', 'SE', 'SomethingE')
, ('AD', 'XR1046', 34, 'Value1', 'dsf', 'Desc1')
, ('AD', 'XR1046', 65, 'Value1', 'gfsd', 'Desc1')
, ('AD', 'XR1046', 23, 'Value1', 'dg', 'Desc1')
, ('AD', 'XR1046', 67, 'Value1', 'fgh', 'Desc1')
, ('AD', 'XR1046', 45, 'Value1', 'fh', 'Desc1')
, ('AD', 'XR1046', 99, 'Value1', 'hfgfgh', 'Desc1')
SELECT *
FROM #Data
where you'll notice that a Code and RegionCode combination has multiple props, with each prop having a value (Val), a property code (PropF), and a property field description (PropFD). The number of properties a Code and RegionCode combination can have varies anywhere between 1 and 100, and different combinations of Code and RegionCode can have different PropF and PropFD values even if they share the same prop number.
What I need to do is write a query that pivots the data and produces one row per Code and RegionCode with some JSON data. I need to completely flatten out the JSON so that each Prop number has its own Val, PropF, and PropFD field. My desired structure is as follows (you'll notice that the _number suffix corresponds to the Prop value in the #Data table):
[
{
"Val_1": "Abcdefg",
"PropF_1": "SD",
"PropFD_1": "SomeDescription",
"Val_2": "sdfadf",
"PropF_2": "SA",
"PropFD_2": "SomethingA",
"Val_3": "gfdsdfg",
"PropF_3": "SB",
"PropFD_3": "SomethingB",
"Val_4": "r43df",
"PropF_4": "SC",
"PropFD_4": "SomethingC",
"Val_5": "GHD-123",
"PropF_5": "SD2",
"PropFD_5": "SomethingD",
"Val_6": "2013-03-42",
"PropF_6": "SE",
"PropFD_6": "SomethingE"
}
]
So far I have the following query:
SELECT x.Code
, x.RegionCode
, ( SELECT y.Prop id
, y.Val
, y.PropF
, y.PropFD
FROM #Data y
WHERE y.Code = x.Code
AND y.RegionCode = x.RegionCode
FOR JSON PATH) FieldData
FROM #Data x
GROUP BY x.Code
, x.RegionCode
Is there a way for me to get my desired structure using JOINs and the SQL Server 2017 JSON functions? I want to avoid using PIVOT if possible due to performance reasons.
Since SQL Server is declarative by design, your desired results would require either Dynamic SQL or some String Manipulation.
The following demonstrates a little string manipulation in concert with string_agg()
Example
SELECT Code
,RegionCode
,FieldData = '[{'+string_agg(concat('"Val_',prop,'":"',Val,'","PropF_',Prop,'":"',PropF,'","PropFD_',Prop,'":"',PropFD,'"'),',')+'}]'
FROM #Data
Group By Code,RegionCode
Results
The First Record's JSON
[
{
"Val_1": "Abcdefg",
"PropF_1": "SD",
"PropFD_1": "SomeDescription",
"Val_2": "sdfadf",
"PropF_2": "SA",
"PropFD_2": "SomethingA",
"Val_3": "gfdsdfg",
"PropF_3": "SB",
"PropFD_3": "SomethingB",
"Val_4": "r43df",
"PropF_4": "SC",
"PropFD_4": "SomethingC",
"Val_5": "GHD-123",
"PropF_5": "SD2",
"PropFD_5": "SomethingD",
"Val_6": "2013-03-42",
"PropF_6": "SE",
"PropFD_6": "SomethingE"
}
]
The Second Record's JSON
[
{
"Val_34": "Value1",
"PropF_34": "dsf",
"PropFD_34": "Desc1",
"Val_65": "Value1",
"PropF_65": "gfsd",
"PropFD_65": "Desc1",
"Val_23": "Value1",
"PropF_23": "dg",
"PropFD_23": "Desc1",
"Val_67": "Value1",
"PropF_67": "fgh",
"PropFD_67": "Desc1",
"Val_45": "Value1",
"PropF_45": "fh",
"PropFD_45": "Desc1",
"Val_99": "Value1",
"PropF_99": "hfgfgh",
"PropFD_99": "Desc1"
}
]
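If the values could ever contain double quotes or backslashes, the same string manipulation can be hardened by wrapping each piece in STRING_ESCAPE() -- a sketch, not part of the answer above:
SELECT Code
      ,RegionCode
      ,FieldData = '[{'+string_agg(concat('"Val_',Prop,'":"',string_escape(Val,'json')
                                         ,'","PropF_',Prop,'":"',string_escape(PropF,'json')
                                         ,'","PropFD_',Prop,'":"',string_escape(PropFD,'json'),'"'),',')+'}]'
FROM #Data
Group By Code,RegionCode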
I have JSON stored in the database, that looks like this:
{
"EmpName": "John Doe",
"Department": "IT",
"Address-1": "101, Sector 1, NY",
"Address-2": "102, Sector 2, SC",
"Address-3": "103, Sector 3, NY",
"Address-4": "104, Sector 4, NY",
"Salary": 100000
}
I am trying to check whether the JSON contains the address "102, Sector 2, SC" using JSON_VALUE.
But there can be multiple addresses for a single JSON object, which are indexed like Address-1/2/3/4
Here is what I am trying to do:
select *
from emp
where JSON_VALUE(emp.index_data, '$."Address-[*]"') = '102, Sector 2, SC'
I know we cannot have that '[*]' in the key, but is there any way we can achieve this?
Try this.
SELECT *
FROM emp
CROSS APPLY OPENJSON(emp.index_data)
with(
varAddressLine1 nvarchar(Max) '$."Address-1"',
varAddressLine2 nvarchar(Max) '$."Address-2"',
varAddressLine3 nvarchar(Max) '$."Address-3"',
varAddressLine4 nvarchar(Max) '$."Address-4"'
)
where varAddressLine1 = '102, Sector 2, SC'
OR varAddressLine2 = '102, Sector 2, SC'
OR varAddressLine3 = '102, Sector 2, SC'
OR varAddressLine4 = '102, Sector 2, SC'
An option, if you don't know the exact key names, is to use OPENJSON without a schema:
SELECT *
FROM emp
WHERE EXISTS (SELECT 1
FROM OPENJSON(emp.index_data) j
WHERE j.[key] LIKE 'Address-%' AND
j.[value] = '102, Sector 2, SC'
)
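If you also want to see which address slot matched, a small variation of the schemaless query (a sketch against the same emp.index_data column) can return the key alongside the row:
SELECT e.*, j.[key] AS matched_address_key
FROM emp e
CROSS APPLY OPENJSON(e.index_data) j
WHERE j.[key] LIKE 'Address-%'
  AND j.[value] = '102, Sector 2, SC'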
I want to extract a key value from a (nullable) JSONB field. If the field is NULL, I want the record still present in my result set, but with a null field.
customer table:
id, name, phone_num, address
1, "john", 983, [ {"street":"23, johnson ave", "city":"Los Angeles", "state":"California", "current":true}, {"street":"12, marigold drive", "city":"Davis", "state":"California", "current":false}]
2, "jane", 9389, null
3, "sally", 352, [ "street":"90, park ave", "city":"Los Angeles", "state":"California", "current":true} ]
Current PostgreSQL query:
select id, name, phone_num, items.city
from customer,
jsonb_to_recordset(customer.address) as items(city text, current bool)
where items.current=true
It returns:
id, name, phone_num, city
1, "john", 983, "Los Angeles"
3, "sally", 352, "Los Angeles"
Required Output:
id, name, phone_num, city
1, "john", 983, "Los Angeles"
2, "jane", 9389, null
3, "sally", 352, "Los Angeles"
How do I achieve the above output?
Use a left join lateral instead of an implicit lateral join:
select c.id, c.name, c.phone_num, i.city
from customer c
left join lateral jsonb_to_recordset(c.address) as i(city text, current bool)
on i.current=true
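For comparison, an equivalent sketch using jsonb_array_elements and the ->> operator (an alternative, not part of the answer above) behaves the same way for rows whose address is NULL:
select c.id, c.name, c.phone_num, i.elem ->> 'city' as city
from customer c
-- zero elements (or a NULL address) still keeps the customer row, with a NULL city
left join lateral jsonb_array_elements(c.address) as i(elem)
  on (i.elem ->> 'current')::boolean = true;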
I'm trying to create a query that selects all jobs that are unpaid, along with the customer for each job.
So the query first needs to work out what they owe (a sum of the s_partOrders quantity multiplied by the price in s_parts) and then subtract what they have paid from s_payments.
This query joins it all together but I don't know how to group it by jobNumber because there can be multiple payments and multiple part orders.
SELECT a.jobNumber, a.customerID, a.quoteStatus, a.costDelivery, a.costCallout, a.costLabour, b.customerID, b.firstName, b.lastName, c.paymentID, c.jobNumber, c.amount, d.orderID, d.jobNumber, d.partID, d.quantity, e.partID, e.sellPrice
FROM s_jobcards a
INNER JOIN s_customers b ON a.customerID = b.customerID
INNER JOIN s_payments c ON a.jobNumber = c.jobNumber
INNER JOIN s_partOrders d ON a.jobNumber = d.jobNumber
INNER JOIN s_parts e ON d.partID = e.partID
WHERE a.quoteStatus = 0
Sorry it's quite messy and incomplete...
Included table structure and some test data.
CREATE TABLE IF NOT EXISTS `s_customers` (
`customerID` int(20) NOT NULL AUTO_INCREMENT,
`firstName` text NOT NULL,
`lastName` text NOT NULL,
`address` text NOT NULL,
`suburb` text NOT NULL,
`state` text NOT NULL,
`postcode` text NOT NULL,
`phone` text NOT NULL,
`altPhone` text NOT NULL,
`email` text NOT NULL,
`notes` text NOT NULL,
`postAddress` text NOT NULL,
`serviceDueDate` date NOT NULL,
PRIMARY KEY (`customerID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=4 ;
INSERT INTO `s_customers` (`customerID`, `firstName`, `lastName`, `address`, `suburb`, `state`, `postcode`, `phone`, `altPhone`, `email`, `notes`, `postAddress`, `serviceDueDate`) VALUES
(1, 'David', 'Davinci', '654 Fake Road', 'Canning Vale', 'WA', '6164', '9546446', '45645646', 'dave#website.com', 'This guy is a butt', 'Cockburn Central', '2014-12-24'),
(2, 'Timmy', 'Trumpet', '69 something Street', 'Cockburn Central', 'WA', '6164', '9456456', '92344643', 'timmy#trumpet.com', 'Timmah?', '45 Timmy Street', '0000-00-00'),
(3, 'Jerry', 'Tester', '', '', '', '', '', '', '', '', '', '0000-00-00');
CREATE TABLE IF NOT EXISTS `s_jobcards` (
`jobNumber` int(6) NOT NULL AUTO_INCREMENT,
`dateReceived` date NOT NULL,
`workRequired` text NOT NULL,
`workCompleted` text NOT NULL,
`dateCompleted` date NOT NULL,
`customerID` int(5) NOT NULL,
`serviceTime` int(5) NOT NULL,
`serviceTech` int(1) NOT NULL,
`workOutstanding` text NOT NULL,
`quoteStatus` tinyint(1) NOT NULL DEFAULT '1',
`costDelivery` double NOT NULL,
`costCallout` double NOT NULL,
`costLabour` double NOT NULL,
PRIMARY KEY (`jobNumber`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=6 ;
INSERT INTO `s_jobcards` (`jobNumber`, `dateReceived`, `workRequired`, `workCompleted`, `dateCompleted`, `customerID`, `serviceTime`, `serviceTech`, `workOutstanding`, `quoteStatus`, `costDelivery`, `costCallout`, `costLabour`) VALUES
(1, '2013-11-18', 'Create new service software. Yeah! 4534', 'Not a whole lot yet.?', '0000-00-00', 1, 5, 2, 'Complete this software?', 0, 50, 90, 90),
(2, '2013-11-18', 'work required', 'work done!', '0000-00-00', 1, 1, 3, 'work outstanding', 0, 0, 0, 0),
(3, '2014-12-01', 'Work harder.23432432 gdf', 'Go go!', '2014-12-01', 2, 1, 3, '', 1, 0, 0, 0),
(4, '0000-00-00', 'Whack some moles.', '', '0000-00-00', 3, 0, 1, '', 1, 0, 0, 0),
(5, '0000-00-00', '', '', '0000-00-00', 1, 0, 0, '', 1, 0, 0, 0);
CREATE TABLE IF NOT EXISTS `s_partOrders` (
`orderID` int(11) NOT NULL AUTO_INCREMENT,
`jobNumber` int(11) NOT NULL,
`partID` int(11) NOT NULL,
`quantity` int(11) NOT NULL,
PRIMARY KEY (`orderID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=5 ;
INSERT INTO `s_partOrders` (`orderID`, `jobNumber`, `partID`, `quantity`) VALUES
(2, 0, 1, 5),
(3, 1, 1, 2),
(4, 1, 1, 6);
CREATE TABLE IF NOT EXISTS `s_parts` (
`partID` int(10) NOT NULL AUTO_INCREMENT,
`partNumber` varchar(50) NOT NULL,
`partDescription` text NOT NULL,
`modelID` int(5) NOT NULL,
`buyPrice` double NOT NULL,
`sellPrice` double NOT NULL,
`notes` text NOT NULL,
PRIMARY KEY (`partID`),
UNIQUE KEY `partNumber` (`partNumber`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=2 ;
INSERT INTO `s_parts` (`partID`, `partNumber`, `partDescription`, `modelID`, `buyPrice`, `sellPrice`, `notes`) VALUES
(1, '3453453453', 'Test Part', 1, 10.02, 30.5, 'This is a test part.');
CREATE TABLE IF NOT EXISTS `s_payments` (
`paymentID` int(11) NOT NULL AUTO_INCREMENT,
`amount` double NOT NULL,
`type` text NOT NULL,
`jobNumber` int(11) NOT NULL,
`paymentDate` date NOT NULL,
PRIMARY KEY (`paymentID`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=3 ;
INSERT INTO `s_payments` (`paymentID`, `amount`, `type`, `jobNumber`, `paymentDate`) VALUES
(2, 200, 'Visa', 1, '2014-12-05'),
(3, 20, 'Visa', 1, '2014-12-05');
Use outer joins where there might be no matching records; aggregating the data to jobNumber before joining will help ensure the numbers are accurate:
select
j.jobNumber, j.customerID, j.quoteStatus, j.costDelivery, j.costCallout, j.costLabour
, c.customerID, c.firstName, c.lastName
, p.parts_sellprice
, sum(jp.amount) as paid
from s_jobcards as j
inner join s_customers as c on j.customerID = c.customerID
left outer join s_payments as jp on j.jobNumber = jp.jobNumber
left outer join (
select
d.jobNumber, sum(d.quantity * e.sellPrice) parts_sellprice
from s_partOrders d
left outer join s_parts e ON d.partID = e.partID
group by
d.jobNumber
) as p on j.jobNumber = p.jobNumber
group by
j.jobNumber, j.customerID, j.quoteStatus, j.costDelivery, j.costCallout, j.costLabour
, c.customerID, c.firstName, c.lastName
;
nb: I have assumed the sell price is multiplied by quantity
see this sqlfiddle demo: http://sqlfiddle.com/#!2/96f4c/1
Extending that query to also calculate the total cost and the amount outstanding per job:
select
j.jobNumber, j.customerID, j.quoteStatus, j.costDelivery, j.costCallout, j.costLabour
, c.customerID, c.firstName, c.lastName
, p.parts_sellprice
, sum(jp.amount) as paid
, (j.costDelivery + j.costCallout + j.costLabour + p.parts_sellprice) as Total_Cost
, (j.costDelivery + j.costCallout + j.costLabour + p.parts_sellprice) - sum(jp.amount) as Amount_Outstanding
from s_jobcards as j
inner join s_customers as c on j.customerID = c.customerID
left outer join s_payments as jp on j.jobNumber = jp.jobNumber
left outer join (
select
d.jobNumber, sum(d.quantity * e.sellPrice) parts_sellprice
from s_partOrders d
left outer join s_parts e ON d.partID = e.partID
group by
d.jobNumber
) as p on j.jobNumber = p.jobNumber
group by
j.jobNumber, j.customerID, j.quoteStatus, j.costDelivery, j.costCallout, j.costLabour
, c.customerID, c.firstName, c.lastName
;
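If a job has no part orders or no payments, the NULLs coming out of the outer joins will make those totals NULL, so a hedged variant (a sketch, not part of the answer above) adds COALESCE and filters to the jobs that still owe something:
select
      j.jobNumber, j.customerID, c.firstName, c.lastName
    , (j.costDelivery + j.costCallout + j.costLabour + coalesce(p.parts_sellprice, 0)) as total_cost
    , coalesce(sum(jp.amount), 0) as paid
    , (j.costDelivery + j.costCallout + j.costLabour + coalesce(p.parts_sellprice, 0))
        - coalesce(sum(jp.amount), 0) as amount_outstanding
from s_jobcards as j
inner join s_customers as c on j.customerID = c.customerID
left outer join s_payments as jp on j.jobNumber = jp.jobNumber
left outer join (
      -- parts cost per job, pre-aggregated so the payment rows don't multiply it
      select d.jobNumber, sum(d.quantity * e.sellPrice) as parts_sellprice
      from s_partOrders d
      left outer join s_parts e on d.partID = e.partID
      group by d.jobNumber
    ) as p on j.jobNumber = p.jobNumber
where j.quoteStatus = 0
group by
      j.jobNumber, j.customerID, c.firstName, c.lastName
    , j.costDelivery, j.costCallout, j.costLabour, p.parts_sellprice
having amount_outstanding > 0;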