Return Multiple Rows in One Cell - sql

So I've looked around and seen the XML trick and the variable trick, and neither made enough sense to me to implement. What I have is a table with four columns: the first is a unique identifier, the second is a relation to a different table, the third is varbinary(max), and the last is a string. I want to combine columns three and four, grouped over column two. Is this possible?
Example of Data:
| FileId | UniqueI1 | BinaryData | FileName |
|--------|----------|------------|----------|
| 1      | 1        | <byte>     | asp.jpg  |
| 2      | 1        | <byte>     | asp1.jpg |
| 3      | 2        | <byte>     | asp2.jpg |
| 4      | 2        | <byte>     | asp3.jpg |
| 5      | 2        | <byte>     | asp4.jpg |
Preferred Output:
| UniqueI1 | BinaryData             | FileName                     |
|----------|------------------------|------------------------------|
| 1        | <byte>, <byte>         | asp.jpg, asp1.jpg            |
| 2        | <byte>, <byte>, <byte> | asp2.jpg, asp3.jpg, asp4.jpg |
I appreciate any help you may be able to provide me.

Sounds like you're trying to group your data and aggregate the BinaryData and FileName columns by concatenating their values.
There are no built-in aggregates for concatenation in T-SQL, but there are a couple of ways to reach the same result.
In my opinion, by far the easiest way is to write a custom aggregate in C# leveraging the CLR, but it can also be done with STUFF and FOR XML. Have a look at Does T-SQL have an aggregate function to concatenate strings?
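For reference, here is a minimal sketch of how such a CLR aggregate is wired up on the SQL side once the C# SqlUserDefinedAggregate is compiled; the assembly path, assembly name, and class name below are hypothetical placeholders:
-- Sketch only: register a compiled CLR string-concatenation aggregate.
-- 'MyClrAggregates' and 'Concatenate' are hypothetical names.
CREATE ASSEMBLY MyClrAggregates
FROM 'C:\clr\MyClrAggregates.dll'
WITH PERMISSION_SET = SAFE;
GO
CREATE AGGREGATE dbo.Concatenate (@input NVARCHAR(MAX))
RETURNS NVARCHAR(MAX)
EXTERNAL NAME MyClrAggregates.[Concatenate];
GO
-- Hypothetical usage against the question's table:
-- SELECT UniqueI1, dbo.Concatenate(FileName) AS FileNames
-- FROM dbo.Files
-- GROUP BY UniqueI1;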

Try this:
DECLARE @t TABLE
(
    FileID INT,
    UniqueID INT,
    Data VARBINARY(100),
    FileName VARCHAR(10)
);

INSERT INTO @t
VALUES ( 1, 1, 1, 'asp.jpg' ),
       ( 2, 1, 2, 'asp1.jpg' ),
       ( 3, 2, 3, 'asp2.jpg' ),
       ( 4, 2, 4, 'asp3.jpg' ),
       ( 5, 2, 5, 'asp4.jpg' );

SELECT UniqueID,
       MAX(ca.data) AS Data,
       MAX(ca.name) AS Name
FROM @t t1
     CROSS APPLY ( SELECT STUFF(
                       (SELECT ', ' + CONVERT(VARCHAR(MAX), t2.Data, 2)
                        FROM @t t2
                        WHERE t1.UniqueID = t2.UniqueID
                        ORDER BY FileID
                        FOR XML PATH(''), TYPE
                       ).value('.', 'varchar(max)'), 1, 2, '') AS data,
                          STUFF(
                       (SELECT ', ' + t2.FileName
                        FROM @t t2
                        WHERE t1.UniqueID = t2.UniqueID
                        ORDER BY FileID
                        FOR XML PATH(''), TYPE
                       ).value('.', 'varchar(max)'), 1, 2, '') AS name
                 ) ca
GROUP BY UniqueID;
Output:
UniqueID  Data                          Name
1         00000001, 00000002            asp.jpg, asp1.jpg
2         00000003, 00000004, 00000005  asp2.jpg, asp3.jpg, asp4.jpg
For pivoting:
WITH cte
AS ( SELECT * ,
ROW_NUMBER() OVER ( PARTITION BY UniqueID ORDER BY FileID ) AS rn
FROM @t
)
SELECT c.UniqueID ,
ca1.[1] AS Data1 ,
ca1.[2] AS Data2 ,
ca1.[3] AS Data3 ,
ca2.[1] AS File1 ,
ca2.[2] AS File2 ,
ca2.[3] AS File3
FROM cte c
CROSS APPLY ( SELECT *
FROM ( SELECT UniqueID ,
rn ,
Data
FROM cte ci
WHERE ci.UniqueID = c.UniqueID
) t PIVOT( MAX(Data) FOR rn IN ( [1], [2], [3] ) ) p
) ca1
CROSS APPLY ( SELECT *
FROM ( SELECT UniqueID ,
rn ,
FileName
FROM cte ci
WHERE ci.UniqueID = c.UniqueID
) t PIVOT( MAX(FileName) FOR rn IN ( [1], [2], [3] ) ) p
) ca2
GROUP BY c.UniqueID, ca1.[1], ca1.[2], ca1.[3], ca2.[1], ca2.[2], ca2.[3]
Output:
UniqueID  Data1       Data2       Data3       File1     File2     File3
1         0x00000001  0x00000002  NULL        asp.jpg   asp1.jpg  NULL
2         0x00000003  0x00000004  0x00000005  asp2.jpg  asp3.jpg  asp4.jpg
You can change this to a dynamic query if you don't want to add columns for additional files manually; a rough sketch follows.
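A minimal sketch of that dynamic approach, shown for the FileName columns only (the Data columns follow the same pattern). It assumes the sample rows have first been copied into a temp table, e.g. SELECT * INTO #files FROM @t, because dynamic SQL run through sp_executesql cannot see a table variable declared in the outer batch:
-- Sketch only: build the pivot column list from the largest group, then pivot dynamically.
DECLARE @cols NVARCHAR(MAX), @sql NVARCHAR(MAX);
SELECT @cols = STUFF(
    (SELECT ',' + QUOTENAME(CONVERT(VARCHAR(10), rn))
     FROM (SELECT DISTINCT ROW_NUMBER() OVER (PARTITION BY UniqueID ORDER BY FileID) AS rn
           FROM #files) d
     ORDER BY rn
     FOR XML PATH(''), TYPE).value('.', 'nvarchar(max)'), 1, 1, '');
SET @sql = N'
WITH cte AS (
    SELECT UniqueID, FileName,
           ROW_NUMBER() OVER (PARTITION BY UniqueID ORDER BY FileID) AS rn
    FROM #files
)
SELECT UniqueID, ' + @cols + N'
FROM cte
PIVOT (MAX(FileName) FOR rn IN (' + @cols + N')) p;';
EXEC sp_executesql @sql;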

Related

Redshift, count items in column separated with comma

I have data where a column stores a group of numbers as a comma-separated string:
| user | col                          |
|------|------------------------------|
| 1    | 3,7,11,25,44,56,77,32,34,55  |
| 2    | 3,7,25,44,37,89,56,99,103,13 |
| 1    | 3,10,11,25,44,56,33,32,34,55 |
I know I can SPLIT_PART the column and count, but is there a different way to count the numbers?
| user | new-col | count |
|------|---------|-------|
| 1    | 3       | 2     |
| 1    | 7       | 1     |
| 1    | 11      | 2     |
| 1    | 25      | 2     |
| 1    | 44      | 2     |
| 1    | 56      | 1     |
| 1    | 77      | 1     |
| 1    | 32      | 2     |
You could use a union query along with SPLIT_PART:
WITH cte AS (
SELECT user, SPLIT_PART(col, ',', 1) AS val FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 2) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 3) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 4) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 5) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 6) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 7) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 8) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 9) FROM yourTable UNION ALL
SELECT user, SPLIT_PART(col, ',', 10) FROM yourTable
)
SELECT
user,
val,
COUNT(*) AS cnt
FROM cte
GROUP BY
user,
val;
But note that all we are doing above in the CTE is really just normalizing your data so that each user-value relationship occupies a separate record. Ideally you should change your table design and move away from storing CSV.
If you instead want just the count of numbers per user, then use:
SELECT
user,
COUNT(*) AS cnt
FROM cte
GROUP BY
user;
Query:
with t as (
select 1 as user, '3,7,11,25,44,56,77,32,34,55' as col
union all
select 2 as user, '3,7,25,44,37,89,56,99,103,13' as col
union all
select 1 as user, '3,10,11,25,44,56,33,32,34,55' as col
)
select a.user, a.val, count(*) as cnt
from (
select a.user
, SPLIT_PART(a.col, ',', b.no) as val
from t a
cross join (
select * from generate_series(1,10) as no
) b
) a
group by a.user, a.val
order by a.user, a.val
Count the number of commas in the string using REGEXP_COUNT and add 1.
CREATE TEMP TABLE examples (
user_id INT
, value_list VARCHAR
);
INSERT INTO examples
SELECT 1 , '3,7,11,25,44,56,77,32,34,55'
UNION ALL SELECT 2 , '3,7,25,44,37,89,56,99,103,13'
UNION ALL SELECT 1 , '3,10,11,25,44,56,33,32,34,55'
;
SELECT user_id
, SUM(REGEXP_COUNT(value_list,',')+1) value_count
FROM examples
GROUP BY 1
;
Output
user_id | value_count
---------+-------------
1 | 20
2 | 10
This answers the original version of the question.
You can count the number of comma-delimited values with:
select (case when col = '' then 0
else length(col) - length(replace(col, ',', '')) + 1
end) as values_count
from t;
That said, you should fix your data model so you are not storing multiple values in a column. It is particularly irksome that you are storing numbers as strings, as well. You want a junction/association table.
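For illustration, a minimal sketch of such a junction table (hypothetical names); with the values stored one per row, both counts above become plain aggregates:
-- Hypothetical normalized design: one row per (user, value) pair.
CREATE TABLE user_values (
    user_id INT NOT NULL,
    val     INT NOT NULL
);
-- Count of each value per user:
SELECT user_id, val, COUNT(*) AS cnt
FROM user_values
GROUP BY user_id, val;
-- Total number of values per user:
SELECT user_id, COUNT(*) AS cnt
FROM user_values
GROUP BY user_id;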

Cache SQL query to create 1 row from multiple records

I have the records below and would like to combine them into a single row.
I tried STUFF with FOR XML PATH and it did not work.
+-----------+-------+---------+
| CLIENT_ID | Event | DX_Code |
+-----------+-------+---------+
| 54 | 5 | F45.72 |
| 54 | 5 | X45.34 |
| 54 | 5 | M98.32 |
+-----------+-------+---------+
Output = 54, 5, F45.72 X45.34 M98.32
You can do that by using STUFF() with FOR XML PATH('') as follows:
CREATE TABLE T
([CLIENT_ID] int, [Event] int, [DX_Code] varchar(6))
;
INSERT INTO T
([CLIENT_ID], [Event], [DX_Code])
VALUES
(54, 5, 'F45.72'),
(54, 5, 'X45.34'),
(54, 5, 'M98.32')
;
SELECT DISTINCT T1.[CLIENT_ID],
T1.[Event],
STUFF(
(
SELECT ',' + T2.[DX_Code]
FROM T T2
WHERE T2.[CLIENT_ID] = T1.[CLIENT_ID]
AND T2.[Event] = T1.[Event]
FOR XML PATH ('')
) , 1, 1, ''
) Result
FROM T T1;
This should give you the expected result:
-- Using the sample table T created above (the original answer assumed a #temp table)
SELECT CAST(t1.CLIENT_ID AS VARCHAR(10)) + ',' + CAST(t1.Event AS VARCHAR(10)) + ',' +
       STUFF(( SELECT ' ' + t2.DX_Code AS [text()]
               FROM T t2
               WHERE
                   t2.CLIENT_ID = t1.CLIENT_ID
                   AND t2.Event = t1.Event
               FOR XML PATH('')
             ), 1, 1, '' )
       AS OutputText
FROM T t1
GROUP BY t1.CLIENT_ID, t1.Event
Output:
54,5,F45.72 X45.34 M98.32

SQL server and STUFF with two tables

I'm facing a problem. I have two tables as below.
table 1
+----+------+
| ks | keys |
+----+------+
| 11 | 1122 |
| 12 | 2211 |
| 13 | 2233 |
| 14 | 3322 |
+----+------+
table 2
+----+----+-------+
| Id | ks | codes |
+----+----+-------+
| 1  | 11 | aaaaa |
| 2  | 11 | bbbbb |
| 3  | 12 | aaaaa |
| 3  | 13 | ccccc |
| 4  | 12 | bbbbb |
+----+----+-------+
I tried to implement the following query in order to get my required output, but it did not work:
SELECT ks,
STUFF (
(SELECT ', ' + t2.codes as [text()]
from table2 as t2 where t1.ks = t2.ks FOR XML PATH('')
),1,1,''
) as "codes"
from table1 t1
group by ks;
I get this table as the result:
+----+-------+
| ks | codes |
+----+-------+
| 11 | aaaa  |
| 11 | bbbb  |
| 12 | cccc  |
| 12 | dddd  |
+----+-------+
The image below shows my required output:
required result
I did something wrong but I do not know what. Any chance someone can help me? Thanks!
Try this. I think you posted the wrong output.
Create table #tbl (ks int , codes varchar(10))
Insert into #tbl values
(11 ,'aaaa'),
(12 ,'bbbb'),
(13 ,'cccc'),
(14 ,'dddd')
Create table #tbl2 (id int, ks int , codes varchar(10))
Insert into #tbl2 values
( 1 ,11 ,'aaaaa'),
( 2 ,11 ,'bbbbb'),
( 3 ,12 ,'aaaaa'),
( 3 ,13 ,'ccccc'),
( 4 ,12 ,'bbbbb')
with cte as
(Select t1.ks, t2.codes
from #tbl t1 join #tbl2 t2 on t1.ks = t2.ks)
Select ks, STUFF(
       (SELECT ',' + codes FROM cte c1
        where c1.ks = c2.ks FOR XML PATH ('')), 1, 1, ''
       ) as codes
from cte c2
group by ks
Output:
ks   codes
11   aaaaa,bbbbb
12   aaaaa,bbbbb
13   ccccc
I cannot say that I fully understand what is going on in your tables (especially given that your output image appears to have no relation to your sample tables), but it looks like you want a comma-delimited list of values from table2 that are associated with each row in table1.
Here's a working example that I think addresses your need. You can use CROSS APPLY in these situations; doing so allows you to return all rows from table1 regardless of whether there is a matching record in table2.
DECLARE #table1 TABLE ( [ks] INT, [code] VARCHAR(10) );
DECLARE #table2 TABLE ( [id] INT, [ks] INT, [code] VARCHAR(10) );
-- populate table1 --
INSERT INTO #table1 (
[ks], [code]
)
VALUES
( 11, 'aaaa' )
, ( 12, 'bbbb' )
, ( 13, 'cccc' )
, ( 14, 'dddd' );
-- populate table two --
INSERT INTO #table2 (
[id], [ks], [code]
)
VALUES
( 1, 11, 'aaaaa' )
, ( 2, 11, 'bbbbb' )
, ( 3, 12, 'aaaaa' )
, ( 3, 13, 'ccccc' )
, ( 4, 12, 'bbbbb' );
SELECT
t1.ks, codes.codes
FROM #table1 t1
CROSS APPLY (
SELECT (
STUFF(
( SELECT ', ' + t2.code AS "text()" FROM #table2 t2 WHERE t2.ks = t1.ks FOR XML PATH ( '' ) )
, 1, 2, ''
)
) AS [codes]
) AS codes
ORDER BY
t1.ks;
Resulting Output:
ks codes
11 aaaaa, bbbbb
12 aaaaa, bbbbb
13 ccccc
14 NULL

Sql Query Join on Comma Separated Value

I have a table that has a composite key and a comma separated value. I need the single row split into one row for each comma separated element. I have seen similar questions and similar answers but have not been able to translate them into a solution for myself.
I'm running SQL Server 2008 R2.
| Key Part 1 | Key Part 2 | Key Part 3 | Values |
|------------------------------------------------------|
| A | A | A | PDE,PPP,POR |
| A | A | B | PDE,XYZ |
| A | B | A | PDE,RRR |
|------------------------------------------------------|
and I need this as output
| Key Part 1 | Key Part 2 | Key Part 3 | Values | Sequence |
|-------------------------------------------------------------------|
| A | A | A | PDE | 0 |
| A | A | A | PPP | 1 |
| A | A | A | POR | 2 |
| A | A | B | PDE | 0 |
| A | A | B | XYZ | 1 |
| A | B | A | PDE | 0 |
| A | B | A | RRR | 1 |
|-------------------------------------------------------------------|
Thanks
Geoff
Here is a simple inline approach if you don't have or want a Split/Parse UDF
Example
Select A.[Key Part 1]
,A.[Key Part 2]
,A.[Key Part 3]
,B.*
From YourTable A
Cross Apply (
Select [Values] = LTrim(RTrim(X2.i.value('(./text())[1]', 'varchar(max)')))
,[Sequence] = Row_Number() over (Order By (Select null))-1
From (Select x = Cast('<x>' + replace(A.[Values],',','</x><x>')+'</x>' as xml)) X1
Cross Apply x.nodes('x') X2(i)
) B
Returns the required output shown in the question.
EDIT - If Open to a Table-Valued Function
The Query would Look Like This
Select A.[Key Part 1]
,A.[Key Part 2]
,A.[Key Part 3]
,[Values] = B.RetVal
,[Sequence] = B.RetSeq-1
From YourTable A
Cross Apply [dbo].[udf-Str-Parse-8K](A.[Values],',') B
The UDF if Interested
CREATE FUNCTION [dbo].[udf-Str-Parse-8K] (@String varchar(max), @Delimiter varchar(25))
Returns Table
As
Return (
    with cte1(N)   As (Select 1 From (Values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) N(N)),
         cte2(N)   As (Select Top (IsNull(DataLength(@String),0)) Row_Number() over (Order By (Select NULL)) From (Select N=1 From cte1 a, cte1 b, cte1 c, cte1 d) A),
         cte3(N)   As (Select 1 Union All Select t.N+DataLength(@Delimiter) From cte2 t Where Substring(@String,t.N,DataLength(@Delimiter)) = @Delimiter),
         cte4(N,L) As (Select S.N,IsNull(NullIf(CharIndex(@Delimiter,@String,S.N),0)-S.N,8000) From cte3 S)
    Select RetSeq = Row_Number() over (Order By A.N)
          ,RetVal = LTrim(RTrim(Substring(@String, A.N, A.L)))
    From cte4 A
);
--Original Source http://www.sqlservercentral.com/articles/Tally+Table/72993/
--Select * from [dbo].[udf-Str-Parse-8K]('Dog,Cat,House,Car',',')
--Select * from [dbo].[udf-Str-Parse-8K]('John||Cappelletti||was||here','||')
If all CSV values are exactly 3 characters (as they are in your test data), you can use a tally table in an incredibly efficient manner by creating the exact number of rows needed up front (as opposed to creating a row for every character to find the delimiter character), because you already know the delimiter locations.
In this case, I'll use a tally function but you can use a fixed tally table as well.
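For reference, a rough sketch of the fixed-table alternative (hypothetical name dbo.Tally); the solution query further down could then join to it on t.n < the element count instead of calling the function:
-- Sketch only: a persisted, 0-based tally table with one million rows.
CREATE TABLE dbo.Tally (n BIGINT NOT NULL PRIMARY KEY CLUSTERED);
INSERT INTO dbo.Tally (n)
SELECT TOP (1000000) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) - 1
FROM sys.all_columns a CROSS JOIN sys.all_columns b;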
Code for the tfn_Tally function...
SET QUOTED_IDENTIFIER ON
SET ANSI_NULLS ON
GO
CREATE FUNCTION dbo.tfn_Tally
/* ============================================================================
    07/20/2017 JL, Created. Capable of creating a sequence of rows
    ranging from -10,000,000,000,000,000 to 10,000,000,000,000,000
   ============================================================================ */
(
    @NumOfRows BIGINT,
    @StartWith BIGINT
)
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
    WITH
        cte_n1 (n) AS (SELECT 1 FROM (VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) n (n)), -- 10 rows
        cte_n2 (n) AS (SELECT 1 FROM cte_n1 a CROSS JOIN cte_n1 b), -- 100 rows
        cte_n3 (n) AS (SELECT 1 FROM cte_n2 a CROSS JOIN cte_n2 b), -- 10,000 rows
        cte_n4 (n) AS (SELECT 1 FROM cte_n3 a CROSS JOIN cte_n3 b), -- 100,000,000 rows
        cte_Tally (n) AS (
            SELECT TOP (@NumOfRows)
                (ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) - 1) + @StartWith
            FROM
                cte_n4 a CROSS JOIN cte_n4 b -- 10,000,000,000,000,000 rows
            )
    SELECT
        t.n
    FROM
        cte_Tally t;
GO
How to use it in the solution...
-- create some test data...
IF OBJECT_ID('tempdb..#TestData', 'U') IS NOT NULL
DROP TABLE #TestData;
CREATE TABLE #TestData (
KeyPart1 CHAR(1),
KeyPart2 CHAR(1),
KeyPart3 CHAR(1),
[Values] varchar(50)
);
INSERT #TestData (KeyPart1, KeyPart2, KeyPart3, [Values]) VALUES
('A', 'A', 'A', 'PDE,PPP,POR'),
('A', 'A', 'B', 'PDE,XYZ'),
('A', 'B', 'A', 'PDE,RRR,XXX,YYY,ZZZ,AAA,BBB,CCC');
--==========================================================
-- solution query...
SELECT
td.KeyPart1,
td.KeyPart2,
td.KeyPart3,
x.SplitValue,
[Sequence] = t.n
FROM
#TestData td
CROSS APPLY dbo.tfn_Tally(LEN(td.[Values]) - LEN(REPLACE(td.[Values], ',', '')) + 1, 0) t -- one row per CSV element (comma count + 1), numbered from 0
CROSS APPLY ( VALUES (SUBSTRING(td.[Values], t.n * 4 + 1, 3)) ) x (SplitValue); -- each element is 3 chars + 1 comma, so the stride is 4
And the results...
KeyPart1 KeyPart2 KeyPart3 SplitValue Sequence
-------- -------- -------- ---------- --------------------
A A A PDE 0
A A A PPP 1
A A A POR 2
A A B PDE 0
A A B XYZ 1
A B A PDE 0
A B A RRR 1
A B A XXX 2
A B A YYY 3
A B A ZZZ 4
A B A AAA 5
A B A BBB 6
A B A CCC 7
If the assumption that all of the CSV elements are the same number of characters is incorrect, you'd be better off using a traditional tally-based splitter. In that case my recommendation is DelimitedSplit8K, written by Jeff Moden.
In that case, the solution query would look like this...
SELECT
td.KeyPart1,
td.KeyPart2,
td.KeyPart3,
SplitValue = dsk.Item,
[Sequence] = dsk.ItemNumber - 1
FROM
#TestData td
CROSS APPLY dbo.DelimitedSplit8K(td.[Values], ',') dsk;
And the result...
KeyPart1 KeyPart2 KeyPart3 SplitValue Sequence
-------- -------- -------- ---------- --------------------
A A A PDE 0
A A A PPP 1
A A A POR 2
A A B PDE 0
A A B XYZ 1
A B A PDE 0
A B A RRR 1
A B A XXX 2
A B A YYY 3
A B A ZZZ 4
A B A AAA 5
A B A BBB 6
A B A CCC 7
HTH, Jason
-- Create Table
Create table YourTable
(
p1 varchar(50),
p2 varchar(50),
p3 varchar(50),
pval varchar(50)
)
go
-- Insert Data
insert into YourTable values ('A','A','A','PDE,PPP,POR'),
('A','A','B','PDE,XYZ'),('A','B','A','PDE,RRR')
go
-- View Sample Data
SELECT p1, p2, p3 , pval FROM YourTable
go
-- Required Result
SELECT p1,p2,p3, LTRIM(RTRIM(Split.a.value('.', 'VARCHAR(100)'))) as Value1 , ROW_NUMBER() OVER(PARTITION BY id ORDER BY id ASC)-1 AS SequenceNo
FROM
(SELECT ROW_NUMBER() over (order by (SELECT NULL)) AS ID, p1,p2,p3, pval, CAST ('<M>' + REPLACE(pval, ',', '</M><M>') + '</M>' AS XML) AS Data from YourTable
) AS A
CROSS APPLY Data.nodes ('/M') AS Split(a)
go
-- Remove Temp created table
drop table YourTable
go

tSQL UNPIVOT of comma concatenated column into multiple rows

I have a table that has a value column. The value could be one value or it could be multiple values separated with a comma:
id | assess_id | question_key | item_value
---+-----------+--------------+-----------
1 | 859 | Cust_A_1 | 1,5
2 | 859 | Cust_B_1 | 2
I need to unpivot the data based on the item_value to look like this:
id | assess_id | question_key | item_value
---+-----------+--------------+-----------
1 | 859 | Cust_A_1 | 1
1 | 859 | Cust_A_1 | 5
2 | 859 | Cust_B_1 | 2
How does one do that in tSQL on SQL Server 2012?
We have a user defined function that we use for stuff like this that we called "split_delimiter":
CREATE FUNCTION [dbo].[split_delimiter](@delimited_string VARCHAR(8000), @delimiter_type CHAR(1))
RETURNS TABLE AS
RETURN
WITH cte10(num) AS
(
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
)
,cte100(num) AS
(
SELECT 1
FROM cte10 t1, cte10 t2
)
,cte10000(num) AS
(
SELECT 1
FROM cte100 t1, cte100 t2
)
,cte1(num) AS
(
SELECT TOP (ISNULL(DATALENGTH(@delimited_string),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
FROM cte10000
)
,cte2(num) AS
(
SELECT 1
UNION ALL
SELECT t.num+1
FROM cte1 t
WHERE SUBSTRING(@delimited_string,t.num,1) = @delimiter_type
)
,cte3(num,[len]) AS
(
SELECT t.num
,ISNULL(NULLIF(CHARINDEX(@delimiter_type,@delimited_string,t.num),0)-t.num,8000)
FROM cte2 t
)
SELECT delimited_item_num = ROW_NUMBER() OVER(ORDER BY t.num)
,delimited_value = SUBSTRING(@delimited_string, t.num, t.[len])
FROM cte3 t;
GO
It will take a varchar value up to 8000 characters and will return a table with the delimited elements broken into rows. In your example, you'll want to use an outer apply to turn those delimited values into separate rows:
SELECT my_table.id, my_table.assess_id, my_table.question_key, delimited_items.item_value
FROM my_table
OUTER APPLY(
SELECT delimited_value AS item_value
FROM my_database.dbo.split_delimiter(my_table.item_value, ',')
) AS delimited_items
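For quick testing, the sample data from the question can be loaded like this (a sketch; the table name my_table matches the query above, and split_delimiter is assumed to already exist in my_database):
-- Hypothetical sample table matching the question's data, so the query above runs as-is.
CREATE TABLE my_table (
    id           INT,
    assess_id    INT,
    question_key VARCHAR(20),
    item_value   VARCHAR(100)
);
INSERT INTO my_table (id, assess_id, question_key, item_value) VALUES
(1, 859, 'Cust_A_1', '1,5'),
(2, 859, 'Cust_B_1', '2');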