Aggregation of comma-separated values in-position - SQL

I have one table with two columns, loanno and counter_value.
Against each loanno, a list of comma-separated values is stored.
declare @tbl table (loanno varchar(100), counter_value varchar(200))
insert into @tbl
values ('pr0021','1000,200,300,100,800,230'),
('pr0021','500,300,300,100,600,200'),
('pr0021','500,100,200,190,400,100')
I need to group by loanno and do in-position aggregation (summation) on the counter values.
I need the output like below.
loanno counter_value
pr0021 2000,600,800,390,1800,530

Since you have denormalized data, you will first have to split this into columns, do the aggregation and then recreate the delimited column. There are plenty of splitters out there, but here is my favorite for this type of thing: http://www.sqlservercentral.com/articles/Tally+Table/72993/
The main advantage of this splitter is that it returns the position of each value, which most other splitters do not.
Utilizing that splitter, you can do it like this.
with AggregateData as
(
select t.loanno
, s.ItemNumber
, TotalValue = sum(convert(int, s.Item))
from @tbl t
cross apply dbo.DelimitedSplit8K(t.counter_value, ',') s
group by t.loanno
, s.ItemNumber
)
select ad.loanno
, STUFF((select ',' + convert(varchar(10), ad2.TotalValue)
from AggregateData ad2
where ad2.loanno = ad.loanno
order by ad2.ItemNumber
FOR XML PATH('')), 1, 1, '')
from AggregateData ad
group by ad.loanno
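A minimal sketch of the same split/aggregate/reassemble idea without a custom splitter, assuming SQL Server 2022 or Azure SQL where STRING_SPLIT supports the enable_ordinal argument and STRING_AGG is available, run against the @tbl variable declared above:
select a.loanno
, counter_value = STRING_AGG(convert(varchar(10), a.TotalValue), ',')
      within group (order by a.ItemNumber)   -- reassemble in original position order
from (
      -- split with position (ordinal), then sum per position
      select t.loanno
      , ItemNumber = s.ordinal
      , TotalValue = sum(convert(int, s.value))
      from @tbl t
      cross apply STRING_SPLIT(t.counter_value, ',', 1) s
      group by t.loanno, s.ordinal
     ) a
group by a.loanno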

Sean's would be my first choice (+1).
However, if you have a known (or fixed) number of positions, consider the following:
Example
Select A.loanno
,NewAggr = concat(sum(Pos1),',',sum(Pos2),',',sum(Pos3),',',sum(Pos4),',',sum(Pos5),',',sum(Pos6))
From @tbl A
Cross Apply (
Select Pos1 = n.value('/x[1]','int')
,Pos2 = n.value('/x[2]','int')
,Pos3 = n.value('/x[3]','int')
,Pos4 = n.value('/x[4]','int')
,Pos5 = n.value('/x[5]','int')
,Pos6 = n.value('/x[6]','int')
From (Select cast('<x>' + replace(A.counter_value,',','</x><x>')+'</x>' as xml) as n) X
) B
Group By A.loanno
Returns
loanno NewAggr
pr0021 2000,600,800,390,1800,530
If it Helps with the Visualization, the CROSS APPLY Generates
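loanno   Pos1   Pos2   Pos3   Pos4   Pos5   Pos6
pr0021   1000   200    300    100    800    230
pr0021   500    300    300    100    600    200
pr0021   500    100    200    190    400    100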

Related

SQL - text functions not working (Reverse/Left/Substring/LTrim) - gotta be easy

I've got a text field on a table that I'm trying to dissect into two separate columns in a select statement. I swear this worked for me last time I used it, but now it's throwing an error "Invalid length parameter". What am I doing wrong?
Splitting the data from a single column which is like this:
"CORP - DIVISION - REGION - TEAM - SUPERVISOR"
Into two columns like:
SUPERVISOR | TEAM
Here's what I had that I swear used to work, but it doesn't anymore and I can't figure it out!
Reverse(Left(Reverse(table.column),CHARINDEX(' ', Reverse(table.column))-1)) AS 'SUPERVISOR'
,LTRIM(LEFT(Substring(table.column,18,150),CHARINDEX(' - ', Substring(table.column,18,150))-1)) AS 'TEAM'
If you have a known or maximum number of items, consider a little XML. It is perhaps a little easier to read and maintain.
Also, you could eliminate Pos1, Pos2, Pos3 if you are only interested in TEAM and SUPERVISOR (a trimmed sketch follows the results below).
Example
Declare @YourTable Table ([ID] varchar(50),[SomeCol] varchar(50))
Insert Into @YourTable Values
(1,'CORP - DIVISION - REGION - TEAM - SUPERVISOR')
Select A.ID
,B.*
From @YourTable A
Cross Apply (
Select Pos1 = ltrim(rtrim(xDim.value('/x[1]','varchar(100)')))
,Pos2 = ltrim(rtrim(xDim.value('/x[2]','varchar(100)')))
,Pos3 = ltrim(rtrim(xDim.value('/x[3]','varchar(100)')))
,Pos4 = ltrim(rtrim(xDim.value('/x[4]','varchar(100)')))
,Pos5 = ltrim(rtrim(xDim.value('/x[5]','varchar(100)')))
,Pos6 = ltrim(rtrim(xDim.value('/x[6]','varchar(100)')))
,Pos7 = ltrim(rtrim(xDim.value('/x[7]','varchar(100)')))
,Pos8 = ltrim(rtrim(xDim.value('/x[8]','varchar(100)')))
,Pos9 = ltrim(rtrim(xDim.value('/x[9]','varchar(100)')))
From (Select Cast('<x>' + replace(SomeCol,'-','</x><x>')+'</x>' as xml) as xDim) as A
) B
Returns
ID Pos1 Pos2 Pos3 Pos4 Pos5 Pos6 Pos7 Pos8 Pos9
1 CORP DIVISION REGION TEAM SUPERVISOR NULL NULL NULL NULL
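If only TEAM and SUPERVISOR are needed, as mentioned above, the same pattern can be trimmed to just those two positions; a minimal sketch, assuming positions 4 and 5 based on the sample string:
Select A.ID
      ,B.*
From @YourTable A
Cross Apply (
    Select Team       = ltrim(rtrim(xDim.value('/x[4]','varchar(100)')))
          ,Supervisor = ltrim(rtrim(xDim.value('/x[5]','varchar(100)')))
    From (Select Cast('<x>' + replace(SomeCol,'-','</x><x>')+'</x>' as xml) as xDim) as X
) B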
EDIT
If you have non-XML-safe characters (<, >, &, ...) use
...
From ( values (cast('<x>' + replace((Select replace(SomeCol,'-','§§Split§§') as [*] For XML Path('')),'§§Split§§','</x><x>')+'</x>' as xml))) A(xDim)
...
From SQL Server 2016 you can use STRING_SPLIT() to do it.
STRING_SPLIT() can help normalize the data by splitting these multi-valued columns.
I also used the TRIM() function (introduced with SQL Server 2017) to remove the spaces, along with a CTE, ROW_NUMBER() and PIVOT.
Below is the script:
--1 Create a test table
CREATE TABLE #TestTable
(
TestColumn varchar(100)
)
--2 Insert your string into the table
INSERT INTO #TestTable
VALUES ('0 - CORP - DIVISION - REGION - TEAM - SUPERVISOR')
--3 Final query
;WITH CTE_Table AS (
SELECT
TestColumn = TRIM(TestColumn)
FROM
#TestTable
)
,CTE_Table2 AS (
SELECT
S.Value
FROM
CTE_Table
CROSS APPLY STRING_SPLIT([TestColumn],'-') AS S
)
,CTE_FinalTable AS (
SELECT TOP 5
Value
,ROW_NUMBER() OVER (ORDER BY Value) AS RowNumber
FROM
CTE_Table2
ORDER BY
Value
)
SELECT
[1],[2],[3],[4],[5]
FROM
CTE_FinalTable
PIVOT
(MAX([value])
FOR [RowNumber] IN ([1],[2],[3],[4],[5])
) AS P

How to split multiple strings and insert SQL Server FN_SplitStr

I have 2 strings and one integer:
@categoryID int = 163,
@Ids nvarchar(2000) = '1,2,3',
@Names nvarchar(2000) = 'Bob,Joe,Alex'
I need to select 3 columns and 3 rows; the closest I've managed is 3 rows and 2 columns:
select @categoryID,items from FN_SplitStr(@Ids,',')
resulting:
163,1
163,2
163,3
But I can't figure out how to split both strings.
I tried many ways like:
select @categoryID,items from FN_SplitStr((@Ids,@Names),',')
select @categoryID,items from FN_SplitStr(@Ids,','),items from FN_SplitStr(@Names,',')
EXPECTED OUTPUT:
163,1,Bob
163,2,Joe
163,3,Alex
NOTE1: I looked over tens of questions; the most similar are:
How to split string and insert values into table in SQL Server AND SQL Server : split multiple strings into one row each, but this question is different.
NOTE2: FN_SplitStr is a function for splitting strings in SQL, and I'm trying to create a stored procedure.
Based on your expected output, you have to use cross apply twice and then create some sort of ranking to make sure you are getting the right value. As the IDs and Names don't seem to have any relationship, cross apply will create multiple rows (when you split the strings into Names and IDs).
There might be a better way, but this also gives your expected output. You can swap this string split for your local function.
The 1st dense rank makes sure we get three unique names, and the 2nd dense rank is the rank within the name based on ordering by ID; outside of the subquery you have to do some comparison to get only 3 rows.
Declare @categoryID int = 163,
@Ids nvarchar(2000) = '1,2,3',
@Names nvarchar(2000) = 'Bob,Joe,Alex'
select ConcatenatedValue, CategoryID, IDs, Names from (
select concat(@categoryID,',',a.value,',',b.value) ConcatenatedValue, @categoryID CategoryID,
A.value as IDs, b.value as Names , DENSE_RANK() over (order by b.value) as Rn,
DENSE_RANK() over (partition by b.value order by a.value) as Ranked
from string_split(@Ids,',') a
cross apply string_split(@Names,',') B ) t
where Rn - Ranked = 0
Output:
Inside your stored procedure, do a string split of @Ids and insert into a #temp1 table with an identity(1,1) column rowid. You will get:
163,1,1
163,2,2
163,3,3
Then do the second string split of @Names and insert into a #temp2 table with an identity(1,1) column rowid. You will get:
Bob,1
Joe,2
Alex,3
You can then do an inner join with #temp1 and #temp2 on #temp1.rowid = #temp2.rowid and get:
163,1,Bob
163,2,Joe
163,3,Alex
I hope this solves your problem.
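A minimal sketch of that approach, assuming STRING_SPLIT (SQL Server 2016+) stands in for FN_SplitStr since that function's code isn't shown; note that without an ordinal argument STRING_SPLIT does not formally guarantee output order, so treat this as illustrative:
DECLARE @categoryID int = 163,
        @Ids nvarchar(2000) = '1,2,3',
        @Names nvarchar(2000) = 'Bob,Joe,Alex';

-- rowid captures the order in which the split values are inserted
CREATE TABLE #temp1 (rowid int IDENTITY(1,1), categoryID int, Id nvarchar(100));
CREATE TABLE #temp2 (rowid int IDENTITY(1,1), Name nvarchar(100));

INSERT INTO #temp1 (categoryID, Id)
SELECT @categoryID, value FROM STRING_SPLIT(@Ids, ',');

INSERT INTO #temp2 (Name)
SELECT value FROM STRING_SPLIT(@Names, ',');

-- pair the two lists by their row position
SELECT t1.categoryID, t1.Id, t2.Name
FROM #temp1 t1
INNER JOIN #temp2 t2 ON t1.rowid = t2.rowid;

DROP TABLE #temp1, #temp2;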
You can do this with a recursive CTE:
with cte as (
select @categoryId as categoryId,
convert(varchar(max), left(@ids, charindex(',', @ids + ',') - 1)) as id,
convert(varchar(max), left(@names, charindex(',', @names + ',') - 1)) as name,
convert(varchar(max), stuff(@ids, 1, charindex(',', @ids + ','), '')) as rest_ids,
convert(varchar(max), stuff(@names, 1, charindex(',', @names + ','), '')) as rest_names
union all
select categoryId,
convert(varchar(max), left(rest_ids, charindex(',', rest_ids + ',') - 1)) as id,
convert(varchar(max), left(rest_names, charindex(',', rest_names + ',') - 1)) as name,
convert(varchar(max), stuff(rest_ids, 1, charindex(',', rest_ids + ','), '')) as rest_ids,
convert(varchar(max), stuff(rest_names, 1, charindex(',', rest_names + ','), '')) as rest_names
from cte
where rest_ids <> ''
)
select categoryid, id, name
from cte;
Here is a db<>fiddle.
You need to split the CSV values along with a record number. For that, use the ROW_NUMBER() function to generate a record-wise unique ID column such as "RID" while you split the CSV columns into rows.
You can use a table-valued split function or XML, as used below.
Please check this and let us know whether it solves your problem.
DECLARE
@categoryID int = 163,
@Ids nvarchar(2000) = '1,2,3',
@Names nvarchar(2000) = 'Bob,Joe,Alex'
SELECT
@categoryID AS categoryID,
q.Id,
w.Names
FROM
(
SELECT
ROW_NUMBER() OVER (ORDER BY f.value('.','VARCHAR(10)')) AS RID,
f.value('.','VARCHAR(10)') AS Id
FROM
(
SELECT
CAST('<a>' + REPLACE(@Ids,',','</a><a>') + '</a>' AS XML) AS idXML
) x
CROSS APPLY x.idXML.nodes('a') AS e(f)
) q
INNER JOIN
(
SELECT
ROW_NUMBER() OVER (ORDER BY h.value('.','VARCHAR(10)')) AS RID,
h.value('.','VARCHAR(10)') AS Names
FROM
(
SELECT
CAST('<a>' + REPLACE(@Names,',','</a><a>') + '</a>' AS XML) AS namesXML
) y
CROSS APPLY y.namesXML.nodes('a') AS g(h)
) w ON w.RID = q.RID

Order Concatenated field

I have a field which is a concatenation of single letters. I am trying to order these strings within a view. These values can't be hard coded as there are too many. Is someone able to provide some guidance on the function to use to achieve the desired output below? I am using MSSQL.
Current output
CustID | Code
123 | BCA
Desired output
CustID | Code
123 | ABC
I have tried using a UDF
CREATE FUNCTION [dbo].[Alphaorder] (@str VARCHAR(50))
returns VARCHAR(50)
BEGIN
DECLARE @len INT,
@cnt INT =1,
@str1 VARCHAR(50)='',
@output VARCHAR(50)=''
SELECT @len = Len(@str)
WHILE @cnt <= @len
BEGIN
SELECT @str1 += Substring(@str, @cnt, 1) + ','
SET @cnt+=1
END
SELECT @str1 = LEFT(@str1, Len(@str1) - 1)
SELECT @output += Sp_data
FROM (SELECT Split.a.value('.', 'VARCHAR(100)') Sp_data
FROM (SELECT Cast ('<M>' + Replace(@str1, ',', '</M><M>') + '</M>' AS XML) AS Data) AS A
CROSS APPLY Data.nodes ('/M') AS Split(a)) A
ORDER BY Sp_data
RETURN @output
END
This works when calling one field
ie.
Select CustID, dbo.alphaorder(Code)
from dbo.source
where custid = 123
however when I try to apply this to top(10) I receive the error
"Invalid length parameter passed to the LEFT or SUBSTRING function."
Keeping in mind my source has ~4million records, is this still the best solution?
Unfortunately I am not able to normalize the data into a separate table with records for each Code.
This doesn't rely on an id column to join with itself; performance is almost as fast
as the answer by @Shnugo:
SELECT
    CustID,
    (
        SELECT chr
        FROM (SELECT TOP(LEN(Code))
                     SUBSTRING(Code, ROW_NUMBER() OVER(ORDER BY (SELECT NULL)), 1)
              FROM sys.messages) A(chr)
        ORDER BY chr
        FOR XML PATH(''), TYPE
    ).value('.', 'varchar(max)') AS Code
FROM source t
First of all: Avoid loops...
You can try this:
DECLARE @tbl TABLE(ID INT IDENTITY, YourString VARCHAR(100));
INSERT INTO @tbl VALUES ('ABC')
,('JSKEzXO')
,('QKEvYUJMKRC');
--the cte will create a list of all your strings separated in single characters.
--You can check the output with a simple SELECT * FROM SeparatedCharacters instead of the actual SELECT
WITH SeparatedCharacters AS
(
SELECT *
FROM @tbl
CROSS APPLY
(SELECT TOP(LEN(YourString)) ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) FROM master..spt_values) A(Nmbr)
CROSS APPLY
(SELECT SUBSTRING(YourString,Nmbr,1))B(Chr)
)
SELECT ID,YourString
,(
SELECT Chr As [*]
FROM SeparatedCharacters sc1
WHERE sc1.ID=t.ID
ORDER BY sc1.Chr
FOR XML PATH(''),TYPE
).value('.','nvarchar(max)') AS Sorted
FROM @tbl t;
The result
ID YourString Sorted
1 ABC ABC
2 JSKEzXO EJKOSXz
3 QKEvYUJMKRC CEJKKMQRUvY
The idea in short
The trick is the first CROSS APPLY. This will create a tally on-the-fly. You will get a resultset with numbers from 1 to n, where n is the length of the current string.
The second apply uses this number to get each character one-by-one using SUBSTRING().
The outer SELECT queries the original table, which means one row per ID, and uses a correlated sub-query to fetch all related characters. They will be sorted and re-concatenated using FOR XML. You might add DISTINCT in order to avoid repeating characters.
That's it :-)
Hint: SQL-Server 2017+
With version v2017 there's the new function STRING_AGG(). This would make the re-concatenation very easy:
WITH SeparatedCharacters AS
(
SELECT *
FROM @tbl
CROSS APPLY
(SELECT TOP(LEN(YourString)) ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) FROM master..spt_values) A(Nmbr)
CROSS APPLY
(SELECT SUBSTRING(YourString,Nmbr,1))B(Chr)
)
SELECT ID,YourString
,STRING_AGG(sc.Chr,'') WITHIN GROUP(ORDER BY sc.Chr) AS Sorted
FROM SeparatedCharacters sc
GROUP BY ID,YourString;
Considering your table has a good amount of rows (~4 million), I would suggest you create a persisted computed column in the table to store these values. Calculating these values at run time in a view will lead to performance problems.
If you are not able to normalize, add this as a denormalized column to the existing table.
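A minimal sketch of that idea, assuming the Alphaorder function above is rewritten as deterministic and WITH SCHEMABINDING (both needed for persistence), and assuming the table and column names from the question (dbo.source, Code):
-- hypothetical column name SortedCode; requires dbo.Alphaorder to be deterministic and schema-bound
ALTER TABLE dbo.source
    ADD SortedCode AS dbo.Alphaorder(Code) PERSISTED;

-- the view then reads SortedCode directly; it could also be indexed if needed:
-- CREATE INDEX IX_source_SortedCode ON dbo.source (SortedCode);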
I think the error you are getting could be due to empty codes.
If LEN(@str) = 0
BEGIN
SET @output = ''
END
ELSE
BEGIN
... EXISTING CODE BLOCK ...
END
I can suggest splitting the string into its characters using the referenced SQL split function.
Then you can concatenate the string back, this time ordered alphabetically.
Are you using SQL Server 2017? With SQL Server 2017, you can use the STRING_AGG aggregation function to concatenate the split characters in an ordered way as follows:
select
t.CustId, string_agg(strval, '') within GROUP (order by strval)
from CharacterTable t
cross apply dbo.SPLIT(t.code) s
where strval is not null
group by CustId
order by CustId
If you are not working on SQL Server 2017, then you can follow the structure below using SQL XML PATH for concatenation:
select
CustId,
STUFF(
(
SELECT
'' + strval
from CharacterTable ct
cross apply dbo.SPLIT(t.code) s
where strval is not null
and t.CustId = ct.CustId
order by strval
FOR XML PATH('')
), 1, 0, ''
) As concatenated_string
from CharacterTable t
order by CustId

How to SORT in order as entered in SQL Server?

I'm using SQL Server and I'm trying to find results but I would like to get the results in the same order as I had input the conditions.
My code:
SELECT
AccountNumber, EndDate
FROM
Accounts
WHERE
AccountNumber IN (212345, 312345, 145687, 658975, 256987, 365874, 568974, 124578, 125689) -- I would like the results to be in the same order as these numbers.
Here is an in-line approach
Example
Declare @List varchar(max)='212345, 312345, 145687, 658975, 256987, 365874, 568974, 124578, 125689'
Select A.AccountNumber
,A.EndDate
From Accounts A
Join (
Select RetSeq = Row_Number() over (Order By (Select null))
,RetVal = v.value('(./text())[1]', 'int')
From (values (convert(xml,'<x>' + replace(@List,',','</x><x>')+'</x>'))) x(n)
Cross Apply n.nodes('x') node(v)
) B on A.AccountNumber = B.RetVal
Order By B.RetSeq
EDIT - the subquery Returns
RetSeq RetVal
1 212345
2 312345
3 145687
4 658975
5 256987
6 365874
7 568974
8 124578
9 125689
You can replace IN with a JOIN, and set a field for ordering, like this:
SELECT AccountNumber , EndDate
FROM Accounts a
JOIN (
SELECT 212345 AS Number, 1 AS SeqOrder
UNION ALL
SELECT 312345 AS Number, 2 AS SeqOrder
UNION ALL
SELECT 145687 AS Number, 3 AS SeqOrder
UNION ALL
... -- and so on
) AS inlist ON inlist.Number = a.AccountNumber
ORDER BY inlist.SeqOrder
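The same idea reads a little more compactly with a VALUES table constructor (assuming SQL Server 2008 or later):
SELECT a.AccountNumber, a.EndDate
FROM Accounts a
JOIN (VALUES (212345, 1), (312345, 2), (145687, 3),
             (658975, 4), (256987, 5), (365874, 6),
             (568974, 7), (124578, 8), (125689, 9)
     ) AS inlist (Number, SeqOrder)      -- Number = account, SeqOrder = desired position
  ON inlist.Number = a.AccountNumber
ORDER BY inlist.SeqOrder;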
I will offer one more approach I just found out, but this needs v2016. Regrettably the developers forgot to include the index in the resultset of STRING_SPLIT(), but this would work and is documented:
A solution via FROM OPENJSON():
DECLARE @str VARCHAR(100) = 'val1,val2,val3';
SELECT *
FROM OPENJSON('["' + REPLACE(@str,',','","') + '"]');
The result
key value type
0 val1 1
1 val2 1
2 val3 1
The documentation tells clearly:
When OPENJSON parses a JSON array, the function returns the indexes of the elements in the JSON text as keys.
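Applied to the account numbers from the question, a sketch might look like this (assuming database compatibility level 130 or higher; the key column comes back as a string, hence the cast for ordering):
DECLARE @List varchar(max) = '212345,312345,145687,658975,256987,365874,568974,124578,125689';

SELECT a.AccountNumber, a.EndDate
FROM OPENJSON('[' + @List + ']') j            -- numeric values, so no quoting needed
JOIN Accounts a ON a.AccountNumber = j.value  -- implicit conversion to int
ORDER BY CAST(j.[key] AS int);                -- key = position of the element in the list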
This is not an answer, just some test-code to check John Cappelletti's approach.
DECLARE @tbl TABLE(ID INT IDENTITY,SomeGuid UNIQUEIDENTIFIER);
--Create more than 6 mio rows with an running number and a changing Guid
WITH tally AS (SELECT ROW_NUMBER()OVER(ORDER BY (SELECT NULL)) AS Nmbr
FROM master..spt_values v1
CROSS JOIN master..spt_values v2)
INSERT INTO @tbl
SELECT NEWID() from tally;
SELECT COUNT(*) FROM @tbl; --6.325.225 on my machine
--Create an XML with nothing more than a list of GUIDs in the order of the table's ID
DECLARE @xml XML=
(SELECT SomeGuid FROM @tbl ORDER BY ID FOR XML PATH(''),ROOT('root'),TYPE);
--Create one invalid entry
UPDATE @tbl SET SomeGuid = NEWID() WHERE ID=10000;
--Read all GUIDs out of the XML and number them
DECLARE @tbl2 TABLE(Position INT,TheGuid UNIQUEIDENTIFIER);
INSERT INTO @tbl2(Position,TheGuid)
SELECT ROW_NUMBER() OVER(ORDER BY (SELECT NULL))
,g.value(N'text()[1]',N'uniqueidentifier')
FROM @xml.nodes(N'/root/SomeGuid') AS A(g);
--then JOIN them via "Position" and check,
--if there are rows, where not the same values get into the same row.
SELECT *
FROM @tbl t
INNER JOIN @tbl2 t2 ON t2.Position=t.ID
WHERE t.SomeGuid<>t2.TheGuid;
At least in this simple case I always get exactly only the one record back which was invalidated...
Okay, after some re-thinking I'll offer the ultimate XML-based, type-safe and sort-safe splitter:
Declare @List varchar(max)='212345, 312345, 145687, 658975, 256987, 365874, 568974, 124578, 125689';
DECLARE @delimiter VARCHAR(10)=', ';
WITH Casted AS
(
SELECT (LEN(@List)-LEN(REPLACE(@List,@delimiter,'')))/LEN(REPLACE(@delimiter,' ','.')) + 1 AS ElementCount
,CAST('<x>' + REPLACE((SELECT @List AS [*] FOR XML PATH('')),@delimiter,'</x><x>')+'</x>' AS XML) AS ListXml
)
,Tally(Nmbr) As
(
SELECT TOP((SELECT ElementCount FROM Casted)) ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) FROM master..spt_values v1 CROSS JOIN master..spt_values v2
)
SELECT Tally.Nmbr AS Position
,(SELECT ListXml.value('(/x[sql:column("Tally.Nmbr")])[1]','int') FROM Casted) AS Item
FROM Tally;
The trick is to create a list of running numbers with the fitting number of elements (a numbers table would be even better) and to pick the elements according to their position.
Hint: This is rather slow...
UPDATE: even better:
WITH Casted AS
(
SELECT (LEN(@List)-LEN(REPLACE(@List,@delimiter,'')))/LEN(REPLACE(@delimiter,' ','.')) + 1 AS ElementCount
,CAST('<x>' + REPLACE((SELECT @List AS [*] FOR XML PATH('')),@delimiter,'</x><x>')+'</x>' AS XML)
.query('
for $x in /x
return <x p="{count(/x[. << $x])}">{$x/text()[1]}</x>
') AS ListXml
)
SELECT x.value('#p','int') AS Position
,x.value('text()[1]','int') AS Item
FROM Casted
CROSS APPLY Casted.ListXml.nodes('/x') AS A(x);
Elements are created as
<x p="99">TheValue</x>
Regrettably the XQuery function position() is not available to retrieve the value. But you can use the trick of counting all elements before a given node. This scales badly, as the count must be performed over and over; the more elements, the worse it gets...
UPDATE2: With a known count of elements one might use this (much better performance)
Use XQuery to iterate a literally given list:
WITH Casted AS
(
SELECT (LEN(@List)-LEN(REPLACE(@List,@delimiter,'')))/LEN(REPLACE(@delimiter,' ','.')) + 1 AS ElementCount
,CAST('<x>' + REPLACE((SELECT @List AS [*] FOR XML PATH('')),@delimiter,'</x><x>')+'</x>' AS XML)
.query('
for $i in (1,2,3,4,5,6,7,8,9)
return <x p="{$i}">{/x[$i]/text()[1]}</x>
') AS ListXml
)
SELECT x.value('#p','int') AS Position
,x.value('text()[1]','int') AS Item
FROM Casted
CROSS APPLY Casted.ListXml.nodes('/x') AS A(x);
In Azure SQL, there is now an extended version of STRING_SPLIT which can also return the order of the items if the third optional argument enable_ordinal is set to 1.
Then this simple task is finally easy:
DECLARE @string AS varchar(200) = 'a/b/c/d/e'
DECLARE @position AS int = 3
SELECT value FROM STRING_SPLIT(@string, '/', 1) WHERE ordinal = @position
Unfortunately it is not available in SQL Server 2019, only in Azure for now; let's hope it will be in SQL Server 2022.
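For the account-number question above, a sketch with the ordinal-enabled STRING_SPLIT (again only Azure SQL at the time of writing) could look like:
DECLARE @List varchar(max) = '212345,312345,145687,658975,256987,365874,568974,124578,125689';

SELECT a.AccountNumber, a.EndDate
FROM STRING_SPLIT(@List, ',', 1) s
JOIN Accounts a ON a.AccountNumber = CAST(s.value AS int)
ORDER BY s.ordinal;                  -- ordinal = position of the value in the list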

SSRS selecting results based on comma delimited list with like statement

Based on my question SSRS selecting results based on comma delimited list
Is it possible to do this, but instead of doing it as an EQUALS, can it be done as below?
WHERE value like 'abc%','def%'
One thing to note is that the % is not included in the list.
One option is to split the passed-in SSRS variable (CSV) into a table and join on that.
DECLARE @tab TABLE (Col1 NVARCHAR(200))
INSERT INTO @tab (Col1)
VALUES (N'abc'),(N'def'),(N'xyz'),(N'nop'),(N'ghi'),(N'lmn')
DECLARE @substrings NVARCHAR(200) = 'abc,def,ghi'
;WITH cteX
AS( --dynamically split the string
SELECT Strings = y.i.value('(./text())[1]', 'nvarchar(4000)')
FROM
(
SELECT x = CONVERT(XML, '<i>'
+ REPLACE(@substrings, ',', '</i><i>')
+ '</i>').query('.')
) AS a CROSS APPLY x.nodes('i') AS y(i)
)
SELECT
T.*
FROM @tab T
INNER JOIN cteX X ON X.Strings = T.Col1
gives the following result
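Col1
abc
def
ghi
Since the question asks for prefix matching rather than exact equality, presumably the final join would be changed to a LIKE against the split values, along these lines (a sketch, not tested against SSRS parameter handling):
SELECT
T.*
FROM @tab T
INNER JOIN cteX X ON T.Col1 LIKE X.Strings + N'%'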