SQL String Split into Single Column - sql

Very new to SQL but I require some help with something that I am sure is a simple fix.
I have a single column of data within a table called 'Produce' where types of fruit are stored in a column called 'Fruit'. Some values within this column are separated by commas.
Is there an easy way to split the below so that the results come as a single column of unique entries?
E.g. Example Table
Fruit
-----
Apple
Plum
Pear, Mango
Pear
What I am hoping to return is the below:
Fruit
-----
Apple
Plum
Pear
Mango
I have tried to use the string split function but I think I have it completely wrong. Can anyone help provide some explanation as to how to do this, please? I am using T-SQL if that helps.
Thanks in advance.

The core problem to fix is to stop storing your values as comma-separated lists. Keep your data normalized (a sketch of what that might look like is at the end of this answer). With that being said... everyone needs a good splitter...
declare @table table (Fruit varchar(64))
insert into @table
values
('Apple'),
('Plum'),
('Pear,Mango'),
('Pear')
select distinct
Item
from
@table
cross apply
dbo.DelimitedSplit8K(Fruit,',')
Or, if you are on SQL Server 2016 or later...
select distinct
Item
from
@table
cross apply
string_split(Fruit,',')
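Note that STRING_SPLIT requires database compatibility level 130 or higher. If you are not sure what your database is set to, a quick check (assuming you can query sys.databases) is:
SELECT name, compatibility_level
FROM sys.databases
WHERE name = DB_NAME();  -- 130 or higher is needed for STRING_SPLIT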
THE FUNCTION
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE FUNCTION [dbo].[DelimitedSplit8K] (@pString VARCHAR(8000), @pDelimiter CHAR(1))
--WARNING!!! DO NOT USE MAX DATA-TYPES HERE! IT WILL KILL PERFORMANCE!
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
/* "Inline" CTE Driven "Tally Table" produces values from 1 up to 10,000...
enough to cover VARCHAR(8000)*/
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@pString),0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = SUBSTRING(@pString, l.N1, l.L1)
FROM cteLen l
;
GO
Jeff Moden Article for Function
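For completeness, here is a minimal sketch of the normalized design mentioned at the top of this answer (the ProduceId column and table name are illustrative, not from the original post):
-- One row per fruit: nothing to split at query time
create table dbo.ProduceFruit
(
    ProduceId int not null,          -- hypothetical key linking back to Produce
    Fruit     varchar(64) not null,
    constraint PK_ProduceFruit primary key (ProduceId, Fruit)
);

-- The original question then becomes a plain query
select distinct Fruit
from dbo.ProduceFruit;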

This can be done with pure SQL; no user-written functions needed.
SQL Server
WITH
fruittable
AS
( SELECT 'Apple' fruit, 1 id
UNION ALL
SELECT 'Banana,Apple', 2
UNION ALL
SELECT 'Tomato,Grapefruit,Apple', 3
UNION ALL
SELECT 'Watermelon,Persimmons', 4
),
split (fruit, id, leftover)
AS
(SELECT case when len(fruit) = 0 or fruit is null then null else left(fruit + ',', charindex(',',fruit + ',') -1 ) end AS fruit
, id
, case when len(fruit) = 0 or fruit is null then null else right(fruit + ',', len(fruit) - charindex(',',fruit + ',') + 1) end as leftover
FROM fruittable
UNION ALL
SELECT case when len(leftover) = 0 or leftover is null then null else left(leftover, charindex(',',leftover) - 1) end AS fruit
, id
, case when len(leftover) = 0 or leftover is null then null else substring(leftover, charindex(',',leftover) + 1, len(leftover)) end as leftover
FROM split
WHERE fruit IS NOT NULL)
SELECT fruit, id
FROM split where fruit is not null
order by fruit, id;
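One caveat to add (my note, not from the original answer): SQL Server limits recursive CTEs to 100 recursion levels by default, so a row containing more than roughly 100 elements would fail. You can lift the limit with a query hint, i.e. the final SELECT becomes:
SELECT fruit, id
FROM split
WHERE fruit IS NOT NULL
ORDER BY fruit, id
OPTION (MAXRECURSION 0); -- 0 removes the default cap of 100 recursion levels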
Oracle
WITH
fruittable
AS
(SELECT 'Apple' fruit, 1 id
FROM DUAL
UNION ALL
SELECT 'Banana,Apple', 2
FROM DUAL
UNION ALL
SELECT 'Tomato,Grapefruit,Apple', 3
FROM DUAL
UNION ALL
SELECT 'Watermelon,Persimmons', 4
FROM DUAL),
split (fruit, id, leftover)
AS
(SELECT SUBSTR (fruit || ',', 1, INSTR (fruit || ',', ',') - 1) AS fruit
, id
, SUBSTR (fruit || ',', INSTR (fruit || ',', ',') + 1) AS leftover
FROM fruittable
UNION ALL
SELECT SUBSTR (leftover, 1, INSTR (leftover, ',') - 1) AS fruit
, id
, SUBSTR (leftover, INSTR (leftover, ',') + 1) AS leftover
FROM split
WHERE fruit IS NOT NULL)
SELECT fruit, id
FROM split
WHERE fruit IS NOT NULL
ORDER BY fruit, id

Related

How to extract a string between two of the SAME delimiters T-SQL?

I want to extract part of a string from a value that contains several of the same delimiter.
Here is an example of the data I am working with (these file paths could be even longer depending on the depth of the file):
FilePath:
Q:\12345\downloads\randomfilename.png
Q:\123_4566\downloads\randomfilename.pdf
Q:\CCCMUD\downloads\randomfilename.mp4
I want to extract part of the string between the first two delimiters ( \ ) for every row into a new column e.g.
12345
123_4566
CCCMUD
I know I need to be using SUBSTRING and CHARINDEX but I'm not sure how. I would appreciate any help. Thanks.
Use CHARINDEX twice:
SELECT *, SUBSTRING(path, pos1 + 1, pos2 - pos1 - 1)
FROM tests
CROSS APPLY (SELECT NULLIF(CHARINDEX('\', path), 0)) AS ca1(pos1)
CROSS APPLY (SELECT NULLIF(CHARINDEX('\', path, pos1 + 1), 0)) AS ca2(pos2)
-- NULLIF is used to convert 0 value (character not found) to NULL
Test on db<>fiddle
In all your examples, the first \ is at character 3 in the string. If that is always the case, you can simply use:
select v.*,
substring(filepath, 4, charindex('\', filepath, 4) - 4)
from (values ('Q:\123_4566\downloads\randomfilename.pdf')) v(filepath)
DECLARE @s table (path varchar(4000));
INSERT @s(path) VALUES
('Q:\12345\downloads\randomfilename.png'),
('Q:\123_4566\downloads\randomfilename.pdf'),
('Q:\CCCMUD\downloads\randomfilename.mp4');
SELECT folder = LEFT(o, CHARINDEX('\', o) - 1) FROM
(
SELECT o = SUBSTRING(path, CHARINDEX('\', path) + 1, 4000)
FROM @s
) AS o;
Output:
folder
----------
12345
123_4566
CCCMUD
This will error, though, for paths that don't contain two \ characters. So you may want to add a filter to the inner query (or determine how you want to handle the output differently in that case):
WHERE path LIKE '%\%\%'
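Putting those two pieces together (nothing new here, just the inner query from above with the suggested filter applied):
SELECT folder = LEFT(o, CHARINDEX('\', o) - 1) FROM
(
SELECT o = SUBSTRING(path, CHARINDEX('\', path) + 1, 4000)
FROM @s
WHERE path LIKE '%\%\%'
) AS o;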
An easy and efficient way to do this is to use an ordinal splitter (like this one). To make sure the split value only contains numbers you could add WHERE try_cast(ds.Item as int) is not null. Something like this
splitter
CREATE FUNCTION [dbo].[DelimitedSplit8K_LEAD]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "zero base" and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT 0 UNION ALL
SELECT TOP (DATALENGTH(ISNULL(@pString,1))) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT t.N+1
FROM cteTally t
WHERE (SUBSTRING(@pString,t.N,1) = @pDelimiter OR t.N = 0)
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY s.N1),
Item = SUBSTRING(@pString,s.N1,ISNULL(NULLIF((LEAD(s.N1,1,1) OVER (ORDER BY s.N1) - 1),0)-s.N1,8000))
FROM cteStart s
;
query
select ds.*
from @s s
cross apply dbo.DelimitedSplit8K_LEAD(s.[path], '\') ds
where ds.ItemNumber=2
and try_cast(ds.Item as int) is not null;
ItemNumber Item
2 12345

Pivoting a string from a table function in different columns

I have a task to create a table function that receives a string and sorts it into different columns.
The string itself may vary, but the columns are supposed to be the same always.
The string is: '100^TEst¬200^TEst2¬300^Test3', but for example if I add "¬400^Test4" that should be in the result set as well.
Here is what I've managed to do so far.
if object_id('stringSplit1') is not null
drop function stringSplit1
go
create function stringSplit1(@input varchar(1000))
returns @outputtable table (ord varchar(1000), dta_1 varchar(1000), dta_2 varchar(1000))
as
begin
return
end
go
select [1] as dta_1, [2] as dta_2 from (
select row_number() over(partition by id order by (select null)) as rn, value
from (select value as id from string_split('100^TEst¬200^TEst2¬300^Test3', '¬')) as bo
cross apply string_split(id, '^') as bk) t
pivot(
max([value]) for [rn] in ([1], [2])
) as pvt
Sometimes, in similar cases, a JSON-based approach is also an option. You need to transform the input text into a valid JSON structure ('100^TEst¬200^TEst2¬300^Test3' into [{"100":"TEst"},{"200":"TEst2"},{"300":"Test3"}] ) and parse it with OPENJSON(). The result from this call is a table with columns [Key], [Value] and [Type]. When the parsed text is a JSON array, the [Key] column holds the index of each element in the JSON array.
UDF:
CREATE FUNCTION dbo.StringSplit1 (@Text varchar(8000))
RETURNS table AS
RETURN
SELECT
(CONVERT(int, j1.[key]) + 1) AS [Ord],
j2.[key] AS dat_1,
j2.[value] AS dat_2
FROM OPENJSON(CONCAT('[{"', REPLACE(REPLACE(#Text, '¬', '"},{"'), '^', '":"'), '"}]')) j1
OUTER APPLY OPENJSON(j1.[value]) j2
Statement:
SELECT *
FROM dbo.StringSplit1('100^TEst¬200^TEst2¬300^Test3')
Result:
Ord dat_1 dat_2
---------------
1 100 TEst
2 200 TEst2
3 300 Test3
As an additional note, if you use SQL Server 2016+, you may use STRING_ESCAPE() to escape the input text:
...
FROM OPENJSON(CONCAT(
'[{"',
REPLACE(REPLACE(STRING_ESCAPE(@Text, 'json'), '¬', '"},{"'), '^', '":"'),
'"}]'
)) j1
...
Firstly, although you can use STRING_SPLIT, the order of the values returned by it is not guaranteed. As you define the length of your parameter as 1000 in your function stringSplit1, we can safely use a function like DelimitedSplit8K_LEAD, which does guarantee the order by providing the ordinal position of each value. This means we can do something like this:
SELECT SSr.ItemNumber AS Ord,
MAX(CASE SSc.ItemNumber WHEN 1 THEN SSc.Item END) AS dta_1,
MAX(CASE SSc.ItemNumber WHEN 2 THEN SSc.Item END) AS dta_2
FROM (VALUES('100^TEst¬200^TEst2¬300^Test3'))V(YourColumn)
CROSS APPLY dbo.DelimitedSplit8K_LEAD(V.YourColumn,'¬') SSr
CROSS APPLY dbo.DelimitedSplit8K_LEAD(SSr.Item,'^') SSc
GROUP BY SSr.ItemNumber;
If you want this as a function, use an inline table-valued function, not a multi-statement TVF like you have done:
CREATE FUNCTION dbo.DatasetSplit (@Input varchar(8000))
RETURNS table AS
RETURN
SELECT SSr.ItemNumber AS Ord,
MAX(CASE SSc.ItemNumber WHEN 1 THEN SSc.Item END) AS dta_1,
MAX(CASE SSc.ItemNumber WHEN 2 THEN SSc.Item END) AS dta_2
FROM (VALUES(@Input))V(YourColumn)
CROSS APPLY dbo.DelimitedSplit8K_LEAD(V.YourColumn,'¬') SSr
CROSS APPLY dbo.DelimitedSplit8K_LEAD(SSr.Item,'^') SSc
GROUP BY SSr.ItemNumber;
GO
SELECT *
FROM dbo.DatasetSplit ('100^TEst¬200^TEst2¬300^Test3') DS;
If you want to parametrise your row and column delimiters, then you could do it like this:
ALTER FUNCTION dbo.DatasetSplit (@Input varchar(8000), @RowDelimiter char(1), @ColumnDelimiter char(1))
RETURNS table AS
RETURN
SELECT SSr.ItemNumber AS Ord,
MAX(CASE SSc.ItemNumber WHEN 1 THEN SSc.Item END) AS dta_1,
MAX(CASE SSc.ItemNumber WHEN 2 THEN SSc.Item END) AS dta_2
FROM (VALUES(@Input))V(YourColumn)
CROSS APPLY dbo.DelimitedSplit8K_LEAD(V.YourColumn,@RowDelimiter) SSr
CROSS APPLY dbo.DelimitedSplit8K_LEAD(SSr.Item,@ColumnDelimiter) SSc
GROUP BY SSr.ItemNumber;
GO
SELECT *
FROM dbo.DatasetSplit ('100^TEst¬200^TEst2¬300^Test3','¬','^') DS;
Definition of DelimitedSplit8K_LEAD for completeness:
CREATE FUNCTION [dbo].[DelimitedSplit8K_LEAD]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table” produces values from 0 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "zero base" and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT 0 UNION ALL
SELECT TOP (DATALENGTH(ISNULL(@pString,1))) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT t.N+1
FROM cteTally t
WHERE (SUBSTRING(@pString,t.N,1) = @pDelimiter OR t.N = 0)
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY s.N1),
Item = SUBSTRING(@pString,s.N1,ISNULL(NULLIF((LEAD(s.N1,1,1) OVER (ORDER BY s.N1) - 1),0)-s.N1,8000))
FROM cteStart s;
GO

Getting most used words from a column of strings in SQL

So we have this database filled with a bunch of strings, in this case post titles.
What I want to do is:
Split the string up in to words
Count how many times words appear in strings
Give me to top 50 words
Not have this timeout in a data.se query
I tried using the info from this SO question adapted to data.se as follows:
select word, count(*) from (
select (case when instr(substr(p.Title, nums.n+1), ' ') then substr(p.Title, nums.n+1)
else substr(p.Title, nums.n+1, instr(substr(p.Title, nums.n+1), ' ') - 1)
end) as word
from (select ' '||Title as string
from Posts p
)Posts cross join
(select 1 as n union all select 2 union all select 10
) nums
where substr(p.Title, nums.n, 1) = ' ' and substr(p.Title, nums.n, 1) <> ' '
) w
group by word
order by count(*) desc
Unfortunately, this gives me a slew of errors:
'substr' is not a recognized built-in function name.
Incorrect syntax near '|'.
Incorrect syntax near 'nums'.
So given a column of strings in SQL with a variable amount of text in each string, how can I get a list of the most frequently used X words?
As Blogbeard said, the query you provided does not work with SQL Server. Here is one way to count the most used words. This is based on a function, DelimitedSplitN4K, written by Jeff Moden and improved by members of the SQL Server Central community.
ONLINE DEMO
WITH E1(N) AS (
SELECT 1 FROM (VALUES
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)
) t(N)
),
E2(N) AS (SELECT 1 FROM E1 a CROSS JOIN E1 b),
E4(N) AS (SELECT 1 FROM E2 a CROSS JOIN E2 b)
SELECT TOP 50
x.Item,
COUNT(*)
FROM Posts p
CROSS APPLY (
SELECT
ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = LTRIM(RTRIM(SUBSTRING(p.Title, l.N1, l.L1)))
FROM (
SELECT s.N1,
L1 = ISNULL(NULLIF(CHARINDEX(' ',p.Title,s.N1),0)-s.N1,4000)
FROM(
SELECT 1 UNION ALL
SELECT t.N+1
FROM(
SELECT TOP (ISNULL(DATALENGTH(p.Title)/2,0))
ROW_NUMBER() OVER (ORDER BY (SELECT NULL))
FROM E4
) t(N)
WHERE SUBSTRING(p.Title ,t.N,1) = ' '
) s(N1)
) l(N1, L1)
) x
WHERE x.item <> ''
GROUP BY x.Item
ORDER BY COUNT(*) DESC
Since creating functions is not allowed on data.se, I've written it inline that way. Here is the function definition if you're interested:
CREATE FUNCTION [dbo].[DelimitedSplitN4K](
@pString NVARCHAR(4000),
@pDelimiter NCHAR(1)
)
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
),
E2(N) AS (SELECT 1 FROM E1 a, E1 b),
E4(N) AS (SELECT 1 FROM E2 a, E2 b),
cteTally(N) AS(
SELECT TOP (ISNULL(DATALENGTH(@pString)/2,0)) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
),
cteLen(N1,L1) AS(
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,4000)
FROM cteStart s
)
SELECT
ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = SUBSTRING(@pString, l.N1, l.L1)
FROM cteLen l
;
And here is how you would use it:
SELECT TOP 50
x.Item,
COUNT(*)
FROM Posts p
CROSS APPLY dbo.DelimitedSplitN4K(p.Title, ' ') x
WHERE LTRIM(RTRIM(x.Item)) <> ''
GROUP BY x.Item
ORDER BY COUNT(*) DESC
The result:
Item
-------- -------
to 3812411
in 3331522
a 2543636
How 1770915
the 1534298
with 1341632
of 1297468
and 1166664
on 970554
from 964449
for 886007
not 835979
is 704724
using 703007
I 633838
- 632441
an 548450
when 449169
file 409717
how 358745
data 335271
do 323854
can 310298
get 305922
or 266317
error 263563
use 258408
value 254392
it 251254
my 238902
function 235832
by 231025
Android 228308
as 216654
array 209157
working 207445
does 207274
Is 205613
multiple 203336
that 197826
Why 196979
into 196591
after 192056
string 189053
PHP 187018
one 182360
class 179965
if 179590
text 174878
table 169393
Query solution (No Split Function Required)
PostgreSQL
select word, count(*) from
(
-- get 1st words
select split_part(title, ' ', 1) as word
from posts
union all
-- get 2nd words
select split_part(title, ' ', 2) as word
from posts
union all
-- get 3rd words
select split_part(title, ' ', 3) as word
from posts
-- can do this as many times as the number of words in longest title
) words
where word is not null
and word NOT IN ('', 'and', 'for', 'of', 'on')
group by word
order by count desc
limit 50;
for a concise version, see: https://dba.stackexchange.com/a/82456/95929
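As an aside (my sketch, not the linked answer): if PostgreSQL's regexp functions are available, regexp_split_to_table avoids repeating split_part once per word position:
select word, count(*) as cnt
from (
    -- one row per whitespace-separated word, regardless of title length
    select regexp_split_to_table(title, '\s+') as word
    from posts
) words
where word <> ''
  and word not in ('and', 'for', 'of', 'on')
group by word
order by cnt desc
limit 50;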
With the now available STRING_SPLIT function (since SQL Server 2016, Compatibility Level 130) this query becomes much easier:
SELECT TOP 50
value [word]
, COUNT(*) [#times]
FROM posts p
CROSS APPLY STRING_SPLIT(p.title, ' ')
GROUP BY value
ORDER BY COUNT(*) DESC
See it in action on the Stack Exchange Data Explorer, where it still runs in under 2 minutes for the current number of posts in the Stack Overflow database. On Stack Overflow em Português it runs without having to fear the dreaded timeout.
Results are similar to those shown in the answer from Felix.

How to replace all numbers of exactly 8 characters in length eg 12345678

I've done a good bit of searching all over so don't berate me yet.
I have a column with string values showing the name of shows.
Eg:
[Titles]
World Cup 2014
Family Guy
UFC Fight Night
9pm News and Weather
2014 Media Awards
Homeland 25242324
Simpsons 25242314
Shameless
Soccer Night 45342324 International
Rugby Live 45342324 HTML5
I wish to use a select statement to strip out the numbers where the numbers are exactly 8 characters in length.
I have only read access so cannot create functions and I'm using SQL Server 2005.
There are a number of split functions that can be found around the internet. My personal preference is the one created by Jeff Moden and enhanced by the community over the last few years. You can find his article here, http://www.sqlservercentral.com/articles/Tally+Table/72993/. Make sure you look at the comments...there are a few hundred at the time of this posting. Here is the code for that splitter.
CREATE FUNCTION [dbo].[DelimitedSplit8K]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table" produces values from 0 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "zero base" and limits the number of rows right up front
-- for both a performance gain and prevention of accidental "overruns"
SELECT 0 UNION ALL
SELECT TOP (DATALENGTH(ISNULL(@pString,1))) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just once for each delimiter)
SELECT t.N+1
FROM cteTally t
WHERE (SUBSTRING(@pString,t.N,1) = @pDelimiter OR t.N = 0)
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY s.N1),
Item = SUBSTRING(@pString,s.N1,ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000))
FROM cteStart s
;
No matter which splitter you use (as long as it is a table-valued function), it can work just like this.
if OBJECT_ID('tempdb..#Something') is not null
drop table #Something;
create table #Something
(
SomeValue varchar(100)
);
insert #Something
select 'World Cup 2014' union all
select 'Family Guy' union all
select 'UFC Fight Night' union all
select '9pm News and Weather' union all
select '2014 Media Awards' union all
select 'Homeland 25242324' union all
select 'Simpsons 25242314' union all
select 'Shameless' union all
select 'Soccer Night 45342324 International' union all
select 'Rugby Live 45342324 HTML5';
with ParsedData as
(
select *
from #Something s
cross apply dbo.DelimitedSplit8K(s.SomeValue, ' ')
where Item not like '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
)
select distinct
Stuff((SELECT ' ' + Item
FROM ParsedData p2
WHERE p1.SomeValue = p2.SomeValue
ORDER BY p2.ItemNumber --The split function keeps track of the order for us already
FOR XML PATH('')), 1, 1, '') as Details
from ParsedData p1
create table #tmp(myString varchar(255))
insert into #tmp
values('Soccer Night 45342327 International'),('9pm News and Weather')
Select newstring = case when patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%',mystring) > 0 then
left(mystring, patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%',mystring) - 1)
+ substring(mystring,patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%',mystring) + 8,9999)
else mystring end
from #tmp
drop table #tmp
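One small caveat with the PATINDEX approach (my note): removing the eight digits leaves behind both the space before and the space after them, so 'Soccer Night 45342327 International' comes back with a doubled space. A sketch of one way to tidy that up, wrapping the same expression (and assuming #tmp has not been dropped yet):
-- Collapse the doubled space left by the removed digits and trim the ends.
-- Note: this also collapses any other double spaces in the title.
select newstring = ltrim(rtrim(replace(
       case when patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%', mystring) > 0
            then left(mystring, patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%', mystring) - 1)
                 + substring(mystring, patindex('%[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]%', mystring) + 8, 9999)
            else mystring end,
       '  ', ' ')))
from #tmp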

function in where clause

I have two scenarios. The first one is fast, but not an option. The second one uses a function and kills the indexing. The values would look similar to this: 'ww,tt,tt,bb'. They can put in as many codes as they want. Is there a better approach than using the function? This is a stored proc using SQL Server 2008.
Without Function:
WHERE date BETWEEN '20140701' AND '20140731' and
LEFT(id, 2) IN ('wp')
With Function:
WHERE date BETWEEN '20140701' AND '20140731' and
LEFT(id, 2) IN (SELECT * FROM Toolbox.dbo.Split_DelimitedString_fn(@string, ','))
My suggestion is to split the string beforehand and insert the result into a temp table:
INSERT INTO #tmp
SELECT *
FROM Toolbox.dbo.Split_DelimitedString_fn(@string, ',')
Then rewrite your query as
WHERE date BETWEEN '20140701' AND '20140731' and
LEFT(id, 2) IN (SELECT * FROM #tmp)
Declare a table variable, and insert the values:
DECLARE @SplitIds TABLE
(
SplitId varchar(10)
)
Insert into @SplitIds (SplitId)
SELECT field FROM Toolbox.dbo.Split_DelimitedString_fn(@string, ',')
You could then join against this table...
SELECT columns
FROM TableName a INNER JOIN @SplitIds b ON LEFT(a.id, 2) = b.SplitId
WHERE a.date BETWEEN '20140701' AND '20140731'
Not sure if this will speed the query up, but you can use the Execution plan to help and also check your indexes.
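A side note on the indexing concern in the question (not from any of these answers, just a sketch): LEFT(id, 2) on the column side of a comparison generally prevents an index seek no matter how the list is split. If id is a character column, a prefix LIKE against the split values can keep the predicate sargable:
-- Illustrative only; reuses the @SplitIds table variable from above, SELECT list is a placeholder
SELECT columns
FROM TableName a
INNER JOIN @SplitIds b
    ON a.id LIKE b.SplitId + '%'   -- for two-character prefixes, same as LEFT(a.id, 2) = b.SplitId, but seekable
WHERE a.date BETWEEN '20140701' AND '20140731';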
So if I understand the question correctly, you want the 'in' clause to be dynamically determined by what is in another table.
Have you tried:
WHERE date BETWEEN '20140701' AND '20140731' and
LEFT(id, 2) IN (SELECT <column> FROM Toolbox.dbo.tblname)
Second attempt:
move the Left(id,2) into the select clause and then filter on that column (a more concrete sketch follows after this snippet):
select
<current query>
,Left(id,2) as sort
from
<current query>
WHERE date BETWEEN '20140701' AND '20140731' and
sort IN (SELECT * FROM Toolbox.dbo.Split_DelimitedString_fn(@string, ','))
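A note on that second attempt (my sketch, not part of the original answer): T-SQL does not allow the WHERE clause to reference a column alias defined in the same SELECT, so the idea needs a derived table or CTE. A minimal sketch, reusing the table and column names from the snippets above:
SELECT q.*
FROM (
    SELECT t.*,                   -- whatever columns the current query returns
           LEFT(t.id, 2) AS sort  -- expose the prefix as a real column
    FROM TableName t
    WHERE t.date BETWEEN '20140701' AND '20140731'
) AS q
WHERE q.sort IN (SELECT * FROM Toolbox.dbo.Split_DelimitedString_fn(@string, ','));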
If I understand correctly, the performance issue centers on how to efficiently split the CSV string in SQL. If so my answer here on how to employ Jeff Moden's Tally-Ho CSV Splitter might be the ticket. Repeated here for clarity:
Function definition:
CREATE FUNCTION [dbo].[DelimitedSplit8K]
--===== Define I/O parameters
(@pString VARCHAR(8000), @pDelimiter CHAR(1))
--WARNING!!! DO NOT USE MAX DATA-TYPES HERE! IT WILL KILL PERFORMANCE!
RETURNS TABLE WITH SCHEMABINDING AS
RETURN
--===== "Inline" CTE Driven "Tally Table" produces values from 1 up to 10,000...
-- enough to cover VARCHAR(8000)
WITH E1(N) AS (
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
), --10E+1 or 10 rows
E2(N) AS (SELECT 1 FROM E1 a, E1 b), --10E+2 or 100 rows
E4(N) AS (SELECT 1 FROM E2 a, E2 b), --10E+4 or 10,000 rows max
cteTally(N) AS (--==== This provides the "base" CTE and limits the number of rows for
-- both performance gain and prevention of accidental "overruns"
SELECT TOP (ISNULL(DATALENGTH(@pString),0)) ROW_NUMBER()
OVER (ORDER BY (SELECT NULL)) FROM E4
),
cteStart(N1) AS (--==== This returns N+1 (starting position of each "element" just
-- once for each delimiter)
SELECT 1 UNION ALL
SELECT t.N+1 FROM cteTally t WHERE SUBSTRING(@pString,t.N,1) = @pDelimiter
),
cteLen(N1,L1) AS(--==== Return start and length (for use in substring)
SELECT s.N1,
ISNULL(NULLIF(CHARINDEX(@pDelimiter,@pString,s.N1),0)-s.N1,8000)
FROM cteStart s
)
--===== Do the actual split. The ISNULL/NULLIF combo handles the length for the final
-- element when no delimiter is found.
SELECT ItemNumber = ROW_NUMBER() OVER(ORDER BY l.N1),
Item = SUBSTRING(@pString, l.N1, l.L1)
FROM cteLen l
;
go
Example usage from a previous answer:
with data as (
select Code,Location,Quantity,Store from ( values
('L698-W-EA', NULL, 2, 'A')
,('L82009-EA', 'A1K2, A1N2, C4Y3, CBP2', 2, 'A')
,('L80401-A-EA', 'A1S2, SHIP, R2F1, CBP5, BRP, BRP1-20', 17,'A')
,('CWD2132W-BOX-25PK', 'A-AISLE', 1, 'M')
,('GM22660003-EA', 'B1K2', 1, 'M')
)data(Code,Location,Quantity,Store)
)
,shredded as (
select Code,Location,Quantity,Store,t.*
from data
cross apply [dbo].[DelimitedSplit8K](data.Location,',') as t
)
select
pvt.Code,pvt.Quantity,pvt.Store
,cast(isnull(pvt.[1],' ') as varchar(8)) as Loc1
,cast(isnull(pvt.[2],' ') as varchar(8)) as Loc2
,cast(isnull(pvt.[3],' ') as varchar(8)) as Loc3
,cast(isnull(pvt.[4],' ') as varchar(8)) as Loc4
,cast(isnull(pvt.[5],' ') as varchar(8)) as Loc5
,cast(isnull(pvt.[6],' ') as varchar(8)) as Loc6
from shredded
pivot (max(Item) for ItemNumber in ([1],[2],[3],[4],[5],[6])) pvt;
;
go
yielding:
Code Quantity Store Loc1 Loc2 Loc3 Loc4 Loc5 Loc6
----------------- ----------- ----- -------- -------- -------- -------- -------- --------
L698-W-EA 2 A
L82009-EA 2 A A1K2 A1N2 C4Y3 CBP2
L80401-A-EA 17 A A1S2 SHIP R2F1 CBP5 BRP BRP1-20
CWD2132W-BOX-25PK 1 M A-AISLE
GM22660003-EA 1 M B1K2