Destruct column into multiple columns with Regex on Azure? - sql

I've encountered a problem which I think can only be solved by regex functions only.
Sadly the support for regex based operations seems to be very poor on Microsoft side.
(Forgive me if I'm wrong — this is the first time I've had to use this platform.)
What I've:
~400 million records in MS Azure SQL DB
encoded functionality what I've to decode into multiple columns (later I'll join metadata by these columns)
What I need:
a regex based function which can parse out the data (the output will be written into de1-6 columns)
The encoded column (what needs to be decoded) looks like this:
|encoded_val |
|-------------|
|PIT273OF_21 |
|PT273CT_21 |
|LT171CT2_31 |
|TV273JM_11 |
|TV273CND_13 |
|FIT865_11_CLC|
|AT865_104 |
|E865MFSP01 |
|LIT273CU_61 |
|E273_RH |
|E273CU_GTH |
|VSZ171JM_31 |
|E171CU_GTH |
|IT171RC_11 |
|WY171CU_61N |
|FV864_11 |
I need to decode this column with a regexp and create multiple columns with
| encoded | de1 | de2 | de3 | de4 | de5 | de6 |
|---------------|-----|-----|------|------|------|------|
| PIT273OF_21 | PIT | 273 | OF | NULL | 21 | NULL |
| PT273CT_21 | PT | 273 | CT | NULL | 21 | NULL |
| LT171CT2_31 | LT | 171 | CT | 2 | 31 | NULL |
| TV273JM_11 | TV | 273 | JM | NULL | 11 | NULL |
| TV273CND_13 | TV | 273 | CND | NULL | 13 | NULL |
| FIT865_11_CLC | FIT | 865 | NULL | NULL | 11 | CLC |
| AT865_104 | AT | 865 | NULL | NULL | 104 | NULL |
| E865MFSP01 | E | 865 | MFSP | 01 | NULL | NULL |
| LIT273CU_61 | LIT | 273 | CU | NULL | 61 | NULL |
| E273_RH | E | 273 | NULL | NULL | NULL | RH |
| E273CU_GTH | E | 273 | CU | NULL | NULL | GTH |
| VSZ171JM_31 | VSZ | 171 | JM | NULL | 31 | NULL |
| E171CU_GTH | E | 171 | CU | NULL | NULL | GTH |
| IT171RC_11 | IT | 171 | RC | NULL | 11 | NULL |
| WY171CU_61N | WY | 171 | CU | NULL | 61 | N |
| FV864_11 | FV | 864 | NULL | NULL | 11 | NULL |
Problems
the format is not fixed
the length of the blocks can vary
there can be missing values
... but let's say, I've some magic regex pattern, what can parse useful data from any string
What I've tried:
compute columns - they seem to only construct new columns from existing ones, not to destruct existing columns into new columns in clever way with pattern matching
user defined functions - I have not figured out yet how they could help, but they seem promising
overcomplicated functions that are impossible to understand/maintain; their execution time seems to be unacceptable
-- Extracts the first contiguous run of digits from [colname] and stores it as a smallint:
-- the inner PATINDEX('%[0-9]%') finds the first digit, SUBSTRING takes the tail from there,
-- and the outer PATINDEX('%[^0-9]%', ... + '*') finds where the digits end (the appended '*'
-- is a sentinel guaranteeing a non-digit match even when the string ends in digits).
-- NOTE(review): assumes [colname] contains at least one digit; if it does not, the inner
-- PATINDEX returns 0 and SUBSTRING starts at position 0 -- verify against the data.
update [bd].[table]
set unitid = cast(LEFT(SUBSTRING([colname],PATINDEX('%[0-9]%',[colname]),100),PATINDEX('%[^0-9]%',SUBSTRING([colname],PATINDEX('%[0-9]%',[colname]),100) + '*') -1) as smallint);
Question:
What is the appropriate way to do this fast (on 400M records)?

-- Demo schema: raw encoded tag values to be split into comma-separated parts.
CREATE TABLE [dbo].[Sample](
[id] [int] IDENTITY(1,1) NOT NULL,
[encoded] [varchar](60) NOT NULL,
CONSTRAINT [PK_Sample] PRIMARY KEY CLUSTERED
(
[id] ASC
)WITH (PAD_INDEX = OFF, STATISTICS_NORECOMPUTE = OFF, IGNORE_DUP_KEY = OFF, ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = ON) ON [PRIMARY]
) ON [PRIMARY]
GO
-- Explicit column list: [id] is IDENTITY, so only [encoded] is supplied.
insert into [Sample] ([encoded]) values ('FIT865_11_CLC')
insert into [Sample] ([encoded]) values ('PIT273OF_21')
-- T-SQL local variables require the @ prefix (# denotes temp tables and does not
-- compile as a variable name); the original post's # markers are corrected here.
declare @count int
declare @id int
declare @content varchar(50)
declare @table table
(
id int,
String varchar(256)
)
set @count = (select count(1) from sample)
set @id = 0
-- Row-by-row scan of Sample, paged by id (keyset pagination on @id).
while(@count > 0)
begin
if (@id > 0)
select top(1) @id = id, @content = encoded from Sample where id > @id order by id
else
select top(1) @id = id, @content = encoded from Sample order by id
declare @len int = len(@content)
declare @position int = 1
declare @output varchar(256) = ''
declare @currentChar varchar(1)
declare @lastChar varchar(1)
-- Walk the string one character at a time, inserting a comma at every
-- digit<->non-digit transition so the value becomes a comma-separated list.
while(@len > 0)
begin
set @currentChar = (select substring(@content, @position, 1))
if(@position = 1)
begin
set @lastChar = @currentChar
end
-- NOTE(review): ISNUMERIC also treats '.', '+', '-', '$', etc. as numeric;
-- that is fine for [A-Z0-9_] tags -- confirm the data cannot contain others.
if(ISNUMERIC(@currentChar)<> ISNUMERIC(@lastChar)) --alpha/digit boundary
set @output = @output + ',' + @currentChar
else
set @output = @output + @currentChar
set @len = @len -1
set @position = @position + 1
set @lastChar = @currentChar
end
set @count = @count - 1
-- Underscores are block separators as well.
set @output = (select replace (@output,'_',','))
insert into @table values (@id, @output)
end
-- Turn each comma-separated string into a JSON array and project up to eight
-- elements as columns (embedded double quotes are escaped first).
SELECT DISTINCT B.*
FROM @table A
CROSS APPLY (
SELECT
JSON_VALUE(J,'$[0]') AS de0
,JSON_VALUE(J,'$[1]') AS de1
,JSON_VALUE(J,'$[2]') AS de2
,JSON_VALUE(J,'$[3]') AS de3
,JSON_VALUE(J,'$[4]') AS de4
,JSON_VALUE(J,'$[5]') AS de5
,JSON_VALUE(J,'$[6]') AS de6
,JSON_VALUE(J,'$[7]') AS de7
FROM (VALUES ('["'+replace(replace(String,'"','\"'),',','","')+'"]')) A(J)
) B

Related

Linking Related IDs together through two other ID columns

I have a table of about 100k rows with the following layout:
+----+-----------+------------+-------------------+
| ID | PIN | RAID | Desired Output ID |
+----+-----------+------------+-------------------+
| 1 | 80602627 | 1737852-1 | 1 |
| 2 | 80602627 | 34046655-1 | 1 |
| 3 | 351418172 | 33661 | 2 |
| 4 | 351418172 | 33661 | 2 |
| 5 | 351418172 | 33661 | 2 |
| 6 | 351418172 | 34443321-1 | 2 |
| 7 | 491863017 | 26136 | 3 |
| 8 | 491863017 | 34575 | 3 |
| 9 | 491863017 | 34575 | 3 |
| 10 | 661254727 | 26136 | 3 |
| 11 | 661254727 | 26136 | 3 |
| 12 | NULL | 7517 | 4 |
| 13 | NULL | 7517 | 4 |
| 14 | NULL | 7517 | 4 |
| 15 | NULL | 7517 | 4 |
| 16 | NULL | 7517 | 4 |
| 17 | 554843813 | 33661 | 2 |
| 18 | 554843813 | 33661 | 2 |
+----+-----------+------------+-------------------+
The ID column has unique values, with the PIN and RAID columns being two separate identifying numbers used to group linked IDs together. The Desired Output ID column is what I would like SQL to do, essentially looking at both the PIN and RAID columns to spot where there are any relationships between them.
So for example Where Desired Output ID = 2, IDs 3-6 match on PIN = 351418172, and then IDs 17-18 also match as the RAID of 33661 was in the rows for IDs 3-5.
To add as well, NULLs will be in the PIN Column but not in any others.
I did spot a similar question; however, as it is in BigQuery I wasn't sure it would help.
Have been trying to crack this one for a while with no luck, any help massively appreciated.
I suppose DENSE_RANK can solve your problem. Not sure what the combination of PIN and RAID should be, but I think you'll be able to figure it out how to do it like this:
-- Assign one group number per PIN (NULL PINs fall back to the row's own id,
-- so they never collapse into a shared group) and another per RAID.
SELECT
    *,
    DENSE_RANK() OVER (ORDER BY ISNULL(pin, id)),
    DENSE_RANK() OVER (ORDER BY raid)
FROM accounts
I believe I have found a bit of a bodged solution to this. It runs very slowly as it goes row by row and will only go two links deep on PIN/RAID, but this should be sufficient for 99%+ cases.
Would appreciate any suggestions to speeding it up if anything is immediately obvious.
ID in post above is DebtorNo in Code:
-- Groups rows that share a PIN or RAID (expanding the relationship two links
-- deep: RAID -> PIN -> RAID -> PIN), assigning each group a sequential FRAID.
-- One seed row is processed per loop iteration.
-- T-SQL local variables require the @ prefix; the original post's #Counter /
-- #EndCounter do not compile (# denotes temp tables, which correctly keep it).
DECLARE @Counter INT = 1
DECLARE @EndCounter INT = 0
IF OBJECT_ID('Tempdb..#OrigACs') IS NOT NULL
BEGIN
DROP TABLE #OrigACs
END
-- NULL PINs are replaced by the row's DebtorNo so they never link rows together.
SELECT DebtorNo,
Name,
PostCode,
DOB,
RAJoin,
COALESCE(PIN,DebtorNo COLLATE DATABASE_DEFAULT) AS PIN,
RelatedAssets,
RAID,
PINRelatedAssets
INTO #OrigACs
FROM MIReporting..HC_RA_Test_Data RA
IF OBJECT_ID('Tempdb..#Accounts') IS NOT NULL
BEGIN
DROP TABLE #Accounts
END
SELECT *,
ROW_NUMBER() OVER (ORDER BY CAST(RA.DebtorNo AS INT)) AS Row
INTO #Accounts
FROM #OrigACs RA
ORDER BY CAST(RA.DebtorNo AS INT)
CREATE INDEX Temp_HC_Index ON #OrigACs (RAID,PIN)
SET @EndCounter = (SELECT MAX(Row) FROM #Accounts)
WHILE @Counter <= @EndCounter
BEGIN
-- Expand the seed row's group: matching RAIDs, then their PINs, then those
-- PINs' RAIDs, then those RAIDs' PINs (two links deep in each direction).
IF OBJECT_ID('Tempdb..#RAID1') IS NOT NULL
BEGIN
DROP TABLE #RAID1
END
SELECT *
INTO #RAID1
FROM #OrigACs A
WHERE A.RAID IN (SELECT RAID FROM #Accounts WHERE [Row] = @Counter)
IF OBJECT_ID('Tempdb..#PIN1') IS NOT NULL
BEGIN
DROP TABLE #PIN1
END
SELECT *
INTO #PIN1
FROM #OrigACs A
WHERE A.PIN IN (SELECT PIN FROM #RAID1)
IF OBJECT_ID('Tempdb..#RAID2') IS NOT NULL
BEGIN
DROP TABLE #RAID2
END
SELECT *
INTO #RAID2
FROM #OrigACs A
WHERE A.RAID IN (SELECT RAID FROM #PIN1)
IF OBJECT_ID('Tempdb..#PIN2') IS NOT NULL
BEGIN
DROP TABLE #PIN2
END
SELECT *
INTO #PIN2
FROM #OrigACs A
WHERE A.PIN IN (SELECT PIN FROM #RAID2)
INSERT INTO MIReporting..HC_RA_Final_ACs
SELECT DebtorNo,
Name,
PostCode,
DOB,
RAJoin,
-- Restore NULL for PINs that were substituted with the DebtorNo above.
CASE
WHEN PIN = DebtorNo COLLATE DATABASE_DEFAULT THEN NULL
ELSE PIN
END AS PIN,
RelatedAssets,
RAID,
PINRelatedAssets,
-- The MAX(FRAID) subquery is evaluated once per INSERT statement, so every
-- row of this group receives the same new group id.
COALESCE((SELECT MAX(FRAID) FROM MIReporting..HC_RA_Final_ACs),0) + 1 AS FRAID
FROM #PIN2
-- Jump to the lowest-numbered row not yet assigned to a group; MIN returns
-- NULL when none remain, which ends the loop.
-- NOTE(review): NOT IN is only safe while DebtorNo is never NULL -- confirm.
SET @Counter = (SELECT MIN([ROW]) FROM #Accounts O WHERE O.DebtorNo NOT IN (SELECT DebtorNo FROM MIReporting..HC_RA_Final_ACs));
END;
SELECT *
FROM MIReporting..HC_RA_Final_ACs
DROP TABLE #OrigACs
DROP TABLE #Accounts
DROP TABLE #RAID1
DROP TABLE #PIN1
DROP TABLE #RAID2
DROP TABLE #PIN2

Bring information from 2 tables and put it in the same column

I have this table of articles
---------------------------------------------
| Article | Location | existence |
---------------------------------------------
| 200116 | cc3111 | 1 |
---------------------------------------------
| 200116 | ee3091 | 1 |
---------------------------------------------
And this count table
----------------------------------------------------
| Article | Location | Quantity |
----------------------------------------------------
| 200116 | cc3111 | 10 |
----------------------------------------------------
| 200116 | EE3091 | 8 |
----------------------------------------------------
| 200116 | EE2102 | 5 |
----------------------------------------------------
| 200116 | DD5131 | 7 |
----------------------------------------------------
What I'm trying to do is give me one result like the next.
--------------------------------------------------------------------------
| Article | Location | Existence | Quantity | DIF |
--------------------------------------------------------------------------
| 200116 | CC3111 | 1 | 10 | 9 |
--------------------------------------------------------------------------
| 200116 | EE3091 | 1 | 8 | 7 |
--------------------------------------------------------------------------
| 200116 | DD5131 | 0 | 7 | 7 |
--------------------------------------------------------------------------
| 200116 | EE2102 | 0 | 5 | 5 |
----------------------------- -------------------------------------------
But without doing it with a Union, since then I would have to make another query; I just do not know how to join it without using Union.
-SOME HELP TO PERFORM THIS QUERY
-I do not know how to add the location in the same column
This looks like a left join:
-- LEFT JOIN keeps every count row (t2); articles missing from t1 get existence = 0.
select
    t2.*,
    coalesce(t1.existence, 0) as existence,
    (t2.quantity - coalesce(t1.existence, 0)) as dif
from t2
left join t1
    on t2.article = t1.article
    and t2.location = t1.location;
Here's another option...
-- Reproducible demo: FULL JOIN #Article to #Counts so locations present in only
-- one of the two tables still appear, with the missing side defaulted to 0.
IF OBJECT_ID('tempdb..#Article', 'U') IS NOT NULL
DROP TABLE #Article;
CREATE TABLE #Article (
Article INT NOT NULL,
Location CHAR(6) NOT NULL,
Existance INT NOT NULL
);
IF OBJECT_ID('tempdb..#Counts', 'U') IS NOT NULL
DROP TABLE #Counts;
CREATE TABLE #Counts (
Article INT NOT NULL,
Location CHAR(6) NOT NULL,
Quantity INT NOT NULL
);
INSERT #Article(Article, Location, Existance) VALUES
(200116, 'cc3111', 1), (200116, 'ee3091', 1);
-- DD5131 has Quantity 7 in the question's sample data and expected output
-- (the original answer inserted 10, which contradicted both).
INSERT #Counts(Article, Location, Quantity) VALUES
(200116, 'cc3111', 10), (200116, 'EE3091', 8),
(200116, 'EE2102', 5), (200116, 'DD5131', 7);
SELECT * FROM #Article a;
SELECT * FROM #Counts c;
--=====================================================
-- COALESCE on the join keys picks whichever side matched; COALESCE(..., 0)
-- turns the NULLs produced by the outer join into zeros for the arithmetic.
SELECT
Article = COALESCE(a.Article, c.Article),
Location = COALESCE(a.Location, c.Location),
Existance = COALESCE(a.Existance, 0),
Quantity = COALESCE(c.Quantity, 0),
DIF = COALESCE(c.Quantity, 0) - COALESCE(a.Existance, 0)
FROM
#Article a
FULL JOIN #Counts c
ON a.Article = c.Article
AND a.Location = c.Location;
You need to JOIN these tables instead of doing a UNION. The former will put columns side by side, whereas the latter will stack two similar sources together...

Transforming Data From 2 Tables into 1

I'm working with a database that allows the storage of "Custom Property" fields with each record in an "Item" table. This is done by having preset fields called [CustomString00] through [CustomString199], [CustomNumber00] through [CustomNumber199] and [CustomDate00] through [CustomDate199] in the Item table. There is another table called the "CustomProperty" table that assigns the name to each custom field and the column to use in the Item table. Here is how it looks.
Item:
| Id | CustomString00| ... | CustomString199 | CustomNumber00 | ... | CustomNumber199 | CustomDate00 | ... | CustomDate199 |
| 1 | 'IN REPAIR' | ... | NULL | 78.4 | ... | NULL | 2017-03-04 | ... | NULL |
| 2 | 'FINISHED' | ... | NULL | 68.5 | ... | NULL | 2017-03-05 | ... | NULL |
| 3 | 'WIP' | ... | NULL | NULL | ... | NULL | 2017-03-07 | ... | NULL |
CustomProperty:
| Name | Type| ColumnName |
| 'Status' | 0 | 'CustomString00' |
| 'Temperature' | 1 | 'CustomNumber00' |
| 'Made Date' | 2 | 'CustomDate00' |
For each Custom Property that is defined, there will be a record in the CustomProperty table that will indicate what data type it is and which column to use for that property. Currently, there could be up to 200 Custom Properties defined for each type, ie, 200 Text, 200 Date and 200 Numeric. The user defines the Custom Properties as they need them. If a user is only using 55 total custom properties, then a lot of the fields in the Item table will not be used.
I would like to create a view that is more 'friendly' so that our users can create their own reports to show these properties. This view would use these two tables to create a new table that looked like this:
| Id | Status | Temperature | Made Date |
| 1 | 'IN REPAIR' | 78.4 | 2017-03-04 |
| 2 | 'FINISHED' | 68.5 | 2017-03-05 |
| 3 | 'WIP' | NULL | 2017-03-07 |
This view should show a column for each property that is defined in the Custom Property table. For This example, there are only 3 Custom Properties defined, so 3 fields are shown in this view. If all 600 Custom Properties were defined, then there would be 600 fields in this view. If there is a value stored for that Custom Property in the Item table, then that value is shown. If there is no value then a NULL would be shown for that property (as shown in Temperature for Item 3).
Using Dynamic SQL I've got some results, but not what I'm looking for. I've made a query that Unpivots the Custom Property fields and returns a result of Items like this:
| Id | CPName | CPTextValue | CPNumberValue | CPDateValue |
| 1 | 'Status' | 'IN REPAIR' | NULL | NULL |
| 1 | 'Temperature' | NULL | 78.4 | NULL |
| 1 | 'Made Date' | NULL | NULL | 2017-03-04 |
| 2 | 'Status' | 'FINISHED ' | NULL | NULL |
| 2 | 'Temperature' | NULL | 68.5 | NULL |
| 2 | 'Made Date' | NULL | NULL | 2017-03-05 |
| 3 | 'Status' | 'WIP' | NULL | NULL |
| 3 | 'Made Date' | NULL | NULL | 2017-03-07 |
My query is getting pretty complicated, so I'm wondering if I'm taking the wrong approach. Here is what I've done so far.
-- Build three comma-separated, bracket-quoted column lists (one per custom
-- property type) and UNPIVOT each group, labelling every value with its
-- user-defined property name from CustomProperty.
-- T-SQL local variables require the @ prefix (the original post used #).
DECLARE @textcolsUnpivot AS NVARCHAR(MAX),
@datecolsUnpivot AS NVARCHAR(MAX),
@numbercolsUnpivot AS NVARCHAR(MAX),
@query AS NVARCHAR(MAX)
-- Type 0 = string properties (CustomString* columns).
select @textcolsUnpivot
= stuff((select ','+quotename(columnname)
from customproperty
where custompropertytype = 0
order by columnname
for xml path('')), 1, 1, '')
-- Type 2 = date properties (CustomDate* columns). The original filtered on 1
-- here, but per the CustomProperty sample data 1 is the numeric type and 2 is
-- the date type, which put date columns in the numeric branch and vice versa.
select @datecolsUnpivot
= stuff((select ','+quotename(columnname)
from customproperty
where custompropertytype = 2
order by columnname
for xml path('')), 1, 1, '')
-- Type 1 = numeric properties (CustomNumber* columns). (Original had 2 -- swapped.)
select @numbercolsUnpivot
= stuff((select ','+quotename(columnname)
from customproperty
where custompropertytype = 1
order by columnname
for xml path('')), 1, 1, '')
-- UNION ALL instead of UNION: the three branches are disjoint (each yields
-- rows whose non-null value column differs), so deduplication is wasted work.
set @query
= 'select id, CPName, CPTextValue, NULL as CPDateValue, NULL as CPNumberValue from
(select id, CPTextValue, CPCol from item
unpivot
(
CPTextValue
for CPCol in ('+ @textcolsUnpivot +')
) unpiv ) as pv
inner join
(select columnname, name as CPName, custompropertytype from customproperty) as cp
on cp.columnname = pv.CPCol
union all
select id, CPName, NULL, CPDateValue, NULL from
(select id, CPDateValue, CPCol from item
unpivot
(
CPDateValue
for CPCol in ('+ @datecolsUnpivot +')
) unpiv ) as pv
inner join
(select columnname, name as CPName, custompropertytype from customproperty) as cp
on cp.columnname = pv.CPCol
union all
select id, CPName, NULL, NULL, CPNumberValue from
(select id, CPNumberValue, CPCol from item
unpivot
(
CPNumberValue
for CPCol in ('+ @numbercolsUnpivot +')
) unpiv ) as pv
inner join
(select columnname, name as CPName, custompropertytype from customproperty) as cp
on cp.columnname = pv.CPCol
'
exec sp_executesql @query;
For additional clarification, the schema of the tables are:
Item:
Id - pk, (it's actually a GUID, but I'm using an int for this example.), not null
CustomString00 through CustomString199 - nvarchar(max), null
CustomDate00 through CustomDate199 - datetime, null
CustomNumber00 through CustomNumber199 - float, null
CustomProperty:
Name - nvarchar(100),not null
Type - int, not null
ColumnName - nvarchar(50), not null
If I was to continue my current approach, I think I need to now PIVOT the results of my previous query to put it in the form that I'm looking for. Is this correct?

I want generate XML file in a hierarchical form

I have a table like this (actually it contains more than 6000 records)
IdIndustry | IndustryCode | IndustryName | ParentId
---------------------------------
1 | IND | Industry | NULL
2 | PHARM | Pharmacy | 1
3 | FIN | Finance | NULL
4 | CFIN | Corporate | 3
5 | CMRKT | Capital M | 4
DDL:
-- Industry hierarchy: ParentId refers to another row's IdIndustry (NULL = root).
CREATE TABLE [dbo].[tblIndustryCodes](
[IdIndustry] [int] IDENTITY(1,1) NOT NULL,
[IndustryCode] [nvarchar](5) NULL,
[IndustryName] [nvarchar](50) NULL,
[ParentId] [int] NULL,
CONSTRAINT [PK_tblIndustryCodes] PRIMARY KEY CLUSTERED ([IdIndustry] ASC)
) -- closing parenthesis of the table definition was missing in the original post
Inserts:
-- Seed rows: two root industries (IND, FIN) and their descendants.
INSERT INTO [tblIndustryCodes] ([IndustryCode], [IndustryName], [ParentId])
VALUES
('IND','Industry',NULL),
('PHARM','Pharmacy',1),
('FIN','Finance',NULL),
('CFIN','Corporate Finance',3),
('CMRKT','Capital Markets',4)
And i want to generate a XML file like this(Simplified tree like structure)
<IND>
<PHARM>
</PHARM>
</IND>
<FIN>
<CFIN>
<CMRKT>
</CMRKT>
</CFIN>
<FIN>
I don't want to use recursion as it would downgrade the performance dramatically as this table has more than 60000 records in table.
I would be glad if i get the output in same format, since i will be using this output XML to send a request.
And more importantly it will be dynamic in nature.
Try this procedure not much sure about its efficiency as I am creating a temp table to get result
-- Builds a materialised path (root/child/.../node) for every row without
-- recursion, by repeatedly joining rows whose path is known to their children.
-- T-SQL local variables require the @ prefix (the original post used #).
create procedure get_path as begin
DECLARE @cnt INT
DECLARE @n INT
DECLARE @tmpTable TABLE(id int,
indCode varchar(50),
indName varchar(100),
parentId int,
path varchar(500))
-- NOTE(review): the source table is named [tbl] in the original answer;
-- point this at the actual table (e.g. [tblIndustryCodes]) before use.
insert @tmpTable
select [IdIndustry], [IndustryCode], [IndustryName], [ParentId],
null from tbl
select @cnt = count(*) from @tmpTable where parentId is null
-- Seed depth-1 rows: path = root name / own name.
update a set a.path = CONCAT(b.indName,'/',a.indName) from @tmpTable a, @tmpTable b where b.parentid is null and a.parentid = b.id
select @n = count(*) from @tmpTable where path is null
-- Roots keep a NULL path until the final UPDATE, so @n never drops below @cnt.
while (@cnt < @n) begin
-- b.path already ends in b.indName, so only a.indName is appended here.
-- (The original concatenated b.indName again, duplicating the parent name,
-- e.g. 'Finance/Corporate/Corporate/Capital M' in the posted results.)
update a set a.path = concat(b.path, '/', a.indName) from @tmpTable a, @tmpTable b where b.path is not null and a.parentid = b.id
select @n = count(*) from @tmpTable where path is null
end
update @tmpTable set path = indName where parentid is null
select * from @tmpTable order by path
end
go
Query 1:
exec get_path
Results:
| ID | INDCODE | INDNAME | PARENTID | PATH |
-------------------------------------------------------------------------------
| 3 | FIN | Finance | (null) | Finance |
| 4 | CFIN | Corporate | 3 | Finance/Corporate |
| 5 | CMRKT | Capital M | 4 | Finance/Corporate/Corporate/Capital M |
| 1 | IND | Industry | (null) | Industry |
| 2 | PHARM | Pharmacy | 1 | Industry/Pharmacy |
Hope this helps.....
SQL FIDDLE

How to filter empty columns from query in SQL?

I have a fairly wide table, which is sparsely populated with data. When I query it, I tend to get lots of VARCHAR columns that are empty.
A | B | C | D | E | F | G | H | I | J | K | L |
| x | | | x | x | | x | | | | |
| | | | x | x | | | | | | x |
| | | | | x | | | | | | |
| x | | | x | x | | | | | | x |
| | | | x | x | | | | x | | |
| | x | | x | | | | | x | | x |
| x | | | x | x | | x | | x | | x |
How can I filter out empty columns from the result set? I can't find a SQL keyword that seems to apply.
B | C | E | F | H | J | L |
x | | x | x | x | | |
| | x | x | | | x |
| | | x | | | |
x | | x | x | | | x |
| | x | x | | x | |
| x | x | | | x | x |
x | | x | x | x | x | x |
Edit: This is for display purposes, and I wasn't planning to modify the table with data from the result set. I did consider that from an MVC perspective that it makes sense to leave the display of data to the view, but thought it not very efficient in terms of bandwidth. Perhaps that's not a worthwhile argument for doing it this way.
That's a... really weird request. Are you using select *? The easiest way to fix that by far is to just say what columns you do want and you'll only get those back.
Why would you even want to randomly have disappearing columns depending on the range of values you try to select? What if your program/report/whatever is expecting a specific column to be present (even if null) and it gets silently removed because it is always null for the range?
OK, so this is a somewhat complex set of dynamic-SQL generation that will give you what you're looking for. You'd have to stick this into a stored procedure if you want to use it.
FilterColTest is a test table I used for testing. I'll leave the definition, etc., in the query so that you can make the appropriate adjustments for your table/columns.
/*
create table FilterColTest (
a int, b int, c int, d int, e int, f int, g int, h int, i int, j int)
insert into FilterColTest
select null,1,null,null,1,0,null,1,null,null
union select null,null,null,null,0,0,null,null,null,null
union select null,1,null,null,1,0,null,1,null,1
union select null,1,null,null,1,1,null,1,null,null
union select 1,1,0,null,1,0,null,1,null,null
--select * from FilterColTest
go
*/
-- Dynamic-SQL approach: probe each column of FilterColTest for any non-NULL
-- value, collect the non-empty columns into @cols, then select only those.
-- T-SQL local variables require the @ prefix (the original post used #).
declare @ColumnList table (ID int identity, colName varchar(max))
insert into @ColumnList(colName)
select column_name
from information_schema.columns
where table_name = 'FilterColTest'
declare
@id int, @maxid int, @count int,
@cols varchar(max), @sql nvarchar(max)
select @id = 1, @maxid = max(ID)
from @ColumnList
while @id <= @maxid
begin
-- One probe query per column; quotename() brackets the identifier so names
-- with spaces/reserved words cannot break (or inject into) the dynamic SQL.
select @sql = 'select @count = count(*) from FilterColTest where ' +
quotename(colName) + ' is not null'
from @ColumnList
where ID = @id
exec sp_executesql @sql, N'@count int output', @count output
-- Append the column only when at least one row had a value in it.
select @cols = isnull(@cols + ', ' + quotename(colName), quotename(colName))
from @ColumnList
where ID = @id and @count > 0
set @id = @id + 1
end
select @sql = 'select ' + @cols + ' from FilterColTest'
exec sp_executesql @sql
go
/*
drop table FilterColTest
go
*/