Drop empty column SQL Server

I need to remove all the empty columns for a given table.
As it could be any table, I do not know the names of the columns.
For example, the input table is table1:
Id  Value1  Value2  Value3  Value4
1   Cell 2  NULL    Cell 2  NULL
2   Cell 4  NULL    Cell 4  NULL
The output table should be:
Id  Value1  Value3
1   Cell 2  Cell 2
2   Cell 4  Cell 4

You can try this:
set @sql = null;
select concat_ws(', ',
case when count(nullif(ID, '')) > 0 then 'ID' end,
case when count(nullif(Value1, '')) > 0 then 'Value1' end,
case when count(nullif(Value2, '')) > 0 then 'Value2' end,
case when count(nullif(Value3, '')) > 0 then 'Value3' end,
case when count(nullif(Value4, '')) > 0 then 'Value4' end
) into @sql
from table1;
set @sql = concat('select ', @sql, ' from table1 where ',
(
SELECT INSERT( GROUP_CONCAT('OR `', `COLUMN_NAME`, '` != \'\' ' SEPARATOR ' '), 1, 3, '')
FROM `information_schema`.`COLUMNS`
WHERE `TABLE_SCHEMA` = 'mydb'
AND `TABLE_NAME` = 'table1'
)
);
prepare stmt from @sql;
execute stmt;
deallocate prepare stmt;
The nullif turns empty strings into NULL so that they count as empty too; if you only want to treat NULL as empty, remove the nullif.
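The answer above uses MySQL syntax (user variables, GROUP_CONCAT, PREPARE), so it will not run on SQL Server as-is. Below is a minimal T-SQL sketch of the same idea, under a few assumptions: the table is named dbo.table1, every column can be cast to nvarchar(max), and "empty" means every value is NULL or the empty string. It actually drops the empty columns, so try it on a copy of the table first.
DECLARE @col sysname, @sql nvarchar(max), @cnt int;
DECLARE col_cur CURSOR LOCAL FAST_FORWARD FOR
    SELECT name FROM sys.columns WHERE object_id = OBJECT_ID(N'dbo.table1');
OPEN col_cur;
FETCH NEXT FROM col_cur INTO @col;
WHILE @@FETCH_STATUS = 0
BEGIN
    -- count the values that are neither NULL nor an empty string
    SET @sql = N'SELECT @cnt = COUNT(NULLIF(CAST(' + QUOTENAME(@col)
             + N' AS nvarchar(max)), N'''')) FROM dbo.table1;';
    EXEC sp_executesql @sql, N'@cnt int OUTPUT', @cnt = @cnt OUTPUT;
    IF @cnt = 0
    BEGIN
        -- the column holds no data at all, so drop it
        SET @sql = N'ALTER TABLE dbo.table1 DROP COLUMN ' + QUOTENAME(@col) + N';';
        EXEC (@sql);
    END;
    FETCH NEXT FROM col_cur INTO @col;
END;
CLOSE col_cur;
DEALLOCATE col_cur;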


Split string in column and add value in column

I have a table with several rows of data like this:
16 W:\2-Work\ALBO\00_Proposal\ALxO_Amendement #1_20091022_signed.pdf
17 W:\2-Work\ALBO\00_Proposal\Level1\ALBO_Amendment #1_20110418.docx
18 W:\2-Work\ALBO\00_Proposal\A\BR\T\X_#1_20110418_final.docx
19 W:\2-Work\ALBO\MyOptionl\AO_Amendment_2 August 2013.docx
I have created columns from Col1 to Col10
I would like to split each value on the delimiter '\'.
The idea is to have in each column:
Col1 | Col2 | Col3 | Col4 | Col5 | etc...
W: | 2-Work | ALBO | 00_Proposal | ALxO_Amendement #1_20091022_signed.pdf
I know how to use charindex and substring, but the number of '\' differs on each line (8500 rows).
Could you help me?
I'm using Microsoft SQL Server 2012.
Thank you very much
Edit 2014/06/24
My goal is to generate an XML of the full path and split path.
Actually, here is my idea:
1 - Identify all the IDs in a temporary table in order to loop over them
--Declare a temporary table
declare @IdTable Table (
id int,
src nvarchar(max))
--Insert all the existing ids from the table
insert into @IdTable (id, src)
select id, src from albo
--Start with the smallest id
declare @id int = (select min(id) from ALBO)
--Keep looping while there are ids left
while @id is not null
begin
print @id
select @id = min(id) from @IdTable where ID > @id
end
--End of the id loop
2 - Split each row and update the columns (Colx => the columns have been created beforehand)
This code should be placed into my previous loop.
Declare @products varchar(max) = 'W:\2-Work\ALBO\13_WP Reporting\13_07_Monthly reports\13_07_01 Archives\2012\201211\Draft\ALBO-MR-201211\gp_scripts\v1\Top10_duree_final.txt'
Declare @individual varchar(max) = null
WHILE LEN(@products) > 0
BEGIN
IF PATINDEX('%\%',@products) > 0
BEGIN
SET @individual = SUBSTRING(@products, 0, PATINDEX('%\%',@products))
select @individual --I still have to make an update with the ID here
SET @products = SUBSTRING(@products, LEN(@individual + '\') + 1,
LEN(@products))
END
ELSE
BEGIN
SET @individual = @products
SET @products = NULL
print @individual
END
END
As others have said, this probably isn't the best way to do things; if you explain what you'll be doing with the results, it might help us provide a better option.
drop table #Path
create table #Path (item bigint,location varchar(1000))
insert into #Path
select 16 ,'W:\2-Work\ALBO\00_Proposal\ALxO_Amendement #1_20091022_signed.pdf' union
select 17 ,'W:\2-Work\ALBO\00_Proposal\Level1\ALBO_Amendment #1_20110418.docx' union
select 18 ,'W:\2-Work\ALBO\00_Proposal\A\BR\T\X_#1_20110418_final.docx' union
select 19 ,'W:\2-Work\ALBO\MyOptionl\AO_Amendment_2 August 2013.docx'
select * from #Path;
with Path_Expanded(item,subitem,location, start, ending, split)
as(
select item
, 1 --subitem begins at 1
, location -- full location path
, 0 --start searching the file from the 0 position
, charindex('\',location) -- find the 1st '\' character
, substring(location,0,charindex('\',location)) --return the string from the start position, 0, to the 1st '\' character
from #Path
union all
select item
, subitem+1 --add 1 to subitem
, location -- full location path
, ending+1 -- start searching the file from the position after the last '\' character
, charindex('\',location,ending+1)-- find the 1st '\' character that occurs after the last '\' character found
, case when charindex('\',location,ending+1) = 0 then substring(location,ending+1,1000) --if you can't find any more '\', return everything else after the last '\'
else substring(location,ending+1, case when charindex('\',location,ending+1)-(ending+1) <= 0 then 0
else charindex('\',location,ending+1)-(ending+1) end )--returns the string between the last '\' character and the next '\' character
end
from Path_Expanded
where ending > 0 --stop once you can't find any more '\' characters
)
--pivots the results
select item
, max(case when subitem = 1 then split else '' end) as col1
, max(case when subitem = 2 then split else '' end) as col2
, max(case when subitem = 3 then split else '' end) as col3
, max(case when subitem = 4 then split else '' end) as col4
, max(case when subitem = 5 then split else '' end) as col5
, max(case when subitem = 6 then split else '' end) as col6
, max(case when subitem = 7 then split else '' end) as col7
, max(case when subitem = 8 then split else '' end) as col8
, max(case when subitem = 9 then split else '' end) as col9
, max(case when subitem = 10 then split else '' end) as col10
from Path_Expanded
group by item
You might prefer to have each folder on its own row; if so, replace the pivot part above with the query below instead:
select item
, subitem
, location
, split from Path_Expanded where item = 16
The following query will get what you are looking for; as others have noted, it's not a particularly good design. For example, what happens when you're looking for the file name and it's in a different column each time?
Regardless, this will do what you asked for (and maybe even what you want):
-- Test Data
CREATE TABLE #FilePath (FileNumber INT IDENTITY(1,1), FilePath VARCHAR(1000))
INSERT INTO #FilePath (FilePath)
SELECT 'W:\2-Work\ALBO\00_Proposal\ALxO_Amendement #1_20091022_signed.pdf' UNION
SELECT 'W:\2-Work\ALBO\00_Proposal\Level1\ALBO_Amendment #1_20110418.docx' UNION
SELECT 'W:\2-Work\ALBO\00_Proposal\A\BR\T\X_#1_20110418_final.docx' UNION
SELECT 'W:\2-Work\ALBO\MyOptionl\AO_Amendment_2 August 2013.docx'
GO
-- Numbers CTE
WITH Numbers AS
(
SELECT n = 1
UNION ALL
SELECT n + 1
FROM Numbers
WHERE n+1 <= 1000 -- set this to the maximum length of your file path
)
SELECT
FilePath,
[1] AS Col1,
[2] AS Col2,
[3] AS Col3,
[4] AS Col4,
[5] AS Col5,
[6] AS Col6,
[7] AS Col7,
[8] AS Col8,
[9] AS Col9,
[10] AS Col10
FROM
(
SELECT
FilePath,
ROW_NUMBER() OVER (PARTITION BY FilePath ORDER BY n) RowNum,
CAST(LTRIM(RTRIM(NULLIF(SUBSTRING('\' + FilePath + '\' , n , CHARINDEX('\' , '\' + FilePath + '\' , n) - n) , ''))) AS VARCHAR(1000)) FolderName
FROM Numbers, #FilePath
WHERE
n <= Len('\' + FilePath + '\') AND SubString('\' + FilePath + '\' , n - 1, 1) = '\' AND
CharIndex('\' , '\' + FilePath+ '\' , n) - n > 0
)P
PIVOT
(MAX(FolderName) FOR RowNum IN
([1],[2],[3],[4],[5],[6],[7],[8],[9],[10])
) UP
OPTION (MAXRECURSION 1000)-- set this to the maximum length of your file path
-- Clean up
DROP TABLE #FilePath
One way (de-dupes):
;with T(ordinal, path, starts, pos) as (
select 1, path, 1, charindex('\', path) from #tbl
union all
select ordinal + 1, path, pos + 1, charindex('\', path, pos + 1)
from t where pos > 0
)
select [1],[2],[3],[4],[5],[6],[7],[8],[9],[10] from (
select
ordinal, path, substring(path, starts, case when pos > 0 then pos - starts else len(path) end) token
from T
) T2
pivot (max(token) for ordinal in ([1],[2],[3],[4],[5],[6],[7],[8],[9],[10])) T3
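The #tbl this last snippet reads from is not shown in the answer; a minimal version loaded with sample paths from the question might look like this (an assumption, not part of the original answer):
create table #tbl (path varchar(1000));
insert into #tbl (path)
select 'W:\2-Work\ALBO\00_Proposal\ALxO_Amendement #1_20091022_signed.pdf' union
select 'W:\2-Work\ALBO\MyOptionl\AO_Amendment_2 August 2013.docx';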

COUNT the number of columns where a condition is true? SQL Server 2008 R2

I have a table that looks something like this:
ID  Col1  Col2  Col3  Col4
1   3     5     3     3
What I want to do is COUNT the number of 3s in this particular row.
I have tried the following:
select COUNT(*)
from INFORMATION_SCHEMA.COLUMNS
where TABLE_NAME = 'TableName' -- but obviously I need WHERE Col1 = 3 OR Col2 = 3...
What would be the best way to achieve this?
Based on what the OP asked, this can be done:
select
CASE WHEN Col1 = 3 then 1 ELSE 0 END +
CASE WHEN Col2 = 3 then 1 ELSE 0 END +
CASE WHEN Col3 = 3 then 1 ELSE 0 END +
CASE WHEN Col4 = 3 then 1 ELSE 0 END
From TableName
I don't really enjoy working with PIVOT, so here is a solution using APPLY.
SELECT
T.id
, Val
, COUNT(*)
FROM MyTable AS T
CROSS APPLY (
VALUES
(T.C1)
, (T.C2)
, (T.C3)
, (T.C4)
) AS X(Val)
GROUP BY T.Id, X.Val
ORDER BY T.Id, X.val
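If you only need the count of 3s per row, as in the question, filter the unpivoted values before grouping; a sketch reusing the same assumed table and column names (MyTable, C1 to C4):
SELECT
T.id
, COUNT(*) AS NumberOfThrees
FROM MyTable AS T
CROSS APPLY (
VALUES
(T.C1)
, (T.C2)
, (T.C3)
, (T.C4)
) AS X(Val)
WHERE X.Val = 3
GROUP BY T.id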
Please find the sample code:
DECLARE @Query VARCHAR(MAX) = 'SELECT Count = '
SELECT
@Query += '( CASE WHEN '+ COLUMN_NAME + ' = 3 THEN 1 ELSE 0 END ) + '
FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'TEST' AND COLUMN_NAME <> 'ID'
SET @Query = SUBSTRING(@Query, 1, DATALENGTH(@Query) - 2) + ' FROM TEST WHERE ID = 1'
EXEC(@Query)
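A throwaway TEST table matching the question's sample row could look like this (a sketch; the dynamic query above picks its column list up from INFORMATION_SCHEMA, so only the names need to match):
CREATE TABLE TEST (ID int, Col1 int, Col2 int, Col3 int, Col4 int);
INSERT INTO TEST (ID, Col1, Col2, Col3, Col4) VALUES (1, 3, 5, 3, 3);
-- running the dynamic query above now returns Count = 3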

Dynamic pivot data with multiple datatypes

I have a tricky problem with a pivot I need to build:
I have a table which looks like:
id  table  object  name    type  nvarchar   date  int   bit
1   1      2       name    1     tables     NULL  NULL  NULL
2   1      2       name    1     columns    NULL  NULL  NULL
3   1      2       name    1     datatypes  NULL  NULL  NULL
4   1      2       name    1     _users     NULL  NULL  NULL
1   1      3       active  3     NULL       NULL  NULL  1
2   1      3       active  3     NULL       NULL  NULL  1
3   1      3       active  3     NULL       NULL  NULL  1
4   1      3       active  3     NULL       NULL  NULL  1
The output should look like:
id  name       active
1   tables     1
2   columns    1
3   datatypes  1
4   _users     1
Based upon the "type" I should take the correct data from the matching column; these columns are formatted as nvarchar, bit, datetime, int, etc.
The "id" is the row id; the "name" and "active" headers come from the name column, and the values come from the nvarchar, date, int and bit columns.
UPDATE: the columns called nvarchar, date, int and bit (and most other SQL types) really do contain data of that type. The "type" column says which of them holds the data to be used: if "type" is 1 I want to use the "nvarchar" column, and if "type" is 3 I want to use the "bit" column, which really contains a bit and not an nvarchar. In the pivot I want the bit to end up under the "active" column; if the example had a third name, for instance "activation_date", I would want a third output column holding the value (type = 2) taken from the date column.
I am lost here, please help.
Assuming there's only one not null column for each row:
with cte as (
select
id,
name,
coalesce(
[nvarchar],
convert(nvarchar(max), [date], 120),
cast([int] as nvarchar(max)),
cast([bit] as nvarchar(max))
) as value
from Table1 as t
)
select
id,
max(case when [name] = 'name' then value end) as [name],
max(case when [name] = 'active' then value end) as [active]
from cte
group by id
sql fiddle demo
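For reference, a Table1 matching the question's sample data might be declared like this (the column names come from the question; the exact data types are an assumption):
CREATE TABLE Table1 (
    id int, [table] int, [object] int, [name] nvarchar(50), [type] int,
    [nvarchar] nvarchar(max), [date] datetime, [int] int, [bit] bit
);
INSERT INTO Table1 ([id], [table], [object], [name], [type], [nvarchar], [date], [int], [bit]) VALUES
    (1, 1, 2, 'name', 1, 'tables', NULL, NULL, NULL),
    (1, 1, 3, 'active', 3, NULL, NULL, NULL, 1);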
But I must warn you, this type of database schema is not the best way to use SQL.
If you want to do this dynamically without hardcoding columns:
declare @stmt nvarchar(max)
select @stmt =
isnull(@stmt + ', ', '') +
'max(case when [name] = ''' + name + ''' then value end) as ' + quotename([name])
from (select distinct [name] from Table1) as t
select @stmt = '
with cte as (
select
id,
name,
coalesce(
[nvarchar],
convert(nvarchar(max), [date], 120),
cast([int] as nvarchar(max)),
cast([bit] as nvarchar(max))
) as value
from Table1 as t
)
select
id, ' + @stmt + '
from cte
group by id
'
exec sp_executesql
@stmt = @stmt
sql fiddle demo
If you have some Mapping table like this:
name value
--------------------
name nvarchar
active bit
you can use this query:
declare @stmt nvarchar(max)
select @stmt =
isnull(@stmt + ', ', '') +
'max(case when [name] = ''' + name + ''' then [' + value + '] end) as ' + quotename([name])
from Mapping
select @stmt = '
select
id, ' + @stmt + '
from Table1
group by id
'
exec sp_executesql
@stmt = @stmt
sql fiddle demo
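For completeness, the Mapping table this last variant relies on could be created like this (a sketch; the column names are taken from the example above, the sysname types are an assumption):
CREATE TABLE Mapping (name sysname, value sysname);
INSERT INTO Mapping (name, value) VALUES
    ('name', 'nvarchar'),
    ('active', 'bit');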

Count the Null columns in a row in SQL

I was wondering about the possibility of counting the null columns of a row in SQL. I have a table Customer that has nullable values; simply put, I want a query that returns an int giving the number of null columns for a certain row (a certain customer).
This method assigns a 1 or 0 for null columns, and adds them all together. Hopefully you don't have too many nullable columns to add up here...
SELECT
((CASE WHEN col1 IS NULL THEN 1 ELSE 0 END)
+ (CASE WHEN col2 IS NULL THEN 1 ELSE 0 END)
+ (CASE WHEN col3 IS NULL THEN 1 ELSE 0 END)
...
...
+ (CASE WHEN col10 IS NULL THEN 1 ELSE 0 END)) AS sum_of_nulls
FROM table
WHERE Customer=some_cust_id
Note, you can also do this perhaps a little more syntactically cleanly with IF() if your RDBMS supports it.
SELECT
(IF(col1 IS NULL, 1, 0)
+ IF(col2 IS NULL, 1, 0)
+ IF(col3 IS NULL, 1, 0)
...
...
+ IF(col10 IS NULL, 1, 0)) AS sum_of_nulls
FROM table
WHERE Customer=some_cust_id
I tested this pattern against a table and it appears to work properly.
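If you are on SQL Server 2012 or later, IIF() provides the same shorthand; a sketch using the same placeholder table and column names as the snippets above:
SELECT
(IIF(col1 IS NULL, 1, 0)
+ IIF(col2 IS NULL, 1, 0)
+ IIF(col3 IS NULL, 1, 0)
+ IIF(col10 IS NULL, 1, 0)) AS sum_of_nulls
FROM table
WHERE Customer=some_cust_id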
My answer builds on Michael Berkowski's answer, but to avoid having to type out hundreds of column names, what I did was this:
Step 1: Get a list of all of the columns in your table
SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'myTable';
Step 2: Paste the list in Notepad++ (any editor that supports regular expression replacement will work). Then use this replacement pattern
Search:
^(.*)$
Replace:
\(CASE WHEN \1 IS NULL THEN 1 ELSE 0 END\) +
Step 3: Prepend SELECT identityColumnName, and change the very last + to AS NullCount FROM myTable and optionally add an ORDER BY...
SELECT
identityColumnName,
(CASE WHEN column001 IS NULL THEN 1 ELSE 0 END) +
-- ...
(CASE WHEN column200 IS NULL THEN 1 ELSE 0 END) AS NullCount
FROM
myTable
ORDER BY
NullCount DESC
For ORACLE-DBMS only.
You can use the NVL2 function:
NVL2( string1, value_if_not_null, value_if_null )
Here is a select with a similar approach to the one Michael Berkowski suggested:
SELECT (NVL2(col1, 0, 1)
+ NVL2(col2, 0, 1)
+ NVL2(col3, 0, 1)
...
...
+ NVL2(col10, 0, 1)
) AS sum_of_nulls
FROM table
WHERE Customer=some_cust_id
A more generic approach would be to write a PL/SQL-block and use dynamic SQL. You have to build a SELECT string with the NVL2 method from above for every column in the all_tab_columns of a specific table.
Unfortunately, in a standard SQL statement you will have to enter each column you want to test; to test them all programmatically you could use T-SQL. A word of warning though: ensure you are working with genuine NULLs. You can have blank stored values that the database will not recognise as a true NULL (I know this sounds strange).
You can avoid this by capturing the blank values and the NULLs in a statement like this:
CASE WHEN col1 & '' = '' THEN 1 ELSE 0 END
Or in some databases such as Oracle (not sure if there are any others) you would use:
CASE WHEN col1 || '' = '' THEN 1 ELSE 0 END
You don't state RDBMS. For SQL Server 2008...
SELECT CustomerId,
(SELECT COUNT(*) - COUNT(C)
FROM (VALUES(CAST(Col1 AS SQL_VARIANT)),
(Col2),
/*....*/
(Col9),
(Col10)) T(C)) AS NumberOfNulls
FROM Customer
Depending on what you want to do, and if you can ignore the mavens, and if you use SQL Server 2012, you could do it another way.
The total number of candidate columns ("slots") must be known.
1. Select all the known "slots" column by column (they're known).
2. Unpivot that result to get a table with one row per original column. This works because the null columns don't unpivot, and you know all the column names.
3. Count(*) the result to get the number of non-nulls; subtract that from the number of slots to get your answer.
Like this, for 4 "seats" in a car:
select 'empty seats' = 4 - count(*)
from
(
select carId, seat1,seat2,seat3,seat4 from cars where carId = @carId
) carSpec
unpivot (FieldValue FOR seat in ([seat1],[seat2],[seat3],[seat4])) AS results
This is useful if you may need to do more later than just count the number of non-null columns, as it gives you a way to manipulate the columns as a set too.
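For a quick test, the cars table assumed by the sketch above might look like this (hypothetical names taken from the example):
create table cars (carId int, seat1 varchar(50), seat2 varchar(50), seat3 varchar(50), seat4 varchar(50));
insert into cars values (1, 'Alice', NULL, 'Bob', NULL);
declare @carId int = 1;
-- the unpivot query above then reports: empty seats = 2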
This will give you the number of columns which are not null. You can adapt it as appropriate:
SELECT ISNULL(COUNT(col1),'') + ISNULL(COUNT(col2),'') +ISNULL(COUNT(col3),'')
FROM TABLENAME
WHERE ID=1
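Since the question asks for the number of NULL columns rather than the non-null ones, you can subtract that result from the number of columns involved; a sketch for the same three columns (COUNT skips NULLs, and with a single matching row each term is 0 or 1):
SELECT 3 - (COUNT(col1) + COUNT(col2) + COUNT(col3)) AS null_col_count
FROM TABLENAME
WHERE ID=1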
The script below gives you the NULL value count within a row, i.e. how many columns do not have values.
SELECT
*,
(SELECT COUNT(*)
FROM (VALUES (Tab.Col1)
,(Tab.Col2)
,(Tab.Col3)
,(Tab.Col4)) InnerTab(Col)
WHERE Col IS NULL) NullColumnCount
FROM (VALUES(1,2,3,4)
,(NULL,2,NULL,4)
,(1,NULL,NULL,NULL)) Tab(Col1,Col2,Col3,Col4)
Just to demonstrate I am using an inline table in my example.
Try to cast or convert all column values to a common type; it will help you compare columns of different types.
I haven't tested it yet, but I'd try to do it with a PL/SQL function:
CREATE OR REPLACE TYPE ANYARRAY AS TABLE OF ANYDATA
;
CREATE OR REPLACE Function COUNT_NULL
( ARR IN ANYARRAY )
RETURN number
IS
cnumber number := 0;
BEGIN
for i in 1 .. ARR.count loop
if ARR(i).column_value is null then
cnumber := cnumber + 1;
end if;
end loop;
RETURN cnumber;
EXCEPTION
WHEN OTHERS THEN
raise_application_error
(-20001,'An error was encountered - '
||SQLCODE||' -ERROR- '||SQLERRM);
END
;
Then use it in a select query like this
CREATE TABLE TEST (A NUMBER, B NUMBER, C NUMBER);
INSERT INTO TEST VALUES (NULL,NULL,NULL);
INSERT INTO TEST VALUES (1 ,NULL,NULL);
INSERT INTO TEST VALUES (1 ,2 ,NULL);
INSERT INTO TEST VALUES (1 ,2 ,3 );
SELECT ROWNUM,COUNT_NULL(A,B,C) AS NULL_COUNT FROM TEST;
Expected output
ROWNUM | NULL_COUNT
-------+-----------
1 | 3
2 | 2
3 | 1
4 | 0
This is how I tried it:
CREATE TABLE #temptablelocal (id int NOT NULL, column1 varchar(10) NULL, column2 varchar(10) NULL, column3 varchar(10) NULL, column4 varchar(10) NULL, column5 varchar(10) NULL, column6 varchar(10) NULL);
INSERT INTO #temptablelocal
VALUES (1,
NULL,
'a',
NULL,
'b',
NULL,
'c')
SELECT *
FROM #temptablelocal
WHERE id =1
SELECT count(1) countnull
FROM
(SELECT a.ID,
b.column_title,
column_val = CASE b.column_title
WHEN 'column1' THEN a.column1
WHEN 'column2' THEN a.column2
WHEN 'column3' THEN a.column3
WHEN 'column4' THEN a.column4
WHEN 'column5' THEN a.column5
WHEN 'column6' THEN a.column6
END
FROM
( SELECT id,
column1,
column2,
column3,
column4,
column5,
column6
FROM #temptablelocal
WHERE id =1 ) a
CROSS JOIN
( SELECT 'column1'
UNION ALL SELECT 'column2'
UNION ALL SELECT 'column3'
UNION ALL SELECT 'column4'
UNION ALL SELECT 'column5'
UNION ALL SELECT 'column6' ) b (column_title) ) AS pop WHERE column_val IS NULL
DROP TABLE #temptablelocal
Similarly, but dynamically:
drop table if exists myschema.table_with_nulls;
create table myschema.table_with_nulls as
select
n1::integer,
n2::integer,
n3::integer,
n4::integer,
c1::character varying,
c2::character varying,
c3::character varying,
c4::character varying
from
(
values
(1,2,3,4,'a','b','c','d'),
(1,2,3,null,'a','b','c',null),
(1,2,null,null,'a','b',null,null),
(1,null,null,null,'a',null,null,null)
) as test_records(n1, n2, n3, n4, c1, c2, c3, c4);
drop function if exists myschema.count_nulls(varchar,varchar);
create function myschema.count_nulls(schemaname varchar, tablename varchar) returns void as
$BODY$
declare
calc varchar;
sqlstring varchar;
begin
select
array_to_string(array_agg('(' || trim(column_name) || ' is null)::integer'),' + ')
into
calc
from
information_schema.columns
where
table_schema in ('myschema')
and table_name in ('table_with_nulls');
sqlstring = 'create temp view count_nulls as select *, ' || calc || '::integer as count_nulls from myschema.table_with_nulls';
execute sqlstring;
return;
end;
$BODY$ LANGUAGE plpgsql STRICT;
select * from myschema.count_nulls('myschema'::varchar,'table_with_nulls'::varchar);
select
*
from
count_nulls;
Though I see that I didn't finish parameterising the function.
My answer builds on Drew Chapin's answer, but with changes to get the result using a single script:
use <add_database_here>;
Declare @val Varchar(MAX);
Select @val = COALESCE(@val + str, str) From
(SELECT
'(CASE WHEN '+COLUMN_NAME+' IS NULL THEN 1 ELSE 0 END) +' str
FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '<add table name here>'
) t1 -- getting the column names and building the CASE WHEN expressions that map NULLs to ones and non-NULLs to zeros
Select @val = SUBSTRING(@val,1,LEN(@val) - 1) -- removing the trailing plus sign
Select @val = 'SELECT <add_identity_column_here>, ' + @val + ' AS NullCount FROM <add table name here>' -- adding the 'select' for the identity column, the alias for the null count column, and the 'from'
EXEC (@val) --executing the resulting sql
With Oracle:
select <number_of_columns> - json_value( json_array( <comma separated list of columns> ), '$.size()' ) from your_table
json_array builds an array containing only the non-null columns, and the json_value expression gives you the size of that array.
There isn't a straightforward way of doing this like there is for counting rows. Basically, you have to enumerate all the columns that might be null in one expression.
So for a table with possibly-null columns a, b and c, you could do this:
SELECT key_column,
(CASE WHEN a IS NULL THEN 1 ELSE 0 END)
+ (CASE WHEN b IS NULL THEN 1 ELSE 0 END)
+ (CASE WHEN c IS NULL THEN 1 ELSE 0 END) AS null_col_count
FROM my_table

SQL Server Comma Separated value among columns

I want to select columns as comma-separated values by doing something like:
select column1+','+column2+','+column3+','+column4 from someTable
except that if any of the columns holds a null value I have to skip that column and not add its comma.
How can I do this in SQL Server?
[All columns are of type varchar so no casting needed]
Select
Case When Len(IsNull(Column1,'')) > 0 Then Column1 + ',' Else '' End,
Case When Len(IsNull(Column2,'')) > 0 Then Column2 + ',' Else '' End,
Case When Len(IsNull(Column3,'')) > 0 Then Column3 + ',' Else '' End,
Case When Len(IsNull(Column4,'')) > 0 Then Column4 + ',' Else '' End,
Case When Len(IsNull(ColumnN,'')) > 0 Then ColumnN + ',' Else '' End
From
SomeTable
Try this:
Test table
create table #testCol (column1 varchar(10), column2 varchar(10),
column3 varchar(10), column4 varchar(10))
insert #testCol values('a', null,null,'b')
insert #testCol values(null,'a',null,'b' )
insert #testCol values(null,'a','Z','b' )
Query
select isnull(column1,'')+ case when column1 is null then '' else ',' end
+ isnull(column2,'')+ case when column2 is null then '' else ',' end
+ isnull(column3,'')+ case when column3 is null then '' else ',' end
+ isnull(column4,'')
from #testCol
Output
a,b
a,b
a,Z,b
Can you export to csv and then strip out all the double commas?
select isnull(column1 + ',', '') + isnull(column2 + ',', '') + isnull(column3 + ',', '') + isnull(column4, '') from someTable
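If you are on SQL Server 2017 or later, CONCAT_WS does this natively: it skips NULL arguments and only places the separator between the values it keeps.
select concat_ws(',', column1, column2, column3, column4) from someTable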