SQL dynamic columns and Update multiple columns - sql

I have a table UserPermission which has a number of columns of TINYINT type. e.g Read, Write, Update, Delete, Access etc.
I get three parameters in the stored procedure: @UserId, @ColNames, @ColValues, where @ColNames and @ColValues are comma-separated values.
How can I insert or update the table row (if already exists) with the passed column names and corresponding values.
I try to write the dynamic query which runs fine for INSERT but I was unable to write the UPDATE query dynamically with each column and its value to be concatenate.
Any response would be appreciated
Thanks in advance.

This is a somewhat dirty way to do what you require. However, if you create the following table-valued function:
-- Inline table-valued function: splits @String on @Delimiter using a
-- recursive CTE and returns one row per token with a 1-based Id.
-- NOTE: recursive CTEs are capped at MAXRECURSION 100 by default; query
-- with OPTION (MAXRECURSION 0) if the input can hold >100 delimiters.
CREATE FUNCTION [dbo].[stringSplit]
(
@String NVARCHAR(4000),   -- delimited input text
@Delimiter NCHAR(1)       -- single-character separator
)
RETURNS TABLE
AS
RETURN
(
-- Split(stpos, endpos): one row per token; stpos is the token start,
-- endpos the position of the next delimiter (0 = no more delimiters).
WITH Split(stpos,endpos)
AS(
SELECT 0 AS stpos, CHARINDEX(@Delimiter,@String) AS endpos
UNION ALL
SELECT endpos+1, CHARINDEX(@Delimiter,@String,endpos+1)
FROM Split
WHERE endpos > 0
)
SELECT 'Id' = ROW_NUMBER() OVER (ORDER BY (SELECT 1)),
-- endpos = 0 means "last token": take everything to the end of the string.
'Data' = SUBSTRING(@String,stpos,COALESCE(NULLIF(endpos,0),LEN(@String)+1)-stpos)
FROM Split
)
You can then use that function to join the data together:
-- Demo: split the column-name CSV and the value CSV, verify both lists
-- hold the same number of tokens, then pair them by ordinal Id.
DECLARE @TotalCols INT
DECLARE @TotalVals INT
SET @TotalCols = (
SELECT COUNT(Id) AS Total
FROM dbo.stringSplit('department, teamlead', ',')
);
SET @TotalVals = (
SELECT COUNT(Id) AS Total
FROM dbo.stringSplit('IT, Bob', ',')
);
-- Only pair the lists when the token counts line up; a mismatch would
-- otherwise be silently truncated by the INNER JOIN below.
IF @TotalCols = @TotalVals
BEGIN
IF OBJECT_ID('tempdb..#temptable') IS NOT NULL
DROP TABLE #temptable
CREATE TABLE #temptable (
ColName VARCHAR(MAX) NULL
,ColValue VARCHAR(MAX) NULL
)
-- Pair the Nth column name with the Nth value.
-- NOTE(review): tokens keep leading spaces (' teamlead', ' Bob');
-- LTRIM them before building dynamic SQL from this table.
INSERT INTO #temptable (ColName, ColValue)
SELECT a.Data
,b.Data
FROM dbo.stringSplit('department, teamlead', ',') AS a
INNER JOIN dbo.stringSplit('IT, Bob', ',') AS b ON a.Id = b.Id
SELECT *
FROM #temptable;
END
It's not very efficient, but it will bring you the desired results.
You can then use the temp table to update, insert and delete as required.

Instead of having a comma delimited list I would create a separate parameter for each Column and make its default value to NULL and in the code update nothing if its null or insert 0. Something like this....
-- Upsert a user's permission flags. Each flag parameter defaults to NULL
-- so callers name only the columns they want to change:
--   on UPDATE: a NULL parameter keeps the column's current value;
--   on INSERT: a NULL parameter defaults the column to 0.
CREATE PROCEDURE usp_UserPermissions
@UserID INT
,@Update INT = NULL --<-- Make default values NULL
,@Delete INT = NULL
,@Read INT = NULL
,@Write INT = NULL
,@Access INT = NULL
AS
BEGIN
SET NOCOUNT ON;
-- Stage the parameters as a one-row table so both branches can share the
-- same ISNULL/join logic.
DECLARE @t TABLE (UserID INT, [Update] INT,[Read] INT
,[Write] INT,[Delete] INT,[Access] INT)
INSERT INTO @t (UserID, [Update],[Read],[Write],[Delete],[Access])
VALUES (@UserID , @Update , @Read, @Write , @Delete, @Access)
-- NOTE(review): IF EXISTS + UPDATE/INSERT is not atomic; two concurrent
-- callers for a new UserID can both take the INSERT branch. Consider
-- MERGE or an UPDLOCK/HOLDLOCK hint if that race matters here.
IF EXISTS (SELECT 1 FROM UserPermission WHERE UserID = @UserID)
BEGIN
UPDATE up -- Only update if a value was provided else update to itself
SET up.[Read] = ISNULL(t.[Read] , up.[Read])
,up.[Write] = ISNULL(t.[Write] , up.[Write])
,up.[Update] = ISNULL(t.[Update] , up.[Update])
,up.[Delete] = ISNULL(t.[Delete] , up.[Delete])
,up.[Access] = ISNULL(t.[Access] , up.[Access])
FROM UserPermission up
INNER JOIN @t t ON up.UserID = t.UserID
END
ELSE
BEGIN
-- No row exists yet for this user: add one.
-- Any column whose parameter was not passed defaults to 0.
INSERT INTO UserPermission (UserID, [Update],[Read],[Write],[Delete],[Access])
SELECT UserID
, ISNULL([Update], 0)
, ISNULL([Read], 0)
, ISNULL([Write], 0)
, ISNULL([Delete], 0)
, ISNULL([Access], 0)
FROM @t
END
END

Related

How can I speed up this SQL Server CURSOR used for change tracking? [closed]

Closed. This question needs to be more focused. It is not currently accepting answers.
Want to improve this question? Update the question so it focuses on one problem only by editing this post.
Closed last year.
Improve this question
Can anyone recommend how I can speed up this code, and primarily the cursor? The code is a SQL Server db query that creates a trigger on INSERT, UPDATE, or DELETE. It writes a record to a changelog table identifying the type of change (I, U, or D) and then saves the old value and new value of each affected column for each row in a details table.
I want this to be generic so I can easily reuse it for any table I throw at it that has a unique column I can filter on. Writing the whole row of changes to a cloned structure audit table is not an option unfortunately.
Any help is greatly appreciated, I am not the greatest at query optimization and welcome any feedback or rewrites.. Thanks!
-- Change-tracking trigger: logs one TableChangeLogSummary row per affected
-- AddressID (type I/U/D) plus one TableChangeLogDetail row per changed
-- column, by diffing the inserted/deleted pseudo-tables as XML.
-- NOTE(review): '#'/'##' variable sigils below appear to be a transcription
-- of T-SQL's '@'/'@@'; as literally written this batch will not parse.
ALTER TRIGGER [dbo].[tbl_Address_ChangeTracking] ON [dbo].[tbl_Address]
AFTER INSERT, DELETE, UPDATE
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON
--SET XACT_ABORT ON
-- Get the table name of the current process: resolve this trigger's
-- parent object via ##PROCID so the logic is reusable across tables.
-- NOTE(review): VARCHAR(25) is tight for 'schema.table'; longer names
-- would be truncated - consider NVARCHAR(256)/SYSNAME sizing.
DECLARE #TableName VARCHAR(25)
SET #TableName = COALESCE(
(
SELECT SCHEMA_NAME(schema_id) + '.' + OBJECT_NAME(parent_object_id)
FROM sys.objects
WHERE sys.objects.name = OBJECT_NAME(##PROCID) AND
SCHEMA_NAME(sys.objects.schema_id) = OBJECT_SCHEMA_NAME(##PROCID)
), 'Unknown')
--Declare our cursor to navigate the records in inserted and deleted.
--Dynamic SQL is used because each branch below snapshots into a
--differently named temp table, and the cursor must select from it.
DECLARE #cursorSQL AS NVARCHAR(MAX) = ''
DECLARE #PrimaryID AS VARCHAR(MAX) = ''
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
DECLARE #ourLogCursor CURSOR
--If we have a record in inserted and deleted this is an update record and we should pull from the inserted table and assume
--this is one update or many update statements.
--The three IF blocks are mutually exclusive: exactly one runs per firing.
IF EXISTS
(
SELECT 1
FROM inserted
) AND
EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
SELECT *
INTO #inserted1TableTemp
FROM inserted
SET #cursorSQL = 'SET #ourLogCursor = CURSOR FOR SELECT AddressID FROM #inserted1TableTemp; OPEN #ourLogCursor;'
END
--If we have an inserted record and no deleted record this is an insert and we pull from the inserted table
IF EXISTS
(
SELECT 1
FROM inserted
) AND
NOT EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
SELECT *
INTO #inserted2TableTemp
FROM inserted
SET #cursorSQL = 'SET #ourLogCursor = CURSOR FOR SELECT AddressID FROM #inserted2TableTemp; OPEN #ourLogCursor;'
END
--If we have a deleted record and no insert record this is a deletion and we pull from the deleted table
IF NOT EXISTS
(
SELECT 1
FROM inserted
) AND
EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
SELECT *
INTO #deletedTableTemp
FROM deleted
SET #cursorSQL = 'SET #ourLogCursor = CURSOR FOR SELECT AddressID FROM #deletedTableTemp; OPEN #ourLogCursor;'
END
--Neither inserted nor deleted has rows (e.g. an UPDATE/DELETE that
--affected zero rows): nothing to log, exit early.
IF NOT EXISTS
(
SELECT 1
FROM inserted
) AND
NOT EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
RETURN;
END
--Execute our dynamic SQL that sets the correct FOR SELECT statement for the cursor. Pass #ourLogCursor as an input param, and then grab the output
--so the results are available outside the scope of the executesql call
EXEC sys.sp_executesql #cursorSQL, N'#ourLogCursor CURSOR OUTPUT', #ourLogCursor OUTPUT;
FETCH NEXT FROM #ourLogCursor INTO #PrimaryID
DECLARE #xmlOld XML
DECLARE #xmlNew XML
DECLARE #SummaryID INT
-- NOTE(review): this recomputes the same #TableName already set above;
-- the second COALESCE block is redundant.
SET #TableName = COALESCE(
(
SELECT SCHEMA_NAME(schema_id) + '.' + OBJECT_NAME(parent_object_id)
FROM sys.objects
WHERE sys.objects.name = OBJECT_NAME(##PROCID) AND
SCHEMA_NAME(sys.objects.schema_id) = OBJECT_SCHEMA_NAME(##PROCID)
), 'Unknown')
--Navigate all our rows, one AddressID at a time
WHILE ##FETCH_STATUS = 0
BEGIN
DROP TABLE IF EXISTS #tmp_AddressesChangelogTrigger
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXML1
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXML2
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXMLsWithDifferences
--Get the deleted and inserted records as xml for comparison against each other.
--NOTE(review): the variable names look swapped - #xmlNew reads `deleted`
--(pre-change values) and #xmlOld reads `inserted` (post-change values).
--The OldValue/NewValue mapping further down compensates, so the net
--result is correct, but the naming is misleading.
SET #xmlNew =
(
SELECT *
FROM deleted AS [TABLE]
WHERE AddressID = #PrimaryID
ORDER BY AddressID FOR XML AUTO, ELEMENTS
)
SET #xmlOld =
(
SELECT *
FROM inserted AS [TABLE]
WHERE AddressID = #PrimaryID
ORDER BY AddressID FOR XML AUTO, ELEMENTS
)
CREATE TABLE #tmp_AddressesChangelogTriggerXML1
(
NodeName VARCHAR(MAX), Value VARCHAR(MAX)
)
CREATE TABLE #tmp_AddressesChangelogTriggerXML2
(
NodeName VARCHAR(MAX), Value VARCHAR(MAX)
)
--Extract the values and column names
INSERT INTO #tmp_AddressesChangelogTriggerXML2( NodeName, Value )
--Throw the XML into temp tables with the column name and value
SELECT N.value( 'local-name(.)', 'nvarchar(MAX)' ) AS NodeName, N.value( 'text()[1]', 'nvarchar(MAX)' ) AS VALUE
FROM #xmlNew.nodes( '/TABLE/*' ) AS T(N)
INSERT INTO #tmp_AddressesChangelogTriggerXML1( NodeName, Value )
SELECT N.value( 'local-name(.)', 'nvarchar(MAX)' ) AS NodeName, N.value( 'text()[1]', 'nvarchar(MAX)' ) AS VALUE
FROM #xmlOld.nodes( '/TABLE/*' ) AS T(N)
--Get the differences into a temp table.
--NOTE(review): `A.Value <> B.Value` evaluates to UNKNOWN when either side
--is NULL, so NULL-to-value and value-to-NULL changes are silently dropped
--here. The rewrite further below addresses this explicitly.
SELECT *
INTO #tmp_AddressesChangelogTriggerXMLsWithDifferences
FROM
(
SELECT COALESCE(A.NodeName, B.NodeName) AS NodeName, B.Value AS OldValue, A.Value AS NewValue
FROM #tmp_AddressesChangelogTriggerXML1 AS A
FULL OUTER JOIN #tmp_AddressesChangelogTriggerXML2 AS B ON A.NodeName = B.NodeName
WHERE A.Value <> B.Value
) AS tmp
--If anything changed then start our write statements
IF
(
SELECT COUNT(*)
FROM #tmp_AddressesChangelogTriggerXMLsWithDifferences
) > 0
BEGIN
BEGIN TRY
-- Now create the Summary record
--BEGIN TRANSACTION WRITECHANGELOGRECORDS
INSERT INTO TableChangeLogSummary( ID, ModifiedDate, ChangeType, TableName )
--Get either insert, or if no insert value, get the delete value
--Set the update type, I, D, U
--Compare values with a full outer join
--Filter on the ID we are on in the CURSOR
--NOTE(review): `AcesAddressID` below is presumably a typo for `AddressID`
--(the IS NULL checks keep the unmatched side of the full outer join);
--verify against the real tbl_Address schema.
SELECT COALESCE(I.AddressID, D.AddressID), GETDATE(),
CASE
WHEN D.AddressID IS NULL THEN 'I'
WHEN I.AddressID IS NULL THEN 'D'
ELSE 'U'
END, #TableName
FROM inserted AS I
FULL OUTER JOIN deleted AS D ON I.AddressID = D.AddressID
WHERE( I.AddressID = #PrimaryID OR
I.AcesAddressID IS NULL
) AND
( D.AddressID = #PrimaryID OR
D.AcesAddressID IS NULL
)
--Get the last summary id that was inserted so we can use it in the detail record.
--NOTE(review): IDENT_CURRENT is table-wide, not session-scoped - a
--concurrent insert from another session can yield the wrong ID here.
--SCOPE_IDENTITY() (or an OUTPUT clause) is the safe choice.
SET #SummaryID = (SELECT IDENT_CURRENT('TableChangeLogSummary'))
--Insert our per-column change detail rows
INSERT INTO TableChangeLogDetail( SummaryID, ColumnName, OldValue, NewValue )
SELECT #SummaryID, T.NodeName, T.OldValue, T.NewValue
FROM #tmp_AddressesChangelogTriggerXMLsWithDifferences AS T
--COMMIT TRANSACTION WRITECHANGELOGRECORDS
--PRINT 'RECORD WRITTEN'
END TRY
BEGIN CATCH
--Surface the error context as XML text so the caller sees what failed
DECLARE #errorXML XML
SET #errorXML = (SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_STATE() AS ErrorState, ERROR_SEVERITY() AS ErrorSeverity, ERROR_PROCEDURE() AS ErrorProcedure, ERROR_LINE() AS ErrorLine, ERROR_MESSAGE() AS ErrorMessage FOR XML RAW)
DECLARE #errorXMLText NVARCHAR(MAX) = ''
SET #errorXMLText = (SELECT CAST(#errorXML AS NVARCHAR(MAX)))
RAISERROR(#errorXMLText, 16, 1) WITH NOWAIT
END CATCH
END
--Go to the next record and process
FETCH NEXT FROM #ourLogCursor INTO #PrimaryID
END
CLOSE #ourLogCursor
DEALLOCATE #ourLogCursor
END
Acknowledging the recommendation for using change data tracking and caution against putting too much logic into triggers, the following is a refactoring (and some outright rewriting) of your change capture logic.
The updated logic makes a single pass through the data, handing all affected records at once. Given the requirements, I think it is pretty close to optimal, but there may still be room for improvements. The conversion to and from XML likely adds a significant bit of overhead. The alternative would be to dynamically generate and apply custom triggers for each table that explicitly reference all of the data columns individually to get the details and UNION them together.
I also refined the value comparison to better handle nulls, case sensitivity, and potential trailing space changes.
The code below is not in the form of a trigger, but in a form suitable for stand-alone testing. I figured you (and any others who may be interested) would want to test it first. Once checked out, you should be able to retrofit it back into your trigger.
Note that this is not a 100% generalized solution. Some column types may not be supported. The logic currently assumes a single column primary key of type integer. Changes would be required to handle deviations from these (and possibly some currently unidentified) constraints.
-- Stand-alone test harness for the set-based change-capture rewrite:
-- table variables simulate the changelog tables and the trigger's
-- inserted/deleted pseudo-tables, so the logic can be run in any query
-- window without creating a trigger.
-- Simulated change log tables
DECLARE #TableChangeLogSummary TABLE (ID INT IDENTITY(1,1), KeyValue INT NOT NULL, ModifiedDate DATETIME NOT NULL, ChangeType CHAR(1) NOT NULL, TableName NVARCHAR(1000) NOT NULL )
DECLARE #TableChangeLogDetails TABLE (ID INT IDENTITY(1,1), SummaryID int NOT NULl, ColumnName NVARCHAR(1000) NOT NULL, OldValue NVARCHAR(MAX), NewValue NVARCHAR(MAX))
-- Simulated system defined inserted/deleted tables
DECLARE #inserted TABLE (ID INTEGER, Value1 NVARCHAR(100), Value2 BIT, Value3 FLOAT)
DECLARE #deleted TABLE (ID INTEGER, Value1 NVARCHAR(100), Value2 BIT, Value3 FLOAT)
-- Test data: each row exercises a specific edge case (see trailing comments)
INSERT #inserted
VALUES
(1, 'AAA', 0, 3.14159), -- Insert
(2, 'BBB', 1, null), -- Mixed updates including null to non-null and non-null to null
(3, 'CCC', 0, 0), -- Trailing space change
(4, 'DDD', null, 1.68), -- No changes
(5, '', 0, null), -- No changes with blanks and nulls
(6, null, null, null), -- No changes all nulls
(7, null, null, null) -- Insert all nulls (summary with key, but no details will be logged)
INSERT #deleted
VALUES
(2, 'bbb', null, 2.73),
(3, 'CCC ', 0, 0),
(4, 'DDD', null, 1.68),
(5, '', 0, null),
(6, null, null, null),
(8, null, null, null), -- Delete all null values (summary with key, but no details will be logged)
(9, 'ZZZ', 999, 999.9) -- Delete non-nulls
--- Now the real work begins...
-- Set table and information. Assumes table has exactly one PK column. Later logic assumes an INT.
DECLARE #TableName NVARCHAR(1000) = 'MyTable' -- To be extracted from the parent object of the trigger
DECLARE #KeyColumnName SYSNAME = 'ID' -- This can be fixed if known or derived on the fly from the primary key definition
-- Extract inserted and/or deleted data as one XML element per row
DECLARE #InsertedXml XML = (
SELECT *
FROM #inserted
FOR XML PATH('inserted'), TYPE
)
DECLARE #DeletedXml XML = (
SELECT *
FROM #deleted
FOR XML PATH('deleted'), TYPE
)
-- Parse and rearrange the captured key and data values into one row per
-- (key, column) pair, joining old and new sides on key + column name
DECLARE #TempDetails TABLE(
KeyValue INT NOT NULL,
ChangeType CHAR(1) NOT NULL,
ColumnName VARCHAR(1000) NOT NULL,
IsKeyColumn BIT NOT NULL,
NewValue NVARCHAR(MAX),
OldValue NVARCHAR(MAX))
INSERT #TempDetails
SELECT
KeyValue = COALESCE(I.KeyValue, D.KeyValue),
ChangeType = CASE WHEN D.KeyValue IS NULL THEN 'I' WHEN I.KeyValue IS NULL THEN 'D' ELSE 'U' END,
ColumnName = COALESCE(I.ColumnName, D.ColumnName),
IsKeyColumn = K.IsKeyColumn,
NewValue = I.Value,
OldValue = D.Value
FROM (
SELECT K.KeyValue, C.ColumnName, C.Value
FROM #InsertedXml.nodes( '/inserted' ) R(Row)
CROSS APPLY (
SELECT KeyValue = C.Col.value('text()[1]', 'int')
FROM R.Row.nodes( './*' ) C(Col)
WHERE C.Col.value( 'local-name(.)', 'nvarchar(MAX)' ) = #KeyColumnName
) K
CROSS APPLY (
SELECT ColumnName = C.Col.value('local-name(.)', 'nvarchar(MAX)'), Value = C.Col.value('text()[1]', 'nvarchar(MAX)')
FROM R.Row.nodes( './*' ) C(Col)
) C
) I
FULL OUTER JOIN (
SELECT K.KeyValue, C.ColumnName, C.Value
FROM #DeletedXml.nodes( '/deleted' ) R(Row)
CROSS APPLY (
SELECT KeyValue = C.Col.value('text()[1]', 'int')
FROM R.Row.nodes( './*' ) C(Col)
WHERE C.Col.value( 'local-name(.)', 'nvarchar(MAX)' ) = #KeyColumnName
) K
CROSS APPLY (
SELECT ColumnName = C.Col.value('local-name(.)', 'nvarchar(MAX)'), Value = C.Col.value('text()[1]', 'nvarchar(MAX)')
FROM R.Row.nodes( './*' ) C(Col)
) C
) D
ON D.KeyValue = I.KeyValue
AND D.ColumnName = I.ColumnName
CROSS APPLY (
SELECT IsKeyColumn = CASE WHEN COALESCE(I.ColumnName, D.ColumnName) = #KeyColumnName THEN 1 ELSE 0 END
) K
WHERE ( -- We need to be careful about edge cases here
(I.Value IS NULL AND D.Value IS NOT NULL)
OR (I.Value IS NOT NULL AND D.Value IS NULL)
OR I.Value <> D.Value COLLATE Latin1_General_Bin -- Precise compare (case and accent sensitive)
OR DATALENGTH(I.Value) <> DATALENGTH(D.Value) -- Catch trailing space cases
OR K.IsKeyColumn = 1 -- Always keep the key row as the anchor for its group
)
-- Get rid of updates with no changes, but keep key-only inserts or deletes
DELETE T
FROM #TempDetails T
WHERE T.IsKeyColumn = 1
AND T.ChangeType = 'U'
AND NOT EXISTS (
SELECT *
FROM #TempDetails T2
WHERE T2.KeyValue = T.KeyValue
AND T2.IsKeyColumn = 0
)
-- Local table to capture and link SummaryID between the summary and details tables
DECLARE #CaptureSummaryID TABLE (SummaryID int, KeyValue INT NOT NULL)
-- Insert change summary and capture the assigned Summary ID via the OUTPUT clause
-- (session-safe, unlike IDENT_CURRENT in the original trigger)
INSERT INTO #TableChangeLogSummary (KeyValue, ModifiedDate, ChangeType, TableName)
OUTPUT INSERTED.id, INSERTED.KeyValue INTO #CaptureSummaryID
SELECT T.KeyValue, ModifiedDate = GETDATE(), T.ChangeType, TableName = #TableName
FROM #TempDetails T
WHERE T.IsKeyColumn = 1
ORDER BY T.KeyValue -- Optional, but adds consistency
-- Insert change details
INSERT INTO #TableChangeLogDetails (SummaryID, ColumnName, OldValue, NewValue)
SELECT S.SummaryID, T.ColumnName, T.OldValue, T.NewValue
FROM #CaptureSummaryID S
JOIN #TempDetails T ON T.KeyValue = S.KeyValue
WHERE T.IsKeyColumn = 0
ORDER BY T.ColumnName -- Optional, but adds consistency
-- View test results
SELECT 'Change Log:', *
FROM #TableChangeLogSummary S
LEFT JOIN #TableChangeLogDetails D ON D.SummaryID = S.ID
ORDER BY S.ID, D.ID

SQL Loop through tables and columns to find which columns are NOT empty

I created a temp table #test containing 3 fields: ColumnName, TableName, and Id.
I would like to see which rows in the #test table (columns in their respective tables) are not empty? I.e., for every column name that i have in the ColumnName field, and for the corresponding table found in the TableName field, i would like to see whether the column is empty or not. Tried some things (see below) but didn't get anywhere. Help, please.
-- Question code: iterate #test by sequential id, pulling each row's
-- column/table names into variables for inspection.
-- NOTE(review): scalar variables appear with '#' where T-SQL requires '@'
-- (transcription artifact); #test and #PropIDs are genuine table objects.
declare #LoopCounter INT = 1, #maxloopcounter int, #test varchar(100),
#test2 varchar(100), #check int
set #maxloopcounter = (select count(TableName) from #test)
-- NOTE(review): assumes #test ids run 1..N with no gaps - TODO confirm.
while #LoopCounter <= #maxloopcounter
begin
-- NOTE(review): declared inside the loop but NOT reset per iteration -
-- rows accumulate across passes.
DECLARE #PropIDs TABLE (tablename varchar(max), id int )
Insert into #PropIDs (tablename, id)
SELECT [tableName], id FROM #test
where id = #LoopCounter
set #test2 = (select columnname from #test where id = #LoopCounter)
declare #sss varchar(max)
set #sss = (select tablename from #PropIDs where id = #LoopCounter)
-- NOTE(review): COUNT(#test2) counts the non-null variable once per row of
-- the derived table - it never inspects the named column, so this cannot
-- test column emptiness. Dynamic SQL is required to use variable
-- table/column names here (see the answers below).
set #check = (select count(#test2)
from (select tablename
from #PropIDs
where id = #LoopCounter) A
)
print #test2
print #sss
print #check
set #LoopCounter = #LoopCounter + 1
end
In order to use variables as column names and table names in your #Check= query, you will need to use Dynamic SQL.
There is most likely a better way to do this but I cant think of one off hand. Here is what I would do.
Use the select and declare a cursor rather than a while loop as you have it. That way you dont have to count on sequential id's. The cursor would fetch fields columnname, id and tablename
In the loop build a dynamic sql statement
-- Build the probe dynamically: count rows where the target column is
-- neither NULL nor the empty string. QUOTENAME guards the identifiers,
-- since table/column names cannot be parameterized.
-- (The original put the <> comparison outside the string concatenation,
-- which is a syntax error; both operands must live inside the literal.)
Set @Sql = 'Select Count(*) Cnt Into #Temp2 From ' + QUOTENAME(@TableName) + ' Where ' + QUOTENAME(@columnname) + ' Is Not Null And ' + QUOTENAME(@columnname) + ' <> '''''
Exec(@Sql)
Then check #Temp2 for a value greater than 0, and if this is what you desire you can use the id that was fetched to update your #test table. Putting the result into a scalar variable rather than a temp table would be preferred, but I can't remember the best way to do that, and using a temp table allows you to use an update join, so it would work well in my opinion.
https://www.mssqltips.com/sqlservertip/1599/sql-server-cursor-example/
http://www.sommarskog.se/dynamic_sql.html
Found a way to extract all non-empty tables from the schema, then just joined with the initial temp table that I had created.
-- Join the candidate list in #test to the set of non-empty tables taken
-- from the partition statistics. The original LEFT JOIN plus an outer
-- "row_count > 0" filter is equivalent to this plain INNER JOIN: any
-- unmatched (NULL) row would have been discarded by that filter anyway.
SELECT t.TableName, r.[row_count]
FROM #test AS t
INNER JOIN (
    -- One row per user table with its total row count (heap or clustered
    -- index partitions only), keeping just the non-empty ones.
    SELECT OBJECT_NAME(s.[object_id]) AS table_name,
           SUM(s.row_count) AS [row_count]
    FROM sys.dm_db_partition_stats AS s
    INNER JOIN sys.tables AS tb ON tb.[object_id] = s.[object_id]
    WHERE s.index_id IN (0, 1)
    GROUP BY s.[object_id]
    HAVING SUM(s.row_count) > 0
) AS r ON r.table_name = t.[TableName]
ORDER BY r.[row_count] DESC
How about this one - bitmask computed column checks for NULLability. Value in the bitmask tells you if a column is NULL or not. Counting base 2.
-- Demo: a computed bitmask column flags which base columns are non-NULL.
-- Read the mask base-2: bit 1 = ID, bit 2 = val, bit 4 = valstr
-- (e.g. NotEmpty = 3 means ID and val are set, valstr is NULL).
CREATE TABLE FindNullComputedMask
(ID int
,val int
,valstr varchar(3)
,NotEmpty as
CASE WHEN ID IS NULL THEN 0 ELSE 1 END
|
CASE WHEN val IS NULL THEN 0 ELSE 2 END
|
CASE WHEN valstr IS NULL THEN 0 ELSE 4 END
)
-- Sample rows covering each NULL pattern; a single table-value
-- constructor with an explicit column list replaces the four separate
-- INSERT ... SELECT statements.
INSERT INTO FindNullComputedMask (ID, val, valstr)
VALUES (1, 1, NULL),
       (NULL, 2, NULL),
       (2, NULL, NULL),
       (3, 3, 3)
SELECT *
FROM FindNullComputedMask

Function return table variable

I'm trying to create a function that returns a table variable. First I get data from Table1 and put it in another table variable. Then I want to check whether this variable is empty: if so, the function returns the parameter; otherwise it returns the contents of the table variable.
The function script is bellow :
-- Question code: return table1.Code rows (id = 41), or the @Code
-- parameter when none exist. The answers below explain why the loop
-- never runs as written.
USE[DATABase1]
GO
IF OBJECT_ID (N'CodeFunc', N'TF') IS NOT NULL DROP FUNCTION dbo.CodeFunc;
GO
CREATE FUNCTION CodeFunc ( #Code nvarchar(4) , #Table nvarchar(40) = '' )
RETURNS #VirtualDAT TABLE
(
RowID INT IDENTITY ( 1 , 1 ),
Code nvarchar(400)
)
AS
BEGIN
DECLARE #CodeM nvarchar(400)
-- BUG: ##ROWCOUNT is read before any statement has run inside the
-- function, so #imax is 0 and the WHILE loop below never executes.
-- It should be captured immediately AFTER the INSERT further down.
DECLARE #imax INT SET #imax = ##ROWCOUNT
DECLARE #i INT SET #i = 1
DECLARE #SelectDAT TABLE
(
RowID INT IDENTITY ( 1 , 1 ),
Code nvarchar(400)
)
INSERT #SelectDAT
SELECT Code FROM table1
WHERE table1.id = 41
IF(EXISTS (SELECT 1 FROM #SelectDAT))
BEGIN
-- Row-by-row copy; a single INSERT ... SELECT would do the same work
-- set-based without the loop.
WHILE (#i <= #imax)
BEGIN
SELECT #CodeM = Code FROM #SelectDAT WHERE RowID = #i
INSERT INTO #VirtualDAT(Code) VALUES (#CodeM)
SET #i = #i + 1
END
END
ELSE
-- Fallback: no table1 rows matched, so return the parameter instead.
INSERT INTO #VirtualDAT(Code) VALUES (#Code)
RETURN
END
So this script works without put it inside function.
And I test this function like this: SELECT * FROM dbo.CodeFunc( 'toto', DEFAULT ). The result is:
when IF(EXISTS (SELECT 1 FROM @SelectDAT)) is true, no record is returned;
else the result is OK.
As VR46 says, the @@ROWCOUNT will be set to 0 because there is no query before it. Any code executing in a function happens as a separate set of queries. It was probably returning a value outside the function because you had previously used the query window for another unrelated query.
You could re-factor this function quite dramatically. Look below, ##ROWCOUNT will work here as it is just after the insert query and will definitely have a value based on the insert.
I have not been able to test this, but I think something like this should do the same job.
USE [DATABase1]
GO
IF OBJECT_ID (N'CodeFunc', N'TF') IS NOT NULL DROP FUNCTION dbo.CodeFunc;
GO
-- Returns table1.Code rows for id = 41; when none exist, returns a
-- single row holding the @Code parameter instead.
CREATE FUNCTION CodeFunc ( @Code nvarchar(4) , @Table nvarchar(40) = '' )
RETURNS @VirtualDAT TABLE
(
RowID INT IDENTITY ( 1 , 1 ),
Code nvarchar(400)
)
AS
BEGIN
-- Copy matching rows straight into the return table - no loop needed.
INSERT INTO @VirtualDAT (Code)
SELECT Code FROM table1 WHERE table1.id = 41
-- @@ROWCOUNT is reliable here: it describes the INSERT just above.
IF @@ROWCOUNT = 0
BEGIN
-- Fallback: table1 had no match, return the parameter.
INSERT INTO @VirtualDAT (Code) VALUES (@Code)
END
RETURN
END
Since you are assigning @imax from @@ROWCOUNT right after declaring the variable, it will be initialized with zero.
From MSDN ##ROWCOUNT
Returns the number of rows affected by the last statement.
If am not wrong you need to assign value to #imax after the insert into..select query.
INSERT @SelectDAT
SELECT Code FROM table1
WHERE table1.id = 41
-- Capture @@ROWCOUNT immediately: any intervening statement resets it.
SET @imax = @@ROWCOUNT
You can do the same in SET BASED APPROACH without using while loop.
-- Set-based version: no WHILE loop. Either copy every matching code from
-- table1 (id = 41) into the return table, or - when none exist - return
-- a single row holding the caller-supplied fallback @Code.
CREATE FUNCTION Codefunc (@Code NVARCHAR(4),
@Table NVARCHAR(40) = '')
returns @VirtualDAT TABLE (
rowid INT IDENTITY ( 1, 1 ),
code NVARCHAR(400))
AS
BEGIN
IF EXISTS (SELECT code
FROM table1
WHERE table1.id = 41)
BEGIN
INSERT INTO @VirtualDAT
(code)
SELECT code
FROM table1
WHERE table1.id = 41
END
ELSE
-- Fallback row when table1 had no match.
INSERT INTO @VirtualDAT
(code)
VALUES (@Code)
RETURN
END

In operator matching all rows

I want to return matching all values of csv as the traditional "in" operator matches any of the items present in csv:
SELECT * FROM #MyTable
WHERE [UserID] IN (1,2)
The above query will not serve my purpose, as I want to match the rows which have all the listed records for a group. In my case the group will be TypeID.
Query to populate the table:
-- Sample data: (TypeID, UserID) pairs. A single table-value constructor
-- replaces the original nine-way UNION chain - the rows are already
-- distinct, so UNION's dedup pass bought nothing.
DECLARE @MyTable TABLE
(
[TypeID] INT ,
[UserID] INT
)
INSERT INTO @MyTable ([TypeID], [UserID])
VALUES (1, 1), (1, 2),
       (2, 1), (2, 2), (2, 3),
       (3, 1), (3, 2), (3, 3), (3, 4)
To query the above table I have input string of userid
DECLARE #UserIDString VARCHAR(256)
Here is my requirement:
When the input is '1,2'; I want typeid 1 as the output as that group has all the records present in csv.
If the input is '1,2,3' ; 2 typeid should be returned as that group has all the values present in csv.
If the input is '1,2,3,4' ; 3 typeid should be returned as that group has all the values present in csv.
EDIT:
Here is the split function to split the csv:
-- Splits @inputString on @delimiter, returning each token with its
-- 1-based position. Tokens are trimmed of surrounding whitespace; a
-- leading delimiter yields no empty token (it is skipped).
CREATE FUNCTION [dbo].[Split_String]
(
@inputString NVARCHAR(2000) ,
@delimiter NVARCHAR(20) = ' '
)
RETURNS @Strings TABLE
(
[position] INT IDENTITY
PRIMARY KEY ,
[value] NVARCHAR(2000)
)
AS
BEGIN
DECLARE @index INT
SET @index = -1
WHILE ( LEN(@inputString) > 0 )
BEGIN-- Find the first delimiter
SET @index = CHARINDEX(@delimiter, @inputString)
-- No delimiter left?
-- Insert the remaining @inputString and break the loop
IF ( @index = 0 )
AND ( LEN(@inputString) > 0 )
BEGIN
INSERT INTO @Strings
VALUES ( RTRIM(LTRIM(CAST(@inputString AS NVARCHAR(2000))) ))
BREAK
END
-- Found a delimiter
-- Insert left of the delimiter and truncate the @inputString
IF ( @index > 1 )
BEGIN
INSERT INTO @Strings
VALUES ( RTRIM(LTRIM(CAST(LEFT(@inputString, @index - 1) AS NVARCHAR(2000)) ) ))
SET @inputString = RIGHT(@inputString,
( LEN(@inputString) - @index ))
END -- Delimiter is 1st position = nothing to insert, just truncate
ELSE
SET @inputString = CAST(RIGHT(@inputString,
( LEN(@inputString) - @index )) AS NVARCHAR(2000))
END
RETURN
END
GO
Edit:
Thanks #Tab, with further modifications I have come to solution:
-- Return every TypeID whose user set matches the CSV input exactly:
-- no user outside the list (NOT IN subquery) AND as many users as the
-- list has tokens (HAVING).
DECLARE @InputString VARCHAR(256)
DECLARE @Count INT -- token count; INT, not VARCHAR, since it holds COUNT(1)
--SET @InputString = '1,2'
DECLARE @DummyTable TABLE
(
[position] INT ,
[value] INT
)
INSERT INTO @DummyTable
( [position] ,
[value]
)
SELECT [position] ,
[value]
FROM [dbo].[Split_String](@InputString, ',')
SELECT @Count = COUNT(1)
FROM @DummyTable
-- Exclude groups containing any user NOT in the CSV, then require the
-- surviving groups to carry one row per CSV token.
SELECT TypeID
FROM @MyTable
WHERE TypeID NOT IN (
SELECT TypeID
FROM @MyTable T
LEFT OUTER JOIN @DummyTable ss ON T.UserID = ss.[value]
WHERE ss.[position] IS NULL )
GROUP BY TypeID
HAVING COUNT(TypeID) = @Count
Using your split function, you can do an OUTER JOIN and make sure there are no NULL rows:
-- Types having no user outside the split list, i.e. the group's users are
-- all present in the CSV. NOTE: without an additional count check this
-- also returns groups that are proper subsets of the CSV (see the asker's
-- edit above, which adds GROUP BY/HAVING for an exact match).
-- (The stray trailing alias after the NOT IN subquery was a syntax error
-- and has been removed.)
SELECT TypeID
FROM @MyTable
WHERE TypeID NOT IN (
SELECT TypeID
FROM @MyTable t
LEFT OUTER JOIN [dbo].[Split_String] (@InputString,',') ss
ON t.UserID = ss.[value]
WHERE ss.[position] IS NULL
)
Untested, but I think that should do it.
However, this should return ALL the types that meet the requirement of:
that group has all the records present in csv.
In your question, you seem to imply that only one row should be returned, but why would that be the case if more than one row matches all the values in the csv? And what is the rule for determining which row is returned when there is more than one match?

Update SQL table with values to update and values to compare both in separate comma delimited strings

I have a SQL table where I need to update Date field of multiple Users. The primary key (userId) field is int. Now I am sending the Date values in a comma separated string (like "10/06/2013,12/05/2013,16/07/2013") and corresponding userId values also in a comma separated string (like "1001,1002,1005").
How can I update all relevant Users in my stored procedure? Or should I send the userIds and Dates in any other way?
try this
-- Walk two parallel CSV strings (dates / user ids), pairing the Nth date
-- with the Nth user id, then apply the dates in one set-based UPDATE.
-- Assumes both lists hold the same number of tokens.
DECLARE @dates VARCHAR(8000) = '10/06/2013,12/05/2013,16/07/2013'
DECLARE @userid VARCHAR(8000) = '1001,1002,1005'
DECLARE @t1 TABLE
(
dates VARCHAR(50) ,
userid VARCHAR(50)
)
-- Peel one token off the front of each list per iteration.
WHILE CHARINDEX(',', @dates) > 0
BEGIN
INSERT INTO @t1
( dates ,
userid
)
VALUES ( SUBSTRING(@dates, 1, ( CHARINDEX(',', @dates) - 1 )) ,
SUBSTRING(@userid, 1, ( CHARINDEX(',', @userid) - 1 ))
)
SET @dates = SUBSTRING(@dates, CHARINDEX(',', @dates) + 1,
LEN(@dates))
SET @userid = SUBSTRING(@userid, CHARINDEX(',', @userid) + 1,
LEN(@userid))
END
-- The last (or only) token has no trailing comma.
INSERT INTO @t1
( dates, userid )
VALUES ( @dates, @userid )
SELECT *
FROM @t1 AS t
-- Join-based UPDATE touches only matching employees. The original
-- correlated-subquery form had no WHERE clause, so every Employee whose
-- Code was not in the list would have had JoiningDate set to NULL.
UPDATE e
SET e.JoiningDate = t.dates
FROM LMS.dbo.Employee AS e
INNER JOIN @t1 AS t ON e.Code = t.userid
It would be good if you send a XML including date and user id. That is comparatively faster as well.
That is what prepared statements are for. Create a prepared UPDATE statement. And then execute it with different values as many times as you need.