Divide using a formula from a different table in SQL

I need to evaluate a formula in SQL Server 2008.
Table 1 contains:
Entity  Value
A       2424053.500000
B       1151425.412500
C       484810.700000
Table 2 contains:
Entity  Formula
A       (2100*(1-0.0668)*24*mday*10)
B       (1000*(1-0.0575)*24*mday*10)
C       (1260*(1-0.09)*24*mday*10)
where mday is the number of days supplied by the user. The data type of Formula is a string.
I need to calculate value/formula for each entity.
Can you provide the query for this?

Example solution for SQL Server 2008, adjust as required...
IF EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('EntityValue'))
BEGIN
DROP TABLE EntityValue;
END;
CREATE TABLE EntityValue
(
Id CHAR(1),
mdayValue DECIMAL(13, 6)
)
INSERT INTO EntityValue
VALUES ('1', 2424053.500000)
, ('2', 1151425.412500)
, ('3', 484810.700000)
IF EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('EntityFormula'))
BEGIN
DROP TABLE EntityFormula;
END;
CREATE TABLE EntityFormula
(
Id CHAR(1),
Formula NVARCHAR(MAX)
)
INSERT INTO EntityFormula
VALUES ('1', '(2100*(1-0.0668)*24*mday*10)')
, ('2', '(1000*(1-0.0575)*24*mday*10)')
, ('3', '(1260*(1-0.09)*24*mday*10)')
DECLARE @FormulaTable AS TABLE
(
RowId INT IDENTITY(1,1)
,Formula NVARCHAR(MAX)
);
INSERT INTO @FormulaTable (Formula)
SELECT Formula = REPLACE(eFormula.Formula, 'mday', CAST(eValue.mdayValue AS NVARCHAR(MAX)))
FROM EntityFormula AS eFormula
INNER JOIN EntityValue AS eValue ON eValue.Id = eFormula.Id;
DECLARE @TSql NVARCHAR(MAX), @CurrentRowId INT;
SET @CurrentRowId = 1;
WHILE(1=1)
BEGIN
SELECT @TSql = 'SELECT ' + Formula
FROM @FormulaTable
WHERE RowId = @CurrentRowId
IF(@@ROWCOUNT = 0)
BEGIN
BREAK;
END
EXEC sp_executesql @TSql
SET @CurrentRowId = @CurrentRowId + 1;
END
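The loop above only executes each substituted formula. To get the value/formula figure the question actually asks for, a minimal sketch (treating mday as a single user-supplied number, shown here as a hypothetical @mday = 30, and reusing the EntityValue and EntityFormula tables created above) could build one statement per entity and run them together:
DECLARE @mday INT = 30; -- hypothetical user input
DECLARE @Sql NVARCHAR(MAX);
-- Build "SELECT <value> / (<formula with mday substituted>)" per entity and UNION ALL them
SELECT @Sql = COALESCE(@Sql + ' UNION ALL ', '')
    + 'SELECT ''' + eValue.Id + ''' AS Id, '
    + CAST(eValue.mdayValue AS NVARCHAR(MAX)) + ' / '
    + REPLACE(eFormula.Formula, 'mday', CAST(@mday AS NVARCHAR(10)))
    + ' AS Result'
FROM EntityFormula AS eFormula
INNER JOIN EntityValue AS eValue ON eValue.Id = eFormula.Id;
EXEC sp_executesql @Sql;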

Related

SQL - query for combining multiple rows

I'm searching but I can't find a solution for my problem.
I have a table (say T1) with this data:
SampleCode  Name  Content
#           1     A#
#           2     B#
#           3     C#
#           1     A#
#           2     B#
#           3     C#
So I need a select query resulting in:
Column 1    Column 2  Column 3
SampleCode  #         #
Name.1      A#        A#
Name.2      B#        B#
Name.3      C#        C#
Does anyone have a hint?
Thanks
Fabio
Check out the 2 options in this article: https://www.sqlshack.com/multiple-options-to-transposing-rows-into-columns/
If using T-SQL, you could use a PIVOT statement. Or you can create the result set using cursors/loops and dynamic SQL.
Fully Dynamic Solution
I can't believe I hammered this out... maybe I did it just to see if I could. It's long and complicated and uses several loops. I think you should re-evaluate what you're trying to do. This can't be run in a view. You would have to put it into a function or stored procedure. You may want to evaluate the temp table names to see if they are compatible with your environment. Also, since the temp table names are not unique, you cannot run multiple instances; if you want to run multiple instances, you would have to add a unique identifier to a column in the temp tables or to the temp table names. This is fully dynamic... you don't have to know how many columns you need ahead of time.
--**************************************************
--Header area... sample data and temp tables.
--**************************************************
--Clean up sample and result tables if they exist.
IF OBJECT_ID(N'tempdb..#T1') IS NOT NULL
DROP TABLE #T1;
IF OBJECT_ID(N'tempdb..#ResultsTemp') IS NOT NULL
DROP TABLE #ResultsTemp;
IF OBJECT_ID(N'tempdb..#codeTable') IS NOT NULL
DROP TABLE #codeTable;
--For Debugging.
DECLARE @debug int = 1; --0=no debug, 1=show debug messages, do not exec all SQL statements.
--Temp table to hold sample data.
CREATE TABLE #T1 (
SampleCode nvarchar(50),
[Name] nvarchar(50),
Content nvarchar(50),
row_num int
);
--Load Sample Data.
INSERT INTO #T1 (SampleCode, [Name], Content)
VALUES
('#', '1', 'A#')
, ('#', '2', 'B#')
, ('#', '3', 'C#')
, ('#', '1', 'A#')
, ('#', '2', 'B#')
, ('#', '3', 'C#')
;
--**********END HEADER**********
--Number the rows so we can loop over them.
UPDATE #T1
SET row_num = newT1.row_num
FROM
(
SELECT t12.SampleCode
, t12.[Name]
, T12.Content
, ROW_NUMBER()OVER(ORDER BY SampleCode, [Name], Content) as row_num
FROM #T1 as t12
) AS newT1
WHERE #T1.SampleCode = newT1.SampleCode
AND #T1.[Name] = newT1.[Name]
AND #T1.Content = newT1.Content
;
SELECT * FROM #T1; --Debugging... just show the contents of #T1 after adding row_num
--Create temp table to load results.
CREATE TABLE #ResultsTemp (
Column1 nvarchar(50)
);
--Create some variables to track looping (without a cursor).
DECLARE @loopCount int = 1;
DECLARE @maxLoops int = (SELECT COUNT(DISTINCT SampleCode) FROM #T1);
DECLARE @sql nvarchar(512);
--Add columns to #ResultsTemp dynamically
WHILE (@loopCount <= (@maxLoops))
BEGIN
SET @sql = 'ALTER TABLE #ResultsTemp ADD ' + QUOTENAME('Column' + CAST((@loopCount + 1) as nvarchar)) + ' nvarchar(50) NULL';
IF (@debug = 1) PRINT @sql;
EXEC (@sql);
SET @loopCount = @loopCount + 1;
END
--************************************************************
--SECTION FOR INSERTING FIRST ROW for "SampleCode"
--************************************************************
--Vars for tracking the SampleCode variations processed.
DECLARE @sampleLoop int = 1;
DECLARE @sampleCount int = (SELECT COUNT(DISTINCT SampleCode) FROM #T1);
CREATE TABLE #codeTable (
SampleCode nvarchar(50)
, row_num int
);
--Create a list of unique SampleCodes to loop over.
INSERT INTO #codeTable (SampleCode)
SELECT DISTINCT
SampleCode
FROM #T1;
UPDATE #codeTable
SET row_num = newCT.row_num
FROM
(
SELECT ct2.SampleCode
, ROW_NUMBER()OVER(ORDER BY SampleCode) as row_num
FROM #codeTable as ct2
) AS newCT
WHERE #codeTable.SampleCode = newCT.SampleCode
;
SET @sql = 'INSERT INTO #ResultsTemp (Column1) SELECT ''SampleCode''';
IF (@debug = 1) PRINT @sql;
EXEC (@sql);
WHILE (@sampleLoop <= @sampleCount)
BEGIN
SET @sql = 'UPDATE #ResultsTemp SET Column' + CAST(@sampleLoop + 1 as nvarchar) + '=(SELECT TOP 1 SampleCode FROM #codeTable WHERE row_num = ' + CAST(@sampleLoop as nvarchar) + ');';
IF (@debug = 1) PRINT @sql;
EXEC (@sql);
SET @sampleLoop = @sampleLoop + 1;
END
IF (@debug = 1) SELECT * FROM #ResultsTemp;
--**********END SECTION**********
--**************************************************
--SECTION FOR INSERTING THE REST OF THE CONTENT DATA
--**************************************************
--Vars for tracking number of rows processed from T1.
SET @loopCount = 1;
SET @maxLoops = (SELECT MAX(row_num) FROM #T1);
--Loop over each row in T1.
WHILE (@loopCount <= @maxLoops)
BEGIN
--Create a name for this row.
DECLARE @nameRaw nvarchar(50) = (SELECT TOP 1 [Name] FROM #T1 WHERE row_num = @loopCount);
DECLARE @codeNum nvarchar(50) = (
SELECT TOP 1 ct.row_num
FROM #T1 as t
INNER JOIN #codeTable as ct
ON ct.SampleCode = t.SampleCode
WHERE t.row_num = @loopCount);
DECLARE @name nvarchar(50) = 'Name.' + @nameRaw;
--First insert a row for this Name... if not already in the list.
SET @sql = 'INSERT INTO #ResultsTemp (Column1)
SELECT ''Name.'' + t.[Name]
FROM #T1 as t
LEFT OUTER JOIN #ResultsTemp as rt
ON rt.Column1 = ''' + @name + '''
WHERE t.row_num = ' + CAST(@loopCount as nvarchar) + '
AND rt.Column1 IS NULL;';
IF (@debug = 1) PRINT @sql;
EXEC (@sql);
--Update this Name row with the "content".
SET @sql = 'UPDATE rt
SET Column' + CAST(@codeNum + 1 as nvarchar) + '=t.Content
FROM #ResultsTemp as rt
INNER JOIN #T1 as t
ON t.row_num = ' + CAST(@loopCount as nvarchar) + '
AND t.[Name] = ''' + @nameRaw + '''
WHERE rt.Column1 = ''' + @name + ''';';
IF (@debug = 1) PRINT @sql;
EXEC (@sql);
SET @loopCount = @loopCount + 1;
END
--Show everything in the temp Results table.
SELECT *
FROM #ResultsTemp;
Static Select Solution
This next option is not dynamic. You have to know how many columns you need and then you can copy the "Column3" code to any new columns. You have to change the column name and update the "Select TOP 1" statement as commented for each new column you would copy.
WITH CodeTable AS (
SELECT DISTINCT t.SampleCode
FROM #T1 as t
)
SELECT DISTINCT
'Name.' + [Name]
, (
SELECT TOP 1 Content
FROM #T1 as t2
WHERE t2.SampleCode = (
SELECT TOP 1 SampleCode
FROM CodeTable as ct
ORDER BY SampleCode
)
AND t2.[Name] = t.[Name]
) as Column2
, (
SELECT TOP 1 Content
FROM #T1 as t2
WHERE t2.SampleCode = (
SELECT TOP 1 SampleCode
FROM CodeTable as ct
WHERE ct.SampleCode NOT IN (
SELECT TOP 1 SampleCode --Update the TOP 1 to be TOP [number of previous columns]
FROM CodeTable as ct2
ORDER BY ct2.SampleCode
)
ORDER BY ct.SampleCode
)
AND t2.[Name] = t.[Name]
) as Column3
FROM #T1 as t
Static PivotTable Solution
This solution is from the link I posted. Again, you have to know the number of columns you need and it doesn't have generic column names like you specify. But, it is another solution if you reconsider your output.
--Use PivotTable.
SELECT *
FROM
(
SELECT [Name], Content
FROM #T1
) AS SourceTable
PIVOT (MAX(Content) FOR [Content] IN ([A#], [B#], [C#])) AS PivotTable;
A pivot table can solve your problem:
DECLARE @T TABLE (SampleCode sysname, Name sysname, Content sysname)
INSERT @T (SampleCode, Name, Content)
VALUES
('#', '1', 'A#'),
('#', '2', 'B#'),
('#', '3', 'C#'),
('#', '1', 'A#'),
('#', '2', 'B#'),
('#', '3', 'C#')
SELECT CONCAT('Name.', PIVOTTABLE.Name), [#], [#]
FROM
(
SELECT * FROM @T AS t
) AS SOURCE
PIVOT (
MAX(Content)
FOR SampleCode IN ([#], [#])
) AS PIVOTTABLE
Of course, this is the solution for the data you provided in the question.
When there are too many columns to write out manually, use a dynamic query; the only parts that need to be generated are the values in the FOR expression and the same values in the SELECT list, as sketched below.
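For example, a rough sketch of that dynamic approach (assuming the sample data sits in the #T1 temp table built in the earlier answer) might look like this:
-- Build the pivot column list from the distinct SampleCode values,
-- then splice it into both the SELECT list and the FOR ... IN (...) clause.
DECLARE @cols NVARCHAR(MAX), @sql NVARCHAR(MAX);
SELECT @cols = COALESCE(@cols + ', ', '') + QUOTENAME(SampleCode)
FROM (SELECT DISTINCT SampleCode FROM #T1) AS c;
SET @sql = N'SELECT CONCAT(''Name.'', p.Name) AS Column1, ' + @cols + N'
FROM (SELECT SampleCode, Name, Content FROM #T1) AS s
PIVOT (MAX(Content) FOR SampleCode IN (' + @cols + N')) AS p;';
EXEC sp_executesql @sql;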

How can I speed up this SQL Server CURSOR used for change tracking? [closed]

Can anyone recommend how I can speed up this code, and primarily the cursor? The code is a SQL Server trigger that fires on INSERT, UPDATE, or DELETE. It writes a record to a changelog table identifying the type of change (I, U, or D) and then saves the old value and new value of each affected column for each row in a details table.
I want this to be generic so I can easily reuse it for any table I throw at it that has a unique column I can filter on. Writing the whole row of changes to a cloned structure audit table is not an option unfortunately.
Any help is greatly appreciated. I am not the greatest at query optimization and welcome any feedback or rewrites. Thanks!
ALTER TRIGGER [dbo].[tbl_Address_ChangeTracking] ON [dbo].[tbl_Address]
AFTER INSERT, DELETE, UPDATE
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON
--SET XACT_ABORT ON
-- Get the table name of the current process
DECLARE @TableName VARCHAR(25)
SET @TableName = COALESCE(
(
SELECT SCHEMA_NAME(schema_id) + '.' + OBJECT_NAME(parent_object_id)
FROM sys.objects
WHERE sys.objects.name = OBJECT_NAME(@@PROCID) AND
SCHEMA_NAME(sys.objects.schema_id) = OBJECT_SCHEMA_NAME(@@PROCID)
), 'Unknown')
--Declare our cursor to navigate the records in inserted and deleted
DECLARE @cursorSQL AS NVARCHAR(MAX) = ''
DECLARE @PrimaryID AS VARCHAR(MAX) = ''
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
DECLARE @ourLogCursor CURSOR
--If we have a record in inserted and deleted this is an update record and we should pull from the inserted table and assume
--this is one update or many update statements
IF EXISTS
(
SELECT 1
FROM inserted
) AND
EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
SELECT *
INTO #inserted1TableTemp
FROM inserted
SET @cursorSQL = 'SET @ourLogCursor = CURSOR FOR SELECT AddressID FROM #inserted1TableTemp; OPEN @ourLogCursor;'
END
--If we have an inserted record and no deleted record this is an insert and we pull from the inserted table
IF EXISTS
(
SELECT 1
FROM inserted
) AND
NOT EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
SELECT *
INTO #inserted2TableTemp
FROM inserted
SET @cursorSQL = 'SET @ourLogCursor = CURSOR FOR SELECT AddressID FROM #inserted2TableTemp; OPEN @ourLogCursor;'
END
--If we have a deleted record and no insert record this is a deletion and we pull from the deleted table
IF NOT EXISTS
(
SELECT 1
FROM inserted
) AND
EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
DROP TABLE IF EXISTS #inserted1TableTemp
DROP TABLE IF EXISTS #inserted2TableTemp
DROP TABLE IF EXISTS #deletedTableTemp
SELECT *
INTO #deletedTableTemp
FROM deleted
SET @cursorSQL = 'SET @ourLogCursor = CURSOR FOR SELECT AddressID FROM #deletedTableTemp; OPEN @ourLogCursor;'
END
--If we have neither an inserted nor a deleted record there is nothing to do
IF NOT EXISTS
(
SELECT 1
FROM inserted
) AND
NOT EXISTS
(
SELECT 1
FROM deleted
)
BEGIN
RETURN;
END
--Execute our dynamic SQL that sets the correct FOR SELECT statement for the cursor. Pass @ourLogCursor as a parameter and grab the output
--so the results are available outside the scope of the executesql call
EXEC sys.sp_executesql @cursorSQL, N'@ourLogCursor CURSOR OUTPUT', @ourLogCursor OUTPUT;
FETCH NEXT FROM @ourLogCursor INTO @PrimaryID
DECLARE @xmlOld XML
DECLARE @xmlNew XML
DECLARE @SummaryID INT
SET @TableName = COALESCE(
(
SELECT SCHEMA_NAME(schema_id) + '.' + OBJECT_NAME(parent_object_id)
FROM sys.objects
WHERE sys.objects.name = OBJECT_NAME(@@PROCID) AND
SCHEMA_NAME(sys.objects.schema_id) = OBJECT_SCHEMA_NAME(@@PROCID)
), 'Unknown')
--Navigate all our rows
WHILE @@FETCH_STATUS = 0
BEGIN
DROP TABLE IF EXISTS #tmp_AddressesChangelogTrigger
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXML1
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXML2
DROP TABLE IF EXISTS #tmp_AddressesChangelogTriggerXMLsWithDifferences
--Get the deleted and inserted records as xml for comparison against each other
SET @xmlNew =
(
SELECT *
FROM deleted AS [TABLE]
WHERE AddressID = @PrimaryID
ORDER BY AddressID FOR XML AUTO, ELEMENTS
)
SET @xmlOld =
(
SELECT *
FROM inserted AS [TABLE]
WHERE AddressID = @PrimaryID
ORDER BY AddressID FOR XML AUTO, ELEMENTS
)
CREATE TABLE #tmp_AddressesChangelogTriggerXML1
(
NodeName VARCHAR(MAX), Value VARCHAR(MAX)
)
CREATE TABLE #tmp_AddressesChangelogTriggerXML2
(
NodeName VARCHAR(MAX), Value VARCHAR(MAX)
)
--Extract the values and column names
INSERT INTO #tmp_AddressesChangelogTriggerXML2( NodeName, Value )
--Throw the XML into temp tables with the column name and value
SELECT N.value( 'local-name(.)', 'nvarchar(MAX)' ) AS NodeName, N.value( 'text()[1]', 'nvarchar(MAX)' ) AS VALUE
FROM @xmlNew.nodes( '/TABLE/*' ) AS T(N)
INSERT INTO #tmp_AddressesChangelogTriggerXML1( NodeName, Value )
SELECT N.value( 'local-name(.)', 'nvarchar(MAX)' ) AS NodeName, N.value( 'text()[1]', 'nvarchar(MAX)' ) AS VALUE
FROM @xmlOld.nodes( '/TABLE/*' ) AS T(N)
--Get the differences into a temp table
SELECT *
INTO #tmp_AddressesChangelogTriggerXMLsWithDifferences
FROM
(
SELECT COALESCE(A.NodeName, B.NodeName) AS NodeName, B.Value AS OldValue, A.Value AS NewValue
FROM #tmp_AddressesChangelogTriggerXML1 AS A
FULL OUTER JOIN #tmp_AddressesChangelogTriggerXML2 AS B ON A.NodeName = B.NodeName
WHERE A.Value <> B.Value
) AS tmp
--If anything changed then start our write statements
IF
(
SELECT COUNT(*)
FROM #tmp_AddressesChangelogTriggerXMLsWithDifferences
) > 0
BEGIN
BEGIN TRY
-- Now create the Summary record
--BEGIN TRANSACTION WRITECHANGELOGRECORDS
INSERT INTO TableChangeLogSummary( ID, ModifiedDate, ChangeType, TableName )
--Get either insert, or if no insert value, get the delete value
--Set the update type, I, D, U
--Compare values with a full outer join
--Filter on the ID we are on in the CURSOR
SELECT COALESCE(I.AddressID, D.AddressID), GETDATE(),
CASE
WHEN D.AddressID IS NULL THEN 'I'
WHEN I.AddressID IS NULL THEN 'D'
ELSE 'U'
END, #TableName
FROM inserted AS I
FULL OUTER JOIN deleted AS D ON I.AddressID = D.AddressID
WHERE( I.AddressID = @PrimaryID OR
I.AcesAddressID IS NULL
) AND
( D.AddressID = @PrimaryID OR
D.AcesAddressID IS NULL
)
--Get the last summary id that was inserted so we can use it in the detail record
SET @SummaryID = (SELECT IDENT_CURRENT('TableChangeLogSummary'))
--Insert our detail records
INSERT INTO TableChangeLogDetail( SummaryID, ColumnName, OldValue, NewValue )
SELECT @SummaryID, T.NodeName, T.OldValue, T.NewValue
FROM #tmp_AddressesChangelogTriggerXMLsWithDifferences AS T
--COMMIT TRANSACTION WRITECHANGELOGRECORDS
--PRINT 'RECORD WRITTEN'
END TRY
BEGIN CATCH
DECLARE @errorXML XML
SET @errorXML = (SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_STATE() AS ErrorState, ERROR_SEVERITY() AS ErrorSeverity, ERROR_PROCEDURE() AS ErrorProcedure, ERROR_LINE() AS ErrorLine, ERROR_MESSAGE() AS ErrorMessage FOR XML RAW)
DECLARE @errorXMLText NVARCHAR(MAX) = ''
SET @errorXMLText = (SELECT CAST(@errorXML AS NVARCHAR(MAX)))
RAISERROR(@errorXMLText, 16, 1) WITH NOWAIT
END CATCH
END
--Go to the next record and process
FETCH NEXT FROM @ourLogCursor INTO @PrimaryID
END
CLOSE @ourLogCursor
DEALLOCATE @ourLogCursor
END
Acknowledging the recommendation for using change data tracking and caution against putting too much logic into triggers, the following is a refactoring (and some outright rewriting) of your change capture logic.
The updated logic makes a single pass through the data, handling all affected records at once. Given the requirements, I think it is pretty close to optimal, but there may still be room for improvements. The conversion to and from XML likely adds a significant bit of overhead. The alternative would be to dynamically generate and apply custom triggers for each table that explicitly reference all of the data columns individually to get the details and UNION them together.
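For illustration only, a generated per-column trigger fragment of that kind might look roughly like this for a table keyed on AddressID (a hedged sketch with hypothetical column names Street and City; it is not the approach implemented below):
-- One SELECT per data column, UNION ALL'd together, keeping only values that changed
SELECT COALESCE(I.AddressID, D.AddressID) AS KeyValue, 'Street' AS ColumnName,
       D.Street AS OldValue, I.Street AS NewValue
FROM inserted AS I
FULL OUTER JOIN deleted AS D ON D.AddressID = I.AddressID
WHERE EXISTS (SELECT I.Street EXCEPT SELECT D.Street) -- null-safe "is different" test
UNION ALL
SELECT COALESCE(I.AddressID, D.AddressID), 'City', D.City, I.City
FROM inserted AS I
FULL OUTER JOIN deleted AS D ON D.AddressID = I.AddressID
WHERE EXISTS (SELECT I.City EXCEPT SELECT D.City)
-- ...and so on for each remaining column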
I also refined the value comparison to better handle nulls, case sensitivity, and potential trailing space changes.
The code below is not in the form of a trigger, but in a form suitable for stand-alone testing. I figured you (and any others who may be interested) would want to test it first. Once checked out, you should be able to retrofit it back into your trigger.
Note that this is not a 100% generalized solution. Some column types may not be supported. The logic currently assumes a single column primary key of type integer. Changes would be required to handle deviations from these (and possibly some currently unidentified) constraints.
-- Simulated change log tables
DECLARE @TableChangeLogSummary TABLE (ID INT IDENTITY(1,1), KeyValue INT NOT NULL, ModifiedDate DATETIME NOT NULL, ChangeType CHAR(1) NOT NULL, TableName NVARCHAR(1000) NOT NULL )
DECLARE @TableChangeLogDetails TABLE (ID INT IDENTITY(1,1), SummaryID int NOT NULL, ColumnName NVARCHAR(1000) NOT NULL, OldValue NVARCHAR(MAX), NewValue NVARCHAR(MAX))
-- Simulated system defined inserted/deleted tables
DECLARE @inserted TABLE (ID INTEGER, Value1 NVARCHAR(100), Value2 BIT, Value3 FLOAT)
DECLARE @deleted TABLE (ID INTEGER, Value1 NVARCHAR(100), Value2 BIT, Value3 FLOAT)
-- Test data
INSERT @inserted
VALUES
(1, 'AAA', 0, 3.14159), -- Insert
(2, 'BBB', 1, null), -- Mixed updates including null to non-null and non-null to null
(3, 'CCC', 0, 0), -- Trailing space change
(4, 'DDD', null, 1.68), -- No changes
(5, '', 0, null), -- No changes with blanks and nulls
(6, null, null, null), -- No changes all nulls
(7, null, null, null) -- Insert all nulls (summary with key, but no details will be logged)
INSERT @deleted
VALUES
(2, 'bbb', null, 2.73),
(3, 'CCC ', 0, 0),
(4, 'DDD', null, 1.68),
(5, '', 0, null),
(6, null, null, null),
(8, null, null, null), -- Delete all null values (summary with key, but no details will be logged)
(9, 'ZZZ', 999, 999.9) -- Delete non-nulls
--- Now the real work begins...
-- Set table and information. Assumes table has exactly one PK column. Later logic assumes an INT.
DECLARE @TableName NVARCHAR(1000) = 'MyTable' -- To be extracted from the parent object of the trigger
DECLARE @KeyColumnName SYSNAME = 'ID' -- This can be fixed if known or derived on the fly from the primary key definition
-- Extract inserted and/or deleted data
DECLARE @InsertedXml XML = (
SELECT *
FROM @inserted
FOR XML PATH('inserted'), TYPE
)
DECLARE @DeletedXml XML = (
SELECT *
FROM @deleted
FOR XML PATH('deleted'), TYPE
)
-- Parse and rearrange the captured key and data values
DECLARE @TempDetails TABLE(
KeyValue INT NOT NULL,
ChangeType CHAR(1) NOT NULL,
ColumnName VARCHAR(1000) NOT NULL,
IsKeyColumn BIT NOT NULL,
NewValue NVARCHAR(MAX),
OldValue NVARCHAR(MAX))
INSERT @TempDetails
SELECT
KeyValue = COALESCE(I.KeyValue, D.KeyValue),
ChangeType = CASE WHEN D.KeyValue IS NULL THEN 'I' WHEN I.KeyValue IS NULL THEN 'D' ELSE 'U' END,
ColumnName = COALESCE(I.ColumnName, D.ColumnName),
IsKeyColumn = K.IsKeyColumn,
NewValue = I.Value,
OldValue = D.Value
FROM (
SELECT K.KeyValue, C.ColumnName, C.Value
FROM @InsertedXml.nodes( '/inserted' ) R(Row)
CROSS APPLY (
SELECT KeyValue = C.Col.value('text()[1]', 'int')
FROM R.Row.nodes( './*' ) C(Col)
WHERE C.Col.value( 'local-name(.)', 'nvarchar(MAX)' ) = @KeyColumnName
) K
CROSS APPLY (
SELECT ColumnName = C.Col.value('local-name(.)', 'nvarchar(MAX)'), Value = C.Col.value('text()[1]', 'nvarchar(MAX)')
FROM R.Row.nodes( './*' ) C(Col)
) C
) I
FULL OUTER JOIN (
SELECT K.KeyValue, C.ColumnName, C.Value
FROM @DeletedXml.nodes( '/deleted' ) R(Row)
CROSS APPLY (
SELECT KeyValue = C.Col.value('text()[1]', 'int')
FROM R.Row.nodes( './*' ) C(Col)
WHERE C.Col.value( 'local-name(.)', 'nvarchar(MAX)' ) = @KeyColumnName
) K
CROSS APPLY (
SELECT ColumnName = C.Col.value('local-name(.)', 'nvarchar(MAX)'), Value = C.Col.value('text()[1]', 'nvarchar(MAX)')
FROM R.Row.nodes( './*' ) C(Col)
) C
) D
ON D.KeyValue = I.KeyValue
AND D.ColumnName = I.ColumnName
CROSS APPLY (
SELECT IsKeyColumn = CASE WHEN COALESCE(I.ColumnName, D.ColumnName) = @KeyColumnName THEN 1 ELSE 0 END
) K
WHERE ( -- We need to be careful about edge cases here
(I.Value IS NULL AND D.Value IS NOT NULL)
OR (I.Value IS NOT NULL AND D.Value IS NULL)
OR I.Value <> D.Value COLLATE Latin1_General_Bin -- Precise compare (case and accent sensitive)
OR DATALENGTH(I.Value) <> DATALENGTH(D.Value) -- Catch trailing space cases
OR K.IsKeyColumn = 1
)
-- Get rid of updates with no changes, but keep key-only inserts or deletes
DELETE T
FROM @TempDetails T
WHERE T.IsKeyColumn = 1
AND T.ChangeType = 'U'
AND NOT EXISTS (
SELECT *
FROM @TempDetails T2
WHERE T2.KeyValue = T.KeyValue
AND T2.IsKeyColumn = 0
)
-- Local table to capture and link SummaryID between the summary and details tables
DECLARE @CaptureSummaryID TABLE (SummaryID int, KeyValue INT NOT NULL)
-- Insert change summary and capture the assigned Summary ID via the OUTPUT clause
INSERT INTO @TableChangeLogSummary (KeyValue, ModifiedDate, ChangeType, TableName)
OUTPUT INSERTED.id, INSERTED.KeyValue INTO @CaptureSummaryID
SELECT T.KeyValue, ModifiedDate = GETDATE(), T.ChangeType, TableName = @TableName
FROM @TempDetails T
WHERE T.IsKeyColumn = 1
ORDER BY T.KeyValue -- Optional, but adds consistency
-- Insert change details
INSERT INTO @TableChangeLogDetails (SummaryID, ColumnName, OldValue, NewValue)
SELECT S.SummaryID, T.ColumnName, T.OldValue, T.NewValue
FROM @CaptureSummaryID S
JOIN @TempDetails T ON T.KeyValue = S.KeyValue
WHERE T.IsKeyColumn = 0
ORDER BY T.ColumnName -- Optional, but adds consistency
-- View test results
SELECT 'Change Log:', *
FROM @TableChangeLogSummary S
LEFT JOIN @TableChangeLogDetails D ON D.SummaryID = S.ID
ORDER BY S.ID, D.ID

SQL Loop through tables and columns to find which columns are NOT empty

I created a temp table #test containing 3 fields: ColumnName, TableName, and Id.
I would like to see which rows in the #test table (columns in their respective tables) are not empty. I.e., for every column name that I have in the ColumnName field, and for the corresponding table found in the TableName field, I would like to see whether the column is empty or not. I tried some things (see below) but didn't get anywhere. Help, please.
declare @LoopCounter INT = 1, @maxloopcounter int, @test varchar(100),
@test2 varchar(100), @check int
set @maxloopcounter = (select count(TableName) from #test)
while @LoopCounter <= @maxloopcounter
begin
DECLARE @PropIDs TABLE (tablename varchar(max), id int )
Insert into @PropIDs (tablename, id)
SELECT [tableName], id FROM #test
where id = @LoopCounter
set @test2 = (select columnname from #test where id = @LoopCounter)
declare @sss varchar(max)
set @sss = (select tablename from @PropIDs where id = @LoopCounter)
set @check = (select count(@test2)
from (select tablename
from @PropIDs
where id = @LoopCounter) A
)
print @test2
print @sss
print @check
set @LoopCounter = @LoopCounter + 1
end
In order to use variables as column names and table names in your @check query, you will need to use dynamic SQL.
There is most likely a better way to do this but I can't think of one off hand. Here is what I would do.
Use the select and declare a cursor rather than a while loop as you have it. That way you don't have to count on sequential ids. The cursor would fetch the fields columnname, id and tablename.
In the loop build a dynamic SQL statement:
Set @Sql = 'Select Count(*) Cnt Into #Temp2 From ' + @TableName + ' Where ' + @ColumnName + ' Is Not Null And ' + @ColumnName + ' <> '''''
Exec(@Sql)
Then check #Temp2 for a value greater than 0 and if this is what you desire you can use the @id that was fetched to update your #test table. Putting the result into a scalar variable rather than a temp table would be preferred, but I can't remember the best way to do that, and using a temp table allows you to use an update join, so it would work well in my opinion.
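Putting those pieces together, a rough end-to-end sketch might look like the following (hedged: it assumes #test has the Id, ColumnName, and TableName columns described in the question, and it collects the counts into a hypothetical @Results table variable):
DECLARE @Results TABLE (Id INT, TableName SYSNAME, ColumnName SYSNAME, NonEmptyCount INT);
DECLARE @Id INT, @ColumnName SYSNAME, @TableName SYSNAME, @Sql NVARCHAR(MAX), @Count INT;
DECLARE cur CURSOR LOCAL FAST_FORWARD FOR
    SELECT Id, ColumnName, TableName FROM #test;
OPEN cur;
FETCH NEXT FROM cur INTO @Id, @ColumnName, @TableName;
WHILE @@FETCH_STATUS = 0
BEGIN
    -- Count non-null, non-empty values in this column of this table
    SET @Sql = N'SELECT @cnt = COUNT(*) FROM ' + QUOTENAME(@TableName)
             + N' WHERE ' + QUOTENAME(@ColumnName) + N' IS NOT NULL AND CAST('
             + QUOTENAME(@ColumnName) + N' AS NVARCHAR(MAX)) <> N''''';
    EXEC sys.sp_executesql @Sql, N'@cnt INT OUTPUT', @cnt = @Count OUTPUT;
    INSERT INTO @Results VALUES (@Id, @TableName, @ColumnName, @Count);
    FETCH NEXT FROM cur INTO @Id, @ColumnName, @TableName;
END
CLOSE cur;
DEALLOCATE cur;
SELECT * FROM @Results; -- rows with NonEmptyCount > 0 are the non-empty columns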
https://www.mssqltips.com/sqlservertip/1599/sql-server-cursor-example/
http://www.sommarskog.se/dynamic_sql.html
Found a way to extract all non-empty tables from the schema, then just joined with the initial temp table that I had created.
select A.tablename, B.[row_count]
from (select * from #test) A
left join
(SELECT r.table_name, r.row_count, r.[object_id]
FROM sys.tables t
INNER JOIN (
SELECT OBJECT_NAME(s.[object_id]) table_name, SUM(s.row_count) row_count, s.[object_id]
FROM sys.dm_db_partition_stats s
WHERE s.index_id in (0,1)
GROUP BY s.[object_id]
) r on t.[object_id] = r.[object_id]
WHERE r.row_count > 0 ) B
on A.[TableName] = B.[table_name]
WHERE ROW_COUNT > 0
order by b.row_count desc
How about this one - a bitmask computed column that checks for NULLability. The value in the bitmask tells you whether each column is NULL or not, counting in base 2.
CREATE TABLE FindNullComputedMask
(ID int
,val int
,valstr varchar(3)
,NotEmpty as
CASE WHEN ID IS NULL THEN 0 ELSE 1 END
|
CASE WHEN val IS NULL THEN 0 ELSE 2 END
|
CASE WHEN valstr IS NULL THEN 0 ELSE 4 END
)
INSERT FindNullComputedMask
SELECT 1,1,NULL
INSERT FindNullComputedMask
SELECT NULL,2,NULL
INSERT FindNullComputedMask
SELECT 2,NULL, NULL
INSERT FindNullComputedMask
SELECT 3,3,3
SELECT *
FROM FindNullComputedMask

SQL dynamic columns and Update multiple columns

I have a table UserPermission which has a number of columns of TINYINT type. e.g Read, Write, Update, Delete, Access etc.
I get three parameters in the stored procedure: @UserId, @ColNames, @ColValues, where @ColNames and @ColValues are comma separated values.
How can I insert or update the table row (if already exists) with the passed column names and corresponding values.
I tried to write a dynamic query which runs fine for INSERT, but I was unable to write the UPDATE query dynamically with each column and its value concatenated.
Any response would be appreciated
Thanks in advance.
This is a somewhat dirty way to do what you require. However, if you create the following table-valued function:
CREATE FUNCTION [dbo].[stringSplit]
(
@String NVARCHAR(4000),
@Delimiter NCHAR(1)
)
RETURNS TABLE
AS
RETURN
(
WITH Split(stpos,endpos)
AS(
SELECT 0 AS stpos, CHARINDEX(@Delimiter,@String) AS endpos
UNION ALL
SELECT endpos+1, CHARINDEX(@Delimiter,@String,endpos+1)
FROM Split
WHERE endpos > 0
)
SELECT 'Id' = ROW_NUMBER() OVER (ORDER BY (SELECT 1)),
'Data' = SUBSTRING(@String,stpos,COALESCE(NULLIF(endpos,0),LEN(@String)+1)-stpos)
FROM Split
)
You can then use that function to join the data together:
DECLARE @TotalCols INT
DECLARE @TotalVals INT
SET @TotalCols = (
SELECT COUNT(ID) AS Total
FROM dbo.stringSplit('department, teamlead', ',')
);
SET @TotalVals = (
SELECT COUNT(ID) AS Total
FROM dbo.stringSplit('IT, Bob', ',')
);
IF @TotalCols = @TotalVals
BEGIN
IF OBJECT_ID('tempdb..#temptable') IS NOT NULL
DROP TABLE #temptable
CREATE TABLE #temptable (
ColName VARCHAR(MAX) NULL
,ColValue VARCHAR(MAX) NULL
)
INSERT INTO #temptable
SELECT a.DATA
,b.DATA
FROM dbo.stringSplit('department, teamlead', ',') AS a
INNER JOIN dbo.stringSplit('IT, Bob', ',') AS b ON a.Id = b.Id
SELECT *
FROM #temptable;
END
It's not very efficient, but it will bring you the desired results.
You can then use the temp table to update, insert and delete as required.
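For example, a hedged sketch of turning those #temptable name/value pairs into a dynamic UPDATE (assuming the target is the UserPermission table from the question, keyed by a UserId column, with a hypothetical @UserId value in scope):
DECLARE @UserId INT = 1; -- hypothetical key value
DECLARE @Sql NVARCHAR(MAX);
-- Build "[col1] = 'val1', [col2] = 'val2', ..." from the name/value pairs
SELECT @Sql = COALESCE(@Sql + ', ', '')
            + QUOTENAME(LTRIM(RTRIM(ColName))) + ' = ''' + LTRIM(RTRIM(ColValue)) + ''''
FROM #temptable;
SET @Sql = 'UPDATE UserPermission SET ' + @Sql
         + ' WHERE UserId = ' + CAST(@UserId AS VARCHAR(10)) + ';';
EXEC (@Sql); -- string literals convert implicitly for the TINYINT flag columns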
Instead of having a comma-delimited list, I would create a separate parameter for each column and make its default value NULL; in the code, update nothing if it is NULL, or insert 0. Something like this:
CREATE PROCEDURE usp_UserPermissions
@UserID INT
,@Update INT = NULL --<-- Make default values NULL
,@Delete INT = NULL
,@Read INT = NULL
,@Write INT = NULL
,@Access INT = NULL
AS
BEGIN
SET NOCOUNT ON;
Declare @t TABLE (UserID INT, [Update] INT,[Read] INT
,[Write] INT,[Delete] INT,[Access] INT)
INSERT INTO @t (Userid, [Update],[Read],[Write],[Delete],[Access])
VALUES (@UserID , @Update , @Read, @Write , @Delete, @Access)
IF EXISTS (SELECT 1 FROM UserPermission WHERE UserID = @UserID)
BEGIN
UPDATE up -- Only update if a value was provided else update to itself
SET up.[Read] = ISNULL(t.[Read] , up.[Read])
,up.[Write] = ISNULL(t.[Write] , up.[Write])
,up.[Update] = ISNULL(t.[Update] , up.[Update])
,up.[Delete] = ISNULL(t.[Delete] , up.[Delete])
,up.[Access] = ISNULL(t.[Access] , up.[Access])
FROM UserPermission up
INNER JOIN @t t ON up.UserID = t.UserID
END
ELSE
BEGIN
-- if already no row exists for that User add a row
-- If no value was passed for a column add 0 as default
INSERT INTO UserPermission (Userid, [Update],[Read],[Write],[Delete],[Access])
SELECT Userid
, ISNULL([Update], 0)
, ISNULL([Read], 0)
, ISNULL([Write], 0)
, ISNULL([Delete], 0)
, ISNULL([Access], 0)
FROM @t
END
END
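A quick usage sketch with hypothetical values; any parameter you leave out stays NULL, so the row keeps its current flags on update (or gets 0 on insert):
EXEC usp_UserPermissions @UserID = 42, @Read = 1, @Write = 1;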

SQL - Replacing all "ASCII/special characters" in a string

Edit: I have about 80 characters that are causing problems in my application so I don't want to hard code a REPLACE for every single character. I think it would be easier to create a separate table with two columns,"special characters" and "replacement characters", and I will remove those columns from the original table which contains the column "StringTest". My goal will be figuring out how to use the characters table to replace characters in the string table.
I am trying to replace all "special characters" (i.e. À, Æ, Ç) with "MappedCharacters" (A, AE, C) in SQL Server. I have tried two different techniques, one using a cursor and one without, to search through a string and replace all special characters with mapped characters. Each of my methods only replaces characters that are in the same row as the string.
Example before:
num SpecialCharacter MappedCharacter StringTest
1 À A StringÀÆ
2 Æ AE ÆStringÆ
3 Ç C StrÇÀing
Example after:
num SpecialCharacter MappedCharacter StringTest
1 À A StringAÆ
2 Æ AE AEStringAE
3 Ç C StrCÀing
Preferred Output:
num SpecialCharacter MappedCharacter StringTest
1 À A StringAAE
2 Æ AE AEStringAE
3 Ç C StrCAing
So you can see that I want to replace all "special characters" in StringTest but only characters that are in the same row are getting replaced.
I haven't quite figured out how to do that just yet.
Here are the two SQL attempts I have been trying to modify (I only need one of them to work):
First Method:
DECLARE @cASCIINum INT;
DECLARE @cSpecialChar VARCHAR(50);
DECLARE @cMappedChar VARCHAR(50);
DECLARE @cStringTest VARCHAR(50);
DECLARE @mapCursor as CURSOR;
SET @mapCursor = CURSOR FOR
SELECT [ASCIINum]
,[SpecialChar]
,[MappedChar]
,[StringTest]
FROM [intranet].[dbo].[CharMapTestTab];
OPEN @mapCursor;
FETCH NEXT FROM @mapCursor INTO @cASCIINum,
@cSpecialChar,
@cMappedChar,
@cStringTest;
WHILE @@FETCH_STATUS = 0
BEGIN
UPDATE [intranet].[dbo].[CharMapTestTab]
SET StringTest = REPLACE(StringTest, SpecialChar, MappedChar)
WHERE SpecialChar <> MappedChar
END
CLOSE @mapCursor;
DEALLOCATE @mapCursor;
Second Method:
DECLARE @ASCIINum INT = 0
WHILE (1 = 1)
BEGIN
SELECT @ASCIINum = ASCIINum
FROM [intranet].[dbo].[CharMapTestTab]
WHERE ASCIINum > @ASCIINum
ORDER BY ASCIINum
IF @@ROWCOUNT = 0 BREAK;
UPDATE [intranet].[dbo].[CharMapTestTab]
SET StringTest = REPLACE(StringTest, SpecialChar, MappedChar)
WHERE SpecialChar <> MappedChar
SELECT TOP 1000 [ASCIINum]
,[SpecialChar]
,[MappedChar]
,[StringTest]
FROM [intranet].[dbo].[CharMapTestTab]
END
Try this, it works better than looping because there is only 1 update:
-- create test table vc
create table vc(StringTest varchar(20))
insert vc values('StringÀÆ'), ('ÆStringÆ')
go
-- create test table CharacterMapping
create table CharacterMapping(SpecialCharacter char(1), MappedCharacter varchar(2))
insert CharacterMapping values('À', 'A'),('Æ', 'AE'), ('Ç', 'C')
go
--build the varchar for updating
declare @x varchar(max) = 'StringTest'
select @x = 'replace('+@x+', ''' + SpecialCharacter + ''','''+MappedCharacter+''')'
from CharacterMapping
set @x = 'update vc set StringTest=' + @x +' from vc'
exec (@x)
select * from vc
Result:
StringAAE
AEStringAE
I would make a separate mapping table which contains the bad character and its corresponding good character, one set per row. Then loop over that table and do a replace for each character set.
DECLARE @map TABLE (
id INT,
badChar CHAR,
goodChar CHAR
)
DECLARE @strings TABLE (
searchString VARCHAR(50)
)
INSERT INTO @map
VALUES
(1, 'y', 'a'),
(2, 'z', 'b')
DECLARE @curRow INT, @totalRows INT
SET @curRow = 1
SELECT @totalRows = COUNT(*) FROM @map
INSERT INTO @strings
VALUES
('zcccyccz'),
('cccyccz')
WHILE @curRow <= @totalRows
BEGIN
UPDATE @strings
SET searchString = REPLACE(searchString, badChar, goodChar)
FROM @map
WHERE id = @curRow
SET @curRow = @curRow + 1
END
SELECT * FROM @strings
--Output
--bcccaccb
--cccaccb
It would be helpful to know how many rows are in your table and how many you estimate to have "special characters". Also, are there only 3 special characters? If you have 40 or fewer special characters, it may look ridiculous, but I'd just nest as many REPLACE() calls as you have special characters, like:
UPDATE YourTable SET YourColumn = REPLACE(
REPLACE(
REPLACE(YourColumn,'Ç','C')
,'Æ','AE')
,'À','A')
If most rows have special characters, I'd skip any WHERE. If only a few rows have special characters, I'd use a CTE to identify them:
;WITH AllSpecialRows AS
(
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%À%'
UNION
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%Æ%'
UNION
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%Ç%'
)
UPDATE y
SET YourColumn = REPLACE(
REPLACE(
REPLACE(YourColumn,'Ç','C')
,'Æ','AE')
,'À','A')
FROM YourTable y
INNER JOIN AllSpecialRows s ON y.PrimaryKey =s.PrimaryKey
update table
set column = REPLACE(column,'À','A')
where column like ('%À%')
update table
set column = REPLACE(column,'Æ','AE')
where column like ('%Æ%')
I will leave the 3rd to you
Or this might be more efficient
update table
set column = REPLACE(REPLACE(column,'À','A'),'Æ','AE')
where column like ('%À%')
or column like ('%Æ%')
If you really want to process a list of mapped characters then this is not a proper answer
@t-clausen.dk's answer with table variables and temp tables, just to avoid people messing up their dev databases with additional tables.
TABLE Variables:
-- Create test table variable @CharacterMapping
DECLARE @CharacterMapping TABLE (SpecialCharacter char(1), MappedCharacter varchar(2))
INSERT @CharacterMapping VALUES('À', 'A'), ('Æ', 'AE'), ('Ç', 'C')
--Build the varchar for updating
DECLARE @x varchar(max) = 'StringTest'
SELECT @x = 'replace('+@x+', ''' + SpecialCharacter + ''',''' + MappedCharacter + ''')'
FROM @CharacterMapping
SET @x = 'DECLARE @vc TABLE(StringTest varchar(20));'
+ ' insert @vc values(''StringÀÆ''), (''ÆStringÆ'');'
+ 'update @vc set StringTest=' + @x +' from @vc;'
+ 'SELECT * FROM @vc;'
Exec (@x)
GO
With Temp table:
-- Create test temp table #vc
CREATE TABLE #vc(StringTest varchar(20))
INSERT #vc VALUES('StringÀÆ'), ('ÆStringÆ')
-- Create test table CharacterMapping
DECLARE @CharacterMapping TABLE (SpecialCharacter char(1), MappedCharacter varchar(2))
INSERT @CharacterMapping VALUES('À', 'A'), ('Æ', 'AE'), ('Ç', 'C')
--Build the varchar for updating
DECLARE @x varchar(max) = 'StringTest'
SELECT @x = 'replace('+@x+', ''' + SpecialCharacter + ''',''' + MappedCharacter + ''')'
FROM @CharacterMapping
SET @x = 'update #vc set StringTest=' + @x +' from #vc'
-- Execute
EXEC (@x)
-- Select the results
SELECT * FROM #vc;
-- Drop temp table
DROP TABLE #vc;
GO