Transactions not being committed although I have a "Commit Transaction" statement - SQL

I'm using SQL Azure and trying to do a conditional delete in batches on a large table. Sample:
DECLARE
@LargestKeyProcessed BIGINT = 1,
@NextBatchMax BIGINT,
@msg varchar(max) = '';
WHILE (@LargestKeyProcessed <= 1000000)
BEGIN
Begin Transaction
SET @NextBatchMax = @LargestKeyProcessed + 50000;
DELETE From mytable
WHERE Id > @LargestKeyProcessed AND Id <= @NextBatchMax And some logic
SET @LargestKeyProcessed = @NextBatchMax;
set @msg = '' + @LargestKeyProcessed;
RAISERROR(@msg, 0, 1) WITH NOWAIT
Commit Transaction
END
After the command executes successfully I close the tab, but SSMS says there are uncommitted transactions, even though the commit statement is in every iteration. Also, the database size seems to remain the same.
I kindly seek your support in explaining why this happens.
Thank you very much.

I think you can try adding SET IMPLICIT_TRANSACTIONS OFF to the SQL, as follows, to see if it solves your issue. If implicit transactions are enabled in your session, your explicit BEGIN TRANSACTION nests inside an automatically started transaction, so each COMMIT only closes the inner one and an outer transaction stays open, which would explain the SSMS warning.
DECLARE
@LargestKeyProcessed BIGINT = 1,
@NextBatchMax BIGINT,
@msg varchar(max) = '';
WHILE (@LargestKeyProcessed <= 1000000)
BEGIN
SET IMPLICIT_TRANSACTIONS OFF
Begin Transaction
SET @NextBatchMax = @LargestKeyProcessed + 50000;
DELETE From mytable
WHERE Id > @LargestKeyProcessed AND Id <= @NextBatchMax And some logic
SET @LargestKeyProcessed = @NextBatchMax;
set @msg = '' + @LargestKeyProcessed;
RAISERROR(@msg, 0, 1) WITH NOWAIT
Commit Transaction
END
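To verify whether the session still has an open transaction before closing the tab, a minimal check (using only built-in tools) might look like this:
SELECT @@TRANCOUNT AS OpenTransactionCount; -- 0 means there is nothing left to commit
DBCC OPENTRAN; -- reports the oldest active transaction in the current database, if any
(As for the file size: deleting rows does not shrink the database files; the space is freed inside the files, so the size on disk stays the same unless you explicitly shrink.)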

Related

SPROC that returns a unique calculated INT for each call

I'm implementing an event logging system in my application to save some event types from my code, so I've created a table to store the log type and an incremental id:
|LogType|CurrentId|
|info | 1 |
|error | 5 |
And also a table to save the concrete log record
|LogType|IdLog|Message |
|info |1 |Process started|
|error |5 |some error |
So, every time I need to save a new record I call a SPROC to calculate the new id for the log type, basically newId = currentId + 1. But I'm facing an issue with that calculation: if multiple processes call the SPROC at the same time, the generated id is the same, so I'm getting log records with the same id, and every record must have a unique id.
This is my SPROC written for SQL Server 2005:
ALTER PROCEDURE [dbo].[usp_GetLogId]
@LogType VARCHAR(MAX)
AS
BEGIN
SET NOCOUNT ON;
BEGIN TRANSACTION
BEGIN TRY
DECLARE @IdCreated VARCHAR(MAX)
IF EXISTS (SELECT * FROM TBL_ApplicationLogId WHERE LogType = @LogType)
BEGIN
DECLARE @CurrentId BIGINT
SET @CurrentId = (SELECT CurrentId FROM TBL_ApplicationLogId WHERE LogType = @LogType)
DECLARE @NewId BIGINT
SET @NewId = (@CurrentId + 1)
UPDATE TBL_ApplicationLogId
SET CurrentId = @NewId
WHERE LogType = @LogType
SET @IdCreated = CONVERT(VARCHAR, @NewId)
END
ELSE
BEGIN
INSERT INTO TBL_ApplicationLogId VALUES(@LogType, 0)
EXEC @IdCreated = usp_GetLogId @LogType
END
END TRY
BEGIN CATCH
DECLARE @ErrorMessage NVARCHAR(MAX)
SET @ErrorMessage = ERROR_MESSAGE()
IF @@TRANCOUNT > 0
ROLLBACK TRANSACTION;
RAISERROR (@ErrorMessage, 16, 1)
END CATCH
IF @@TRANCOUNT > 0
COMMIT TRANSACTION
SELECT @IdCreated
END
I would appreciate your help fixing the sproc to return a unique id on every call.
It has to work on SQL Server 2005. Thanks
Can you achieve what you want with an identity column?
Then you can just let SQL Server guarantee uniqueness.
Example:
create table my_test_table
(
ID int identity
,SOMEVALUE nvarchar(100)
);
insert into my_test_table(somevalue)values('value1');
insert into my_test_table(somevalue)values('value2');
select * from my_test_table
If you must issue the new ID values yourself for some reason, try using a sequence (note: sequences require SQL Server 2012 or later), as shown here:
if object_id('my_test_table') is not null
begin
drop table my_test_table;
end;
go
create table my_test_table
(
ID int
,SOMEVALUE nvarchar(100)
);
go
if object_id('my_test_sequence') is not null
begin
drop sequence my_test_sequence;
end;
go
CREATE SEQUENCE my_test_sequence
AS INT --other options are here: https://msdn.microsoft.com/en-us/library/ff878091.aspx
START WITH 1
INCREMENT BY 1
MINVALUE 0
NO MAXVALUE;
go
insert into my_test_table(id,somevalue)values(next value for my_test_sequence,'value1');
insert into my_test_table(id,somevalue)values(next value for my_test_sequence,'value2');
insert into my_test_table(id,somevalue)values(next value for my_test_sequence,'value3');
select * from my_test_table
One more edit: I think this is an improvement to the existing stored procedure, given the requirements. Include the new value calculation directly in the UPDATE, ultimately return the value directly from the table (not from a variable, which could be out of date), and avoid recursion.
A full test script is below.
if object_id('STACKOVERFLOW_usp_getlogid') is not null
begin
drop procedure STACKOVERFLOW_usp_getlogid;
end
go
if object_id('STACKOVERFLOW_TBL_ApplicationLogId') is not null
begin
drop table STACKOVERFLOW_TBL_ApplicationLogId;
end
go
create table STACKOVERFLOW_TBL_ApplicationLogId(CurrentID int, LogType nvarchar(max));
go
create PROCEDURE [dbo].[STACKOVERFLOW_USP_GETLOGID](@LogType VARCHAR(MAX))
AS
BEGIN
SET NOCOUNT ON;
BEGIN TRANSACTION
BEGIN TRY
DECLARE @IdCreated VARCHAR(MAX)
IF EXISTS (SELECT * FROM STACKOVERFLOW_TBL_ApplicationLogId WHERE LogType = @LogType)
BEGIN
UPDATE STACKOVERFLOW_TBL_APPLICATIONLOGID
SET CurrentId = CurrentID + 1
WHERE LogType = @LogType
END
ELSE
BEGIN
--first time: insert 0.
INSERT INTO STACKOVERFLOW_TBL_ApplicationLogId(CurrentID,LogType) VALUES(0,@LogType);
END
END TRY
BEGIN CATCH
DECLARE @ErrorMessage NVARCHAR(MAX)
SET @ErrorMessage = ERROR_MESSAGE()
IF @@TRANCOUNT > 0
begin
ROLLBACK TRANSACTION;
end
RAISERROR(@ErrorMessage, 16, 1);
END CATCH
select CurrentID from STACKOVERFLOW_TBL_APPLICATIONLOGID where LogType = @LogType;
IF @@TRANCOUNT > 0
begin
COMMIT TRANSACTION
END
end
go
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType1';
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType1';
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType1';
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType2';
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType2';
exec STACKOVERFLOW_USP_GETLOGID 'TestLogType2';
You want your increment and read to be atomic, with a guarantee that no other process can increment in between.
You also need to ensure that the log type exists, and that check again needs to be thread-safe.
Here's how I would go about it, though you'd be advised to read up on how it all works in SQL Server 2005, as I have not had to deal with these things in nearly 8 years.
This should complete the two actions atomically, and without transactions, in order to prevent threads from blocking each other (not just for performance, but also to avoid deadlocks when interacting with other code).
ALTER PROCEDURE [dbo].[usp_GetLogId]
@LogType VARCHAR(MAX)
AS
BEGIN
SET NOCOUNT ON;
-- Hold our newly created id in a table variable, so we can use OUTPUT
DECLARE @new_id TABLE (id BIGINT);
-- I think this is thread safe, doing all things in a single statement
----> Check that the log-type has no records
----> If so, then insert an initialising row
----> Output the newly created id into our table variable
INSERT INTO
TBL_ApplicationLogId (
LogType,
CurrentId
)
OUTPUT
INSERTED.CurrentID
INTO
@new_id
SELECT
@LogType, 1
FROM
TBL_ApplicationLogId
WHERE
LogType = @LogType
HAVING
COUNT(*) = 0 -- no GROUP BY: an empty result still forms one group, so this can fire for a brand-new LogType
;
-- I think this is thread safe, doing all things in a single statement
----> Ensure we don't already have a new id created
----> Increment the current id
----> Output it to our table variable
UPDATE
TBL_ApplicationLogId
SET
CurrentId = CurrentId + 1
OUTPUT
INSERTED.CurrentID
INTO
@new_id
WHERE
LogType = @LogType
AND NOT EXISTS (SELECT * FROM @new_id)
;
-- Select the result from our table variable
----> It must be populated either from the INSERT or the UPDATE
SELECT
MAX(id) -- MAX used to tell the system that it's returning a scalar
FROM
@new_id
;
END
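To sanity-check the behaviour, the calls might look like this (the log type name is just an example):
EXEC [dbo].[usp_GetLogId] 'info'; -- first call initialises 'info' and returns 1
EXEC [dbo].[usp_GetLogId] 'info'; -- next call increments and returns 2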
Not much you can do here, but validate that:
the table TBL_ApplicationLogId is indexed on column LogType.
the @LogType parameter has the same data type as the LogType column in TBL_ApplicationLogId, so the index can actually be used if/when it exists.
If you have a concurrency issue, forcing the lock level on TBL_ApplicationLogId during the select and update can help. Just add WITH (ROWLOCK) after the table name, e.g. TBL_ApplicationLogId WITH (ROWLOCK), as sketched below.
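For illustration, applied to the update in the question it would look like this (a sketch, not tested against the original schema):
UPDATE TBL_ApplicationLogId WITH (ROWLOCK)
SET CurrentId = CurrentId + 1
WHERE LogType = @LogType;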

How to log errors even if the transaction is rolled back?

Let's say we have the following commands:
SET XACT_ABORT OFF;
SET IMPLICIT_TRANSACTIONS OFF
DECLARE @index int
SET @index = 4;
DECLARE @errorCount int
SET @errorCount = 0;
BEGIN TRANSACTION
WHILE @index > 0
BEGIN
SAVE TRANSACTION Foo;
BEGIN TRY
-- commands to execute...
INSERT INTO AppDb.dbo.Customers VALUES('Jalal', '1990-03-02');
-- make a problem
IF @index = 3
INSERT INTO AppDb.dbo.Customers VALUES('Jalal', '9999-99-99');
END TRY
BEGIN CATCH
ROLLBACK TRANSACTION Foo; -- I want to keep track of previous logs, but it doesn't work! :(
INSERT INTO AppDb.dbo.LogScripts VALUES(NULL, 'error', 'Customers', suser_name());
SET @errorCount = @errorCount + 1;
END CATCH
SET @index = @index - 1;
END
IF @errorCount > 0
ROLLBACK TRANSACTION
ELSE
COMMIT TRANSACTION
I want to execute a batch, keep all errors in a log, and then, if no error occurred, commit all changes. How can I implement this in SQL Server?
The transaction is tied to the connection, and as such, all writes will be rolled back on the outer ROLLBACK TRANSACTION (irrespective of the nested savepoints).
What you can do is log the errors to an in-memory structure, like a table variable, and then, after committing / rolling back the outer transaction, insert the logs you collected.
I've simplified your Logs and Customers tables for the purpose of brevity:
CREATE TABLE [dbo].[Logs](
[Description] [nvarchar](max) NULL
) ON [PRIMARY] TEXTIMAGE_ON [PRIMARY]
GO
CREATE TABLE [dbo].[Customers](
[ID] [int] NOT NULL,
[Name] [nvarchar](50) NULL
);
GO
And then you can track the logs in the table variable:
SET XACT_ABORT OFF;
SET IMPLICIT_TRANSACTIONS OFF
GO
DECLARE @index int;
SET @index = 4;
DECLARE @errorCount int
SET @errorCount = 0;
-- In memory storage to accumulate logs, outside of the transaction
DECLARE @TempLogs AS TABLE (Description NVARCHAR(MAX));
BEGIN TRANSACTION
WHILE @index > 0
BEGIN
-- SAVE TRANSACTION Foo; As per commentary below, savepoint is futile here
BEGIN TRY
-- commands to execute...
INSERT INTO Customers VALUES(1, 'Jalal');
-- make a problem
IF @index = 3
INSERT INTO Customers VALUES(NULL, 'Broken');
END TRY
BEGIN CATCH
-- ROLLBACK TRANSACTION Foo; -- Would roll back to the savepoint
INSERT INTO @TempLogs(Description)
VALUES ('Something bad happened on index ' + CAST(@index AS VARCHAR(50)));
SET @errorCount = @errorCount + 1;
END CATCH
SET @index = @index - 1;
END
IF @errorCount > 0
ROLLBACK TRANSACTION
ELSE
COMMIT TRANSACTION
-- Finally, do the actual insertion of logs, outside the boundaries of the transaction.
INSERT INTO dbo.Logs(Description)
SELECT Description FROM @TempLogs;
One thing to note is that this is quite an expensive way to process data (i.e. attempting to insert all the data, and then rolling back the whole batch if any problems were encountered). An alternative would be to validate all the data (and return and report errors) before attempting to insert any of it, as sketched below.
Also, in the example above, the Savepoint serves no real purpose, as even 'successful' Customer inserts will be eventually rolled back if any errors were detected for the batch.
SqlFiddle here - The loop completes, and despite 3 customers being inserted, the ROLLBACK TRANSACTION removes all successfully inserted customers. However, the log is still written, as the table variable is not subject to the outer transaction.
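For completeness, a minimal sketch of that validate-first alternative, using the simplified tables above (the @Incoming staging variable is hypothetical, introduced only for illustration):
-- Stage the incoming rows before touching Customers (hypothetical staging structure)
DECLARE @Incoming TABLE (ID int NULL, Name nvarchar(50) NULL);
INSERT INTO @Incoming VALUES (1, 'Jalal'), (NULL, 'Broken');

IF EXISTS (SELECT * FROM @Incoming WHERE ID IS NULL)
    -- Report the offending rows instead of attempting any insert
    INSERT INTO dbo.Logs(Description)
    SELECT 'Validation failed: NULL ID for ' + ISNULL(Name, '(no name)')
    FROM @Incoming
    WHERE ID IS NULL;
ELSE
    INSERT INTO dbo.Customers(ID, Name)
    SELECT ID, Name
    FROM @Incoming;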

About lock behavior when updating a row in SQL Server

Now, I'm trying to increment a sequential number in SQL Server based on a number provided by users.
I have a problem when multiple users insert a row at the same time with the same number.
I tried updating the number the user provided into a temporary table, and I expected that when I update the same table with the same condition, SQL Server would block any modification of this row until the current update finished, but it does not.
Here is the update statement I used:
UPDATE GlobalParam
SET ValueString = (CAST(ValueString as bigint) + 1)
WHERE Id = 'xxxx'
Could you tell me how to force the other update command to wait until the current command has finished?
This is my entire command:
DECLARE @Result bigint;
UPDATE GlobalParam SET ValueString = (SELECT MAX(Code) FROM Item)
DECLARE @SelectTopStm nvarchar(MAX);
DECLARE @ExistRow int
SET @SelectTopStm = 'SELECT @ExistRow = 1 FROM (SELECT TOP 1 Code FROM Item WHERE Code = ''999'') temp'
EXEC sp_executesql @SelectTopStm, N'@ExistRow int output', @ExistRow output
IF (@ExistRow is not null)
BEGIN
DECLARE @MaxValue bigint
DECLARE @ReturnUpdateTbl table (ValueString nvarchar(max));
UPDATE GlobalParam SET ValueString = (CAST(ValueString as bigint) + 1)
OUTPUT inserted.ValueString INTO @ReturnUpdateTbl
WHERE [Id] = '333A8E1F-16DD-E411-8280-D4BED9D726B3'
SELECT TOP 1 @MaxValue = CAST(ValueString as bigint) FROM @ReturnUpdateTbl
SET @Result = @MaxValue
END
ELSE
BEGIN
SET @Output = 999
END
END
I wrote the code above as a stored procedure.
Here is the real code when I insert one Item:
DECLARE @IncrementResult BIGINT
EXEC IncrementNumberUnique
, (some parameters)..
,@Result = @IncrementResult OUTPUT
INSERT INTO ITEM (Id, Code) VALUES ('xxxx', @IncrementResult)
I created 3 threads and ran them at the same time.
The returned result:
Id Code
1 999
2 1000
3 1000
Thanks
If I understood your requirements correctly, try the ROWLOCK hint to tell the optimizer to lock the rows one by one as the update needs them.
UPDATE GlobalParam WITH(ROWLOCK)
SET ValueString = (CAST(ValueString as bigint) + 1)
WHERE Id = 'xxxx'
By default SQL Server uses READ COMMITTED locking, which releases shared (read) locks as soon as each read completes rather than holding them until the transaction ends. Once the UPDATE statement below is complete, all read locks are released from the table Item.
UPDATE GlobalParam SET ValueString = (SELECT MAX(Code) FROM Item)
Since your INSERT into Item is outside the scope of your procedure, you can run the thread at the SERIALIZABLE isolation level. Something like this:
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE
DECLARE #IncrementResult BIGINT
EXEC IncrementNumberUnique
, (some parameters)..
,#Result = #IncrementResult OUTPUT
INSERT INTO ITEM (Id, Code) VALUES ('xxxx', #IncrementResult)
Changing the isolation level to SERIALIZABLE will increase blocking and contention of resources on the Item table.
To know more about isolation levels, refer to this.
You should look into identity columns and remove such manual computation of incremental values if possible. A single-statement alternative is sketched below.
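For what it's worth, there is also a single-statement pattern that avoids the read-then-update race entirely: T-SQL's compound assignment in UPDATE both writes the new value and captures it into a variable under the same lock. A sketch against the question's GlobalParam table:
DECLARE @Result bigint;

-- Increment and read in one atomic statement
UPDATE GlobalParam WITH (ROWLOCK)
SET @Result = ValueString = CAST(ValueString AS bigint) + 1
WHERE Id = '333A8E1F-16DD-E411-8280-D4BED9D726B3';

SELECT @Result AS NewValue;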

Concurrent table creation

I have a situation where a table can be created from different places.
So, I have ~10 working applications that can simultaneously try to create the same table.
Question: how can I synchronize them so I don't get any exceptions or errors?
All instances of the application try to create a new table when the day ends, so at around 00:00:00 they will all try to create it.
Sorry for the possibly stupid question; I have been googling for a while with no results.
Thank you.
You can use sp_getapplock to take arbitrary locks. You could make your app take such a lock before creating the table, like this (a fuller sketch follows below):
exec sp_getapplock
if tabledoesnotexist
create table ...
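A fuller sketch (the lock resource name and table are placeholders; with @LockOwner = 'Session', sp_getapplock does not require an open transaction):
DECLARE @rc int;

-- Acquire an exclusive app lock, waiting up to 10 seconds
EXEC @rc = sp_getapplock
    @Resource = 'CreateDailyTable',
    @LockMode = 'Exclusive',
    @LockOwner = 'Session',
    @LockTimeout = 10000;

IF @rc >= 0 -- 0 = granted immediately, 1 = granted after waiting
BEGIN
    IF OBJECT_ID('dbo.DailyTable', 'U') IS NULL
        CREATE TABLE dbo.DailyTable (a int);

    EXEC sp_releaseapplock @Resource = 'CreateDailyTable', @LockOwner = 'Session';
END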
As alluded to in the comments, your first step is to perform an existence check. Then, on the off chance that there are two simultaneous creations, you can use TRY...CATCH.
IF Object_ID('test', 'U') IS NULL
BEGIN
BEGIN TRY
CREATE TABLE test ( a int )
END TRY
BEGIN CATCH
SELECT Error_Message()
END CATCH
END
UPDATE
You do not want to create a table every day. Seriously. This is very poor database design.
Instead you want to add a datetime column to your table that indicates when each record was created.
Can you please go through the following code? Concurrent execution is handled using ISOLATION LEVEL SERIALIZABLE.
CREATE PROCEDURE [dbo].[GetNextID](
@IDName nvarchar(255)
)
AS
BEGIN
/*
Description: Increments and returns the LastID value from tblIDs
for a given IDName
Author: Max Vernon
Date: 2012-07-19
*/
DECLARE @Retry int;
DECLARE @EN int, @ES int, @ET int;
SET @Retry = 5;
DECLARE @NewID int;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
SET NOCOUNT ON;
WHILE @Retry > 0
BEGIN
BEGIN TRY
BEGIN TRANSACTION;
SET @NewID = COALESCE((SELECT LastID FROM tblIDs WHERE IDName = @IDName),0)+1;
IF (SELECT COUNT(IDName) FROM tblIDs WHERE IDName = @IDName) = 0
INSERT INTO tblIDs (IDName, LastID) VALUES (@IDName, @NewID)
ELSE
UPDATE tblIDs SET LastID = @NewID WHERE IDName = @IDName;
COMMIT TRANSACTION;
SET @Retry = -2; /* no need to retry since the operation completed */
END TRY
BEGIN CATCH
IF (ERROR_NUMBER() = 1205) /* DEADLOCK */
SET @Retry = @Retry - 1;
ELSE
BEGIN
SET @Retry = -1;
SET @EN = ERROR_NUMBER();
SET @ES = ERROR_SEVERITY();
SET @ET = ERROR_STATE()
RAISERROR (@EN,@ES,@ET);
END
ROLLBACK TRANSACTION;
END CATCH
END
IF @Retry = 0 /* must have deadlock'd 5 times. */
BEGIN
SET @EN = 1205;
SET @ES = 13;
SET @ET = 1
RAISERROR (@EN,@ES,@ET);
END
ELSE
SELECT @NewID AS NewID;
END
GO
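A quick usage sketch (the IDName value is just an example, and assumes tblIDs starts empty):
EXEC [dbo].[GetNextID] @IDName = 'Invoice'; -- returns 1 on a fresh tblIDs
EXEC [dbo].[GetNextID] @IDName = 'Invoice'; -- returns 2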

SQL Transactions with nolocks and TRANSACTION ISOLATION LEVEL READ COMMITTED

I have a stored procedure like this.
CREATE PROCEDURE [dbo].[mysp]
AS
SET NOCOUNT ON
SET TRANSACTION ISOLATION LEVEL READ COMMITTED
BEGIN
BEGIN TRAN
DECLARE @TrackingCode INT
SELECT @TrackingCode = DefaultsData
FROM dbo.Defaults
WHERE DefaultsID=77
UPDATE dbo.Defaults
SET DefaultsData = @PassedTrackingCode+1
WHERE DefaultsID=77
SELECT @TrackingCode
COMMIT TRAN
END
Assuming that we execute this stored procedure twice at the same time (concurrently), what will the @TrackingCode value returned be both times? And what if I used NOLOCK on the SELECT statement in the stored proc?
1) If your intention is to generate unique tracking codes, then this is a bad idea. You could do this simple test:
CREATE TABLE dbo.Defaults (
DefaultsData INT,
DefaultsID INT PRIMARY KEY
);
GO
INSERT INTO dbo.Defaults VALUES (21, 77);
GO
CREATE PROCEDURE [dbo].[mysp]
AS
SET NOCOUNT ON
SET TRANSACTION ISOLATION LEVEL READ COMMITTED
BEGIN
BEGIN TRAN
DECLARE @TrackingCode INT
SELECT @TrackingCode = DefaultsData
FROM dbo.Defaults
WHERE DefaultsID=77
WAITFOR DELAY '00:00:05' -- 5 seconds delay
UPDATE dbo.Defaults
SET DefaultsData = @TrackingCode+1
WHERE DefaultsID=77
SELECT @TrackingCode -- Maybe it should return the new code
COMMIT TRAN
END;
GO
and then open a new window in SQL Server Management Studio (Ctrl + N) and execute (F5)
EXEC [dbo].[mysp]
and also open a second new window and execute (in less than 5 seconds)
EXEC [dbo].[mysp]
In this case, you will get the same value both times. NOLOCK is a bad idea (generally speaking) and in this case doesn't help you.
2) If (I repeat myself, sorry) your intention is to generate unique tracking codes, then you could use:
ALTER PROCEDURE [dbo].[mysp]
@TrackingCode INT OUTPUT
AS
BEGIN
SET NOCOUNT ON;
UPDATE dbo.Defaults
SET @TrackingCode = DefaultsData = DefaultsData + 1 -- It generates the new code
WHERE DefaultsID=77;
END;
GO
or you could use sequences (SQL Server 2012+).
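For reference, a minimal call sketch for the OUTPUT-parameter version (the variable name is arbitrary):
DECLARE @code INT;
EXEC [dbo].[mysp] @TrackingCode = @code OUTPUT; -- atomically increments and returns the new code
SELECT @code AS NewTrackingCode;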