SQL update in batches

What is the most effective way to update a table in SQL Server while limiting each transaction to 10k records?
I have read about the TOP and SET ROWCOUNT approaches combined with a WHILE loop. Which of these is more effective? Please also share any alternative effective approaches you know of. Thank you.

Here is one potential approach without using set rowcount
-- prepare test data
use tempdb
drop table dbo.t;
create table dbo.t (a int identity, b int)
go
insert into dbo.t ( b)
values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11);
go
-- assume we do 3 records per time, put 10000 here if you want 10K records
-- also the update is just to update column [b] to [b] * 2, here is the code
declare @N int = 3; -- do a batch of @N records
declare @i int = 0, @max_loop int;
select @max_loop = count(*)/@N from dbo.t
-- the first batch may include records <= @N-1 and the last batch may include records <= @N
while (@i <= @max_loop)
begin
; with c as (
select rnk=ROW_NUMBER() over (order by a)/@N, a, b from dbo.t
)
update c set b = b*2 -- double b
where rnk = @i;
set @i = @i + 1;
end
end
go
-- check the result
select * from dbo.t

You can try the approach below:
WHILE (1=1)
BEGIN
BEGIN TRANSACTION
UPDATE TOP (10000) XXX
SET XXX.YYY = <ValueToUpdate>
FROM XXX -- update 10000 not-yet-updated rows
WHERE <condition> -- the condition must exclude already-updated rows, or this becomes an infinite loop
IF @@ROWCOUNT = 0
BEGIN
COMMIT TRANSACTION
BREAK
END
COMMIT TRANSACTION
END
EDIT
Update for all employees of an organization, making sure that it does not become an infinite loop. Here, I am updating ModifiedDate for each employee record.
DECLARE @updatedids table(id int)
WHILE (1=1)
BEGIN
BEGIN TRANSACTION
UPDATE TOP(10000) a
SET a.ModifiedDate = GETDATE()
OUTPUT inserted.BusinessEntityID INTO @updatedids
FROM HumanResources.Employee a
LEFT JOIN @updatedids u
ON a.BusinessEntityID = u.id
WHERE u.id IS NULL
-- Update 10000 nonupdated rows
IF @@ROWCOUNT = 0
BEGIN
COMMIT TRANSACTION
BREAK
END
COMMIT TRANSACTION
END
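An alternative to the OUTPUT/anti-join bookkeeping above is keyset batching: walk the table in key order and remember the last key handled. A minimal sketch of that idea (not part of the original answer), against the same HumanResources.Employee table:
DECLARE @LastID int = 0;      -- watermark: highest key already processed
DECLARE @BatchMax int;
WHILE 1 = 1
BEGIN
    -- find the upper key bound of the next batch of 10000 rows
    SELECT @BatchMax = MAX(BusinessEntityID)
    FROM (
        SELECT TOP (10000) BusinessEntityID
        FROM HumanResources.Employee
        WHERE BusinessEntityID > @LastID
        ORDER BY BusinessEntityID
    ) AS nextBatch;
    IF @BatchMax IS NULL BREAK;   -- nothing left to process
    BEGIN TRANSACTION;
    UPDATE HumanResources.Employee
    SET ModifiedDate = GETDATE()
    WHERE BusinessEntityID > @LastID
      AND BusinessEntityID <= @BatchMax;
    COMMIT TRANSACTION;
    SET @LastID = @BatchMax;
END;
Because each pass touches a contiguous key range, no tracking table is needed; the trade-off is that it assumes a stable, indexed key such as BusinessEntityID.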

Related

Trigger that prevents update of a column based on the result of a user-defined function

We have a DVD rental company. In this particular scenario we consider only the Member, Rental and Membership tables.
The task is to write a trigger that prevents a customer from being shipped a DVD
if they have reached their monthly limit for DVD rentals as per their membership contract, using the function.
My trigger leads to an infinite loop. It works without the WHILE loop, but then it does not work properly if I consider multiple updates to the Rental table. Where am I wrong?
-- do not run, infinite loop
CREATE OR ALTER TRIGGER trg_Rental_StopDvdShip
ON RENTAL
FOR UPDATE
AS
BEGIN
DECLARE @MemberId INT
DECLARE @RentalId INT
SELECT * INTO #TempTable FROM inserted
WHILE (EXISTS (SELECT RentalId FROM #TempTable))
BEGIN
IF UPDATE(RentalShippedDate)
BEGIN
IF (SELECT TotalDvdLeft FROM dvd_numb_left(@MemberId)) <= 0
BEGIN
ROLLBACK
RAISERROR ('YOU HAVE REACHED MONTHLY LIMIT FOR DVD RENTALS', 16, 1)
END;
END;
DELETE FROM #TempTable WHERE RentalID = @RentalId
END;
END;
My function looks as follows:
CREATE OR ALTER FUNCTION dvd_numb_left(@member_id INT)
RETURNS @tab_dvd_numb_left TABLE(MemberId INT, Name VARCHAR(50), TotalDvdLeft INT, AtTimeDvdLeft INT)
AS
BEGIN
DECLARE @name VARCHAR(50)
DECLARE @dvd_total_left INT
DECLARE @dvd_at_time_left INT
DECLARE @dvd_limit INT
DECLARE @dvd_rented INT
DECLARE @dvd_at_time INT
DECLARE @dvd_on_rent INT
SET @dvd_limit = (SELECT Membership.MembershipLimitPerMonth FROM Membership
WHERE Membership.MembershipId = (SELECT Member.MembershipId FROM Member WHERE Member.MemberId = @member_id))
SET @dvd_rented = (SELECT COUNT(Rental.MemberId) FROM Rental
WHERE CONCAT(month(Rental.RentalShippedDate), '.', year(Rental.RentalShippedDate)) = CONCAT(month(GETDATE()), '.', year(GETDATE())) AND Rental.MemberId = @member_id)
SET @dvd_at_time = (SELECT Membership.DVDAtTime FROM Membership
WHERE Membership.MembershipId = (SELECT Member.MembershipId FROM Member WHERE Member.MemberId = @member_id))
SET @dvd_on_rent = (SELECT COUNT(Rental.MemberId) FROM Rental
WHERE Rental.MemberId = @member_id AND Rental.RentalReturnedDate IS NULL)
SET @name = (SELECT CONCAT(Member.MemberFirstName, ' ', Member.MemberLastName) FROM Member WHERE Member.MemberId = @member_id)
SET @dvd_total_left = @dvd_limit - @dvd_rented
SET @dvd_at_time_left = @dvd_at_time - @dvd_on_rent
IF @dvd_total_left < 0
BEGIN
SET @dvd_total_left = 0
SET @dvd_at_time_left = 0
INSERT INTO @tab_dvd_numb_left(MemberId, Name, TotalDvdLeft, AtTimeDvdLeft)
VALUES(@member_id, @name, @dvd_total_left, @dvd_at_time_left)
RETURN;
END
INSERT INTO @tab_dvd_numb_left(MemberId, Name, TotalDvdLeft, AtTimeDvdLeft)
VALUES(@member_id, @name, @dvd_total_left, @dvd_at_time_left)
RETURN;
END;
Will be glad for any advice.
Your main issue is that even though you populate #TempTable you never pull any values from it.
CREATE OR ALTER TRIGGER trg_Rental_StopDvdShip
ON RENTAL
FOR UPDATE
AS
BEGIN
DECLARE @MemberId INT, @RentalId INT;
-- Move test for column update to the first test as it applies to the entire update, not per row.
IF UPDATE(RentalShippedDate)
BEGIN
SELECT * INTO #TempTable FROM inserted;
WHILE (EXISTS (SELECT RentalId FROM #TempTable))
BEGIN
-- Actually pull some information from #TempTable - this wasn't happening before
SELECT TOP 1 @RentalId = RentalId, @MemberId = MemberId FROM #TempTable;
-- Select our values to verify it's working
-- SELECT @RentalId, @MemberId;
IF (SELECT TotalDvdLeft FROM dvd_numb_left(@MemberId)) <= 0
BEGIN
ROLLBACK
RAISERROR ('YOU HAVE REACHED MONTHLY LIMIT FOR DVD RENTALS', 16, 1)
END;
-- Delete the current handled row
DELETE FROM #TempTable WHERE RentalID = @RentalId
END;
-- For neatness I always drop temp tables, makes testing easier also
DROP TABLE #TempTable;
END;
END;
An easy way to debug simple triggers like this is to copy the T-SQL out and then create an @Inserted table variable, e.g.
DECLARE @Inserted table (RentalId INT, MemberId INT);
INSERT INTO @Inserted (RentalId, MemberId)
VALUES (1, 1), (2, 2);
DECLARE @MemberId INT, @RentalId INT;
-- Move test for column update to the first test as it applies to the entire update, not per row.
-- IF UPDATE(RentalShippedDate)
BEGIN
SELECT * INTO #TempTable FROM @Inserted;
WHILE (EXISTS (SELECT RentalId FROM #TempTable))
BEGIN
-- Actually pull some information from #TempTable - this wasn't happening before
SELECT TOP 1 @RentalId = RentalId, @MemberId = MemberId FROM #TempTable;
-- Select our values to verify it's working
SELECT @RentalId, @MemberId;
-- IF (SELECT TotalDvdLeft FROM dvd_numb_left(@MemberId)) <= 0
-- BEGIN
-- ROLLBACK
-- RAISERROR ('YOU HAVE REACHED MONTHLY LIMIT FOR DVD RENTALS', 16, 1)
-- END;
-- Delete the current handled row
DELETE FROM #TempTable WHERE RentalID = @RentalId
END;
-- For neatness I always drop temp tables, makes testing easier also
DROP TABLE #TempTable;
END;
Note: THROW is the recommended way to raise an error instead of RAISERROR.
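For illustration, a minimal sketch of that limit check using THROW instead of RAISERROR (the error number 50001 is arbitrary; user-defined error numbers must be 50000 or higher, and THROW requires SQL Server 2012+):
IF (SELECT TotalDvdLeft FROM dvd_numb_left(@MemberId)) <= 0
BEGIN
    ROLLBACK;
    -- the statement preceding THROW must be terminated with a semicolon
    THROW 50001, 'YOU HAVE REACHED MONTHLY LIMIT FOR DVD RENTALS', 1;
END;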
Another thing to consider: you should try to transform your multi-statement UDF into an inline TVF, because multi-statement UDFs have performance side effects.
Like this one:
CREATE OR ALTER FUNCTION dvd_numb_left(@member_id INT)
RETURNS TABLE
AS
RETURN
(
WITH
TM AS
(SELECT M.MemberId,
MS.MembershipLimitPerMonth AS dvd_limit,
MS.DVDAtTime AS dvd_at_time,
CONCAT(M.MemberFirstName, ' ', M.MemberLastName) AS [Name]
FROM Membership AS MS
JOIN Member AS M
ON MS.MembershipId = M.MembershipId
WHERE M.MemberId = @member_id
),
TR AS
(SELECT COUNT(R.MemberId) AS dvd_rented
FROM Rental AS R
WHERE YEAR(R.RentalShippedDate) = YEAR(GETDATE())
AND MONTH(R.RentalShippedDate) = MONTH(GETDATE())
AND R.MemberId = @member_id
),
TO_ AS
(SELECT COUNT(R.MemberId) AS dvd_on_rent
FROM Rental AS R
WHERE R.MemberId = @member_id
AND R.RentalReturnedDate IS NULL
)
SELECT MemberId, [Name],
CASE WHEN dvd_limit - dvd_rented < 0 THEN 0 ELSE dvd_limit - dvd_rented END AS TotalDvdLeft,
CASE WHEN dvd_limit - dvd_rented < 0 THEN 0 ELSE dvd_at_time - dvd_on_rent END AS AtTimeDvdLeft
FROM TM CROSS JOIN TR CROSS JOIN TO_
);
GO
This will be much more efficient.
The absolute rule for performance is: stay with SET-BASED code instead of iterative code.
The inline function above can be optimized by the optimizer, while yours cannot and needs four separate accesses to the same tables.
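Applying the same set-based rule to the trigger itself, a minimal sketch (assuming the inline dvd_numb_left above and the Rental/inserted columns from the question) validates every affected row in one pass, with no WHILE loop and no temp table:
CREATE OR ALTER TRIGGER trg_Rental_StopDvdShip
ON Rental
FOR UPDATE
AS
BEGIN
    IF UPDATE(RentalShippedDate)
    BEGIN
        -- one set-based check across all updated rows
        IF EXISTS (SELECT 1
                   FROM inserted AS i
                   CROSS APPLY dvd_numb_left(i.MemberId) AS d
                   WHERE d.TotalDvdLeft <= 0)
        BEGIN
            ROLLBACK;
            THROW 50001, 'YOU HAVE REACHED MONTHLY LIMIT FOR DVD RENTALS', 1;
        END;
    END;
END;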

SQL Server: Why isn't this logic working when Chunking on inserts?

Fellow Techies--
I've got an endless loop condition happening here. Why is @@ROWCOUNT never getting set back to 0? I must not be understanding what @@ROWCOUNT really does, or I am setting the value in the wrong place. I think the value should be decrementing on each pass until I eventually hit zero.
DECLARE @ChunkSize int = 250000;
WHILE @ChunkSize <> 0
BEGIN
BEGIN TRANSACTION
INSERT TableName
(col1,col2)
SELECT TOP (@ChunkSize)
col1,col2
FROM TableName2
COMMIT TRANSACTION;
SET @ChunkSize = @@ROWCOUNT
END -- transaction block
END -- while-loop block
I'm not sure, from what you posted, how you are going to ensure you catch rows that you haven't already inserted. If you don't, it'll be an infinite loop of course. Here is a way using test data - but naturally you'd want to base it on a PK or other unique column. Perhaps you just left that part off, or I'm missing something altogether. I'm just interested in what your final code is for your chunking and the logic behind it, so this is an answer and an inquiry.
if object_id('tempdb..#source') is not null drop table #source
if object_id('tempdb..#destination') is not null drop table #destination
create table #source(c1 int, c2 int)
create table #destination (c1 int, c2 int)
insert into #source (c1,c2) values
(1,1),
(2,1),
(3,1),
(4,1),
(5,1),
(6,1),
(7,1),
(8,1),
(9,1),
(10,1),
(11,1),
(12,1)
DECLARE @ChunkSize int = 2;
WHILE @ChunkSize <> 0
BEGIN
INSERT INTO #destination (c1,c2)
SELECT TOP (@ChunkSize) c1,c2 FROM #source WHERE c1 NOT IN (SELECT DISTINCT c1 FROM #destination) ORDER BY ROW_NUMBER() OVER (ORDER BY C1)
SET @ChunkSize = @@ROWCOUNT
--SELECT @ChunkSize
END
select * from #source
select * from #destination
The loop never ends because you're setting @ChunkSize to itself without ever looking at what you've already inserted. Using your example, @ChunkSize = 250000. First, the select performs SELECT TOP 250000 and returns (presumably) 250000 rows. You then use @@ROWCOUNT to update @ChunkSize, but the row count returned will be 250000, so you just set it to 250000 again. That could be fine, except the number will never change unless you rule out rows that you've already inserted - you will keep inserting the same 250000 rows over and over.
You need something like NOT EXISTS to filter out the rows you've already inserted:
DECLARE @ChunkSize int = 250000;
WHILE @ChunkSize > 0
BEGIN
BEGIN TRANSACTION
INSERT INTO TableName
(col1,col2)
SELECT TOP (@ChunkSize)
col1,col2
FROM TableName2 T2
WHERE NOT EXISTS (SELECT *
FROM TableName T
WHERE T.Col1 = T2.Col1
AND T.Col2 = T2.Col2)
SET @ChunkSize = @@ROWCOUNT
PRINT CONVERT(nvarchar(10), @ChunkSize) + ' Rows Inserted.';
COMMIT TRANSACTION
END -- while-loop block
Implemented solution
In the end, I decided to pump the SQL through SSIS, where I could set the commit batch size accordingly. Had I not chosen that route, I would have had to follow @scsimon's suggestion and basically maintain a tracking table for the records completed and the records left to cycle through.
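For reference, a tracking-table version of that suggestion might look like the sketch below. It reuses the TableName/TableName2 names from the question; the dbo.CopyTracking table and the assumption that col1 uniquely identifies a source row are hypothetical.
-- hypothetical tracking table: remembers which source keys have already been copied
CREATE TABLE dbo.CopyTracking (c1 int PRIMARY KEY);

DECLARE @ChunkSize int = 250000;
DECLARE @Rows int = 1;
WHILE @Rows > 0
BEGIN
    BEGIN TRANSACTION;
    -- stage the next chunk of not-yet-copied rows
    SELECT TOP (@ChunkSize) T2.col1, T2.col2
    INTO #chunk
    FROM TableName2 AS T2
    WHERE NOT EXISTS (SELECT 1 FROM dbo.CopyTracking ct WHERE ct.c1 = T2.col1);
    SET @Rows = @@ROWCOUNT;
    -- copy the chunk and record its keys so the next pass skips them
    INSERT INTO TableName (col1, col2)
    SELECT col1, col2 FROM #chunk;
    INSERT INTO dbo.CopyTracking (c1)
    SELECT col1 FROM #chunk;
    DROP TABLE #chunk;
    COMMIT TRANSACTION;
END;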

Efficient SQL Server stored procedure

I am using SQL Server 2008 and running the following stored procedure, which needs to "clean" a 70 million row table by moving about 50 million rows to another table; id_col is an integer (primary identity key).
According to my last run it works, but it is expected to take about 200 days:
SET NOCOUNT ON
-- define the last ID handled
DECLARE @LastID integer
SET @LastID = 0
declare @tempDate datetime
set @tempDate = dateadd(dd,-20,getdate())
-- define the ID to be handled now
DECLARE @IDToHandle integer
DECLARE @iCounter integer
DECLARE @watch1 nvarchar(50)
DECLARE @watch2 nvarchar(50)
set @iCounter = 0
-- select the next to handle
SELECT TOP 1 @IDToHandle = id_col
FROM MAIN_TABLE
WHERE id_col > @LastID and DATEDIFF(DD,someDateCol,otherDateCol) < 1
and datediff(dd,someDateCol,@tempDate) > 0 and (some_other_int_col = 1745 or some_other_int_col = 1548 or some_other_int_col = 4785)
ORDER BY id_col
-- as long as we have s......
WHILE @IDToHandle IS NOT NULL
BEGIN
IF ((select count(1) from SOME_OTHER_TABLE_THAT_CONTAINS_20k_ROWS where some_int_col = @IDToHandle) = 0 and (select count(1) from A_70k_rows_table where some_int_col = @IDToHandle) = 0)
BEGIN
INSERT INTO SECONDERY_TABLE
SELECT col1,col2,col3.....
FROM MAIN_TABLE WHERE id_col = @IDToHandle
EXEC [dbo].[DeleteByID] @ID = @IDToHandle -- deletes the rows from 2 other tables that are related to the MAIN_TABLE and then from the MAIN_TABLE
set @iCounter = @iCounter + 1
END
IF (@iCounter % 1000 = 0)
begin
set @watch1 = 'iCounter - ' + CAST(@iCounter AS VARCHAR)
set @watch2 = 'IDToHandle - ' + CAST(@IDToHandle AS VARCHAR)
raiserror (@watch1, 10, 1) with nowait
raiserror (@watch2, 10, 1) with nowait
end
-- set the last handled to the one we just handled
SET @LastID = @IDToHandle
SET @IDToHandle = NULL
-- select the next to handle
SELECT TOP 1 @IDToHandle = id_col
FROM MAIN_TABLE
WHERE id_col > @LastID and DATEDIFF(DD,someDateCol,otherDateCol) < 1
and datediff(dd,someDateCol,@tempDate) > 0 and (some_other_int_col = 1745 or some_other_int_col = 1548 or some_other_int_col = 4785)
ORDER BY id_col
END
Any ideas or directions to improve this procedure's run time will be welcome.
Yes, try this:
Declare @Ids Table (id int Primary Key not Null)
Insert @Ids(id)
Select id_col
From MAIN_TABLE m
Where someDateCol >= otherDateCol
And someDateCol < @tempDate -- If there are times in these datetime fields,
-- then you may need to modify this condition.
And some_other_int_col In (1745, 1548, 4785)
And Not exists (Select * from SOME_OTHER_TABLE_THAT_CONTAINS_20k_ROWS
Where some_int_col = m.id_col)
And Not Exists (Select * From A_70k_rows_table
Where some_int_col = m.id_col)
Select id from @Ids -- this to confirm above code generates the correct list of Ids
return -- this line to stop (not do insert/deletes) until you have verified @Ids is correct
-- Once you have verified that above @Ids is correctly populated,
-- then delete or comment out the select and return lines above so insert runs.
Begin Transaction
Delete OT -- eliminate row-by-row call to second stored proc
From OtherTable ot
Join MAIN_TABLE m On m.id_col = ot.FKCol
Join @Ids i On i.Id = m.id_col
Insert SECONDERY_TABLE(col1, col2, etc.)
Select col1,col2,col3.....
FROM MAIN_TABLE m Join @Ids i On i.Id = m.id_col
Delete m -- eliminate row-by-row call to second stored proc
FROM MAIN_TABLE m
Join @Ids i On i.Id = m.id_col
Commit Transaction
Explanation.
You had numerous filtering conditions that were not SARGable, i.e., they would force a complete table scan for every iteration of your loop, instead of being able to use any existing index. Always try to avoid filter conditions that apply processing logic to a table column value before comparing it to some other value. This eliminates the opportunity for the query optimizer to use an index.
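For illustration, here is the date filter from the loop written both ways (a sketch using the question's column names; as in the rewrite above, it assumes the datetime columns carry no time-of-day component):
DECLARE @tempDate datetime = dateadd(dd, -20, getdate());

-- non-SARGable: DATEDIFF wraps someDateCol, so an index on someDateCol cannot be seeked
SELECT id_col
FROM MAIN_TABLE
WHERE DATEDIFF(DD, someDateCol, otherDateCol) < 1
  AND DATEDIFF(DD, someDateCol, @tempDate) > 0;

-- SARGable rewrite: the column stands alone on one side of each comparison
SELECT id_col
FROM MAIN_TABLE
WHERE someDateCol >= otherDateCol
  AND someDateCol < @tempDate;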
You were executing the inserts one at a time... Way better to generate a list of PK Ids that need to be processed (all at once) and then do all the inserts at once, in one statement.

Update multiple rows in table from table variable

I'm writing a stored procedure to update multiple records based on a table variable parameter.
The existing table is: Tb_Project_Image with relevant columns:
id PK (identity 1,1)
cat_ord decimal(4,2)
The procedure will receive a table variable parameter (shown in the code below) containing the id as PI_ID, and the new value for cat_ord as newCatOrd. idx is a simple identity for each row containing 1...n where n is the row count of @tempTable.
For each row in @tempTable, I want to update Tb_Project_Image where id = PI_ID to the corresponding value.
DECLARE @tempTable table (
idx smallint Primary Key IDENTITY(1,1),
PI_ID bigint,
newCatOrd decimal(4, 2) not null )
INSERT INTO @tempTable values (3, 7.01)
INSERT INTO @tempTable values (4, 7.02)
INSERT INTO @tempTable values (5, 7.03)
--etc...
DECLARE @error int
DECLARE @update int
DECLARE @iter int
SET @iter = 1
BEGIN TRAN
WHILE @iter <= (select COUNT(*) from @tempTable)
BEGIN
UPDATE Tb_Project_Image
SET cat_ord = (SELECT newCatOrd FROM @tempTable
WHERE idx = @iter)
WHERE id = (SELECT PI_ID FROM @tempTable
WHERE idx = @iter)
--error checking
set @error = @@ERROR
set @update = @@ROWCOUNT
IF ((@error = 0) AND (@update = 1))
BEGIN
SET @iter = @iter + 1
CONTINUE
END
ELSE
BREAK
END
IF ((@error = 0) AND (@update = 1))
COMMIT TRAN
ELSE
ROLLBACK TRAN
GO
Now, the error checking is there because, to ensure integrity, EACH row in the temporary table MUST result in exactly 1 update. (Explanation omitted to save space.) If a single iteration of the while loop threw an error, or didn't affect exactly 1 row, I want to break the loop and roll back the transaction.
THE PROBLEM I'm having is that this error checking is not working. I'm currently running it with 14 rows in @tempTable and the 11th uses a PI_ID not found in the Project_Image table. Therefore, @update = 0... but it continues the loop and commits the data.
I'd be doubly glad if someone had a method of doing this that only used a single update statement.
You cannot do it this way, because even a SET statement resets the state of @@ERROR and @@ROWCOUNT. In this case @@ROWCOUNT is set to 1 after set @error = @@ERROR. If you do not assign the values to local variables, your code will work:
IF ((@@ERROR = 0) AND (@@ROWCOUNT = 1))
But you might rather try TRY...CATCH error handling and test @@ROWCOUNT separately after the update.
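A minimal sketch of that TRY...CATCH shape for a single iteration's update (assuming @tempTable and @iter are declared as in the question, and SQL Server 2012+ for THROW):
BEGIN TRY
    BEGIN TRAN;
    UPDATE Tb_Project_Image
    SET cat_ord = (SELECT newCatOrd FROM @tempTable WHERE idx = @iter)
    WHERE id = (SELECT PI_ID FROM @tempTable WHERE idx = @iter);
    -- test @@ROWCOUNT immediately; any later statement resets it
    IF @@ROWCOUNT <> 1
    BEGIN
        THROW 50001, 'Expected exactly one row to be updated.', 1;
    END;
    COMMIT TRAN;
END TRY
BEGIN CATCH
    IF @@TRANCOUNT > 0
        ROLLBACK TRAN;
    THROW;   -- re-raise the original error to the caller
END CATCH;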
UPDATE: doing it in single update:
UPDATE t
SET cat_ord = tt.newCatOrd
FROM Tb_Project_Image t
INNER JOIN @tempTable tt
ON t.id = tt.PI_ID
-- If there was PI_ID not found in Tb_Project_Image
-- But I think that this should have been dealt with
-- During the initial loading of temporary table
IF @@ROWCOUNT <> (select count(*) from @tempTable)
BEGIN
-- Error reporting here
ROLLBACK TRANSACTION
END
Instead of updating and then rolling back, you could also use a CTE to determine if any records should be updated prior to performing the update. Something like this should work:
WITH NON_SINGLETON AS (
-- Find any records in #tempTable that don't match
-- exactly one record in Tb_Project_Image
SELECT t.PI_ID, COUNT(pi.id) C
FROM @tempTable t
LEFT JOIN Tb_Project_Image pi ON t.PI_ID = pi.id
GROUP BY t.PI_ID
HAVING COUNT(pi.id) != 1
)
UPDATE Tb_Project_Image
SET cat_ord = t.newCatOrd
FROM Tb_Project_Image pi
JOIN @tempTable t ON pi.id = t.PI_ID
-- If any invalid records were found in the CTE,
-- then this condition will fail for all rows
-- and nothing will be updated
WHERE NOT EXISTS(SELECT 1 FROM NON_SINGLETON)
If it's possible for @tempTable to have duplicate entries for the same PI_ID, then this will handle those scenarios as well. And since it's a single statement, you don't have to explicitly manage the transaction in the proc (if it's the only thing that needs to be included in the transaction).

SQL: Query timeout expired

I have a simple query to update a table (30 columns and about 150,000 rows).
For example:
UPDATE tblSomeTable set F3 = @F3 where F1 = @F1
This query will affect about 2,500 rows.
The tblSomeTable has a trigger:
ALTER TRIGGER [dbo].[trg_tblSomeTable]
ON [dbo].[tblSomeTable]
AFTER INSERT,DELETE,UPDATE
AS
BEGIN
declare @operationType nvarchar(1)
declare @createDate datetime
declare @UpdatedColumnsMask varbinary(500) = COLUMNS_UPDATED()
-- detect operation type
if not exists(select top 1 * from inserted)
begin
-- delete
SET @operationType = 'D'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM deleted
end
else if not exists(select top 1 * from deleted)
begin
-- insert
SET @operationType = 'I'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM inserted
end
else
begin
-- update
SET @operationType = 'U'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM inserted
end
-- log data to tmp table
INSERT INTO tbl1
SELECT
@createDate,
@operationType,
@status,
@updatedColumnsMask,
d.F1,
i.F1,
d.F2,
i.F2,
d.F3,
i.F3,
d.F4,
i.F4,
d.F5,
i.F5,
...
FROM (Select 1 as temp) t
LEFT JOIN inserted i on 1=1
LEFT JOIN deleted d on 1=1
END
And when I execute the update query, I get a timeout.
How can I optimize the logic to avoid the timeout?
Thank you.
This query:
SELECT *
FROM (
SELECT 1 AS temp
) t
LEFT JOIN
INSERTED i
ON 1 = 1
LEFT JOIN
DELETED d
ON 1 = 1
will yield 2500 ^ 2 = 6250000 records from a cartesian product of INSERTED and DELETED (that is all possible combinations of all records in both tables), which will be inserted into tbl1.
Is that what you wanted to do?
Most probably, you want to join the tables on their PRIMARY KEY:
INSERT
INTO tbl1
SELECT @createDate,
@operationType,
@status,
@updatedColumnsMask,
d.F1,
i.F1,
d.F2,
i.F2,
d.F3,
i.F3,
d.F4,
i.F4,
d.F5,
i.F5,
...
FROM INSERTED i
FULL JOIN
DELETED d
ON i.id = d.id
This will treat update to the PK as deleting a record and inserting another, with a new PK.
Thanks Quassnoi, the FULL JOIN idea is good. It helped me.
I also tried updating the table in portions (1,000 rows at a time) to make my code work faster, because for some companyId values I need to update more than 160,000 rows.
Instead of old code:
UPDATE tblSomeTable set someVal = @someVal where companyId = @companyId
I now use the one below:
declare @rc integer = 0
declare @parts integer = 0
declare @index integer = 0
declare @portionSize int = 1000
-- select Ids for update
declare @tempIds table (id int)
insert into @tempIds
select id from tblSomeTable where companyId = @companyId
-- calculate amount of iterations
set @rc = @@rowcount
set @parts = @rc / @portionSize + 1
-- update table in portions
WHILE (@parts > @index)
begin
UPDATE TOP (@portionSize) t
SET someVal = @someVal
FROM tblSomeTable t
JOIN @tempIds t1 on t1.id = t.id
WHERE companyId = @companyId
delete top (@portionSize) from @tempIds
set @index += 1
end
What do you think about this? Does it make sense? If yes, how do I choose the correct portion size?
Or is a simple update also a good solution? I just want to avoid locks in the future.
Thanks