Distributing orders to users - SQL

I have orders and users, and I distribute the orders evenly across the users.
When new users are added I need to redistribute, and the algorithm has to take into account the difference between what has already been fulfilled and the limit. If a user already met their limit in the first distribution, they are excluded from the new distribution and their limit column is set to the value of the Made column.
DECLARE @orderCount INT
SET @orderCount = 50 -- Orders to distribute
-- Result table
DECLARE @t TABLE (
    users CHAR(3),
    limit INT,
    Made INT
)
-- Add users
INSERT INTO @t (users, limit, Made) VALUES
('us1', 0, 0),
('us2', 0, 0),
('us3', 0, 0)
-- Table for the distribution algorithm
DECLARE @c TABLE (
    users CHAR(3),
    limit INT,
    Made INT,
    Cnt INT,
    Rn INT
)
-- Count users and row_number
INSERT INTO @c
SELECT
    *,
    COUNT(*) OVER () AS Cnt,
    ROW_NUMBER() OVER (ORDER BY users) AS Rn
FROM
    @t
-- Variable to check whether the new distribution exceeds Made
DECLARE @check INT = 0
-- Distribution
UPDATE t
SET @check = (@orderCount / Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END,
    limit =
        CASE WHEN
            (@check >= t.Made)
        THEN
            @check
        ELSE
            t.Made
        END
FROM @t t
INNER JOIN @c cn ON cn.users = t.users
-- Test data
UPDATE @t SET Made = 12 WHERE users = 'us1'
UPDATE @t SET Made = 10 WHERE users = 'us2'
UPDATE @t SET Made = 5  WHERE users = 'us3'
-- Check result
SELECT * FROM @t
-- Add new users to the distribution
INSERT INTO @t (users, limit, Made) VALUES ('us4', 0, 0)
INSERT INTO @t (users, limit, Made) VALUES ('us5', 0, 0)
-- Clear table
DELETE FROM @c
-- Recompute counts and row numbers
INSERT INTO @c
SELECT
    *,
    COUNT(*) OVER () AS Cnt,
    ROW_NUMBER() OVER (ORDER BY users) AS Rn
FROM
    @t
-- Distribution
UPDATE t
SET @check = (@orderCount / Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END,
    limit =
        CASE WHEN
            (@check >= t.Made)
        THEN
            @check
        ELSE
            t.Made
        END
FROM @t t
INNER JOIN @c cn ON cn.users = t.users
-- Check result
SELECT * FROM @t
It divides the orders among the users: 50 / 3 ≈ 16 orders per user. If I add 2 new users, then 50 / 5 ≈ 10. But user us1 has already made 12 orders, which is more than the new allocation of 10, so 12 is written into their limit column. That leaves 50 - 12 = 38 orders, so 38 / 4 ≈ 9 orders for each of the remaining users.

SELECT x.users,
    x.work,
    x.Made,
    SUM(CASE WHEN Made > NewLimit THEN Made ELSE 0 END) OVER () AS Dif,
    COUNT(CASE WHEN Made < NewLimit THEN NewLimit END) OVER () AS Cnt,
    CASE WHEN Made > NewLimit THEN 0 ELSE 1 END AS IsUsed,
    (CASE WHEN Made < NewLimit THEN
        ROW_NUMBER() OVER (PARTITION BY x.work, (CASE WHEN Made < NewLimit THEN 1 ELSE 0 END) ORDER BY work) ELSE 0 END) AS Rn
FROM (
    SELECT t.users, (@orderCount / Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END AS NewLimit,
        t.Made,
        t.work
    FROM @t t
    INNER JOIN @c cn ON cn.users = t.users
) x
I sum the Made values where the old distribution exceeds the new one (Dif), and I set the IsUsed flag for the rows that still fit the new distribution and count them (Cnt). Then I subtract Dif from the total number of orders and divide what is left among the users that qualify for the new distribution.
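For completeness, here is one way that second pass could be finished off. This is a minimal sketch only: it assumes the @t table variable from the script above, marks users as capped against the naive share @orderCount / (number of users), and requires that at least one uncapped user remains; it also does not re-check whether somebody becomes capped against the recomputed share.

DECLARE @n INT, @share INT, @dif INT, @cnt INT
SELECT @n = COUNT(*) FROM @t
SET @share = @orderCount / @n -- naive per-user share, e.g. 50 / 5 = 10
SELECT @dif = ISNULL(SUM(CASE WHEN Made > @share THEN Made END), 0), -- orders locked in by capped users
       @cnt = SUM(CASE WHEN Made > @share THEN 0 ELSE 1 END)         -- users still in the distribution
FROM @t
-- capped users keep what they have already made as their limit
UPDATE @t SET limit = Made WHERE Made > @share
-- everyone else shares the remainder, with the extra orders going to the first rows
;WITH r AS (
    SELECT users, ROW_NUMBER() OVER (ORDER BY users) AS Rn
    FROM @t
    WHERE Made <= @share
)
UPDATE t
SET limit = (@orderCount - @dif) / @cnt
          + CASE WHEN (@orderCount - @dif) % @cnt >= r.Rn THEN 1 ELSE 0 END
FROM @t t
INNER JOIN r ON r.users = t.users

With the test data above this yields limits of 12, 10, 10, 9 and 9, which sum to 50.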

Related

How to effectively split grouped records into batches

For each group in the table, I need to split that group into batches of a specific size, and mark each record in a batch with the corresponding batch id.
Right now my implementation, based on cursors, is IMHO clumsy. It takes 1 minute to split a set of 10,000 rows, which is, needless to say, very slow. Any clues how to make it work faster?
Here is the test script.
-- Needed to generate big data
DECLARE @Naturals TABLE (ID INT)
INSERT INTO @Naturals (ID)
VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)
DECLARE @TestData TABLE
(
    LINK INT,
    F_House INT,
    F_Batch UNIQUEIDENTIFIER
)
INSERT INTO @TestData (LINK, F_House)
SELECT ROW_NUMBER() OVER (ORDER BY T1.ID), ROW_NUMBER() OVER (ORDER BY T1.ID) % 5
FROM
    @Naturals T1
    CROSS JOIN @Naturals T2
    CROSS JOIN @Naturals T3
    CROSS JOIN @Naturals T4
    --CROSS JOIN @Naturals T5 -- that would give us 100 000
-- Finished preparing data (10 000 rows)
SELECT 'Processing:', COUNT(*) FROM @TestData
DECLARE @batchSize INT -- Amount of rows in each batch
SET @batchSize = 50
IF OBJECT_ID('tempdb..#G') IS NOT NULL -- Split set of data into groups. We need to create batches in each group.
    DROP TABLE #G
SELECT
    buf.F_House, COUNT(*) AS GroupCount
INTO #G
FROM @TestData buf
GROUP BY buf.F_House -- That logic could be a tricky one. Simplifying for now.
DECLARE @F_House INT -- Group key
DECLARE db_cursor CURSOR FOR
    SELECT F_House
    FROM #G
    ORDER BY F_House
OPEN db_cursor
FETCH NEXT FROM db_cursor INTO @F_House
WHILE @@FETCH_STATUS = 0
BEGIN
    PRINT 'Processing house group: ' + CAST(@F_House AS VARCHAR(10))
    -- For each group let's create batches
    WHILE EXISTS (SELECT 1 FROM @TestData AS itmds
                  WHERE itmds.F_House = @F_House
                    AND itmds.F_Batch IS NULL
                 )
    BEGIN
        DECLARE @batchLink UNIQUEIDENTIFIER
        SET @batchLink = NEWID()
        UPDATE itmds
        SET itmds.F_Batch = @batchLink
        FROM @TestData AS itmds
        WHERE itmds.F_House = @F_House
          AND itmds.F_Batch IS NULL
          AND itmds.LINK IN
          (
              SELECT TOP (@batchSize)
                  sub.LINK
              FROM @TestData sub
              WHERE sub.F_House = @F_House
                AND sub.F_Batch IS NULL
          )
    END
    FETCH NEXT FROM db_cursor INTO @F_House
END
CLOSE db_cursor
DEALLOCATE db_cursor
SELECT
    buf.F_House, COUNT(DISTINCT F_Batch) AS BatchCountInHouse
FROM @TestData buf
GROUP BY buf.F_House
ORDER BY buf.F_House
Expected output (considering batchsize = 50)
10 000 rows / 5 houses = 2000 rows/house
2000 rows/house / 50(batchSize) = 40 batches/house
This is set-based, avoiding a cursor. The assigned F_Batch is a BIGINT:
;WITH baseRowNum AS
(
    SELECT LINK, F_House,
        -- row number per F_House
        ROW_NUMBER() OVER (PARTITION BY F_House ORDER BY LINK) AS rn
    FROM @TestData
)
SELECT *,
    -- combine F_House & group number into a unique result
    F_House * 10000 +
    -- start a new sub group for every F_House or after @batchSize rows
    SUM(CASE WHEN rn % @batchSize = 1 THEN 1 ELSE 0 END)
        OVER (ORDER BY F_House, rn
              ROWS UNBOUNDED PRECEDING) AS F_Batch
FROM baseRowNum
If you really need a UNIQUEIDENTIFIER you can join back:
;WITH baseRowNums AS
(
    SELECT LINK, F_House,
        -- row number per F_House
        ROW_NUMBER() OVER (PARTITION BY F_House ORDER BY LINK) AS rn
    FROM @TestData
)
,batchNums AS
(
    SELECT *,
        -- combine F_House & group number into a unique result
        F_House * 10000 +
        -- start a new sub group for every F_House or after @batchSize rows
        SUM(CASE WHEN rn % @batchSize = 1 THEN 1 ELSE 0 END)
            OVER (ORDER BY F_House, rn
                  ROWS UNBOUNDED PRECEDING) AS F_Batch
    FROM baseRowNums
)
,GUIDs AS
(
    SELECT F_Batch, MAX(NEWID()) AS GUID
    FROM batchNums
    GROUP BY F_Batch
)
-- select * from batchNums
--     join GUIDs on batchNums.F_Batch = GUIDs.F_Batch
SELECT F_House, GUID, COUNT(*)
FROM batchNums
    JOIN GUIDs ON batchNums.F_Batch = GUIDs.F_Batch
GROUP BY F_House, GUID
ORDER BY F_House, COUNT(*) DESC
See Fiddle.
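As a quick sanity check on either variant, every batch except possibly the last one per house should contain exactly @batchSize rows. A small sketch, assuming the query output has been materialized into a hypothetical temp table #WithBatch:

SELECT F_House, F_Batch, COUNT(*) AS RowsInBatch
FROM #WithBatch
GROUP BY F_House, F_Batch
HAVING COUNT(*) <> @batchSize -- returns nothing here, since 2000 rows per house divide evenly by 50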
I would use an inner loop inside of an outer loop that references a grouping level.
Then you can iterate from the group down into the BatchGrouping. However, as you pointed out, speed is an issue with table variables and CTEs, so in this case I tested with a tempdb # table. This way I could index after the insert and optimize performance. I can run a million rows of aggregation logic in about 16 seconds, which I consider acceptable performance. But my dev box is an i7 6700 with 16 GB of DDR4 and an SSD; performance times may vary based on hardware, obviously.
--Make up some fake data for example
DECLARE
    @Start INT = 1
    , @End INT = 100000
;
SET NOCOUNT ON;
IF OBJECT_ID('tempdb..#Temp') IS NOT NULL
    DROP TABLE #Temp
CREATE TABLE #Temp (Id INT, Grp INT, Val VARCHAR(8), BatchGroup INT)
WHILE @Start <= @End
BEGIN
    INSERT INTO #Temp (Id, Grp, Val)
    VALUES (@Start, CAST(RAND() * 8 AS INT) + 1, LEFT(NEWID(), 8))
    SELECT @Start += 1;
END
CREATE CLUSTERED INDEX IX_Temp_Grp ON #Temp(Grp, BatchGroup)
--Determine the batch size you want for groupings
DECLARE @BatchSize INT = 1000;
--Let's randomly mess with groupings
DECLARE @X INT = 1
WHILE @X <= 4
BEGIN
    ; WITH x AS
    (
        SELECT TOP (@BatchSize * 4)
            Id
            , Grp
            , Val
        FROM #Temp
        WHERE Grp = CAST(RAND() * 8 AS INT) + 1
    )
    UPDATE x
    SET Grp = CAST(RAND() * 8 AS INT) + 1
    SELECT @X += 1
END
DECLARE
    @CurrentGroup INT = 1
    , @CurrentBatch INT = 1
WHILE @CurrentGroup <= (SELECT MAX(Grp) FROM #Temp) -- Exists (SELECT 1 FROM #Temp WHERE BatchGroup IS NULL)
BEGIN
    WHILE EXISTS (SELECT 1 FROM #Temp WHERE Grp = @CurrentGroup AND BatchGroup IS NULL)
    BEGIN
        ; WITH x AS
        (
            SELECT TOP (@BatchSize) *
            FROM #Temp
            WHERE Grp = @CurrentGroup
              AND BatchGroup IS NULL
        )
        UPDATE x
        SET BatchGroup = @CurrentBatch
        SELECT @CurrentBatch += 1;
    END
    SELECT @CurrentBatch = 1
    SELECT @CurrentGroup += 1;
END
--Proof
SELECT
    Grp
    , COUNT(DISTINCT Id)
    , COUNT(DISTINCT BatchGroup)
FROM #Temp
GROUP BY Grp
Actually, I've tried NTILE() with cursors and it's quite fast (I mean, it's faster than 1 minute for 10 000 rows).
10 000 rows for 0 seconds.
100 000 rows for 3 seconds.
1 000 000 rows for 34 seconds.
10 000 000 rows for 6 minutes
Linear growth in complexity, which is nice.
-- Needed to generate big data
DECLARE @Naturals TABLE (ID INT)
INSERT INTO @Naturals (ID)
VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)
DECLARE @TestData TABLE
(
    LINK INT,
    F_House INT,
    F_Batch UNIQUEIDENTIFIER
)
INSERT INTO @TestData (LINK, F_House)
SELECT ROW_NUMBER() OVER (ORDER BY T1.ID), ROW_NUMBER() OVER (ORDER BY T1.ID) % 5
FROM
    @Naturals T1
    CROSS JOIN @Naturals T2
    CROSS JOIN @Naturals T3
    CROSS JOIN @Naturals T4
    --CROSS JOIN @Naturals T5 -- that would give us 100 000
-- Finished preparing data (10 000 rows)
SELECT 'Processing:', COUNT(*) FROM @TestData
DECLARE @batchSize INT -- Amount of rows in each batch
SET @batchSize = 50
IF OBJECT_ID('tempdb..#G') IS NOT NULL -- Split set of data into groups. We need to create batches in each group.
    DROP TABLE #G
SELECT
    buf.F_House, COUNT(*) AS GroupCount
INTO #G
FROM @TestData buf
GROUP BY buf.F_House -- That logic could be a tricky one. Simplifying for now.
DECLARE @F_House INT -- Group key
DECLARE db_cursor CURSOR FOR
    SELECT F_House
    FROM #G
    ORDER BY F_House
OPEN db_cursor
FETCH NEXT FROM db_cursor INTO @F_House
WHILE @@FETCH_STATUS = 0
BEGIN
    PRINT 'Processing house group: ' + CAST(@F_House AS VARCHAR(10))
    DECLARE @rowsInGroup INT
    SELECT @rowsInGroup = COUNT(*) FROM @TestData
    WHERE F_House = @F_House
    IF OBJECT_ID('tempdb..#TileBatch') IS NOT NULL
        DROP TABLE #TileBatch
    SELECT
        T.[NTile], NEWID() AS F_Batch
    INTO #TileBatch
    FROM
    (
        SELECT DISTINCT
            NTILE(@rowsInGroup / @batchSize) OVER (ORDER BY LINK) AS [NTile]
        FROM
            @TestData
        WHERE F_House = @F_House
    ) T
    UPDATE D
    SET D.F_Batch = B.F_Batch
    FROM
        @TestData D
        INNER JOIN
        (
            SELECT
                *, NTILE(@rowsInGroup / @batchSize) OVER (ORDER BY LINK) AS [NTile]
            FROM
                @TestData
            WHERE F_House = @F_House
        ) DT ON D.LINK = DT.LINK
        INNER JOIN
        #TileBatch B ON DT.[NTile] = B.[NTile]
    WHERE D.F_House = @F_House
    FETCH NEXT FROM db_cursor INTO @F_House
END
CLOSE db_cursor
DEALLOCATE db_cursor
SELECT
    buf.F_House, COUNT(DISTINCT F_Batch) AS BatchCountInHouse
FROM @TestData buf
GROUP BY buf.F_House
ORDER BY buf.F_House
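For comparison, the same idea can be written without the cursor. This is a sketch only, assuming LINK is unique: integer division on a per-house row number stands in for NTILE, and one NEWID() is drawn per (house, batch) group the same way the GUIDs CTE above does it.

;WITH b AS
(
    SELECT LINK, F_House,
        (ROW_NUMBER() OVER (PARTITION BY F_House ORDER BY LINK) - 1) / @batchSize AS BatchNo
    FROM @TestData
)
, g AS
(
    SELECT F_House, BatchNo, MAX(NEWID()) AS BatchGuid -- one GUID per (house, batch) pair
    FROM b
    GROUP BY F_House, BatchNo
)
UPDATE d
SET F_Batch = g.BatchGuid
FROM @TestData d
INNER JOIN b ON b.LINK = d.LINK
INNER JOIN g ON g.F_House = b.F_House AND g.BatchNo = b.BatchNo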

SQL Server - loop through table and update based on count

I have a SQL Server database. I need to loop through a table and get the count of each value in the column RevID. Each value should appear in the table only a certain number of times, for example 125 times. If the count of a value is greater than or less than 125, I need to update the column so that all values in RevID (there are over 25 different values) end up around the same count of 125 (it's ok to be a few numbers off).
For example, if the count of RevID = 'A2' is 45 and the count of RevID = 'B2' is 165, then I need to update RevID so the 45 count increases and the 165 count decreases until both are within range of 125.
This is what I have so far:
DECLARE @i INT = 1,
    @RevCnt INT = SELECT RevId, COUNT(RevId) FROM MyTable GROUP BY RevId
WHILE (@RevCnt >= 50)
BEGIN
    UPDATE MyTable
    SET RevID = (SELECT COUNT(RevID) FROM MyTable)
    WHERE RevID < 50)
    @i = @i + 1
END
I have also played around with a cursor and an INSTEAD OF trigger. Any ideas on how to achieve this? Thanks for any input.
Okay, I came back to this because I found it interesting, even though clearly there are some business rules/discussion that you and I and others are not seeing. Anyway, if you want to distribute evenly and arbitrarily, there are a few ways you could do it, such as building recursive Common Table Expressions [CTEs] or building temp tables. Here is the way I decided to try it. I did use one temp table, because SQL was throwing in a little inconsistency with the main logic table as a CTE about every 10th run, and the temp table seems to have cleared that up. This will evenly spread the RevIds, arbitrarily and randomly assigning any remainder (# of records / # of RevIds) to one of the RevIds. The script also doesn't rely on having a unique ID or anything; it works dynamically over row numbers it creates. Just subtract out the test data and you have what you more than likely want, though rebuilding the table/values would probably be easier.
--Build some test data
DECLARE @Table AS TABLE (RevId VARCHAR(10))
DECLARE @C AS INT = 1
WHILE @C <= 400
BEGIN
    IF @C <= 200
    BEGIN
        INSERT INTO @Table (RevId) VALUES ('A1')
    END
    IF @C <= 170
    BEGIN
        INSERT INTO @Table (RevId) VALUES ('B2')
    END
    IF @C <= 100
    BEGIN
        INSERT INTO @Table (RevId) VALUES ('C3')
    END
    IF @C <= 400
    BEGIN
        INSERT INTO @Table (RevId) VALUES ('D4')
    END
    IF @C <= 1
    BEGIN
        INSERT INTO @Table (RevId) VALUES ('E5')
    END
    SET @C = @C + 1
END
--save starting counts of test data to temp table to compare with later
IF OBJECT_ID('tempdb..#StartingCounts') IS NOT NULL
BEGIN
DROP TABLE #StartingCounts
END
SELECT
RevId
,COUNT(*) as Occurences
INTO #StartingCounts
FROM
@Table
GROUP BY
RevId
ORDER BY
RevId
/************************ This is the main method **********************************/
--clear temp table that is the main processing logic
IF OBJECT_ID('tempdb..#RowNumsToChange') IS NOT NULL
BEGIN
DROP TABLE #RowNumsToChange
END
--figure out how many records there are and how many there should be for each RevId
;WITH cteTargetNumbers AS (
SELECT
RevId
--,COUNT(*) as RevIdCount
--,SUM(COUNT(*)) OVER (PARTITION BY 1) / COUNT(*) OVER (PARTITION BY 1) +
--CASE
--WHEN ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY NEWID()) <=
--SUM(COUNT(*)) OVER (PARTITION BY 1) % COUNT(*) OVER (PARTITION BY 1)
--THEN 1
--ELSE 0
--END as TargetNumOfRecords
,SUM(COUNT(*)) OVER (PARTITION BY 1) / COUNT(*) OVER (PARTITION BY 1) +
CASE
WHEN ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY NEWID()) <=
SUM(COUNT(*)) OVER (PARTITION BY 1) % COUNT(*) OVER (PARTITION BY 1)
THEN 1
ELSE 0
END - COUNT(*) AS NumRecordsToUpdate
FROM
@Table
GROUP BY
RevId
)
, cteEndRowNumsToChange AS (
SELECT *
,SUM(CASE WHEN NumRecordsToUpdate > 1 THEN NumRecordsToUpdate ELSE 0 END)
OVER (PARTITION BY 1 ORDER BY RevId) AS ChangeEndRowNum
FROM
cteTargetNumbers
)
SELECT
*
,LAG(ChangeEndRowNum,1,0) OVER (PARTITION BY 1 ORDER BY RevId) as ChangeStartRowNum
INTO #RowNumsToChange
FROM
cteEndRowNumsToChange
;WITH cteOriginalTableRowNum AS (
SELECT
RevId
,ROW_NUMBER() OVER (PARTITION BY RevId ORDER BY (SELECT 0)) as RowNumByRevId
FROM
@Table t
)
, cteRecordsAllowedToChange AS (
SELECT
o.RevId
,o.RowNumByRevId
,ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY (SELECT 0)) as ChangeRowNum
FROM
cteOriginalTableRowNum o
INNER JOIN #RowNumsToChange t
ON o.RevId = t.RevId
AND t.NumRecordsToUpdate < 0
AND o.RowNumByRevId <= ABS(t.NumRecordsToUpdate)
)
UPDATE o
SET RevId = u.RevId
FROM
cteOriginalTableRowNum o
INNER JOIN cteRecordsAllowedToChange c
ON o.RevId = c.RevId
AND o.RowNumByRevId = c.RowNumByRevId
INNER JOIN #RowNumsToChange u
ON c.ChangeRowNum > u.ChangeStartRowNum
AND c.ChangeRowNum <= u.ChangeEndRowNum
AND u.NumRecordsToUpdate > 0
IF OBJECT_ID('tempdb..#RowNumsToChange') IS NOT NULL
BEGIN
DROP TABLE #RowNumsToChange
END
/***************************** End of Main Method *******************************/
-- Compare the results and clean up
;WITH ctePostUpdateResults AS (
SELECT
RevId
,COUNT(*) as AfterChangeOccurences
FROM
@Table
GROUP BY
RevId
)
SELECT *
FROM
#StartingCounts s
INNER JOIN ctePostUpdateResults r
ON s.RevId = r.RevId
ORDER BY
s.RevId
IF OBJECT_ID('tempdb..#StartingCounts') IS NOT NULL
BEGIN
DROP TABLE #StartingCounts
END
Since you've given no rules for how you'd like the balance to operate, we're left to speculate. Here's an approach that finds the most overrepresented value and then finds an underrepresented value that can take on the entire overage.
I have no idea how optimal this is, and it will probably run in an infinite loop without more logic.
declare @balance int = 125;
declare @cnt_over int;
declare @cnt_under int;
declare @revID_overrepresented varchar(32);
declare @revID_underrepresented varchar(32);
declare @rowcount int = 1;
while @rowcount > 0
begin
    select top 1 @revID_overrepresented = RevID, @cnt_over = count(*)
    from T
    group by RevID
    having count(*) > @balance
    order by count(*) desc

    select top 1 @revID_underrepresented = RevID, @cnt_under = count(*)
    from T
    group by RevID
    having count(*) < @balance - @cnt_over
    order by count(*) desc

    update top (@cnt_over - @balance) T
    set RevId = @revID_underrepresented
    where RevId = @revID_overrepresented;

    set @rowcount = @@rowcount;
end
The problem is I don't even know what you mean by balance... You say it needs to be evenly represented, but it seems like you want it to be 125. 125 is not "even"; it is just 125.
I can't tell what you are trying to do, but I'm guessing this is not really a SQL problem. SQL can help, though. Here is some helpful SQL that you can use from your language of choice to solve the problem.
Find the rev values and their counts:
SELECT RevID, COUNT(*)
FROM MyTable
GROUP BY RevID
Update @X rows (with a RevID value of @RevID) to a new value @NewValue:
UPDATE TOP (@X) MyTable
SET RevID = @NewValue
WHERE RevID = @RevID
Using these two queries, you should be able to apply your business rules (which you never specified) in a loop or whatever to change the data.
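For instance, a minimal sketch of such a loop, assuming MyTable and RevID from the question, a target of 125 per value, and made-up variable names (@target, @over, @under, @n); it moves the surplus pairwise until nothing is over or under the target:

DECLARE @target INT = 125;
DECLARE @over VARCHAR(32), @under VARCHAR(32), @surplus INT, @deficit INT, @n INT;
WHILE 1 = 1
BEGIN
    -- most overrepresented value
    SELECT TOP (1) @over = RevID, @surplus = COUNT(*) - @target
    FROM MyTable GROUP BY RevID HAVING COUNT(*) > @target ORDER BY COUNT(*) DESC;
    IF @@ROWCOUNT = 0 BREAK; -- nothing above target: done
    -- most underrepresented value
    SELECT TOP (1) @under = RevID, @deficit = @target - COUNT(*)
    FROM MyTable GROUP BY RevID HAVING COUNT(*) < @target ORDER BY COUNT(*) ASC;
    IF @@ROWCOUNT = 0 BREAK; -- nothing below target: done
    -- move the smaller of surplus/deficit rows from one value to the other
    SET @n = CASE WHEN @surplus < @deficit THEN @surplus ELSE @deficit END;
    UPDATE TOP (@n) MyTable SET RevID = @under WHERE RevID = @over;
END

Each pass either empties a surplus or fills a deficit, so the loop terminates even when the row total doesn't divide evenly.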

distribute value to all rows while updating table

I have a table structure like tblCustData:
ID UserID Fee FeePaid
1 12 150 0
2 12 100 0
3 12 50 0
A value has to be distributed into the FeePaid column, so that if the @Amt variable holds 200, it should update the first two rows.
Output should be like
ID UserID Fee FeePaid
1 12 150 150
2 12 100 50
3 12 50 0
FeePaid should not be greater than the Fee column. But if I pass 350 in the @Amt variable, it should produce output like
ID UserID Fee FeePaid
1 12 150 200
2 12 100 100
3 12 50 50
That is only allowed when @Amt exceeds the total of the Fee column.
I cannot think beyond this query:
UPDATE tblCustData
SET FeePaid = @Amt
WHERE UserID = 12
First, with CTE syntax, we prepare a table with the distribution of the sums; then, using the unique field Code, we update the main table, using CASE to handle all the possible situations (including the first row taking the remainder).
Declare @Amt int;
SET @Amt = 250;
with T as
(
    SELECT ROW_NUMBER() OVER (ORDER BY Fee desc) as rn, *
    FROM tblCustData WHERE UserId = 12
)
,T2 as
(
    SELECT *,
        ISNULL((SELECT SUM(Fee - FeePaid) FROM T WHERE T1.rn < rn), 0) as PrevSum
    FROM T as T1
)
UPDATE A
SET A.FeePaid = A.FeePaid + CASE WHEN (B.PrevSum + B.Fee - B.FeePaid <= @Amt)
                                  AND (B.rn <> 1)
                                 THEN B.Fee - B.FeePaid
                                 WHEN (B.PrevSum + B.Fee - B.FeePaid <= @Amt) AND (B.rn = 1)
                                 THEN @Amt - B.PrevSum
                                 WHEN B.PrevSum >= @Amt
                                 THEN 0
                                 WHEN B.PrevSum + B.Fee - B.FeePaid > @Amt
                                 THEN @Amt - B.PrevSum
                            END
FROM
    tblCustData A
    JOIN T2 B ON A.Code = B.Code
GO
SQLFiddle demo
Try ..
declare @t table (id int identity, UserId int, Fee money, FeePaid money)
insert into @t (UserID, Fee, FeePaid)
values
(12, 150, 0)
,(12, 100, 0)
,(12, 50 , 0)
declare @amt money = 200; -- change to 400 to test overpayment
declare @Fees money;
select @Fees = sum(Fee) from @t;
declare @derivedt table (deid int, id int, UserId int, Fee money, FeePaid money)
insert into @derivedt (deid, id, UserId, Fee, FeePaid)
select row_number() over (order by case when @amt <= @Fees then id else -id end asc), id, UserId, Fee, FeePaid
from @t
-- order by case when @amt <= @Fees then id else -id end asc
; with cte(deid, id, UserId, Fee, FeePaid, Remainder)
as
(
    select 0 as deid, 0 as id, 0 as UserId, cast(0.00 as money) as Fee, cast(0.00 as money) as FeePaid, @amt as Remainder
    from @derivedt
    where id = 1
    union all
    select t.deid, t.id, t.UserId, t.Fee, case when cte.Remainder > t.Fee then t.Fee else cte.Remainder end as FeePaid
        , case when cte.Remainder > t.Fee then cte.Remainder - t.Fee else 0 end as Remainder
    from @derivedt t inner join cte cte on t.deid = (cte.deid + 1)
)
update origt
set FeePaid = det.FeePaid
from @t origt
inner join
(
    select cte1.deid, cte1.id, cte1.UserId, cte1.Fee, cte1.FeePaid + ISNULL(cte2.Remainder, 0) as FeePaid
    from cte cte1
    left outer join (select top 1 deid, Remainder from cte order by deid desc) cte2
        on cte1.deid = cte2.deid
    where cte1.deid > 0
) det
on origt.id = det.id
select *
from @t
Modified for continuously updating the value:
-- Create table once and insert into table once
create table #t (id int identity, UserId int, Fee money, FeePaid money)
insert into #t (UserID, Fee, FeePaid)
values
(12, 150, 0)
,(12, 100, 0)
,(12, 50 , 0)
-- ===============================
-- Run multiple times to populate #t table
declare @amt money = 100; -- change to 400 to test overpayment
declare @Fees money;
select @Fees = sum(Fee - FeePaid) from #t;
declare @derivedt table (deid int, id int, UserId int, Fee money, FeePaid money)
insert into @derivedt (deid, id, UserId, Fee, FeePaid)
select row_number() over (order by case when @amt <= @Fees then id else -id end asc), id, UserId, (Fee - FeePaid) as Fee, FeePaid
from #t
-- order by case when @amt <= @Fees then id else -id end asc
; with cte(deid, id, UserId, Fee, FeePaid, Remainder)
as
(
    select 0 as deid, 0 as id, 0 as UserId, cast(0.00 as money) as Fee, cast(0.00 as money) as FeePaid, @amt as Remainder
    from @derivedt
    where id = 1
    union all
    select t.deid, t.id, t.UserId, t.Fee, case when cte.Remainder >= t.Fee then t.Fee else cte.Remainder end as FeePaid
        , case when cte.Remainder >= t.Fee then cte.Remainder - t.Fee else 0 end as Remainder
    from @derivedt t inner join cte cte on t.deid = (cte.deid + 1)
)
update origt
set FeePaid = origt.FeePaid + det.FeePaid
from #t origt
inner join
(
    select cte1.deid, cte1.id, cte1.UserId, cte1.Fee, cte1.FeePaid + ISNULL(cte2.Remainder, 0) as FeePaid, cte1.Remainder
    from cte cte1
    left outer join (select top 1 deid, Remainder from cte order by deid desc) cte2
        on cte1.deid = cte2.deid
    where cte1.deid > 0
) det
on origt.id = det.id
select *
from #t
-- Drop temp table after
-- drop table #t
Apart from your code, I added an identity column to your table. See the code.
DECLARE @TAB TABLE (ID INT IDENTITY(1,1), USERID INT, FEE INT, FEEPAID INT)
INSERT INTO @TAB VALUES (12,150,0),(12,100,0),(12,50,0)
DECLARE @AMOUNT INT = 230,
        @AMOUNTNEW INT = 0,
        @B INT = 1,
        @S INT = 1, @E INT = (SELECT COUNT(*) FROM @TAB)
WHILE @S <= @E
BEGIN
    UPDATE LU
    SET LU.FEEPAID = CASE WHEN @AMOUNT >= FEE THEN FEE ELSE @AMOUNT END
    FROM @TAB LU
    WHERE LU.ID = @S
    SET @AMOUNT = @AMOUNT - (SELECT FEE FROM @TAB WHERE ID = @S)
    IF @AMOUNT <= 0
        SET @S = @E
    SET @S = @S + 1
END
SELECT * FROM @TAB
Result (with @AMOUNT = 230): FEEPAID comes out as 150, 80 and 0 for IDs 1, 2 and 3.
I hope the idea is clear; we can work from here.

SQL Server 2008 filling gaps with dimension

I have a data table as below
#data
---------------
Account AccountType
---------------
1 2
2 0
3 5
4 2
5 1
6 5
AccountType 2 is a header and 5 is a total. Accounts of type 2 have to look down at the next 1 or 0 to determine whether their Dim value is 1 or 0. Totals of type 5 have to look up at the nearest 1 or 0 to determine their Dim value. Accounts of type 1 or 0 have their own type as Dim.
Accounts of type 2 appear as islands, so it's not enough to just check RowNumber + 1, and the same goes for accounts of type 5.
I have arrived at the following table using CTEs, but I can't find a quick way to get from here to my final result of Account, AccountType, Dim for all accounts.
T3
-------------------
StartRow EndRow AccountType Dim
-------------------
1 1 2 0
2 2 0 0
3 3 5 0
4 4 2 1
5 5 0 1
6 6 5 1
The code below is MS T-SQL; copy-paste it all and see it run. The final join on the CTE select statement is extremely slow: even for 500 rows it takes 30 seconds, and I have 100,000 rows to handle. I have done a cursor-based solution which does it in 10-20 seconds, which is workable, and a fast recursive CTE solution that does it in 5 seconds for 100,000 rows, but that one depends on the fragmentation of the #data table. I should add that this is simplified; the real problem has a lot more dimensions that need to be taken into account, but it works the same way as this simple version.
Anyway, is there a fast way to do this using joins or another set-based solution?
SET NOCOUNT ON
IF OBJECT_ID('tempdb..#data') IS NOT NULL
    DROP TABLE #data
CREATE TABLE #data
(
    Account INTEGER IDENTITY(1,1),
    AccountType INTEGER,
)
BEGIN -- TEST DATA
    DECLARE @Counter INTEGER = 0
    DECLARE @MaxDataRows INTEGER = 50 -- Change here to check performance
    DECLARE @Type INTEGER
    WHILE (@Counter < @MaxDataRows)
    BEGIN
        SET @Type = CASE
            WHEN @Counter % 10 < 3 THEN 2
            WHEN @Counter % 10 >= 8 THEN 5
            WHEN @Counter % 10 >= 3 THEN (CASE WHEN @Counter < @MaxDataRows / 2.0 THEN 0 ELSE 1 END)
            ELSE 0
        END
        INSERT INTO #data VALUES (@Type)
        SET @Counter = @Counter + 1
    END
END -- TEST DATA END
;WITH groupIds_cte AS
(
SELECT *,
ROW_NUMBER() OVER (PARTITION BY AccountType ORDER BY Account) - Account AS GroupId
FROM #data
),
islandRanges_cte AS
(
SELECT
MIN(Account) AS StartRow,
MAX(Account) AS EndRow,
AccountType
FROM groupIds_cte
GROUP BY GroupId,AccountType
),
T3 AS
(
SELECT I.*, J.AccountType AS Dim
FROM islandRanges_cte I
INNER JOIN islandRanges_cte J
ON (I.EndRow + 1 = J.StartRow AND I.AccountType = 2)
UNION ALL
SELECT I.*, J.AccountType AS Dim
FROM islandRanges_cte I
INNER JOIN islandRanges_cte J
ON (I.StartRow - 1 = J.EndRow AND I.AccountType = 5)
UNION ALL
SELECT *, AccountType AS Dim
FROM islandRanges_cte
WHERE AccountType = 0 OR AccountType = 1
),
T4 AS
(
SELECT Account, Dim
FROM (
SELECT FlattenRow AS Account, StartRow, EndRow, Dim
FROM T3 I
CROSS APPLY (VALUES(StartRow),(EndRow)) newValues (FlattenRow)
) T
)
--SELECT * FROM T3 ORDER BY StartRow
--SELECT * FROM T4 ORDER BY Account
-- Final correct result but very very slow
SELECT D.Account, D.AccountType, I.Dim FROM T3 I
INNER JOIN #data D
ON D.Account BETWEEN I.StartRow AND I.EndRow
ORDER BY Account
EDIT with some time testing
SET NOCOUNT ON
IF OBJECT_ID('tempdb..#times') IS NULL
    CREATE TABLE #times
    (
        RecId INTEGER IDENTITY(1,1),
        Batch INTEGER,
        Method NVARCHAR(255),
        MethodDescription NVARCHAR(255),
        RunTime INTEGER
    )
IF OBJECT_ID('tempdb..#batch') IS NULL
    CREATE TABLE #batch
    (
        Batch INTEGER IDENTITY(1,1),
        Bit BIT
    )
INSERT INTO #batch VALUES (0)
IF OBJECT_ID('tempdb..#data') IS NOT NULL
    DROP TABLE #data
CREATE TABLE #data
(
    Account INTEGER
)
CREATE NONCLUSTERED INDEX data_account_index ON #data (Account)
IF OBJECT_ID('tempdb..#islands') IS NOT NULL
    DROP TABLE #islands
CREATE TABLE #islands
(
    AccountFrom INTEGER,
    AccountTo INTEGER,
    Dim INTEGER,
)
CREATE NONCLUSTERED INDEX islands_from_index ON #islands (AccountFrom, AccountTo, Dim)
BEGIN -- TEST DATA
    INSERT INTO #data
    SELECT TOP 100000 ROW_NUMBER() OVER (ORDER BY t1.number) AS N
    FROM master..spt_values t1
    CROSS JOIN master..spt_values t2
    INSERT INTO #islands
    SELECT MIN(Account) AS Start, MAX(Account), Grp
    FROM (SELECT *, NTILE(10) OVER (ORDER BY Account) AS Grp FROM #data) T
    GROUP BY Grp ORDER BY Start
END -- TEST DATA END
--SELECT * FROM #data
--SELECT * FROM #islands
--PRINT CONVERT(varchar(20), DATEDIFF(MS, @RunDate, GETDATE())) + ' ms Sub Query'
DECLARE @RunDate datetime
SET @RunDate = GETDATE()
SELECT Account, (SELECT Dim FROM #islands WHERE Account BETWEEN AccountFrom AND AccountTo) AS Dim
FROM #data
INSERT INTO #times VALUES ((SELECT MAX(Batch) FROM #batch), 'subquery', '', DATEDIFF(MS, @RunDate, GETDATE()))
SET @RunDate = GETDATE()
SELECT D.Account, V.Dim
FROM #data D
CROSS APPLY
(
    SELECT Dim FROM #islands V
    WHERE D.Account BETWEEN V.AccountFrom AND V.AccountTo
) V
INSERT INTO #times VALUES ((SELECT MAX(Batch) FROM #batch), 'crossapply', '', DATEDIFF(MS, @RunDate, GETDATE()))
SET @RunDate = GETDATE()
SELECT D.Account, I.Dim
FROM #data D
JOIN #islands I
    ON D.Account BETWEEN I.AccountFrom AND I.AccountTo
INSERT INTO #times VALUES ((SELECT MAX(Batch) FROM #batch), 'join', '', DATEDIFF(MS, @RunDate, GETDATE()))
SET @RunDate = GETDATE()
;WITH cte AS
(
    SELECT Account, AccountFrom, AccountTo, Dim, 1 AS Counting
    FROM #islands
    CROSS APPLY (VALUES (AccountFrom), (AccountTo)) V (Account)
    UNION ALL
    SELECT Account + 1, AccountFrom, AccountTo, Dim, Counting + 1
    FROM cte
    WHERE (Account + 1) > AccountFrom AND (Account + 1) < AccountTo
)
SELECT Account, Dim, Counting FROM cte OPTION (MAXRECURSION 32767)
INSERT INTO #times VALUES ((SELECT MAX(Batch) FROM #batch), 'recursivecte', '', DATEDIFF(MS, @RunDate, GETDATE()))
You can select from the #times table to see the run times :)
I think you want a join, but using an inequality rather than an equality:
select tt.id, tt.dim1, it.dim2
from TallyTable tt join
IslandsTable it
on tt.id between it."from" and it."to"
This works for the data that you provide in the question.
Here is another idea that might work. Here is the query:
select d.*,
(select top 1 AccountType from #data d2 where d2.Account > d.Account and d2.AccountType not in (2, 5)
) nextAccountType
from #data d
order by d.account;
I just ran this on 50,000 rows and this version took 17 seconds on my system. Changing the table to:
CREATE TABLE #data (
Account INTEGER IDENTITY(1,1) primary key,
AccountType INTEGER,
);
That actually slowed it down to about 1:33, quite to my surprise. Perhaps one of these will help you.
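Along the same lines, both lookup directions can be combined into one 2008-compatible statement. A sketch only, assuming the #data table from the question and an index on Account so the TOP (1) probes stay cheap:

SELECT d.Account, d.AccountType,
    CASE WHEN d.AccountType IN (0, 1) THEN d.AccountType -- 0/1 rows are their own Dim
         WHEN d.AccountType = 5 THEN up.AccountType      -- totals look up
         WHEN d.AccountType = 2 THEN dn.AccountType      -- headers look down
    END AS Dim
FROM #data d
OUTER APPLY (SELECT TOP (1) p.AccountType FROM #data p
             WHERE p.Account < d.Account AND p.AccountType IN (0, 1)
             ORDER BY p.Account DESC) up
OUTER APPLY (SELECT TOP (1) n.AccountType FROM #data n
             WHERE n.Account > d.Account AND n.AccountType IN (0, 1)
             ORDER BY n.Account ASC) dn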

Sequential numbers randomly selected and added to table

The SO question has led me to the following question.
If a table has 16 rows, I'd like to add a field to the table with the numbers 1, 2, 3, ..., 16 arranged randomly; i.e., the RndVal field for row 1 could be 2, for row 2 it could be 5, and so on: each of the 16 integers needs to appear exactly once, without repetition.
Why doesn't the following work? Ideally I'd like to see this working, and then see alternative solutions.
This creates the table ok:
IF OBJECT_ID('tempdb..#A') IS NOT NULL BEGIN DROP TABLE #A END
IF OBJECT_ID('tempdb..#B') IS NOT NULL BEGIN DROP TABLE #B END
IF OBJECT_ID('tempdb..#C') IS NOT NULL BEGIN DROP TABLE #C END
IF OBJECT_ID('tempdb..#myTable') IS NOT NULL BEGIN DROP TABLE #myTable END
CREATE TABLE #B (B_ID INT)
CREATE TABLE #C (C_ID INT)
INSERT INTO #B(B_ID) VALUES
(10),
(20),
(30),
(40)
INSERT INTO #C(C_ID)VALUES
(1),
(2),
(3),
(4)
CREATE TABLE #A
(
B_ID INT
, C_ID INT
, RndVal INT
)
INSERT INTO #A(B_ID, C_ID, RndVal)
SELECT
#B.B_ID
, #C.C_ID
, 0
FROM #B CROSS JOIN #C;
Then I'm attempting to add the random column using the following. The logic is to add random numbers between 1 and 16, then to effectively overwrite any duplicated ones with other numbers, in a loop ...
SELECT
    ROW_NUMBER() OVER (ORDER BY B_ID) AS Row
    , B_ID
    , C_ID
    , RndVal
INTO #myTable
FROM #A
DECLARE @rowsRequired INT = (SELECT COUNT(*) CNT FROM #myTable)
DECLARE @i INT = (SELECT @rowsRequired - SUM(CASE WHEN RndVal > 0 THEN 1 ELSE 0 END) FROM #myTable) --0
DECLARE @end INT = 1
WHILE @end > 0
BEGIN
    SELECT @i = @rowsRequired - SUM(CASE WHEN RndVal > 0 THEN 1 ELSE 0 END) FROM #myTable
    WHILE @i > 0
    BEGIN
        UPDATE x
        SET x.RndVal = FLOOR(RAND() * @rowsRequired)
        FROM #myTable x
        WHERE x.RndVal = 0
        SET @i = @i - 1
    END
    -- this is to remove possible duplicates
    UPDATE c
    SET c.RndVal = 0
    FROM
        #myTable c
        INNER JOIN
        (
            SELECT RndVal
            FROM #myTable
            GROUP BY RndVal
            HAVING COUNT(RndVal) > 1
        ) t
        ON c.RndVal = t.RndVal
    SET @end = @@ROWCOUNT
END
TRUNCATE TABLE #A
INSERT INTO #A
SELECT
    B_ID
    , C_ID
    , RndVal
FROM #myTable
If the original table has 6 rows then the result should end up something like this
B_ID|C_ID|RndVal
----------------
| | 5
| | 4
| | 1
| | 6
| | 3
| | 2
I don't understand your code, frankly.
This will update each row with a random, non-repeated number between 1 and the number of rows in the table:
UPDATE T
SET SomeCol = T2.X
FROM
MyTable T
JOIN
(
SELECT
KeyCol, ROW_NUMBER() OVER (ORDER BY NEWID()) AS X
FROM
MyTable
) T2 ON T.KeyCol = T2.KeyCol
This is more concise, but I can't test to see if it works as expected:
UPDATE T
SET SomeCol = X
FROM
(
SELECT
SomeCol, ROW_NUMBER() OVER (ORDER BY NEWID()) AS X
FROM
MyTable
) T
When you add TOP (1) (because you need to update the first RndVal = 0 record) and +1 (because otherwise your zero marker means nothing) to your update, things start to move, but extremely slowly (around 40 seconds on my rather outdated laptop). This is because, as #myTable gets filled with generated random numbers, it becomes less and less probable to hit the missing ones: you usually get a duplicate and have to start over.
UPDATE TOP (1) x
SET x.RndVal = FLOOR(RAND() * @rowsRequired) + 1
FROM #myTable x
WHERE x.RndVal = 0
Of course, @gbn has a perfectly valid solution.
This is basically the same as the previous answer, but specific to your code:
;WITH CTE As
(
SELECT B_ID, C_ID, RndVal,
ROW_NUMBER() OVER(ORDER BY NewID()) As NewOrder
FROM #A
)
UPDATE CTE
SET RndVal = NewOrder
SELECT * FROM #A ORDER BY RndVal
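A quick way to confirm the permutation property after running either version (nothing assumed beyond the #A table above): the distinct count must equal the row count, and the values must span 1 through that count.

SELECT COUNT(*) AS TotalRows,
       COUNT(DISTINCT RndVal) AS DistinctValues, -- must equal TotalRows
       MIN(RndVal) AS MinVal,                    -- must be 1
       MAX(RndVal) AS MaxVal                     -- must equal TotalRows
FROM #A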