I have a SQL Server database. I need to loop through a table to get the count of each value in the column 'RevID'. Each value should appear in the table only a certain number of times - for example, 125 times. If the count of a value is greater than or less than 125, I need to update the column so that all values of RevID (there are over 25 different values) fall within the same range of about 125 (it's OK to be a few numbers off).
For example, if the count of RevID = 'A2' is 45 and the count of RevID = 'B2' is 165, then I need to update RevID so the 45 count increases and the 165 count decreases until both are near 125.
This is what I have so far:
DECLARE @i INT = 1,
@RevCnt INT = SELECT RevId, COUNT(RevId) FROM MyTable group by RevId
WHILE(@RevCnt >= 50)
BEGIN
UPDATE MyTable
SET RevID= (SELECT COUNT(RevID) FROM MyTable)
WHERE RevID < 50)
@i = @i + 1
END
I have also played around with a cursor and an INSTEAD OF trigger. Any ideas on how to achieve this? Thanks for any input.
Okay, I came back to this because I found it interesting, even though clearly there are some business rules/discussion that you and I and others are not seeing. Anyway, if you want to distribute evenly and arbitrarily, there are a few ways you could do it: building recursive common table expressions (CTEs), building temp tables, and more. Here is the way I decided to try. I did use one temp table, because SQL was throwing in a little inconsistency with the main logic table as a CTE about every tenth run, and the temp table seems to have cleared that up. This will evenly spread RevIds, randomly assigning any remainder (# of records / # of RevIds) to one of the RevIds. The script also doesn't rely on having a unique ID or anything; it works dynamically over row numbers it creates. Just subtract out the test data and you have what you more than likely want, though rebuilding the table/values would probably be easier.
--Build Some Test Data
DECLARE @Table AS TABLE (RevId VARCHAR(10))
DECLARE @C AS INT = 1
WHILE @C <= 400
BEGIN
IF @C <= 200
BEGIN
INSERT INTO @Table (RevId) VALUES ('A1')
END
IF @C <= 170
BEGIN
INSERT INTO @Table (RevId) VALUES ('B2')
END
IF @C <= 100
BEGIN
INSERT INTO @Table (RevId) VALUES ('C3')
END
IF @C <= 400
BEGIN
INSERT INTO @Table (RevId) VALUES ('D4')
END
IF @C <= 1
BEGIN
INSERT INTO @Table (RevId) VALUES ('E5')
END
SET @C = @C + 1
END
--save starting counts of test data to temp table to compare with later
IF OBJECT_ID('tempdb..#StartingCounts') IS NOT NULL
BEGIN
DROP TABLE #StartingCounts
END
SELECT
RevId
,COUNT(*) as Occurrences
INTO #StartingCounts
FROM
@Table
GROUP BY
RevId
ORDER BY
RevId
/************************ This is the main method **********************************/
--clear temp table that is the main processing logic
IF OBJECT_ID('tempdb..#RowNumsToChange') IS NOT NULL
BEGIN
DROP TABLE #RowNumsToChange
END
--figure out how many records there are and how many there should be for each RevId
;WITH cteTargetNumbers AS (
SELECT
RevId
--,COUNT(*) as RevIdCount
--,SUM(COUNT(*)) OVER (PARTITION BY 1) / COUNT(*) OVER (PARTITION BY 1) +
--CASE
--WHEN ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY NEWID()) <=
--SUM(COUNT(*)) OVER (PARTITION BY 1) % COUNT(*) OVER (PARTITION BY 1)
--THEN 1
--ELSE 0
--END as TargetNumOfRecords
,SUM(COUNT(*)) OVER (PARTITION BY 1) / COUNT(*) OVER (PARTITION BY 1) +
CASE
WHEN ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY NEWID()) <=
SUM(COUNT(*)) OVER (PARTITION BY 1) % COUNT(*) OVER (PARTITION BY 1)
THEN 1
ELSE 0
END - COUNT(*) AS NumRecordsToUpdate
FROM
@Table
GROUP BY
RevId
)
, cteEndRowNumsToChange AS (
SELECT *
,SUM(CASE WHEN NumRecordsToUpdate > 0 THEN NumRecordsToUpdate ELSE 0 END)
OVER (PARTITION BY 1 ORDER BY RevId) AS ChangeEndRowNum
FROM
cteTargetNumbers
)
SELECT
*
,LAG(ChangeEndRowNum,1,0) OVER (PARTITION BY 1 ORDER BY RevId) as ChangeStartRowNum
INTO #RowNumsToChange
FROM
cteEndRowNumsToChange
;WITH cteOriginalTableRowNum AS (
SELECT
RevId
,ROW_NUMBER() OVER (PARTITION BY RevId ORDER BY (SELECT 0)) as RowNumByRevId
FROM
@Table t
)
, cteRecordsAllowedToChange AS (
SELECT
o.RevId
,o.RowNumByRevId
,ROW_NUMBER() OVER (PARTITION BY 1 ORDER BY (SELECT 0)) as ChangeRowNum
FROM
cteOriginalTableRowNum o
INNER JOIN #RowNumsToChange t
ON o.RevId = t.RevId
AND t.NumRecordsToUpdate < 0
AND o.RowNumByRevId <= ABS(t.NumRecordsToUpdate)
)
UPDATE o
SET RevId = u.RevId
FROM
cteOriginalTableRowNum o
INNER JOIN cteRecordsAllowedToChange c
ON o.RevId = c.RevId
AND o.RowNumByRevId = c.RowNumByRevId
INNER JOIN #RowNumsToChange u
ON c.ChangeRowNum > u.ChangeStartRowNum
AND c.ChangeRowNum <= u.ChangeEndRowNum
AND u.NumRecordsToUpdate > 0
IF OBJECT_ID('tempdb..#RowNumsToChange') IS NOT NULL
BEGIN
DROP TABLE #RowNumsToChange
END
/***************************** End of Main Method *******************************/
-- Compare the results and clean up
;WITH ctePostUpdateResults AS (
SELECT
RevId
,COUNT(*) as AfterChangeOccurrences
FROM
@Table
GROUP BY
RevId
)
SELECT *
FROM
#StartingCounts s
INNER JOIN ctePostUpdateResults r
ON s.RevId = r.RevId
ORDER BY
s.RevId
IF OBJECT_ID('tempdb..#StartingCounts') IS NOT NULL
BEGIN
DROP TABLE #StartingCounts
END
Since you've given no rules for how you'd like the balancing to operate, we're left to speculate. Here's an approach that finds the most overrepresented value and then an underrepresented value that can take on the entire overage.
I have no idea how optimal this is, and it will probably run in an infinite loop without more logic.
declare @balance int = 125;
declare @cnt_over int;
declare @cnt_under int;
declare @revID_overrepresented varchar(32);
declare @revID_underrepresented varchar(32);
declare @rowcount int = 1;
while @rowcount > 0
begin
select top 1 @revID_overrepresented = RevID, @cnt_over = count(*)
from T
group by RevID
having count(*) > @balance
order by count(*) desc
select top 1 @revID_underrepresented = RevID, @cnt_under = count(*)
from T
group by RevID
having count(*) <= 2 * @balance - @cnt_over
order by count(*) desc
update top (@cnt_over - @balance) T
set RevId = @revID_underrepresented
where RevId = @revID_overrepresented;
set @rowcount = @@rowcount;
end
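Note that if either SELECT finds no candidate group, the variables keep their previous values, so a production version should check @@ROWCOUNT after each SELECT (and reset the variables each pass) before running the UPDATE; the infinite-loop warning above stands.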
The problem is I don't even know what you mean by balance... You say it needs to be evenly represented, but it seems like you want it to be 125. 125 is not "even"; it is just 125.
I can't tell what you are trying to do, but I'm guessing this is not really a SQL problem. You can use SQL to help, though. Here is some helpful SQL for you; you can use it in your language of choice to solve the problem.
Find the RevID values and their counts:
SELECT RevID, COUNT(*)
FROM MyTable
GROUP BY RevID
Update @X rows (with RevID of value @RevID) to a new value @NewValue:
UPDATE TOP (@X) MyTable
SET RevID = @NewValue
WHERE RevID = @RevID
Using these two queries you should be able to apply your business rules (which you never specified) in a loop or whatever to change the data.
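To illustrate, here is a minimal hedged sketch that combines those two queries into such a loop, assuming a target of 125 per value; the variable names and the stopping rule are illustrative, not from the question:
DECLARE @Target INT = 125;
DECLARE @Over VARCHAR(32), @Under VARCHAR(32), @Surplus INT, @Deficit INT, @Move INT;
WHILE 1 = 1
BEGIN
    -- most overrepresented value and its surplus above target
    SELECT TOP (1) @Over = RevID, @Surplus = COUNT(*) - @Target
    FROM MyTable
    GROUP BY RevID
    HAVING COUNT(*) > @Target
    ORDER BY COUNT(*) DESC;
    IF @@ROWCOUNT = 0 BREAK; -- nothing above target: done
    -- most underrepresented value and its deficit below target
    SELECT TOP (1) @Under = RevID, @Deficit = @Target - COUNT(*)
    FROM MyTable
    GROUP BY RevID
    HAVING COUNT(*) < @Target
    ORDER BY COUNT(*) ASC;
    IF @@ROWCOUNT = 0 BREAK; -- nothing below target: done
    -- move only as many rows as both sides allow
    SET @Move = CASE WHEN @Surplus < @Deficit THEN @Surplus ELSE @Deficit END;
    UPDATE TOP (@Move) MyTable
    SET RevID = @Under
    WHERE RevID = @Over;
END
Each pass moves at most the smaller of the surplus and the deficit, so every iteration strictly reduces the total imbalance and the loop terminates.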
Related
I have orders and users. I distribute the orders evenly across users.
I need to redistribute when new users are added, and the algorithm must take into account the difference between what has been fulfilled and the limit. If a user has already met their limit in the first distribution, they are not included in the new distribution, and their limit column is set to the value from the Made column.
declare @orderCount int
set @orderCount = 50 --Orders for Distribution
--Result table
declare @t table (
users char(3),
limit int,
Made int
)
--Add users
insert into @t (users, limit, Made) values
('us1',0,0),
('us2',0,0),
('us3',0,0)
--Table for the Distribution algorithm
declare @c table (
users char(3),
limit int,
Made int,
Cnt int,
Rn int
)
--Count users and row_number
insert into @c
select
*,
COUNT(*) OVER () as Cnt,
ROW_NUMBER() OVER (ORDER BY users) as Rn
from
@t
--var for check new distribution > made
DECLARE @check int = 0
--Distribution
update t
set @check = (@orderCount/Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END,
limit=
CASE WHEN
(@check >= t.Made)
THEN
@check
ELSE
t.Made
END
FROM @t t
INNER JOIN @c cn ON cn.users=t.users
--Test data
UPDATE @t SET Made=12 WHERE users='us1'
UPDATE @t SET Made=10 WHERE users='us2'
UPDATE @t SET Made=5 WHERE users='us3'
--Check result
SELECT * FROM @t
--add to distribution new users
INSERT INTO @t (users, limit, Made) values ('us4',0,0)
INSERT INTO @t (users, limit, Made) values ('us5',0,0)
--Clear table
DELETE FROM @c
--Check new data
insert into @c
select
*,
COUNT(*) OVER () as Cnt,
ROW_NUMBER() OVER (ORDER BY users) as Rn
from
@t
--Distribution
update t
set @check = (@orderCount/Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END,
limit=
CASE WHEN
(@check >= t.Made)
THEN
@check
ELSE
t.Made
END
FROM @t t
INNER JOIN @c cn ON cn.users=t.users
--Check result
SELECT * FROM @t
It divides the number of orders among the users: 50/3 ~ 16 orders per user. If I add 2 new users, then 50/5 ~ 10. But user 1 has already made 12 orders, which is more than the new distribution (10), so 12 is written into his limit column, and 50 - 12 = 38. Then 38/4 ~ 9 orders for the remaining users.
SELECT x.users,
x.work,
x.Made,
sum(CASE WHEN Made>NewLimit THEN Made ELSE 0 END) OVER() as Dif,
count(CASE WHEN Made<NewLimit THEN NewLimit END) OVER() as Cnt,
CASE WHEN Made>NewLimit THEN 0 ELSE 1 END as IsUsed,
(CASE WHEN Made<NewLimit THEN
ROW_NUMBER() OVER (PARTITION BY x.work, (case when Made<NewLimit then 1 else 0 end) ORDER BY work) ELSE 0 END) as Rn
FROM (
SELECT t.users ,(@orderCount/Cnt) + CASE WHEN @orderCount % Cnt >= Rn THEN 1 ELSE 0 END as NewLimit,
t.Made,
t.work
FROM @t t
INNER JOIN @c cn ON cn.users=t.users
) x
I sum the amounts where the old distribution exceeds the new one. I set the IsUsed flag for the rows that qualify for the new distribution and count them. Then I subtract from the total orders the amounts already locked in where the new limit is exceeded, and divide the remaining orders among the users that qualify for the new distribution.
Sample data:
create table #temp (id int, qty int, checkvalue int)
insert into #temp values (1,1,3)
insert into #temp values (2,2,3)
insert into #temp values (3,1,3)
insert into #temp values (4,1,3)
According to the data above, I would like to show the exact number of lines, from top to bottom, where sum(qty) = checkvalue. Note that checkvalue is the same for all records at all times. For the sample data above, the desired output is:
Id Qty checkValue
1 1 3
2 2 3
Because 1+2=3, no more data needs to be shown. If checkvalue were 4, we would show the third record (Id: 3, Qty: 1, checkValue: 4) as well.
This is the code I use to handle this problem, and it works very well.
declare @checkValue int = (select top 1 checkvalue from #temp);
declare @counter int = 0, @sumValue int = 0;
while @sumValue < @checkValue
begin
set @counter = @counter + 1;
set @sumValue = @sumValue + (
select t.qty from
(
SELECT * FROM (
SELECT
ROW_NUMBER() OVER (ORDER BY id ASC) AS rownumber,
id,qty,checkvalue
FROM #temp
) AS foo
WHERE rownumber = @counter
) t
)
end
declare @sql nvarchar(255) = 'select top '+cast(@counter as varchar(5))+' * from #temp'
EXECUTE sp_executesql @sql, N'@counter int', @counter = @counter;
However, I am not sure if this is the best way to deal with it and wonder if there is a better approach. There are many professionals here and I'd like to hear from them about what they think about my approach and how we can improve it. Any advice would be appreciated!
Try this:
select id, qty, checkvalue from (
select t1.*,
sum(t1.qty) over (partition by t2.id) [sum]
from #temp [t1] join #temp [t2] on t1.id <= t2.id
) a where checkvalue = [sum]
Smart self-join is all you need :)
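One design note: the triangular self-join reads on the order of n²/2 rows, so it can get expensive on larger tables; a windowed running total, as in the following answer, touches each row only once.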
For SQL Server 2012, and onwards, you can easily achieve this using ROWS BETWEEN in your OVER clause and the use of a CTE:
WITH Running AS(
SELECT *,
SUM(qty) OVER (ORDER BY id
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS RunningQty
FROM #temp t)
SELECT id, qty, checkvalue
FROM Running
WHERE RunningQty <= checkvalue;
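Against the sample data the running totals are 1 and 3, so ids 1 and 2 are returned. Note that the <= filter simply stops before the row that would overshoot, so if no prefix sums to exactly checkvalue the result ends at the last row still within it.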
One basic improvement is to try to reduce the number of iterations. You're incrementing by 1, but if you repurpose the logic behind binary searching, you'd get something close to this:
DECLARE @RoughAverage int = 1 -- Some arbitrary value. The closer it is to the real average, the faster things should be.
DECLARE @CheckValue int = (SELECT TOP 1 checkvalue FROM #temp)
DECLARE @Sum int = 0
WHILE 1 = 1 -- Refer to BREAK below.
BEGIN
SELECT TOP (@RoughAverage) @Sum = SUM(qty) OVER(ORDER BY id)
FROM #temp
ORDER BY id
IF @Sum = @CheckValue
BREAK -- Indicating you reached your objective.
ELSE
SET @RoughAverage = @CheckValue - @Sum -- Most likely incomplete like this.
END
For SQL Server 2008 you can use a recursive CTE. TOP 1 WITH TIES limits the result to the first combination; remove it to see all combinations.
with cte as (
select
*, rn = row_number() over (order by id)
from
#temp
)
, rcte as (
select
i = id, id, qty, sumV = qty, checkvalue, rn
from
cte
union all
select
a.id, b.id, b.qty, a.sumV + b.qty, a.checkvalue, b.rn
from
rcte a
join cte b on a.rn + 1 = b.rn
where
a.sumV < b.checkvalue
)
select
top 1 with ties id, qty, checkvalue
from (
select
*, needed = max(case when sumV = checkvalue then 1 else 0 end) over (partition by i)
from
rcte
) t
where
needed = 1
order by dense_rank() over (order by i)
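One caveat: a recursive CTE is limited to 100 recursion levels by default, and this one advances one row per level, so for longer prefixes append OPTION (MAXRECURSION 0) to the final SELECT.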
For each group in the table I need to split that group into a specific number of records (batches) and mark each record in a batch with the corresponding batch id.
Right now, my cursor-based implementation is IMHO clumsy. It takes 1 minute to split a set of 10,000 rows, which is, needless to say, very slow. Any clues how to make it work faster?
Here is a test script.
-- Needed to generate big data
DECLARE @Naturals TABLE (ID INT)
INSERT INTO @Naturals (ID)
VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)
DECLARE @TestData TABLE
(
LINK INT,
F_House INT,
F_Batch UNIQUEIDENTIFIER
)
INSERT INTO @TestData (LINK, F_House)
SELECT ROW_NUMBER() OVER (order by T1.ID), ROW_NUMBER() OVER (order by T1.ID) % 5
FROM
@Naturals T1
CROSS JOIN @Naturals T2
CROSS JOIN @Naturals T3
CROSS JOIN @Naturals T4
--CROSS JOIN @Naturals T5 -- that would give us 100 000
-- Finished preparing Data (10 000 rows)
SELECT 'Processing:', COUNT(*) FROM @TestData
DECLARE @batchSize INT -- That would be amount of rows in each batch
SET @batchSize = 50
IF OBJECT_ID('tempdb..#G') IS NOT NULL -- Split set of data into groups. We need to create batches in each group.
DROP TABLE #G
SELECT
buf.F_House, COUNT(*) AS GroupCount
INTO #G
FROM @TestData buf
GROUP BY buf.F_House -- That logic could be tricky one. Right now simplifying
DECLARE @F_House INT -- That would be group key
DECLARE db_cursor CURSOR FOR
SELECT F_House
FROM #G
ORDER BY F_House
OPEN db_cursor FETCH NEXT FROM db_cursor INTO @F_House
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT 'Processing house group: ' + CAST(@F_House AS VARCHAR(10))
-- For each group let's create batches
WHILE EXISTS (SELECT 1 FROM @TestData AS itmds
WHERE itmds.F_House = @F_House
AND itmds.F_Batch IS NULL
)
BEGIN
DECLARE @batchLink UNIQUEIDENTIFIER
SET @batchLink = NEWID()
UPDATE itmds
SET itmds.F_Batch = @batchLink
FROM @TestData AS itmds
WHERE itmds.F_House = @F_House
AND itmds.F_Batch IS NULL
AND itmds.LINK IN
(
SELECT TOP (@batchSize)
sub.LINK
FROM @TestData sub
WHERE sub.F_House = @F_House
AND sub.F_Batch IS NULL
)
END
FETCH NEXT FROM db_cursor INTO @F_House
END
CLOSE db_cursor
DEALLOCATE db_cursor
SELECT
buf.F_House, COUNT(distinct F_Batch) AS BatchCountInHouse
FROM @TestData buf
GROUP BY buf.F_House
ORDER BY buf.F_House
Expected output (considering batchSize = 50):
10,000 rows / 5 houses = 2,000 rows/house
2,000 rows/house / 50 (batchSize) = 40 batches/house
This is set-based, avoiding a cursor. The assigned F_Batch is a BIGINT:
;with baseRowNum as
(
SELECT LINK, F_House,
-- row number per F_House
Row_Number() Over (PARTITION BY F_House ORDER BY LINK) AS rn
FROM @TestData
)
SELECT *,
-- combine F_House & group number into a unique result
F_House * 10000 +
-- start a new sub group for every F_House or after #batchSize rows
Sum(CASE WHEN rn % @batchSize = 1 THEN 1 ELSE 0 END)
Over (ORDER BY F_House, rn
ROWS Unbounded Preceding) AS F_Batch
FROM baseRowNum
If you really need a UNIQUEIDENTIFIER you can join back:
;with baseRowNums as
(
SELECT LINK, F_House,
-- row number per F_House
Row_Number() Over (PARTITION BY F_House ORDER BY LINK) AS rn
FROM @TestData
)
,batchNums as
(
SELECT *,
-- combine F_House & group number into a unique result
F_House * 10000 +
-- start a new sub group for every F_House or after #batchSize rows
Sum(CASE WHEN rn % @batchSize = 1 THEN 1 ELSE 0 END)
Over (ORDER BY F_House, rn
ROWS Unbounded Preceding) AS F_Batch
FROM baseRowNums
)
,GUIDs as
(
select F_Batch, MAX(newid()) as GUID
from batchNums
group by F_Batch
)
-- select * from
--from batchNums join GUIDs
-- on batchNums.F_Batch = GUIDs.F_Batch
select F_House, GUID, count(*)
from batchNums join GUIDs
on batchNums.F_Batch = GUIDs.F_Batch
group by F_House, GUID
order by F_House, count(*) desc
See Fiddle.
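One design note on this approach: the F_House * 10000 offset assumes the running batch number, which is cumulative across all houses here, stays below 10,000; for bigger data, widen the multiplier or partition the running sum by F_House and combine the key differently.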
I would use an inner loop inside of an outer loop referencing a grouping level.
Then you can iterate from the grouping down into the batch grouping. However, as you pointed out, speed is an issue with table variables and CTEs; for that reason I tested in this case with a tempdb # table. This way I could index after the insert and optimize performance. I can run a million rows of aggregation logic in about 16 seconds, which I consider acceptable performance. But my dev box is an i7 6700 with 16 GB of DDR4 and an SSD; performance times may vary based on hardware, obviously.
--Make up some fake data for example
DECLARE
@Start INT = 1
, @End INT = 100000
;
SET NOCOUNT ON;
IF OBJECT_ID('tempdb..#Temp') IS NOT NULL
DROP TABLE #Temp
CREATE Table #Temp (Id INT, Grp int, Val VARCHAR(8), BatchGroup int)
WHILE @Start <= @End
BEGIN
INSERT INTO #Temp (Id, Grp, Val)
VALUES (@Start, CAST(RAND() * 8 AS INT) + 1, LEFT(NEWID(), 8))
SELECT @Start += 1;
END
CREATE CLUSTERED INDEX IX_Temp_Grp ON #Temp(Grp, BatchGroup)
--Determine Batch Size You want for groupings
DECLARE @BatchSize INT = 1000;
--Let's randomly mess with groupings
DECLARE @X INT = 1
WHILE @X <= 4
BEGIN
; WITH x AS
(
SELECT TOP (@BatchSize * 4)
Id
, Grp
, Val
FROM #Temp
WHERE Grp = CAST(RAND() * 8 AS INT) + 1
)
UPDATE x
SET Grp = CAST(RAND() * 8 AS INT) + 1
SELECT @X += 1
END
DECLARE
@CurrentGroup INT = 1
, @CurrentBatch INT = 1
WHILE @CurrentGroup <= (SELECT MAX(Grp) FROM #Temp) -- Exists (SELECT 1 FROM #Temp WHERE BatchGroup IS NULL)
BEGIN
WHILE EXISTS (SELECT 1 FROM #Temp WHERE Grp = @CurrentGroup AND BatchGroup IS NULL)
BEGIN
; WITH x AS
(
SELECT TOP (@BatchSize) *
FROM #Temp
WHERE Grp = @CurrentGroup
AND BatchGroup IS NULL
)
update x
SET BatchGroup = @CurrentBatch
SELECT @CurrentBatch += 1;
END
SELECT @CurrentBatch = 1
SELECT @CurrentGroup += 1;
END
--Proof
Select
Grp
, COUNT(DISTINCT Id)
, COUNT(DISTINCT BatchGroup)
From #Temp
GROUP BY Grp
Actually, I've tried NTILE() with cursors and it's quite fast (I mean, faster than 1 minute for 10,000 rows):
10,000 rows in 0 seconds.
100,000 rows in 3 seconds.
1,000,000 rows in 34 seconds.
10,000,000 rows in 6 minutes.
Complexity grows linearly, which is nice.
-- Needed to generate big data
DECLARE @Naturals TABLE (ID INT)
INSERT INTO @Naturals (ID)
VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)
DECLARE @TestData TABLE
(
LINK INT,
F_House INT,
F_Batch UNIQUEIDENTIFIER
)
INSERT INTO @TestData (LINK, F_House)
SELECT ROW_NUMBER() OVER (order by T1.ID), ROW_NUMBER() OVER (order by T1.ID) % 5
FROM
@Naturals T1
CROSS JOIN @Naturals T2
CROSS JOIN @Naturals T3
CROSS JOIN @Naturals T4
--CROSS JOIN @Naturals T5 -- that would give us 100 000
-- Finished preparing Data (10 000 rows)
SELECT 'Processing:', COUNT(*) FROM @TestData
DECLARE @batchSize INT -- That would be amount of rows in each batch
SET @batchSize = 50
IF OBJECT_ID('tempdb..#G') IS NOT NULL -- Split set of data into groups. We need to create batches in each group.
DROP TABLE #G
SELECT
buf.F_House, COUNT(*) AS GroupCount
INTO #G
FROM @TestData buf
GROUP BY buf.F_House -- That logic could be tricky one. Right now simplifying
DECLARE @F_House INT -- That would be group key
DECLARE db_cursor CURSOR FOR
SELECT F_House
FROM #G
ORDER BY F_House
OPEN db_cursor FETCH NEXT FROM db_cursor INTO @F_House
WHILE @@FETCH_STATUS = 0
BEGIN
PRINT 'Processing house group: ' + CAST(@F_House AS VARCHAR(10))
DECLARE @rowsInGroup INT
SELECT @rowsInGroup = COUNT(*) FROM @TestData
WHERE F_House = @F_House
IF OBJECT_ID('tempdb..#TileBatch') IS NOT NULL
DROP TABLE #TileBatch
SELECT
T.[NTile], NEWID() AS F_Batch
INTO #TileBatch
FROM
(
SELECT distinct
NTILE(@rowsInGroup / @batchSize) OVER (ORDER BY LINK) AS [NTile]
from
@TestData
WHERE F_House = @F_House
) T
UPDATE D
SET D.F_Batch = B.F_Batch
FROM
@TestData D
INNER JOIN
(
SELECT
*, NTILE(@rowsInGroup / @batchSize) OVER (ORDER BY LINK) AS [NTile]
from
@TestData
WHERE F_House = @F_House
) DT ON D.LINK = DT.LINK
INNER JOIN
#TileBatch B ON DT.[NTile] = B.[NTile]
WHERE D.F_House = @F_House
FETCH NEXT FROM db_cursor INTO @F_House
END
CLOSE db_cursor
DEALLOCATE db_cursor
SELECT
buf.F_House, COUNT(distinct F_Batch) AS BatchCountInHouse
FROM @TestData buf
GROUP BY buf.F_House
ORDER BY buf.F_House
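One caveat: NTILE(@rowsInGroup / @batchSize) uses integer division, so a house with fewer rows than @batchSize would call NTILE(0) and fail, and any remainder rows are spread across the existing tiles (making some batches one row larger) rather than forming one short final batch.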
I would like to select some rows multiple times, depending on the column's value.
Source table
Article | Count
===============
A | 1
B | 4
C | 2
Wanted result
Article
===============
A
B
B
B
B
C
C
Any hints or samples, please?
You could use:
SELECT m.Article
FROM mytable m
CROSS APPLY (VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10)) AS s(n)
WHERE s.n <= m.[Count];
LiveDemo
Note: CROSS APPLY with any tally table; here, values up to 10.
Related: What is the best way to create and populate a numbers table?
You could also use a recursive CTE, which works with numbers > 10 (here up to 1000):
With NumberSequence( Number ) as
(
Select 0 as Number
union all
Select Number + 1
from NumberSequence
where Number BETWEEN 0 AND 1000
)
SELECT Article
FROM ArticleCounts
CROSS APPLY NumberSequence
WHERE Number BETWEEN 1 AND [Count]
ORDER BY Article
Option (MaxRecursion 0)
Demo
A numbers table will certainly be the best option:
http://sqlperformance.com/2013/01/t-sql-queries/generate-a-set-2
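As a minimal sketch of that idea (the table and column names below are illustrative, not from the linked article), build a persisted numbers table once and reuse it:
-- Build a persisted 1..10,000 numbers table once (names are illustrative).
-- ISNULL forces the column NOT NULL so it can carry the primary key.
SELECT TOP (10000)
    ISNULL(CONVERT(INT, ROW_NUMBER() OVER (ORDER BY (SELECT NULL))), 0) AS n
INTO dbo.Numbers
FROM sys.all_objects a
CROSS JOIN sys.all_objects b;
ALTER TABLE dbo.Numbers ADD CONSTRAINT PK_Numbers PRIMARY KEY CLUSTERED (n);
-- The question's output then becomes a simple join:
SELECT t.Article
FROM mytable t
JOIN dbo.Numbers num ON num.n <= t.[Count]
ORDER BY t.Article;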
Please check the following SQL script.
Before executing the SELECT statement, note that I used a user-defined function to simulate a numbers table.
You can find the SQL code for a numbers table in SQL Server at the referred tutorial.
----create table myTempTbl (Article varchar(10), Count int)
--insert into myTempTbl select 'A',1
--insert into myTempTbl select 'B',4
--insert into myTempTbl select 'C',2
select t.*
from myTempTbl t
cross apply dbo.NumbersTable(1,100,1) n
where n.i <= t.Count
order by t.Article
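The answer references dbo.NumbersTable without showing it; as an assumption, an inline table-valued function matching the usage above - NumbersTable(start, end, step) returning a column i - might look like this (a reconstruction, not the tutorial's code):
-- Assumed reconstruction of dbo.NumbersTable; covers up to 100 values per call.
CREATE FUNCTION dbo.NumbersTable (@start INT, @end INT, @step INT)
RETURNS TABLE
AS
RETURN
(
    SELECT x.i
    FROM (
        SELECT @start + (ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) - 1) * @step AS i
        FROM (VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0)) a(n)       -- 10 rows
        CROSS JOIN (VALUES (0),(0),(0),(0),(0),(0),(0),(0),(0),(0)) b(n) -- x10 = 100 rows
    ) x
    WHERE x.i <= @end
);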
One more CTE:
with cte_t as (
select Article, 1 as i
from mytable
group by Article
union all
select t.Article, ctet.i + 1
from mytable t
join cte_t ctet
on ctet.Article = t.Article
and ctet.i < t.[Count]
)
select cte_t.Article
from cte_t
order by cte_t.Article
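Note that each recursion level here adds one more copy per article, so a Count above 100 hits the default recursion limit; append OPTION (MAXRECURSION 0) to the final SELECT in that case.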
You can obtain the output using a simple WHILE loop:
DECLARE @table TABLE
(ID int ,Article varchar(5),[Count] int)
INSERT INTO @table
(ID,Article,Count)
VALUES
(1,'A',1),(2,'B',4),(3,'C',2)
DECLARE @temp TABLE
(Article varchar(5))
DECLARE @Cnt1 INT
DECLARE @Cnt2 INT
DECLARE @max INT
SET @max = 0
SET @Cnt1 = (SELECT Count(Article) FROM @table)
WHILE (@max < @Cnt1)
BEGIN
SET @max = @max + 1
SET @Cnt2 = (SELECT [Count] FROM @table WHERE ID = @max)
WHILE (@Cnt2 > 0)
BEGIN
INSERT INTO @temp
SELECT Article FROM @table WHERE ID = @max
SET @Cnt2 = @Cnt2 - 1
END
END
SELECT * FROM @temp
I've got a SQL Server db with quite a few dupes in it. Removing the dupes manually is just not going to be fun, so I was wondering if there is any sort of sql programming or scripting I can do to automate it.
Below is my query that returns the ID and the Code of the duplicates.
select a.ID, a.Code
from Table1 a
inner join (
SELECT Code
FROM Table1 GROUP BY Code HAVING COUNT(Code)>1)
x on x.Code= a.Code
I'll get a return like this, for example:
5163 51727
5164 51727
5165 51727
5166 51728
5167 51728
5168 51728
This snippet shows three rows for each ID/Code (so a primary "good" record and two dupes). However, this isn't always the case - there can be up to [n] dupes, although 2-3 seems to be the norm.
I just want to somehow loop through this result set and delete everything but one record. THE RECORDS TO DELETE ARE ARBITRARY, as any of them can be "kept".
You can use ROW_NUMBER to drive your delete, i.e.:
CREATE TABLE #table1
(id INT,
code int
);
WITH cte AS
(select a.ID, a.Code, ROW_NUMBER() OVER(PARTITION BY Code ORDER BY ID) AS rn
from #Table1 a
)
DELETE x
FROM #table1 x
JOIN cte ON x.id = cte.id
WHERE cte.rn > 1
But...
If you are going to be doing a lot of deletes from a very large table, you might be better off selecting the rows you need into a temp table, then truncating your table and re-inserting the rows you need.
That keeps the transaction log from getting hammered and your clustered index from getting fragmented, and it should be quicker too!
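A hedged sketch of that alternative, using the question's table and column names (it assumes ID is not an IDENTITY column and nothing references Table1; adjust as needed):
-- Keep one arbitrary row per Code (here the smallest ID), then rebuild the table.
SELECT MIN(ID) AS ID, Code
INTO #Keep
FROM Table1
GROUP BY Code;
TRUNCATE TABLE Table1;   -- minimally logged, unlike a large DELETE
INSERT INTO Table1 (ID, Code)
SELECT ID, Code
FROM #Keep;
DROP TABLE #Keep;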
It is actually very simple:
DELETE FROM Table1
WHERE ID NOT IN
(SELECT MAX(ID)
FROM Table1
GROUP BY CODE)
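Note this keeps the row with the highest ID in each Code group; it assumes ID is unique and non-NULL (e.g., the primary key).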
Self-join solution with a performance test vs. the CTE:
create table codes(
id int IDENTITY(1,1) NOT NULL,
code int null,
CONSTRAINT [PK_codes_id] PRIMARY KEY CLUSTERED
(
id ASC
))
declare @counter int, @code int
set @counter = 1
set @code = 1
while (@counter <= 1000000)
begin
print ABS(Checksum(NewID()) % 1000)
insert into codes(code) select ABS(Checksum(NewID()) % 1000)
set @counter = @counter + 1
end
GO
set statistics time on;
delete a
from codes a left join(
select MIN(id) as id from codes
group by code) b
on a.id = b.id
where b.id is null
set statistics time off;
--set statistics time on;
-- WITH cte AS
-- (select a.id, a.code, ROW_NUMBER() OVER(PARTITION by code ORDER BY id) AS rn
-- from codes a
-- )
-- delete x
-- FROM codes x
-- JOIN cte ON x.id = cte.id
-- WHERE cte.rn > 1
--set statistics time off;
Performance test results:
With Join:
SQL Server Execution Times:
CPU time = 3198 ms, elapsed time = 3200 ms.
(999000 row(s) affected)
With CTE:
SQL Server Execution Times:
CPU time = 4197 ms, elapsed time = 4229 ms.
(999000 row(s) affected)
It's basically done like this:
WITH CTE_Dup AS
(
SELECT *,
ROW_NUMBER() OVER (PARTITION BY SalesOrderno, ItemNo ORDER BY SalesOrderno, ItemNo)
AS ROW_NO
from dbo.SalesOrderDetails
)
DELETE FROM CTE_Dup WHERE ROW_NO > 1;
Notice: you must include in the PARTITION BY all the fields that define a duplicate!
Here is another example:
CREATE TABLE #Table (C1 INT,C2 VARCHAR(10))
INSERT INTO #Table VALUES (1,'SQL Server')
INSERT INTO #Table VALUES (1,'SQL Server')
INSERT INTO #Table VALUES (2,'Oracle')
SELECT * FROM #Table
;WITH Delete_Duplicate_Row_cte
AS (SELECT ROW_NUMBER() OVER (PARTITION BY C1, C2 ORDER BY C1, C2) AS ROW_NUM, *
FROM #Table )
DELETE FROM Delete_Duplicate_Row_cte WHERE ROW_NUM > 1
SELECT * FROM #Table