Where clause on Running total - sql

I have this table which stores containers by region and the number of coffee pouches in each of the containers.
if object_id( 'dbo.Container' ) is not null
drop table dbo.Container
go
create table dbo.Container
(
Id int not null,
Region int not null,
NumberOfCoffeePouches int not null,
constraint pkc_Container__Id primary key clustered(Id asc)
)
go
insert into dbo.Container
( Id , Region , NumberOfCoffeePouches )
values
( 1, 1, 10 ),
( 2, 1, 30 ),
( 3, 1, 5),
( 4, 1, 7),
( 5, 1, 1),
( 6, 1, 3),
( 7, 2, 4),
( 8, 2, 4),
( 9, 2, 4)
I need to list out the container Ids that will be used to fulfill an order of, say, 50 coffee pouches. Over-supplying is OK.
Here is the query I have come up with:
declare @RequiredCoffeePouches int = 50
select
sq2.Id,
sq2.NumberOfCoffeePouches,
sq2.RunningTotal,
sq2.LagRunningTotal
from
(
select
sq1.Id,
sq1.NumberOfCoffeePouches,
sq1.RunningTotal,
lag(sq1.RunningTotal, 1, 0) over (order by sq1.Id asc)
as 'LagRunningTotal'
from
(
select
c.Id,
c.NumberOfCoffeePouches,
sum(c.NumberOfCoffeePouches)
over (order by c.Id asc) as 'RunningTotal'
from
dbo.Container as c
where
c.Region = 1
) as sq1
) as sq2
where
sq2.LagRunningTotal <= @RequiredCoffeePouches
It gives the expected result
Id NumberOfCoffeePouches RunningTotal LagRunningTotal
----------- --------------------- ------------ ---------------
1 10 10 0
2 30 40 10
3 5 45 40
4 7 52 45
Question:
Is there a better and more optimized way to achieve this?
Especially since the Container table is very large, I think the sub query sq1 will unnecessarily calculate the RunningTotal for all the containers in the region. I was wondering if there is any way to have sq1 stop processing more rows once the RunningTotal exceeds @RequiredCoffeePouches.

Two things:
Moving your WHERE clause inside of the relevant sub-select can greatly increase the speed of the query because it'll pull less data. Using your example:
SELECT
sq2.Id,
sq2.NumberOfCoffeePouches,
sq2.RunningTotal,
sq2.LagRunningTotal
FROM
(
SELECT
sq1.Id,
sq1.NumberOfCoffeePouches,
sq1.RunningTotal,
lag(sq1.RunningTotal, 1, 0) over (order by sq1.Id asc) AS 'LagRunningTotal'
FROM
(
SELECT
c.Id,
c.NumberOfCoffeePouches,
SUM(c.NumberOfCoffeePouches) OVER (order by c.Id asc) AS 'RunningTotal'
FROM dbo.Container AS c
WHERE c.Region = 1
) AS sq1
WHERE sq1.RunningTotal - sq1.NumberOfCoffeePouches <= @RequiredCoffeePouches -- same test as LagRunningTotal <= @RequiredCoffeePouches, written against columns visible at this level
) AS sq2
CTEs can also improve performance:
;WITH sql1CTE AS (
SELECT
c.Id,
c.NumberOfCoffeePouches,
SUM(c.NumberOfCoffeePouches) OVER (order by c.Id asc) AS 'RunningTotal'
FROM dbo.Container AS c
WHERE c.Region = 1
),
sql2CTE AS (
SELECT
Id,
NumberOfCoffeePouches,
RunningTotal,
lag(RunningTotal, 1, 0) over (order by Id asc) AS 'LagRunningTotal'
FROM sql1CTE
WHERE RunningTotal - NumberOfCoffeePouches <= @RequiredCoffeePouches -- equivalent to LagRunningTotal <= @RequiredCoffeePouches, which cannot be referenced in the WHERE of the same SELECT
)
SELECT
Id,
NumberOfCoffeePouches,
RunningTotal,
LagRunningTotal
FROM sql2CTE
SQL Server CTE Basics
If you're using SSMS, select "Include Client Statistics" and "Include Actual Execution Plan" to keep track of how your query performs while you're crafting it.
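As a side note, here is a minimal sketch (not tested against your full data) of a variant without the LAG pass: LAG(RunningTotal, 1, 0) ordered by Id is always RunningTotal minus the current row's NumberOfCoffeePouches, so the same filter can be written against a single CTE. Spelling out ROWS UNBOUNDED PRECEDING typically lets SQL Server avoid the on-disk worktable that the default RANGE frame uses:
declare @RequiredCoffeePouches int = 50;

with RunningTotals as
(
    select
        c.Id,
        c.NumberOfCoffeePouches,
        sum(c.NumberOfCoffeePouches)
            over (order by c.Id asc rows unbounded preceding) as RunningTotal
    from dbo.Container as c
    where c.Region = 1
)
select
    rt.Id,
    rt.NumberOfCoffeePouches,
    rt.RunningTotal
from RunningTotals as rt
-- RunningTotal - NumberOfCoffeePouches is exactly what LAG(RunningTotal, 1, 0) returns here,
-- so this keeps containers until the previously picked containers already cover the order.
where rt.RunningTotal - rt.NumberOfCoffeePouches <= @RequiredCoffeePouches
order by rt.Id;
It still has to compute the running total for every container in the region before filtering, though; SQL Server will not stop the scan early for you.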

Related

SQL - Getting Sum of 'X' Consecutive Values where X is an Integer in another Row (With Categories)

Say for example, I wanted to SUM all the values from the current row until the provided count. See table below:
For example:
Category A, Row 1: 10+15+25 = 50 (because it adds Rows 1 to 3 due to Count)
Category A, Row 2: 15+25+30+40 = 110 (because it adds Rows 2 to 5 due to count)
Category A, Row 5: 40+60 = 100 (because it adds Rows 5 and 6; the count is 5, but the category ends at Row 6, so it sums all available data, which is Rows 5 and 6 only, giving 100).
Same goes for Category B.
How do I do this?
You can do this using window functions:
with tt as (
select t.*,
sum(quantity) over (partition by category order by rownumber) as running_quantity,
max(rownumber) over (partition by category) as max_rownumber
from t
)
select tt.*,
coalesce(tt2.running_quantity, ttlast.running_quantity) - tt.running_quantity + tt.quantity
from tt left join
tt tt2
on tt2.category = tt.category and
tt2.rownumber = tt.rownumber + tt.count - 1 left join
tt ttlast
on ttlast.category = tt.category and
ttlast.rownumber = ttlast.max_rownumber
order by category, rownumber;
I can imagine that under some circumstances this would be much faster -- particularly if the count values are relatively large. For small values of count, the lateral join is probably faster, but it is worth checking if performance is important.
Actually, a pure window functions approach is probably the best approach:
with tt as (
select t.*,
sum(quantity) over (partition by category order by rownumber) as running_quantity
from t
)
select tt.*,
(coalesce(lead(tt.running_quantity, tt.count - 1) over (partition by tt.category order by tt.rownumber),
first_value(tt.running_quantity) over (partition by tt.category order by tt.rownumber desc)
) - tt.running_quantity + tt.quantity
)
from tt
order by category, rownumber;
Here is a db<>fiddle.
Try this:
DECLARE @DataSource TABLE
(
[Category] CHAR(1)
,[Row Number] BIGINT
,[Quantity] INT
,[Count] INT
);
INSERT INTO @DataSource ([Category], [Row Number], [Quantity], [Count])
VALUES ('A', 1, 10, 3)
,('A', 2, 15, 4)
,('A', 3, 25, 2)
,('A', 4, 30, 1)
,('A', 5, 40, 5)
,('A', 6, 60, 2)
--
,('B', 1, 12, 2)
,('B', 2, 13, 3)
,('B', 3, 17, 1)
,('B', 4, 11, 2)
,('B', 5, 10, 5)
,('B', 6, 7, 3);
SELECT *
FROM @DataSource E
CROSS APPLY
(
SELECT SUM(I.[Quantity])
FROM @DataSource I
WHERE I.[Row Number] <= E.[Row Number] + E.[Count] - 1
AND I.[Row Number] >= E.[Row Number]
AND E.[Category] = I.[Category]
) DS ([Sum]);

SQL Server loop through a table for every 5 rows

I need to write a stored procedure or table function to return a new data table as a new data source.
I wish to loop through the original table in groups of 5 rows, based on the invoice ID column (which may not start from 1): the first 5 rows go to the left side of the new table, the second 5 rows to the right, the third 5 rows to the left again, and so on.
For example, here is the original table:
Here is the expected table:
Thanks in advance!
declare @rowCount int = 5;
with cte as (
select *,( (IN_InvoiceID-1) / @rowCount ) % 2 group1
,( (IN_InvoiceID-1) / @rowCount ) group2
,IN_InvoiceID % @rowCount group3
from T
)
-- select * from cte -- (debug peek; note that a CTE is scoped to the single statement that follows it)
select T1.INID,T1.IN_InvoiceID,T1.IN_InvoiceAmount,T2.INID,T2.IN_InvoiceID,T2.IN_InvoiceAmount
from CTE T1
left join CTE T2 on T2.group1 = 1 and T1.group2 = T2.group2-1 and T1.group3 = T2.group3
where T1.group1 = 0
Test DDL
CREATE TABLE T
([INID] varchar(38), [IN_InvoiceID] int, [IN_InvoiceAmount] int)
;
INSERT INTO T
([INID], [IN_InvoiceID], [IN_InvoiceAmount])
VALUES
('DB3E17E6-35C5-4121-93B1-F809BF6B2972', 1, 2999),
('3212F048-8213-4FCC-AB64-121485B77D4E43', 2, 3737),
('E3526373-A204-40F5-801C-7F8302A4E5E2', 3, 3175),
('76CC9C19-BF79-4E8A-8034-A33805AD3390', 4, 391),
('EC7A2FBC-B62D-4865-88DE-A8097975F125', 5, 1206),
('52AD3046-21331-4F0A-BD1D-67F232C54244', 6, 402),
('CA48F132-A9F5-4516-9E58-CDEE6644AAD1', 7, 1996),
('02E10C31-CAB2-4220-B66A-CEE5E67A9378', 8, 3906),
('98F1EEFF-B07A-4B65-87F4-E165264284DD', 9, 2575),
('91EBDD8B-B73C-470C-8900-DD66078483DB', 10, 2965),
('6E2490E5-C4DE-4833-877F-1590F7BDC1B8', 11, 1603),
('00985921-AC3C-4E3E-BAE1-7F58302F831A', 12, 1302)
;
Result:
Could you please check the article Display Data in Multiple Columns using SQL? It shows, with an example case, how a database developer can display a list of data rows in columnar form using the Row_Number() function and a modulo arithmetic expression.
You will need to add the additional columns from the same row; that part differs from the article's sample.
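To make that concrete, here is a minimal sketch of the Row_Number()-plus-modulo idea applied to the sample table T defined above (the column names come from that DDL; the article's exact query may differ):
;WITH Numbered AS
(
    SELECT INID, IN_InvoiceID, IN_InvoiceAmount,
           ROW_NUMBER() OVER (ORDER BY IN_InvoiceID) - 1 AS rn
    FROM T
)
SELECT
    L.INID, L.IN_InvoiceID, L.IN_InvoiceAmount,
    R.INID, R.IN_InvoiceID, R.IN_InvoiceAmount
FROM Numbered AS L
LEFT JOIN Numbered AS R
    ON  R.rn / 5 = L.rn / 5 + 1   -- the block of 5 immediately after L's block
    AND R.rn % 5 = L.rn % 5       -- same position within the block
WHERE (L.rn / 5) % 2 = 0          -- only even-numbered blocks form the left-hand side
ORDER BY L.IN_InvoiceID;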
Seems as if you want to split the table into 2 tables with alternating blocks of 5 rows. An easy way to do this would be:
1. Take the data into a temp table that has an extra column (let's say grouping_id).
2. Update the grouping_id so that each block of 5 rows shares the same id. You can use (in_invoiceId - 1) / 5 (integer division). After this step the first 5 rows will have grouping_id 0, the next 5 will have 1, the next will have 2 (assuming your invoice id is incremented by 1 for all rows).
3. Do a normal select with a where clause for odd and even grouping_id.
A sketch of these three steps follows the list.
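A minimal sketch of the three steps above, against the sample table T from the DDL earlier (the temp table name #Staging is mine, and it assumes IN_InvoiceID starts at 1 and increments by 1):
-- Step 1: stage the data with an extra grouping_id column.
SELECT INID, IN_InvoiceID, CAST(NULL AS INT) AS grouping_id
INTO #Staging
FROM T;

-- Step 2: every block of 5 consecutive invoices shares one grouping_id (0, 1, 2, ...).
UPDATE #Staging
SET grouping_id = (IN_InvoiceID - 1) / 5;

-- Step 3: even grouping_ids go to the left, odd grouping_ids to the right.
SELECT L.INID, L.IN_InvoiceID, R.INID, R.IN_InvoiceID
FROM #Staging L
LEFT JOIN #Staging R
    ON  R.grouping_id = L.grouping_id + 1
    AND R.IN_InvoiceID - R.grouping_id * 5 = L.IN_InvoiceID - L.grouping_id * 5
WHERE L.grouping_id % 2 = 0
ORDER BY L.IN_InvoiceID;

DROP TABLE #Staging;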
Ideally, you would manage this with two tables, a master and a detail table.
But out of curiosity, I was able to solve it and give the answer as follows:
Declare @table table(id int identity, invoice_id int)
; WITH Numbers AS
(
SELECT n = 1
UNION ALL
SELECT n + 1
FROM Numbers
WHERE n+1 <= 50
)
insert into @table SELECT n
FROM Numbers
Select (a.id )%5 ,* from @table a join @table b on a.id+5 = b.id and a.id != b.id
;WITH Numbers AS
(
SELECT n = 1, o = 5
UNION ALL
SELECT n + 10, o = o+10
FROM Numbers
WHERE n+1 <= 50
)
select a.id ParentId,a.invoice_id ParentInvoiceId, --b.n, b.o,
c.invoice_id childInvoiceID from @table a
join Numbers b on a.id between b.n and b.o
left join @table c on a.id + 5 = c.id
Here is my solution.
First I create groups by integer-dividing (in_invoiceid - 1) by 5 (ignoring the remainder).
After that I create a category to distinguish alternating groups (i.e. by checking whether the remainder is 0 or not).
Then it is a matter of dense_rank()-ing the records on the basis of the category field, ordered by in_invoiceid.
Lastly, I join the category = 1 rows with the category = 0 records that have the same dense_rank.
create table Invoicetable(IN_ID varchar(100), IN_InvoiceID int)
INSERT INTO Invoicetable (IN_ID, IN_InvoiceID)
VALUES
('2345-BCDE-6645-1DDF', 1),
('2345-BCDE-6645-3DDF', 2),
('2345-BCDE-6645-4DDF', 3),
('2345-BCDE-6645-5DDF', 4),
('2345-BCDE-6645-6DDF', 5),
('2345-BCDE-6645-7DDF', 6),
('2345-BCDE-6645-aDDF', 7),
('2345-BCDE-6645-sDDF', 8),
('2345-BCDE-6645-dDDF', 9),
('2345-BCDE-6645-dDDF', 10),
('2345-BCDE-6645-dDDF', 11),
('2345-BCDE-6645-dDDF', 12);
with data
as (
select *
,(in_invoiceid-1)/5 as grp
,case when ((in_invoiceid-1)/5)%2=0 then '1' else '0' end as category
,dense_rank() over(partition by case when ((in_invoiceid-1)/5)%2=0 then '1' else '0' end
order by in_invoiceid) as rnk
from invoicetable a
)
select *
from data a
left join data b
on a.rnk=b.rnk
and b.category=0
where a.category=1
Here is the db fiddle link:
https://dbfiddle.uk/?rdbms=sqlserver_2017&fiddle=287f101737c580ca271940764b2536ae
You may try the following approach. Dividing the table is done with (((ROW_NUMBER() OVER (ORDER BY IN_InvoiceID) - 1) / 5) % 2 = 0), which splits the records into left and right groups.
CREATE TABLE #InvoiceTable(
IN_ID varchar(24),
IN_InvoiceID int
)
INSERT INTO #InvoiceTable (IN_ID, IN_InvoiceID)
VALUES
('2345-BCDE-6645-1DDF', 1),
('2345-BCDE-6645-3DDF', 2),
('2345-BCDE-6645-4DDF', 3),
('2345-BCDE-6645-5DDF', 4),
('2345-BCDE-6645-6DDF', 5),
('2345-BCDE-6645-7DDF', 6),
('2345-BCDE-6645-aDDF', 7),
('2345-BCDE-6645-sDDF', 8),
('2345-BCDE-6645-dDDF', 9),
('2345-BCDE-6645-dDDF', 10),
('2345-BCDE-6645-dDDF', 11),
('2345-BCDE-6645-dDDF', 12);
WITH cte AS (
SELECT
IN_ID,
IN_InvoiceID,
CASE
WHEN (((ROW_NUMBER() OVER (ORDER BY IN_InvoiceID) - 1) / 5) % 2 = 0) THEN 'L'
ELSE 'R'
END AS IN_Position
FROM #InvoiceTable
),
cteL AS (
SELECT IN_ID, IN_InvoiceID, ROW_NUMBER() OVER (ORDER BY IN_InvoiceID) AS IN_RowNumber
FROM cte
WHERE IN_Position = 'L'
),
cteR AS (
SELECT IN_ID, IN_InvoiceID, ROW_NUMBER() OVER (ORDER BY IN_InvoiceID) AS IN_RowNumber
FROM cte
WHERE IN_Position = 'R'
)
SELECT cteL.IN_ID, cteL.IN_InvoiceID, cteR.IN_ID, cteR.IN_InvoiceID
FROM cteL
LEFT JOIN cteR ON (cteL.IN_RowNumber = cteR.IN_RowNumber)
Output:
IN_ID IN_InvoiceID IN_ID IN_InvoiceID
2345-BCDE-6645-1DDF 1 2345-BCDE-6645-7DDF 6
2345-BCDE-6645-3DDF 2 2345-BCDE-6645-aDDF 7
2345-BCDE-6645-4DDF 3 2345-BCDE-6645-sDDF 8
2345-BCDE-6645-5DDF 4 2345-BCDE-6645-dDDF 9
2345-BCDE-6645-6DDF 5 2345-BCDE-6645-dDDF 10
2345-BCDE-6645-dDDF 11 NULL NULL
2345-BCDE-6645-dDDF 12 NULL NULL

SQL: Get last referring and post referring page during a signup process

I'm trying to write an efficient SQL query to select the 'before' and 'after' pages for the signup process. I have a solution using for loops, which doesn't scale, and am hoping for a native SQL solution.
For a single clientId, I want the last page visited before signup and the first page visited after signup (only one from each side of the join process).
The join process ALWAYS has /join/complete
Input:
clientId time path
1 0 /page1
1 10 /page2
1 20 /join/<random_token_id>
1 30 /join/<random_token_id>/step2
1 40 /join/complete
1 50 /page2
2 0 /page3
2 10 /join/complete
Output
ClientId Before After
1 /page2 /page2
2 /page3 null
I would be grateful if there is an easy solution in SQL. If it's complex, just leave it out. I will leave the code running overnight.
#standardSQL
WITH lineup AS (
SELECT clientId, time, path,
ROW_NUMBER() OVER(PARTITION BY clientId ORDER BY time) pos
FROM `project.dataset.table`
), start AS (
SELECT row.clientId, row.pos FROM (
SELECT ARRAY_AGG(t ORDER BY pos LIMIT 1)[OFFSET(0)] row
FROM lineup t WHERE STARTS_WITH(path, '/join/')
GROUP BY clientId)
), complete AS (
SELECT clientId, pos FROM lineup WHERE path = '/join/complete'
), before AS (
SELECT lineup.clientId, path FROM lineup JOIN start
ON lineup.clientId = start.clientId AND lineup.pos = start.pos - 1
), after AS (
SELECT lineup.clientId, path FROM lineup JOIN complete
ON lineup.clientId = complete.clientId AND lineup.pos = complete.pos + 1
)
SELECT clientId, before.path AS before, after.path AS after
FROM before FULL OUTER JOIN after USING (clientId)
You can test / play with the above using dummy data from your question, as below:
#standardSQL
WITH `project.dataset.table` AS (
SELECT 1 clientId, 0 time, '/page1' path UNION ALL
SELECT 1, 10, '/page2' UNION ALL
SELECT 1, 20, '/join/<random_token_id>' UNION ALL
SELECT 1, 30, '/join/<random_token_id>/step2' UNION ALL
SELECT 1, 40, '/join/complete' UNION ALL
SELECT 1, 50, '/page2' UNION ALL
SELECT 2, 0, '/page3' UNION ALL
SELECT 2, 10, '/join/complete' UNION ALL
SELECT 3, 0, '/join/complete' UNION ALL
SELECT 3, 10, '/page4'
), lineup AS (
SELECT clientId, time, path,
ROW_NUMBER() OVER(PARTITION BY clientId ORDER BY time) pos
FROM `project.dataset.table`
), start AS (
SELECT row.clientId, row.pos FROM (
SELECT ARRAY_AGG(t ORDER BY pos LIMIT 1)[OFFSET(0)] row
FROM lineup t WHERE STARTS_WITH(path, '/join/')
GROUP BY clientId)
), complete AS (
SELECT clientId, pos FROM lineup WHERE path = '/join/complete'
), before AS (
SELECT lineup.clientId, path FROM lineup JOIN start
ON lineup.clientId = start.clientId AND lineup.pos = start.pos - 1
), after AS (
SELECT lineup.clientId, path FROM lineup JOIN complete
ON lineup.clientId = complete.clientId AND lineup.pos = complete.pos + 1
)
SELECT clientId, before.path AS before, after.path AS after
FROM before FULL OUTER JOIN after USING (clientId)
with the result:
Row clientId before after
1 1 /page2 /page2
2 2 /page3 null
3 3 null /page4

SQL reporting query

I have a database with the following structure.
CREATE TABLE Party
(
PartyID INT IDENTITY
PRIMARY KEY ,
StatusID INT ,
Weigth INT ,
OldWeigth INT
);
GO
CREATE TABLE PartyLocation
(
PartyLocationID INT IDENTITY
PRIMARY KEY ,
PartyID INT FOREIGN KEY REFERENCES dbo.Party ( PartyID ) ,
LocationID INT ,
Distance INT
);
GO
CREATE TABLE PartyRole
(
PartyRoleID INT IDENTITY
PRIMARY KEY ,
PartyID INT FOREIGN KEY REFERENCES dbo.Party ( PartyID ) ,
RoleID INT
);
with some simple data.
INSERT INTO dbo.Party
( StatusID, Weigth, OldWeigth )
VALUES ( 1, -- StatusID - int
10, -- Weigth - int
20 -- OldWeigth - int
),
( 1, 15, 25 ),
( 2, 20, 30 );
INSERT INTO dbo.PartyLocation
( PartyID, LocationID, Distance )
VALUES ( 1, -- PartyID - int
1, -- LocationID - int
100 -- Distance - int
),
( 1, 2, 200 ),
( 1, 3, 300 ),
( 2, 1, 1000 ),
( 2, 2, 2000 ),
( 3, 1, 10000 );
INSERT INTO dbo.PartyRole
( PartyID, RoleID )
VALUES ( 1, -- PartyID - int
1 -- RoleID - int
),
( 1, 2 ),
( 1, 3 ),
( 2, 1 ),
( 2, 2 ),
( 3, 1 );
I want to query the following information:
Return the sum of Weigth of all parties that have RoleID = 1 in the PartyRole table
Return the sum of OldWeigth of all parties that have StatusID = 2
Return the sum of distances of all parties that have LocationID = 3
Return the sum of distances of all parties that have RoleID = 2
So the expected results are
FilteredWeigth FilteredOldWeigth FilteredDistance AnotherFilteredDistance
-------------- ----------------- ---------------- -----------------------
45 30 600 3600
Can we write a query that will query each table just once? If not, what would be the most optimal way to query the data?
You can try this.
SELECT
FilteredWeigth = SUM(CASE WHEN RoleID = 1 AND RN_P = 1 THEN Weigth END) ,
FilteredOldWeigth = SUM(CASE WHEN StatusID = 2 AND RN_P = 1 THEN OldWeigth END),
FilteredDistance = SUM(CASE WHEN LocationID = 3 AND RN_L = 1 THEN Distance END),
AnotherFilteredDistance = SUM(CASE WHEN RoleID = 2 THEN Distance END)
FROM (
SELECT P.Weigth, P.StatusID, P.OldWeigth, PL.LocationID, PL.Distance, PR.RoleID,
RN_P = ROW_NUMBER() OVER (PARTITION BY P.PartyID ORDER BY PL.PartyLocationID),
RN_L = ROW_NUMBER() OVER (PARTITION BY PL.LocationID ORDER BY PR.PartyRoleID)
FROM Party P
INNER JOIN PartyLocation PL ON P.PartyID = PL.PartyID
INNER JOIN PartyRole PR ON P.PartyID = PR.PartyID
) AS T
The query below gives:
45 30 300 3600
The third column gives 300, which does not correspond to your expected result.
with q1
as
(
select sum(weigth) FilteredWeigth
from party join partyrole on party.partyid = partyrole.partyid
where partyrole.RoleID = '1'
),
q2 as
(
select sum(OldWeigth) OldWeigth from party where StatusID = '2'
),
q3 as (
select sum(Distance) FilteredDistance
from party join PartyLocation on party.partyid = PartyLocation.partyid
where PartyLocation.locationID = '3'
),
q4 as
(
select sum(Distance) AnotherFilteredDistance
from party join partyrole on party.partyid = partyrole.partyid
join PartyLocation on party.partyid = PartyLocation.partyid
where partyrole.RoleID = '2'
)
select FilteredWeigth,OldWeigth,FilteredDistance,AnotherFilteredDistance
from q1,q2,q3,q4
When using individual queries, you can achieve this as follows.
Return the sum of Weigth of all parties that have RoleID = 1 in the PartyRole table:
SELECT
SUM(Weigth) FilteredWeigth
FROM dbo.Party P
WHERE EXISTS
(
SELECT
1
FROM dbo.PartyRole PR
WHERE PR.PartyID = P.PartyID
AND PR.RoleId = 1
)
Return the sum of OldWeigth of all parties that have StatusID = 2:
SELECT
SUM(OldWeigth) FilteredOldWeigth
FROM dbo.Party P
WHERE P.StatusID = 2
Return the sum of distances of all parties that have LocationID = 3:
SELECT
SUM(Distance) FilteredDistance
FROM dbo.PartyLocation
WHERE LocationID = 3
Return the sum of distances of all parties that have RoleID = 2:
SELECT SUM(Distance) FROM PartyLocation PL
WHERE EXISTS
(
SELECT 1 FROM PartyRole PR
WHERE PR.PartyID = PL.PartyID
AND PR.Roleid = 2
)
If you want to get the results of all these in a single result set, then maybe you can try a pivot query, like this:
WITH CTE
AS
(
SELECT
'FilteredWeigth' ColNm,
SUM(Weigth) Val
FROM dbo.Party P
WHERE EXISTS
(
SELECT
1
FROM dbo.PartyRole PR
WHERE PR.PartyID = P.PartyID
AND PR.RoleId = 1
)
UNION
SELECT
'FilteredOldWeigth' ColNm,
SUM(OldWeigth) Val
FROM dbo.Party P
WHERE P.StatusID = 2
UNION
SELECT
'FilteredDistance' ColNm,
SUM(Distance) Val
FROM dbo.PartyLocation
WHERE LocationID = 3
UNION
SELECT
'AnotherFilteredDistance' ColNm,
SUM(Distance) Val FROM PartyLocation PL
WHERE EXISTS
(
SELECT 1 FROM PartyRole PR
WHERE PR.PartyID = PL.PartyID
AND PR.Roleid = 2
)
)
SELECT
*
FROM CTE
PIVOT
(
SUM(Val)
FOR ColNm IN
(
[FilteredWeigth],[FilteredOldWeigth],[FilteredDistance],[AnotherFilteredDistance]
)
)Pvt
The result will be:
I could think of only three possible options:
Union query with four different select statements as answered by #ab-bennett
Join all tables then use select statements as answered by sarslan
Mix of 1 and 2, based on experiments
Coming to the question you asked:
Can we write a query that will query each table just once?
Assuming best performance is the goal, following could happen in each of the above cases:
All select statements would have their own where clause. This performs best when the where clause returns few rows compared to the total row count. Note that joins are expensive for very large tables.
A join is made once, and the desired output is obtained from the same joined table. This performs best when the where clause keeps a significant number of rows and the tables are not too big to join.
You can mix JOIN / IN / EXISTS / WHERE to optimize your queries based on the number of rows in each table. This approach can be used when your dataset cardinality does not vary a lot.

Find overlapping sets of data in a table

I need to identify duplicate sets of data and give those sets whose data is similar a group id.
id threshold cost
-- ---------- ----------
1 0 9
1 100 7
1 500 6
2 0 9
2 100 7
2 500 6
I have thousands of these sets; most are the same but with different ids. I need to find all the similar sets that have the same threshold and cost amounts and give them a group id. I'm just not sure where to begin. Is the best way to iterate and insert each set into a table, and then iterate through each set in the table to find what already exists?
This is one of those cases where you can try to do something with relational operators. Or, you can just say: "let's put all the information in a string and use that as the group id". SQL Server seems to discourage this approach, but it is possible. So, let's characterize the groups using:
select d.id,
(select cast(threshold as varchar(8000)) + '-' + cast(cost as varchar(8000)) + ';'
from data d2
where d2.id = d.id
order by threshold
for xml path ('')
) as groupname
from data d
group by d.id;
Oh, I think that solves your problem. The groupname can serve as the group id. If you want a numeric id (which is probably a good idea), use dense_rank():
select d.id, dense_rank() over (order by groupname) as groupid
from (select d.id,
(select cast(threshold as varchar(8000)) + '-' + cast(cost as varchar(8000)) + ';'
from data d2
where d2.id = d.id
order by threshold
for xml path ('')
) as groupname
from data d
group by d.id
) d;
Here's the solution to my interpretation of the question:
IF OBJECT_ID('tempdb..#tempGrouping') IS NOT NULL DROP Table #tempGrouping;
;
WITH BaseTable AS
(
SELECT 1 id, 0 as threshold, 9 as cost
UNION SELECT 1, 100, 7
UNION SELECT 1, 500, 6
UNION SELECT 2, 0, 9
UNION SELECT 2, 100, 7
UNION SELECT 2, 500, 6
UNION SELECT 3, 1, 9
UNION SELECT 3, 100, 7
UNION SELECT 3, 500, 6
)
, BaseCTE AS
(
SELECT
id
--,dense_rank() over (order by threshold, cost ) as GroupId
,
(
SELECT CAST(TblGrouping.threshold AS varchar(8000)) + '/' + CAST(TblGrouping.cost AS varchar(8000)) + ';'
FROM BaseTable AS TblGrouping
WHERE TblGrouping.id = BaseTable.id
ORDER BY TblGrouping.threshold, TblGrouping.cost
FOR XML PATH ('')
) AS MultiGroup
FROM BaseTable
GROUP BY id
)
,
CTE AS
(
SELECT
*
,DENSE_RANK() OVER (ORDER BY MultiGroup) AS GroupId
FROM BaseCTE
)
SELECT *
INTO #tempGrouping
FROM CTE
-- SELECT * FROM #tempGrouping;
UPDATE BaseTable
SET BaseTable.GroupId = #tempGrouping.GroupId
FROM BaseTable
INNER JOIN #tempGrouping
ON BaseTable.Id = #tempGrouping.Id
IF OBJECT_ID('tempdb..#tempGrouping') IS NOT NULL DROP Table #tempGrouping;
Here BaseTable is your table, and you don't need the CTE "BaseTable" because you already have a data table.
You may need to take extra precautions if your threshold and cost fields can be NULL.
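For example, here is a minimal sketch of guarding the concatenation used above against NULLs: with the default CONCAT_NULL_YIELDS_NULL setting, concatenating a NULL yields NULL and would blank out that row's contribution to groupname. The sentinel value here is an assumption; pick one that cannot occur in your real data.
(select cast(isnull(d2.threshold, -2147483648) as varchar(8000)) + '-' +
        cast(isnull(d2.cost, -2147483648) as varchar(8000)) + ';'
 from data d2
 where d2.id = d.id
 order by d2.threshold
 for xml path ('')
) as groupname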