SQL - split values into buckets using reference table
I've tried to describe the challenge below, but perhaps the best way to understand it is to run the sample SQL and work backwards from the sample output table (@IncomingSplitBucket).
I'm certain there is an elegant way to code this, but it's beyond any of my best efforts.
The challenge is titled:
Splitting Numbers into Buckets
Each customer has payment buckets (@bucket). I'm looking to assign payments into buckets as the payments are made; see table (@incoming).
Payments can span buckets and can be for +/- amounts.
Using the (@incoming) and (@bucket) information, the aim is to assign the payments into buckets. A payment should be split when it spans a bucket boundary.
The table @IncomingSplitBucket provides the desired output. One way to understand the requirements is to look at this table and work backwards.
I have tried and failed many approaches to this problem.
/*
Please run code and review
*/
--===========================================
--t1 - PAYMENT SCHEDULE SPLIT INTO BUCKET
--===========================================
DECLARE @bucket TABLE (
CustID INT,
BucketSeqID char(1),
Amount money
)
INSERT INTO @bucket
SELECT 1,'a', '1000' union
SELECT 1,'b', '1000' union
SELECT 1,'c', '2000' union
SELECT 1,'d', '2000' union
SELECT 2,'a', '5000' union
SELECT 2,'b', '5000' union
SELECT 2,'c', '1000' union
SELECT 2,'d', '1000' union
SELECT 3,'a', '5000' union
SELECT 3,'b', '5000'
--===========================================
--t2 - PAYMENTS COMING IN
--===========================================
DECLARE @incoming TABLE (
CustID INT,
IncomingSeqID INT,
Amount money
)
INSERT INTO @incoming
SELECT 1,1, '1000' union
SELECT 1,2, '2000' union
SELECT 1,3, '3000' union
SELECT 2,1, '5000' union
SELECT 2,2, '3000' union
SELECT 2,3, '2000' union
SELECT 2,4, '2000' union
SELECT 3,1, '3000' union
SELECT 3,2, '3000' union
SELECT 3,3, '3000' union
SELECT 3,4, '1000'
--=================================================================
--t3 - THIS IS WHAT THE OUTPUT DATA SHOULD LOOK LIKE
--================================================================
DECLARE @IncomingSplitBucket TABLE (
CustID INT,
IncomingSeqID INT,
BucketSeqID char(1),
AmountBucket money
)
INSERT INTO @IncomingSplitBucket
SELECT 1,1,'a','1000' union
SELECT 1,2,'b','1000' union
SELECT 1,2,'c','1000' union
SELECT 1,3,'c','1000' union
SELECT 1,3,'d','2000' union
SELECT 2,1,'a','5000' union
SELECT 2,2,'b','3000' union
SELECT 2,3,'b','2000' union
SELECT 2,4,'c','1000' union
SELECT 2,4,'d','1000' union
SELECT 3,1,'a','3000' union
SELECT 3,2,'a','2000' union
SELECT 3,2,'b','1000' union
SELECT 3,3,'b','3000' union
SELECT 3,4,'b','1000'
--=================================================================
--Outputs and Data Checks
--================================================================
--REVIEW DATA
select * from @bucket
select * from @incoming
select * from @IncomingSplitBucket --(sample output)
--DATA check: the SUM of AmountBucket grouped by CustID, BucketSeqID should equal the @bucket amounts (see table)
SELECT CustID, BucketSeqID, SUM(AmountBucket) AS BucketCheck
FROM @IncomingSplitBucket
GROUP BY CustID, BucketSeqID
order by 1,2
--DATA check: the SUM of AmountBucket grouped by CustID, IncomingSeqID should equal the @incoming amounts (see table)
SELECT CustID, IncomingSeqID, SUM(AmountBucket) AS BucketCheck
FROM @IncomingSplitBucket
GROUP BY CustID, IncomingSeqID
order by 1,2
Updated complexity request (10/12/2019):
When negative amounts are received, they take money back out of the buckets (the expected output drains the most recently filled buckets first).
When the amount received is greater than the remaining bucket capacity, an 'overflow bucket' is used (called 'x' in the expected output).
Thanks
--===========================================
--t1 - BUCKETS
--===========================================
DECLARE @bucket TABLE (
CustID INT,
BucketSeqID char(1),
Amount money
)
INSERT INTO @bucket
SELECT 1,'a', '1000' union
SELECT 1,'b', '1000' union
SELECT 1,'c', '2000' union
SELECT 1,'d', '2000' union
SELECT 2,'a', '5000' union
SELECT 2,'b', '5000' union
SELECT 2,'c', '1000' union
SELECT 2,'d', '1000' union
SELECT 3,'a', '5000' union
SELECT 3,'b', '5000'
--===========================================
--t2 - PAYMENTS
--===========================================
DECLARE @incoming TABLE (
CustID INT,
IncomingSeqID INT,
Amount money
)
INSERT INTO @incoming
SELECT 1,1, '1000' union
SELECT 1,2, '2000' union
SELECT 1,3, '3000' union
SELECT 2,1, '5000' union
SELECT 2,2, '3000' union
SELECT 2,3, '2000' union
SELECT 2,4, '2000' union
SELECT 2,5, '-3000' union
SELECT 3,1, '3000' union
SELECT 3,2, '3000' union
SELECT 3,3, '3000' union
SELECT 3,4, '500' union
SELECT 3,5, '200' union
SELECT 3,6, '-500' union
SELECT 3,7, '800' union
SELECT 3,8, '-400' union
SELECT 3,9, '500'
--=================================================================
--t3 - EXPECTED OUTPUT
--================================================================
DECLARE @IncomingSplitBucket TABLE (
CustID INT,
IncomingSeqID INT,
BucketSeqID char(1),
AmountBucket money
)
INSERT INTO @IncomingSplitBucket
SELECT 1,1,'a','1000' union
SELECT 1,2,'b','1000' union
SELECT 1,2,'c','1000' union
SELECT 1,3,'c','1000' union
SELECT 1,3,'d','2000' union
SELECT 2,1,'a','5000' union
SELECT 2,2,'b','3000' union
SELECT 2,3,'b','2000' union
SELECT 2,4,'c','1000' union
SELECT 2,4,'d','1000' union
SELECT 2,5,'d','-1000' union
SELECT 2,5,'c','-1000' union
SELECT 2,5,'b','-1000' union
SELECT 3,1,'a','3000' union
SELECT 3,2,'a','2000' union
SELECT 3,2,'b','1000' union
SELECT 3,3,'b','3000' union
SELECT 3,4,'b','200' union
SELECT 3,5,'b','-500' union
SELECT 3,6,'b','800' union
SELECT 3,7,'b','-400' union
SELECT 3,8,'b','400' union
SELECT 3,8,'x','100'
--=================================================================
--Outputs and Data Checks
--================================================================
--REVIEW DATA
select * from @bucket
select * from @incoming
select * from @IncomingSplitBucket --(expected output)
--DATA check: the SUM of AmountBucket grouped by CustID, BucketSeqID should equal the @bucket amounts (see table)
SELECT CustID, BucketSeqID, SUM(AmountBucket) AS BucketCheck
FROM @IncomingSplitBucket
GROUP BY CustID, BucketSeqID
order by 1,2
--DATA check: the SUM of AmountBucket grouped by CustID, IncomingSeqID should equal the @incoming amounts (see table)
SELECT CustID, IncomingSeqID, SUM(AmountBucket) AS BucketCheck
FROM @IncomingSplitBucket
GROUP BY CustID, IncomingSeqID
order by 1,2
First I will use a common table expression (CTE) to rename the columns so the two tables don't share similar names, which makes our life easier; I will also convert the bucket names a, b, c, d to a sequence 1, 2, 3, 4 for simplicity.
Then I will continue with a recursive CTE, taking the first bucket and the first incoming payment. If the bucket is not yet filled, the next record uses the same unfilled bucket; otherwise it uses the next bucket. The same goes for the incoming payment: if the payment fits in the remaining part of the bucket, the next row moves on to the next payment; if not, the rest of the payment is carried forward, until all incoming payments are finished.
Please see the CTE below:
;with bucket as (
    -- rename columns and map the bucket letters a,b,c,d to a numeric sequence 1,2,3,4
    select CustID BucketCustID, BucketSeqID,
        case BucketSeqID when 'a' then 1 when 'b' then 2 when 'c' then 3 when 'd' then 4 end BucketSeq,
        Amount bucketAmount
    from @bucket
), incoming as (
    -- rename columns so they do not clash with the bucket CTE
    select CustID IncomingCustID, IncomingSeqID, Amount IncomingAmount
    from @incoming
), result as (
    -- anchor: allocate the first payment to the first bucket of each customer
    select BucketCustID, IncomingSeqID, BucketSeqID, BucketSeq,
        case when bucketAmount < IncomingAmount then 0 else bucketAmount - IncomingAmount end bucketAmount,    -- capacity left in this bucket
        case when bucketAmount > IncomingAmount then 0 else IncomingAmount - bucketAmount end IncomingAmount,  -- payment amount still unassigned
        case when bucketAmount > IncomingAmount then IncomingAmount else bucketAmount end InBucket             -- amount placed in this bucket
    from bucket b
    inner join incoming i on i.IncomingCustID = b.BucketCustID and i.IncomingSeqID = 1
    where b.BucketSeq = 1
    union all
    -- recursion: stay on the current bucket/payment while it still has a remainder, otherwise step to the next one
    select BucketCustID, IncomingSeqID, BucketSeqID, BucketSeq,
        case when bucketAmount < IncomingAmount then 0 else bucketAmount - IncomingAmount end bucketAmount,
        case when bucketAmount > IncomingAmount then 0 else IncomingAmount - bucketAmount end IncomingAmount,
        case when bucketAmount > IncomingAmount then IncomingAmount else bucketAmount end InBucket
    from (
        -- Prev: carry forward the unfilled remainder of the bucket and/or payment from the previous row
        select b.BucketCustID, i.IncomingSeqID, b.BucketSeqID, b.BucketSeq,
            case when r.BucketSeq = b.BucketSeq then r.bucketAmount else b.bucketAmount end bucketAmount,
            case when r.IncomingSeqID = i.IncomingSeqID then r.IncomingAmount else i.IncomingAmount end IncomingAmount
        from result r
        inner join bucket b on b.BucketCustID = r.BucketCustID
            and b.BucketSeq = r.BucketSeq + (case when r.bucketAmount = 0 then 1 else 0 end)
        inner join incoming i on i.IncomingCustID = r.BucketCustID
            and i.IncomingSeqID = r.IncomingSeqID + (case when r.IncomingAmount = 0 then 1 else 0 end)
    ) Prev
)
select BucketCustID CustID, IncomingSeqID, BucketSeqID, InBucket AmountBucket
from result r
order by BucketCustID, IncomingSeqID, BucketSeqID
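A caveat worth adding (my note, not part of the original answer): SQL Server caps recursive CTEs at 100 recursion levels by default, and this query needs roughly one level per split row of the busiest customer. With larger data you can lift the cap by appending a query hint to the final SELECT:
select BucketCustID CustID, IncomingSeqID, BucketSeqID, InBucket AmountBucket
from result r
order by BucketCustID, IncomingSeqID, BucketSeqID
OPTION (MAXRECURSION 0) -- 0 removes the 100-level limit; any finite value keeps a safety cap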
The output matches your desired output as below:-
CustID IncomingSeqID BucketSeqID AmountBucket
1 1 a 1000.00
1 2 b 1000.00
1 2 c 1000.00
1 3 c 1000.00
1 3 d 2000.00
2 1 a 5000.00
2 2 b 3000.00
2 3 b 2000.00
2 4 c 1000.00
2 4 d 1000.00
3 1 a 3000.00
3 2 a 2000.00
3 2 b 1000.00
3 3 b 3000.00
3 4 b 1000.00
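If you want to verify the result against the question's expected output rather than eyeball it, here is one possible check (a sketch, assuming the sample tables and the CTE are run in the same batch; run it in place of the final SELECT above, since the result CTE is only visible to the statement that immediately follows it):
select BucketCustID, IncomingSeqID, BucketSeqID, InBucket
from result
except
select CustID, IncomingSeqID, BucketSeqID, AmountBucket
from @IncomingSplitBucket
-- an empty result means every row produced by the CTE also appears in the expected output;
-- swap the two SELECTs to check the opposite direction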
Related
SQL Get First Non Null Value By Date From A Table
I'm trying a CASE expression, something like this:
LEFT OUTER JOIN table_1 AS T1
  ON T2.common_id = T1.common_id
CASE
  WHEN T1.column_1 IS NOT NULL THEN T1.column_1
  WHEN T1.column_1 IS NULL THEN
    get first value from T1.column that is not null by date
  ELSE 0
END
It's the logic for that 6th line, where I have written out what I want, that I can't quite get right:
THEN get first value from T1.column that is not null by date
I have been looking at the FIRST_VALUE function but can't quite figure it out correctly. But there are maybe other ways to get it to work.
You can find the FIRST_VALUE that is not null with this query. First order by a CASE expression so that non-null dates sort first, then take the FIRST_VALUE of the date:
select *,
    FIRST_VALUE(date) over(partition by T1.common_id order by case when date is not null then 1 else 2 end)
from #table_2 T2
LEFT OUTER JOIN #table_1 AS T1 ON T2.common_id = T1.common_id
You can insert the sample data with the following code:
create table #table_1 (id int, common_id int, date datetime)
create table #table_2 (id int, common_id int, name varchar(100))
insert into #table_1(id,common_id,date)
select 1,1,'2015-05-24' union
select 2,1, null union
select 3,1, '2014-09-01' union
select 4,4,null union
select 5,4, '2019-08-05' union
select 6,4, '2000-09-07' union
select 7,7,null union
select 8,7, '2019-08-05' union
select 9,12,'2019-08-06' union
select 10,12, '2019-08-05' union
select 11,18,'2019-08-06' union
select 12,19, '2019-08-05'
insert into #table_2(id,common_id,name)
select 1,1,'a' union
select 2,1, null union
select 3,1, 'b' union
select 4,4,'k' union
select 5,4, 't' union
select 6,4, 'c' union
select 7,7,'aaa' union
select 8,7, 'sada' union
select 9,12,'44dd' union
select 9,12, '44'
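For the part the question actually asks about, a minimal sketch of the same null-last ordering trick applied to #table_1 on its own; the second ORDER BY key (T1.date) is my addition so that the value picked is deterministic (the earliest non-null date per common_id):
SELECT T1.common_id,
       FIRST_VALUE(T1.date) OVER (
           PARTITION BY T1.common_id
           ORDER BY CASE WHEN T1.date IS NOT NULL THEN 1 ELSE 2 END, T1.date
       ) AS first_non_null_date
FROM #table_1 AS T1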
What's the best way of re-using classification rules for multiple queries within big query standard SQL?
I'm using BigQuery to analyse Google Analytics data. I need to classify visits depending on whether they visit particular URLs that indicate they were in the booking process, or purchased, etc. There is a long list of URLs that represent each step, and hence it would be advantageous to include the classifications within a view and re-use it, with appropriate joins, for whatever query requires the classification. I have the following view that seems to do what I need:
SELECT
  fullVisitorId,
  visitID,
  LOWER(h.page.pagePath) AS path,
  CASE
    WHEN LOWER(h.page.pagePath) = '/' THEN '/'
    WHEN LOWER(h.page.pagePath) LIKE '{path-here}%' OR ....
    ....
    ....
    ELSE 'other'
  END AS path_classification,
  _TABLE_SUFFIX AS date
FROM `{project-id}.{data-id}.ga_sessions_*`, UNNEST(hits) AS h
WHERE REGEXP_CONTAINS(_TABLE_SUFFIX, r'[0-9]{8}')
AND h.type = 'PAGE'
I'm wondering if there's a simpler way of achieving this that doesn't require selecting from a pre-existing table, as this doesn't seem necessary to define the classifications. I get the feeling that it's possible to use something more straightforward, but I'm not sure how to do it. Does anyone know how to put these definitions into a view without querying a table within the view?
Let's consider a simple example:
#standardSQL
WITH yourTable AS (
  SELECT 1 AS id, '123' AS path UNION ALL
  SELECT 2, '234' UNION ALL
  SELECT 3, '345' UNION ALL
  SELECT 4, '456'
)
SELECT id, path,
  CASE path
    WHEN '123' THEN 'a'
    WHEN '234' THEN 'b'
    WHEN '345' THEN 'c'
    ELSE 'other'
  END AS path_classification
FROM yourTable
ORDER BY id
The above can be refactored into the below:
#standardSQL
WITH yourTable AS (
  SELECT 1 AS id, '123' AS path UNION ALL
  SELECT 2, '234' UNION ALL
  SELECT 3, '345' UNION ALL
  SELECT 4, '456'
)
SELECT id, path,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath = path LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification
FROM yourTable,
  (SELECT ARRAY_AGG(STRUCT<cpath STRING, crule STRING>(path, rule)) AS rules FROM `project.dataset.rules`) AS r
ORDER BY id
which relies on a rules view that is defined as below:
#standardSQL
SELECT '123' AS path, 'a' AS rule UNION ALL
SELECT '234', 'b' UNION ALL
SELECT '345', 'c' UNION ALL
SELECT NULL, 'other'
As you can see, all classification rules live only in the rules view! You can play around with this approach using the below:
#standardSQL
WITH yourTable AS (
  SELECT 1 AS id, '123' AS path UNION ALL
  SELECT 2, '234' UNION ALL
  SELECT 3, '345' UNION ALL
  SELECT 4, '456'
), rules AS (
  SELECT '123' AS path, 'a' AS rule UNION ALL
  SELECT '234', 'b' UNION ALL
  SELECT '345', 'c' UNION ALL
  SELECT NULL, 'other'
)
SELECT id, path,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath = path LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification
FROM yourTable,
  (SELECT ARRAY_AGG(STRUCT<cpath STRING, crule STRING>(path, rule)) AS rules FROM rules) AS r
ORDER BY id
This can be further "simplified" by moving the ARRAY_AGG inside the view, as below:
#standardSQL
SELECT ARRAY_AGG(STRUCT<cpath STRING, crule STRING>(path, rule)) AS rules
FROM (
  SELECT '123' AS path, 'a' AS rule UNION ALL
  SELECT '234', 'b' UNION ALL
  SELECT '345', 'c' UNION ALL
  SELECT NULL, 'other'
)
In this case the final query is as simple as below:
#standardSQL
SELECT id, path,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath = path LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification
FROM yourTable, rules AS r
ORDER BY id
Depending on your specific rules, the above can/should be adjusted and optimized accordingly, but I hope this gives you a main direction.
Q in comment: does your solution enable the use of matching with the LIKE keyword or matching with regex?
The original question was - What's the … way of re-using classification rules for multiple queries within BigQuery standard SQL? So the examples in my initial answer just show you how to make this happen (focus on "reuse"). How you will use it (matching with the LIKE keyword or matching with regex) is totally up to you!
See the example below. Take a look at path_classification_exact_match vs path_classification_like_match vs path_classification_regex_match:
#standardSQL
WITH yourTable AS (
  SELECT 1 AS id, '123' AS path UNION ALL
  SELECT 2, '234' UNION ALL
  SELECT 3, '345' UNION ALL
  SELECT 4, '456' UNION ALL
  SELECT 5, '234abc' UNION ALL
  SELECT 6, '345bcd' UNION ALL
  SELECT 7, '456cde'
), rules AS (
  SELECT ARRAY_AGG(STRUCT<cpath STRING, crule STRING>(path, rule)) AS rules
  FROM (
    SELECT '123' AS path, 'a' AS rule UNION ALL
    SELECT '234', 'b' UNION ALL
    SELECT '345', 'c' UNION ALL
    SELECT NULL, 'other'
  )
)
SELECT id, path,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath = path LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification_exact_match,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE path LIKE CONCAT('%', rr.cpath, '%') LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification_like_match,
  IFNULL(
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE REGEXP_CONTAINS(path, rr.cpath) LIMIT 1),
    (SELECT rr.crule FROM UNNEST(r.rules) AS rr WHERE rr.cpath IS NULL LIMIT 1)
  ) AS path_classification_regex_match
FROM yourTable, rules AS r
ORDER BY id
Output is:
id  path    path_classification_exact_match  path_classification_like_match  path_classification_regex_match
1   123     a      a      a
2   234     b      b      b
3   345     c      c      c
4   456     other  other  other
5   234abc  other  b      b
6   345bcd  other  c      c
7   456cde  other  other  other
Hope this helps :o)
It sounds like you may be interested in WITH clauses, which let you compose queries without having to use subqueries. For example,
#standardSQL
WITH Sales AS (
  SELECT 1 AS sku, 3.14 AS price UNION ALL
  SELECT 2 AS sku, 1.00 AS price UNION ALL
  SELECT 3 AS sku, 9.99 AS price UNION ALL
  SELECT 2 AS sku, 0.90 AS price UNION ALL
  SELECT 1 AS sku, 3.56 AS price
),
ItemTotals AS (
  SELECT sku, SUM(price) AS total
  FROM Sales
  GROUP BY sku
)
SELECT sku, total
FROM ItemTotals;
If you want to compose expressions, you can use CREATE TEMP FUNCTION statements to provide "macro-like" functionality:
#standardSQL
CREATE TEMP FUNCTION LooksLikeCheese(s STRING) AS (
  LOWER(s) IN ('gouda', 'gruyere', 'havarti')
);
SELECT
  s1, LooksLikeCheese(s1) AS s1_is_cheese,
  s2, LooksLikeCheese(s2) AS s2_is_cheese
FROM (
  SELECT 'spam' AS s1, 'ham' AS s2 UNION ALL
  SELECT 'havarti' AS s1, 'crackers' AS s2 UNION ALL
  SELECT 'gruyere' AS s1, 'ice cream' AS s2
);
Condition in subquery - select one value if the subquery returns 2 records, else the actual value
I have a subquery inside a bigger query which sometimes returns multiple values and sometimes only one value. Below is my query and the returned values:
select tran.customer_type
from transaction_record tran
where tran.TRANSACTION_ID = txn.id
customer_type can be 2 records - "LP" and "NA"
or customer_type can be 2 records - "SOEMTHING ELSE" and "NA"
or customer_type can be 1 record - "NA"
My problem is: if there are 2 records I have to print the value that is not NA, and if there is only one record I have to print whatever the value is.
Not exactly efficient (2 queries), but it should work! The inner query counts status/id combinations per group and the outer query removes all NA statuses that have another record on the same id. The innermost query is just for table simulation (I like it more than create table / insert scripts).
SELECT *
FROM (
    SELECT status, id, count(*) OVER (PARTITION BY id ORDER BY 3) AS rn
    from (
        SELECT 'NA' status, 1 id FROM dual UNION ALL
        SELECT 'LP' status, 1 id FROM dual UNION ALL
        SELECT 'NA' status, 2 id FROM dual UNION ALL
        SELECT 'SOEMTHING ELSE' status, 2 id FROM dual UNION ALL
        SELECT 'NA' status, 3 id FROM dual UNION ALL
        SELECT 'NA' status, 5 id FROM dual UNION ALL
        SELECT 'LP' status, 5 id FROM dual UNION ALL
        SELECT 'NA' status, 6 id FROM dual UNION ALL
        SELECT 'SOEMTHING ELSE' status, 6 id FROM dual UNION ALL
        SELECT 'NA' status, 22 id FROM dual
    )
)
WHERE NOT (status = 'NA' AND rn = 2)
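An alternative sketch on the same simulated status/id data (untested against the real transaction_record table, and assuming at most one non-'NA' status per id): pick the non-'NA' value when one exists, otherwise fall back to 'NA':
SELECT id,
       COALESCE(MAX(CASE WHEN status <> 'NA' THEN status END), 'NA') AS status
FROM (
    SELECT 'NA' status, 1 id FROM dual UNION ALL
    SELECT 'LP' status, 1 id FROM dual UNION ALL
    SELECT 'NA' status, 3 id FROM dual
)
GROUP BY id
ORDER BY id
-- the CASE returns NULL for 'NA' rows, MAX ignores NULLs, and COALESCE restores 'NA' when nothing else exists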
Changing columns to rows in Oracle
I need to convert the result of the below query into row output.
select 'Purchase','Sale','Discount','Out of Stock' from dual
Output:
Purchase
Sale
Discount
Out of Stock
You have to use UNPIVOT to get it. UNPIVOT is the opposite of PIVOT and it converts column values to rows.
with tbl(col1,col2,col3,col4) as (
    select 'Purchase','Sale','Discount','Out of Stock' from dual
), tbl2 as (
    SELECT * FROM tbl UNPIVOT (dat for col in (col1,col2,col3,col4))
)
select dat from tbl2
If you have one more row, then it is better to include the column names as well, like below.
with tbl(col1,col2,col3,col4) as (
    select 'Purchase','Sale','Discount','Out of Stock' from dual
    union
    select 'foo','bar','data','blah' from dual
)
SELECT * FROM tbl UNPIVOT (dat for col in (col1,col2,col3,col4));
But if you just want the values, then select only the 'dat' column as in the first example.
You could use UNION to have the values as different rows. For example:
WITH DATA(item) AS
( SELECT 'Purchase' FROM dual
  UNION
  SELECT 'Sale' FROM dual
  UNION
  SELECT 'Discount' FROM dual
  UNION
  SELECT 'Out of Stock' FROM dual
)
SELECT item FROM DATA;

ITEM
------------
Discount
Out of Stock
Purchase
Sale

Remember, UNION doesn't allow duplicates, so use UNION ALL if you want to allow duplicate rows.
SQL Select Condition Question
I have a quick question about a select statement condition. I have the following table with the following items. What I need to get is the object id that matches both type ids.
TypeId  ObjectId
1       10
2       10
1       11
So I need to get back object 10 because it matches both type ids 1 and 2.
SELECT ObjectId
FROM Table
WHERE TypeId = 1 AND TypeId = 2
Obviously this doesn't work because it won't match both conditions for the same row. How do I perform this query? Also note that I may pass in 2 or more type ids to narrow down the results.
Self-join:
SELECT t1.ObjectId
FROM Table AS t1
INNER JOIN Table AS t2
    ON t1.ObjectId = t2.ObjectId
    AND t1.TypeId = 1
    AND t2.TypeId = 2
Not sure how you want the behavior to work when passing in values, but that's a start.
I upvoted the answer from @Cade Roux, and that's how I would do it. But FWIW, here's an alternative solution:
SELECT ObjectId
FROM Table
WHERE TypeId IN (1, 2)
GROUP BY ObjectId
HAVING COUNT(*) = 2;
Assuming uniqueness over (TypeId, ObjectId).
Re the comment from @Josh that he may need to search for three or more TypeId values: the solution using JOIN requires a join per value you're searching for. The solution above using GROUP BY may be easier if you find yourself searching for an increasing number of values.
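If (TypeId, ObjectId) pairs can repeat, a small variation of the same idea (a sketch, not tested against the real table) drops the uniqueness assumption by counting distinct types instead of rows:
SELECT ObjectId
FROM Table
WHERE TypeId IN (1, 2)
GROUP BY ObjectId
HAVING COUNT(DISTINCT TypeId) = 2; -- 2 = how many TypeIds are being searched for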
This code is written with Oracle in mind. It should be general enough for other flavors of SQL.
select t1.ObjectId
from Table t1
join Table t2 on t2.TypeId = 2 and t1.ObjectId = t2.ObjectId
where t1.TypeId = 1;
To add additional TypeIds, you just have to add another join:
select t1.ObjectId
from Table t1
join Table t2 on t2.TypeId = 2 and t1.ObjectId = t2.ObjectId
join Table t3 on t3.TypeId = 3 and t1.ObjectId = t3.ObjectId
join Table t4 on t4.TypeId = 4 and t1.ObjectId = t4.ObjectId
where t1.TypeId = 1;
Important note: as you add more joins, performance will suffer a LOT.
In regards to Bill's answer, you can change it to the following to get rid of the need to assume uniqueness:
SELECT ObjectId
FROM (SELECT distinct ObjectId, TypeId from Table)
WHERE TypeId IN (1, 2)
GROUP BY ObjectId
HAVING COUNT(*) = 2;
His way of doing it scales better as the number of types gets larger.
Try this.
Sample Input (Case 1):
declare @t table(Typeid int, ObjectId int)
insert into @t
select 1,10 union all
select 2,10 union all
select 1,11
select * from @t
Sample Input (Case 2):
declare @t table(Typeid int, ObjectId int)
insert into @t
select 1,10 union all select 2,10 union all select 3,10 union all select 4,10 union all select 5,10 union all select 6,10 union all
select 1,11 union all select 2,11 union all select 3,11 union all select 4,11 union all select 5,11 union all
select 1,12 union all select 2,12 union all select 3,12 union all select 4,12 union all select 5,12 union all select 6,12
select * from @t
Sample Input (Case 3) [duplicate entries are there]:
declare @t table(Typeid int, ObjectId int)
insert into @t
select 1,10 union all select 2,10 union all select 1,10 union all select 2,10 union all select 3,10 union all select 4,10 union all select 5,10 union all select 6,10 union all
select 1,11 union all select 2,11 union all select 3,11 union all select 4,11 union all select 5,11 union all
select 1,12 union all select 2,12 union all select 3,12 union all select 4,12 union all select 5,12 union all select 6,12 union all select 3,12
For case 1, the output should be 10.
For cases 2 & 3, the output should be 10 and 12.
Query:
select X.ObjectId
from (
    select T.ObjectId, count(ObjectId) cnt
    from (select distinct ObjectId, Typeid from @t) T
    where T.Typeid in (select Typeid from @t)
    group by T.ObjectId
) X
join (select max(Typeid) maxcnt from @t) Y on X.cnt = Y.maxcnt