Remove duplicate values when all values are the same - SQL

I am using SQL Workbench/J connected to Amazon Redshift.
I have the following data in a table (there are more columns that need to be kept, but they contain exactly the same values for each unique claim_id regardless of line number):
Member ID | Claim_ID | Line_Number
----------+----------+------------
1         | 100      | 1
1         | 100      | 2
1         | 100      | 1
1         | 100      | 2
2         | 101      | 13
2         | 101      | 13
2         | 101      | 13
2         | 101      | 13
3         | 102      | 12
3         | 102      | 12
1         | 103      | 2
1         | 103      | 2
I want it to become the following, which removes any duplicates based on claim_id (it does not matter which line number is kept):
Member ID | Claim_ID | Line_Number
----------+----------+------------
1         | 100      | 1
2         | 101      | 13
3         | 102      | 12
1         | 103      | 2
I have tried the following:
select er_main.member_id, er_main.claim_id, er_main.line_number,
temp.claim_id, temp.line_number
from OK_ER_30 er_main
inner join (
select claim_id, line_number,
row_number() over (partition by claim_id order by line_number desc) as seqnum
from
OK_ER_30 temp) temp
ON er_main.claim_id = temp.claim_id and temp.seqnum = 1
Order by er_main.claim_id, temp.line_number
and this:
select * from ok_er_30
where claim_id in
(select distinct claim_id
from ok_er_30
group by claim_id
)
order by claim_id desc
I have checked many other ways of pulling only one row per distinct claim_id but nothing has worked.

Try this:
select Member_ID, Claim_ID, max(Line_Number) as Line_Number
from OK_ER_30
group by Member_ID, Claim_ID

Check out the following code.
declare @OK_ER_30 table(Member_ID int, Claim_ID int, Line_Number int);
insert @OK_ER_30 values
(1, 100, 1),
(1, 100, 2),
(1, 100, 1),
(1, 100, 2),
(2, 101, 13),
(2, 101, 13),
(2, 101, 13),
(2, 101, 13),
(3, 102, 12),
(3, 102, 12),
(1, 103, 2),
(1, 103, 2);
with
t as(
select *, row_number() over(
partition by Member_ID, Claim_ID order by (select 0)
) rn
from @OK_ER_30
)
delete from t where rn > 1; -- deleting through the CTE removes the duplicate rows from the underlying table
select * from @OK_ER_30;

Try this:
select Member_ID, Claim_ID, max(Line_Number) as Line_Number from OK_ER_30 group by Member_ID, Claim_ID
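
For Redshift itself (where the T-SQL table-variable demo above will not run), a window-function version of the same idea is worth noting. This is only a sketch, assuming the table is named OK_ER_30 as in the question and that any line number per claim is acceptable:

select member_id, claim_id, line_number
from (
    select *,
           row_number() over (partition by claim_id order by line_number) as seqnum
    from OK_ER_30
) t
where seqnum = 1
order by claim_id;

Because the inner select uses *, any additional columns that have to be kept come along automatically; to materialize the deduplicated result in Redshift, the query can feed a CREATE TABLE ... AS or an insert into a cleaned copy of the table.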

Related

Distribute values to several rows in SQL Server

I need help with SQL Server: how to distribute a row value across several rows with the same id. To illustrate:
Id = ProductInventoryCode
Qty = QuantityInStock
ForDistribution:
Id | Qty | TotalNoOfBranchesWithId
---+-----+-------------------------
1  | 40  | 2
2  | 33  | 3
3  | 21  | 2
A table that will receive the distributed values
Id | BranchCode | Qty | QtyFromForDistributionTable
---+------------+-----+-----------------------------
1  | 101        | 13  | 20
1  | 102        | 8   | 20
2  | 101        | 10  | 11
2  | 102        | 2   | 10
2  | 103        | 3   | 12
3  | 101        | 1   | 11
3  | 102        | 12  | 10
As much as possible, the distribution should be near equal for each id across its branches.
I got something like the query below, but got confused and lost my way.
with rs as
(
select
r.*, cume.cumequantity,
coalesce(s.shipped, 0) AS shipped
from
tmpForDistribution r
cross apply
(SELECT SUM([QuantityInStock]) AS cumequantity
FROM tmpForDistribution r2
WHERE r2.ProductInventoryCode = r.ProductInventoryCode) cume
left join
(SELECT ProductInventoryCode, COUNT(ProductInventoryCode) AS shipped
FROM tmpDistributed s
GROUP BY s.ProductInventoryCode) s ON r.ProductInventoryCode = s.ProductInventoryCode
)
select
rs.ProductInventoryCode, rs.cumequantity, rs.QuantityInStock,
***"how to distribute"***
from rs
I'm currently using SQL Server 2008
Here's a sample screen output: the upper result set has 145 branches, and the ForDistributionQty field to distribute is 3130. I end up with a fraction (DistVal = 21.586), which is not correct for this problem; it should be a whole number such as 21. However, if it is just 21, then 21 x 145 is only 3045, which is 85 units short.
Here we distribute the values, and then make a final "adjustment" to the record which has the largest quantity (arbitrary). But at the end of the day, the math works and the distributed values are square.
Note: I'm not sure why, in your sample, ID 2 did not get an even distribution.
Declare @Table table (Id int,BranchCode int,Qty int)
Insert Into @Table values
(1, 101, 13),
(1, 102, 8),
(2, 101, 10),
(2, 102, 2),
(2, 103, 3),
(3, 101, 1),
(3, 102, 12)
Declare @Dist table (ID int,Qty int)
Insert Into @Dist values
(1,40),
(2,33),
(3,49)
;with cte0 as (
Select A.*
,ToDist = cast(D.Qty as int)
,DistVal = cast(D.Qty as int)/C.Cnt
,RN = Row_Number() over (Partition By A.ID Order By cast(D.Qty as int)/C.Cnt Desc,A.Qty Desc)
From @Table A
Join (Select ID,Cnt=count(*) from @Table Group By ID) C on A.ID=C.ID
Join @Dist D on A.ID=D.ID )
, cte1 as (
Select ID,AdjVal=Sum(DistVal)-max(ToDist) From cte0 Group By ID
)
Select A.ID
,A.BranchCode
,A.Qty
,DistVal = DistVal - case when A.RN<=abs(AdjVal) then 1*sign(AdjVal) else 0 end
From cte0 A
Join cte1 B on (A.ID=B.Id)
Order By 1,2
Returns
ID BranchCode Qty DistVal
1 101 13 20
1 102 8 20
2 101 10 11
2 102 2 11
2 103 3 11
3 101 1 24
3 102 12 25
If you can tolerate decimal values, a subquery seems to give a better query plan (tested on SQL 2014, with some sensible keys in place, this avoids a table spool and some additional index scans):
Declare @Table table (Id int,BranchCode int,Qty int, primary key(id, branchcode))
Insert Into @Table values
(1, 101, 13),
(1, 102, 8),
(2, 101, 10),
(2, 102, 2),
(2, 103, 3),
(3, 101, 1),
(3, 102, 12)
Declare @Dist table (ID int primary key,Qty int)
Insert Into @Dist values
(1,40),
(2,33),
(3,21)
SELECT
t.id
,t.BranchCode
,t.Qty
,(d.Qty / CAST((SELECT COUNT(*) as cnt FROM @Table t2 where t.id = t2.id) AS decimal(10,2))) as DistributedQty
FROM @Table t
INNER JOIN @Dist d
ON d.id = t.Id
outputs:
Id  BranchCode  Qty  DistributedQty
1   101         13   20.00000000000
1   102         8    20.00000000000
2   101         10   11.00000000000
2   102         2    11.00000000000
2   103         3    11.00000000000
3   101         1    10.50000000000
3   102         12   10.50000000000
If you need DistributedQty to be an int and to retain remainders, then I can't think of a better solution than @John Cappelletti's, noting that uneven quantities may not come out as exactly even as you might hope (e.g. 32 distributed over three would result in a 12/10/10 distribution instead of an 11/11/10 distribution).
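
For completeness, the integer-division-plus-remainder idea can also be written directly, giving the first Qty % Cnt branches of each Id one extra unit. This is only a sketch against the same @Table and @Dist variables declared above, not a drop-in replacement:

select t.Id,
       t.BranchCode,
       d.Qty / c.Cnt
         + case when row_number() over (partition by t.Id order by t.Qty desc)
                     <= d.Qty % c.Cnt then 1 else 0 end as DistVal
from @Table t
join @Dist d on d.Id = t.Id
join (select Id, count(*) as Cnt from @Table group by Id) c on c.Id = t.Id
order by t.Id, t.BranchCode;

With this split, 33 over three branches comes out as 11/11/11 and 32 over three branches as 11/11/10.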

SQL Server 2008: find number of contiguous rows with equal values

I have a table with multiple Ids. Each Id has values arranged by a sequential index.
create table myValues
(
id int,
ind int,
val int
)
insert into myValues
values
(21, 5, 300),
(21, 4, 310),
(21, 3, 300),
(21, 2, 300),
(21, 1, 345),
(21, 0, 300),
(22, 5, 300),
(22, 4, 300),
(22, 3, 300),
(22, 2, 300),
(22, 1, 395),
(22, 0, 300)
I am trying to find the number of consecutive values that are the same.
The value field represents some data that should change on each entry (but need not be unique overall).
The problem is to find out when there are more than two consecutive rows with the same value (given the same id).
Thus I'm looking for an output like this:
id ind val count
21 5 300 1
21 4 310 1
21 3 300 2
21 2 300 2
21 1 345 1
21 0 300 1
22 5 300 4
22 4 300 4
22 3 300 4
22 2 300 4
22 1 395 1
22 0 300 1
I'm aware this is similar to the island and gaps problem discussed here.
However, those solutions all hinge on the ability to use a partition statement with values that are supposed to be consecutively increasing.
A solution that generates the ranges of "islands" as an intermediary would work as well, e.g.
id startind endind
21 3 2
22 5 2
Note that there can be many islands for each id.
I'm sure there is a simple adaptation of the island solution, but for the life of me I can't think of it.
Find the contiguous group, and then do a count() partitioned by that:
select id, ind, val, count(*) over (partition by id, val, grp)
from
(
select *, grp = dense_rank() over (partition by id, val order by ind) - ind
from myValues
) d
order by id, ind desc
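
To see why this works, look at the intermediate grp value for id 21, val 300: dense_rank() numbers those rows 1, 2, 3, 4 in ind order, so within an unbroken run of equal values the rank and ind rise together and their difference stays constant. A quick check against the sample myValues data above:

select id, ind, val,
       dense_rank() over (partition by id, val order by ind) - ind as grp
from myValues
where id = 21 and val = 300
order by ind;
-- ind 0 -> grp 1, ind 2 -> grp 0, ind 3 -> grp 0, ind 5 -> grp -1:
-- three distinct grp values, i.e. three separate runs of val 300 for id 21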
The other solution is obviously more elegant. I'll have to study it a little more closely myself.
with agg(id, min_ind, max_ind, cnt) as (
select id, min(ind), max(ind), count(*)
from
(
select id, ind, val, sum(brk) over (partition by id order by ind desc) as grp
from
(
select
id, ind, val,
coalesce(sign(lag(ind) over (partition by id, val order by ind desc) - ind - 1), 1) as brk
from myValues
) as d
) as d
group by id, grp
)
select v.id, v.ind, v.val, a.cnt
from myValues v inner join agg a on a.id = v.id and v.ind between min_ind and max_ind
order by v.id, v.ind desc;

SELECT TOP 20 Percent SQL

I have a query that selects the TOP 20 percent of employees by highest GrandTotal. But there is something that is not fair. For example, the top 20 percent out of 10 people is 2, so the output shows this:
EmpName GrandTotal
Kelvin 50
Gem 40
But the 3rd and 4th people also have 40 as their grand total. I need some ideas and advice on how to solve this problem.
SELECT TOP 20 PERCENT
EmpName,
SUM(Scoring) AS GrandTotal
FROM
[masterView]
GROUP BY
EmpName
ORDER BY
GrandTotal DESC, EmpName ASC
On SQL Server you can use WITH TIES in order to include ties:
SELECT TOP 20 PERCENT WITH TIES Id, sum(Score) as GrandTotal
FROM myTable GROUP BY Id
ORDER BY GrandTotal DESC
SQL Fiddle Demo
Test Data
CREATE TABLE Table1
([ID] int, [Score] int)
;
INSERT INTO Table1
([ID], [Score])
VALUES
(1, 10), (2, 20),
(3, 30), (4, 20),
(5, 10), (6, 40),
(7, 40), (8, 50),
(9, 10), (10, 5);
Query
with ranked as (
select
id,
rank() over (order by Score desc) as rnk
from Table1
),
total as (
select count(*) as total
from Table1
)
SELECT *
FROM ranked
CROSS JOIN total
WHERE ranked.rnk <= 0.2 * total.total
OUTPUT
| id | rnk | total |
|----|-----|-------|
| 8 | 1 | 10 |
| 6 | 2 | 10 |
| 7 | 2 | 10 |
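
If the report also needs EmpName and GrandTotal, as in the original query, the same ranking idea carries over. A sketch only, assuming the masterView columns from the question:

with ranked as (
    select EmpName,
           sum(Scoring) as GrandTotal,
           rank() over (order by sum(Scoring) desc) as rnk
    from masterView
    group by EmpName
),
total as (
    select count(distinct EmpName) as total from masterView
)
select r.EmpName, r.GrandTotal
from ranked r
cross join total t
where r.rnk <= 0.2 * t.total
order by r.GrandTotal desc, r.EmpName;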

Finding Missing Numbers When Data Is Grouped In SQL Server

I need to write a query that will calculate the missing numbers in a sequence when the data is "grouped". The data in each group is in sequence, but each individual group has its own sequence. The data looks something like this:
Id | Number |
-------------
1  | 250    |
1  | 270    | <260 Missing
1  | 280    | <290 Missing
1  | 300    |
1  | 310    |
2  | 110    |
2  | 130    | <120 Missing
2  | 140    |
3  | 260    |
3  | 270    |
3  | 290    | <280 Missing
3  | 300    |
3  | 340    | <310, 320 & 330 Missing
I have found a solution based on this post from CELKO here:
http://bytes.com/topic/sql-server/answers/511668-query-find-missing-number
In essence, to set up a demo, run the following:
CREATE TABLE Sequence
(seq INT NOT NULL,
PRIMARY KEY (seq));
INSERT INTO Sequence VALUES (1);
INSERT INTO Sequence VALUES (2);
INSERT INTO Sequence VALUES (3);
INSERT INTO Sequence VALUES (4);
INSERT INTO Sequence VALUES (5);
INSERT INTO Sequence VALUES (6);
INSERT INTO Sequence VALUES (7);
INSERT INTO Sequence VALUES (8);
INSERT INTO Sequence VALUES (9);
INSERT INTO Sequence VALUES (10);
CREATE TABLE Tickets
(buyer CHAR(5) NOT NULL,
ticket_nbr INTEGER DEFAULT 1 NOT NULL,
PRIMARY KEY (buyer, ticket_nbr));
INSERT INTO Tickets VALUES ('a', 2);
INSERT INTO Tickets VALUES ('a', 3);
INSERT INTO Tickets VALUES ('a', 4);
INSERT INTO Tickets VALUES ('b', 4);
INSERT INTO Tickets VALUES ('c', 1);
INSERT INTO Tickets VALUES ('c', 2);
INSERT INTO Tickets VALUES ('c', 3);
INSERT INTO Tickets VALUES ('c', 4);
INSERT INTO Tickets VALUES ('c', 5);
INSERT INTO Tickets VALUES ('d', 1);
INSERT INTO Tickets VALUES ('d', 6);
INSERT INTO Tickets VALUES ('d', 7);
INSERT INTO Tickets VALUES ('d', 9);
INSERT INTO Tickets VALUES ('e', 10);
SELECT DISTINCT T1.buyer, S1.seq
FROM Tickets AS T1, Sequence AS S1
WHERE seq <= (SELECT MAX(ticket_nbr) -- set the range
FROM Tickets AS T2
WHERE T1.buyer = T2.buyer)
AND seq NOT IN (SELECT ticket_nbr -- get missing numbers
FROM Tickets AS T3
WHERE T1.buyer = T3.buyer);
CELKO does mention that this is for a small number of tickets. In my case the numbers table is limited to 200 rows with a single column, which is the primary key, with each row an increment of 10, as that is what I am interested in. I modified CELKO's query as follows (added in a minimum for the range):
SELECT DISTINCT T1.buyer, S1.seq
FROM Tickets AS T1, Sequence AS S1
WHERE seq <= (SELECT MIN(ticket_nbr) -- set the MIN range
FROM Tickets AS T2
WHERE T1.buyer = T2.buyer)
AND seq <= (SELECT MAX(ticket_nbr) -- set the MAX range
FROM Tickets AS T2
WHERE T1.buyer = T2.buyer)
AND seq NOT IN (SELECT ticket_nbr -- get missing numbers
FROM Tickets AS T3
WHERE T1.buyer = T3.buyer)
ORDER BY buyer, seq;
The output would be those numbers that are missing:
buyer seq
a 1
b 1
b 2
b 3
e 1
e 2
e 3
e 4
e 5
e 6
e 7
e 8
e 9
This works exactly as I want; however, on my data set it is very slow (an 11-second run time at the moment; it appears to be the DISTINCT which slows things down tremendously, and it will presumably get worse as the base data set grows). I have tried all manner of things to make it more efficient, but sadly my ambition exceeds my knowledge. Is it possible to make the query above more efficient/faster? My only constraint is that the dataset I am making needs to be a SQL view (as it feeds a report) and will execute on SQL Azure.
Cheers
David
If my understanding is correct, you want to fill in the missing data from the table. The table would consist of ID and a Number which is incremented by 10.
CREATE TABLE Test(
ID INT,
Number INT
)
INSERT INTO Test VALUES
(1, 250), (1, 270), (1, 280), (1, 300), (1, 310),
(2, 110), (2, 130), (2, 140), (3, 260), (3, 270),
(3, 290), (3, 300), (3, 340);
You could do this by using a Tally Table and doing a CROSS JOIN on the Test table:
;WITH E1(N) AS(
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
)
,E2(N) AS(SELECT 1 FROM E1 a, E1 b)
,E4(N) AS(SELECT 1 FROM E2 a, E2 b)
,Tally(N) AS(
SELECT TOP (SELECT MAX(Number)/10 FROM Test)
(ROW_NUMBER() OVER(ORDER BY(SELECT NULL)) - 1) * 10
FROM E4
),
MinMax AS(
SELECT
ID,
Minimum = MIN(Number),
Maximum = MAX(Number)
FROM Test
GROUP BY ID
),
CrossJoined AS(
SELECT
m.ID,
Number = Minimum + t.N
FROM MinMax m
CROSS JOIN Tally t
WHERE
Minimum + t.N <= Maximum
)
SELECT * FROM CrossJoined c
ORDER BY c.ID, c.Number
RESULT
ID          Number
----------- --------------------
1 250
1 260
1 270
1 280
1 290
1 300
1 310
2 110
2 120
2 130
2 140
3 260
3 270
3 280
3 290
3 300
3 310
3 320
3 330
3 340
If you only want to find the missing Number from Test grouped by ID, just replace the final SELECT statement:
SELECT * FROM CrossJoined c
ORDER BY c.ID, c.Number
to:
SELECT c.ID, c.Number
FROM CrossJoined c
WHERE NOT EXISTS(
SELECT 1 FROM Test t
WHERE
t.ID = c.ID
AND t.Number = c.Number
)
ORDER BY c.ID, c.Number
RESULT
ID Number
----------- --------------------
1 260
1 290
2 120
3 280
3 310
3 320
3 330
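
Since the result has to be exposed as a SQL view for the report, the whole CTE chain can be wrapped in one. A sketch only, assuming the real table is called Test as in the demo; the view name MissingNumbers is just illustrative:

CREATE VIEW MissingNumbers
AS
WITH E1(N) AS(
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL
SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1
)
,E2(N) AS(SELECT 1 FROM E1 a, E1 b)
,E4(N) AS(SELECT 1 FROM E2 a, E2 b)
,Tally(N) AS(
SELECT TOP (SELECT MAX(Number)/10 FROM Test)
(ROW_NUMBER() OVER(ORDER BY(SELECT NULL)) - 1) * 10
FROM E4
)
,MinMax AS(
SELECT ID, MIN(Number) AS Minimum, MAX(Number) AS Maximum
FROM Test
GROUP BY ID
)
,CrossJoined AS(
SELECT m.ID, Minimum + t.N AS Number
FROM MinMax m
CROSS JOIN Tally t
WHERE Minimum + t.N <= Maximum
)
SELECT c.ID, c.Number
FROM CrossJoined c
WHERE NOT EXISTS(
SELECT 1 FROM Test t
WHERE t.ID = c.ID AND t.Number = c.Number
);
-- ORDER BY is not allowed inside a view; sort in the query that reads it:
-- SELECT * FROM MissingNumbers ORDER BY ID, Number;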

SQL: how to find leaf rows?

I have a self-related table myTable like:
ID | RefID
----------
1 | NULL
2 | 1
3 | 2
4 | NULL
5 | 2
6 | 5
7 | 5
8 | NULL
9 | 7
I need to get leaf rows at any depth.
Based on the table above, the result must be:
ID | RefID
----------
3 | 2
4 | NULL
6 | 5
8 | NULL
9 | 7
Thank you.
PS: the depth may vary; this is a very small example.
Try:
SELECT id,
refid
FROM mytable t
WHERE NOT EXISTS (SELECT 1
FROM mytable
WHERE refid = t.id)
DECLARE #t TABLE (id int NOT NULL, RefID int NULL);
INSERT #t VALUES (1, NULL), (2, 1), (3, 2), (5, NULL),
(6, 5), (4, NULL), (7, 5), (8, NULL), (9, 8), (10, 7);
WITH CTE AS
(
-- top level
SELECT id, RefID, id AS RootId, 0 AS CTELevel FROM #t WHERE REfID IS NULL
UNION ALL
SELECT T.id, T.RefID, RootId, CTELevel + 1 FROM #t T JOIN CTE ON T.RefID = CTE.id
), Leafs AS
(
SELECT
id, RefID, DENSE_RANK() OVER (PARTITION BY CTE.RootId ORDER BY CTELevel DESC) AS Rn
FROM CTE
)
SELECT
id, RefID
FROM
Leafs
WHERE
rn = 1
select t1.ID, t1.RefID
from myTable t1 left join myTable t2 on t1.ID = t2.RefID
where t2.RefID is null
Try this:
SELECT *
FROM
my_table
WHERE
id NOT IN
(
SELECT DISTINCT
refId
FROM
my_table
WHERE
refId IS NOT NULL
)
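
One caveat with that last NOT IN version: the refId IS NOT NULL filter is essential. If the subquery returns any NULL (and it would here, since rows 1, 4 and 8 have a NULL RefID), id NOT IN (...) evaluates to UNKNOWN for every row and the query returns nothing. A minimal illustration, assuming the same my_table:

-- returns no rows at all, because the NOT IN list contains NULLs
SELECT *
FROM my_table
WHERE id NOT IN (SELECT refId FROM my_table);

-- NULL-safe equivalent, which is essentially the first answer above
SELECT *
FROM my_table t
WHERE NOT EXISTS (SELECT 1 FROM my_table c WHERE c.refId = t.id);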