Doing a comparison using the previous row? - SQL

I'm trying to work out an efficient way of comparing two rows in SQL Server 2008. I need to write a query which finds all rows in the Movement table which have Speed < 10 N consecutive times.
The structure of the table is:
EventTime
Speed
If the data were:
2012-02-05 13:56:36.980, 2
2012-02-05 13:57:36.980, 11
2012-02-05 13:57:46.980, 2
2012-02-05 13:59:36.980, 2
2012-02-05 14:06:36.980, 22
2012-02-05 15:56:36.980, 2
Then it would return rows 3/4 (13:57:46.980 / 13:59:36.980) if I looked for two consecutive rows, and would return nothing if I looked for three consecutive rows. The data is ordered by EventTime (a DATETIME) only.
Any help you could give me would be great. I'm considering using cursors but they're usually pretty inefficient. Also, this table is approximately 10m rows in size, so the more efficient the better! :)
Thanks!
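
For reference, here's a minimal version of the table built from the sample data above, so the answers below can be tested (the column types are an assumption, since only the column names are given):
CREATE TABLE Movement
(
EventTime DATETIME,
Speed INT
)
INSERT INTO Movement VALUES ('2012-02-05 13:56:36.980', 2)
INSERT INTO Movement VALUES ('2012-02-05 13:57:36.980', 11)
INSERT INTO Movement VALUES ('2012-02-05 13:57:46.980', 2)
INSERT INTO Movement VALUES ('2012-02-05 13:59:36.980', 2)
INSERT INTO Movement VALUES ('2012-02-05 14:06:36.980', 22)
INSERT INTO Movement VALUES ('2012-02-05 15:56:36.980', 2)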

DECLARE
@n INT,
@speed_limit INT
SELECT
@n = 5,
@speed_limit = 10
;WITH
partitioned AS
(
SELECT
*,
CASE WHEN speed < @speed_limit THEN 1 ELSE 0 END AS PartitionID
FROM
Movement
)
,
sequenced AS
(
SELECT
ROW_NUMBER() OVER ( ORDER BY EventTime) AS MasterSeqID,
ROW_NUMBER() OVER (PARTITION BY PartitionID ORDER BY EventTime) AS PartIDSeqID,
*
FROM
partitioned
)
,
filter AS
(
SELECT
MasterSeqID - PartIDSeqID AS GroupID,
MIN(MasterSeqID) AS GroupFirstMastSeqID,
MAX(MasterSeqID) AS GroupFinalMastSeqID
FROM
sequenced
WHERE
PartitionID = 1
GROUP BY
MasterSeqID - PartIDSeqID
HAVING
COUNT(*) >= @n
)
SELECT
sequenced.*
FROM
filter
INNER JOIN
sequenced
ON sequenced.MasterSeqID >= filter.GroupFirstMastSeqID
AND sequenced.MasterSeqID <= filter.GroupFinalMastSeqID
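To see why MasterSeqID - PartIDSeqID identifies runs of consecutive slow rows, here are the intermediate values for the sample data (PartitionID = 1 rows only, with @speed_limit = 10):
EventTime, Speed, MasterSeqID, PartIDSeqID, GroupID
13:56:36.980, 2, 1, 1, 0
13:57:46.980, 2, 3, 2, 1
13:59:36.980, 2, 4, 3, 1
15:56:36.980, 2, 6, 4, 2
The two consecutive slow rows share GroupID 1, so with @n = 2 they form the only group that survives the HAVING clause.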
Alternative final steps (inspired by @t-clausen-dk), to avoid an additional JOIN. I would test both to see which is more performant.
,
filter AS
(
SELECT
MasterSeqID - PartIDSeqID AS GroupID,
COUNT(*) OVER (PARTITION BY MasterSeqID - PartIDSeqID) AS GroupSize,
*
FROM
sequenced
WHERE
PartitionID = 1
)
SELECT
*
FROM
filter
WHERE
GroupSize >= @n

declare @t table(EventTime datetime, Speed int)
insert @t values('2012-02-05 13:56:36.980', 2)
insert @t values('2012-02-05 13:57:36.980', 11)
insert @t values('2012-02-05 13:57:46.980', 2)
insert @t values('2012-02-05 13:59:36.980', 2)
insert @t values('2012-02-05 14:06:36.980', 22)
insert @t values('2012-02-05 15:56:36.980', 2)
declare @N int = 1
;with a as
(
select EventTime, Speed, row_number() over (order by EventTime) rn from @t
), b as
(
select EventTime, Speed, 1 grp, rn from a where rn = 1
union all
select a.EventTime, a.Speed, case when a.speed < 10 and b.speed < 10 then grp else grp + 1 end, a.rn
from a join b on a.rn = b.rn+1
), c as
(
select EventTime, Speed, count(*) over (partition by grp) cnt from b
)
select * from c
where cnt > @N
OPTION (MAXRECURSION 0) -- Thx Dems
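With the sample data and @N = 1 (i.e. more than one consecutive slow row), this returns the 13:57:46.980 and 13:59:36.980 rows, matching the expected result. Note that the recursive join on rn = rn + 1 walks the table one row at a time, so on a 10m-row table the set-based approaches above will likely scale better.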

Almost the same idea as Dems, a little bit different:
select * from (
select eventtime, speed, rnk, new_rnk,
rnk - new_rnk,
max(rnk) over (partition by speed, new_rnk-rnk) -
min(rnk) over (partition by speed, new_rnk-rnk) + 1 as no_consec
from (
select eventtime, rnk, speed,
row_number() over (partition by speed order by eventtime) as new_rnk
from (
select eventtime, speed,
row_number() over (order by eventtime) as rnk
from a
) a
where a.speed < 5
)
order by eventtime
)
where no_consec >= 2;
Here 5 is the speed limit and 2 is the minimum number of consecutive events.
I put the date as a number for simplicity of writing the CREATE statements.
SQLFiddle
EDIT:
To answer the comments, I've added three columns in the first inner query. To get only the first row you need to add pos_in_group = 1 to the WHERE clause, and the date range (min_date/max_date) is at your fingertips.
SQLFiddle
select eventtime, speed, min_date, max_date, pos_in_group
from (
select eventtime, speed, rnk, new_rnk,
rnk - new_rnk,
row_number() over (partition by speed, new_rnk-rnk order by eventtime) pos_in_group,
min(eventtime) over (partition by speed, new_rnk-rnk) min_date,
max(eventtime) over (partition by speed, new_rnk-rnk) max_date,
max(rnk) over (partition by speed, new_rnk-rnk) -
min(rnk) over (partition by speed, new_rnk-rnk) + 1 as no_consec
from (
select eventtime, rnk, speed,
row_number() over (partition by speed order by eventtime) as new_rnk
from (
select eventtime, speed,
row_number() over (order by eventtime) as rnk
from a
) a
where a.speed < 5
)
order by eventtime
)
where no_consec > 1;

Related

Use of COUNT in a WHERE statement - SQL

I have N transactions with camera_id = 6 and I want to sample every (N // 100)-th transaction.
I have the following query:
SELECT t.id from (
SELECT id, camera_id, start_ts, ROW_NUMBER() OVER (ORDER BY start_ts) AS rownum
FROM transactions
WHERE camera_id = 6
) as t
where t.rownum % (N / 100) = 1
order by t.start_ts
How can I change it so I don't need an additional query to determine N?
Untested
Does the following work for you? Add a windowed count in addition to your ROW_NUMBER and use that:
SELECT t.id from (
SELECT id, camera_id, start_ts,
Row_Number() OVER (ORDER BY start_ts) AS rownum,
Count(*) over() Qty
FROM transactions
WHERE camera_id = 6
) as t
where t.rownum % (Qty / 100) = 1
order by t.start_ts
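One caveat: with SQL Server's integer division, Qty / 100 is 0 whenever fewer than 100 rows match, and the modulo then raises a divide-by-zero error. A NULLIF guard sidesteps that (an untested sketch; with fewer than 100 rows it simply returns nothing instead of failing):
SELECT t.id FROM (
SELECT id, camera_id, start_ts,
ROW_NUMBER() OVER (ORDER BY start_ts) AS rownum,
COUNT(*) OVER () AS Qty
FROM transactions
WHERE camera_id = 6
) AS t
WHERE t.rownum % NULLIF(t.Qty / 100, 0) = 1
ORDER BY t.start_ts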

Lag functions and SUM

I need to get the list of users that have been offline for at least 20 min every day. Here's my data
I have this starting query but am stuck on how to sum the difference in offline_mins, i.e. how to add "and sum(offline_mins) >= 20" to the where clause:
SELECT
userid,
connected,
LAG(recordeddt) OVER(PARTITION BY userid
ORDER BY userid,
recordeddt) AS offline_period,
DATEDIFF(minute, LAG(recordeddt) OVER(PARTITION BY userid
ORDER BY userid,
recordeddt),recordeddt) offline_mins
FROM device_data where connected=0;
My expected results:
Thanks in advance.
This reads like a gaps-and-islands problem, where you want to group together adjacent rows having the same userid and status.
As a starter, here is a query that computes the islands:
select userid, connected, min(recordeddt) startdt, max(lead_recordeddt) enddt,
datediff(minute, min(recordeddt), max(lead_recordeddt)) duration
from (
select dd.*,
row_number() over(partition by userid order by recordeddt) rn1,
row_number() over(partition by userid, connected order by recordeddt) rn2,
lead(recordeddt) over(partition by userid order by recordeddt) lead_recordeddt
from device_data dd
) dd
group by userid, connected, rn1 - rn2
Now, say you want users that were offline for at least 20 minutes every day. You can break down the islands per day, and use a having clause for filtering:
select userid
from (
select recordedday, userid, connected,
datediff(minute, min(recordeddt), max(lead_recordeddt)) duration
from (
select dd.*, v.*,
row_number() over(partition by v.recordedday, userid order by recordeddt) rn1,
row_number() over(partition by v.recordedday, userid, connected order by recordeddt) rn2,
lead(recordeddt) over(partition by v.recordedday, userid order by recordeddt) lead_recordeddt
from device_data dd
cross apply (values (convert(date, recordeddt))) v(recordedday)
) dd
group by recordedday, userid, connected, rn1 - rn2
) dd
group by userid
having count(distinct case when connected = 0 and duration >= 20 then recordedday end) = count(distinct recordedday)
As noted, this is a gaps-and-islands problem. This is my take on it, using a simple LAG function to create groups, filtering out the connected rows, and then working on the date ranges.
CREATE TABLE #tmp(ID int, UserID int, dt datetime, connected int)
INSERT INTO #tmp VALUES
(1,1,'11/2/20 10:00:00',1),
(2,1,'11/2/20 10:05:00',0),
(3,1,'11/2/20 10:10:00',0),
(4,1,'11/2/20 10:15:00',0),
(5,1,'11/2/20 10:20:00',0),
(6,2,'11/2/20 10:00:00',1),
(7,2,'11/2/20 10:05:00',1),
(8,2,'11/2/20 10:10:00',0),
(9,2,'11/2/20 10:15:00',0),
(10,2,'11/2/20 10:20:00',0),
(11,2,'11/2/20 10:25:00',0),
(12,2,'11/2/20 10:30:00',0)
SELECT UserID, connected, DATEDIFF(minute, MIN(DT), MAX(DT)) OFFLINE_MINUTES
FROM
(
SELECT *, SUM(CASE WHEN connected <> LG THEN 1 ELSE 0 END) OVER (ORDER BY UserID,dt) grp
FROM
(
select *, LAG(connected,1,connected) OVER(PARTITION BY UserID ORDER BY UserID,dt) LG
from #tmp
) x
) y
WHERE connected <> 1
GROUP BY UserID,grp,connected
HAVING DATEDIFF(minute,MIN(DT), MAX(DT)) >= 20
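Against the sample rows above, user 1's offline island runs 10:05-10:20 (15 minutes, so the HAVING filters it out) while user 2's runs 10:10-10:30, so the result is:
UserID, connected, OFFLINE_MINUTES
2, 0, 20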

How to get the validity date range of a price from individual daily prices in SQL

I have some prices for the month of January.
Date,Price
1,100
2,100
3,115
4,120
5,120
6,100
7,100
8,120
9,120
10,120
Now, the output I need is a non-overlapping date range for each price.
price,from,To
100,1,2
115,3,3
120,4,5
100,6,7
120,8,10
I need to do this using SQL only.
For now, if I simply group by and take min and max dates, I get the below, which is an overlapping range:
price,from,to
100,1,7
115,3,3
120,4,10
This is a gaps-and-islands problem. The simplest solution is the difference of row numbers:
select price, min(date), max(date)
from (select t.*,
row_number() over (order by date) as seqnum,
row_number() over (partition by price order by date) as seqnum2
from t
) t
group by price, (seqnum - seqnum2)
order by min(date);
Why this works is a little hard to explain. But if you look at the results of the subquery, you will see how the adjacent rows are identified by the difference in the two values.
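For the sample data, the subquery produces the values below; grouping by (price, seqnum - seqnum2) yields exactly the islands in the expected output. Note that dates 4-5 and 6-7 share the difference 3 but have different prices, which is why price must be part of the grouping:
date, price, seqnum, seqnum2, seqnum - seqnum2
1, 100, 1, 1, 0
2, 100, 2, 2, 0
3, 115, 3, 1, 2
4, 120, 4, 1, 3
5, 120, 5, 2, 3
6, 100, 6, 3, 3
7, 100, 7, 4, 3
8, 120, 8, 3, 5
9, 120, 9, 4, 5
10, 120, 10, 5, 5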
SELECT Lag.price,Lag.[date] AS [From], MIN(Lead.[date]-Lag.[date])+Lag.[date] AS [to]
FROM
(
SELECT [date],[Price]
FROM
(
SELECT [date],[Price],LAG(Price) OVER (ORDER BY DATE,Price) AS LagID FROM #table1 A
)B
WHERE CASE WHEN Price <> ISNULL(LagID,1) THEN 1 ELSE 0 END = 1
)Lag
JOIN
(
SELECT [date],[Price]
FROM
(
SELECT [date],Price,LEAD(Price) OVER (ORDER BY DATE,Price) AS LeadID FROM [#table1] A
)B
WHERE CASE WHEN Price <> ISNULL(LeadID,1) THEN 1 ELSE 0 END = 1
)Lead
ON Lag.[Price] = Lead.[Price]
WHERE Lead.[date]-Lag.[date] >= 0
GROUP BY Lag.[date],Lag.[price]
ORDER BY Lag.[date]
Another method using ROWS UNBOUNDED PRECEDING
SELECT price, MIN([date]) AS [from], [end_date] AS [To]
FROM
(
SELECT *, MIN([abc]) OVER (ORDER BY DATE DESC ROWS UNBOUNDED PRECEDING ) end_date
FROM
(
SELECT *, CASE WHEN price = next_price THEN NULL ELSE DATE END AS abc
FROM
(
SELECT a.* , b.[date] AS next_date, b.price AS next_price
FROM #table1 a
LEFT JOIN #table1 b
ON a.[date] = b.[date]-1
)AA
)BB
)CC
GROUP BY price, end_date

Calculate percent changes in contiguous ranges in PostgreSQL

I need to calculate the price percent change in contiguous ranges. For example, if the price starts moving up or down and I have a sequence of decreasing or increasing values, I need to grab the first and last value of that sequence and calculate the change.
I'm using the window LAG function to calculate the direction; my problem is that I can't generate a unique rank for the sequences to calculate percent changes.
I tried combinations of RANK, ROW_NUMBER, etc. with no luck.
Here's my query
WITH partitioned AS (
SELECT
*,
lag(price, 1) over(ORDER BY time) AS lag_price
FROM prices
),
sequenced AS (
SELECT
*,
CASE
WHEN price > lag_price THEN 'up'
WHEN price < lag_price THEN 'down'
ELSE 'equal'
END
AS direction
FROM partitioned
),
ranked AS (
SELECT
*,
-- Here's is the problem
-- I need to calculate unique rnk value for specific sequence
DENSE_RANK() OVER ( PARTITION BY direction ORDER BY time) + ROW_NUMBER() OVER ( ORDER BY time DESC) AS rnk
-- DENSE_RANK() OVER ( PARTITION BY seq ORDER BY time),
-- ROW_NUMBER() OVER ( ORDER BY seq, time DESC),
-- ROW_NUMBER() OVER ( ORDER BY seq),
-- RANK() OVER ( ORDER BY seq)
FROM sequenced
),
changed AS (
SELECT *,
FIRST_VALUE(price) OVER(PARTITION BY rnk ) first_price,
LAST_VALUE(price) OVER(PARTITION BY rnk ) last_price,
(LAST_VALUE(price) OVER(PARTITION BY rnk ) / FIRST_VALUE(price) OVER(PARTITION BY rnk ) - 1) * 100 AS percent_change
FROM ranked
)
SELECT
*
FROM changed
ORDER BY time DESC;
and SQLFiddle with sample data
If anyone is interested, here's a solution from another forum:
with ct1 as /* detecting direction: up, down, equal */
(
select
price, time,
case
when lag(price) over (order by time) < price then 'up'
when lag(price) over (order by time) > price then 'down'
else 'equal'
end as dir
from
prices
)
, ct2 as /* setting reset points */
(
select
price, time, dir,
case
when coalesce(lag(dir) over (order by time), 'none') <> dir
then 1 else 0
end as rst
from
ct1
)
, ct3 as /* making groups */
(
select
price, time, dir,
sum(rst) over (order by time) as grp
from
ct2
)
select /* calculates min, max price per group */
price, time, dir,
min(price) over (partition by grp) as min_price,
max(price) over (partition by grp) as max_price
from
ct3
order by
time desc;
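The quoted solution stops at the min/max price per group; to get the percent change the question actually asks for, a final step along these lines should work, replacing the final SELECT above (a sketch reusing ct3; cast price to numeric first if it is an integer column):
select
price, time, dir,
(last_value(price) over w / first_value(price) over w - 1) * 100 as percent_change
from ct3
window w as (partition by grp order by time
rows between unbounded preceding and unbounded following)
order by time desc;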

Simultaneous calls

I'm trying to calculate the number of simultaneous calls at the time a particular call is made by looking at the datetime ranges. My query works, but takes ~10 minutes to perform for only 95,000 records, which is too long. Any ideas for optimization?
SELECT r.*,
rr.ChannelsActive 'ChannelsActive'
FROM #rg r
OUTER APPLY
(
SELECT SUM(1) AS ChannelsActive
FROM #rg r_inner
WHERE
(
r_inner.CallStart BETWEEN r.CallStart AND r.CallEnd
OR r_inner.CallEnd BETWEEN r.CallStart AND r.CallEnd
OR r.CallStart BETWEEN r_inner.CallStart AND r_inner.CallEnd
OR r.CallEnd BETWEEN r_inner.CallStart AND r_inner.CallEnd
)
) rr
Example Data
CREATE TABLE #rg
(
CallStart DATETIME,
CallEnd DATETIME
)
CREATE INDEX ix1
ON #rg(CallStart, CallEnd)
CREATE INDEX ix2
ON #rg(CallEnd, CallStart);
WITH T(N, R)
AS (SELECT TOP (95000) ROW_NUMBER() OVER (ORDER BY (SELECT 0)) AS RN,
ABS(120 + 30 * SQRT(-2 * LOG(ABS(CAST(CAST(CRYPT_GEN_RANDOM(8) AS BIGINT) AS FLOAT) / 9223372036854775807))) * COS(2 * PI() * ABS(CAST(CAST(CRYPT_GEN_RANDOM(8) AS BIGINT) AS FLOAT) / 9223372036854775807)))
FROM sys.all_objects o1,
sys.all_objects o2)
INSERT INTO #rg
SELECT DATEADD(SECOND, N, GETDATE()),
DATEADD(SECOND, N + R, GETDATE())
FROM T
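(For what it's worth, the setup CTE generates 95,000 rows whose call durations are approximately normally distributed, with a mean of 120 seconds and a standard deviation of 30, via a Box-Muller transform over CRYPT_GEN_RANDOM.)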
This should do it:
;WITH cteCallEvents As
(
SELECT *, CallStart As EventTime, 1 As EventType FROM #rg r
UNION ALL
SELECT *, CallEnd As EventTime, 0 As EventType FROM #rg r
)
, cteCallCounts As
(
SELECT *,
ROW_NUMBER() OVER(Order By EventTime) as EventCount,
ROW_NUMBER() OVER(Partition By EventType Order By EventTime) as TypeCount
FROM cteCallEvents
)
SELECT *,
2*TypeCount - EventCount As OpenCalls
FROM cteCallCounts
WHERE EventType = 1
It should take a couple of seconds at most, and it should work on any SQL Server 2005+.
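Why it works: at any start event, EventCount is the number of events so far and TypeCount is the number of starts so far, so the number of ends so far is EventCount - TypeCount, and the open-call count is starts minus ends, i.e. TypeCount - (EventCount - TypeCount) = 2*TypeCount - EventCount.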
Use SQL like this to get a list of start/end events...
Select CallStart, 1 As CallCount From #rg
Union All
Select CallEnd, -1 From #rg
Order By CallStart
...then treat this as a simple running totals problem, which is solved differently depending upon your SQL Server version or can be easily addressed in code if that's an option.
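On SQL Server 2012 or later, for instance, a windowed SUM over that event list gives the running total directly (a sketch, reusing the #rg table from the question):
;WITH Events AS
(
SELECT CallStart AS EventTime, 1 AS CallCount FROM #rg
UNION ALL
SELECT CallEnd, -1 FROM #rg
)
SELECT EventTime,
SUM(CallCount) OVER (ORDER BY EventTime ROWS UNBOUNDED PRECEDING) AS OpenCalls
FROM Events
ORDER BY EventTime;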