I understand that we can rewrite percentile_cont within Teradata as:
SELECT
part_col
,data_col
+ ((MIN(data_col) OVER (PARTITION BY part_col ORDER BY data_col ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) - data_col)
* (((COUNT(*) OVER (PARTITION BY part_col) - 1) * x) MOD 1)) AS percentile_cont
FROM tab
QUALIFY ROW_NUMBER() OVER (PARTITION BY part_col ORDER BY data_col)
= CAST((COUNT(*) OVER (PARTITION BY part_col) - 1) * x AS INT) + 1;
See this very helpful discussion for more information.
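To make the arithmetic concrete, a worked check of the formula above with 12 rows and x = 0.90:
target row = CAST((12 - 1) * 0.90 AS INT) + 1 = CAST(9.9 AS INT) + 1 = 10
fraction   = ((12 - 1) * 0.90) MOD 1 = 0.9
So the QUALIFY keeps the 10th value and the SELECT interpolates 0.9 of the way toward the 11th.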
Understanding that replacing x with 0.90 returns the 90th percentile, is there an elegant way of extending this to return multiple percentiles in one pass?
For example, say I want to extend this and return the 25th, 50th, and 75th percentiles in one pass. Is this possible? It seems like I would need multiple QUALIFY clauses. Similarly, if I want the equivalent of multiple GROUP BY columns, is that just a matter of passing more columns to the PARTITION BY?
-- SQL:2008 Equivalent pseudo-code
SELECT
part_col_a
,part_col_b
,PERCENTILE_CONT(0.25) WITHIN GROUP (ORDER BY order_col) AS p25
,PERCENTILE_CONT(0.50) WITHIN GROUP (ORDER BY order_col) AS p50
,PERCENTILE_CONT(0.75) WITHIN GROUP (ORDER BY order_col) AS p75
FROM tab
GROUP BY
part_col_a
,part_col_b
You should read my blog post in full; the final query does exactly what you want :-)
SELECT part_col
,MIN(pc25) OVER (PARTITION BY part_col) AS quartile_1
,MIN(pc50) OVER (PARTITION BY part_col) AS quartile_2
,MIN(pc75) OVER (PARTITION BY part_col) AS quartile_3
FROM
(
SELECT
part_col
,COUNT(*) OVER (PARTITION BY part_col) - 1 AS N
,ROW_NUMBER() OVER (PARTITION BY part_col ORDER BY data_col) - 1 AS rowno
,MIN(data_col) OVER (PARTITION BY part_col ORDER BY data_col ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) - data_col AS diff
,CASE
WHEN rowno = CAST(N * 0.25 AS INT)
THEN data_col +(((N * 0.25) MOD 1) * diff)
END AS pc25
,CASE
WHEN rowno = CAST(N * 0.50 AS INT)
THEN data_col +(((N * 0.50) MOD 1) * diff)
END AS pc50
,CASE
WHEN rowno = CAST(N * 0.75 AS INT)
THEN data_col +(((N * 0.75) MOD 1) * diff)
END AS pc75
FROM tab
QUALIFY rowno = CAST(N * 0.25 AS INT)
OR rowno = CAST(N * 0.50 AS INT)
OR rowno = CAST(N * 0.75 AS INT)
) AS dt
QUALIFY ROW_NUMBER() OVER (PARTITION BY part_col ORDER BY part_col) = 1
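As for the GROUP BY part of the question: yes, multiple grouping columns simply become extra columns in every PARTITION BY. A condensed sketch with the two columns from your pseudo-code, part_col_a and part_col_b, showing only the median:
SELECT part_col_a
,part_col_b
,MIN(pc50) OVER (PARTITION BY part_col_a, part_col_b) AS median
FROM
(
SELECT
part_col_a
,part_col_b
,COUNT(*) OVER (PARTITION BY part_col_a, part_col_b) - 1 AS N
,ROW_NUMBER() OVER (PARTITION BY part_col_a, part_col_b ORDER BY data_col) - 1 AS rowno
,MIN(data_col) OVER (PARTITION BY part_col_a, part_col_b ORDER BY data_col ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) - data_col AS diff
,CASE
WHEN rowno = CAST(N * 0.50 AS INT)
THEN data_col +(((N * 0.50) MOD 1) * diff)
END AS pc50
FROM tab
QUALIFY rowno = CAST(N * 0.50 AS INT)
) AS dt
QUALIFY ROW_NUMBER() OVER (PARTITION BY part_col_a, part_col_b ORDER BY part_col_a) = 1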
I have N transactions with camera_id = 6 and I want to sample every (N / 100)-th transaction.
I have the following query:
SELECT t.id from (
SELECT id, camera_id, start_ts, ROW_NUMBER() OVER (ORDER BY start_ts) AS rownum
FROM transactions
WHERE camera_id = 6
) as t
where t.rownum % (N / 100) = 1
order by t.start_ts
How can I change it so I don't need an additional query to determine N?
Untested
Does the following work for you? Add a windowed count alongside your ROW_NUMBER and use that:
SELECT t.id
FROM (
    SELECT id, camera_id, start_ts,
           ROW_NUMBER() OVER (ORDER BY start_ts) AS rownum,
           COUNT(*) OVER () AS Qty
    FROM transactions
    WHERE camera_id = 6
) AS t
WHERE t.rownum % (t.Qty / 100) = 1
ORDER BY t.start_ts
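One caveat with this sketch: if the camera has fewer than 100 rows, Qty / 100 evaluates to 0 on platforms with integer division (SQL Server does this) and the modulo raises a divide-by-zero error. A NULLIF guard sidesteps that; the predicate then simply returns no sample rows for such cameras:
WHERE t.rownum % NULLIF(t.Qty / 100, 0) = 1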
I'm trying to calculate a 3-period Exponential Moving Average without using any loops. I've got the math down, and to calculate it I have to do something like:
EMA(n) = SUM( Value(t) * K * (1 - K) ^ (n - t) ) for t = 1 .. n
Where EMA(n) is the moving average after the n-th item, t is the item index, and K is a constant.
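For concreteness, with n = 3 items and K = 0.5 (the constant the query below uses), the sum expands to:
EMA(3) = Value(1) * 0.5 * (1 - 0.5)^2
       + Value(2) * 0.5 * (1 - 0.5)^1
       + Value(3) * 0.5 * (1 - 0.5)^0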
So, I tried something like this in T-SQL.
select EMA03 = SUM( xValue * (0.5) * POWER( 0.5, MAX(rn) - rn ) ) OVER ( PARTITION BY nClient ORDER BY myDate ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )
from ( select myDate
, xValue
, nClient
, rn = ROW_NUMBER() OVER ( PARTITION BY nClient ORDER BY myDate )
from myTable ) A
But the problem is that I can't nest MAX(rn) inside another window function. I have to somehow figure out how many rows the OVER clause covers and use that in the expression. Is there any way to do it?
How about defining the count in the subquery?
select EMA03 = SUM( xValue * (0.5) * POWER( 0.5, cnt - rn ) ) OVER
( PARTITION BY nClient
ORDER BY myDate
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
)
from (select myDate, xValue, nClient,
ROW_NUMBER() OVER (PARTITION BY nClient ORDER BY myDate) as rn,
count(*) over (partition by nClient) as cnt
from myTable
) A
Try this if it works for you. SQL Server won't accept a subquery inside a windowed aggregate, so the correlated subquery that counts each client's rows has to live in the derived table:
select EMA03 = SUM( xValue * 0.5 * POWER( 0.5, cnt - rn ) )
       OVER ( PARTITION BY nClient ORDER BY myDate
              ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW )
from ( select t.myDate
            , t.xValue
            , t.nClient
            , rn  = ROW_NUMBER() OVER ( PARTITION BY t.nClient ORDER BY t.myDate )
            -- correlated subquery: total number of rows for this client
            , cnt = ( select count(*) from myTable m where m.nClient = t.nClient )
       from myTable t ) A
You probably just need to add another layer of sub-query. And while we're at it, let's use CTEs for readability's sake.
WITH CTE1 AS
(
SELECT myDate, xValue, nClient
, rn = ROW_NUMBER() OVER (PARTITION BY nClient ORDER BY myDate)
FROM myTable
),
CTE2 AS
(
SELECT c.*, max_rn = MAX(rn) OVER (PARTITION BY nClient) -- per-client row count
FROM CTE1 c
)
SELECT c.*
, EMA03 = SUM(xValue * 0.5 * POWER(0.5, max_rn - rn)) OVER (PARTITION BY nClient ORDER BY myDate ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
FROM CTE2 c;
I got stuck on something similar a few hours back and worked out less messy code for outputting the 25th, 50th, and 75th percentiles in a single Teradata query. It can be extended further to produce a five-point summary; for the minimum and maximum, change the static values according to your population estimate.
Somewhere someone asked for an elegant approach, so I'm sharing mine.
Here's the code:
SELECT MAX(PER_MIN) AS PER_MIN,
MAX(PER_25) AS PER_25,
MAX(PER_50) AS PER_50,
MAX(PER_75) AS PER_75,
MAX(PER_MAX) AS PER_MAX
FROM (SELECT CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = CAST(COUNT(*) OVER() * 0.01 AS INT) THEN DURATION_MACRO_CURR END AS PER_MIN,
CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = CAST(COUNT(*) OVER() * 0.25 AS INT) THEN DURATION_MACRO_CURR END AS PER_25,
CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = CAST(COUNT(*) OVER() * 0.50 AS INT) THEN DURATION_MACRO_CURR END AS PER_50,
CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = CAST(COUNT(*) OVER() * 0.75 AS INT) THEN DURATION_MACRO_CURR END AS PER_75,
CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = CAST(COUNT(*) OVER() * 0.99 AS INT) THEN DURATION_MACRO_CURR END AS PER_MAX
FROM PROD_EXP_DL_CVM.PROD_CVM
WHERE PW_END_DATE = '2016-10-18'
) BASE
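One caveat: CAST(COUNT(*) OVER() * 0.01 AS INT) truncates to 0 whenever the filtered set has fewer than 100 rows, and since ROW_NUMBER starts at 1 the PER_MIN case then never fires. A hedged fix, assuming a Teradata release with GREATEST (16.20+; an equivalent CASE expression works on older releases):
CASE WHEN ROW_NUMBER() OVER(ORDER BY DURATION_MACRO_CURR ASC) = GREATEST(CAST(COUNT(*) OVER() * 0.01 AS INT), 1) THEN DURATION_MACRO_CURR END AS PER_MIN,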
I would do this using conditional aggregation:
select min(DURATION_MACRO_CURR) as min_val,
min(case when seqnum >= 0.25 * cnt then DURATION_MACRO_CURR end) as percentile_25,
min(case when seqnum >= 0.50 * cnt then DURATION_MACRO_CURR end) as percentile_50,
min(case when seqnum >= 0.75 * cnt then DURATION_MACRO_CURR end) as percentile_75,
max(DURATION_MACRO_CURR) as max_val
from (select pc.*,
row_number() over (order by DURATION_MACRO_CURR) as seqnum,
count(*) over () as cnt
from PROD_EXP_DL_CVM.PROD_CVM pc
where pc.PW_END_DATE = '2016-10-18'
) pc;
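If you also need these percentiles per group, a sketch (part_col is an assumed column name; it is added to both window functions and the GROUP BY):
select part_col,
       min(case when seqnum >= 0.25 * cnt then DURATION_MACRO_CURR end) as percentile_25,
       min(case when seqnum >= 0.50 * cnt then DURATION_MACRO_CURR end) as percentile_50,
       min(case when seqnum >= 0.75 * cnt then DURATION_MACRO_CURR end) as percentile_75
from (select pc.*,
             row_number() over (partition by part_col order by DURATION_MACRO_CURR) as seqnum,
             count(*) over (partition by part_col) as cnt
      from PROD_EXP_DL_CVM.PROD_CVM pc
      where pc.PW_END_DATE = '2016-10-18'
     ) pc
group by part_col;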
I have the same problem as in this question, but in Firebird 2.5:
Calculating the Weighted Average Cost of products stock
And this answer didn't work, as Firebird 2.5 doesn't have ROW_NUMBER:
with recursive
stock_temp as (
select
*,
row_number() over(partition by product_id order by row_num) as rn
from
stock_table
)
,cte as (
select
document_type, document_date,
product_id, qty_out, qty_in, price,
row_num, stock_balance, rn,
price as wac
from
stock_temp where document_type = 'SI'
union all
select
sub.document_type, sub.document_date,
sub.product_id, sub.qty_out, sub.qty_in, sub.price,
sub.row_num, sub.stock_balance, sub.rn,
case when sub.qty_in = 0 then main.wac else
((sub.stock_balance - sub.qty_in) * main.wac + sub.qty_in * sub.price)
/ ((sub.stock_balance - sub.qty_in) + sub.qty_in) end as wac
from
cte as main
join stock_temp as sub
on (main.product_id = sub.product_id and main.rn + 1 = sub.rn)
)
select * from cte
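One possible workaround (a sketch, untested on Firebird 2.5): if row_num is unique within each product_id, a correlated COUNT can stand in for ROW_NUMBER in the first CTE, since Firebird 2.5 does support correlated subqueries in the select list:
stock_temp as (
  select s.*,
         (select count(*)
          from stock_table s2
          where s2.product_id = s.product_id
            and s2.row_num <= s.row_num) as rn
  from stock_table s
)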
I'm trying to work out an efficient way of comparing two rows in SQL Server 2008. I need to write a query which finds all rows in the Movement table which have Speed < 10 N consecutive times.
The structure of the table is:
EventTime
Speed
If the data were:
2012-02-05 13:56:36.980, 2
2012-02-05 13:57:36.980, 11
2012-02-05 13:57:46.980, 2
2012-02-05 13:59:36.980, 2
2012-02-05 14:06:36.980, 22
2012-02-05 15:56:36.980, 2
Then it would return rows 3 and 4 (13:57:46.980 / 13:59:36.980) if I looked for two consecutive rows, and would return nothing if I looked for three consecutive rows. The data is ordered by EventTime only.
Any help you could give me would be great. I'm considering cursors, but they're usually pretty inefficient. Also, this table is approximately 10m rows, so the more efficient the better! :)
Thanks!
DECLARE
@n INT,
@speed_limit INT
SELECT
@n = 5,
@speed_limit = 10
;WITH
partitioned AS
(
SELECT
*,
CASE WHEN speed < @speed_limit THEN 1 ELSE 0 END AS PartitionID
FROM
Movement
)
,
sequenced AS
(
SELECT
ROW_NUMBER() OVER ( ORDER BY EventTime) AS MasterSeqID,
ROW_NUMBER() OVER (PARTITION BY PartitionID ORDER BY EventTime) AS PartIDSeqID,
*
FROM
partitioned
)
,
filter AS
(
SELECT
MasterSeqID - PartIDSeqID AS GroupID,
MIN(MasterSeqID) AS GroupFirstMastSeqID,
MAX(MasterSeqID) AS GroupFinalMastSeqID
FROM
sequenced
WHERE
PartitionID = 1
GROUP BY
MasterSeqID - PartIDSeqID
HAVING
COUNT(*) >= @n
)
SELECT
sequenced.*
FROM
filter
INNER JOIN
sequenced
ON sequenced.MasterSeqID >= filter.GroupFirstMastSeqID
AND sequenced.MasterSeqID <= filter.GroupFinalMastSeqID
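To see why MasterSeqID - PartIDSeqID pins down each run: within a block of consecutive sub-limit rows, both row numbers advance by 1, so their difference stays constant, and it jumps whenever a fast row breaks the run. On the sample data (speed limit 10):
EventTime     Speed  MasterSeqID  PartIDSeqID  GroupID
13:56:36.980    2         1            1          0
13:57:46.980    2         3            2          1
13:59:36.980    2         4            3          1
15:56:36.980    2         6            4          2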
Alternative final steps (inspired by @t-clausen-dk), to avoid an additional JOIN. I would test both to see which is more performant.
,
filter AS
(
SELECT
MasterSeqID - PartIDSeqID AS GroupID,
COUNT(*) OVER (PARTITION BY MasterSeqID - PartIDSeqID) AS GroupSize,
*
FROM
sequenced
WHERE
PartitionID = 1
)
SELECT
*
FROM
filter
WHERE
GroupSize >= @n
declare @t table(EventTime datetime, Speed int)
insert @t values('2012-02-05 13:56:36.980', 2)
insert @t values('2012-02-05 13:57:36.980', 11)
insert @t values('2012-02-05 13:57:46.980', 2)
insert @t values('2012-02-05 13:59:36.980', 2)
insert @t values('2012-02-05 14:06:36.980', 22)
insert @t values('2012-02-05 15:56:36.980', 2)
declare @N int = 1
;with a as
(
select EventTime, Speed, row_number() over (order by EventTime) rn from @t
), b as
(
select EventTime, Speed, 1 grp, rn from a where rn = 1
union all
select a.EventTime, a.Speed, case when a.speed < 10 and b.speed < 10 then grp else grp + 1 end, a.rn
from a join b on a.rn = b.rn+1
), c as
(
select EventTime, Speed, count(*) over (partition by grp) cnt from b
)
select * from c
where cnt > @N
OPTION (MAXRECURSION 0) -- Thx Dems
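A note on that last line: OPTION (MAXRECURSION 0) removes SQL Server's default limit of 100 recursion levels. Since this CTE advances one row per recursion level, the default cap would make the query fail once the table has more than about 100 rows.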
Almost the same idea as Dems', just a little bit different:
select * from (
select eventtime, speed, rnk, new_rnk,
rnk - new_rnk as grp_id,
max(rnk) over (partition by speed, new_rnk-rnk) -
min(rnk) over (partition by speed, new_rnk-rnk) + 1 as no_consec
from (
select eventtime, rnk, speed,
row_number() over (partition by speed order by eventtime) as new_rnk
from (
select eventtime, speed,
row_number() over (order by eventtime) as rnk
from a
) a
where a.speed < 5
) b
) c
where no_consec >= 2
order by eventtime;
Here, 5 is the speed limit and 2 is the minimum number of consecutive events.
In the fiddle I put the date in as a number, for simplicity when writing the CREATE statements.
SQLFIDDLE
EDIT:
To answer the comments, I've added three columns in the first inner query. To get only the first row of each run, add pos_in_group = 1 to the WHERE clause; the distance (min_date to max_date) is then at your fingertips.
SQLFIDDLE
select eventtime, speed, min_date, max_date, pos_in_group
from (
select eventtime, speed, rnk, new_rnk,
rnk - new_rnk as grp_id,
row_number() over (partition by speed, new_rnk-rnk order by eventtime) pos_in_group,
min(eventtime) over (partition by speed, new_rnk-rnk) min_date,
max(eventtime) over (partition by speed, new_rnk-rnk) max_date,
max(rnk) over (partition by speed, new_rnk-rnk) -
min(rnk) over (partition by speed, new_rnk-rnk) + 1 as no_consec
from (
select eventtime, rnk, speed,
row_number() over (partition by speed order by eventtime) as new_rnk
from (
select eventtime, speed,
row_number() over (order by eventtime) as rnk
from a
) a
where a.speed < 5
) b
) c
where no_consec > 1
order by eventtime;
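For example, to keep just the first row of each qualifying run, the final filter becomes:
where no_consec > 1 and pos_in_group = 1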