CTE query not returning any result - SQL

The following query works:
SELECT TOP 100 T.DWH_ID, T.date_time, T.TimeDiff, T.[End Date], T.SPS_Bereich, T.txtName
FROM (
    SELECT sto.[DWH_ID],
           sto.[SPS_Bereich],
           FORMAT(sto.[DateTime], 'dd-MM-yyyy HH:mm') AS date_time,
           sto.[txtName],
           sto.[TimeDiff],
           DATEADD(second, sto.[TimeDiff], FORMAT(sto.[DateTime], 'dd-MM-yyyy HH:mm')) AS [End Date]
    FROM [Stoerdaten].[sta].[Stoerungen] sto
    WHERE sto.Classname = 'Alarm' AND sto.TimeDiff > 60
) AS T
JOIN [IgnitionServer].[dbo].[scheduled_events_ISTProduction] cal
  ON T.date_time BETWEEN cal.start_date AND cal.end_date
 AND T.[End Date] BETWEEN cal.start_date AND cal.end_date
WHERE cal.typ = 1
ORDER BY [DWH_ID] DESC
But when I change it to a CTE, it doesn't return any results.
CTE query:
;WITH q1 AS (
    SELECT TOP 1000 [DWH_ID],
           SPS_Bereich,
           FORMAT([DateTime], 'dd-MM-yyyy HH:mm') AS date_time,
           [txtName],
           [TimeDiff],
           DATEADD(second, [TimeDiff], FORMAT([DateTime], 'dd-MM-yyyy HH:mm')) AS [End_Date]
    FROM [Stoerdaten].[sta].[Stoerungen]
    WHERE Classname = 'Alarm' AND TimeDiff > 60
)
SELECT q1.DWH_ID,
       q1.date_time,
       q1.TimeDiff,
       q1.[End_Date],
       q1.txtName,
       q1.SPS_Bereich
FROM q1
JOIN [IgnitionServer].[dbo].[scheduled_events_ISTProduction] cal
  ON q1.date_time BETWEEN cal.start_date AND cal.end_date
 AND q1.[End_Date] BETWEEN cal.start_date AND cal.end_date
WHERE cal.typ = 1
I don't understand what I am missing here. Any help is greatly appreciated.

Your CTE contains the TOP 1000 records in [sta].[Stoerungen] that match the WHERE criteria, and you then join from that to [scheduled_events_ISTProduction].
In your initial query, you're returning the TOP 100 after the JOIN has been made, so presumably whatever appears in the result of your CTE can't be joined to the records in [scheduled_events_ISTProduction].
If you just select everything out of your CTE, you should see that there are up to 1000 records in there, and you should also be able to verify the JOIN issue.
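One way to make the CTE version behave like the original is to drop TOP from the CTE and apply TOP 100 after the join, exactly as the first query does. A minimal sketch, reusing the names from the question (untested against your schema):
;WITH q1 AS (
    SELECT [DWH_ID],
           SPS_Bereich,
           FORMAT([DateTime], 'dd-MM-yyyy HH:mm') AS date_time,
           [txtName],
           [TimeDiff],
           DATEADD(second, [TimeDiff], FORMAT([DateTime], 'dd-MM-yyyy HH:mm')) AS [End_Date]
    FROM [Stoerdaten].[sta].[Stoerungen]
    WHERE Classname = 'Alarm' AND TimeDiff > 60
)
SELECT TOP 100 q1.DWH_ID, q1.date_time, q1.TimeDiff, q1.[End_Date], q1.txtName, q1.SPS_Bereich
FROM q1
JOIN [IgnitionServer].[dbo].[scheduled_events_ISTProduction] cal
  ON q1.date_time BETWEEN cal.start_date AND cal.end_date
 AND q1.[End_Date] BETWEEN cal.start_date AND cal.end_date
WHERE cal.typ = 1
ORDER BY q1.DWH_ID DESC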

Related

How to retrieve a default value even when no record exists in the database

Querying the data shown below, there is no record in the database for the day 2021-10-03.
date          value
2021-10-01    100
2021-10-02    90
2021-10-04    10
2021-10-05    40
I would like to execute the query using a date range, as in SELECT ... WHERE date BETWEEN '2021-10-01' AND '2021-10-05', and in case no data exists for a specific day, retrieve zero, as exemplified below:
date          value
2021-10-01    100
2021-10-02    90
2021-10-03    0
2021-10-04    10
2021-10-05    40
Is it possible in BigQuery?
I tried the query below, but it retrieved duplicated values.
WITH `project.myproject` AS (
  SELECT
    DATA_VENDA AS date,
    CAST(SUM(VLR_VENDA_TABELA) AS FLOAT64) AS total
  FROM `project.myproject`
  WHERE (DATA_VENDA BETWEEN '2020-10-02' AND '2020-10-07')
    AND COD_CP = '0000010232'
  GROUP BY DATA_VENDA
  ORDER BY DATA_VENDA
),
dates AS (
  SELECT total, date
  FROM `project.myproject`,
       UNNEST(GENERATE_DATE_ARRAY(date('2020-10-02'), date('2020-10-07'))) AS date
)
SELECT d.date, IFNULL(t.total, 0) total
FROM dates d
LEFT JOIN `project.myproject` t
  ON d.date = t.date
  AND d.total = t.total
ORDER BY d.date
I found the answer by running the command below. The difference from the previous attempt is that I removed the line AND d.total = t.total, which was responsible for creating the duplicated data. The final answer follows below:
WITH `project.myproject` AS (
  SELECT
    DATA_VENDA AS date,
    CAST(SUM(VLR_VENDA_TABELA) AS FLOAT64) AS total
  FROM `project.myproject`
  WHERE (DATA_VENDA BETWEEN '2020-10-02' AND '2020-10-07')
    AND COD_CP = '0000010232'
  GROUP BY DATA_VENDA
  ORDER BY DATA_VENDA
),
dates AS (
  SELECT total, date
  FROM `project.myproject`,
       UNNEST(GENERATE_DATE_ARRAY(date('2020-10-02'), date('2020-10-07'))) AS date
)
SELECT d.date, IFNULL(t.total, 0) total
FROM dates d
LEFT JOIN `project.myproject` t
  ON d.date = t.date
ORDER BY d.date
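A simpler variant (a sketch, assuming the same table and columns as above; untested) generates the calendar directly and avoids both the name collision with the source table and the cross join in the dates CTE:
WITH sales AS (
  SELECT DATA_VENDA AS date,
         CAST(SUM(VLR_VENDA_TABELA) AS FLOAT64) AS total
  FROM `project.myproject`
  WHERE DATA_VENDA BETWEEN '2020-10-02' AND '2020-10-07'
    AND COD_CP = '0000010232'
  GROUP BY DATA_VENDA
)
SELECT d AS date, IFNULL(s.total, 0) AS total
FROM UNNEST(GENERATE_DATE_ARRAY(DATE '2020-10-02', DATE '2020-10-07')) AS d
LEFT JOIN sales s ON s.date = d
ORDER BY d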
You can simply do that with a common table expression (CTE), as shown below.
DECLARE @Datatemp TABLE (
    Id INT IDENTITY(1,1) NOT NULL,
    CDate DATETIME,
    Val INT
)
INSERT INTO @Datatemp SELECT '2021-10-01', 10
INSERT INTO @Datatemp SELECT '2021-10-02', 50
INSERT INTO @Datatemp SELECT '2021-10-04', 24
INSERT INTO @Datatemp SELECT '2021-10-05', 18

;WITH DateTemp([Date]) AS (
    SELECT CAST('2021-10-01' AS DATETIME)
    UNION ALL
    SELECT [Date] + 1
    FROM DateTemp
    WHERE [Date] < '2021-10-05'
)
SELECT DateTemp.[Date] CDate
      ,ISNULL(t.Val, 0) Val
FROM DateTemp
LEFT JOIN @Datatemp t ON t.CDate = DateTemp.[Date]
ORDER BY DateTemp.[Date]
--OPTION (MAXRECURSION 0)
By default, the number of iterations for a recursive CTE is limited to 100. If this limit is exceeded, the query is interrupted and an error is raised. If you want to remove this restriction, you can specify OPTION (MAXRECURSION 0), as in the commented-out line above.
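For example, generating a full year of dates with the same technique exceeds the default limit, so the hint must be supplied (a minimal, self-contained illustration):
-- Needs 364 recursive steps: without the hint this fails with
-- "The maximum recursion 100 has been exhausted before statement completion."
;WITH DateTemp([Date]) AS (
    SELECT CAST('2021-01-01' AS DATETIME)
    UNION ALL
    SELECT [Date] + 1 FROM DateTemp WHERE [Date] < '2021-12-31'
)
SELECT COUNT(*) FROM DateTemp
OPTION (MAXRECURSION 0)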

SQL find duplicate records from past 7 days

Hi, I am trying to find duplicate webvisits within the past 7 days. I have built a query, but it is taking too long to run. Any help in optimizing this query would be much appreciated. I am finding duplicates using the visitorguid.
WITH TooClose
AS
(
SELECT
a.visitid AS BeforeID,
b.visitID AS AfterID,
a.omniturecid as [Before om id],
b.omniturecid as [after om id],
a.pubid as [Before pub id],
b.pubid as [after pub id],
a.VisitorGuid as [Before guid],
b.VisitorGuid as [after guid],
a.date as [Before date],
b.date as [after date]
FROM
webvisits a
INNER JOIN WebVisits b ON a.VisitorGuid = b.VisitorGuid
AND a.date < b.Date
AND DATEDIFF(DAY, a.date, b.date) < 7
WHERE a.Date >= '7/1/2015'
)
SELECT
*
FROM
TooClose
WHERE
BeforeID NOT IN (SELECT AfterID FROM TooClose)
If I understand your question correctly, you are trying to find all duplicate webvisits within the past 7 days. Not sure what qualifies as a duplicate webvisit, but here is my attempt at what might work for you:
;WITH q1
AS (
SELECT a.VisitorGuid
,a.date
FROM webvisits a
WHERE a.DATE >= DATEADD(DAY, -7, cast(getdate() as date))
)
,q2 AS
(SELECT q1.VisitorGuid
,count(*) as rcount
FROM q1
GROUP BY q1.VisitorGuid
)
SELECT q2.VisitorGuid
FROM q2
WHERE q2.rcount > 1
UPDATED
;WITH q1
AS (
SELECT a.VisitorGuid
,a.date
,a.omniturecid
FROM webvisits a
WHERE a.DATE >= DATEADD(DAY, -7, cast(getdate() as date))
)
,q2 AS
(SELECT q1.VisitorGuid
,count(*) as rcount
FROM q1
GROUP BY q1.VisitorGuid
HAVING Count(*)> 1
)
SELECT q1.VisitorGuid,
q1.omniturecid,
q1.date
FROM q1
INNER JOIN q2 on q1.VisitorGuid = q2.VisitorGuid
As others have pointed out, it is a good idea to provide sample data, preferably as an sqlfiddle or as CREATE and INSERT statements. Here's one approach; I'm too lazy to invent the structure and fill it with data to test with, so it might contain some errors:
SELECT ... FROM (
SELECT VisitorGuid
, date
, lead(date) over (partition by visitorguid
order by date) as next_date
, omniturecid
, lead(omniturecid) over (partition by visitorguid
order by date) as next_omniturecid
, ...
, lead(...) ...
FROM webvisits a
) as x
WHERE DATEDIFF(DAY, date, next_date) < 7
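Filled in with the column names from the question, that sketch might look like this (assumes SQL Server 2012 or later for LEAD; untested):
SELECT *
FROM (
    SELECT visitid,
           VisitorGuid,
           omniturecid,
           pubid,
           [date],
           -- next visit by the same visitor, in date order
           LEAD([date]) OVER (PARTITION BY VisitorGuid ORDER BY [date]) AS next_date,
           LEAD(visitid) OVER (PARTITION BY VisitorGuid ORDER BY [date]) AS next_visitid
    FROM webvisits
) AS x
WHERE DATEDIFF(DAY, [date], next_date) < 7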

How to self-join table in a way that every record is joined with the "previous" record?

I have a MS SQL table that contains stock data with the following columns: Id, Symbol, Date, Open, High, Low, Close.
I would like to self-join the table, so I can get a day-to-day % change for Close.
I must create a query that joins the table with itself in such a way that every record also contains the data from the previous session (be aware that I cannot use yesterday's date).
My idea is to do something like this:
select * from quotes t1
inner join quotes t2
on t1.symbol = t2.symbol and
t2.date = (select max(date) from quotes where symbol = t1.symbol and date < t1.date)
However, I do not know if that's the correct/fastest way. What should I take into account when thinking about performance? (E.g., will putting a UNIQUE index on the (Symbol, Date) pair improve performance?)
There will be around 100,000 new records per year in this table. I am using MS SQL Server 2008.
One option is to use a recursive cte (if I'm understanding your requirements correctly):
WITH RNCTE AS (
SELECT *, ROW_NUMBER() OVER (PARTITION BY symbol ORDER BY date) rn
FROM quotes
),
CTE AS (
SELECT symbol, date, rn, cast(0 as decimal(10,2)) perc, closed
FROM RNCTE
WHERE rn = 1
UNION ALL
SELECT r.symbol, r.date, r.rn, cast(r.closed/c.closed as decimal(10,2)) perc, r.closed
FROM CTE c
JOIN RNCTE r on c.symbol = r.symbol AND c.rn+1 = r.rn
)
SELECT * FROM CTE
ORDER BY symbol, date
If you need a running total for each symbol to use as the percentage change, then easy enough to add an additional column for that amount -- wasn't completely sure what your intentions were, so the above just divides the current closed amount by the previous closed amount.
Something like this would work in SQLite:
SELECT ..
FROM quotes t1, quotes t2
WHERE t1.symbol = t2.symbol
AND t1.date < t2.date
GROUP BY t2.ID
HAVING t1.date = MAX(t1.date)
Given that SQLite is one of the simplest dialects, this may also work in MSSQL with minimal changes.
With an index on (symbol, date):
SELECT *
FROM quotes q_curr
CROSS APPLY (
SELECT TOP(1) *
FROM quotes
WHERE symbol = q_curr.symbol
AND date < q_curr.date
ORDER BY date DESC
) q_prev
You could do something like this:
with OrderedQuotes as
(
select
row_number() over(order by Symbol, Date) RowNum,
ID,
Symbol,
Date,
Open,
High,
Low,
Close
from Quotes
)
select
    a.Symbol,
    a.Date,
    a.Open,
    a.High,
    a.Low,
    a.Close,
    b.Date PrevDate,
    b.Open PrevOpen,
    b.High PrevHigh,
    b.Low PrevLow,
    b.Close PrevClose,
    (a.Close - b.Close) / b.Close PctChange
from OrderedQuotes a
join OrderedQuotes b on a.Symbol = b.Symbol and a.RowNum = b.RowNum + 1
If you change the last join to a left join, you get a row for the first date of each symbol (sketched below); not sure if you need that.
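For illustration, the left-join variant, reusing the OrderedQuotes CTE above (the b columns are NULL on each symbol's first date, and the NULL propagates into PctChange):
select
    a.Symbol,
    a.Date,
    a.Close,
    b.Date PrevDate,
    b.Close PrevClose,
    (a.Close - b.Close) / b.Close PctChange
from OrderedQuotes a
left join OrderedQuotes b on a.Symbol = b.Symbol and a.RowNum = b.RowNum + 1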
You can use a CTE with the ROW_NUMBER ranking function:
;WITH cte AS
(
SELECT symbol, date, [Open], [High], [Low], [Close],
ROW_NUMBER() OVER(PARTITION BY symbol ORDER BY date) AS Id
FROM quotes
)
SELECT c1.Id, c1.symbol, c1.date, c1.[Open], c1.[High], c1.[Low], c1.[Close],
ISNULL(c2.[Close] / c1.[Close], 0) AS perc
FROM cte c1 LEFT JOIN cte c2 ON c1.symbol = c2.symbol AND c1.Id = c2.Id + 1
ORDER BY c1.symbol, c1.date
For improving performance (avoiding sorting and RID Lookups), use this index:
CREATE INDEX ix_symbol$date_quotes ON quotes(symbol, date) INCLUDE([Open], [High], [Low], [Close])
What you had is fine. I don't know if translating the sub-query into the join will help. However, you asked for it, so the way to do it might be to join the table to itself once more.
select *
from quotes t1
inner join quotes t2
on t1.symbol = t2.symbol and t1.date > t2.date
left outer join quotes t3
on t2.symbol = t3.symbol and t2.date > t3.date
where t3.date is null
You could do something like this:
DECLARE @Today DATETIME
SELECT @Today = DATEADD(DAY, 0, DATEDIFF(DAY, 0, CURRENT_TIMESTAMP))
;WITH today AS
(
SELECT Id ,
Symbol ,
Date ,
[OPEN] ,
High ,
LOW ,
[CLOSE],
DATEADD(DAY, -1, Date) AS yesterday
FROM quotes
WHERE date = @Today
)
SELECT *
FROM today
LEFT JOIN quotes yesterday ON today.Symbol = yesterday.Symbol
AND today.yesterday = yesterday.Date
That way you limit your "today" results, if that's an option.
EDIT: The CTEs listed in other answers may work well, but I tend to be hesitant to use ROW_NUMBER when dealing with 100K rows or more. If the previous day may not always be yesterday, I tend to prefer pulling the check for the previous day into its own query, then using it for reference:
DECLARE @Today DATETIME, @PreviousDay DATETIME
SELECT @Today = DATEADD(DAY, 0, DATEDIFF(DAY, 0, CURRENT_TIMESTAMP));
SELECT @PreviousDay = MAX(Date) FROM quotes WHERE Date < @Today;
WITH today AS
(
SELECT Id ,
Symbol ,
Date ,
[OPEN] ,
High ,
LOW ,
[CLOSE]
FROM quotes
WHERE date = @Today
)
SELECT *
FROM today
LEFT JOIN quotes AS previousday
ON today.Symbol = previousday.Symbol
AND previousday.Date = #PreviousDay

Filling in missing dates DB2 SQL

My initial query looks like this:
select process_date, count(*) batchCount
from T1.log_comments
group by process_date
order by process_date asc;
I need to be able to do some quick analysis for weekends that are missing, but wanted to know if there was a quick way to fill in the missing dates not present in process_date.
I've seen the solution here, but am curious if there's any magic hidden in DB2 that could do this with only a minor modification to my original query.
Note: not tested; I framed it based on my exposure to SQL Server/Oracle. I guess this gives you the idea though:
*Now amended and tested on DB2.*
WITH MaxDateQry(MaxDate) AS
(
SELECT MAX(process_date) FROM T1.log_comments
),
MinDateQry(MinDate) AS
(
SELECT MIN(process_date) FROM T1.log_comments
),
DatesData(ProcessDate) AS
(
SELECT MinDate from MinDateQry
UNION ALL
SELECT (ProcessDate + 1 DAY) FROM DatesData WHERE ProcessDate < (SELECT MaxDate FROM MaxDateQry)
)
SELECT a.ProcessDate, b.batchCount
FROM DatesData a LEFT JOIN
(
SELECT process_date, COUNT(*) batchCount
FROM T1.log_comments
GROUP BY process_date
) b
ON a.ProcessDate = b.process_date
ORDER BY a.ProcessDate ASC;

SQL moving average

How do you create a moving average in SQL?
Current table:
Date Clicks
2012-05-01 2,230
2012-05-02 3,150
2012-05-03 5,520
2012-05-04 1,330
2012-05-05 2,260
2012-05-06 3,540
2012-05-07 2,330
Desired table or output:
Date Clicks 3 day Moving Average
2012-05-01 2,230
2012-05-02 3,150
2012-05-03 5,520 4,360
2012-05-04 1,330 3,330
2012-05-05 2,260 3,120
2012-05-06 3,540 3,320
2012-05-07 2,330 3,010
This is an evergreen Joe Celko question.
I don't know which DBMS platform is used, but in any case Joe was able to answer it more than 10 years ago with standard SQL.
Joe Celko SQL Puzzles and Answers citation:
"That last update attempt suggests that we could use the predicate to
construct a query that would give us a moving average:"
SELECT S1.sample_time, AVG(S2.load) AS avg_prev_hour_load
FROM Samples AS S1, Samples AS S2
WHERE S2.sample_time
BETWEEN (S1.sample_time - INTERVAL 1 HOUR)
AND S1.sample_time
GROUP BY S1.sample_time;
Is the extra column or the query approach better? The query is
technically better because the UPDATE approach will denormalize the
database. However, if the historical data being recorded is not going
to change and computing the moving average is expensive, you might
consider using the column approach.
MS SQL Example:
CREATE TABLE #TestDW
( Date1 datetime,
LoadValue Numeric(13,6)
);
INSERT INTO #TestDW VALUES('2012-06-09' , '3.540' );
INSERT INTO #TestDW VALUES('2012-06-08' , '2.260' );
INSERT INTO #TestDW VALUES('2012-06-07' , '1.330' );
INSERT INTO #TestDW VALUES('2012-06-06' , '5.520' );
INSERT INTO #TestDW VALUES('2012-06-05' , '3.150' );
INSERT INTO #TestDW VALUES('2012-06-04' , '2.230' );
SQL Puzzle query:
SELECT S1.date1, AVG(S2.LoadValue) AS avg_prev_3_days
FROM #TestDW AS S1, #TestDW AS S2
WHERE S2.date1
BETWEEN DATEADD(d, -2, S1.date1 )
AND S1.date1
GROUP BY S1.date1
order by 1;
One way to do this is to join the same table to itself a few times.
select
    ([Current].Clicks
     + isnull(P1.Clicks, 0)
     + isnull(P2.Clicks, 0)
     + isnull(P3.Clicks, 0)) / 4 as MovingAvg3
from
    MyTable as [Current]   -- bracketed: CURRENT is a reserved keyword in T-SQL
    left join MyTable as P1 on P1.Date = DateAdd(day, -1, [Current].Date)
    left join MyTable as P2 on P2.Date = DateAdd(day, -2, [Current].Date)
    left join MyTable as P3 on P3.Date = DateAdd(day, -3, [Current].Date)
Adjust the DateAdd component of the ON clauses to match whether you want your moving average to be strictly from the past through now, or from days ago through days ahead; a centered variant is sketched below.
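For example, a centered three-day window (one day back through one day ahead) follows the same pattern; a sketch:
select
    [Current].Date,
    (isnull(P1.Clicks, 0)
     + [Current].Clicks
     + isnull(N1.Clicks, 0)) / 3.0 as CenteredAvg3
from MyTable as [Current]
left join MyTable as P1 on P1.Date = DateAdd(day, -1, [Current].Date)
left join MyTable as N1 on N1.Date = DateAdd(day, 1, [Current].Date)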
This works nicely for situations where you need a moving average over only a few data points.
This is not an optimal solution for moving averages with more than a few data points.
select t2.date, round(sum(ct.clicks)/3) as avg_clicks
from
(select date from clickstable) as t2,
(select date, clicks from clickstable) as ct
where datediff(t2.date, ct.date) between 0 and 2
group by t2.date
Obviously you can change the interval to whatever you need. You could also use count() instead of a magic number to make it easier to change, but that will also slow it down.
General template for rolling averages that scales well for large data sets
WITH moving_avg AS (
SELECT 0 AS [lag] UNION ALL
SELECT 1 AS [lag] UNION ALL
SELECT 2 AS [lag] UNION ALL
SELECT 3 AS [lag] --ETC
)
SELECT
DATEADD(day,[lag],[date]) AS [reference_date],
[otherkey1],[otherkey2],[otherkey3],
AVG([value1]) AS [avg_value1],
AVG([value2]) AS [avg_value2]
FROM [data_table]
CROSS JOIN moving_avg
GROUP BY [otherkey1],[otherkey2],[otherkey3],DATEADD(day,[lag],[date])
ORDER BY [otherkey1],[otherkey2],[otherkey3],[reference_date];
And for weighted rolling averages:
WITH weighted_avg AS (
SELECT 0 AS [lag], 1.0 AS [weight] UNION ALL
SELECT 1 AS [lag], 0.6 AS [weight] UNION ALL
SELECT 2 AS [lag], 0.3 AS [weight] UNION ALL
SELECT 3 AS [lag], 0.1 AS [weight] --ETC
)
SELECT
DATEADD(day,[lag],[date]) AS [reference_date],
[otherkey1],[otherkey2],[otherkey3],
AVG([value1] * [weight]) / AVG([weight]) AS [wavg_value1],
AVG([value2] * [weight]) / AVG([weight]) AS [wavg_value2]
FROM [data_table]
CROSS JOIN weighted_avg
GROUP BY [otherkey1],[otherkey2],[otherkey3],DATEADD(day,[lag],[date])
ORDER BY [otherkey1],[otherkey2],[otherkey3],[reference_date];
select *
, (select avg(c2.clicks) from #clicks_table c2
where c2.date between dateadd(dd, -2, c1.date) and c1.date) mov_avg
from #clicks_table c1
Use a different join predicate:
SELECT [current].date
      ,avg(periods.clicks)
FROM [current] left outer join [current] as periods
ON periods.date BETWEEN dateadd(d, -2, [current].date) AND [current].date
GROUP BY [current].date HAVING COUNT(*) >= 3
The HAVING clause will prevent any dates without at least N values from being returned.
assume x is the value to be averaged and xDate is the date value:
SELECT t1.xDate,
       (SELECT AVG(t2.x) FROM myTable t2
        WHERE t2.xDate BETWEEN DATEADD(d, -2, t1.xDate) AND t1.xDate) AS movingAvg
FROM myTable t1
In Hive, maybe you could try:
select date, clicks, avg(clicks) over (order by date rows between 2 preceding and current row) as moving_avg from clicktable;
For this purpose, I'd like to create an auxiliary/dimensional date table like
create table date_dim(date date, date_1 date, date_2 date, date_3 date ...)
where date is the key, date_1 is for this day, date_2 covers this day and the day before, date_3...
Then you can do the equi-join in Hive.
Using a view like:
select date, date from date_dim
union all
select date, date_add(date, -1) from date_dim
union all
select date, date_add(date, -2) from date_dim
union all
select date, date_add(date, -3) from date_dim
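The equi-join the answer alludes to would then look something like this (a sketch; date_windows names the view above with its two columns called anchor_date and member_date, and the clicks table/column names are assumed):
select w.anchor_date, avg(c.clicks) as moving_avg_3d
from date_windows w
join clicktable c on c.date = w.member_date
group by w.anchor_date;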
NOTE: THIS IS NOT AN ANSWER but an enhanced code sample of Diego Scaravaggi's answer. I am posting it as an answer because the comment section is insufficient. Note that I have parameterized the period for the moving average.
declare @p int = 3
declare @t table(d int, bal float)
insert into @t values
(1,94),
(2,99),
(3,76),
(4,74),
(5,48),
(6,55),
(7,90),
(8,77),
(9,16),
(10,19),
(11,66),
(12,47)
select a.d, avg(b.bal)
from @t a
left join @t b on b.d between a.d-(@p-1) and a.d
group by a.d
--@p1 is the period of the moving average, @o1 is the offset
declare @p1 as int
declare @o1 as int
set @p1 = 5;
set @o1 = 3;
with np as (
    select *, rank() over(partition by cmdty, tenor order by markdt) as r
    from p_prices p1
),
x1 as (
    select s1.*, avg(s2.val) as avgval
    from np s1
    inner join np s2
      on s1.cmdty = s2.cmdty and s1.tenor = s2.tenor
     and s2.r between s1.r - (@p1 - 1) - (@o1) and s1.r - (@o1)
    group by s1.cmdty, s1.tenor, s1.markdt, s1.val, s1.r
)
select * from x1;
I'm not sure that your expected result (output) shows the classic "simple moving (rolling) average" for 3 days, because, by definition, the first triple of numbers gives:
ThreeDaysMovingAverage = (2.230 + 3.150 + 5.520) / 3 = 3.6333333
but you expect 4.360, and that's confusing.
Nevertheless, I suggest the following solution, which uses the window function AVG. This approach is much more efficient (clearer and less resource-intensive) than the SELF JOIN introduced in other answers (and I'm surprised that no one has offered a better solution).
-- Oracle-SQL dialect
with
data_table as (
select date '2012-05-01' AS dt, 2.230 AS clicks from dual union all
select date '2012-05-02' AS dt, 3.150 AS clicks from dual union all
select date '2012-05-03' AS dt, 5.520 AS clicks from dual union all
select date '2012-05-04' AS dt, 1.330 AS clicks from dual union all
select date '2012-05-05' AS dt, 2.260 AS clicks from dual union all
select date '2012-05-06' AS dt, 3.540 AS clicks from dual union all
select date '2012-05-07' AS dt, 2.330 AS clicks from dual
),
param as (select 3 days from dual)
select
dt AS "Date",
clicks AS "Clicks",
case when rownum >= p.days then
avg(clicks) over (order by dt
rows between p.days - 1 preceding and current row)
end
AS "3 day Moving Average"
from data_table t, param p;
You see that AVG is wrapped in "case when rownum >= p.days then" to force NULLs in the first rows, where the "3 day Moving Average" is meaningless.
We can apply Joe Celko's "dirty" left outer join method (as cited above by Diego Scaravaggi) to answer the question as it was asked.
declare #ClicksTable table ([Date] date, Clicks int)
insert into #ClicksTable
select '2012-05-01', 2230 union all
select '2012-05-02', 3150 union all
select '2012-05-03', 5520 union all
select '2012-05-04', 1330 union all
select '2012-05-05', 2260 union all
select '2012-05-06', 3540 union all
select '2012-05-07', 2330
This query:
SELECT
T1.[Date],
T1.Clicks,
-- AVG ignores NULL values so we have to explicitly NULLify
-- the days when we don't have a full 3-day sample
CASE WHEN count(T2.[Date]) < 3 THEN NULL
ELSE AVG(T2.Clicks)
END AS [3-Day Moving Average]
FROM #ClicksTable T1
LEFT OUTER JOIN #ClicksTable T2
ON T2.[Date] BETWEEN DATEADD(d, -2, T1.[Date]) AND T1.[Date]
GROUP BY T1.[Date]
Generates the requested output:
Date Clicks 3-Day Moving Average
2012-05-01 2,230
2012-05-02 3,150
2012-05-03 5,520 4,360
2012-05-04 1,330 3,330
2012-05-05 2,260 3,120
2012-05-06 3,540 3,320
2012-05-07 2,330 3,010