How to calculate a value based on the average of the previous month and the average of the same month last year in SQL

I would like to calculate targets for open rates and click-to-open rates based on the actuals of the previous month and the same month last year.
My table is aggregated at the daily level, and I have grouped it by month and year to get the monthly averages. I then created a self-join to join the current dates to the results of the previous month. This works fine for all months except January, because SQL can't know that it's supposed to join 1 on 12. Is there a way to specify this in my join clause?
Essentially, the results for January 2021 shouldn't be null because I have December 2020 data.
This is my data and my query:
CREATE TABLE exasol_last_year_avg(
date_col date,
country text,
brand text,
category text,
delivered integer,
opened integer,
clicked integer
);
INSERT INTO exasol_last_year_avg
(date_col,country,brand,category,delivered,opened,clicked) VALUES
('2021-01-01','AT','brand1','cat1',100,60,23),
('2021-01-01','AT','brand1','cat2',200,50,45),
('2021-01-01','AT','brand2','cat1',300,49,35),
('2021-01-01','AT','brand2','cat2',400,79,57),
('2021-02-02','AT','brand1','cat1',130,78,30),
('2021-02-02','AT','brand1','cat2',260,65,59),
('2021-02-02','AT','brand2','cat1',390,64,46),
('2021-02-02','AT','brand2','cat2',520,103,74),
('2020-12-02','AT','brand1','cat1',130,78,30),
('2020-12-02','AT','brand1','cat2',260,65,59),
('2020-12-02','AT','brand2','cat1',390,64,46),
('2020-12-02','AT','brand2','cat2',520,103,74),
('2020-02-02','AT','brand1','cat2',236,59,53),
('2020-02-02','AT','brand2','cat1',355,58,41),
('2020-02-02','AT','brand2','cat2',473,93,67),
('2020-02-02','AT','brand1','cat1',118,71,27);
This is written in PostgreSQL because I think it's more accessible to most people, but my production database is Exasol!
select *
from
(Select month_col,
year_col,
t_campaign_cmcategory,
t_country,
t_brand,
(t2_clicktoopenrate + t3_clicktoopenrate)/2 as target_clicktoopenrate,
(t2_openrate + t3_openrate)/2 as target_openrate
from (
with CTE as (
select extract(month from date_col) as month_col,
extract(year from date_col) as year_col,
category as t_campaign_cmcategory,
country as t_country,
brand as t_brand,
round(sum(opened)/nullif(sum(delivered),0),3) as OpenRate,
round(sum(clicked)/nullif(sum(opened),0),3) as ClickToOpenRate
from public.exasol_last_year_avg
group by 1, 2, 3, 4, 5)
select t1.month_col,
t1.year_col,
t2.month_col as t2_month_col,
t2.year_col as t2_year_col,
t3.month_col as t3_month_col,
t3.year_col as t3_year_col,
t1.t_campaign_cmcategory,
t1.t_country,
t1.t_brand,
t1.OpenRate,
t1.ClickToOpenRate,
t2.OpenRate as t2_OpenRate,
t2.ClickToOpenRate as t2_ClickToOpenRate,
t3.OpenRate as t3_OpenRate,
t3.ClickToOpenRate as t3_ClickToOpenRate
from CTE t1
left join CTE t2
on t1.month_col = t2.month_col + 1
and t1.year_col = t2.year_col
and t1.t_campaign_cmcategory = t2.t_campaign_cmcategory
and t1.t_country = t2.t_country
and t1.t_brand = t2.t_brand
left join CTE t3
on t1.month_col = t3.month_col
and t1.year_col = t3.year_col + 1
and t1.t_campaign_cmcategory = t3.t_campaign_cmcategory
and t1.t_country = t3.t_country
and t1.t_brand = t3.t_brand) as target_base) as final_tbl

Start with an aggregation query:
select date_trunc('month', date_col), country, brand,
sum(opened) * 1.0 / nullif(sum(delivered), 0) as OpenRate,
sum(clicked) * 1.0 / nullif(sum(opened), 0) as ClickToOpenRate
from exasol_last_year_avg
group by 1, 2, 3;
Then, use window functions. Assuming you have a value for every month (with no gaps), you can just use lag(). I'm not sure what your final calculation is, but this brings in the data:
with mcb as (
select date_trunc('month', date_col) as yyyymm, country, brand,
sum(opened) * 1.0 / nullif(sum(delivered), 0) as OpenRate,
sum(clicked) * 1.0 / nullif(sum(opened), 0) as ClickToOpenRate
from exasol_last_year_avg
group by 1, 2, 3
)
select mcb.*,
lag(openrate, 1) over (partition by country, brand order by yyyymm) as prev_month_openrate,
lag(ClickToOpenRate, 1) over (partition by country, brand order by yyyymm) as prev_month_ClickToOpenRate,
lag(openrate, 12) over (partition by country, brand order by yyyymm) as prev_year_openrate,
lag(ClickToOpenRate, 12) over (partition by country, brand order by yyyymm) as prev_year_ClickToOpenRate
from mcb;
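Given how the question defines the target (the average of the previous month and the same month last year), the final calculation could be layered on top, roughly as sketched below. It partitions only by country and brand, as in the query above; if you also break results out by category, add it to both the GROUP BY and the PARTITION BY.
with mcb as (
select date_trunc('month', date_col) as yyyymm, country, brand,
sum(opened) * 1.0 / nullif(sum(delivered), 0) as OpenRate,
sum(clicked) * 1.0 / nullif(sum(opened), 0) as ClickToOpenRate
from exasol_last_year_avg
group by 1, 2, 3
), lagged as (
select mcb.*,
lag(OpenRate, 1) over (partition by country, brand order by yyyymm) as prev_month_openrate,
lag(OpenRate, 12) over (partition by country, brand order by yyyymm) as prev_year_openrate,
lag(ClickToOpenRate, 1) over (partition by country, brand order by yyyymm) as prev_month_ctor,
lag(ClickToOpenRate, 12) over (partition by country, brand order by yyyymm) as prev_year_ctor
from mcb
)
select yyyymm, country, brand,
(prev_month_openrate + prev_year_openrate) / 2 as target_openrate,
(prev_month_ctor + prev_year_ctor) / 2 as target_clicktoopenrate
from lagged;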

This works with a different join condition:
select *
from
(Select month_col,
year_col,
t_campaign_cmcategory,
t_country,
t_brand,
(t2_clicktoopenrate + t3_clicktoopenrate)/2 as target_clicktoopenrate,
(t2_openrate + t3_openrate)/2 as target_openrate
from (
with CTE as (
select extract(month from date_col) as month_col,
extract(year from date_col) as year_col,
category as t_campaign_cmcategory,
country as t_country,
brand as t_brand,
round(sum(opened)/nullif(sum(delivered),0),3) as OpenRate,
round(sum(clicked)/nullif(sum(opened),0),3) as ClickToOpenRate
from public.exasol_last_year_avg
group by 1, 2, 3, 4, 5)
select t1.month_col,
t1.year_col,
t2.month_col as t2_month_col,
t2.year_col as t2_year_col,
t3.month_col as t3_month_col,
t3.year_col as t3_year_col,
t1.t_campaign_cmcategory,
t1.t_country,
t1.t_brand,
t1.OpenRate,
t1.ClickToOpenRate,
t2.OpenRate as t2_OpenRate,
t2.ClickToOpenRate as t2_ClickToOpenRate,
t3.OpenRate as t3_OpenRate,
t3.ClickToOpenRate as t3_ClickToOpenRate
from CTE t1
left join CTE t2
-- adjusted join condition
on ((t1.month_col = (CASE WHEN t1.month_col = 1 then t2.month_col - 11 END) and t1.year_col = t2.year_col + 1)
or (t1.month_col = (CASE WHEN t1.month_col != 1 then t2.month_col + 1 END) and t1.year_col = t2.year_col))
and t1.t_campaign_cmcategory = t2.t_campaign_cmcategory
and t1.t_country = t2.t_country
and t1.t_brand = t2.t_brand
left join CTE t3
on t1.month_col = t3.month_col
and t1.year_col = t3.year_col + 1
and t1.t_campaign_cmcategory = t3.t_campaign_cmcategory
and t1.t_country = t3.t_country
and t1.t_brand = t3.t_brand) as target_base) as final_tbl
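If you would rather avoid the OR/CASE condition, a single month index (year * 12 + month) also handles the year boundary, because December 2020 then comes out exactly one less than January 2021. This is a sketch of how the t2 join above could be rewritten; it is plain integer arithmetic, so it should behave the same way in PostgreSQL and Exasol:
left join CTE t2
on (t1.year_col * 12 + t1.month_col) = (t2.year_col * 12 + t2.month_col) + 1
and t1.t_campaign_cmcategory = t2.t_campaign_cmcategory
and t1.t_country = t2.t_country
and t1.t_brand = t2.t_brand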

Related

How to get all months in a query that has no matches?

My current output looks like this, where I filtered the results down to a single SKU in my query.
I want to display the results like the image below.
So I tweaked my query a little bit, but only got results like this.
What should I do to display the SKU as well? I don't want NULL values in the SKU column.
Current query written so far:
with month_agg as
(select sku_number, extract(month from date_) as month, sum(v) as viewed, sum(a) as add_to_cart, sum(p) as purchased
from merge_and_pivot
where sku_number = '10671924'
group by sku_number, 2 order by 1,2)
, month_generate as
(SELECT extract(month from date_) as month
FROM UNNEST(GENERATE_DATE_ARRAY('2018-01-01', '2018-12-01', INTERVAL 1 MONTH)) AS date_)
select a.month, b.sku_number, coalesce(b.viewed, 0) , coalesce(b.add_to_cart, 0), coalesce(b.purchased, 0)
from month_generate a
left JOIN month_agg b on a.month = b.month
Consider the query below:
SELECT month,
MAX(sku_number) OVER() sku_number, -- assuming all sku_number is same
COALESCE(viewed, 0) viewed,
COALESCE(add_to_cart, 0) add_to_cart,
COALESCE(purchased, 0) purchased,
FROM UNNEST(GENERATE_ARRAY(1, 12)) month LEFT JOIN sample USING(month)
ORDER BY month;
output:
with sample:
CREATE TEMP TABLE sample AS
SELECT '10671924' sku_number, 1 month, 9 viewed, 6 add_to_cart, 0 purchased UNION ALL
SELECT '10671924', 10, 32, 8, 0 UNION ALL
SELECT '10671924', 11, 948, 688, 163 UNION ALL
SELECT '10671924', 12, 630, 299, 83;
Updated query:
SELECT month,
sku.sku_number,
COALESCE(viewed, 0) viewed,
COALESCE(add_to_cart, 0) add_to_cart,
COALESCE(purchased, 0) purchased,
FROM (SELECT DISTINCT sku_number FROM sample) sku, UNNEST(GENERATE_ARRAY(1, 12)) month
LEFT JOIN sample USING(sku_number, month)
ORDER BY sku_number, month;
output:

Calculating missing months

I use the following query in order to fill in missing months:
Create Table #Sample(year int, month int, product nvarchar(50), qty_ytd int);
Insert #Sample(year, month, product, qty_ytd) Values
(2017, 01,'book', 20),
(2017, 02, 'pc', 30),
(2018, 01, 'book', 50);
;With Months As
(Select 1 As month
Union All
Select month + 1 From Months Where month < 12)
, YearsAndMonths As
(Select distinct year,m.month from #Sample cross join Months m)
select ym.*, coalesce(s.qty_ytd, s2.qty_ytd) qty_ytd, coalesce(s.qty_ytd, 0) QTY from YearsAndMonths ym
left join #sample s on ym.year = s.year and ym.month = s.month
left join (select qty_ytd, year,
row_number() over (partition by year order by month desc) rn
from #Sample) s2 on ym.year = s2.year and rn = 1
How could I add product as well?
Firstly, I would recommend creating a calendar table, since this pops up as a use case every once in a while. A quick example can be found here.
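For illustration only, a minimal calendar table holding just the (year, month) pairs needed for this question could be built roughly like this (the name static.calendar matches the query below and assumes that schema already exists):
-- minimal sketch of a calendar table: one row per (year, month)
CREATE TABLE static.calendar (year int NOT NULL, month int NOT NULL);

;WITH Months AS
(
Select 1 As month
Union All
Select month + 1 From Months Where month < 12
)
INSERT INTO static.calendar (year, month)
SELECT y.year, m.month
FROM (VALUES (2017), (2018)) AS y(year)
CROSS JOIN Months AS m;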
Now, once you have the calendar table (let's call it static.calendar) ready, the code is fairly simple as follows:
with Products
as
(
SELECT distinct product
FROM #Sample
),
TimeRange
as
(
SELECT DISTINCT year,
month
FROM static.calendar
),
ProductTimeRange
as
(
SELECT p.product,
tr.year,
tr.month
FROM Products as p
CROSS JOIN TimeRange as tr
)
SELECT ptr.product,
ptr.year,
ptr.month,
s.qty_ytd
FROM ProductTimeRange as ptr
LEFT JOIN #sample as s
ON ptr.product = s.product
AND ptr.year = s.year
AND ptr.month = s.month
ORDER BY ptr.product,
ptr.year,
ptr.month
Use a cross join to generate the rows that you want -- all the years, months, and products.
Then use left join to bring in the data you want:
With Months As (
Select 1 As month
Union All
Select month + 1
From Months
Where month < 12
)
select y.year, m.month, s.product, coalesce(qty_ytd, 0) as qty_ytd
from (select distinct year from #sample) y cross join
months m cross join
(select distinct product from #sample) p left join
#sample s
on s.year = y.year and s.month = m.month and s.product = p.product;
Here is a db<>fiddle.

Group by in columns and rows, counts and percentages per day

I have a table that has data like following.
attr |time
----------------|--------------------------
abc |2018-08-06 10:17:25.282546
def |2018-08-06 10:17:25.325676
pqr |2018-08-05 10:17:25.366823
abc |2018-08-06 10:17:25.407941
def |2018-08-05 10:17:25.449249
I want to group and count them by the attr column row-wise, and also create additional columns to show their counts per day and percentages, as shown below.
attr |day1_count| day1_%| day2_count| day2_%
----------------|----------|-------|-----------|-------
abc |2 |66.6% | 0 | 0.0%
def |1 |33.3% | 1 | 50.0%
pqr |0 |0.0% | 1 | 50.0%
I'm able to display one count by using group by, but I can't work out how to even separate them into multiple columns. I tried to generate the day1 percentage with
SELECT attr, count(attr), count(attr) / sum(sub.day1_count) * 100 as percentage from (
SELECT attr, count(*) as day1_count FROM my_table WHERE DATEPART(week, time) = DATEPART(day, GETDate()) GROUP BY attr) as sub
GROUP BY attr;
But this is also not giving me the correct answer; I'm getting all zeroes for the percentage and a count of 1. Any help is appreciated. I'm trying to do this in Redshift, which follows PostgreSQL syntax.
Let's nail the logic before presenting:
with CTE1 as
(
select attr, DATEPART(day, time) as theday, count(*) as thecount
from MyTable
group by attr, DATEPART(day, time)
)
, CTE2 as
(
select theday, sum(thecount) as daytotal
from CTE1
group by theday
)
select t1.attr, t1.theday, t1.thecount, t1.thecount/t2.daytotal as percentofday
from CTE1 t1
inner join CTE2 t2
on t1.theday = t2.theday
From here you can pivot to create a day-by-day breakdown if you feel the need.
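For example, the final SELECT in the statement above could be swapped for a conditional-aggregation pivot along these lines (a sketch: day 1 here is 2018-08-06 and day 2 is 2018-08-05, matching the descending order in the desired output, and 100.0 keeps the arithmetic decimal):
select t1.attr,
coalesce(max(case when t1.theday = 6 then t1.thecount end), 0) as day1_count,
coalesce(max(case when t1.theday = 6 then round(100.0 * t1.thecount / t2.daytotal, 1) end), 0) as day1_pct,
coalesce(max(case when t1.theday = 5 then t1.thecount end), 0) as day2_count,
coalesce(max(case when t1.theday = 5 then round(100.0 * t1.thecount / t2.daytotal, 1) end), 0) as day2_pct
from CTE1 t1
inner join CTE2 t2
on t1.theday = t2.theday
group by t1.attr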
I am trying to enhance @JohnHC's query. By the way, if you need 7 days, you have to add those days in the CASE WHEN expressions.
with CTE1 as
(
select attr, time::date as theday, count(*) as thecount
from t group by attr,time::date
)
, CTE2 as
(
select theday, sum(thecount) as daytotal
from CTE1
group by theday
)
,
CTE3 as
(
select t1.attr, EXTRACT(DOW FROM t1.theday) as day_nmbr,t1.theday, t1.thecount, t1.thecount/t2.daytotal as percentofday
from CTE1 t1
inner join CTE2 t2
on t1.theday = t2.theday
)
select CTE3.attr,
max(case when day_nmbr=0 then CTE3.thecount end) as day1Cnt,
max(case when day_nmbr=0 then percentofday end) as day1,
max(case when day_nmbr=1 then CTE3.thecount end) as day2Cnt,
max( case when day_nmbr=1 then percentofday end) day2
from CTE3 group by CTE3.attr
http://sqlfiddle.com/#!17/54ace/20
In case that you have only 2 days:
http://sqlfiddle.com/#!17/3bdad/3 (days descending as in your example from left to right)
http://sqlfiddle.com/#!17/3bdad/5 (days ascending)
The main idea is already mentioned in the other answers. Instead of joining the CTEs for calculating the values I am using window functions which is a bit shorter and more readable I think. The pivot is done the same way.
SELECT
attr,
COALESCE(max(count) FILTER (WHERE day_number = 0), 0) as day1_count, -- D
COALESCE(max(percent) FILTER (WHERE day_number = 0), 0) as day1_percent,
COALESCE(max(count) FILTER (WHERE day_number = 1), 0) as day2_count,
COALESCE(max(percent) FILTER (WHERE day_number = 1), 0) as day2_percent
/*
Add more days here
*/
FROM(
SELECT *, (count::float/count_per_day)::decimal(5, 2) as percent -- C
FROM (
SELECT DISTINCT
attr,
MAX(time::date) OVER () - time::date as day_number, -- B
count(*) OVER (partition by time::date, attr) as count, -- A
count(*) OVER (partition by time::date) as count_per_day
FROM test_table
)s
)s
GROUP BY attr
ORDER BY attr
A: counting the rows per day, and counting the rows per day AND attr
B: for more readability I convert the dates into numbers. Here I take the difference between the current row's date and the maximum date available in the table, so I get a counter from 0 (day 1, the most recent day) up to n - 1 (the oldest day)
C: calculating the percentage and rounding
D: pivot by filtering on the day numbers. The COALESCE avoids NULL values and turns them into 0. To add more days, duplicate these column pairs with the next day number.
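For example, a third day would just repeat the pair of expressions with the next day number, dropped into the spot marked "Add more days here" (remember the comma after day2_percent):
COALESCE(max(count) FILTER (WHERE day_number = 2), 0) as day3_count,
COALESCE(max(percent) FILTER (WHERE day_number = 2), 0) as day3_percent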
Edit: Made the day counter more flexible for more days; new SQL Fiddle
Basically, I see this as conditional aggregation. But you need to get an enumerator for the date for the pivoting. So:
SELECT attr,
COUNT(*) FILTER (WHERE day_number = 1) as day1_count,
COUNT(*) FILTER (WHERE day_number = 1) / cnt as day1_percent,
COUNT(*) FILTER (WHERE day_number = 2) as day2_count,
COUNT(*) FILTER (WHERE day_number = 2) / cnt as day2_percent
FROM (SELECT attr,
DENSE_RANK() OVER (ORDER BY time::date DESC) as day_number,
1.0 * COUNT(*) OVER (PARTITION BY attr) as cnt
FROM test_table
) s
GROUP BY attr, cnt
ORDER BY attr;
Here is a SQL Fiddle.

Group a query by every month

I have the following query :
select
(select Sum(Stores) from XYZ where Year = '2013' and Month = '8' )
-
(select Sum(SalesStores) from ABC where Year = '2013' and Month = '8') as difference
Here in the above query Year and Month are also columns of a table.
I would like to know if there is a way to run the same query so that it is run against every month of the year.
If there are months without data/rows within XYZ or ABC tables then I would use FULL OUTER JOIN:
SELECT ISNULL(x.[Month], y.[Month]) AS [Month],
ISNULL(x.Sum_Stores, 0) - ISNULL(y.Sum_SalesStores, 0) AS Difference
FROM
(
SELECT [Month], Sum(Stores) AS Sum_Stores
FROM XYZ
WHERE [Year] = '2013'
GROUP BY [Month]
) AS x
FULL OUTER JOIN
(
SELECT [Month], Sum(SalesStores) AS Sum_SalesStores
FROM ABC
WHERE [Year] = '2013'
GROUP BY [Month]
) AS y ON x.[Month] = y.[Month]
;WITH Months(Month) AS
(
SELECT 1
UNION ALL
SELECT Month + 1
FROM Months
where Month < 12
)
SELECT '2013' [Year], m.Month, COALESCE(SUM(Stores), 0) - COALESCE(SUM(SalesStores), 0) [Difference]
FROM months m
LEFT JOIN XYZ x ON m.Month = x.Month AND x.[Year] = '2013'
LEFT JOIN ABC a ON a.Month = m.Month AND a.[Year] = '2013'
GROUP BY m.Month
You could use GROUP BY in your inner queries, and then run a join, like this:
SELECT x.Month, (x.sum_stores - COALESCE(y.sum_salesstores, 0)) as difference
FROM (
SELECT Month, SUM(Stores) as sum_stores
FROM XYZ WHERE Year = '2013'
GROUP BY Month
) x
LEFT OUTER JOIN (
SELECT Month, SUM(SalesStores) as sum_salesstores
FROM ABC WHERE Year = '2013'
GROUP BY Month
) y ON x.Month = y.Month
Note the use of COALESCE. It lets you preserve the value of the first SUM in cases where there are no records for the month in the ABC table.
The following example uses the UNION ALL operator with a CTE:
;WITH cte AS
(SELECT SUM(Stores) AS Stores, [Month]
FROM dbo.XYZ
WHERE [Year] = '2013'
GROUP BY [Month]
UNION ALL
SELECT -1.00 * SUM(SalesStores), [Month]
FROM dbo.ABC
WHERE [Year] = '2013'
GROUP BY [Month]
)
SELECT [Month], SUM(Stores) AS Difference
FROM cte
GROUP BY [Month]
Demo on SQLFiddle
;WITH Months(Month) AS
(
SELECT 1
UNION ALL
SELECT Month + 1
FROM Months
where Month < 12
)
SELECT Months.Month,
(select isnull(Sum(Stores),0) from XYZ where Year = '2013' and Month = Months.Month) - (select isnull(Sum(SalesStores),0) from ABC where Year = '2013' and Month =Months.Month) as difference
FROM Months

SQL Query in CRM Report

A "Case" in CRM has a field called "Status" with four options.
I'm trying to
build a report in CRM that fills a table with every week of the year (each row is a different week), and then counts the number of cases that have each Status option (the columns would be each of the Status options).
The table would look like this
Status 1 Status 2 Status 3
Week 1 3 55 4
Week 2 5 23 5
Week 3 14 11 33
So far I have the following:
SELECT
SUM(case WHEN status = 1 then 1 else 0 end) Status1,
SUM(case WHEN status = 2 then 1 else 0 end) Status2,
SUM(case WHEN status = 3 then 1 else 0 end) Status3,
SUM(case WHEN status = 4 then 1 else 0 end) Status4,
SUM(case WHEN status = 5 then 1 else 0 end) Status5
FROM [DB].[dbo].[Contact]
Which gives me the following:
Status 1 Status 2 Status 3
2 43 53
Now I need to somehow split this into 52 rows for the past year and filter these results by date (columns in the Contact table). I'm a bit new to SQL queries and CRM - any help here would be much appreciated.
Here is a SQLFiddle with my progress and sample data: http://sqlfiddle.com/#!2/85b19/1
Sounds like you want to group by a range. The trick is to create a new field that represents each range (for you, one per week) and group by that.
Since it also seems like you want an infinite range of dates, marc_s has a good summary for how to do the group by trick with dates in a generic way: SQL group by frequency within a date range
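As a rough sketch of that idea applied to the query in the question (the date column name CreatedOn is an assumption here, since the question only says the date columns live on the Contact table):
SELECT
DATEPART(year, CreatedOn) AS CaseYear,
DATEPART(week, CreatedOn) AS CaseWeek, -- the new "range" field to group on
SUM(case WHEN status = 1 then 1 else 0 end) Status1,
SUM(case WHEN status = 2 then 1 else 0 end) Status2,
SUM(case WHEN status = 3 then 1 else 0 end) Status3,
SUM(case WHEN status = 4 then 1 else 0 end) Status4,
SUM(case WHEN status = 5 then 1 else 0 end) Status5
FROM [DB].[dbo].[Contact]
GROUP BY DATEPART(year, CreatedOn), DATEPART(week, CreatedOn)
ORDER BY CaseYear, CaseWeek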
So, let's break this down:
You want to make a report that shows, for each contact, a breakdown, week by week, of the number of cases registered to that contact, which is divided into three columns, one for each StateCode.
If this is the case, then you would need to have 52 date records (or so) for each contact. For calendar-like requests, it's always good to have a separate calendar table you can query against. Dan Guzman has a blog entry that creates a useful calendar table, which I'll use in the query.
WITH WeekNumbers AS
(
SELECT
FirstDateOfWeek,
-- order by first date of week, grouping calendar year to produce week numbers
WeekNumber = row_number() OVER (PARTITION BY CalendarYear ORDER BY FirstDateOfWeek)
FROM
master.dbo.Calendar -- created from script
GROUP BY
FirstDateOfWeek,
CalendarYear
), Calendar AS
(
SELECT
WeekNumber =
(
SELECT
WeekNumber
FROM
WeekNumbers WN
WHERE
C.FirstDateOfWeek = WN.FirstDateOfWeek
),
*
FROM
master.dbo.Calendar C
WHERE
CalendarDate BETWEEN '1/1/2012' AND getutcdate()
)
SELECT
C.FullName,
----include the below if the data is necessary
--Cl.WeekNumber,
--Cl.CalendarYear,
--Cl.FirstDateOfWeek,
--Cl.LastDateOfWeek,
'Week: ' + CAST(Cl.WeekNumber AS VARCHAR(20))
+ ', Year: ' + CAST(Cl.CalendarYear AS VARCHAR(20)) WeekNumber
FROM
CRM.dbo.Contact C
-- use a cartesian join to produce a table list
CROSS JOIN
(
SELECT
DISTINCT WeekNumber,
CalendarYear,
FirstDateOfWeek,
LastDateOfWeek
FROM
Calendar
) Cl
ORDER BY
C.FullName,
Cl.WeekNumber
This is different from the solution Ben linked to because Marc's query only returns weeks where there is a matching value, whereas you may or may not want to see even the weeks where there is no activity.
Once you have your core tables of contacts split out week by week as in the above (or altered for your specific time period), you can simply add a subquery for each StateCode to see the breakdown in columns as in the final query below.
WITH WeekNumbers AS
(
SELECT
FirstDateOfWeek,
WeekNumber = row_number() OVER (PARTITION BY CalendarYear ORDER BY FirstDateOfWeek)
FROM
master.dbo.Calendar
GROUP BY
FirstDateOfWeek,
CalendarYear
), Calendar AS
(
SELECT
WeekNumber =
(
SELECT
WeekNumber
FROM
WeekNumbers WN
WHERE
C.FirstDateOfWeek = WN.FirstDateOfWeek
),
*
FROM
master.dbo.Calendar C
WHERE
CalendarDate BETWEEN '1/1/2012' AND getutcdate()
)
SELECT
C.FullName,
--Cl.WeekNumber,
--Cl.CalendarYear,
--Cl.FirstDateOfWeek,
--Cl.LastDateOfWeek,
'Week: ' + CAST(Cl.WeekNumber AS VARCHAR(20)) +', Year: ' + CAST(Cl.CalendarYear AS VARCHAR(20)) WeekNumber,
(
SELECT
count(*)
FROM
CRM.dbo.Incident I
INNER JOIN CRM.dbo.StringMap SM ON
I.StateCode = SM.AttributeValue
INNER JOIN
(
SELECT
DISTINCT ME.Name,
ME.ObjectTypeCode
FROM
CRM.MetadataSchema.Entity ME
) E ON
SM.ObjectTypeCode = E.ObjectTypeCode
WHERE
I.ModifiedOn >= Cl.FirstDateOfWeek
AND I.ModifiedOn < dateadd(day, 1, Cl.LastDateOfWeek)
AND E.Name = 'incident'
AND SM.AttributeName = 'statecode'
AND SM.LangId = 1033
AND I.CustomerId = C.ContactId
AND SM.Value = 'Active'
) ActiveCases,
(
SELECT
count(*)
FROM
CRM.dbo.Incident I
INNER JOIN CRM.dbo.StringMap SM ON
I.StateCode = SM.AttributeValue
INNER JOIN
(
SELECT
DISTINCT ME.Name,
ME.ObjectTypeCode
FROM
CRM.MetadataSchema.Entity ME
) E ON
SM.ObjectTypeCode = E.ObjectTypeCode
WHERE
I.ModifiedOn >= Cl.FirstDateOfWeek
AND I.ModifiedOn < dateadd(day, 1, Cl.LastDateOfWeek)
AND E.Name = 'incident'
AND SM.AttributeName = 'statecode'
AND SM.LangId = 1033
AND I.CustomerId = C.ContactId
AND SM.Value = 'Resolved'
) ResolvedCases,
(
SELECT
count(*)
FROM
CRM.dbo.Incident I
INNER JOIN CRM.dbo.StringMap SM ON
I.StateCode = SM.AttributeValue
INNER JOIN
(
SELECT
DISTINCT ME.Name,
ME.ObjectTypeCode
FROM
CRM.MetadataSchema.Entity ME
) E ON
SM.ObjectTypeCode = E.ObjectTypeCode
WHERE
I.ModifiedOn >= Cl.FirstDateOfWeek
AND I.ModifiedOn < dateadd(day, 1, Cl.LastDateOfWeek)
AND E.Name = 'incident'
AND SM.AttributeName = 'statecode'
AND SM.LangId = 1033
AND I.CustomerId = C.ContactId
AND SM.Value = 'Canceled'
) CancelledCases
FROM
CRM.dbo.Contact C
CROSS JOIN
(
SELECT
DISTINCT WeekNumber,
CalendarYear,
FirstDateOfWeek,
LastDateOfWeek
FROM
Calendar
) Cl
ORDER BY
C.FullName,
Cl.WeekNumber