TABLE1:
ARTIKEL SUPPLIERID SALE_SUM_PIECES
TV SONY 7
TABLE2:
ROW_ID ARTIKEL SUPPLIERID PIECES
1 TV SONY 6
2 TV SONY 10
3 TV SONY 6
4 TV SONY 14
5 TV SONY 18
6 TV SONY 4
I need to subtract the value X=23 from TABLE2."PIECES", but only from the point where the running SUM of "PIECES" in TABLE2 becomes greater than TABLE1."SALE_SUM_PIECES". For example: the value of TABLE1."SALE_SUM_PIECES" is 7. Now I need to check at which row the running SUM of TABLE2."PIECES" exceeds 7. In the example above, the first row of TABLE2 is not valid because 7 is greater than 6. But the second row of TABLE2 is valid, since the SUM of "PIECES" from row 1 and row 2 of TABLE2, i.e. 6+10=16, is greater than 7. So I need to subtract the value X=23 starting from the second row and continuing through the following rows of TABLE2.
The query I have is as follows:
SELECT "SUPPLIERID", "ARTIKEL",
(case when ( cumulativesum - (select "SALE_SUM_PIECES" from T1 where T1."SUPPLIERID"=T2."SUPPLIERID" and T1."ARTIKEL" = T2."ARTIKEL" )) <= 0
then NULL
when ( cumulativesum - (select "SALE_SUM_PIECES" from TABLE1 T1 where T1."SUPPLIERID"=T2."SUPPLIERID" and T1."ARTIKEL" = T2."ARTIKEL" )) > 0
then
(case when #x - cumulativesum <= 0 and #x - (cumulativesum -PIECES) > 0
then 0
when #x - "cumulativesum" <= 0
then NULL
else #x - "cumulativesum"
end) as "VALUE_DRILL_DOWN"
from (SELECT T1."ARTIKEL", T1."SUPPLIERID", T1.PIECES
(select sum("PIECES")
from EXAMPLE_TABLE T2
where T2."ROW_ID" <= T1."ROW_ID" and T2."SUPPLIERID" = T1."SUPPLIERID" and T2."ARTIKEL"=T1."ARTIKEL"
) as "cumulativesum"
from EXAMPLE_TABLE T1
)
When I execute the above query I get the result as follows:
ROW_ID ARTIKEL SUPPLIERID PIECES VALUE_DRILL_DOWN
1 TV SONY 6 NULL
2 TV SONY 10 7
3 TV SONY 6 1
4 TV SONY 14 0
5 TV SONY 18 Null
6 TV SONY 4 Null
But I expect a result to be as follows:
ROW_ID ARTIKEL SUPPLIERID PIECES VALUE_DRILL_DOWN
1 TV SONY 6 NULL
2 TV SONY 10 13
3 TV SONY 6 7
4 TV SONY 14 0
5 TV SONY 18 Null
6 TV SONY 4 Null
I want the subtraction of X=23 to start from the row in TABLE2 where the condition TABLE1."SALE_SUM_PIECES" < SUM of TABLE2."PIECES" is first met, i.e. from row 2. Any suggestions?
Thanks in advance.
The solution below gives the desired results. See SqlFiddle.
ROW_ID ARTIKEL SUPPLIERID ORGID PIECES COSTPRICE DISCOUNT VALUE_DRILL_DOWN
1 TV SONY 922 6 110 2.50 NULL
2 TV SONY 922 10 80 1.00 13
3 TV SONY 922 6 65 1.50 7
4 TV SONY 922 14 95 1.50 0
5 TV SONY 922 18 95 1.50 NULL
6 TV SONY 922 4 95 1.50 NULL
DECLARE @x INT = 23
; WITH cte AS
(
SELECT t2.*, t1.SALE_SUM_PIECES,
CASE
WHEN SUM(t2.PIECES) OVER (PARTITION BY t2.ARTIKEL, t2.SUPPLIERID ORDER BY ROW_ID) < t1.SALE_SUM_PIECES THEN 'None'
ELSE t2.ARTIKEL + t2.SUPPLIERID
END AS GroupId
FROM #Table2 t2
JOIN #Table1 t1 ON t2.ARTIKEL = t1.ARTIKEL AND t2.SUPPLIERID = t1.SUPPLIERID
),
cumulative AS
(
SELECT *, SUM(PIECES) OVER (PARTITION BY GroupId ORDER BY ROW_ID) AS CumulativeSum
FROM cte
)
SELECT ROW_ID, ARTIKEL, SUPPLIERID, ORGID, PIECES, COSTPRICE, DISCOUNT,
CASE
WHEN CumulativeSum < SALE_SUM_PIECES THEN NULL
WHEN @x - CumulativeSum <= 0 AND @x - (CumulativeSum - PIECES) > 0 THEN 0
WHEN @x - CumulativeSum <= 0 THEN NULL
ELSE @x - CumulativeSum
END AS VALUE_DRILL_DOWN
FROM cumulative
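To see how the GroupId trick plays out on the sample data, here is a hand trace (my own working, not output from the fiddle) of what the cumulative CTE should contain. The running total at row 1 (6) is still below SALE_SUM_PIECES = 7, so row 1 falls into the 'None' group, while rows 2-6 get their own group and the running total restarts at the crossover row:
ROW_ID PIECES GroupId CumulativeSum
1 6 None 6
2 10 TVSONY 10
3 6 TVSONY 16
4 14 TVSONY 30
5 18 TVSONY 48
6 4 TVSONY 52
The final CASE then yields 23 - 10 = 13 for row 2, 23 - 16 = 7 for row 3, 0 for row 4 (where the restarted running total first exceeds 23), and NULL for the remaining rows, which matches the expected output.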
I'm not 100% sure what the expected calculation for VALUE_DRILL_DOWN is supposed to be.
You've said that for a valid row it should be (sum of the row and all rows after it) - 23.
In that case the following code should work, but it gives different numbers from your expected results.
I've used a self-joining common table expression to work out the cumulative sums of the rows as you count up.
The (total of the row and all rows after it) is equal to (the grand total) - (the cumulative sum at the row before); for row 2, for example, that is 58 - 6 = 52.
Therefore the following code should be correct...
DECLARE @table1 TABLE(ARTIKEL NVARCHAR(50), SUPPLIERID NVARCHAR(50), ORGID INT,
STORE INT, SALE_SUM_PIECES INT, [DATE] DATETIME)
DECLARE @table2 TABLE(ROW_ID INT, ARTIKEL NVARCHAR(50), SUPPLIERID NVARCHAR(50),
ORGID INT, PIECES INT, COSTPRICE MONEY, DISCOUNT DECIMAL(9,5))
DECLARE @x INT = 23
DECLARE @SumOfAll INT
INSERT @table1
( ARTIKEL , SUPPLIERID , ORGID , STORE , SALE_SUM_PIECES , [DATE] )
VALUES ( 'TV', 'SONY', 922,100,7 ,'2014-01-01' )
INSERT @table2
(ROW_ID, ARTIKEL , SUPPLIERID , ORGID , PIECES , COSTPRICE , DISCOUNT )
VALUES (1, 'TV', 'SONY', 922, 6, 110, 2.5 ),
(2, 'TV', 'SONY', 922, 10 , 80, 1 ) ,
(3, 'TV', 'SONY', 922, 6 , 65 , 1.5 ) ,
(4, 'TV', 'SONY', 922, 14 , 95 , 1.5 ),
(5, 'TV', 'SONY', 922, 18 , 95 , 1.5 ),
(6, 'TV', 'SONY', 922, 4 , 95 , 1.5 )
SELECT @SumOfAll = SUM(PIECES) FROM @table2 AS t
;WITH t2C AS (
SELECT ROW_ID, PIECES AS [cumulativeSum]
FROM @table2 AS t
WHERE ROW_ID = 1 -- assume starting at 1
UNION ALL
SELECT t.ROW_ID, t.PIECES + cumulativeSum AS [cumulativeSum]
FROM @table2 AS t
JOIN t2C ON t2C.ROW_ID + 1 = t.ROW_ID --assume rowIDs are sequential
)
SELECT
t2.ROW_ID,
t1.SUPPLIERID,
t1.ORGID,
t2.PIECES,
t2.COSTPRICE,
t2.DISCOUNT,
CASE WHEN t2C.cumulativeSum - t1.SALE_SUM_PIECES <= 0 THEN NULL
ELSE
CASE
WHEN @x - t2C.cumulativeSum <= 0
AND @x - (t2C.cumulativeSum - t2.PIECES) > 0 THEN 0
WHEN @x - t2C.cumulativeSum <= 0 THEN NULL
ELSE (@SumOfAll - t2cPrev.cumulativeSum) - @x
END
END AS [VALUE_DRILL_DOWN]
FROM t2C
LEFT JOIN t2C AS t2cPrev ON t2cPrev.ROW_ID = t2C.ROW_ID - 1
JOIN @table2 AS t2 ON t2.ROW_ID = t2C.ROW_ID
JOIN @table1 AS t1 ON t1.SUPPLIERID = t2.SUPPLIERID AND t1.ARTIKEL = t2.ARTIKEL
However this gives:
ROW_ID SUPPLIERID ORGID PIECES COSTPRICE DISCOUNT VALUE_DRILL_DOWN cumulativeSum sumOfRowsOnOrAfter
1 SONY 922 6 110.00 2.50000 NULL 6 NULL
2 SONY 922 10 80.00 1.00000 29 16 52
3 SONY 922 6 65.00 1.50000 19 22 42
4 SONY 922 14 95.00 1.50000 0 36 36
5 SONY 922 18 95.00 1.50000 NULL 54 22
6 SONY 922 4 95.00 1.50000 NULL 58 4
EDIT:
SQL Fiddle by WiiMaxx
select
*,
case when 23 - sum(c) over (order by row_id) >= 0
then 23 - sum(c) over (order by row_id)
when 23 - sum(c) over (order by row_id) + pieces > 0
then 0
end value_drill_down
from (
select *,
case when sum(pieces) over (order by row_id) >= (select sale_sum_pieces from t1)
then pieces
end c
from t2) x;
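The t1/t2 tables are not defined in that snippet; a minimal setup that should make it runnable (my assumption of what the fiddle contains, built from the sample data in the question) would be:
-- Assumed setup for the query above; the actual fiddle may differ.
create table t1 (artikel nvarchar(50), supplierid nvarchar(50), sale_sum_pieces int);
insert into t1 values ('TV', 'SONY', 7);
create table t2 (row_id int, artikel nvarchar(50), supplierid nvarchar(50), pieces int);
insert into t2 values (1, 'TV', 'SONY', 6), (2, 'TV', 'SONY', 10), (3, 'TV', 'SONY', 6),
(4, 'TV', 'SONY', 14), (5, 'TV', 'SONY', 18), (6, 'TV', 'SONY', 4);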
Related
I am trying to break up a running (ordered) sum into groups of a max value. When I implement the following example logic...
IF OBJECT_ID(N'tempdb..#t') IS NOT NULL DROP TABLE #t
SELECT TOP (ABS(CHECKSUM(NewId())) % 1000) ROW_NUMBER() OVER (ORDER BY name) AS ID,
LEFT(CAST(NEWID() AS NVARCHAR(100)),ABS(CHECKSUM(NewId())) % 30) AS Description
INTO #t
FROM sys.objects
DECLARE @maxGroupSize INT
SET @maxGroupSize = 100
;WITH t AS (
SELECT
*,
LEN(Description) AS DescriptionLength,
SUM(LEN(Description)) OVER (/*PARTITION BY N/A */ ORDER BY ID) AS [RunningLength],
SUM(LEN(Description)) OVER (/*PARTITION BY N/A */ ORDER BY ID)/@maxGroupSize AS GroupID
FROM #t
)
SELECT *, SUM(DescriptionLength) OVER (PARTITION BY GroupID) AS SumOfGroup
FROM t
ORDER BY GroupID, ID
I am getting groups that are larger than the maximum group size (length) of 100.
A recursive common table expression (rcte) would be one way to resolve this.
Sample data
Limited set of fixed sample data.
create table data
(
id int,
description nvarchar(20)
);
insert into data (id, description) values
( 1, 'qmlsdkjfqmsldk'),
( 2, 'mldskjf'),
( 3, 'qmsdlfkqjsdm'),
( 4, 'fmqlsdkfq'),
( 5, 'qdsfqsdfqq'),
( 6, 'mds'),
( 7, 'qmsldfkqsjdmfqlkj'),
( 8, 'qdmsl'),
( 9, 'mqlskfjqmlkd'),
(10, 'qsdqfdddffd');
Solution
For every recursion step, a case expression evaluates (r.group_running_length + len(d.description) <= @group_max_length) to decide whether the previous group can be extended or a new group must be started.
Set group target size to 40 to better fit the sample data.
declare @group_max_length int = 40;
with rcte as
(
select d.id,
d.description,
len(d.description) as description_length,
len(d.description) as running_length,
1 as group_id,
len(d.description) as group_running_length
from data d
where d.id = 1
union all
select d.id,
d.description,
len(d.description),
r.running_length + len(d.description),
case
when r.group_running_length + len(d.description) <= @group_max_length
then r.group_id
else r.group_id + 1
end,
case
when r.group_running_length + len(d.description) <= @group_max_length
then r.group_running_length + len(d.description)
else len(d.description)
end
from rcte r
join data d
on d.id = r.id + 1
)
select r.id,
r.description,
r.description_length,
r.running_length,
r.group_id,
r.group_running_length,
gs.group_sum
from rcte r
cross apply ( select max(r2.group_running_length) as group_sum
from rcte r2
where r2.group_id = r.group_id ) gs -- group sum
order by r.id;
Result
Contains both the running group length as well as the group sum for every row.
id description description_length running_length group_id group_running_length group_sum
-- ---------------- ------------------ -------------- -------- -------------------- ---------
1 qmlsdkjfqmsldk 14 14 1 14 33
2 mldskjf 7 21 1 21 33
3 qmsdlfkqjsdm 12 33 1 33 33
4 fmqlsdkfq 9 42 2 9 39
5 qdsfqsdfqq 10 52 2 19 39
6 mds 3 55 2 22 39
7 qmsldfkqsjdmfqlkj 17 72 2 39 39
8 qdmsl 5 77 3 5 28
9 mqlskfjqmlkd 12 89 3 17 28
10 qsdqfdddffd 11 100 3 28 28
Fiddle to see things in action (includes random data version).
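One practical note, not part of the original answer: SQL Server limits recursive CTEs to 100 recursion levels by default, so for more than roughly 100 rows the statement that consumes the rcte needs a MAXRECURSION hint, along these lines:
select r.id, r.description, r.group_id, r.group_running_length
from rcte r
order by r.id
option (maxrecursion 0); -- 0 removes the default limit of 100 recursion levels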
I have a SQL table of Customer_ID, showing Payments by Year. The first (of many) customer appears like this:
ID Payment Year
112 0 2004
112 0 2005
112 0 2006
112 9592 2007
112 12332 2008
112 9234 2011
112 5400 2012
112 7392 2014
112 8321 2015
Note that some years are missing. I need to create 10 new columns, showing the Payments in the previous 10 years, for each row. The resulting table should look like this:
ID Payment Year T-1 T-2 T-3 T-4 T-5 T-6 T-7 T-8 T-9 T-10
112 0 2004 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
112 0 2005 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL
112 0 2006 0 0 NULL NULL NULL NULL NULL NULL NULL NULL
112 952 2007 0 0 0 NULL NULL NULL NULL NULL NULL NULL
112 1232 2008 952 0 0 0 NULL NULL NULL NULL NULL NULL
112 924 2011 NULL NULL 1232 952 0 0 0 NULL NULL NULL
112 500 2012 924 NULL NULL 1232 952 0 0 0 NULL NULL
112 392 2014 NULL 500 924 NULL NULL 1232 952 0 0 0
112 821 2015 392 NULL 500 924 NULL NULL 1232 952 0 0
I am well aware that this is a large duplication of data, and so seems like a strange thing to do. However, I would still like to do it! (the data is being prepared for a predictive model, in which previous payments (and other info) will be used to predict the current year's payment)
I'm not really sure where to start with this. I have been looking at using pivot, but can't figure out how to get it to select values from a customer's previous year.
I would very much like to do this in SQL. If that is not possible I may be able to copy the table into R - but SQL is my preference.
Any help much appreciated.
You could use lag() if you had full data:
select t.*,
lag(payment, 1) over (partition by id order by year) as t_1,
lag(payment, 2) over (partition by id order by year) as t_2,
. . .
from t;
However, for your situation with missing intermediate years, left join may be simpler:
select t.*,
t1.payment as t_1,
t2.payment as t_2,
. . .
from t left join
t t1
on t1.id = t.id and
t1.year = t.year - 1 left join
t t2
on t2.id = t.id and
t2.year = t.year - 2 left join
. . .;
I think your friend will be LAG.
Here's an implementation (it first builds a row for every year between each ID's first and last year, so that LAG(n) means n calendar years back rather than n rows back):
Declare @t table (
ID int,
Payment int,
Yr int
)
Insert Into @t Values(112,0,2004)
Insert Into @t Values(112,0,2005)
Insert Into @t Values(112,0,2006)
Insert Into @t Values(112,9592,2007)
Insert Into @t Values(112,12332,2008)
Insert Into @t Values(112,9234,2011)
Insert Into @t Values(112,5400,2012)
Insert Into @t Values(112,7392,2014)
Insert Into @t Values(112,8321,2015)
Insert Into @t Values(113,0,2009)
Insert Into @t Values(113,9234,2011)
Insert Into @t Values(113,5400,2013)
Insert Into @t Values(113,8321,2015)
;with E1(n) as (Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1 Union All Select 1)
,E2(n) as (Select 1 From E1 a, E1 b)
,E4(n) as (Select 1 From E2 a, E2 b)
,E5(n) as (Select row_number() over(order by isnull(null,1)) From E4 a, E1 b)
,IDYears as (
Select z.ID, Yr = y.n
From (
Select
Id,
MinYear = min(Yr),
MaxYear = max(Yr)
From @t a
Group By Id
) z
Inner Join E5 y On y.n between z.MinYear and z.MaxYear
)
Select
*,
[t-1] = Lag(B.Payment, 1) Over(Partition By a.ID Order By a.Yr),
[t-2] = Lag(B.Payment, 2) Over(Partition By a.ID Order By a.Yr),
[t-3] = Lag(B.Payment, 3) Over(Partition By a.ID Order By a.Yr),
[t-4] = Lag(B.Payment, 4) Over(Partition By a.ID Order By a.Yr),
[t-5] = Lag(B.Payment, 5) Over(Partition By a.ID Order By a.Yr),
[t-6] = Lag(B.Payment, 6) Over(Partition By a.ID Order By a.Yr),
[t-7] = Lag(B.Payment, 7) Over(Partition By a.ID Order By a.Yr),
[t-8] = Lag(B.Payment, 8) Over(Partition By a.ID Order By a.Yr),
[t-9] = Lag(B.Payment, 9) Over(Partition By a.ID Order By a.Yr),
[t-10] = Lag(B.Payment, 10) Over(Partition By a.ID Order By a.Yr)
From IDYears a
Left Join @t b On a.ID = b.ID and a.Yr = b.Yr
Order By A.ID
I have results like this
TimeDiffMin | OrdersCount
10 | 2
12 | 5
09 | 6
20 | 15
27 | 11
I would like the following
TimeDiffMin | OrdersCount
05 | 0
10 | 8
15 | 5
20 | 15
25 | 0
30 | 11
So you can see that I want grouping in buckets of 5 minutes, showing the total order count within each 5-minute window, e.g. 0-5 minutes 0 orders, 5-10 minutes 8 orders.
Any help would be appreciated.
current query:
SELECT TimeDifferenceInMinutes, count(OrderId) NumberOfOrders FROM (
SELECT AO.OrderID, AO.OrderDate, AON.CreatedDate AS CancelledDate, DATEDIFF(minute, AO.OrderDate, AON.CreatedDate) AS TimeDifferenceInMinutes
FROM
(SELECT OrderID, OrderDate FROM AC_Orders) AO
JOIN
(SELECT OrderID, CreatedDate FROM AC_OrderNotes WHERE Comment LIKE '%has been cancelled.') AON
ON AO.OrderID = AON.OrderID
WHERE DATEDIFF(minute, AO.OrderDate, AON.CreatedDate) <= 100 AND AO.OrderDate >= '2016-12-01'
) AS Temp1
GROUP BY TimeDifferenceInMinutes
Now, if you are open to a TVF: I use this UDF to create dynamic date/time ranges; you supply the range and the increment.
Declare @YourTable table (TimeDiffMin int,OrdersCount int)
Insert Into @YourTable values
(10, 2),
(12, 5),
(09, 6),
(20,15),
(27,11)
Select TimeDiffMin = cast(R2 as int)
,OrdersCount = isnull(sum(OrdersCount),0)
From (Select R1=RetVal,R2=RetVal+5 From [dbo].[udf-Range-Number](0,25,5)) A
Left Join (
-- Your Complicated Query
Select * From @YourTable
) B on TimeDiffMin >= R1 and TimeDiffMin<R2
Group By R1,R2
Order By 1
Returns
TimeDiffMin OrdersCount
5 0
10 6
15 7
20 0
25 15
30 11
The UDF, if interested:
CREATE FUNCTION [dbo].[udf-Range-Number] (@R1 money,@R2 money,@Incr money)
Returns Table
Return (
with cte0(M) As (Select cast((@R2-@R1)/@Incr as int)),
cte1(N) As (Select 1 From (Values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) N(N)),
cte2(N) As (Select Top (Select M from cte0) Row_Number() over (Order By (Select NULL)) From cte1 a,cte1 b,cte1 c,cte1 d,cte1 e,cte1 f,cte1 g,cte1 h )
Select RetSeq=1,RetVal=@R1 Union All Select N+1,(N*@Incr)+@R1
From cte2
)
-- Max 100 million observations
-- Select * from [dbo].[udf-Range-Number](0,4,0.25)
You can do this using a derived table to first build up your time difference windows and then joining from that to sum up all the Orders that fall within that window.
declare @t table(TimeDiffMin int
,OrdersCount int
);
insert into @t values
(10, 2)
,(12, 5)
,(09, 6)
,(20,15)
,(27,11);
declare @Increment int = 5; -- Set your desired time windows here.
with n(n)
as
( -- Select 10 rows to start with:
select n from(values(1),(1),(1),(1),(1),(1),(1),(1),(1),(1)) as n(n)
),n2 as
( -- CROSS APPLY these 10 rows to get 10*10=100 rows we can use to generate incrementing ROW_NUMBERs. Use more CROSS APPLYs to get more rows:
select (row_number() over (order by (select 1))-1) * @Increment as StartMin
,(row_number() over (order by (select 1))) * @Increment as EndMin
from n -- 10 rows
cross apply n n2 -- 100 rows
--cross apply n n3 -- 1000 rows
--cross apply n n4 -- 10000 rows
)
select m.EndMin as TimeDiffMin
,isnull(sum(t.OrdersCount),0) as OrdersCount
from n2 as m
left join @t t
on(t.TimeDiffMin >= m.StartMin
and t.TimeDiffMin < m.EndMin
)
where m.EndMin <= 30 -- Filter as required
group by m.EndMin
order by m.EndMin
Query result:
TimeDiffMin OrdersCount
5 0
10 6
15 7
20 0
25 15
30 11
I have data which looks something like this:
stories value
--------------------------
0 2194940472.78964
1 1651820586.1447
2 627935051.75
3 586994698.4272
4 89132137.57
5 134608008
6 40759564
7 0
8 0
10 0
11 0
12 0
13 26060602
17 0
18 0
19 84522335
20 316478066.045
24 0
I want to sum it up per range.
The output I expect is:
stories value
0-3 125201021
4-7 215453123
8-12 453121545
12-max(numstories) 21354322
I tried this but am not able to figure out what is wrong:
select t.NumStories, SUM(t.bldnvalue)
from
(select
a.NumStories,
case
when a.NumStories between 0 and 3 then sum(a.BldgValue)
when a.NumStories between 4 and 7 then sum(a.BldgValue)
when a.NumStories between 8 and 12 then sum(a.BldgValue)
when a.NumStories between 13 and max(a.NumStories) then sum(a.BldgValue)
end as bldnvalue
from
dbo.EDM_CocaCola_Coca_Cola_Company_1_1 a
group by
a.NumStories) t
group by
t.NumStories
With this query I am getting this output
NumStories value
-------------------------------
0 2194940472.78964
3 586994698.4272
12 0
6 40759564
7 0
1 1651820586.1447
24 0
18 0
10 0
4 89132137.57
19 84522335
13 26060602
5 134608008
2 627935051.75
17 0
11 0
20 316478066.045
8 0
I like this result. I tried to use the BIN concept. I think the only issue would be with your max bin. I don't understand how you got your output sums; the first record's value is 2,194,940,472.78964, which is bigger than your value for the 0-3 bin.
if OBJECT_ID('tempdb..#Test') is not null
drop table #Test;
Create table #Test (
Stories int
, Value float
)
insert into #Test
values
(0 , 2194940472.78964)
, (1 , 1651820586.1447 )
, (2 , 627935051.75 )
, (3 , 586994698.4272 )
, (4 , 89132137.57 )
, (5 , 134608008 )
, (6 , 40759564 )
, (7 , 0 )
, (8 , 0 )
, (10, 0 )
, (11, 0 )
, (12, 0 )
, (13, 26060602 )
, (17, 0 )
, (18, 0 )
, (19, 84522335 )
, (20, 316478066.045 )
, (24, 0 )
if OBJECT_ID('tempdb..#Bins') is not null
drop table #Bins;
create Table #Bins(
Label varchar(20)
, Min int
, Max int
)
insert into #Bins values
('0-3', 0, 3)
, ('4-7', 4, 7)
, ('8-12', 8, 12)
, ('13 - Max', 13, 999999999)
Select b.Label
, sum(t.Value) as Value
from #Test t
join #Bins b
on t.stories between b.Min and b.Max
Group by b.Label
order by 1
Output:
Label Value
-------------------- ----------------------
0-3 5061690809.11154
13 - Max 427061003.045
4-7 264499709.57
8-12 0
Just build the grouping string that you want first, and then group by it.
select
case
when a.NumStories between 0 and 3 then '0-3'
when a.NumStories between 4 and 7 then '4-7'
when a.NumStories between 8 and 12 then '8-12'
when a.NumStories >= 13 then '13-max'
end as stories,
sum(a.BldgValue) as value
from
dbo.EDM_CocaCola_Coca_Cola_Company_1_1 a
group by
case
when a.NumStories between 0 and 3 then '0-3'
when a.NumStories between 4 and 7 then '4-7'
when a.NumStories between 8 and 12 then '8-12'
when a.NumStories >= 13 then '13-max'
end;
If you really want to print the max too, then you can put in a subquery in the "13-max" line as (SELECT MAX(BldgValue) FROM dbo.EDM_CocaCola_Coca_Cola_Company_1_1)
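A sketch of how that could look (my addition; judging from the expected label 12-max(numstories), the value to embed is presumably MAX(NumStories) rather than MAX(BldgValue)):
when a.NumStories >= 13
then '13-' + cast((select max(a2.NumStories) from dbo.EDM_CocaCola_Coca_Cola_Company_1_1 a2) as varchar(10))
The same expression would then also have to be repeated in the GROUP BY clause.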
You can try this:
SELECT '0-3' AS stories,
SUM(value) AS value
FROM dbo.EDM_CocaCola_Coca_Cola_Company_1_1
WHERE stories BETWEEN 0 AND 3
UNION ALL
SELECT '4-7' AS stories,
SUM(value) AS value
FROM dbo.EDM_CocaCola_Coca_Cola_Company_1_1
WHERE stories BETWEEN 4 AND 7
UNION ALL
...
Here is a solution with a CTE that should work for any data set, without copying the code.
declare @YourTable table(stories int, value money)
declare @GroupMemberCount int=4
insert @YourTable (stories,value) values (0,5),(1,10),(2,11),(3,7),(4,18),(5,13),(7,15)
;with cte as
(
select c.stories+v.i*@GroupMemberCount FirstGroupMember, c.stories+v.i*@GroupMemberCount+@GroupMemberCount -1 LastGroupMember
,CAST(c.stories+v.i*@GroupMemberCount as varchar(50))
+'-'+CAST(c.stories+v.i*@GroupMemberCount+@GroupMemberCount -1 as varchar(50))GroupName
from (select MIN(stories) stories from @YourTable) c
cross join (values (0),(1),(2),(3),(4)/* and so on */) v(i)
where exists (select * from @YourTable yt where yt.stories>=c.stories+v.i*3)
)
select c.GroupName, SUM(yt.value)
from cte c
JOIN @YourTable yt ON yt.stories BETWEEN c.FirstGroupMember AND C.LastGroupMember
GROUP BY c.GroupName
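For the sample rows inserted above, this should return two groups (my own calculation, not part of the original answer): '0-3' with 5+10+11+7 = 33.00 and '4-7' with 18+13+15 = 46.00; the generated 8-11 group has no matching rows and is dropped by the inner join.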
I'm working on the following query and table
SELECT dd.actual_date, dd.week_number_overall, sf.branch_id, AVG(sf.overtarget_qnt) AS targetreach
FROM sales_fact sf, date_dim dd
WHERE dd.date_id = sf.date_id
AND dd.week_number_overall BETWEEN 88-2 AND 88
AND sf.branch_id = 1
GROUP BY dd.actual_date, branch_id, dd.week_number_overall
ORDER BY dd.actual_date ASC;
ACTUAL_DATE WEEK_NUMBER_OVERALL BRANCH_ID TARGETREACH
----------- ------------------- ---------- -----------
13/08/14 86 1 -11
14/08/14 86 1 12
15/08/14 86 1 11.8
16/08/14 86 1 1.4
17/08/14 86 1 -0.2
19/08/14 86 1 7.2
20/08/14 87 1 16.6
21/08/14 87 1 -1.4
22/08/14 87 1 14.4
23/08/14 87 1 2.8
24/08/14 87 1 18
26/08/14 87 1 13.4
27/08/14 88 1 -1.8
28/08/14 88 1 10.6
29/08/14 88 1 7.2
30/08/14 88 1 14
31/08/14 88 1 9.6
02/09/14 88 1 -3.2
the "TargetReach" column shows whether target has been reach or not.
A negative value means target wasn't reached on that day.
How can I get calculate the number of ROW with positive value for this query?
that will show something like:
TOTAL_POSITIVE_TARGET_REACH WEEK_NUMBER_OVERALL
--------------------------- ------------------
13 88
I have tried to use CASE but it's still not working right.
Thanks a lot.
You want to use conditional aggregation:
with t as (
<your query here>
)
select week_number_overall, sum(case when targetreach > 0 then 1 else 0 end)
from t
group by week_number_overall;
However, I would rewrite your original query to use proper join syntax. Then the query would look like:
SELECT week_number_overall,
SUM(CASE WHEN targetreach > 0 THEN 1 ELSE 0 END)
FROM (SELECT dd.actual_date, dd.week_number_overall, sf.branch_id, AVG(sf.overtarget_qnt) AS targetreach
FROM sales_fact sf JOIN
date_dim dd
ON dd.date_id = sf.date_id
WHERE dd.week_number_overall BETWEEN 88-2 AND 88 AND sf.branch_id = 1
GROUP BY dd.actual_date, branch_id, dd.week_number_overall
) t
GROUP BY week_number_overall
ORDER BY week_number_overall;
The difference between a CTE (the first solution) and a subquery is (in this case) just a matter of preference.
SELECT WEEK_NUMBER_OVERALL, COUNT(*) TOTAL_POSITIVE_TARGET_REACH
FROM (your original query)
WHERE TARGETREACH >= 0
GROUP BY WEEK_NUMBER_OVERALL
select sum( decode( sign( TARGETREACH ) , -1 , 0 , 0 , 0 , 1 , 1 ) )
from ( "your query here" );
Use HAVING Clause
SELECT dd.actual_date, dd.week_number_overall, sf.branch_id, AVG(sf.overtarget_qnt) AS targetreach
FROM sales_fact sf, date_dim dd
WHERE dd.date_id = sf.date_id
AND dd.week_number_overall BETWEEN 88-2 AND 88
AND sf.branch_id = 1
GROUP BY dd.actual_date, branch_id, dd.week_number_overall
HAVING AVG(sf.overtarget_qnt)>0
ORDER BY dd.actual_date ASC;
Using decode() and sign(), get both the positive and the negative counts.
drop table test;
create table test (
key number(5),
value number(5));
insert into test values ( 1, -9 );
insert into test values ( 2, -8 );
insert into test values ( 3, 10 );
insert into test values ( 4, 12 );
insert into test values ( 5, -9 );
insert into test values ( 6, 8 );
insert into test values ( 7, 51 );
commit;
select sig , count ( sig ) from
(
select key, ( (decode( sign( value ) , -1 , '-ve' , 0 , 'zero' , 1 , '+ve' ) ) ) sig
from test
)
group by sig
SIG COUNT(SIG)
---- ----------------------
+ve 4
-ve 3