Psql - generate series with running total - sql

I have the following table:
create table account_info(
id int not null unique,
creation_date date,
deletion_date date,
gather boolean)
Adding sample data to it:
insert into account_info(id,creation_date,deletion_date,gather)
values(1,'2019-09-10',null,true),
(2,'2019-09-12',null,true),
(3,'2019-09-14','2019-10-08',true),
(4,'2019-09-15','2019-09-18',true),
(5,'2019-09-22',null,false),
(6,'2019-09-27','2019-09-29',true),
(7,'2019-10-04','2019-10-17',false),
(8,null,'2019-10-20',true),
(9,'2019-10-12',null,true),
(10,'2019-10-18',null,true)
I would like to see how many accounts have been added grouped by week and how many accounts have been deleted grouped by week.
I have tried the following:
select dd, count(distinct ai.id) as created ,count(distinct ai2.id) as deleted
from generate_series('2019-09-01'::timestamp,
'2019-10-21'::timestamp, '1 week'::interval) dd
left join account_info ai on ai.creation_date::DATE <= dd::DATE
left join account_info ai2 on ai2.deletion_date::DATE <=dd::DATE
where ai.gather is true
and ai2.gather is true
group by dd
order by dd asc
This produces the following output:
+------------+---------+---------+
| dd         | Created | Deleted |
+------------+---------+---------+
| 2019-09-22 | 4       | 1       |
| 2019-09-29 | 5       | 2       |
| 2019-10-06 | 5       | 2       |
| 2019-10-13 | 6       | 3       |
| 2019-10-20 | 7       | 4       |
+------------+---------+---------+
This output shows me the running total of how many accounts have been created and how many have been deleted.
However, I would like to see something like this:
+------------+---------+---------+-------------------+-------------------+
| dd         | Created | Deleted | Total Sum Created | Total Sum Deleted |
+------------+---------+---------+-------------------+-------------------+
| 2019-09-22 | 4       | 1       | 4                 | 1                 |
| 2019-09-29 | 1       | 1       | 5                 | 2                 |
| 2019-10-06 | NULL    | NULL    | 5                 | 2                 |
| 2019-10-13 | 1       | 1       | 6                 | 3                 |
| 2019-10-20 | 1       | 1       | 7                 | 4                 |
+------------+---------+---------+-------------------+-------------------+
I get an error message when trying to sum up the created and deleted columns in psql, as I cannot nest aggregate functions.

You could just turn your existing query into a subquery and use lag() to compute the difference between consecutive records:
select
dd,
created - coalesce(lag(created) over(order by dd), 0) created,
deleted - coalesce(lag(deleted) over(order by dd), 0) deleted,
created total_sum_created,
deleted total_sum_deleted
from (
select
dd,
count(distinct ai.id) as created ,
count(distinct ai2.id) as deleted
from
generate_series(
'2019-09-01'::timestamp,
'2019-10-21'::timestamp,
'1 week'::interval
) dd
left join account_info ai
on ai.creation_date::DATE <= dd::DATE and ai.gather is true
left join account_info ai2
on ai2.deletion_date::DATE <=dd::DATE and ai2.gather is true
group by dd
) x
order by dd asc
I moved the ai.gather / ai2.gather = true conditions to the on side of the joins: putting these conditions in the where clause basically turns your left joins into inner joins.
Demo on DB Fiddle:
| dd | created | deleted | total_sum_created | total_sum_deleted |
| ------------------------ | ------- | ------- | ----------------- | ----------------- |
| 2019-09-01T00:00:00.000Z | 0 | 0 | 0 | 0 |
| 2019-09-08T00:00:00.000Z | 0 | 0 | 0 | 0 |
| 2019-09-15T00:00:00.000Z | 4 | 0 | 4 | 0 |
| 2019-09-22T00:00:00.000Z | 0 | 1 | 4 | 1 |
| 2019-09-29T00:00:00.000Z | 1 | 1 | 5 | 2 |
| 2019-10-06T00:00:00.000Z | 0 | 0 | 5 | 2 |
| 2019-10-13T00:00:00.000Z | 1 | 1 | 6 | 3 |
| 2019-10-20T00:00:00.000Z | 1 | 1 | 7 | 4 |
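To see why the placement of that filter matters, here is a minimal sketch (same table and columns as above). With the filter in the where clause, weeks with no matching rows disappear, because ai.gather is NULL on the unmatched side of the left join; with the filter in the on clause, every generated week survives with a count of 0:
-- Filter in WHERE: unmatched weeks are dropped (the left join degenerates into an inner join)
select dd, count(ai.id) as created
from generate_series('2019-09-01'::timestamp, '2019-10-21'::timestamp, '1 week'::interval) dd
left join account_info ai on ai.creation_date::date <= dd::date
where ai.gather is true
group by dd;

-- Filter in ON: unmatched weeks are kept, with a count of 0
select dd, count(ai.id) as created
from generate_series('2019-09-01'::timestamp, '2019-10-21'::timestamp, '1 week'::interval) dd
left join account_info ai on ai.creation_date::date <= dd::date and ai.gather is true
group by dd;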
Another option would be to use lag() in combination with generate_series() to generate a list of date ranges. Then you can do just one join on the original table, and do conditional aggregation in the outer query:
select
dd,
count(distinct case
when ai.creation_date::date <= dd::date and ai.creation_date::date > lag_dd::date
then ai.id
end) created,
count(distinct case
when ai.deletion_date::date <= dd::date and ai.deletion_date::date > lag_dd::date
then ai.id
end) deleted,
count(distinct case
when ai.creation_date::date <= dd::date
then ai.id
end) total_sum_created,
count(distinct case
when ai.deletion_date::date <= dd::date
then ai.id
end) total_sum_deleted
from
(
select dd, lag(dd) over(order by dd) lag_dd
from generate_series(
'2019-09-01'::timestamp,
'2019-10-21'::timestamp,
'1 week'::interval
) dd
) dd
left join account_info ai on ai.gather is true
group by dd
order by dd
Demo on DB Fiddle

A lateral join and aggregation is soooo well suited to this problem. If you are content with the weeks in the data:
select date_trunc('week', dte) as week,
sum(is_create) as creates_in_week,
sum(is_delete) as deletes_in_week,
sum(sum(is_create)) over (order by min(v.dte)) as running_creates,
sum(sum(is_delete)) over (order by min(v.dte)) as running_deletes
from account_info ai cross join lateral
(values (ai.creation_date, 1, 0), (ai.deletion_date, 0, 1)
) v(dte, is_create, is_delete)
where v.dte is not null and ai.gather
group by week
order by week;
If you want it for a specified set of weeks:
select gs.wk,
sum(v.is_create) as creates_in_week,
sum(v.is_delete) as deletes_in_week,
sum(sum(v.is_create)) over (order by min(v.dte)) as running_creates,
sum(sum(v.is_delete)) over (order by min(v.dte)) as running_deletes
from generate_series('2019-09-01'::timestamp,
'2019-10-21'::timestamp, '1 week'::interval) gs(wk) left join
( account_info ai cross join lateral
(values (ai.creation_date, 1, 0), (ai.deletion_date, 0, 1)
) v(dte, is_create, is_delete)
)
on v.dte >= gs.wk and
v.dte < gs.wk + interval '1 week'
where dte is not null and ai.gather
group by gs.wk
order by gs.wk;
Here is a db<>fiddle.

You can generate the results you want using a series of CTEs to build up the data tables:
with dd as
(select *
from generate_series('2019-09-01'::timestamp,
'2019-10-21'::timestamp, '1 week'::interval) d),
ddl as
(select d, coalesce(lag(d) over (order by d), '1970-01-01'::timestamp) as pd
from dd),
counts as
(select d, count(distinct ai.id) as created, count(distinct ai2.id) as deleted
from ddl
left join account_info ai on ai.creation_date::DATE > ddl.pd::DATE AND ai.creation_date::DATE <= ddl.d::DATE AND ai.gather is true
left join account_info ai2 on ai2.deletion_date::DATE > ddl.pd::DATE AND ai2.deletion_date::DATE <= ddl.d::DATE AND ai2.gather is true
group by d)
select d, created, deleted,
sum(created) over (order by d rows unbounded preceding) as "total created",
sum(deleted) over (order by d rows unbounded preceding) as "total deleted"
from counts
order by d asc
Note that the gather condition needs to be part of the left join to avoid turning those into inner joins.
Output:
| d                   | created | deleted | total created | total deleted |
| ------------------- | ------- | ------- | ------------- | ------------- |
| 2019-09-01 00:00:00 | 0       | 0       | 0             | 0             |
| 2019-09-08 00:00:00 | 0       | 0       | 0             | 0             |
| 2019-09-15 00:00:00 | 4       | 0       | 4             | 0             |
| 2019-09-22 00:00:00 | 0       | 1       | 4             | 1             |
| 2019-09-29 00:00:00 | 1       | 1       | 5             | 2             |
| 2019-10-06 00:00:00 | 0       | 0       | 5             | 2             |
| 2019-10-13 00:00:00 | 1       | 1       | 6             | 3             |
| 2019-10-20 00:00:00 | 1       | 1       | 7             | 4             |
Note this query gives the results for the week ending with d. If you want results for the week starting with d, the lag can be changed to lead. You can see this in my demo.
Demo on dbfiddle
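For reference, here is a sketch of what the lead() variant could look like, counting each week as starting with d (the '2999-01-01' sentinel is just an arbitrary far-future bound, not something taken from the original demo):
with dd as
(select *
from generate_series('2019-09-01'::timestamp,
'2019-10-21'::timestamp, '1 week'::interval) d),
ddl as
(select d, coalesce(lead(d) over (order by d), '2999-01-01'::timestamp) as nd
from dd),
counts as
(select d, count(distinct ai.id) as created, count(distinct ai2.id) as deleted
from ddl
left join account_info ai on ai.creation_date::DATE >= ddl.d::DATE AND ai.creation_date::DATE < ddl.nd::DATE AND ai.gather is true
left join account_info ai2 on ai2.deletion_date::DATE >= ddl.d::DATE AND ai2.deletion_date::DATE < ddl.nd::DATE AND ai2.gather is true
group by d)
select d, created, deleted,
sum(created) over (order by d rows unbounded preceding) as "total created",
sum(deleted) over (order by d rows unbounded preceding) as "total deleted"
from counts
order by d asc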

Related

BigQuery: fill null for missing column in table

I have a query to show daily aggregation based on some metrics, something like:
select date(timestamp), metric, count(*) from aggs GROUP BY 1,2 ORDER BY 1,2;
The problem is that sometimes a certain metric is missing for a certain day, like:
date | metric | count
03/01 | B | 50
03/02 | A | 60
03/02 | B | 10
03/02 | C | 70
03/03 | C | 10
I want to fill in 0 or null for missing date/metric pairs - i.e. how can we do something like:
date | metric | count
03/01 | A | 0
03/01 | B | 50
03/01 | C | 0
03/02 | A | 60
03/02 | B | 10
03/02 | C | 70
03/03 | A | 0
03/03 | B | 0
03/03 | C | 10
You can generate the rows using a cross join and then fill in the values using a left join:
select date, metric, coalesce(t.count, 0)
from (select distinct date from t) d cross join
(select distinct metric from t) m left join
t
using (date, metric);
If you don't have all dates that you want, you can use:
unnest(generate_date_array(<date1>, <date2>, interval 1 day)) as dte
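For instance, a sketch of how that could plug into the query above (the hard-coded date range and the aggregated table t are assumptions for illustration):
select dte as date, m.metric, coalesce(t.count, 0) as count
from unnest(generate_date_array(date '2023-03-01', date '2023-03-03', interval 1 day)) as dte
cross join (select distinct metric from t) m
left join t on t.date = dte and t.metric = m.metric
order by 1, 2;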
There are several ways to do time-series null-exposure in BigQuery. If the query performance is not an issue, then the easier way to do it is
WITH original_result AS (
SELECT date(timestamp) as date, metric, count(*)
FROM aggs
GROUP BY 1,2
)
SELECT
*
FROM
UNNEST(
GENERATE_DATE_ARRAY(<start_date>, <end_date>, INTERVAL 1 DAY)
) AS date
LEFT JOIN original_result USING (date)
ORDER BY 1, 2

Split a date range in SQL Server

I'm struggling with a solution for a problem but I couldn't find anything similar here.
I have a table "A" like:
+---------+------------+------------+-----------+
| user_id | from | to | attribute |
+---------+------------+------------+-----------+
| 1 | 2020-01-01 | 2020-12-31 | abc |
+---------+------------+------------+-----------+
and I get a table "B" like:
+---------+------------+------------+-----------+
| user_id | from | to | attribute |
+---------+------------+------------+-----------+
| 1 | 2020-03-01 | 2020-04-15 | def |
+---------+------------+------------+-----------+
And what I need is:
+---------+------------+------------+-----------+
| user_id | from | to | attribute |
+---------+------------+------------+-----------+
| 1 | 2020-01-01 | 2020-02-29 | abc |
| 1 | 2020-03-01 | 2020-04-15 | def |
| 1 | 2020-04-16 | 2020-12-31 | abc |
+---------+------------+------------+-----------+
I tried just using insert and update, but I couldn't figure out how to do both simultaneously. Is there a much simpler way? I read about CTEs - could this be an approach?
I'd be very thankful for your help!
Edit: more examples
TABLE A
| user_id | from | to | attribute |
+=========+============+============+===========+
| 1 | 2020-01-01 | 2020-12-31 | atr1 |
| 1 | 2021-01-01 | 2021-12-31 | atr2 |
| 2 | 2020-01-01 | 2021-06-15 | atr1 |
| 3 | 2020-01-01 | 2021-06-15 | atr3 |
TABLE B
| user_id | from | to | attribute |
+=========+============+============+===========+
| 1 | 2020-09-01 | 2021-02-15 | atr3 |
| 2 | 2020-04-15 | 2020-05-31 | atr2 |
| 3 | 2021-04-01 | 2022-01-01 | atr1 |
OUTPUT:
| user_id | from | to | attribute |
+=========+============+============+===========+
| 1 | 2020-01-01 | 2020-08-31 | atr1 |
| 1 | 2020-09-01 | 2021-02-15 | atr3 |
| 1 | 2021-02-16 | 2021-12-31 | atr2 |
| 2 | 2020-01-01 | 2020-04-14 | atr1 |
| 2 | 2020-04-15 | 2020-05-31 | atr2 |
| 2 | 2020-06-01 | 2021-06-15 | atr1 |
| 3 | 2020-01-01 | 2021-03-31 | atr3 |
| 3 | 2021-04-01 | 2022-01-01 | atr1 |
Initially I just asked how to split the date range and add a new row, because the new attribute from table B falls inside a range in table A. But that is only part of the problem. Maybe it's clearer with the new dataset?
Sample data:
create table #TableA( userid int, fromdt date
,todt date, attribute varchar(10))
insert into #TableA (userid , fromdt , todt , attribute)
values
( 1 ,'2020-01-01','2020-12-31' , 'atr1' ),
( 1 ,'2021-01-01','2021-12-31' , 'atr2' ),
( 2 ,'2020-01-01','2021-06-15' , 'atr1' ),
( 3 ,'2020-01-01','2021-06-15' , 'atr3' )
create table #TableB( userid int,fromdt date
,todt date, attribute varchar(10))
insert into #TableB (userid,fromdt, todt, attribute)
values
( 1 ,'2020-09-01','2021-02-15' , 'atr3' ),
( 2 ,'2020-04-15','2020-05-31' , 'atr2' ),
( 3 ,'2021-04-01','2022-01-01' , 'atr1' )
;
The script:
;WITH CTE
AS (
SELECT *
FROM #TableA
UNION ALL
SELECT *
FROM #TableB
)
,CTE2
AS (
SELECT userid
,min(fromdt) minfromdt
,max(todt) maxtodt
FROM CTE
GROUP BY userid
)
,CTE3
AS (
SELECT c.userid
,c.fromdt
,c.todt
,c.attribute
,LEAD(c.fromdt, 1) OVER (
PARTITION BY c.userid ORDER BY c.fromdt
) LeadFromdt
FROM CTE c
)
,CTE4
AS (
SELECT c3.userid
,c3.fromdt
,CASE
WHEN c3.todt > c3.LeadFromdt
THEN dateadd(day, - 1, c3.leadfromdt)
--when c3.todt<c3.LeadFromdt then dateadd(day,-1,c3.leadfromdt)
ELSE c3.todt
END AS Todt
,
--c3.todt as todt1,
c3.attribute
FROM CTE3 c3
)
,CTE5
AS (
SELECT userid
,fromdt
,todt
,attribute
FROM CTE4
UNION ALL
SELECT c2.userid
,dateadd(day, 1, c4.Todt) AS Fromdt
,maxtodt AS Todt
,c4.attribute
FROM CTE2 c2
CROSS APPLY (
SELECT TOP 1 c4.todt
,c4.attribute
FROM cte4 c4
WHERE c2.userid = c4.userid
ORDER BY c4.Todt DESC
) c4
WHERE c2.maxtodt > c4.Todt
)
SELECT *
FROM CTE5
ORDER BY userid
,fromdt
drop table #TableA, #TableB
Your expected output is wrong. Also, please append to the same example any other sample data for which my script is not working.
The easiest way is to work with a calendar table. You can create one and reuse it later.
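If you don't have one yet, here is a minimal sketch for building it in SQL Server (the AllDates name and theDate column simply match the query below; the date range is arbitrary):
-- Build a simple calendar table covering 2020-2022, one row per day
;WITH n AS (
SELECT CAST('2020-01-01' AS date) AS theDate
UNION ALL
SELECT DATEADD(DAY, 1, theDate) FROM n WHERE theDate < '2022-12-31'
)
SELECT theDate
INTO AllDates
FROM n
OPTION (MAXRECURSION 0);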
When you have one (here I called it "AllDates"), you can do something like this:
WITH cte
as
(
select ad.theDate,u.userid,isnull(b.attrib,a.attrib) as attrib,
ROW_NUMBER() over (PARTITION BY u.userid, isnull(b.attrib,a.attrib) ORDER BY ad.theDate)
- ROW_NUMBER() over (PARTITION BY u.userid ORDER BY ad.theDate) as grp
from AllDates ad
cross join (select userid from tableA union select userid from tableB) u
left join tableB b on ad.theDate between b.frm and b.toD and u.userid = b.userid
left join tableA a on ad.theDate between a.frm and a.toD and u.userid = a.userid
where b.frm is not null
or a.frm is not null
)
SELECT userid,attrib,min(theDate) as frmD, max(theDate) as toD
FROM cte
GROUP BY userid,attrib,grp
ORDER BY 1,3;
If I understand the request correctly, the data from table A should be merged into table B to fill the gaps, based on four scenarios. Here is how I achieved it:
/*
Scenario 1 - Use dates from B as base to be filled in from A
- Start and end dates from B
*/
SELECT
B.UserId,
B.StartDate,
B.EndDate,
B.Attr
FROM #tmpB AS B
UNION
/*
Scenario 2 - Start date between start and end date of another record
- End date from B plus one day as start date
- End date from A as end date
*/
SELECT
B.UserId,
DATEADD(DD, 1, B.EndDate) AS StartDate,
A.EndDate,
A.Attr
FROM #tmpB AS B
JOIN #tmpA AS A ON
B.UserId = A.UserId
AND B.StartDate < A.StartDate
AND B.EndDate > A.StartDate
UNION
/*
Scenario 3 - End date between start and end date of another record or both dates between start and end date of another record
- Start date from A as start date
- Start date from B minus one as end date
*/
SELECT
B.UserId,
A.StartDate,
DATEADD(DD, -1, B.StartDate) AS EndDate,
A.Attr
FROM #tmpB AS B
JOIN #tmpA AS A ON
B.UserId = A.UserId
AND (B.StartDate < A.EndDate AND B.EndDate > A.EndDate
OR B.StartDate BETWEEN A.StartDate AND A.EndDate AND B.EndDate BETWEEN A.StartDate AND A.EndDate)
UNION
/*
Scenario 4 - Both dates between start and end date of another record
- End date from B minus one as start date
- End date from A as end date
*/
SELECT
B.UserId,
DATEADD(DD, -1, B.EndDate) AS StartDate,
A.EndDate,
A.Attr
FROM #tmpB AS B
JOIN #tmpA AS A ON
B.UserId = A.UserId
AND B.StartDate BETWEEN A.StartDate AND A.EndDate
AND B.EndDate BETWEEN A.StartDate AND A.EndDate

SQL: Fill null date and null value in the row

I have a table like this:
user | date | Balance|
AAA | 2019-10-25 | 100 |
AAA | 2019-10-23 | 125 |
AAA | 2019-10-22 | 150 |
AAA | 2019-10-20 | 100 |
I want to fill in each missing date, taking its value from the previous date, and also extend the series up to the current date (the first rows, and the other missing days, marked below), carrying the previous value forward.
user | date | Balance|
**AAA | 2019-10-27 | 100 |**
**AAA | 2019-10-26 | 100 |**
AAA | 2019-10-25 | 100 |
**AAA | 2019-10-24 | 125 |**
AAA | 2019-10-23 | 125 |
AAA | 2019-10-22 | 150 |
**AAA | 2019-10-21 | 100 |**
AAA | 2019-10-20 | 100 |
The key here is generating the dates:
select u.dte
from (values (sequence(cast('2019-10-20' as date),
cast('2019-10-27' as date),
interval '1' day
)
)
) v(date_array) cross join
unnest(v.date_array) u(dte)
Then, you can use this information to fill in the values:
with dates as (
select u.dte
from (values (sequence(cast('2019-10-20' as date),
cast('2019-10-27' as date),
interval '1' day
)
)
) v(date_array) cross join
unnest(v.date_array) u(dte)
)
select user, dte,
max(balance) over (partition by user, grp) as balance
from (select d.dte, u.user, t.balance,
count(t.user) over (partition by u.user order by d.dte) as grp
from dates d cross join
(select distinct user from t) u left join
t
on t.date = d.dte and t.user = u.user
) du
order by user, dte;
The final query is implementing lag(ignore nulls). What it does is assign a grouping based on the presence of a record in your data -- that is what the count(t.user) over () is doing. The outer select then spreads this value over the entire group.
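Here is a tiny self-contained illustration of that grouping trick, with made-up dates and balances: the running count only increases on rows that have a balance, so rows with a NULL balance share a grp value with the last non-NULL row before them, and max(balance) over that group carries the value forward.
select dte, balance,
count(balance) over (order by dte) as grp
from (values
(date '2019-10-20', 100),
(date '2019-10-21', null),
(date '2019-10-22', null),
(date '2019-10-23', 150)
) as t(dte, balance)
order by dte;
-- grp comes out as 1, 1, 1, 2, so max(balance) over (partition by grp) fills the NULLs with 100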
EDIT:
According to Piotr's comment:
select u.user, d.dte,
coalesce(t.balance,
lag(t.balance) ignore nulls over (partition by u.user order by d.dte)
) as balance
from dates d cross join
(select distinct user from t) u left join
t
on t.date = d.dte and t.user = u.user
order by u.user, d.dte;

Summing dates across multiple rows in SQL?

We have a Table that stores alarms for certain SetPoints in our system. I'm attempting to write a query that first gets the difference between two dates (spread across two rows), and then sums all of the date differences to get a total sum for the amount of time the setpoint was in alarm.
We have one database where I've accomplished something similar, but in that case both the startTime and endTime were in the same row. In this case, that approach is not adequate.
Some example data:
| Row | TagID | SetPointID | EventLogTime | InAlarm |
-------------------------------------------------------------------------------------
| 1 | 1 | 2 | 2016-01-01 01:49:18.070 | 1 |
| 2 | 1 | 1 | 2016-01-01 03:23:39.970 | 1 |
| 3 | 1 | 2 | 2016-01-01 03:23:40.070 | 0 |
| 4 | 1 | 1 | 2016-01-01 08:04:01.260 | 0 |
| 5 | 1 | 2 | 2016-01-01 08:04:01.370 | 1 |
| 6 | 1 | 1 | 2016-01-01 11:40:36.367 | 1 |
| 7 | 1 | 2 | 2016-01-01 11:40:36.503 | 0 |
| 8 | 1 | 1 | 2016-01-01 13:00:30.263 | 0 |
Results
| TagID | SetPointID | TotalTimeInAlarm |
------------------------------------------------------
| 1 | 1 | 6.004443 (hours) |
| 1 | 2 | 5.182499 (hours) |
Essentially, what I need to do is get the start time and end time for each tag and each setpoint, and then get the total time in alarm. I'm thinking CTEs might be able to help, but I'm not sure.
I believe the pseudo query logic would be similar to
Define #startTime DATETIME, #endTime DATETIME
SELECT TagID,
SetPointID,
ABS(First Occurrence of InAlarm = True (since last occurrence WHERE InAlarm = False)
- First Occurrence of InAlarm = False (since last occurrence WHERE InAlarm = True))
-- IF no InAlarm = False use #endTime.
GROUP BY TagID, SetPointID
You can use the LEAD windowed function (or LAG) to do this pretty easily. This assumes that the rows always come in pairs with 1-0-1-0 for "InAlarm". If that doesn't happen then it's going to throw things off. You would need to have business rules for these situations in any event.
;WITH CTE_Timespans AS
(
SELECT
TagID,
SetPointID,
InAlarm,
EventLogTime,
LEAD(EventLogTime, 1) OVER (PARTITION BY TagID, SetPointID ORDER BY EventLogTime) AS EndingEventLogTime
FROM
My_Table
)
SELECT
TagID,
SetPointID,
SUM(DATEDIFF(SS, EventLogTime, EndingEventLogTime))/3600.0 AS TotalTime
FROM
CTE_Timespans
WHERE
InAlarm = 1
GROUP BY
TagID,
SetPointID
One easy way is to use OUTER APPLY to get the next date that is not InAlarm
SELECT mt.TagID,
mt.SetPointID,
SUM(DATEDIFF(ss,mt.EventLogTime,oa.EventLogTime)) / 3600.0 AS [TotalTimeInAlarm]
FROM MyTable mt
OUTER APPLY (SELECT MIN([EventLogTime]) EventLogTime
FROM MyTable mt2
WHERE mt.TagID = mt2.TagID
AND mt.SetPointID = mt2.SetPointID
AND mt2.EventLogTime > mt.EventLogTime
AND InAlarm = 0
) oa
WHERE mt.InAlarm = 1
GROUP BY mt.TagID,
mt.SetPointID
LEAD() might perform better if using MSSQL 2012+
In SQL Server 2014+:
SELECT tagId, setPointId, SUM(DATEDIFF(second, pt, eventLogTime)) / 3600. AS diff
FROM (
SELECT *,
LAG(inAlarm) OVER (PARTITION BY tagId, setPointId ORDER BY eventLogTime, row) ppa,
LAG(eventLogTime) OVER (PARTITION BY tagId, setPointId ORDER BY eventLogTime, row) pt
FROM (
SELECT LAG(inAlarm) OVER (PARTITION BY tagId, setPointId ORDER BY eventLogTime, row) pa,
*
FROM mytable
) q
WHERE EXISTS
(
SELECT pa
EXCEPT
SELECT inAlarm
)
) q
WHERE ppa = 1
AND inAlarm = 0
GROUP BY
tagId, setPointId
This will filter out consecutive events with the same alarm state.
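As an aside, the WHERE EXISTS (SELECT pa EXCEPT SELECT inAlarm) predicate is just a NULL-safe way of writing "pa is different from inAlarm", which is what keeps only the rows where the state changed. A tiny illustration:
-- EXCEPT treats NULLs as equal, so the comparison works even when one side is NULL
-- (unlike pa <> inAlarm, which is unknown when pa is NULL)
SELECT CASE WHEN EXISTS (SELECT NULL EXCEPT SELECT 1)
THEN 'different' ELSE 'same' END AS null_vs_one,
CASE WHEN EXISTS (SELECT 1 EXCEPT SELECT 1)
THEN 'different' ELSE 'same' END AS one_vs_one;
-- Returns: different, same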

How to merge two different rows (filling the missing values with zero)

I am trying to use a union to merge two outputs, but the rows' values are different. I need the missing rows' values to be zero, like in the output (third) table. I have been struggling with this for two days; please help me.
Select t1.round,
t1.SC,
t1.ST,
t1.OTHERS,
t2.round_up,
t2.SC_up,
t2.ST_up,
t2.OTHERS_up
From
(Select round as round,
Sum (non_slsc_qty) as SC,
Sum (non_slst_qty) as ST,
Sum (non_slot_qty) as OTHERS
FROM vhn_issue
where (date between '2015-08-01' and '2015-08-31')AND
dvn_cd='15' AND phc_cd='012' AND hsc_cd='05' GROUP BY round) t1
,
(Select round as round_up,
Sum (non_slsc_qty) as SC_up,
Sum (non_slst_qty) as ST_up,
Sum (non_slot_qty) as OTHERS_up,
FROM vhn_issue
where (date between '2015-04-01' and '2015-08-31')AND
dvn_cd='15' AND phc_cd='012' AND hsc_cd='05' GROUP BY round) t2
This is the first table's result:
+-------+----+----+--------+
| round | SC | ST | OTHERS |
+-------+----+----+--------+
| 1     | 20 | 30 | 50     |
+-------+----+----+--------+
This is the second table's result:
+----------+-------+-------+-----------+
| round_up | SC_up | ST_up | OTHERS_up |
+----------+-------+-------+-----------+
| 1        | 21    | 31    | 51        |
| 3        | 10    | 5     | 2         |
+----------+-------+-------+-----------+
I need output like this:
+----------+----+----+--------+-------+-------+-----------+
| round_up | SC | ST | OTHERS | SC_up | ST_up | OTHERS_up |
+----------+----+----+--------+-------+-------+-----------+
| 1        | 20 | 30 | 50     | 21    | 31    | 51        |
| 3        | 0  | 0  | 0      | 10    | 5     | 2         |
+----------+----+----+--------+-------+-------+-----------+
You can use WITH queries (Common Table Expressions) to wrap the two selects and a RIGHT JOIN to get the desired output; COALESCE is used to print 0 instead of NULL.
WITH a
AS (
SELECT round AS round
,Sum(non_slsc_qty) AS SC
,Sum(non_slst_qty) AS ST
,Sum(non_slot_qty) AS OTHERS
FROM vhn_issue
WHERE (
DATE BETWEEN '2015-08-01'
AND '2015-08-31'
)
AND dvn_cd = '15'
AND phc_cd = '012'
AND hsc_cd = '05'
GROUP BY round
)
,b
AS (
SELECT round AS round_up
,Sum(non_slsc_qty) AS SC_up
,Sum(non_slst_qty) AS ST_up
,Sum(non_slot_qty) AS OTHERS_up
FROM vhn_issue
WHERE (
DATE BETWEEN '2015-04-01'
AND '2015-08-31'
)
AND dvn_cd = '15'
AND phc_cd = '012'
AND hsc_cd = '05'
GROUP BY round
)
SELECT coalesce(b.round_up, 0) round_up
,coalesce(a.sc, 0) sc
,coalesce(a.st, 0) st
,coalesce(a.others, 0) others
,coalesce(b.sc_up, 0) sc_up
,coalesce(b.st_up, 0) st_up
,coalesce(b.others_up, 0) others_up
FROM a
RIGHT JOIN b ON a.round = b.round_up
WITH Results_CTE AS
(
Select t1.round as round_up ,
t1.SC as SC,
t1.ST as ST,
t1.OTHERS as OTHERS,
0 as SC_up,
0 as ST_up,
0 as OTHERS_up
from round t1
union all
select t2.round_up as round_up,
0 as SC,
0 as ST,
0 as OTHERS,
t2.SC_up,
t2.ST_up,
t2.OTHERS_up from round t2
)
select round_up, sum(SC) as SC, sum(ST) as ST, sum(OTHERS) as OTHERS, sum(SC_up) as SC_up, sum(ST_up) as ST_up, sum(OTHERS_up) as OTHERS_up
from Results_CTE group by round_up