Appending Name based on Min and Max datetime - sql

I have Event_No, Events, Date Range in my table like below.
[Event_No, Events, Date Range
1 PR 2/6/2018 12:01:00 AM
1 PR 2/6/2018 12:02:00 AM
1 PR 2/6/2018 12:03:00 AM
1 RR 2/6/2018 12:04:00 AM
1 RR 2/6/2018 12:05:00 AM
1 RR 2/6/2018 12:06:00 AM
1 SR 2/6/2018 12:07:00 AM
1 SR 2/6/2018 12:08:00 AM
1 SR 2/6/2018 12:09:00 AM
2 PR 2/6/2018 01:01:00 AM
2 PR 2/6/2018 01:02:00 AM
2 PR 2/6/2018 01:03:00 AM
2 RR 2/6/2018 01:04:00 AM
2 RR 2/6/2018 01:05:00 AM
2 RR 2/6/2018 01:06:00 AM
2 SR 2/6/2018 01:07:00 AM
2 SR 2/6/2018 01:08:00 AM
2 SR 2/6/2018 01:09:00 AM
I need to show the Min datetime's respective Event name with 'IN' (a concatenation of Event-'IN') and the Max datetime's respective Event with 'OUT' (a concatenation of Event-'OUT'). I need my final output like below:
Event_No Events Date Range EventInOut
1 PR 2/6/2018 12:01:00 AM PR-IN
1 PR 2/6/2018 12:03:00 AM PR-OUT
1 RR 2/6/2018 12:04:00 AM RR-IN
1 RR 2/6/2018 12:06:00 AM RR-OUT
1 SR 2/6/2018 12:07:00 AM SR-IN
1 SR 2/6/2018 12:09:00 AM SR-OUT
2 PR 2/6/2018 01:01:00 AM PR-IN
2 PR 2/6/2018 01:03:00 AM PR-OUT
2 RR 2/6/2018 01:04:00 AM RR-IN
2 RR 2/6/2018 01:06:00 AM RR-OUT
2 SR 2/6/2018 01:07:00 AM SR-IN
2 SR 2/6/2018 01:09:00 AM SR-OUT
Thanks

This is a gaps and islands problem.
-- Gaps-and-islands: (seqnum - seqnum_e) is constant within each run of
-- consecutive rows sharing the same event, so grouping on the difference
-- isolates each island. The original computed the two row numbers but never
-- used them, and selected the ungrouped column "date", which is invalid.
select event_no, event,
       min(date) as in_date,   -- first timestamp of the island
       max(date) as out_date   -- last timestamp of the island
from (select t.*,
             row_number() over (partition by event_no order by date) as seqnum,
             row_number() over (partition by event_no, event order by date) as seqnum_e
      from t
     ) t
group by event_no, event, (seqnum - seqnum_e);
This puts the values on one row, which might meet your needs.
You can also use lead() and lag():
-- Boundary rows of each island via lag()/lead(). The original kept rows only
-- when prev_event/next_event were NULL, which selects just the first and last
-- row per event_no; island boundaries are where the neighboring event DIFFERS.
-- NOTE: a single-row island satisfies both conditions and is labelled 'IN'.
select t.*,
       (event || '-' ||
        (case when prev_event is null or prev_event <> event
              then 'IN' else 'OUT'
         end)) as event_in_out
from (select t.*,
             lag(event)  over (partition by event_no order by date) as prev_event,
             lead(event) over (partition by event_no order by date) as next_event
      from t
     ) t
where prev_event is null or prev_event <> event
   or next_event is null or next_event <> event;

The following should get you the results....
-- Schema and sample data. Timestamps are written in ISO 8601 form
-- (YYYY-MM-DD HH24:MI:SS) so they parse regardless of the session DateStyle;
-- the original '2/6/2018 12:01:00 AM' form is ambiguous (DMY vs MDY).
-- A terminator is also required between the CREATE TABLE and the INSERT.
create table events(event_no int, events varchar(10), date_range timestamp);

insert into events
select 1,'PR',timestamp '2018-02-06 00:01:00' union all
select 1,'PR',timestamp '2018-02-06 00:02:00' union all
select 1,'PR',timestamp '2018-02-06 00:03:00' union all
select 1,'RR',timestamp '2018-02-06 00:04:00' union all
select 1,'RR',timestamp '2018-02-06 00:05:00' union all
select 1,'RR',timestamp '2018-02-06 00:06:00' union all
select 1,'SR',timestamp '2018-02-06 00:07:00' union all
select 1,'SR',timestamp '2018-02-06 00:08:00' union all
select 1,'SR',timestamp '2018-02-06 00:09:00' union all
select 2,'PR',timestamp '2018-02-06 01:01:00' union all
select 2,'PR',timestamp '2018-02-06 01:02:00' union all
select 2,'PR',timestamp '2018-02-06 01:03:00' union all
select 2,'RR',timestamp '2018-02-06 01:04:00' union all
select 2,'RR',timestamp '2018-02-06 01:05:00' union all
select 2,'RR',timestamp '2018-02-06 01:06:00' union all
select 2,'SR',timestamp '2018-02-06 01:07:00' union all
select 2,'SR',timestamp '2018-02-06 01:08:00' union all
select 2,'SR',timestamp '2018-02-06 01:09:00';
-- Label the first row of each island '-IN' and the last '-OUT'.
with data as (
    select *
           ,row_number() over(partition by event_no, events order by date_range) as rnk
           -- Subtracting rnk minutes yields a constant value for every row in a
           -- run spaced exactly one minute apart: the gaps-and-islands anchor.
           ,date_range - (row_number() over(partition by event_no, events order by date_range) * interval '1 minute') as col_range
    from events
)
,interim_data as (   -- renamed from the original typo "iterim_data"
    select *
           ,row_number() over(partition by col_range order by date_range asc)  as rnk_asc
           ,row_number() over(partition by col_range order by date_range desc) as rnk_desc
    from data
)
select event_no, events, date_range
       -- the original left this column unnamed; alias added
       ,case when rnk_asc = 1  then concat(events, '-IN')
             when rnk_desc = 1 then concat(events, '-OUT')
        end as event_in_out   -- NOTE: a single-row island gets only '-IN'
from interim_data
where (rnk_asc = 1 or rnk_desc = 1)
db fiddle link
https://dbfiddle.uk/?rdbms=postgres_12&fiddle=00304c5f260a199a99c8d43bd351c3a1

Related

How to get the min/max time in continuous times and the count of times in this range?

Date:
2015-04-01 12:00
2015-04-01 11:00
2015-04-01 10:00
2015-04-01 09:00
2015-04-01 08:00 // <---
2015-04-01 05:00
2015-04-01 04:00
2015-04-01 03:00
2015-04-01 02:00
2015-04-01 01:00 // <---
2015-03-31 22:00
2015-03-31 21:00
2015-03-31 20:00
2015-03-31 19:00 // <---
I want to get this result:
MaxTime | MinTime | Count
2015-04-01 12:00 | 2015-04-01 08:00 | 5
2015-04-01 05:00 | 2015-04-01 01:00 | 5
2015-03-31 22:00 | 2015-03-31 19:00 | 4
Try this query
-- Group consecutive hourly timestamps into islands and report each island's
-- span and size.
select
    max(dateColumn), min(dateColumn), count(dateColumn)
from (
    select
        dateColumn,
        -- A monotone hour index minus ROW_NUMBER is constant within a run of
        -- consecutive hours. DATEDIFF(hour, 0, ...) is safe across year
        -- boundaries, unlike the original dayofyear*24 + hour, which resets
        -- to a small value every January 1st and splits islands there.
        datediff(hour, 0, dateColumn) - row_number() over (order by dateColumn) as grp
    from
        MyTable
) t
group by grp
Sample Data
-- Sample-data setup: (re)creates #Temp with one timestamp string per row.
IF OBJECT_ID('tempdb..#Temp') IS NOT NULL
DROP TABLE #Temp
-- NOTE: Dates is varchar, not datetime. The 'YYYY-MM-DD HH:MM' layout happens
-- to sort chronologically as a string, which is the only reason MIN/MAX in the
-- queries below still return the right rows.
;With cte(Dates)
AS
(
SELECT '2015-04-01 12:00' UNION ALL
SELECT '2015-04-01 11:00' UNION ALL
SELECT '2015-04-01 10:00' UNION ALL
SELECT '2015-04-01 09:00' UNION ALL
SELECT '2015-04-01 08:00' UNION ALL
SELECT '2015-04-01 05:00' UNION ALL
SELECT '2015-04-01 04:00' UNION ALL
SELECT '2015-04-01 03:00' UNION ALL
SELECT '2015-04-01 02:00' UNION ALL
SELECT '2015-04-01 01:00' UNION ALL
SELECT '2015-03-31 22:00' UNION ALL
SELECT '2015-03-31 21:00' UNION ALL
SELECT '2015-03-31 20:00' UNION ALL
SELECT '2015-03-31 19:00'
)
SELECT * INTO #Temp FROM cte
SELECT * FROM #Temp
Your Expected Result script using Row_number()Over() Function
-- Per-batch min/max/count via window aggregates plus DISTINCT.
-- Changes from the original: ORDER BY (SELECT 1) inside ROW_NUMBER is
-- nondeterministic (the engine may return rows in any order), so the batch
-- assignment could differ run to run; ordering by Dates DESC matches the
-- intended descending listing. The constant ORDER BY inside the aggregate
-- OVER() clauses was redundant (all rows were peers) and is removed.
SELECT DISTINCT
       MAX(Dates)  OVER(PARTITION BY BatchSeq) AS MaxTime
      ,MIN(Dates)  OVER(PARTITION BY BatchSeq) AS MinTime
      ,COUNT(Dates)OVER(PARTITION BY BatchSeq) AS [Count]
FROM
(
    -- CAUTION: the batch size 5 is hard-coded; this only reproduces the
    -- expected output because the sample groups happen to contain 5, 5 and 4
    -- rows. It is not a general gaps-and-islands solution.
    SELECT Dates, ((ROW_NUMBER() OVER(ORDER BY Dates DESC) - 1) / 5 + 1) AS BatchSeq
    FROM #Temp
) dt
ORDER BY MaxTime DESC
Your Expected Result script using Group by() Function
-- Per-batch min/max/count via GROUP BY (simpler than the DISTINCT+window
-- variant). ORDER BY (SELECT 1) was nondeterministic; ordering by Dates DESC
-- makes the batch assignment reproducible. The positional ORDER BY 1 is
-- replaced with the column name.
SELECT
    MAX(Dates)   AS MaxTime
   ,MIN(Dates)   AS MinTime
   ,COUNT(Dates) AS [Count]
FROM
(
    -- CAUTION: batch size 5 is hard-coded and only matches this sample data.
    SELECT Dates, ((ROW_NUMBER() OVER(ORDER BY Dates DESC) - 1) / 5 + 1) AS BatchSeq
    FROM #Temp
) dt
GROUP BY BatchSeq
ORDER BY MaxTime DESC
Result
MaxTime MinTime Count
--------------------------------------------
2015-04-01 12:00 2015-04-01 08:00 5
2015-04-01 05:00 2015-04-01 01:00 5
2015-03-31 22:00 2015-03-31 19:00 4

Oracle sql query to group consecutive records by date

With the below sample data, I am trying to group record with same rate.
id start_date end_date rate
-----------------------------------------------------------------
1 01/01/2017 12:00:00 am 01/01/2017 12:00:00 am 300
1 02/01/2017 12:00:00 am 02/01/2017 12:00:00 am 300
1 03/01/2017 12:00:00 am 03/01/2017 12:00:00 am 300
1 04/01/2017 12:00:00 am 04/01/2017 12:00:00 am 1000
1 05/01/2017 12:00:00 am 05/01/2017 12:00:00 am 500
1 06/01/2017 12:00:00 am 06/01/2017 12:00:00 am 500
1 07/01/2017 12:00:00 am 07/01/2017 12:00:00 am 1000
1 08/01/2017 12:00:00 am 08/01/2017 12:00:00 am 1000
1 09/01/2017 12:00:00 am 09/01/2017 12:00:00 am 300
What I've tried :
-- NOTE(review): this is the question's BROKEN attempt, kept as-is to show the
-- defect. The second ROW_NUMBER partitions by rate alone instead of (id, rate),
-- so non-adjacent runs with the same rate share a numbering sequence and can
-- collide into one group — which is exactly what produces the overlapping
-- 07/01-09/01 rows with rates 300 and 1000 in the output below.
select distinct id, mn_date, mx_date,rate
from (
select id, min(start_date) over (partition by grp order by start_date) mn_date,
max(end_date) over(partition by grp order by start_date desc) mx_date, rate
from (
select t.*, row_number() over(partition by id order by start_date) -row_number() over(partition by rate order by start_date)grp
from t
)
)
order by mn_date;
Output :
id mn_date mx_date rate
--------------------------------------------------------
1 01/01/2017 12:00:00 am 03/01/2017 12:00:00 am 300
1 04/01/2017 12:00:00 am 04/01/2017 12:00:00 am 1000
1 05/01/2017 12:00:00 am 06/01/2017 12:00:00 am 500
1 07/01/2017 12:00:00 am 09/01/2017 12:00:00 am 300
1 07/01/2017 12:00:00 am 09/01/2017 12:00:00 am 1000
Desired Output:
id mn_date mx_date rate
--------------------------------------------------------
1 01/01/2017 12:00:00 am 03/01/2017 12:00:00 am 300
1 04/01/2017 12:00:00 am 04/01/2017 12:00:00 am 1000
1 05/01/2017 12:00:00 am 06/01/2017 12:00:00 am 500
1 07/01/2017 12:00:00 am 08/01/2017 12:00:00 am 1000
1 09/01/2017 12:00:00 am 09/01/2017 12:00:00 am 300
Final result to group by consecutive dates: (Thanks to Gordon )
-- Collapse consecutive same-rate days into one row per run.
select id, min(start_date), max(end_date), rate
from (
-- grp  : row-number difference — constant within a run of the same rate.
-- grp1 : running count of date gaps (x=1 when the next row is NOT the next
--        day), so runs separated by a calendar gap are also split.
-- Grouping on grp+grp1 therefore requires BOTH the same rate and contiguity.
select id, start_date, end_date, rate, seqnum_i-seqnum_ir grp, sum(x) over(partition by id order by start_date) grp1
from (
select t.*,
row_number() over (partition by id order by start_date) as seqnum_i,
row_number() over (partition by id, rate order by start_date) as seqnum_ir,
-- x=0 when the next row starts exactly one day after this row ends
-- (LEAD is NULL on the last row, so x=1 there — see the fix further below)
case when LEAD(start_date) over (partition by id order by start_date)= end_date + 1
then 0
else 1
end x
from t
)
)
group by id, grp+grp1, rate
order by min(start_date);
Assuming we can just use start_date to identify the adjacent records (i.e., there are no gaps), then you can use the difference of row numbers approach:
-- Difference-of-row-numbers approach: (seqnum_i - seqnum_ir) is constant
-- within each run of adjacent rows sharing the same rate (assumes no date
-- gaps). The original was missing the comma after "id" in the GROUP BY,
-- which is a syntax error.
select id, min(start_date) as mn_date, max(end_date) as mx_date, rate
from (select t.*,
             row_number() over (partition by id order by start_date) as seqnum_i,
             row_number() over (partition by id, rate order by start_date) as seqnum_ir
      from t
     ) t
group by id, (seqnum_i - seqnum_ir), rate;
To see how this works, look at the results of the subquery. You should be able to "see" how the difference of the two row numbers defines the groups of adjacent records with the same rate.
I found that the last value wasn't being grouped correctly as the calculation of X wasn't handling the NULL return, so I changed it to this:
-- Fragment: gap flag x, with the NULL LEAD on the last row of each partition
-- treated the same as "no gap" so the final run is grouped correctly.
-- The original spelled this as two separate WHEN branches both returning 0;
-- they are merged with OR for clarity (CASE short-circuits left to right, so
-- the IS NULL test still guards the equality comparison).
,CASE
     WHEN LEAD (start_date)
             OVER (PARTITION BY id ORDER BY start_date)
             IS NULL
       OR LEAD (start_date)
             OVER (PARTITION BY id ORDER BY start_date) =
             end_date + 1
     THEN
         0
     ELSE
         1
 END
     x

Calculating concurrency from a set of ranges

I have a set of rows containing a start timestamp and a duration. I want to perform various summaries using the overlap or concurrency.
For example: peak daily concurrency, peak concurrency grouped on another column.
Example data:
timestamp,duration
2016-01-01 12:00:00,300
2016-01-01 12:01:00,300
2016-01-01 12:06:00,300
I would like to know that peak for the period was 12:01:00-12:05:00 at 2 concurrent.
Any ideas on how to achieve this using BigQuery or, less exciting, a Map/Reduce job?
For a per-minute resolution, with session lengths of up to 255 minutes:
-- Legacy BigQuery SQL: expand each session into one row per active minute,
-- then count rows per minute. NOTE: in legacy BigQuery SQL a comma between
-- tables means UNION ALL, not a join, and [project:dataset.table] is the
-- legacy table reference syntax. Supports sessions up to 255 minutes because
-- of the numbers_255 helper table.
SELECT session_minute, COUNT(*) c
FROM (
SELECT start, DATE_ADD(start, i, 'MINUTE') session_minute FROM (
SELECT * FROM (
SELECT TIMESTAMP("2015-04-30 10:14") start, 7 minutes
),(
SELECT TIMESTAMP("2015-04-30 10:15") start, 12 minutes
),(
SELECT TIMESTAMP("2015-04-30 10:15") start, 12 minutes
),(
SELECT TIMESTAMP("2015-04-30 10:18") start, 12 minutes
),(
SELECT TIMESTAMP("2015-04-30 10:23") start, 3 minutes
)
) a
-- cross join against 0..254 and keep i < session length: one row per minute
CROSS JOIN [fh-bigquery:public_dump.numbers_255] b
WHERE a.minutes>b.i
)
GROUP BY 1
ORDER BY 1
STEP 1 - First you need find all periods (start and end) with
respective concurrent entries
-- Sweep-line technique (legacy BigQuery SQL; the comma between the two
-- subselects means UNION ALL): emit +1 at each session start and -1 at each
-- session end, then a running SUM over time gives the concurrency between
-- consecutive boundary timestamps.
SELECT ts AS start, LEAD(ts) OVER(ORDER BY ts) AS finish,
SUM(entry) OVER(ORDER BY ts) AS concurrent_entries
FROM (
SELECT ts, SUM(entry)AS entry
FROM
(SELECT ts, 1 AS entry FROM yourTable),
(SELECT DATE_ADD(ts, duration, 'second') AS ts, -1 AS entry FROM yourTable)
GROUP BY ts
-- timestamps where starts and ends cancel out contribute 0 to the running
-- sum, so dropping them only merges the two adjacent identical periods
HAVING entry != 0
)
ORDER BY ts
Assuming input as below
(SELECT TIMESTAMP('2016-01-01 12:00:00') AS ts, 300 AS duration),
(SELECT TIMESTAMP('2016-01-01 12:01:00') AS ts, 300 AS duration),
(SELECT TIMESTAMP('2016-01-01 12:06:00') AS ts, 300 AS duration),
(SELECT TIMESTAMP('2016-01-01 12:07:00') AS ts, 300 AS duration),
(SELECT TIMESTAMP('2016-01-01 12:10:00') AS ts, 300 AS duration),
(SELECT TIMESTAMP('2016-01-01 12:11:00') AS ts, 300 AS duration)
the output of above query will look somehow like this:
start finish concurrent_entries
2016-01-01 12:00:00 UTC 2016-01-01 12:01:00 UTC 1
2016-01-01 12:01:00 UTC 2016-01-01 12:05:00 UTC 2
2016-01-01 12:05:00 UTC 2016-01-01 12:07:00 UTC 1
2016-01-01 12:07:00 UTC 2016-01-01 12:10:00 UTC 2
2016-01-01 12:10:00 UTC 2016-01-01 12:12:00 UTC 3
2016-01-01 12:12:00 UTC 2016-01-01 12:15:00 UTC 2
2016-01-01 12:15:00 UTC 2016-01-01 12:16:00 UTC 1
2016-01-01 12:16:00 UTC null 0
You might still want to polish above query a little - but mainly it does what you need
STEP 2 - now you can do any stats off of above result
For example peak on whole period:
-- STEP 2: rank the periods from STEP 1 by concurrency; peak = rank 1.
-- RANK() (not ROW_NUMBER) so tied periods share the same peak rank.
SELECT
start, finish, concurrent_entries, RANK() OVER(ORDER BY concurrent_entries DESC) AS peak
FROM (
-- same sweep-line subquery as STEP 1 above
SELECT ts AS start, LEAD(ts) OVER(ORDER BY ts) AS finish,
SUM(entry) OVER(ORDER BY ts) AS concurrent_entries
FROM (
SELECT ts, SUM(entry)AS entry FROM
(SELECT ts, 1 AS entry FROM yourTable),
(SELECT DATE_ADD(ts, duration, 'second') AS ts, -1 AS entry FROM yourTable)
GROUP BY ts
HAVING entry != 0
)
)
ORDER BY peak

SQL grouping on time interval

I have a data set which is based on a timestamp. The data set contains one record for every shutdown occurrence in a 5-minute time interval. If a shutdown occurred in a specific 5-minute window, a record is added; otherwise there is no record. Thus, no record means the system has recovered.
Date
07-Jul-15 12:05:00
07-Jul-15 12:10:00
07-Jul-15 12:15:00
07-Jul-15 12:35:00
07-Jul-15 12:40:00
07-Jul-15 12:45:00
07-Jul-15 12:50:00
07-Jul-15 13:05:00
07-Jul-15 13:10:00
07-Jul-15 13:15:00
I would like to query and return
1. Number of shutdowns: the number of shutdowns in this case is 3, based on the gaps between
12:15 to 12:35
12:50 to 13:05
The system recovered
Period Between every shut down
Example:
1.From: 07-Jul-15 12:05:00 To: 07-Jul-15 12:15:00 Duration : 15 Mins
2.From: 07-Jul-15 12:35:00 To: 07-Jul-15 12:50:00 Duration : 20 Mins
There is a similar Question although a very different solution is required for this one.
would appreciate a fiddle example
-- Gaps-and-islands over 5-minute samples (Oracle). "DATE" must stay quoted:
-- DATE is a reserved word, so the column name is case-sensitive and quoted.
WITH changes AS (
SELECT "DATE",
-- 1 marks the first row of a new island: the previous sample is NOT exactly
-- 5 minutes earlier (or there is no previous sample, LAG returns NULL)
CASE WHEN LAG( "DATE" ) OVER ( ORDER BY "DATE" ) + INTERVAL '5' MINUTE = "DATE" THEN 0 ELSE 1 END AS has_changed_group
FROM TEST
), grps AS (
SELECT "DATE",
-- running total of the start markers numbers the islands
SUM( has_changed_group ) OVER ( ORDER BY "DATE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS grp
FROM changes
)
SELECT MIN( "DATE" ) AS shutdown_start,
MAX( "DATE" ) AS shutdown_end,
-- +5 minutes because each sample represents a full 5-minute interval
MAX( "DATE" ) - MIN( "DATE" ) + INTERVAL '5' MINUTE AS shutdown_duration
FROM grps
GROUP BY grp;
Output:
SHUTDOWN_START SHUTDOWN_END SHUTDOWN_DURATION
---------------------------- ---------------------------- -----------------
07-JUL-15 12.05.00.000000000 07-JUL-15 12.15.00.000000000 0 0:15:0.0
07-JUL-15 12.35.00.000000000 07-JUL-15 12.50.00.000000000 0 0:20:0.0
07-JUL-15 13.05.00.000000000 07-JUL-15 13.15.00.000000000 0 0:15:0.0
Edit - Multiple machines:
SQL Fiddle
Oracle 11g R2 Schema Setup:
-- Fixture for the multi-machine variant: column types are inferred from the
-- SELECT (CTAS); machine 2 has a deliberate 15-minute gap at 12:45 -> 13:00.
CREATE TABLE TEST ( MACHINE_ID, "DATE" ) AS
SELECT 1, TIMESTAMP '2015-07-07 12:05:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:10:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:15:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:35:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:40:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:45:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 12:50:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 13:05:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 13:10:00' FROM DUAL
UNION ALL SELECT 1, TIMESTAMP '2015-07-07 13:15:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 12:35:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 12:40:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 12:45:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 13:00:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 13:05:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 13:10:00' FROM DUAL
UNION ALL SELECT 2, TIMESTAMP '2015-07-07 13:15:00' FROM DUAL;
Query 1:
-- Same gaps-and-islands query as above, extended to multiple machines by
-- adding MACHINE_ID to every PARTITION BY and to the final GROUP BY.
WITH changes AS (
SELECT MACHINE_ID,
"DATE",
-- 1 marks the start of a new island within this machine's samples
CASE WHEN LAG( "DATE" ) OVER ( PARTITION BY MACHINE_ID ORDER BY "DATE" ) + INTERVAL '5' MINUTE = "DATE" THEN 0 ELSE 1 END AS has_changed_group
FROM TEST
), grps AS (
SELECT MACHINE_ID,
"DATE",
SUM( has_changed_group ) OVER ( PARTITION BY MACHINE_ID ORDER BY "DATE" ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS grp
FROM changes
)
SELECT MACHINE_ID,
TO_CHAR( MIN( "DATE" ), 'YYYY-MM-DD HH24:MI:SS' ) AS shutdown_start,
TO_CHAR( MAX( "DATE" ), 'YYYY-MM-DD HH24:MI:SS' ) AS shutdown_end,
TO_CHAR( MAX( "DATE" ) - MIN( "DATE" ) + INTERVAL '5' MINUTE ) AS shutdown_duration
FROM grps
GROUP BY MACHINE_ID, grp
ORDER BY 1,2
Results:
| MACHINE_ID | SHUTDOWN_START | SHUTDOWN_END | SHUTDOWN_DURATION |
|------------|---------------------|---------------------|-------------------------------|
| 1 | 2015-07-07 12:05:00 | 2015-07-07 12:15:00 | +000000000 00:15:00.000000000 |
| 1 | 2015-07-07 12:35:00 | 2015-07-07 12:50:00 | +000000000 00:20:00.000000000 |
| 1 | 2015-07-07 13:05:00 | 2015-07-07 13:15:00 | +000000000 00:15:00.000000000 |
| 2 | 2015-07-07 12:35:00 | 2015-07-07 12:45:00 | +000000000 00:15:00.000000000 |
| 2 | 2015-07-07 13:00:00 | 2015-07-07 13:15:00 | +000000000 00:20:00.000000000 |
Solution using Tom Kyte's "carry down" technique:
-- "Carry down" technique: tag only the first row of each island, then
-- propagate that tag forward with LAST_VALUE ... IGNORE NULLS.
with test1 as (
select mydate,
-- mark starting records in each group; NVL defaults the first row's gap to
-- 10 minutes (> 5) so it always starts a group. ROW_NUMBER supplies a
-- unique id for the group it starts.
case when NVL((mydate - lag(mydate) over (order by mydate))*24*60,10) > 5
then row_number() over (order by mydate) end as group_id
from test),
test2 as (
select mydate,
-- propagate group_id to all records of the island (non-start rows are NULL)
LAST_VALUE(group_id IGNORE NULLS) over (order by mydate) as group_id
from test1)
select min(mydate) shutdown_from, max(mydate) shutdown_to
from test2
group by group_id;
Output
SHUTDOWN_FROM SHUTDOWN_TO
------------------- -------------------
07.07.0015 12:05:00 07.07.0015 12:15:00
07.07.0015 12:35:00 07.07.0015 12:50:00
07.07.0015 13:05:00 07.07.0015 13:15:00

Querying the weeks for the whole month and displaying the content according to that month

I have a question regarding my analysis programming. I already have a query to show the weeks of a whole year, but I'm stuck when I need to sum up values for each week's occurrences. This is my SQL code:
-- NOTE(review): this is the question's BROKEN query, kept as-is. The scalar
-- subquery compares the DATE column UPD_DATE against TO_CHAR(...) strings,
-- forcing an implicit char-to-date conversion on the remote side that raises
-- ORA-01843 (not a valid month); the CONNECT BY inside the scalar subquery is
-- also misplaced. See the corrected LEFT JOIN version further below.
SELECT LEVEL WEEK_NUM_INCR,
TO_CHAR (start_date + (LEVEL - 1) * 7, 'WW') WEEK_POSITION /* WEEK POSITION FOR THE WHOLE YEAR */
,
TO_CHAR (start_date + (LEVEL - 1) * 7, 'DD-MM-YYYY') START_WEEK_DATE,
TO_CHAR (start_date + (LEVEL) * 7, 'DD-MM-YYYY') END_WEEK_DATE,
(SELECT SUM(ONSITE_UPD_QTY) FROM DTL_ERC_UPD#WELTES_SITEMON_LINK WHERE UPD_DATE BETWEEN
TO_CHAR (start_date + (LEVEL) * 7, 'MM/DD/YYYY') AND TO_CHAR (start_date + (LEVEL) * 7, 'MM/DD/YYYY') CONNECT BY start_date + (LEVEL - 1) * 7 < end_date)
FROM (SELECT TO_DATE ('01/01/2015', 'MM/DD/YYYY') start_date,
TO_DATE ('12/31/2015', 'MM/DD/YYYY') end_date
FROM DUAL)
CONNECT BY start_date + (LEVEL - 1) * 7 < end_date;
it would display the weeks but when i added this part in the middle which i have now,
(SELECT SUM(ONSITE_UPD_QTY) FROM DTL_ERC_UPD#WELTES_SITEMON_LINK WHERE UPD_DATE BETWEEN
TO_CHAR (start_date + (LEVEL) * 7, 'MM/DD/YYYY') AND TO_CHAR (start_date + (LEVEL) * 7, 'MM/DD/YYYY') CONNECT BY start_date + (LEVEL - 1) * 7 < end_date)
it throws these errors,
ORA-01843: not a valid month
ORA-02063: preceding line from WELTES_SITEMON_LINK
So for the DTL_ERC_UPD, I have
ONSITE_UPD_QTY UPD_DATE
1 2/5/2015 12:00:01 AM
1 2/5/2015 12:00:01 AM
1 2/4/2015
1 2/4/2015
1 2/4/2015
I am hoping that it would show 5 for the week of Feb 1st until the 8th, and zero in the rest of the rows.
Please help me with this issue
below one is the sample table
-- Sample table: the first SELECT's CAST fixes the column type as Date; the
-- remaining MM/DD/YYYY string literals are implicitly converted to Date by
-- the UNION ALL (locale/DATEFORMAT dependent — assumes mdy).
select cast('01/08/2013' as Date) dte INTO #temp union all select
'03/01/2013' union all select
'11/01/2013' union all select
'12/01/2013' union all select
'10/21/2014' union all select
'10/27/2014' union all select
'10/30/2014' union all select
'10/31/2014' union all select
'11/01/2014' union all select
'11/02/2014' union all select
'11/04/2014' union all select
'11/05/2014' union all select
'11/08/2014' union all select
'11/09/2014' union all select
'11/11/2014' union all select
'11/20/2014' union all select
'11/07/2014' union all select
'07/11/2014' union all select
'11/13/2013' union all select
'09/01/2014' union all select
'11/03/2014' union all select
'11/18/2014' union all select
'12/05/2014' union all select
'07/24/2014' union all select
'07/26/2014' union all select
'07/27/2014' union all select
'07/28/2014' union all select
'07/29/2014' union all select
'07/30/2014' union all select
'01/01/2014' union all select
'02/01/2014' union all select
'04/01/2014' union all select
'05/01/2014' union all select
'06/01/2014' union all select
'06/01/2014' union all select
'07/01/2014' union all select
'07/01/2014' union all select
'11/05/2013' union all select
'06/16/2014' union all select
'06/17/2014' union all select
'06/18/2014' union all select
'06/19/2014' union all select
'06/20/2014' union all select
'06/21/2014' union all select
'06/22/2014' union all select
'06/23/2014' union all select
'06/24/2014' union all select
'06/25/2014' union all select
'06/26/2014' union all select
'06/27/2014' union all select
'06/28/2014' union all select
'06/29/2014'
now you can get week count start date of the week and end date of the week
-- Count rows per (year, week) and reconstruct the week's start/end dates.
-- [Week] is year + week number concatenated WITHOUT zero-padding the week
-- ('20131' vs '201311'); grouping still works because SUBSTRING splits at a
-- fixed position 5 (the year is always 4 chars). The start-date arithmetic
-- rebuilds the date as Jan 1 of the year (offset from 1900) plus week*7 days,
-- which only approximates DATEPART(WW) boundaries — hence the slightly odd
-- startDte values in the output below.
select count([Week]) cont,[Week],
DATEADD(Day,(cast(SUBSTRING([Week],5,LEN( [Week])) as integer)*7)-7,
DATEADD(year,cast(SUBSTRING([Week],1,4) as integer)-1900,0)) startDte,
DATEADD(Day,cast(SUBSTRING([Week],5,LEN( [Week])) as integer)*7,
DATEADD(year,cast(SUBSTRING([Week],1,4) as integer)-1900,0)) endDte
from (select cast(datepart(YYYY,cast(dte as DATE)) as varchar(4))+''+cast(datepart(WW,cast(dte as DATE)) as varchar(2)) [Week],dte from #temp
) AS temp GROUP BY [Week]
output is
cont startDte endDte
1 2013-01-08 00:00:00.000 2013-01-15 00:00:00.000
1 2013-10-29 00:00:00.000 2013-11-05 00:00:00.000
1 2013-11-05 00:00:00.000 2013-11-12 00:00:00.000
1 2013-11-12 00:00:00.000 2013-11-19 00:00:00.000
1 2013-12-03 00:00:00.000 2013-12-10 00:00:00.000
1 2013-02-26 00:00:00.000 2013-03-05 00:00:00.000
1 2014-01-01 00:00:00.000 2014-01-08 00:00:00.000
1 2014-04-02 00:00:00.000 2014-04-09 00:00:00.000
1 2014-04-30 00:00:00.000 2014-05-07 00:00:00.000
2 2014-06-04 00:00:00.000 2014-06-11 00:00:00.000
6 2014-06-18 00:00:00.000 2014-06-25 00:00:00.000
7 2014-06-25 00:00:00.000 2014-07-02 00:00:00.000
3 2014-07-02 00:00:00.000 2014-07-09 00:00:00.000
1 2014-07-09 00:00:00.000 2014-07-16 00:00:00.000
2 2014-07-23 00:00:00.000 2014-07-30 00:00:00.000
4 2014-07-30 00:00:00.000 2014-08-06 00:00:00.000
1 2014-09-03 00:00:00.000 2014-09-10 00:00:00.000
1 2014-10-22 00:00:00.000 2014-10-29 00:00:00.000
4 2014-10-29 00:00:00.000 2014-11-05 00:00:00.000
6 2014-11-05 00:00:00.000 2014-11-12 00:00:00.000
2 2014-11-12 00:00:00.000 2014-11-19 00:00:00.000
2 2014-11-19 00:00:00.000 2014-11-26 00:00:00.000
1 2014-12-03 00:00:00.000 2014-12-10 00:00:00.000
1 2014-01-29 00:00:00.000 2014-02-05 00:00:00.000
Second connect by in your code is needless. Organize query in logical sections using proper join. Do not use date to char conversion for comparison, as already mentioned. Below is sample query that you can use:
-- Corrected approach: generate the week calendar once with CONNECT BY, then
-- LEFT JOIN the fact table on a half-open date range [start, end) so weeks
-- with no activity still appear (NVL turns the NULL sum into 0) and no
-- char/date conversion is needed.
with period as (
select to_date('02/01/2015', 'MM/DD/YYYY') start_date,
to_date('03/01/2015', 'MM/DD/YYYY') end_date from dual),
weeks as (
-- one row per 7-day bucket between start_date and end_date
select level week_num_inc,
to_char(start_date + (level - 1) * 7, 'WW') week_position,
start_date + (level - 1) * 7 start_week_date,
start_date + level * 7 end_week_date
from period
connect by start_date + (level - 1) * 7 < end_date)
select week_num_inc, week_position, start_week_date, end_week_date,
nvl(sum(dtl_erc_upd.onsite_upd_qty), 0) quantity
from weeks w
left join dtl_erc_upd on start_week_date <= upd_date
and upd_date < end_week_date
group by week_num_inc, week_position, start_week_date, end_week_date
order by start_week_date
result:
WEEK_NUM_INC WEEK_POSITION START_WEEK_DATE END_WEEK_DATE QUANTITY
1 05 2015-02-01 2015-02-08 5
2 06 2015-02-08 2015-02-15 0
3 07 2015-02-15 2015-02-22 0
4 08 2015-02-22 2015-03-01 0