Oracle query to retrieve the date of the highest value grouped by month and year - SQL

The data looks similar to this.
I need to retrieve the highest amount per account, grouped by month and year, but I couldn't retrieve the exact date on which that highest amount occurred.
The code I tried for the highest amount is:
SELECT ACCT_ID AS ACCOUNT_ID,
       COUNT(TRANS_AMOUNT) AS NUMBER_OF_TRANSACTION,
       SUM(TRANS_AMOUNT) AS TRANSACTION_AMOUNT,
       TO_CHAR(TRANS_DATE, 'MON-YYYY') AS MONTH_DATE,
       MAX(TRANS_AMOUNT) AS HIGHEST
FROM   TRANS_AMT
GROUP BY ACCT_ID, TO_CHAR(TRANS_DATE, 'MON-YYYY');
This worked, but I could not retrieve the date. If I add the date to the query I get a "not a GROUP BY expression" error (ORA-00979).

Group by account and month (I used TRUNC at month level). Select MAX(amount) for the amount and use KEEP (DENSE_RANK FIRST) to get the date of the maximum amount:
SELECT ACCT_ID AS ACCOUNT_ID,
       TRUNC(TRANS_DATE, 'MM') AS MONTH_DATE,
       MAX(TRANS_DATE) KEEP (DENSE_RANK FIRST ORDER BY TRANS_AMOUNT DESC) AS TRANS_DATE,
       MAX(TRANS_AMOUNT) AS HIGHEST
FROM   TRANS_AMT
GROUP BY ACCT_ID, TRUNC(TRANS_DATE, 'MM');
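If you also want to keep the counts, totals, and MON-YYYY label from the query in the question, the same KEEP (DENSE_RANK FIRST) aggregate can simply be added to it. A minimal sketch (the HIGHEST_DATE alias is just an illustrative name):

SELECT ACCT_ID AS ACCOUNT_ID,
       COUNT(TRANS_AMOUNT) AS NUMBER_OF_TRANSACTION,
       SUM(TRANS_AMOUNT) AS TRANSACTION_AMOUNT,
       TO_CHAR(TRANS_DATE, 'MON-YYYY') AS MONTH_DATE,
       MAX(TRANS_AMOUNT) AS HIGHEST,
       -- date of the row holding the month's maximum amount
       MAX(TRANS_DATE) KEEP (DENSE_RANK FIRST ORDER BY TRANS_AMOUNT DESC) AS HIGHEST_DATE
FROM   TRANS_AMT
GROUP BY ACCT_ID, TO_CHAR(TRANS_DATE, 'MON-YYYY');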

with t as (
select 1000 ACCT_ID, to_date('11-JAN-2000', 'dd-MM-yyyy') TRANS_DATE, 201 TRANS_AMOUNT from dual union all
select 1000, to_date('22-JAN-2000', 'dd-MM-yyyy'), 209 from dual union all
select 1000, to_date('31-JAN-2000', 'dd-MM-yyyy'), 4504 from dual union all
select 1000, to_date('10-FEB-2000', 'dd-MM-yyyy'), 487 from dual union all
select 1001, to_date('10-FEB-2000', 'dd-MM-yyyy'), 4287 from dual union all
select 1001, to_date('17-FEB-2000', 'dd-MM-yyyy'), 4501 from dual union all
select 1001, to_date('22-FEB-2000', 'dd-MM-yyyy'), 1209 from dual union all
select 1000, to_date('22-FEB-2000', 'dd-MM-yyyy'), 4550 from dual union all
select 1001, to_date('23-FEB-2000', 'dd-MM-yyyy'), 120 from dual union all
select 1001, to_date('26-FEB-2000', 'dd-MM-yyyy'), 245 from dual union all
select 1000, to_date('28-FEB-2000', 'dd-MM-yyyy'), 4500 from dual union all
select 1000, to_date('08-MAR-2000', 'dd-MM-yyyy'), 256 from dual union all
select 1001, to_date('08-MAR-2000', 'dd-MM-yyyy'), 2561 from dual union all
select 1000, to_date('24-MAR-2000', 'dd-MM-yyyy'), 987 from dual union all
select 1001, to_date('24-MAR-2000', 'dd-MM-yyyy'), 75000 from dual union all
select 1000, to_date('31-MAR-2000', 'dd-MM-yyyy'), 1100 from dual union all
select 1001, to_date('31-MAR-2000', 'dd-MM-yyyy'), 11000 from dual union all
select 1001, to_date('04-APR-2000', 'dd-MM-yyyy'), 4287 from dual union all
select 1000, to_date('04-APR-2000', 'dd-MM-yyyy'), 487 from dual union all
select 1001, to_date('12-APR-2000', 'dd-MM-yyyy'), 1209 from dual union all
select 1001, to_date('14-APR-2000', 'dd-MM-yyyy'), 1092 from dual union all
select 1001, to_date('20-APR-2000', 'dd-MM-yyyy'), 1245 from dual union all
select 1000, to_date('20-APR-2000', 'dd-MM-yyyy'), 7500 from dual union all
select 1000, to_date('22-APR-2000', 'dd-MM-yyyy'), 1205 from dual union all
select 1000, to_date('26-APR-2000', 'dd-MM-yyyy'), 245 from dual
)
select *
from   t t
where  not exists (
         select *
         from   t n
         where  trunc(t.TRANS_DATE, 'mm') = trunc(n.TRANS_DATE, 'mm')
         and    n.TRANS_AMOUNT > t.TRANS_AMOUNT
         and    t.ACCT_ID = n.ACCT_ID
       )
order by t.TRANS_DATE, t.ACCT_ID;
Output


How to use a window function during a merge in SQL

I am working in Oracle SQL. I have two tables which are linked to each other by one column - company_id (see the picture); I want to merge table 1 into table 2 and calculate the 6-month average of income (the 6 months before the period from table 2) for each company_id and each date in table 2. I appreciate any code/idea on how to solve this task.
You can use an analytic range window to calculate the averages for table1 and then JOIN the result to table2:
SELECT t2.*,
t1.avg_income_6,
t1.avg_income_12
FROM table2 t2
LEFT OUTER JOIN (
SELECT company_id,
dt,
ROUND(AVG(income) OVER (
PARTITION BY company_id
ORDER BY dt
RANGE BETWEEN INTERVAL '5' MONTH PRECEDING
AND INTERVAL '0' MONTH FOLLOWING
), 2) AS avg_income_6,
ROUND(AVG(income) OVER (
PARTITION BY company_id
ORDER BY dt
RANGE BETWEEN INTERVAL '11' MONTH PRECEDING
AND INTERVAL '0' MONTH FOLLOWING
), 2) AS avg_income_12
FROM table1
) t1
ON (t2.company_id = t1.company_id AND t2.dt = t1.dt);
Which, for the sample data:
CREATE TABLE table1 (company_id, dt, income) AS
SELECT 1, date '2019-01-01', 65 FROM DUAL UNION ALL
SELECT 1, date '2019-02-01', 58 FROM DUAL UNION ALL
SELECT 1, date '2019-03-01', 12 FROM DUAL UNION ALL
SELECT 1, date '2019-04-01', 81 FROM DUAL UNION ALL
SELECT 1, date '2019-05-01', 38 FROM DUAL UNION ALL
SELECT 1, date '2019-06-01', 81 FROM DUAL UNION ALL
SELECT 1, date '2019-07-01', 38 FROM DUAL UNION ALL
SELECT 1, date '2019-08-01', 69 FROM DUAL UNION ALL
SELECT 1, date '2019-09-01', 54 FROM DUAL UNION ALL
SELECT 1, date '2019-10-01', 90 FROM DUAL UNION ALL
SELECT 1, date '2019-11-01', 10 FROM DUAL UNION ALL
SELECT 1, date '2019-12-01', 12 FROM DUAL UNION ALL
SELECT 1, date '2020-01-01', 11 FROM DUAL UNION ALL
SELECT 1, date '2020-02-01', 83 FROM DUAL UNION ALL
SELECT 1, date '2020-03-01', 18 FROM DUAL UNION ALL
SELECT 1, date '2020-04-01', 28 FROM DUAL UNION ALL
SELECT 1, date '2020-05-01', 52 FROM DUAL UNION ALL
SELECT 1, date '2020-06-01', 21 FROM DUAL UNION ALL
SELECT 1, date '2020-07-01', 54 FROM DUAL UNION ALL
SELECT 1, date '2020-08-01', 30 FROM DUAL UNION ALL
SELECT 1, date '2020-09-01', 12 FROM DUAL UNION ALL
SELECT 1, date '2020-10-01', 25 FROM DUAL UNION ALL
SELECT 1, date '2020-11-01', 86 FROM DUAL UNION ALL
SELECT 1, date '2020-12-01', 4 FROM DUAL UNION ALL
SELECT 1, date '2021-01-01', 10 FROM DUAL UNION ALL
SELECT 1, date '2021-02-01', 72 FROM DUAL UNION ALL
SELECT 1, date '2021-03-01', 65 FROM DUAL UNION ALL
SELECT 1, date '2021-04-01', 25 FROM DUAL;
CREATE TABLE table2 (company_id, dt) AS
SELECT 1, date '2019-06-01' FROM DUAL UNION ALL
SELECT 1, date '2019-09-01' FROM DUAL UNION ALL
SELECT 1, date '2019-12-01' FROM DUAL UNION ALL
SELECT 1, date '2020-01-01' FROM DUAL UNION ALL
SELECT 1, date '2020-07-01' FROM DUAL UNION ALL
SELECT 1, date '2020-08-01' FROM DUAL UNION ALL
SELECT 1, date '2021-03-01' FROM DUAL UNION ALL
SELECT 1, date '2021-04-01' FROM DUAL;
Outputs:
COMPANY_ID  DT                   AVG_INCOME_6  AVG_INCOME_12
----------  -------------------  ------------  -------------
         1  2019-06-01 00:00:00         55.83          55.83
         1  2019-09-01 00:00:00         60.17          55.11
         1  2019-12-01 00:00:00          45.5          50.67
         1  2020-01-01 00:00:00            41          46.17
         1  2020-07-01 00:00:00         42.67          41.83
         1  2020-08-01 00:00:00         33.83          38.58
         1  2021-03-01 00:00:00         43.67          38.25
         1  2021-04-01 00:00:00         43.67             38
db<>fiddle here
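Since the title mentions MERGE: if you actually need to persist these averages rather than just select them, the same analytic subquery can be used as the USING source of a MERGE. A minimal sketch, assuming table2 has an avg_income_6 column to receive the value (that column name is hypothetical):

MERGE INTO table2 t2
USING (
    SELECT company_id,
           dt,
           ROUND(AVG(income) OVER (
               PARTITION BY company_id
               ORDER BY dt
               RANGE BETWEEN INTERVAL '5' MONTH PRECEDING
                         AND INTERVAL '0' MONTH FOLLOWING
           ), 2) AS avg_income_6
    FROM   table1
) t1
ON (t2.company_id = t1.company_id AND t2.dt = t1.dt)
WHEN MATCHED THEN
    UPDATE SET t2.avg_income_6 = t1.avg_income_6;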
I don't think you need any window function here (if you were thinking of analytic functions); ordinary avg with appropriate join conditions should do the job.
Sample data:
SQL> with
2 table1 (company_id, datum, income) as
3 (select 1, date '2019-01-01', 65 from dual union all
4 select 1, date '2019-02-01', 58 from dual union all
5 select 1, date '2019-03-01', 12 from dual union all
6 select 1, date '2019-04-01', 81 from dual union all
7 select 1, date '2019-05-01', 38 from dual union all
8 select 1, date '2019-06-01', 81 from dual union all
9 select 1, date '2019-07-01', 38 from dual union all
10 select 1, date '2019-08-01', 69 from dual union all
11 select 1, date '2019-09-01', 54 from dual union all
12 select 1, date '2019-10-01', 90 from dual union all
13 select 1, date '2019-11-01', 10 from dual union all
14 select 1, date '2019-12-01', 12 from dual
15 ),
16 table2 (company_id, datum) as
17 (select 1, date '2019-06-01' from dual union all
18 select 1, date '2019-09-01' from dual union all
19 select 1, date '2019-12-01' from dual union all
20 select 1, date '2020-01-01' from dual union all
21 select 1, date '2020-07-01' from dual
22 )
Query begins here:
23 select b.company_id,
24 b.datum ,
25 round(avg(a.income), 2) result
26 from table1 a join table2 b on a.company_id = b.company_id
27 and a.datum > add_months(b.datum, -6)
28 and a.datum <= b.datum
29 group by b.company_id, b.datum;
COMPANY_ID DATUM RESULT
---------- -------- ----------
1 01.06.19 55,83
1 01.09.19 60,17
1 01.12.19 45,5
1 01.01.20 47
SQL>

How can I get comma separated values from a table in a single cell in Oracle SQL? How do I do it?

For example, if the input table I have is the following:
id    value  datetime
----  -----  -------------------
9245     44  2021-10-15 00:00:00
9245     42  2021-09-14 00:00:00
9245     41  2021-08-13 00:00:00
9245     62  2021-05-14 00:00:00
9245    100  2021-04-15 00:00:00
9245    131  2021-03-16 00:00:00
9245    125  2021-02-12 00:00:00
9245    137  2021-01-18 00:00:00
8873    358  2021-10-15 00:00:00
8873    373  2021-09-14 00:00:00
8873    373  2021-08-13 00:00:00
8873    411  2021-07-14 00:00:00
8873    381  2021-06-14 00:00:00
8873    275  2021-05-14 00:00:00
8873    216  2021-04-15 00:00:00
8873    189  2021-03-16 00:00:00
8873    157  2021-02-12 00:00:00
8873    191  2021-01-18 00:00:00
My idea would be to achieve a grouping like the one below:
id    grouped_values
----  -------------------------------------------
8873  191,157,Null,Null,Null,381,411,373,373,358
9245  137,125,131,100,62,Null,Null,41,42,44
As you can see, in this case I have 2 different ids. When I group by id I would like the missing dates to have a null value, and the first value in the list to correspond to the first date for that id. Also, when there is no value on a date, a null value should be added.
How can I put those null values in the correct place? How do I detect the absence of these values and set them as null? How do I make the positions of the values correlate with the dates?
I've been trying to use the LISTAGG or XMLAGG function to group, but at the moment I don't know how to cover the missing places.
Another option; read comments within code. Sample data in lines #1 - 9; query begins at line #10.
SQL> with test(id, value, datum) as
2 (select 1, 5, date '2021-01-10' from dual union all --> missing February and March
3 select 1, 8, date '2021-04-13' from dual union all
4 select 1, 3, date '2021-05-22' from dual union all
5 --
6 select 2, 1, date '2021-03-21' from dual union all
7 select 2, 7, date '2021-04-22' from dual union all --> missing May and June
8 select 2, 9, date '2021-07-10' from dual
9 ),
10 -- calendar per ID
11 minimax as
12 (select id, trunc(min(datum), 'mm') mindat, trunc(max(datum), 'mm') maxdat
13 from test
14 group by id
15 ),
16 calendar as
17 (select m.id,
18 'null' value,
19 add_months(m.mindat, column_value - 1) datum
20 from minimax m
21 cross join table(cast(multiset(select level from dual
22 connect by level <= ceil(months_between(maxdat, mindat)) + 1
23 ) as sys.odcinumberlist))
24 )
25 select c.id,
26 listagg(nvl(to_char(t.value), c.value), ', ') within group (order by c.datum) result
27 from calendar c left join test t on t.id = c.id and trunc(t.datum, 'mm') = c.datum
28 group by c.id;
ID RESULT
---------- ----------------------------------------
1 5, null, null, 8, 3
2 1, 7, null, null, 9
SQL>
Use a PARTITIONed OUTER JOIN:
WITH calendar (day) AS (
SELECT DATE '2021-01-18' FROM DUAL UNION ALL
SELECT DATE '2021-02-12' FROM DUAL UNION ALL
SELECT DATE '2021-03-16' FROM DUAL UNION ALL
SELECT DATE '2021-04-15' FROM DUAL UNION ALL
SELECT DATE '2021-05-14' FROM DUAL UNION ALL
SELECT DATE '2021-06-14' FROM DUAL UNION ALL
SELECT DATE '2021-07-14' FROM DUAL UNION ALL
SELECT DATE '2021-08-13' FROM DUAL UNION ALL
SELECT DATE '2021-09-14' FROM DUAL UNION ALL
SELECT DATE '2021-10-15' FROM DUAL
-- Or
-- SELECT DISTINCT datetime FROM table_name
)
SELECT t.id,
LISTAGG(COALESCE(TO_CHAR(t.value), 'null'), ',')
WITHIN GROUP (ORDER BY c.day)
AS grouped_values
FROM calendar c
LEFT OUTER JOIN table_name t
PARTITION BY (t.id)
ON (c.day = t.datetime)
GROUP BY t.id
Or:
WITH calendar (day) AS (
SELECT ADD_MONTHS(DATE '2021-01-01', LEVEL - 1)
FROM DUAL
CONNECT BY LEVEL <= 10
-- or
-- SELECT ADD_MONTHS(min_dt, LEVEL - 1)
-- FROM (
-- SELECT MIN(TRUNC(datetime, 'MM')) AS min_dt,
-- MAX(TRUNC(datetime, 'MM')) AS max_dt
-- FROM table_name
-- )
-- CONNECT BY ADD_MONTHS(min_dt, LEVEL - 1) <= max_dt
)
SELECT t.id,
LISTAGG(COALESCE(TO_CHAR(t.value), 'null'), ',') WITHIN GROUP (ORDER BY c.day)
AS grouped_values
FROM calendar c
LEFT OUTER JOIN table_name t
PARTITION BY (t.id)
ON (c.day = TRUNC(t.datetime, 'MM'))
GROUP BY t.id
Which, for the sample data:
CREATE TABLE table_name (id, value, datetime) AS
SELECT 9245, 137, DATE '2021-01-18' FROM DUAL UNION ALL
SELECT 9245, 125, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 9245, 131, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 9245, 100, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 9245, 62, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 9245, 41, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 9245, 42, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 9245, 44, DATE '2021-10-15' FROM DUAL UNION ALL
SELECT 8873, 191, DATE '2021-01-18' FROM DUAL UNION ALL
SELECT 8873, 157, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 8873, 189, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 8873, 216, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 8873, 275, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 8873, 381, DATE '2021-06-14' FROM DUAL UNION ALL
SELECT 8873, 411, DATE '2021-07-14' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 8873, 358, DATE '2021-10-15' FROM DUAL;
Both output:
ID    GROUPED_VALUES
----  ----------------------------------------
8873  191,157,189,216,275,381,411,373,373,358
9245  137,125,131,100,62,null,null,41,42,44
db<>fiddle here
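A note on the COALESCE(TO_CHAR(t.value), 'null') wrapper used above: LISTAGG silently skips real NULLs, so without the placeholder string a missing month would simply vanish from the list instead of holding its position. A minimal demonstration of the difference:

SELECT LISTAGG(val, ',') WITHIN GROUP (ORDER BY pos)                   AS nulls_skipped,
       LISTAGG(COALESCE(val, 'null'), ',') WITHIN GROUP (ORDER BY pos) AS with_placeholder
FROM (
    SELECT 1 AS pos, 'a' AS val FROM DUAL UNION ALL
    SELECT 2, NULL FROM DUAL UNION ALL
    SELECT 3, 'c' FROM DUAL
);
-- NULLS_SKIPPED: a,c        WITH_PLACEHOLDER: a,null,c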
You can run this query directly without creating any tables. Here is a version with the start date and end date as parameters:
SELECT
FE.id
,LISTAGG(NVL(TO_CHAR(TRUNC(CON.value)), 'null'), ',') WITHIN GROUP (ORDER BY FE.the_date ASC) GROUPED_VALUES
FROM
(--begin from1
SELECT id
,EXTRACT (YEAR FROM the_date) the_year
,EXTRACT (MONTH FROM the_date) the_month
,the_date
FROM
(
SELECT distinct id
FROM
(
SELECT 9245 id, 137 value, DATE '2021-01-18' datetime FROM DUAL UNION ALL
SELECT 9245, 125, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 9245, 131, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 9245, 100, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 9245, 62, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 9245, 41, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 9245, 42, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 9245, 44, DATE '2021-10-15' FROM DUAL UNION ALL
SELECT 8873, 191, DATE '2021-01-18' FROM DUAL UNION ALL
SELECT 8873, 157, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 8873, 189, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 8873, 216, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 8873, 275, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 8873, 381, DATE '2021-06-14' FROM DUAL UNION ALL
SELECT 8873, 411, DATE '2021-07-14' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 8873, 358, DATE '2021-10-15' FROM DUAL
) table_name
) PS CROSS JOIN
( -- in this sub query you can change the **start date** and **end date** to change the ranges
SELECT
MIN(TO_DATE('2021-01-01' /*start date*/, 'YYYY-MM-DD') + LEVEL - 1) the_date
FROM DUAL
CONNECT BY
TO_DATE('2021-01-01' /*start date*/, 'YYYY-MM-DD') + LEVEL - 1 <= TO_DATE('2021-10-01' /*end date*/, 'YYYY-MM-DD')
GROUP BY EXTRACT (YEAR FROM TO_DATE('2021-01-01' /*start date*/, 'YYYY-MM-DD') + LEVEL - 1)
,EXTRACT (MONTH FROM TO_DATE('2021-01-01' /*start date*/, 'YYYY-MM-DD') + LEVEL - 1)
) the_dates
) FE LEFT OUTER JOIN --end from1
(
SELECT
table_name.id id
, EXTRACT(MONTH FROM table_name.datetime) the_month
, EXTRACT(YEAR FROM table_name.datetime) the_year
,MAX(table_name.datetime) datetime
,SUM(table_name.value) value
FROM
(
SELECT 9245 id, 137 value, DATE '2021-01-18' datetime FROM DUAL UNION ALL
SELECT 9245, 125, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 9245, 131, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 9245, 100, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 9245, 62, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 9245, 41, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 9245, 42, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 9245, 44, DATE '2021-10-15' FROM DUAL UNION ALL
SELECT 8873, 191, DATE '2021-01-18' FROM DUAL UNION ALL
SELECT 8873, 157, DATE '2021-02-12' FROM DUAL UNION ALL
SELECT 8873, 189, DATE '2021-03-16' FROM DUAL UNION ALL
SELECT 8873, 216, DATE '2021-04-15' FROM DUAL UNION ALL
SELECT 8873, 275, DATE '2021-05-14' FROM DUAL UNION ALL
SELECT 8873, 381, DATE '2021-06-14' FROM DUAL UNION ALL
SELECT 8873, 411, DATE '2021-07-14' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-08-13' FROM DUAL UNION ALL
SELECT 8873, 373, DATE '2021-09-14' FROM DUAL UNION ALL
SELECT 8873, 358, DATE '2021-10-15' FROM DUAL
) table_name
GROUP BY table_name.id, EXTRACT(YEAR FROM table_name.datetime), EXTRACT(MONTH FROM table_name.datetime)
) Con ON FE.id = Con.id AND FE.the_year = CON.the_year AND FE.the_month = CON.the_month
GROUP BY FE.id
Note: this query also recognizes the missing dates automatically
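If you prefer real run-time parameters over the hard-coded date literals, the calendar subquery can be driven by bind variables instead. A hedged sketch (:start_date and :end_date are assumed bind names holding YYYY-MM-DD strings):

-- one row per month between the two bound dates (first day of each month)
SELECT MIN(TO_DATE(:start_date, 'YYYY-MM-DD') + LEVEL - 1) AS the_date
FROM   DUAL
CONNECT BY TO_DATE(:start_date, 'YYYY-MM-DD') + LEVEL - 1 <= TO_DATE(:end_date, 'YYYY-MM-DD')
GROUP BY EXTRACT(YEAR  FROM TO_DATE(:start_date, 'YYYY-MM-DD') + LEVEL - 1),
         EXTRACT(MONTH FROM TO_DATE(:start_date, 'YYYY-MM-DD') + LEVEL - 1)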

Generate the days between two dates as different rows in SQL

I am working on generating rows depending on the days (one row per day) between two dates. The first row would have the first date and the second row the second date, but it all depends on the ID and the money. I think it is better to show you with an example:
The origin table:
My target:
Could it be done with a loop?
Thank you very much.
You can use a recursive sub-query factoring clause:
WITH dt_range ( id, dt, next_dt, money ) AS (
SELECT id,
dt,
LEAD(dt) OVER (PARTITION BY id ORDER BY dt),
money
FROM table_name
UNION ALL
SELECT id,
dt + INTERVAL '1' DAY,
next_dt,
money
FROM dt_range
WHERE dt + INTERVAL '1' DAY < next_dt
)
SEARCH DEPTH FIRST BY dt SET dt_order
SELECT id, dt, money FROM dt_range;
Which, for your sample data:
CREATE TABLE table_name ( id, dt, money ) AS
SELECT 500, DATE '2017-02-23', 3500 FROM DUAL UNION ALL
SELECT 500, DATE '2017-02-26', 35000 FROM DUAL UNION ALL
SELECT 500, DATE '2017-02-28', 50000 FROM DUAL UNION ALL
SELECT 200, DATE '2020-05-01', 8888 FROM DUAL UNION ALL
SELECT 200, DATE '2020-05-05', 999 FROM DUAL UNION ALL
SELECT 200, DATE '2020-05-09', 1000 FROM DUAL;
Outputs:
 ID  DT          MONEY
---  ---------  ------
500  23-FEB-17    3500
500  24-FEB-17    3500
500  25-FEB-17    3500
500  26-FEB-17   35000
500  27-FEB-17   35000
500  28-FEB-17   50000
200  01-MAY-20    8888
200  02-MAY-20    8888
200  03-MAY-20    8888
200  04-MAY-20    8888
200  05-MAY-20     999
200  06-MAY-20     999
200  07-MAY-20     999
200  08-MAY-20     999
200  09-MAY-20    1000
db<>fiddle here
If you are using Oracle 11g then it has bugs iterating over dates; this can be easily fixed by iterating over a number and then adding it to a date (rather than iterating directly on the date):
WITH dt_range ( id, dt, offset, next_dt, money ) AS (
SELECT id,
dt,
0,
LEAD(dt) OVER (PARTITION BY id ORDER BY dt),
money
FROM table_name
UNION ALL
SELECT id,
dt,
offset + 1,
next_dt,
money
FROM dt_range
WHERE dt + offset + 1 < next_dt
)
SEARCH DEPTH FIRST BY dt SET dt_order
SELECT id, dt + offset AS dt, money FROM dt_range;
db<>fiddle here
For example:
with
simple_data( id,dates,money) as
(
select 500, date '2017-02-23', 3500 from dual union all
select 500, date '2017-02-26', 35000 from dual union all
select 500, date '2017-02-28', 50000 from dual union all
select 200, date '2020-05-01', 8888 from dual union all
select 200, date '2020-05-05', 999 from dual union all
select 200, date '2020-05-09', 1000 from dual
)
,step1 as
(
select
sd.id,
sd.money,
sd.dates,
lead (dates,1,dates) over(partition by id order by dates)-1 lead_dts
from simple_data sd
)
select
st1.id,
st1.dates + to_number(t.column_value)-1 as dates,
st1.money
from step1 st1,table(cast(multiset(select level from dual connect by level<= st1.lead_dts-st1.dates+1) as ora_mining_varchar2_nt)) t
order by id desc,dates;
----OR------
with
simple_data( id,dates,money) as
(
select 500, date '2017-02-23', 3500 from dual union all
select 500, date '2017-02-26', 35000 from dual union all
select 500, date '2017-02-28', 50000 from dual union all
select 200, date '2020-05-01', 8888 from dual union all
select 200, date '2020-05-05', 999 from dual union all
select 200, date '2020-05-09', 1000 from dual
)
,step1 as
(
select
sd.id,
sd.money,
sd.dates,
lag (dates,1,dates) over(partition by id order by dates desc) lag_dts
from simple_data sd
)
select
st1.id,
st1.lag_dts - to_number(t.column_value) as dates,
st1.money
from step1 st1,table(cast(multiset(select st1.lag_dts-st1.dates - level +1 from dual connect by level<= st1.lag_dts-st1.dates) as ora_mining_varchar2_nt)) t
order by id desc,dates;
12c and later:
with
simple_data( id,dates,money) as
(
select 500, date '2017-02-23', 3500 from dual union all
select 500, date '2017-02-26', 35000 from dual union all
select 500, date '2017-02-28', 50000 from dual union all
select 200, date '2020-05-01', 8888 from dual union all
select 200, date '2020-05-05', 999 from dual union all
select 200, date '2020-05-09', 1000 from dual
)
,step1 as
(
select
sd.id,
sd.money,
sd.dates,
lead (dates,1,dates) over(partition by id order by dates)-1 lead_dts
from simple_data sd
)
select
st1.id,
st1.dates + t.lvl-1 as dates,
st1.money
from step1 st1,lateral(select level lvl from dual connect by level<= st1.lead_dts-st1.dates+1) t
order by id desc,dates;
---OR-----
with
simple_data( id,dates,money) as
(
select 500, date '2017-02-23', 3500 from dual union all
select 500, date '2017-02-26', 35000 from dual union all
select 500, date '2017-02-28', 50000 from dual union all
select 200, date '2020-05-01', 8888 from dual union all
select 200, date '2020-05-05', 999 from dual union all
select 200, date '2020-05-09', 1000 from dual
)
,step1 as
(
select
sd.id,
sd.money,
sd.dates,
lag (dates,1,dates) over(partition by id order by dates desc) lag_dts
from simple_data sd
)
select
st1.id,
st1.lag_dts - t.lvl as dates,
st1.money
from step1 st1,lateral(select st1.lag_dts-st1.dates - level +1 lvl from dual connect by level<= st1.lag_dts-st1.dates ) t
order by id desc,dates;
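A side note on the collection type: ora_mining_varchar2_nt is a nested table of VARCHAR2, which is why the first query needs TO_NUMBER(t.column_value). Using a numeric collection such as sys.odcinumberlist avoids that conversion. A minimal sketch, reusing the step1 CTE from the first query above (the one that computes lead_dts):

select st1.id,
       st1.dates + t.column_value - 1 as dates,   -- column_value is already a number here
       st1.money
from   step1 st1,
       table(cast(multiset(select level from dual
                           connect by level <= st1.lead_dts - st1.dates + 1)
                  as sys.odcinumberlist)) t
order  by id desc, dates;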

Oracle list only records that changed

I have the following code that produces a table as shown in the image:
with test (code, datum) as
(select 600, date '2018-02-01' from dual union all
select 600, date '2018-02-02' from dual union all
select 0, date '2018-02-03' from dual union all
select 0, date '2018-02-04' from dual union all
select 0, date '2018-02-05' from dual union all
select 600, date '2018-02-06' from dual union all
select 600, date '2018-02-07' from dual union all
select 0, date '2018-02-08' from dual union all
select 0, date '2018-02-09' from dual
)
select * from test;
I have tried the following, but it does not return what I need.
select * from (
select test.*, min(datum) over (partition by code order by code) as min_date,
max(datum) over (partition by code order by code) as max_date
from test) where min_date = datum;
What I would like to achieve is list only the records where a change occurs on the 'code' column (before and after record where the change occurs).
So the result set should look like this:
02/FEB/18 00:00:00 600
03/FEB/18 00:00:00 0
05/FEB/18 00:00:00 0
06/FEB/18 00:00:00 600
07/FEB/18 00:00:00 600
08/FEB/18 00:00:00 0
I referenced this question, but it does not address the same issue I have.
Any help appreciated, thank you.
UPDATE:
This is closer to what I would like to achieve. I can list all the rows where the code and change columns are not the same. However, I also need to list the record following the one where these values differ.
with test (code, datum) as
(select 600, date '2018-02-01' from dual union all
select 600, date '2018-02-02' from dual union all
select 0, date '2018-02-03' from dual union all
select 0, date '2018-02-04' from dual union all
select 0, date '2018-02-05' from dual union all
select 600, date '2018-02-06' from dual union all
select 600, date '2018-02-07' from dual union all
select 0, date '2018-02-08' from dual union all
select 0, date '2018-02-09' from dual
)
,y1 as (
select test.datum, test.code, lead(code) over (order by datum) as change
from test
)
select * from y1;
The final result set should contain the highlighted rows only.
UPDATE 2:
I think I may have got it right; I still need to verify it, but this seems to work:
with test (code, datum) as
(select 600, date '2018-02-01' from dual union all
select 600, date '2018-02-02' from dual union all
select 0, date '2018-02-03' from dual union all
select 0, date '2018-02-04' from dual union all
select 0, date '2018-02-05' from dual union all
select 600, date '2018-02-06' from dual union all
select 600, date '2018-02-07' from dual union all
select 0, date '2018-02-08' from dual union all
select 0, date '2018-02-09' from dual
)
,y1 as (
select test.datum, test.code, lag(nvl(code,code)) over (order by datum) as after, lead(nvl(code,code)) over (order by datum) as before
from test
)
select * from y1 where code != before or code != after;
Not sure if this will help; I could not see any clear logic behind the expected output in your question.
with test (code, datum) as
(select 600, date '2018-02-01' from dual union all
select 600, date '2018-02-02' from dual union all
select 0, date '2018-02-03' from dual union all
select 0, date '2018-02-04' from dual union all
select 0, date '2018-02-05' from dual union all
select 600, date '2018-02-06' from dual union all
select 600, date '2018-02-07' from dual union all
select 0, date '2018-02-08' from dual union all
select 0, date '2018-02-09' from dual
)
,y1 as (
select test.datum, test.code, lead(code) over (order by datum) as change
from test
UNION
select test.datum, test.code, lag(code) over (order by datum) as change
from test
)
select * from y1
where change = 600;
The following script produced the expected result set:
with test (code, datum) as
(select 600, date '2018-02-01' from dual union all
select 600, date '2018-02-02' from dual union all
select 0, date '2018-02-03' from dual union all
select 0, date '2018-02-04' from dual union all
select 0, date '2018-02-05' from dual union all
select 600, date '2018-02-06' from dual union all
select 600, date '2018-02-07' from dual union all
select 0, date '2018-02-08' from dual union all
select 0, date '2018-02-09' from dual
)
,y1 as (
select test.datum, test.code, lag(nvl(code,code)) over (order by datum) as after, lead(nvl(code,code)) over (order by datum) as before
from test
)
select * from y1 where code != before or code != after;
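A slightly tidier variant of the same idea: LAG and LEAD accept a default value as their third argument, so each neighbour can fall back to the row's own code, which avoids the NVL wrapper and keeps the first and last rows out of any NULL comparisons. A minimal sketch, assuming the same test CTE as above is prepended:

with y1 as (
  select datum,
         code,
         lag(code, 1, code)  over (order by datum) as prev_code,   -- previous row's code, defaulting to own code
         lead(code, 1, code) over (order by datum) as next_code    -- next row's code, defaulting to own code
  from   test
)
select datum, code
from   y1
where  code != prev_code
or     code != next_code
order  by datum;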

SQL logic for Decompression

RDW (Retail Data Warehouse) reduces data volume by using compression logic. Compression refers to storing physical data that only reflects changes to the underlying data source.
The inventory fact table stores data in the following form:
Week Item Location stock_on_hand
--------------------------------------------------
201601 I1 L1 50
201602 I1 L1 30
201605 I1 L1 60
201608 I1 L1 50
But I need a SQL query to get the following result:
Week Item Location stock_on_hand
--------------------------------------------------
201601 I1 L1 50
201602 I1 L1 30
201603 I1 L1 30
201604 I1 L1 30
201605 I1 L1 60
201606 I1 L1 60
201607 I1 L1 60
201608 I1 L1 50
Test data (I added several rows for better understanding and split the year and week columns):
with t(year, Week , Item, Location, stock_on_hand) as
(select 2016, 01, 'I1', 'L1', 50 from dual union all
select 2016, 02, 'I1', 'L1', 30 from dual union all
select 2016 ,05, 'I1', 'L1', 60 from dual union all
select 2016 ,08, 'I1', 'L1', 50 from dual union all
select 2016, 02, 'I2', 'L1', 30 from dual union all
select 2016, 08, 'I2', 'L1', 40 from dual union all
select 2016, 02, 'I1', 'L2', 10 from dual union all
select 2016, 08, 'I1', 'L2', 40 from dual union all
select 2016, 08, 'I1', 'L3', 40 from dual)
Query
with t(year, Week , Item, Location, stock_on_hand) as
(select 2016, 01, 'I1', 'L1', 50 from dual union all
select 2016, 02, 'I1', 'L1', 30 from dual union all
select 2016 ,05, 'I1', 'L1', 60 from dual union all
select 2016 ,08, 'I1', 'L1', 50 from dual union all
select 2016, 02, 'I2', 'L1', 30 from dual union all
select 2016, 08, 'I2', 'L1', 40 from dual union all
select 2016, 02, 'I1', 'L2', 10 from dual union all
select 2016, 08, 'I1', 'L2', 40 from dual union all
select 2016, 08, 'I1', 'L3', 40 from dual),
temp(year, Week , Item, Location, stock_on_hand, ct) as(
select year, Week , Item, Location, stock_on_hand, nvl(lead(Week) over(partition by Item, Location order by year, Week)-Week,1) from t)
select year, Week + rn - 1 as week, Item, Location, stock_on_hand
from temp, xmltable('1 to xs:integer($ct)' passing ct as "ct" columns rn number path '.')
order by Item, Location ,year, week
This approach also has one minor issue: if an interval spans different years, e.g.
select 2016, 01, 'I1', 'L1', 50 from dual union all
select 2017, 02, 'I1', 'L1', 30 from dual union all
then it works incorrectly. I don't know whether your data has the same pattern. If it does, please add that information to the post or answer.
UPDATE
For intervals that span several years you can do the following (for startDate I chose a date at the start of the ISO week):
with t(dateStart , Item, Location, stock_on_hand) as
(select to_date('28/12/2015', 'dd-mm-yyyy'), 'I1', 'L1', 50 from dual union all
select to_date('04/01/2016', 'dd-mm-yyyy'), 'I1', 'L1', 30 from dual union all
select to_date('25/01/2016', 'dd-mm-yyyy'), 'I1', 'L1', 60 from dual union all
select to_date('15/02/2016', 'dd-mm-yyyy'), 'I1', 'L1', 50 from dual union all
select to_date('01/01/2018', 'dd-mm-yyyy'), 'I1', 'L1', 30 from dual union all
select to_date('04/01/2016', 'dd-mm-yyyy'), 'I2', 'L1', 40 from dual union all
select to_date('15/02/2016', 'dd-mm-yyyy'), 'I2', 'L1', 10 from dual union all
select to_date('04/01/2016', 'dd-mm-yyyy'), 'I1', 'L2', 30 from dual union all
select to_date('15/02/2016', 'dd-mm-yyyy'), 'I1', 'L2', 40 from dual union all
select to_date('15/02/2016', 'dd-mm-yyyy'), 'I1', 'L3', 40 from dual),
temp(dateStart, Item, Location, stock_on_hand, ct) as(
select dateStart , Item, Location, stock_on_hand, nvl((lead(dateStart) over(partition by Item, Location order by dateStart)-dateStart)/7,1) from t)
select dateStart + (rn - 1)*7 as week, Item, Location, stock_on_hand
from temp, xmltable('1 to xs:integer($ct)' passing ct as "ct" columns rn number path '.')
order by Item, Location , dateStart
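If the final output still needs the original YYYYWW week numbers rather than dates, the generated dates can be converted back with TO_CHAR and the ISO year/week format elements. A minimal sketch, assuming weeks are ISO weeks (as the Monday startDate values above suggest) and reusing the temp CTE from the query above:

select to_char(dateStart + (rn - 1) * 7, 'IYYYIW') as week,   -- e.g. 201601
       Item, Location, stock_on_hand
from   temp,
       xmltable('1 to xs:integer($ct)' passing ct as "ct" columns rn number path '.')
order  by Item, Location, dateStart;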