I'm new to PL/SQL.
I have a procedure like:
create or replace procedure insert_charge is
  v_count number;
begin
  for i in (select t.name, t.hire_date, t.salary
              from emp t
             where t.create_date >= (sysdate - 30)
               and t.salary = 0) loop
    insert into charge
      (name, hire_date, salary)
    values
      (i.name, i.hire_date, i.salary);
    commit;
    update emp l
       set l.status = 1
     where l.name = i.name
       and l.status = 0
       and l.hire_date = i.hire_date;
    commit;
  end loop;
exception
  when others then
    rollback;
end insert_charge;
How can I use a FORALL statement instead of this?
You can't.
The FORALL statement runs one DML statement multiple times.
ONE DML statement; you have two (an update and an insert).
As for the code you wrote:
move COMMIT out of the loop
remove that when others "handler", as it handles nothing. If an error happens, Oracle will silently roll back and report that the procedure completed successfully, while it actually failed
There are a few additional tasks for FORALL: namely, defining a collection type to describe the bulk area and a variable of that collection type to contain the actual data. As a safety valve you should place a LIMIT on the number of rows fetched at once. Bulk Collect / Forall is a trade-off of speed vs memory, and at a certain point (depending on your configuration) it has diminishing returns. Besides, the memory you use for it is unavailable to other processes in the database, so play well with your fellow queries. Then, as @Littlefoot points out, DO NOT SQUASH EXCEPTIONS: log them and re-raise. Finally, a note about commits: **do not commit after each DML statement**. You may want to spend some time investigating [transactions][1]. With this in mind, your procedure becomes something like:
create or replace procedure insert_charge is
  cursor c_emp_cur is
    select t.name, t.hire_date, t.salary
      from emp t
     where t.create_date >= (sysdate - 30)
       and t.salary = 0;

  type c_emp_array_t is table of c_emp_cur%rowtype;  -- define collection for rows selected
  k_emp_rows_max constant integer := 500;            -- defines the maximum rows per fetch
  l_emp_collect  c_emp_array_t;                      -- define variable of rows collection
begin
  open c_emp_cur;
  loop
    fetch c_emp_cur                       -- fetch up to LIMIT rows from cursor
      bulk collect
      into l_emp_collect
      limit k_emp_rows_max;

    forall i in 1 .. l_emp_collect.count  -- run insert for ALL rows in the collection
      insert into charge (name, hire_date, salary)
      values ( l_emp_collect(i).name
             , l_emp_collect(i).hire_date
             , l_emp_collect(i).salary);

    forall i in 1 .. l_emp_collect.count  -- run update for ALL rows in the collection
      update emp l
         set l.status = 1
       where l.name = l_emp_collect(i).name
         and l.status = 0
         and l.hire_date = l_emp_collect(i).hire_date;

    exit when c_emp_cur%notfound;         -- no more rows, so exit
  end loop;
  close c_emp_cur;

  commit;  -- JUST 1 COMMIT
exception
  when others then
    generate_exception_log ('insert_charge', sysdate, sqlerrm);  -- ASSUMED autonomous-transaction procedure for the exception log table
    raise;
end insert_charge;
DISCLAIMER: Not tested.
[1]: https://www.techopedia.com/definition/16455/transaction-databases
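The generate_exception_log call above is assumed to exist. Purely as an illustration, a minimal sketch of such an autonomous-transaction logger might look like the following (the exception_log table and its columns are assumptions, not part of the original answer):

create or replace procedure generate_exception_log (
  p_proc_name in varchar2,
  p_when      in date,
  p_message   in varchar2
) is
  pragma autonomous_transaction;  -- the log commits independently of the failing caller
begin
  insert into exception_log (proc_name, logged_at, message)  -- assumed log table
  values (p_proc_name, p_when, p_message);
  commit;  -- an autonomous transaction must commit (or roll back) before returning
end generate_exception_log;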
Assume there are two tables TST_SAMPLE (10000 rows) and TST_SAMPLE_STATUS (empty).
I want to iterate over each record in TST_SAMPLE and add exactly one record to TST_SAMPLE_STATUS accordingly.
In a single thread that would be simply this:
begin
  for r in (select * from TST_SAMPLE)
  loop
    insert into TST_SAMPLE_STATUS(rec_id, rec_status)
    values (r.rec_id, 'TOUCHED');
  end loop;
  commit;
end;
/
In a multithreaded solution there's a situation that is not clear to me.
Could you explain what causes one row of TST_SAMPLE to be processed several times?
Please see details below.
create table TST_SAMPLE(
rec_id number(10) primary key
);
create table TST_SAMPLE_STATUS(
rec_id number(10),
rec_status varchar2(10),
session_id varchar2(100)
);
begin
insert into TST_SAMPLE(rec_id)
select LEVEL from dual connect by LEVEL <= 10000;
commit;
end;
/
CREATE OR REPLACE PROCEDURE tst_touch_recs(pi_limit int) is
  v_last_iter_count int;
begin
  loop
    v_last_iter_count := 0;
    --------------------------
    for r in (select *
                from TST_SAMPLE A
               where rownum < pi_limit
                 and NOT EXISTS (select null
                                   from TST_SAMPLE_STATUS B
                                  where B.rec_id = A.rec_id)
              FOR UPDATE SKIP LOCKED)
    loop
      insert into TST_SAMPLE_STATUS(rec_id, rec_status, session_id)
      values (r.rec_id, 'TOUCHED', SYS_CONTEXT('USERENV', 'SID'));
      v_last_iter_count := v_last_iter_count + 1;
    end loop;
    commit;
    --------------------------
    exit when v_last_iter_count = 0;
  end loop;
end;
/
In the FOR-LOOP I try to iterate over rows that:
- have no status (the NOT EXISTS clause)
- are not currently locked by another thread (FOR UPDATE SKIP LOCKED)
There's no requirement on the exact number of rows in an iteration.
Here pi_limit is just the maximum size of one batch. The only thing needed is to process each row of TST_SAMPLE in exactly one session.
So let's run this procedure in 3 threads.
declare
v_job_id number;
begin
dbms_job.submit(v_job_id, 'begin tst_touch_recs(100); end;', sysdate);
dbms_job.submit(v_job_id, 'begin tst_touch_recs(100); end;', sysdate);
dbms_job.submit(v_job_id, 'begin tst_touch_recs(100); end;', sysdate);
commit;
end;
Unexpectedly, we see that some rows were processed in several sessions:
select count(unique rec_id) AS unique_count,
count(rec_id) AS total_count
from TST_SAMPLE_STATUS;
| unique_count | total_count |
------------------------------
| 10000 | 17397 |
------------------------------
-- run to see duplicates
select *
from TST_SAMPLE_STATUS
where REC_ID in (
select REC_ID
from TST_SAMPLE_STATUS
group by REC_ID
having count(*) > 1
)
order by REC_ID;
Please help me recognize the mistakes in the implementation of procedure tst_touch_recs.
Here's a little example that shows why you're reading rows twice.
Run the following code in two sessions, starting the second a few seconds after the first:
declare
  cursor c is
    select a.*
      from TST_SAMPLE A
     where rownum < 10
       and NOT EXISTS (select null
                         from TST_SAMPLE_STATUS B
                        where B.rec_id = A.rec_id)
    FOR UPDATE SKIP LOCKED;

  type rec is table of c%rowtype index by pls_integer;
  rws rec;
begin
  open c;  -- data are read consistent to this time
  dbms_lock.sleep ( 10 );

  fetch c
    bulk collect
    into rws;

  for i in 1 .. rws.count loop
    dbms_output.put_line ( rws(i).rec_id );
  end loop;

  commit;
end;
/
You should see both sessions display the same rows.
Why?
Because Oracle Database has statement-level consistency, the result set for both is frozen when you open the cursor.
But when you have SKIP LOCKED, the FOR UPDATE locking only kicks in when you fetch the rows.
So session 1 starts and finds the first 9 rows not in TST_SAMPLE_STATUS. It then waits 10 seconds.
Provided you start session 2 within these 10 seconds, the cursor will look for the same nine rows.
At this point no rows are locked.
Now, here's where it gets interesting.
The sleep in the first session will finish. It'll then fetch the rows, locking them and skipping any that are already locked.
Very shortly after, it'll commit. Releasing the lock.
A few moments later, session 2 comes to read these rows. At this point the rows are not locked!
So there's nothing to skip.
How exactly you solve this depends on what you're trying to do.
Assuming you can't move to a set-based approach, you could make the transactions serializable by adding:
set transaction isolation level serializable;
before the cursor loop. This moves you to transaction-level consistency, enabling the database to detect that "something changed" when fetching rows.
But you'll need to catch "ORA-08177: can't serialize access for this transaction" errors within the outer loop, or any process that re-reads the same rows will drop out at this point.
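For illustration, here is a minimal, untested sketch of the serializable variant with an ORA-08177 retry around a single batch (the batch size of 100 and the retry loop are assumptions):

declare
  e_cant_serialize exception;
  pragma exception_init ( e_cant_serialize, -8177 );
begin
  loop
    begin
      set transaction isolation level serializable;

      for r in (select a.*
                  from TST_SAMPLE a
                 where rownum < 100
                   and not exists (select null
                                     from TST_SAMPLE_STATUS b
                                    where b.rec_id = a.rec_id)
                for update skip locked)
      loop
        insert into TST_SAMPLE_STATUS(rec_id, rec_status, session_id)
        values (r.rec_id, 'TOUCHED', SYS_CONTEXT('USERENV', 'SID'));
      end loop;

      commit;  -- ends the serializable transaction
      exit;
    exception
      when e_cant_serialize then
        rollback;  -- another session changed the rows; retry the batch
    end;
  end loop;
end;
/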
Or, as commenters have suggested, use Advanced Queuing.
I have about 94,000 records that need to be deleted, but I have been told not to delete them all at once because that will slow performance due to the delete trigger. What would be the best solution to accomplish this? I was thinking of committing after every 1,000 deletes, but I'm not sure how to implement it, or whether it would reduce performance even more.
DECLARE
  CURSOR CLEAN IS
    SELECT EMP_ID, ACCT_ID
      FROM RECORDS_TO_DELETE; -- Table containing the records that need to be deleted.
  COUNTER INTEGER := 0;
BEGIN
  FOR F IN CLEAN LOOP
    COUNTER := COUNTER + 1;
    DELETE FROM EMPLOYEES
     WHERE EMP_ID = F.EMP_ID
       AND ACCT_ID = F.ACCT_ID;
    IF MOD(COUNTER, 1000) = 0 THEN
      COMMIT;
    END IF;
  END LOOP;
  COMMIT;
END;
You need to read a bit about BULK COLLECT statements in Oracle. This is commonly considered the proper way of working with large tables.
Example:
DECLARE
  CURSOR c_delete IS
    SELECT ROWID FROM ps_al_chk_memo;   -- assumed declarations to make the example self-contained
  TYPE t_rowid_tab IS TABLE OF ROWID;
  t_delete        t_rowid_tab;
  l_delete_buffer PLS_INTEGER := 1000;  -- batch size
BEGIN
  OPEN c_delete;
  LOOP
    FETCH c_delete BULK COLLECT INTO t_delete LIMIT l_delete_buffer;
    FORALL i IN 1..t_delete.COUNT
      DELETE ps_al_chk_memo
       WHERE ROWID = t_delete (i);
    COMMIT;                             -- commit each batch
    EXIT WHEN c_delete%NOTFOUND;
  END LOOP;
  CLOSE c_delete;
END;
You can do it in a single statement, which should be the fastest way of all:
DELETE FROM EMPLOYEES
WHERE (EMP_ID, ACCT_ID) = ANY (SELECT EMP_ID, ACCT_ID FROM RECORDS_TO_DELETE);
Since the volume of records is not that large, you can still go with plain SQL rather than PL/SQL; whenever possible, try SQL. I think it should not cause that much of a performance impact.
DELETE FROM EMPLOYEES E
 WHERE EXISTS
       (SELECT 1
          FROM RECORDS_TO_DELETE F
         WHERE E.EMP_ID = F.EMP_ID    -- qualify both sides so EMP_ID/ACCT_ID
           AND E.ACCT_ID = F.ACCT_ID); -- don't resolve inside the subquery
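If you do want intermediate commits to keep each transaction small, you can also batch the same delete in plain SQL and loop until nothing is left to delete; a minimal, untested sketch (the batch size of 1000 is an assumption):

BEGIN
  LOOP
    DELETE FROM EMPLOYEES E
     WHERE EXISTS
           (SELECT 1
              FROM RECORDS_TO_DELETE F
             WHERE E.EMP_ID = F.EMP_ID
               AND E.ACCT_ID = F.ACCT_ID)
       AND ROWNUM <= 1000;          -- delete at most 1000 rows per pass
    EXIT WHEN SQL%ROWCOUNT = 0;     -- nothing left to delete
    COMMIT;                         -- commit each batch
  END LOOP;
  COMMIT;
END;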
Hope this helps.
I need a trigger that checks whether an updated worker can be moved to another team.
CREATE TABLE WORKERS
(
ID_WORKER NUMBER(4,0), --FK
ID_TEAM NUMBER(2,0) --FK
);
My trigger looks like:
CREATE OR REPLACE TRIGGER TEAM_LIMIT
BEFORE INSERT OR UPDATE OF ID_TEAM ON WORKERS
FOR EACH ROW
DECLARE
  V_num NUMBER;
BEGIN
  SELECT Count(*) INTO V_num FROM WORKERS WHERE ID_TEAM = :new.ID_TEAM;
  IF V_num >= 5 THEN
    RAISE_APPLICATION_ERROR(-20025, ' Error nr ... bleble');
  END IF;
END;
This generates the error "table %s.%s is mutating, trigger/function may not see it" when a row is updated. How do I write these statements properly so they don't generate this kind of error?
You can use a compound trigger; it looks like this (not tested):
CREATE OR REPLACE TRIGGER TEAM_LIMIT
FOR INSERT OR UPDATE OF ID_TEAM ON WORKERS
COMPOUND TRIGGER

  V_num NUMBER;
  TYPE Row_TableType IS TABLE OF WORKERS.ID_TEAM%TYPE;
  AffectedTeams Row_TableType;

  BEFORE STATEMENT IS
  BEGIN
    AffectedTeams := Row_TableType(); -- init the table variable
  END BEFORE STATEMENT;
  -------------------
  BEFORE EACH ROW IS
  BEGIN
    AffectedTeams.EXTEND;
    AffectedTeams(AffectedTeams.LAST) := :NEW.ID_TEAM;
  END BEFORE EACH ROW;
  --------------------
  AFTER STATEMENT IS
  BEGIN
    FOR i IN 1 .. AffectedTeams.COUNT LOOP  -- 1..COUNT is safe if no rows were affected
      SELECT Count(*) INTO V_num FROM WORKERS WHERE ID_TEAM = AffectedTeams(i);
      IF V_num >= 5 THEN
        RAISE_APPLICATION_ERROR(-20025, ' Error nr ... bleble');
      END IF;
    END LOOP;
  END AFTER STATEMENT;
END TEAM_LIMIT;
/
You can use a statement trigger. Statement triggers fire only once for each statement executed, rather than once for each row affected. They are useful here because they aren't subject to the restriction against querying the table on which the trigger is declared. They don't have access to the :OLD and :NEW row values, but with a little thought you can still accomplish what you're trying to do:
CREATE OR REPLACE TRIGGER TEAM_LIMIT
  BEFORE INSERT OR UPDATE OF ID_TEAM ON WORKERS
  -- Note: no FOR EACH ROW - therefore, this is a statement trigger
DECLARE
  nMax_team_count NUMBER;
BEGIN
  SELECT MAX(TEAM_COUNT)
    INTO nMax_team_count
    FROM (SELECT ID_TEAM, COUNT(*) AS TEAM_COUNT
            FROM WORKERS
           GROUP BY ID_TEAM);
  IF nMax_team_count >= 5 THEN
    RAISE_APPLICATION_ERROR(-20025, ' Error nr ... bleble');
  END IF;
END TEAM_LIMIT;
Instead of looking at the team of the particular worker which has been updated, we find the counts of workers on each team, then extract the largest count, and if it's more than four we raise the appropriate exception.
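As a quick, untested sanity check (assuming some team already holds five workers):

-- With five workers already on a team, the statement trigger
-- rejects the next insert or team change:
INSERT INTO WORKERS (ID_WORKER, ID_TEAM) VALUES (101, 3);
-- ORA-20025:  Error nr ... bleble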
Share and enjoy.
In Oracle 11g, I am using the following in a procedure. Can someone please provide a better solution to achieve the same results?
FOR REC IN
  (SELECT E.EMP
     FROM EMPLOYEE E
     JOIN COMPANY C ON E.EMP = C.EMP
    WHERE C.FLAG = 'Y')
LOOP
  UPDATE EMPLOYEE SET FLAG = 'Y' WHERE EMP = REC.EMP;
END LOOP;
Is there a more efficient/better way to do this? I feel as if this method will run one update statement for each record found (Please correct me if I am wrong).
Here's the actual code in full:
create or replace
PROCEDURE ACTION_MSC AS
BEGIN
  -- ALL MIGRATED CONTACTS, CANDIDATES, COMPANIES, JOBS

  -- ALL MIGRATED CANDIDATES, CONTACTS
  FOR REC IN (SELECT DISTINCT AC.PEOPLE_HEX
                FROM ACTION AC JOIN PEOPLE P ON AC.PEOPLE_HEX = P.PEOPLE_HEX
               WHERE P.TO_MIGRATE = 'Y')
  LOOP
    UPDATE ACTION SET TO_MIGRATE = 'Y' WHERE PEOPLE_HEX = REC.PEOPLE_HEX;
  END LOOP;

  -- ALL MIGRATED COMPANIES
  FOR REC IN (SELECT DISTINCT AC.COMPANY_HEX
                FROM ACTION AC JOIN COMPANY CM ON AC.COMPANY_HEX = CM.COMPANY_HEX
               WHERE CM.TO_MIGRATE = 'Y')
  LOOP
    UPDATE ACTION SET TO_MIGRATE = 'Y' WHERE COMPANY_HEX = REC.COMPANY_HEX;
  END LOOP;

  -- ALL MIGRATED JOBS
  FOR REC IN (SELECT DISTINCT AC.JOB_HEX
                FROM ACTION AC JOIN "JOB" J ON AC.JOB_HEX = J.JOB_HEX
               WHERE J.TO_MIGRATE = 'Y')
  LOOP
    UPDATE ACTION SET TO_MIGRATE = 'Y' WHERE JOB_HEX = REC.JOB_HEX;
  END LOOP;

  COMMIT;
END ACTION_MSC;
You're right, it will do one update for each record found. Looks like you could just do:
UPDATE EMPLOYEE SET FLAG = 'Y'
WHERE EMP IN (SELECT EMP FROM COMPANY WHERE FLAG = 'Y')
AND FLAG != 'Y';
A single update will generally be faster and more efficient than multiple individual row updates in a loop; see this answer for another example. Apart from anything else, you're reducing the number of context switches between PL/SQL and SQL, which add up if you have a lot of rows. You could always benchmark this with your own data, of course.
I've added a check of the current flag state so you don't do a pointless update with no changes.
It's fairly easy to compare the approaches to see that a single update is faster than one in a loop; with some contrived data:
create table people (id number, people_hex varchar2(16), to_migrate varchar2(1));
insert into people (id, people_hex, to_migrate)
select level, to_char(level - 1, 'xx'), 'Y'
from dual
connect by level <= 100;
create table action (id number, people_hex varchar2(16), to_migrate varchar2(1));
insert into action (id, people_hex, to_migrate)
select level, to_char(mod(level, 200), 'xx'), 'N'
from dual
connect by level <= 500000;
All of these will update half the rows in the action table. Updating in a loop:
begin
for rec in (select distinct ac.people_hex
from action ac join people p on ac.people_hex=p.people_hex
where p.to_migrate='Y')
loop
update action set to_migrate='Y' where people_hex=rec.people_hex;
end loop;
end;
/
Elapsed: 00:00:10.87
Single update (after rollback; I've left this in a block to mimic your procedure):
begin
update action set to_migrate = 'Y'
where people_hex in (select people_hex from people where to_migrate = 'Y');
end;
/
Elapsed: 00:00:07.14
Merge (after rollback):
begin
merge into action a
using (select people_hex, to_migrate from people where to_migrate = 'Y') p
on (a.people_hex = p.people_hex)
when matched then update set a.to_migrate = p.to_migrate;
end;
/
Elapsed: 00:00:07.00
There's some variation from repeated runs, particularly that update and merge are usually pretty close but sometimes swap which is faster in my environment; but both are always significantly faster than updating in a loop. You can repeat this in your own environment and with your own data spread and volumes, and you should if performance is that critical; but a single update is going to be faster than the loop. Whether you use update or merge isn't likely to make much difference.
I want to fetch around 6 million rows from one table and insert them all into another table.
How do I do it using BULK COLLECT and FORALL?
declare
  -- define array type of the new table
  TYPE new_table_array_type IS TABLE OF NEW_TABLE%ROWTYPE INDEX BY BINARY_INTEGER;

  -- define array object of new table
  new_table_array_object new_table_array_type;

  -- fetch size for the bulk operation; scale the value to tweak
  -- the trade-off between IO and memory usage
  fetch_size NUMBER := 5000;

  -- define select statement on the old table;
  -- select the desired columns of OLD_TABLE to be filled in NEW_TABLE
  CURSOR old_table_cursor IS
    select * from OLD_TABLE;
BEGIN
  OPEN old_table_cursor;
  LOOP
    -- bulk fetch (read) operation
    FETCH old_table_cursor BULK COLLECT
      INTO new_table_array_object LIMIT fetch_size;

    -- exit when the batch is empty; testing %NOTFOUND right after
    -- the fetch would skip the final, partially filled batch
    EXIT WHEN new_table_array_object.COUNT = 0;

    -- do your business logic here (if any)
    -- FOR i IN 1 .. new_table_array_object.COUNT LOOP
    --   new_table_array_object(i).some_column := 'HELLO PLSQL';
    -- END LOOP;

    -- bulk insert operation
    FORALL i IN INDICES OF new_table_array_object SAVE EXCEPTIONS
      INSERT INTO NEW_TABLE VALUES new_table_array_object(i);
    COMMIT;
  END LOOP;
  CLOSE old_table_cursor;
END;
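One caveat worth illustrating: because the FORALL above uses SAVE EXCEPTIONS, row-level failures are collected and raised afterwards as ORA-24381, which the block does not catch. A minimal, untested sketch of the handler pattern, reusing the OLD_TABLE/NEW_TABLE names from above (the 100-row sample fetch is only for illustration):

DECLARE
  TYPE new_table_array_type IS TABLE OF NEW_TABLE%ROWTYPE INDEX BY BINARY_INTEGER;
  new_table_array_object new_table_array_type;
  bulk_errors EXCEPTION;
  PRAGMA EXCEPTION_INIT(bulk_errors, -24381);  -- raised by FORALL ... SAVE EXCEPTIONS
BEGIN
  SELECT * BULK COLLECT INTO new_table_array_object
    FROM OLD_TABLE WHERE ROWNUM <= 100;
  FORALL i IN 1 .. new_table_array_object.COUNT SAVE EXCEPTIONS
    INSERT INTO NEW_TABLE VALUES new_table_array_object(i);
EXCEPTION
  WHEN bulk_errors THEN
    -- report each failed row; the rest of the batch was still processed
    FOR j IN 1 .. SQL%BULK_EXCEPTIONS.COUNT LOOP
      DBMS_OUTPUT.PUT_LINE(
           'Row '     || SQL%BULK_EXCEPTIONS(j).ERROR_INDEX
        || ' failed: ' || SQLERRM(-SQL%BULK_EXCEPTIONS(j).ERROR_CODE));
    END LOOP;
END;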
Hope this helps.
Below is an example:
CREATE OR REPLACE PROCEDURE fast_way IS
TYPE PartNum IS TABLE OF parent.part_num%TYPE
INDEX BY BINARY_INTEGER;
pnum_t PartNum;
TYPE PartName IS TABLE OF parent.part_name%TYPE
INDEX BY BINARY_INTEGER;
pnam_t PartName;
BEGIN
SELECT part_num, part_name
BULK COLLECT INTO pnum_t, pnam_t
FROM parent;
FOR i IN pnum_t.FIRST .. pnum_t.LAST
LOOP
pnum_t(i) := pnum_t(i) * 10;
END LOOP;
FORALL i IN pnum_t.FIRST .. pnum_t.LAST
INSERT INTO child
(part_num, part_name)
VALUES
(pnum_t(i), pnam_t(i));
COMMIT;
END;
/
The SQL engine parses and executes the SQL statements and, in some cases, returns data to the PL/SQL engine.
While a PL/SQL block is executing, every SQL statement causes a context switch between the two engines. When the PL/SQL engine finds a SQL statement, it stops and passes control to the SQL engine. The SQL engine executes the statement and returns the data to the PL/SQL engine. This transfer of control is called a context switch. Each individual switch is very fast, but performed a large number of times, context switches hurt performance.
With BULK COLLECT, the SQL engine retrieves all the rows, loads them into the collection, and switches back to the PL/SQL engine once. Using bulk collect, multiple rows can be fetched with a single context switch.
Example 1:
DECLARE
Type stcode_Tab IS TABLE OF demo_bulk_collect.storycode%TYPE;
Type category_Tab IS TABLE OF demo_bulk_collect.category%TYPE;
s_code stcode_Tab;
cat_tab category_Tab;
Start_Time NUMBER;
End_Time NUMBER;
CURSOR c1 IS
select storycode,category from DEMO_BULK_COLLECT;
BEGIN
Start_Time:= DBMS_UTILITY.GET_TIME;
FOR rec in c1
LOOP
NULL;
--insert into bulk_collect_a values(rec.storycode,rec.category);
END LOOP;
End_Time:= DBMS_UTILITY.GET_TIME;
DBMS_OUTPUT.PUT_LINE('Time for Standard Fetch :- ' || (End_Time - Start_Time) || ' hsecs'); -- GET_TIME returns hundredths of a second
Start_Time:= DBMS_UTILITY.GET_TIME;
Open c1;
FETCH c1 BULK COLLECT INTO s_code,cat_tab;
Close c1;
FOR x in s_code.FIRST..s_code.LAST
LOOP
null;
END LOOP;
End_Time:= DBMS_UTILITY.GET_TIME;
DBMS_OUTPUT.PUT_LINE('Using Bulk collect fetch time :- ' || (End_Time - Start_Time) || ' hsecs'); -- GET_TIME returns hundredths of a second
END;
CREATE OR REPLACE PROCEDURE APPS.XXPPL_xxhil_wrmtd_bulk
AS
CURSOR cur_postship_line
IS
SELECT ab.process, ab.machine, ab.batch_no, ab.sales_ord_no, ab.spec_no,
ab.fg_item_desc brand_job_name, ab.OPERATOR, ab.rundate, ab.shift,
ab.in_qty1 input_kg, ab.out_qty1 output_kg, ab.by_qty1 waste_kg,
-- null,
xxppl_reports_pkg.cf_waste_per (ab.org_id,ab.process,ab.out_qty1,ab.by_qty1,ab.batch_no,ab.shift,ab.rundate) waste_percentage,
ab.reason_desc reasons, ab.cause_desc cause,
to_char(to_date(ab.rundate),'MON')month,
to_char(to_date(ab.rundate),'yyyy')year,
xxppl_org_name (ab.org_id) plant
FROM (SELECT a.org_id,
(SELECT so_line_no
FROM xx_ppl_logbook_h x
WHERE x.batch_no = a.batch_no
AND x.org_id = a.org_id) so_line_no,
(SELECT spec_no
FROM xx_ppl_logbook_h x
WHERE x.batch_no = a.batch_no
AND x.org_id = a.org_id
and x.SALES_ORD_NO=a.sales_ord_no
and x.OPRN_ID =a.OPRN_ID
and x.RESOURCES = a.RESOURCES) SPEC_NO,
(SELECT OPERATOR
FROM xx_ppl_logbook_f y, xx_ppl_logbook_h z
WHERE y.org_id = a.org_id
AND y.batch_no = a.batch_no
AND y.rundate = a.rundate
AND a.process = y.process
AND a.resources = y.resources
AND a.oprn_id = y.oprn_id
AND z.org_id = y.org_id
AND z.batch_no = y.batch_no
AND z.batch_id = y.batch_id
AND z.rundate = y.rundate
AND z.process = y.process) OPERATOR,
a.batch_no, a.oprn_id, a.rundate, a.fg_item_desc,
a.resources, a.machine, a.sales_ord_no, a.process,
a.process_desc, a.tech_desc, a.sub_invt, a.by_qty1,
a.by_qty2, a.um1, a.um2, a.shift,
DECODE (shift, 'I', 1, 'II', 2, 'III', 3) shift_rank,
a.reason_desc, a.in_qty1, a.out_qty1, a.cause_desc
FROM xxppl_byproduct_waste_v a
WHERE 1 = 1
--AND a.org_id = (CASE WHEN :p_orgid IS NULL THEN a.org_id ELSE :p_orgid END)
AND a.rundate BETWEEN to_date('01-'||to_char(add_months(TRUNC(TO_DATE(sysdate,'DD-MON-RRRR')) +1, -2),'MON-RRRR'),'DD-MON-RRRR') AND SYSDATE
---AND a.process = (CASE WHEN :p_process IS NULL THEN a.process ELSE :p_process END)
) ab;
TYPE postship_list IS TABLE OF xxhil_wrmtd_tab%ROWTYPE;
postship_trns postship_list;
v_err_count NUMBER;
BEGIN
delete from xxhil_wrmtd_tab;
OPEN cur_postship_line;
FETCH cur_postship_line
BULK COLLECT INTO postship_trns;
FORALL i IN 1 .. postship_trns.COUNT SAVE EXCEPTIONS -- 1..COUNT avoids a NULL-range error when the fetch returns no rows
INSERT INTO xxhil_wrmtd_tab
VALUES postship_trns (i);
CLOSE cur_postship_line;
COMMIT;
END;
/
---- INSERTING DATA INTO TABLE WITH THE HELP OF A CURSOR