Bulk update with commit in Oracle SQL

I am performing a bulk update operation on a table of 1 million records. I need to COMMIT after every 5000 records. How can I do this?
update tab1 t1
set (col1,col2,col3,col4)=
(select col1,col2,col3,col4 from tab_m where row_id= t1.row_id);
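One way to commit every 5,000 rows in PL/SQL is to drive the update from ROWIDs fetched in batches and COMMIT after each FORALL batch. This is only a sketch: it assumes each tab1.row_id has at most one match in tab_m, and intermediate commits mean tab1 is left partially updated if the block fails midway.
DECLARE
    c_limit CONSTANT PLS_INTEGER := 5000;

    -- Only rows that actually have a matching tab_m row are touched.
    CURSOR c_rows IS
        SELECT t1.rowid AS rid
        FROM   tab1 t1
        WHERE  EXISTS (SELECT 1 FROM tab_m m WHERE m.row_id = t1.row_id);

    TYPE rid_t IS TABLE OF ROWID;
    l_rids rid_t;
BEGIN
    OPEN c_rows;
    LOOP
        FETCH c_rows BULK COLLECT INTO l_rids LIMIT c_limit;
        EXIT WHEN l_rids.COUNT = 0;

        FORALL i IN 1 .. l_rids.COUNT
            UPDATE tab1 t1
            SET    (col1, col2, col3, col4) =
                   (SELECT col1, col2, col3, col4
                    FROM   tab_m
                    WHERE  row_id = t1.row_id)
            WHERE  t1.rowid = l_rids(i);

        COMMIT;   -- one commit per 5,000-row batch
    END LOOP;
    CLOSE c_rows;
END;
/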

Per the question, if you want the update to continue even when individual records fail, with the failures logged, then I think you should go with Oracle's DML error logging clause. Hope this helps.
BEGIN
    DBMS_ERRLOG.CREATE_ERROR_LOG('TAB1');

    UPDATE tab1 t1
    SET (col1, col2, col3, col4) =
            (SELECT col1, col2, col3, col4
             FROM   tab_m
             WHERE  row_id = t1.row_id)
    LOG ERRORS REJECT LIMIT UNLIMITED;
END;
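DBMS_ERRLOG.CREATE_ERROR_LOG creates the log table under the default name ERR$_ plus the base table name, so after the update the rejected rows can be inspected with something like:
-- Rows rejected by the UPDATE above land in the default log table ERR$_TAB1
SELECT ora_err_number$,  -- Oracle error number
       ora_err_mesg$,    -- error message text
       ora_err_rowid$    -- ROWID of the row that failed to update
FROM   err$_tab1;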

If you are looking for a solution in PL/SQL, you can do it using BULK COLLECT and FORALL, as below:
DECLARE
    c_limit          PLS_INTEGER := 100;
    -- These two stand in for the parameters of the procedure this snippet
    -- was originally taken from; adjust them to your own values.
    department_id_in employees.department_id%TYPE := 50;
    increase_pct_in  NUMBER := 0.1;

    CURSOR employees_cur
    IS
        SELECT employee_id
          FROM employees
         WHERE department_id = department_id_in;

    TYPE employee_ids_t IS TABLE OF employees.employee_id%TYPE;

    l_employee_ids   employee_ids_t;
BEGIN
    OPEN employees_cur;

    LOOP
        FETCH employees_cur
            BULK COLLECT INTO l_employee_ids
            LIMIT c_limit;              -- every iteration fetches at most 100 rows

        EXIT WHEN l_employee_ids.COUNT = 0;

        FORALL indx IN 1 .. l_employee_ids.COUNT SAVE EXCEPTIONS
            UPDATE employees emp        -- updates up to 100 records in one go
               SET emp.salary = emp.salary + emp.salary * increase_pct_in
             WHERE emp.employee_id = l_employee_ids (indx);

        COMMIT;                         -- one commit per batch
    END LOOP;

    CLOSE employees_cur;
EXCEPTION
    WHEN OTHERS
    THEN
        IF SQLCODE = -24381
        THEN
            -- Capturing errors that occurred during the FORALL update
            FOR indx IN 1 .. SQL%BULK_EXCEPTIONS.COUNT
            LOOP
                DBMS_OUTPUT.put_line (
                       SQL%BULK_EXCEPTIONS (indx).ERROR_INDEX
                    || ': '
                    || SQL%BULK_EXCEPTIONS (indx).ERROR_CODE);
                -- <You can insert the error records into a table here>
            END LOOP;
        ELSE
            RAISE;
        END IF;
END;

Related

Keep lock after commit in loop with select for update

How can I keep the lock on the table after a commit in this situation?
for c in (
select from tbl1 .. for update
) loop
insert into tbl2 ..;
count := count + 1;
if count > 10000 then
commit;
count := 0;
end if;
end loop;
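You cannot keep a lock across a COMMIT: committing ends the transaction, releases the locks, and invalidates the FOR UPDATE cursor, so the next fetch typically fails with ORA-01002 (fetch out of sequence). A common workaround is to lock and process the rows in batches, each batch in its own transaction. Below is a minimal sketch; the columns id and copied are placeholders made up for the example, not columns from the question.
declare
    -- "id" and "copied" are hypothetical columns used only for this sketch
    cursor c_batch is
        select t.id
        from   tbl1 t
        where  t.copied = 'N'
        and    rownum <= 10000
        for update;
    type t_ids is table of tbl1.id%type;
    l_ids t_ids;
begin
    loop
        open c_batch;                 -- locks just this batch of rows
        fetch c_batch bulk collect into l_ids;
        close c_batch;
        exit when l_ids.count = 0;

        forall i in 1 .. l_ids.count
            insert into tbl2 (id) values (l_ids(i));

        forall i in 1 .. l_ids.count
            update tbl1 set copied = 'Y' where id = l_ids(i);

        commit;                       -- releases the locks held for this batch only
    end loop;
end;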

code goes into infinite loop when updating table's column

Experts, I'm new to PL/SQL programming.
More than 200 tables have an 'EXAMPLE' column.
I want to update the 'EXAMPLE' column to 'YES'.
The purpose of the following code is to update the 'EXAMPLE' column, committing every 10000 records.
But I think the following code enters an infinite loop.
Where am I making a mistake? How can I fix this?
declare
    v_match_count integer;
    table_name    varchar2(30);
begin
    v_match_count := 0;
    for tablolar in
    (
        SELECT table_name
        FROM   user_tab_columns, user_objects
        WHERE  user_tab_columns.table_name = user_objects.object_name
        AND    user_objects.object_type not in ('VIEW')
        AND    column_name IN ('FILE_NO', 'PROT_NO')
        GROUP  BY table_name
        HAVING Count(*) > 1
    ) loop
        begin
            -- v_match_count:=v_match_count+1;
            -- dbms_output.put_line(tablolar.table_name||' = '||v_match_count);
            WHILE TRUE LOOP
                IF tablolar.table_name||'.EXAMPLE' IS NOT NULL THEN --the line I changed in the code.
                    --dbms_output.put_line(tablolar.table_name||' = '||v_match_count||' girdi.');
                    execute immediate 'UPDATE HASTANE.'||tablolar.table_name||
                        ' SET EXAMPLE=''YES'' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL AND ROWNUM<10000';
                    COMMIT;
                END IF;
                IF tablolar.table_name||'.EXAMPLE' IS NULL THEN --the line I changed in the code.
                    EXIT;
                end if;
                -- v_match_count:=v_match_count+1;
            END LOOP;
            v_match_count := v_match_count + 1;
            dbms_output.put_line(tablolar.table_name||' = '||v_match_count);
        end;
    end loop;
end;
In your test, tablolar.table_name || '.EXAMPLE' is just a string concatenation, so it is never NULL and you never exit the inner loop. Try checking for the existence of the column 'EXAMPLE' in your query instead; then you can just do the update for each table and the loop will exit when all the tables have been processed.
declare
    v_match_count  integer;
    v_record_count integer;
    table_name     varchar2(30);
begin
    v_match_count  := 0;
    v_record_count := 0;
    for tablolar in
    (
        SELECT table_name
        FROM   user_tab_columns, user_objects
        WHERE  user_tab_columns.table_name = user_objects.object_name
        AND    user_objects.object_type not in ('VIEW')
        AND    column_name IN ('FILE_NO', 'PROT_NO')
        AND    EXISTS (SELECT * FROM user_tab_columns x
                       WHERE  x.table_name = user_tab_columns.table_name
                       AND    x.column_name = 'EXAMPLE')
        GROUP  BY table_name
        HAVING Count(*) > 1
    ) loop
        BEGIN
            --- Get the record count
            execute immediate 'SELECT COUNT(*) FROM HASTANE.'||tablolar.table_name||
                              ' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL'
                INTO v_record_count;
            --- Do we have anything to update?
            IF NVL(v_record_count, 0) > 0 THEN
                --- Update the NULL records in batches of 10000, committing after each batch
                LOOP
                    execute immediate 'UPDATE HASTANE.'||tablolar.table_name||
                        ' SET EXAMPLE=''YES'' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL AND ROWNUM <= 10000';
                    EXIT WHEN SQL%ROWCOUNT = 0;
                    COMMIT;
                END LOOP;
                --- COMMIT anything left since the last COMMIT
                COMMIT;
            END IF;
        end;
    end loop;
end;

character string buffer too small ORA-06502

I am having a problem concatenating VARCHAR2 values in a cursor loop.
The procedure iterates in a loop to build the IN clause for insert and delete operations in batches; the process runs in batches of 1000 account numbers.
For a small number of records it works, but when it tries to concatenate a large number of records (36451477 in the temp table) in a loop it throws:
java.sql.SQLException: ORA-06502: PL/SQL: numeric or value error:
character string buffer too small ORA-06512: at
"QA01BT.LOAD_ITEM_DATA_TO_CONSOLIDATE", line 23 ORA-06512: at line 1
I have set the maximum size of search_id to 32767, but it still does not work.
Is there any other way to achieve this?
create or replace PROCEDURE LOAD_ITEM_DATA_TO_CONSOLIDATE(updatecount OUT NUMBER
)
IS
cnt NUMBER := 0;
c_limit CONSTANT PLS_INTEGER DEFAULT 1000;
search_id varchar2(32727);
TYPE account_array
IS TABLE OF VARCHAR2(255) INDEX BY BINARY_INTEGER;
l_data ACCOUNT_ARRAY;
CURSOR account_cursor IS
SELECT DISTINCT account_no AS account_num
FROM item_temp;
BEGIN
OPEN account_cursor;
LOOP
FETCH account_cursor bulk collect INTO l_data limit c_limit;
search_id := '''';
FOR i IN 1 .. l_data.count LOOP
IF( i != 1 ) THEN
search_id := search_id
|| ','
|| ''''
|| l_data(i)
|| '''';
ELSE
search_id := search_id
|| l_data(i)
|| '''';
END IF;
END LOOP;
BEGIN
SAVEPOINT move_data_to_temp_table;
EXECUTE IMMEDIATE 'delete from item where ACCOUNT_NO IN('||search_id||')';
EXECUTE IMMEDIATE 'insert into item(ID,ACCOUNT_NO,ITEM_ID,ITEM_VALUE) select HIBERNATE_SEQUENCE.nextval,temp.ACCOUNT_NO,temp.ITEM_ID,temp.ITEM_VALUE from item_TEMP temp where ACCOUNT_NO IN('||search_id||')';
cnt := cnt + SQL%rowcount;
COMMIT;
EXCEPTION WHEN OTHERS THEN ROLLBACK to move_data_to_temp_table;
END;
EXIT WHEN account_cursor%NOTFOUND;
END LOOP;
updatecount := cnt;
CLOSE account_cursor;
END LOAD_ITEM_DATA_TO_CONSOLIDATE;
This seems somewhat over-engineered. Why not just this?
create or replace PROCEDURE LOAD_ITEM_DATA_TO_CONSOLIDATE
(updatecount OUT NUMBER)
IS
BEGIN
delete from item
where ACCOUNT_NO IN ( SELECT account_no
FROM item_temp);
insert into item(ID,ACCOUNT_NO,ITEM_ID,ITEM_VALUE)
select HIBERNATE_SEQUENCE.nextval, temp.ACCOUNT_NO, temp.ITEM_ID, temp.ITEM_VALUE
from item_TEMP temp ;
updatecount := SQL%rowcount;
END LOAD_ITEM_DATA_TO_CONSOLIDATE;
If you do decide you need to do this in batches, and are worried about that string getting too long or the IN list having too many elements (Oracle allows at most 1,000 literals in an IN list), you should try putting your values into a collection and then using IN against it via a table function, as the code below does.
Extra bonus: no need for dynamic SQL!
Something like this:
CREATE OR REPLACE TYPE strings_t IS TABLE OF VARCHAR2 (255)
/
CREATE OR REPLACE PROCEDURE load_item_data_to_consolidate (
updatecount OUT NUMBER)
IS
cnt NUMBER := 0;
c_limit CONSTANT PLS_INTEGER DEFAULT 1000;
l_data strings_t;
CURSOR account_cursor
IS
SELECT DISTINCT account_no AS account_num FROM item_temp;
BEGIN
OPEN account_cursor;
LOOP
FETCH account_cursor BULK COLLECT INTO l_data LIMIT c_limit;
BEGIN
SAVEPOINT move_data_to_temp_table;
DELETE FROM item
WHERE account_no IN (SELECT COLUMN_VALUE FROM TABLE (l_data));
INSERT INTO item (id,
account_no,
item_id,
item_value)
SELECT hibernate_sequence.NEXTVAL,
temp.account_no,
temp.item_id,
temp.item_value
FROM item_temp temp
WHERE account_no IN (SELECT COLUMN_VALUE FROM TABLE (l_data));
cnt := cnt + SQL%ROWCOUNT;
COMMIT;
EXCEPTION
WHEN OTHERS
THEN
ROLLBACK TO move_data_to_temp_table;
END;
EXIT WHEN account_cursor%NOTFOUND;
END LOOP;
CLOSE account_cursor;
updatecount := cnt;
END;
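A quick way to exercise either version of the procedure and see the row count it returns (SET SERVEROUTPUT ON is the SQL*Plus/SQL Developer setting that makes DBMS_OUTPUT visible):
SET SERVEROUTPUT ON

DECLARE
   l_count NUMBER;
BEGIN
   load_item_data_to_consolidate (updatecount => l_count);
   DBMS_OUTPUT.put_line ('Rows consolidated: ' || l_count);
END;
/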

How do I use select in loop continue when statement?

I want to continue an Oracle SQL loop when I find more than one result in a query. Here is my simplified code:
declare
cursor foo_cursor is select * from foo_table;
foo foo_cursor%ROWTYPE;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
continue when ( -- the next query has entries or an entry,
-- but how do I do this?
select count(*) from bar_table where bar_column=foo.foo_column
group by bar_column having count(1)>1;
)
insert into uninterresting_table (some_column) VALUES
(foo.foo_column);
end loop;
close foo_cursor;
end;
It looks like you only want to act on the records in FOO_TABLE when at least two related records exist in BAR_TABLE. You can alter the definition of foo_cursor to take that requirement into account as shown below. That way you don't need to iteratively check for the existence of a record in BAR_TABLE each time.
declare
cursor foo_cursor is
select *
from foo_table foo
where exists (select 1 from bar_table bar
where bar.bar_column = foo.foo_column
having count(*) > 1);
foo foo_cursor%ROWTYPE;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
insert into uninteresting_table (some_column) VALUES
(foo.foo_column);
end loop;
close foo_cursor;
end;
/
On the other hand if you are looking to skip records in FOO_TABLE that already have two or more records in BAR_TABLE, you can just invert the existence check and all else would be the same:
declare
cursor foo_cursor is
select *
from foo_table foo
where NOT exists (select 1 from bar_table bar
where bar.bar_column = foo.foo_column
having count(*) > 1);
foo foo_cursor%ROWTYPE;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
insert into uninteresting_table (some_column) VALUES
(foo.foo_column);
end loop;
close foo_cursor;
end;
/
If you want to process all records in FOO_TABLE but do additional actions when two or more records exist in BAR_TABLE, you can still do that with a change to your foo_cursor:
declare
cursor foo_cursor is
select foo.*
, case when exists (select 1 from bar_table bar
where bar.bar_column = foo.foo_column
having count(*) > 1)
then 'Y'
else 'N'
end has_two_or_more
from foo_table foo;
foo foo_cursor%ROWTYPE;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
continue when foo.has_two_or_more = 'Y';
insert into uninteresting_table (some_column) VALUES
(foo.foo_column);
end loop;
close foo_cursor;
end;
/
If the [select has ... one or more entries] I want to go into the next iteration, else do more stuff in this iteration.
If there are no records in bar_table you want to do some more processing; otherwise you want to skip it. There's a way to do that: GOTO.
Oh yes :)
declare
cursor foo_cursor is select * from foo_table;
foo foo_cursor%ROWTYPE;
n pls_integer;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
select count(*) into n
from bar_table
where bar_column=foo.foo_column;
if n > 1 then
goto skip_point;
end if;
insert into uninterresting_table (some_column) VALUES
(foo.foo_column);
<< skip_point >>
null; -- a label must be attached to a statement
end loop;
close foo_cursor;
end;
Obviously you could just put the whole skippable section into a branch of an IF .. ELSE statement, but where's the fun in that?
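For reference, roughly what that IF .. ELSE version would look like (same logic as the GOTO version: skip the insert when more than one matching row exists in bar_table):
declare
    cursor foo_cursor is select * from foo_table;
    foo foo_cursor%ROWTYPE;
    n pls_integer;
begin
    open foo_cursor;
    loop
        fetch foo_cursor into foo;
        exit when foo_cursor%NOTFOUND;
        select count(*) into n
        from bar_table
        where bar_column = foo.foo_column;
        if n <= 1 then
            insert into uninterresting_table (some_column)
            values (foo.foo_column);
        end if;
    end loop;
    close foo_cursor;
end;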
So thanks to @APC and @Sentinel: I still made my version work, but your answers pointed me in the right direction:
declare
cursor foo_cursor is select * from foo_table;
foo foo_cursor%ROWTYPE;
n pls_integer;
begin
open foo_cursor;
loop
fetch foo_cursor into foo;
exit when foo_cursor%NOTFOUND;
begin
select count(1) into n from bar_table where bar_column=foo.foo_column
group by bar_column;
exception when NO_DATA_FOUND then continue;
end;
continue when (n>1);
insert into uninterresting_table (some_column) VALUES
(foo.foo_column);
-- do some more stuff
end loop;
close foo_cursor;
end;

INSERT and UPDATE a record using cursors in oracle

I have two tables, student and studLoad, both having two fields: studID and studName. I want to load data from the student table into the studLoad table.
If the data already exists in the studLoad table then it should be updated, otherwise it should be inserted. Following is my code to do so:
create or replace procedure studentLoad is
    v_id   student.studID%type;
    v_name student.studName%type;
    v_sn   studLoad.studName%type;
    cursor cur_load is
        select * from student;
begin
    open cur_load;
    loop
        fetch cur_load into v_id, v_name;
        exit when cur_load%notfound;
        select studName into v_sn from studLoad where studID = v_id;
        if (v_sn != v_name) then
            update studLoad set studName = v_name where studID = v_id;
        else
            insert into studLoad values (v_id, v_name);
            dbms_output.put_line(v_id || ' ' || v_name);
        end if;
    end loop;
    close cur_load;
end;
It's not working; the rows in the studLoad table are not updated. How do I solve this? In SQL Server we use IF EXISTS(select...from stuLoad..) to check whether the record exists in the table. Is there a way to do the same in Oracle? If yes, please let me know.
This is a highly inefficient way of doing it. You can use the merge statement and then there's no need for cursors, looping or (if you can do without) PL/SQL.
MERGE INTO studLoad l
USING ( SELECT studId, studName FROM student ) s
ON (l.studId = s.studId)
WHEN MATCHED THEN
UPDATE SET l.studName = s.studName
WHERE l.studName != s.studName
WHEN NOT MATCHED THEN
INSERT (l.studID, l.studName)
VALUES (s.studId, s.studName)
Make sure you commit, once completed, in order to be able to see this in the database.
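If you want to keep the stored-procedure interface from the question, the same MERGE can simply be wrapped in it; a sketch reusing the statement above:
create or replace procedure studentLoad is
begin
    merge into studLoad l
    using (select studID, studName from student) s
    on (l.studID = s.studID)
    when matched then
        update set l.studName = s.studName
        where l.studName != s.studName
    when not matched then
        insert (l.studID, l.studName)
        values (s.studID, s.studName);
    commit;
end;
/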
To actually answer your question, I would do it something like the following. This has the benefit of doing most of the work in SQL and only updating based on the ROWID, a unique address within the table.
It declares a collection type, bulk collects the data into it 10,000 rows at a time, and then processes those rows individually.
However, as I said, this will not be as efficient as MERGE.
declare
cursor c_data is
select b.rowid as rid, a.studId, a.studName
from student a
left outer join studLoad b
on a.studId = b.studId
where b.rowid is null            -- no match in studLoad: needs an insert
or a.studName <> b.studName      -- match with a different name: needs an update
;
type t__data is table of c_data%rowtype index by binary_integer;
t_data t__data;
begin
open c_data;
loop
fetch c_data bulk collect into t_data limit 10000;
exit when t_data.count = 0;
for idx in t_data.first .. t_data.last loop
if t_data(idx).rid is null then
insert into studLoad (studId, studName)
values (t_data(idx).studId, t_data(idx).studName);
else
update studLoad
set studName = t_data(idx).studName
where rowid = t_data(idx).rid
;
end if;
end loop;
end loop;
close c_data;
end;
/
If you would like to use your procedure, consider changing some lines:
create or replace procedure studentLoad is
    v_id   student.studID%type;
    v_name student.studName%type;
    v_sn   studLoad.studName%type;
    cursor cur_load is
        select * from student;
begin
    open cur_load;
    loop
        fetch cur_load into v_id, v_name;
        exit when cur_load%notfound;
        begin
            select studName into v_sn from studLoad where studID = v_id;
            if (v_sn != v_name) then
                update studLoad set studName = v_name where studID = v_id;
            end if;
        exception
            when no_data_found then
                insert into studLoad values (v_id, v_name);
        end;
        dbms_output.put_line(v_id || ' ' || v_name);
    end loop;
    close cur_load;
end;
I think it should work, didn't test it.