Keep lock after commit in loop with SELECT FOR UPDATE

How can I keep the lock on the table after a commit in this situation?
for c in (
    select from tbl1 .. for update
) loop
    insert into tbl2 ..;
    count := count + 1;
    if count > 10000 then
        commit;
        count := 0;
    end if;
end loop;
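For context: in Oracle a COMMIT ends the transaction and releases every lock it holds, so the FOR UPDATE lock cannot be kept across the commit. Either commit once at the end, or make each batch its own transaction that re-locks the rows it still needs. A minimal sketch of the latter, assuming an unprocessed-rows flag; the column names (processed, col1) are invented for illustration and are not from the question:
DECLARE
    l_rows_locked BOOLEAN := TRUE;
BEGIN
    WHILE l_rows_locked LOOP
        l_rows_locked := FALSE;
        -- lock and process at most 10000 unprocessed rows per transaction
        FOR c IN (SELECT t.rowid AS rid, t.col1
                    FROM tbl1 t
                   WHERE t.processed = 'N'
                     AND ROWNUM <= 10000
                     FOR UPDATE) LOOP
            l_rows_locked := TRUE;
            INSERT INTO tbl2 (col1) VALUES (c.col1);
            UPDATE tbl1 SET processed = 'Y' WHERE rowid = c.rid;
        END LOOP;
        COMMIT;  -- releases the locks; the next pass re-acquires them
    END LOOP;
END;
/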


code goes into infinite loop when updating table's column

I'm new to PL/SQL programming.
More than 200 tables have an 'EXAMPLE' column.
I want to set the 'EXAMPLE' column to 'YES', committing after every 10000 records.
But I think the following code enters an infinite loop.
Where am I making a mistake, and how can I fix it?
declare
    v_match_count integer;
    table_name varchar2(30);
begin
    v_match_count := 0;
    for tablolar in
    (
        SELECT table_name
        FROM user_tab_columns, user_objects
        WHERE user_tab_columns.table_name = user_objects.object_name
          AND user_objects.object_type not in ('VIEW')
          AND column_name IN ('FILE_NO', 'PROT_NO')
        GROUP BY table_name
        HAVING Count(*) > 1
    ) loop
        begin
            -- v_match_count := v_match_count + 1;
            -- dbms_output.put_line(tablolar.table_name||' = '||v_match_count);
            WHILE TRUE LOOP
                IF tablolar.table_name||'.EXAMPLE' IS NOT NULL THEN --the line I changed in the code.
                    --dbms_output.put_line(tablolar.table_name||' = '||v_match_count||' girdi.');
                    execute immediate 'UPDATE HASTANE.'||tablolar.table_name||' SET EXAMPLE=''YES'' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL AND ROWNUM<10000';
                    COMMIT;
                END IF;
                IF tablolar.table_name||'.EXAMPLE' IS NULL THEN --the line I changed in the code.
                    EXIT;
                end if;
                -- v_match_count := v_match_count + 1;
            END LOOP;
            v_match_count := v_match_count + 1;
            dbms_output.put_line(tablolar.table_name||' = '||v_match_count);
        end;
    end loop;
end;
In the test, tablolar.table_name||'.EXAMPLE' is just a non-empty string literal (the table name with '.EXAMPLE' appended), not the column's value, so it is never NULL and the EXIT is never reached. Check for the existence of the 'EXAMPLE' column in your query instead; then you can just do the update for each table, and the loop will exit when all the records have been processed.
declare
    v_match_count integer;
    v_record_count integer;
    table_name varchar2(30);
begin
    v_match_count := 0;
    v_record_count := 0;
    for tablolar in
    (
        SELECT table_name
        FROM user_tab_columns, user_objects
        WHERE user_tab_columns.table_name = user_objects.object_name
          AND user_objects.object_type not in ('VIEW')
          AND column_name IN ('FILE_NO', 'PROT_NO')
          AND EXISTS (SELECT * FROM user_tab_columns x
                      WHERE x.table_name = user_tab_columns.table_name
                        AND x.column_name = 'EXAMPLE')
        GROUP BY table_name
        HAVING Count(*) > 1
    ) loop
        BEGIN
            --- Get the record count
            execute immediate 'SELECT COUNT(*) FROM HASTANE.'||tablolar.table_name||' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL'
                INTO v_record_count;
            --- Do we have anything to update?
            IF NVL(v_record_count, 0) > 0 THEN
                --- Update the NULL records in batches of 10000, committing each batch
                FOR v_match_count IN 1 .. CEIL(v_record_count / 10000) LOOP
                    execute immediate 'UPDATE HASTANE.'||tablolar.table_name||' SET EXAMPLE=''YES'' WHERE '||tablolar.table_name||'.EXAMPLE IS NULL AND ROWNUM <= 10000';
                    COMMIT;
                END LOOP;
            END IF;
        end;
    end loop;
end;
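A sketch of an alternative batching pattern (not from the original answer) that skips the up-front COUNT(*): repeat a capped UPDATE until it touches no rows. Here some_table is a placeholder for one of the real table names; the dynamic-SQL version above would build the same statement per table.
BEGIN
    LOOP
        UPDATE hastane.some_table   -- placeholder table name
           SET example = 'YES'
         WHERE example IS NULL
           AND ROWNUM <= 10000;
        EXIT WHEN SQL%ROWCOUNT = 0; -- nothing left to update
        COMMIT;                     -- commit each batch of up to 10000 rows
    END LOOP;
    COMMIT;
END;
/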

character string buffer too small ORA-06502

I am having a problem concatenating varchar2 values in a cursor loop.
The procedure iterates in a loop to build the IN clause for insert and delete operations in batches; the process runs in batches of 1000 account numbers.
For a small number of records it works, but when it tries to concatenate a large number of records (36451477 in the temp table) in the loop, it throws:
java.sql.SQLException: ORA-06502: PL/SQL: numeric or value error:
character string buffer too small ORA-06512: at
"QA01BT.LOAD_ITEM_DATA_TO_CONSOLIDATE", line 23 ORA-06512: at line 1
I have put a maximum size of 32767 on search_id, but it still does not work.
Is there any other way to achieve this?
create or replace PROCEDURE LOAD_ITEM_DATA_TO_CONSOLIDATE(updatecount OUT NUMBER)
IS
    cnt NUMBER := 0;
    c_limit CONSTANT PLS_INTEGER DEFAULT 1000;
    search_id varchar2(32727);
    TYPE account_array IS TABLE OF VARCHAR2(255) INDEX BY BINARY_INTEGER;
    l_data ACCOUNT_ARRAY;
    CURSOR account_cursor IS
        SELECT DISTINCT account_no AS account_num
        FROM item_temp;
BEGIN
    OPEN account_cursor;
    LOOP
        FETCH account_cursor bulk collect INTO l_data limit c_limit;
        search_id := '''';
        FOR i IN 1 .. l_data.count LOOP
            IF (i != 1) THEN
                search_id := search_id || ',' || '''' || l_data(i) || '''';
            ELSE
                search_id := search_id || l_data(i) || '''';
            END IF;
        END LOOP;
        BEGIN
            SAVEPOINT move_data_to_temp_table;
            EXECUTE IMMEDIATE 'delete from item where ACCOUNT_NO IN ('||search_id||')';
            EXECUTE IMMEDIATE 'insert into item(ID,ACCOUNT_NO,ITEM_ID,ITEM_VALUE) select HIBERNATE_SEQUENCE.nextval,temp.ACCOUNT_NO,temp.ITEM_ID,temp.ITEM_VALUE from item_TEMP temp where ACCOUNT_NO IN ('||search_id||')';
            cnt := cnt + SQL%rowcount;
            COMMIT;
        EXCEPTION
            WHEN OTHERS THEN ROLLBACK to move_data_to_temp_table;
        END;
        EXIT WHEN account_cursor%NOTFOUND;
    END LOOP;
    updatecount := cnt;
    CLOSE account_cursor;
END LOAD_ITEM_DATA_TO_CONSOLIDATE;
This seems somewhat over-engineered. Why not just this?
create or replace PROCEDURE LOAD_ITEM_DATA_TO_CONSOLIDATE
    (updatecount OUT NUMBER)
IS
BEGIN
    delete from item
    where ACCOUNT_NO IN (SELECT account_no
                         FROM item_temp);
    insert into item (ID, ACCOUNT_NO, ITEM_ID, ITEM_VALUE)
    select HIBERNATE_SEQUENCE.nextval, temp.ACCOUNT_NO, temp.ITEM_ID, temp.ITEM_VALUE
    from item_TEMP temp;
    updatecount := SQL%rowcount;
END LOAD_ITEM_DATA_TO_CONSOLIDATE;
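For completeness, a minimal calling sketch (my addition, assuming the procedure above compiled): updatecount comes back as the INSERT's SQL%ROWCOUNT, and the caller decides when to commit, since the procedure itself does not.
DECLARE
    n NUMBER;
BEGIN
    LOAD_ITEM_DATA_TO_CONSOLIDATE(n);
    DBMS_OUTPUT.put_line(n || ' rows inserted');
    COMMIT;
END;
/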
If you do decide you need to do this in batches and are worried about that string getting too long or having too many elements in the list (max is 1000), you should try putting your values into an array and then using IN against the array, via a table function or a direct reference to the table.
Extra bonus: no need for dynamic SQL!
Something like this:
CREATE OR REPLACE TYPE strings_t IS TABLE OF VARCHAR2 (255)
/
CREATE OR REPLACE PROCEDURE load_item_data_to_consolidate (
    updatecount OUT NUMBER)
IS
    cnt NUMBER := 0;
    c_limit CONSTANT PLS_INTEGER DEFAULT 1000;
    l_data strings_t;
    CURSOR account_cursor
    IS
        SELECT DISTINCT account_no AS account_num FROM item_temp;
BEGIN
    OPEN account_cursor;
    LOOP
        FETCH account_cursor BULK COLLECT INTO l_data LIMIT c_limit;
        BEGIN
            SAVEPOINT move_data_to_temp_table;
            DELETE FROM item
             WHERE account_no IN (SELECT COLUMN_VALUE FROM TABLE (l_data));
            INSERT INTO item (id,
                              account_no,
                              item_id,
                              item_value)
                SELECT hibernate_sequence.NEXTVAL,
                       temp.account_no,
                       temp.item_id,
                       temp.item_value
                  FROM item_temp temp
                 WHERE account_no IN (SELECT COLUMN_VALUE FROM TABLE (l_data));
            cnt := cnt + SQL%ROWCOUNT;
            COMMIT;
        EXCEPTION
            WHEN OTHERS
            THEN
                ROLLBACK TO move_data_to_temp_table;
        END;
        EXIT WHEN account_cursor%NOTFOUND;
    END LOOP;
    CLOSE account_cursor;
    updatecount := cnt;   -- was missing in the original: return the count
END;

Query doesn't update all rows

There's a quite big table, more than 10,000,000 rows. It has the columns OBJ_ID, DATE_OF_CHANGE and USER, and I added a new column, RECORD_ID, which is empty for now.
I need to update it so that RECORD_ID holds numeric values ascending by OBJ_ID and DATE_OF_CHANGE.
I came up with this:
CREATE SEQUENCE REC_ID_SEQ
    START WITH 1
    INCREMENT BY 1
    CACHE 100;
/
CREATE OR REPLACE TRIGGER TRG_REC_ID_SEQ
    BEFORE INSERT ON T_HISTORY
    FOR EACH ROW
BEGIN
    :NEW.RECORD_ID := REC_ID_SEQ.NEXTVAL;
END;
/
DECLARE
    O_ID NUMBER := 0;
    S_DATE DATE := SYSDATE;
    HIST_NUM NUMBER := 0;
    LOOP_COUNT NUMBER := 0;
BEGIN
    FOR O IN (SELECT ROWID ROW_ID, D.* FROM T_HISTORY D ORDER BY D.OBJ_ID, D.DATE_OF_CHANGE)
    LOOP
        LOOP_COUNT := LOOP_COUNT + 1;
        IF O.OBJ_ID != O_ID OR O.DATE_OF_CHANGE != S_DATE THEN
            HIST_NUM := HIST_NUM + 1;
        END IF;
        UPDATE T_HISTORY T SET T.RECORD_ID = HIST_NUM WHERE T.ROWID = O.ROW_ID;
        O_ID := O.OBJ_ID;
        S_DATE := O.DATE_OF_CHANGE;
        IF LOOP_COUNT > 100000 THEN
            COMMIT;
            LOOP_COUNT := 0;
        END IF;
    END LOOP;
END;
/
But when the command finishes (with no errors), I see that about half of the rows were not updated. How do I do this the right way?
Use the MERGE command and the ROWID pseudocolumn as a substitute for a primary key:
merge into T_HISTORY t
using (
    select rownum as xx, t.*
    from (
        select t.*, rowid as x_rowid
        from T_HISTORY t
        order by OBJ_ID, DATE_OF_CHANGE
    ) t
) xx
on (xx.x_rowid = t.rowid)
when matched then update
    set t.RECORD_ID = xx.xx;
Live demo: http://sqlfiddle.com/#!4/aad05/2
Similar to krokodilko's solution, using an analytic function. Note that this variant assumes (obj_id, date_of_change) identifies a row uniquely; with duplicates the MERGE fails with ORA-30926, which is why the rowid-based version above is safer:
MERGE INTO t_history t
USING (SELECT obj_id,
date_of_change,
ROW_NUMBER () OVER (ORDER BY obj_id, date_of_change) rn
FROM t_history) r
ON (t.obj_id = r.obj_id AND t.date_of_change = r.date_of_change)
WHEN MATCHED
THEN
UPDATE SET t.record_id = r.rn;

Bulk update with commit in oracle

I am performing a bulk update operation on a table of 1 million records, and I need to COMMIT after every 5000 records. How can I do that?
update tab1 t1
set (col1,col2,col3,col4)=
(select col1,col2,col3,col4 from tab_m where row_id= t1.row_id);
Per the question, if you want the update to keep going even when a record fails, with the failures logged, then you should go with Oracle's DML error logging clause. Hope this helps.
BEGIN
    DBMS_ERRLOG.CREATE_ERROR_LOG('TAB1');
    UPDATE tab1 t1
    SET (col1, col2, col3, col4) =
        (SELECT col1, col2, col3, col4
           FROM tab_m
          WHERE row_id = t1.row_id)
    LOG ERRORS REJECT LIMIT UNLIMITED;
END;
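Rejected rows then land in the error table that DBMS_ERRLOG.CREATE_ERROR_LOG generates (named ERR$_TAB1 by default), where they can be inspected afterwards:
SELECT ora_err_number$, ora_err_mesg$, ora_err_rowid$
FROM err$_tab1;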
If you are looking for a solution in PL/SQL, you can do it by using BULK COLLECT and FORALL as below:
DECLARE
    c_limit PLS_INTEGER := 100;
    -- These two came in as parameters in the original snippet; they are
    -- declared here with example values so the block is self-contained.
    department_id_in employees.department_id%TYPE := 50;
    increase_pct_in NUMBER := 0.1;
    CURSOR employees_cur
    IS
        SELECT employee_id
        FROM employees
        WHERE department_id = department_id_in;
    TYPE employee_ids_t IS TABLE OF employees.employee_id%TYPE;
    l_employee_ids employee_ids_t;
BEGIN
    OPEN employees_cur;
    LOOP
        FETCH employees_cur
            BULK COLLECT INTO l_employee_ids
            LIMIT c_limit; -- This will make sure that every iteration has at most 100 records selected
        EXIT WHEN l_employee_ids.COUNT = 0;
        FORALL indx IN 1 .. l_employee_ids.COUNT SAVE EXCEPTIONS
            UPDATE employees emp -- Updating up to 100 records in one go.
               SET emp.salary = emp.salary + emp.salary * increase_pct_in
             WHERE emp.employee_id = l_employee_ids(indx);
        COMMIT;
    END LOOP;
    CLOSE employees_cur;
EXCEPTION
    WHEN OTHERS
    THEN
        IF SQLCODE = -24381
        THEN
            FOR indx IN 1 .. SQL%BULK_EXCEPTIONS.COUNT
            LOOP
                -- Capturing errors that occurred during the update
                DBMS_OUTPUT.put_line (
                       SQL%BULK_EXCEPTIONS (indx).ERROR_INDEX
                    || ': '
                    || SQL%BULK_EXCEPTIONS (indx).ERROR_CODE);
                --<You can insert the error records into a table here>
            END LOOP;
        ELSE
            RAISE;
        END IF;
END;
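Applied to the tables in the question, a sketch of the same pattern committing every 5000 rows could look like this (it assumes tab_m.row_id is the join key, as in the question's UPDATE):
DECLARE
    c_limit CONSTANT PLS_INTEGER := 5000;
    CURSOR src_cur IS
        SELECT row_id FROM tab_m;
    TYPE row_ids_t IS TABLE OF tab_m.row_id%TYPE;
    l_row_ids row_ids_t;
BEGIN
    OPEN src_cur;
    LOOP
        FETCH src_cur BULK COLLECT INTO l_row_ids LIMIT c_limit;
        EXIT WHEN l_row_ids.COUNT = 0;
        FORALL indx IN 1 .. l_row_ids.COUNT
            UPDATE tab1 t1
               SET (col1, col2, col3, col4) =
                   (SELECT col1, col2, col3, col4
                      FROM tab_m m
                     WHERE m.row_id = l_row_ids(indx))
             WHERE t1.row_id = l_row_ids(indx);
        COMMIT;  -- commit after each batch of 5000 rows
    END LOOP;
    CLOSE src_cur;
END;
/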

INSERT and UPDATE a record using cursors in oracle

I have 2 tables, student and studLoad, both having 2 fields, studID and studName. I want to load data from the student table into the studLoad table.
If the data already exists in the studLoad table it should be updated, otherwise it should be inserted. The following is my code to do so:
create or replace procedure studentLoad is
    v_id student.studID%type;
    v_name student.studName%type;
    v_sn studLoad.studName%type;
    cursor cur_load is
        select * from student;
begin
    open cur_load;
    loop
        fetch cur_load into v_id, v_name;
        exit when cur_load%notfound;
        select studName into v_sn from studLoad where studID = v_id;
        if (v_sn != v_name) then
            update studLoad set studName = v_name where studID = v_id;
        else
            insert into studLoad values (v_id, v_name);
            dbms_output.put_line(v_id || ' ' || v_name);
        end if;
    end loop;
    close cur_load;
end;
It's not working: the rows in the studLoad table are not updated. How do I solve this? In SQL Server we use IF EXISTS(select ... from studLoad ...) to check if the record exists in the table. Is there a way to do the same in Oracle? If yes, please let me know.
This is a highly inefficient way of doing it. You can use the merge statement and then there's no need for cursors, looping or (if you can do without) PL/SQL.
MERGE INTO studLoad l
USING ( SELECT studId, studName FROM student ) s
ON (l.studId = s.studId)
WHEN MATCHED THEN
UPDATE SET l.studName = s.studName
WHERE l.studName != s.studName
WHEN NOT MATCHED THEN
INSERT (l.studID, l.studName)
VALUES (s.studId, s.studName)
Make sure you commit, once completed, in order to be able to see this in the database.
To actually answer your question, I would do it something like the following. This has the benefit of doing most of the work in SQL and only updating based on the ROWID, a unique address in the table.
It declares a collection type, into which the data is bulk-fetched 10,000 rows at a time, and then processes those rows individually.
However, as I say, this will not be as efficient as MERGE.
declare
    cursor c_data is
        select b.rowid as rid, a.studId, a.studName
        from student a
        left outer join studLoad b
            on a.studId = b.studId
        -- keep only rows missing from studLoad (rid is null) or with a stale name
        where b.rowid is null
           or a.studName <> b.studName;
    type t__data is table of c_data%rowtype index by binary_integer;
    t_data t__data;
begin
    open c_data;
    loop
        fetch c_data bulk collect into t_data limit 10000;
        exit when t_data.count = 0;
        for idx in t_data.first .. t_data.last loop
            if t_data(idx).rid is null then
                insert into studLoad (studId, studName)
                values (t_data(idx).studId, t_data(idx).studName);
            else
                update studLoad
                set studName = t_data(idx).studName
                where rowid = t_data(idx).rid;
            end if;
        end loop;
    end loop;
    close c_data;
end;
/
If you would like to use your procedure, consider changing some lines:
create or replace procedure studentLoad is
    v_id student.studID%type;
    v_name student.studName%type;
    v_sn studLoad.studName%type;
    cursor cur_load is
        select * from student;
begin
    open cur_load;
    loop
        fetch cur_load into v_id, v_name;
        exit when cur_load%notfound;
        begin
            select studName into v_sn from studLoad where studID = v_id;
            if (v_sn != v_name) then
                update studLoad set studName = v_name where studID = v_id;
            end if;
        exception
            when no_data_found then
                insert into studLoad values (v_id, v_name);
        end;
        dbms_output.put_line(v_id || ' ' || v_name);
    end loop;
    close cur_load;
end;
I think it should work; I didn't test it.