I have been stuck on this error for a few hours.
I have parameterized this SELECT query
--Original query
SELECT DISTINCT col1 FROM table1;
to this, which errors out with ORA-00904: "A"."col1": invalid identifier
00904. 00000 - "%s: invalid identifier"
--Parameterized query
SELECT DISTINCT A.col1 as col1 FROM (SELECT cont_code FROM dt5_campaign_code
where code_id = &test_cont_id) A;
The nested query on its own:
SELECT cont_code FROM dt5_campaign_code
where code_id = &test_cont_id;
Found a solution here. Please help me translate it for my query.
SELECT DISTINCT A.col1 as col1
FROM (
SELECT cont_code
FROM dt5_campaign_code
where code_id = &test_cont_id
) A;
The sub-query A does not contain a col1 column.
You either want to use cont_code in the outer query:
SELECT DISTINCT A.cont_code as col1
FROM (
SELECT cont_code
FROM dt5_campaign_code
where code_id = &test_cont_id
) A;
or, alias cont_code to col1 in the inner query:
SELECT DISTINCT A.col1 as col1
FROM (
SELECT cont_code AS col1
FROM dt5_campaign_code
where code_id = &test_cont_id
) A;
or, get rid of the sub-query:
SELECT DISTINCT cont_code as col1
FROM dt5_campaign_code
WHERE code_id = &test_cont_id;
As a dynamic statement:
DECLARE
v_table_name VARCHAR2(30);
v_cur SYS_REFCURSOR;
v_str VARCHAR2(4000);
BEGIN
SELECT cont_code
INTO v_table_name
FROM dt5_campaign_code
WHERE code_id = 1; --&test_cont_id;
OPEN v_cur FOR 'SELECT DISTINCT col1 FROM ' || v_table_name;
LOOP
FETCH v_cur INTO v_str;
EXIT WHEN v_cur%NOTFOUND;
DBMS_OUTPUT.PUT_LINE(v_str);
END LOOP;
CLOSE v_cur;
END;
/
fiddle
Related
The query runs slowly in Oracle PL/SQL. It takes about 8 minutes in PL/SQL, whereas it takes only 4 seconds when run in SQL Editor or SQL*Plus.
Please let me know what is causing this. Is it possible that a different execution plan is picked up by SQL and PL/SQL?
----SQL Editor query---takes 4 seconds---400 row count--
SELECT count(*) FROM
(
SELECT col1, col2
FROM
my_tab1 t1, my_tab2 t2
WHERE
t1.pk_col1=t2.pk_col1
and t1.created_date < t2.created_date
)
--PL/SQL Code--takes about 8 minutes---400 row count--
DECLARE
v_cnt PLS_INTEGER:=0;
BEGIN
SELECT count(*) INTO v_cnt
FROM
(
SELECT col1, col2
FROM
my_tab1 t1, my_tab2 t2
WHERE
t1.pk_col1=t2.pk_col1
and t1.created_date < t2.created_date
);
END;
/
The easiest way to capture the execution plan within an anonymous block is to call dbms_xplan.display_cursor in a loop and print each line of output:
declare
v_cnt pls_integer;
begin
execute immediate 'alter session set statistics_level = ALL';
select count(*) into v_cnt
from
(
select col1, col2
from my_tab1 t1, my_tab2 t2
where t1.pk_col1 = t2.pk_col1
and t1.created_date < t2.created_date
);
for r in (
select p.plan_table_output
from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST -OUTLINE +NOTE +PREDICATE +IOSTATS +REPORT')) p
)
loop
dbms_output.put_line(r.plan_table_output);
end loop;
end;
You can make the same call from a SQL*Plus command prompt immediately after executing a SQL statement, but you first have to disable dbms_output, as otherwise the SQL statement is not the 'last' statement you made. (You can also specify the sql_id if you know it.) For more details see the dbms_xplan documentation.
set serverout off
alter session set statistics_level = ALL;
select count(*)
from
(
select col1, col2
from my_tab1 t1, my_tab2 t2
where t1.pk_col1 = t2.pk_col1
and t1.created_date < t2.created_date
);
select p.plan_table_output
from table(dbms_xplan.display_cursor(null,null,'ALLSTATS LAST +OUTLINE +ADAPTIVE +PARTITION +NOTE')) p;
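If you know the sql_id (you can look it up in v$sql), you can pass it to display_cursor explicitly instead of relying on the statement being the last one executed. A minimal sketch, assuming the statement can be found by its text; the sql_id and child number below are placeholders:
-- look up the sql_id and child number first (the LIKE pattern is only an example)
select sql_id, child_number
from v$sql
where sql_text like 'select count(*)%my_tab1%';
-- then pass them explicitly; 'b2xk93z5qhs4m' is a placeholder sql_id
select p.plan_table_output
from table(dbms_xplan.display_cursor('b2xk93z5qhs4m', 0, 'ALLSTATS LAST')) p;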
For a handy script to call this in one line, see www.williamrobertson.net/documents/xplanx.html. Then it's just
select count(*)
from
(
select col1, col2
from my_tab1 t1, my_tab2 t2
where t1.pk_col1 = t2.pk_col1
and t1.created_date < t2.created_date
)
#xplanx
I have a stored procedure that will
Update the status of at most 500 rows from 0 to 1
Return those rows to the program via a cursor
Here is my stored procedure code
PROCEDURE process_data_out (
o_rt_cursor OUT SYS_REFCURSOR
) IS
v_limit NUMBER;
l_data_ids VARCHAR2(32000);
BEGIN
v_limit := 500; -- limit 500
l_data_ids := '';
-- Create loop to get data
FOR i IN (
SELECT *
FROM
(
SELECT id FROM
TBL_DATA a
WHERE
a.created_at BETWEEN SYSDATE - 0.5 AND SYSDATE + 0.1
AND a.status = 0
AND a.phone NOT IN (SELECT phone FROM TBL_BIG_TABLE_1)
AND a.phone NOT IN (SELECT phone FROM TBL_BIG_TABLE_2 WHERE IS_DENY = 1)
ORDER BY
priority
)
WHERE
ROWNUM <= v_limit
) LOOP
BEGIN
-- Build string of ids like id1,id2,id3,
l_data_ids := l_data_ids
|| i.id
|| ',';
-- update row status to prevent future repeat
UPDATE TBL_DATA
SET
status = 1
WHERE
id = i.id;
END;
END LOOP;
COMMIT;
-- If string of ids length >0 open cursor to take data
IF ( length(l_data_ids) > 0 )
THEN
-- Cut last comma id1,id2,id3, --> id1,id2,id3
l_data_ids := substr(l_data_ids,1,length(l_data_ids) - 1);
-- open cursor
OPEN o_rt_cursor FOR
SELECT
id,
phone
FROM
TBL_DATA a
WHERE
a.id IN (
SELECT
to_number(column_value)
FROM
XMLTABLE ( l_data_ids )
);
END IF;
EXCEPTION
WHEN OTHERS THEN
ROLLBACK;
END process_data_out;
I want to optimize its performance, and here are my questions.
Should I replace IN with EXISTS?
Replace
AND a.phone NOT IN (SELECT phone FROM TBL_BIG_TABLE_1)
AND a.phone NOT IN (SELECT phone FROM TBL_BIG_TABLE_2 WHERE IS_DENY = 1)
by
AND NOT Exists (SELECT phone FROM TBL_BIG_TABLE_1 where TBL_BIG_TABLE_1.phone = a.phone)
AND NOT Exists (SELECT phone FROM TBL_BIG_TABLE_2 WHERE TBL_BIG_TABLE_2.phone = a.phone and IS_DENY = 1)
Is there a better way than
Saving a string of ids like id1,id2,id3 after updating the row status
Opening a cursor by selecting from that string of ids
I appreciate any suggestions.
Thank you for your concern.
Row-by-row processing is always slower, and you are also building the string of ids, which takes additional time, so overall performance suffers.
You can use the collection type DBMS_SQL.NUMBER_TABLE to store the updated ids from the UPDATE statement via the RETURNING clause and use it in the cursor query.
Also, I have changed your UPDATE statement so that it avoids NOT IN and instead uses LEFT JOINs and the ROW_NUMBER analytic function to improve performance, as follows:
CREATE OR REPLACE PROCEDURE PROCESS_DATA_OUT (
O_RT_CURSOR OUT SYS_REFCURSOR
) IS
V_LIMIT NUMBER;
L_DATA_IDS DBMS_SQL.NUMBER_TABLE;
BEGIN
V_LIMIT := 500; -- limit 500
UPDATE TBL_DATA A
SET A.STATUS = 1
WHERE A.ID IN (
SELECT ID
FROM ( SELECT ID,
ROW_NUMBER() OVER(ORDER BY PRIORITY) AS RN
FROM TBL_DATA B
LEFT JOIN TBL_BIG_TABLE_1 T1 ON T1.PHONE = B.PHONE
LEFT JOIN TBL_BIG_TABLE_2 T2 ON T2.IS_DENY = 1 AND T2.PHONE = B.PHONE
WHERE B.CREATED_AT BETWEEN SYSDATE - 0.5 AND SYSDATE + 0.1
AND B.STATUS = 0
AND T1.PHONE IS NULL
AND T2.PHONE IS NULL)
WHERE RN <= V_LIMIT ) RETURNING ID BULK COLLECT INTO L_DATA_IDS;
OPEN O_RT_CURSOR FOR SELECT ID, PHONE
FROM TBL_DATA A
WHERE A.ID IN (SELECT COLUMN_VALUE FROM TABLE ( L_DATA_IDS ));
EXCEPTION
WHEN OTHERS THEN
ROLLBACK;
END PROCESS_DATA_OUT;
/
Below is my problem and desired solution.
Query1:
Select colnames from table1;
Query1 Result:
col1
col2
col3
col4
Query2:
Select a1.*
from table2 a1;
-- should translate to
select a1.col1, a1.col2, a1.col3, a1.col4 from table2 a1;
My first query gives the list of column names; I need to replace the .* in my second query with those column names. How can I achieve this?
You are looking for dynamic SQL. The idea is to generate the query string from the results of a SQL query. You can then run it with execute immediate.
In your use case, that would look like:
declare
p_sql varchar2(100);
begin
select
'select '
|| listagg('a1.' || colnames, ', ') within group(order by colnames)
|| ' from table2 a1'
into p_sql
from table1;
dbms_output.put_line('sql: ' || p_sql); -- debug
execute immediate p_sql; -- execute
end;
/
For your sample data, this generates:
dbms_output:
sql: select a1.col1, a1.col2, a1.col3, a1.col4 from table2 a1
The trigger code below (converted from MSSQL) is not working in Oracle.
The two columns should not have duplicate rows in the table. I'm creating a trigger to accomplish this.
Can anyone help in updating/correcting this code for use in my trigger?
/*
**Unique Constraint for TestOracle - TestTinyInt.
*/
if (Update(UpdOperation) or Update(TestTinyInt)) THEN
IF Exists(
SELECT * FROM inserted i INNER LOOP JOIN TestOracle x ON
(i.TestTinyInt=x.TestTinyInt)
WHERE i.updoperation IN (0, 1) AND x.updoperation IN (0, 1) GROUP BY x.TestTinyInt
HAVING COUNT(*) > 1)
BEGIN
RAISERROR( 'Invalid attempt to enter duplicate TestTinyInt in TestOracle', 16, -1 )
ROLLBACK TRAN
RETURN
END
END
The best way is to create two unique indexes, one on each of the columns. By doing this you eliminate duplication within a particular column (like @a_horse_with_no_name mentioned).
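As a minimal sketch, assuming a table named my_table with columns Column_A and Column_B (hypothetical names; use your actual table and columns), the two unique indexes could be created like this:
-- hypothetical table/column names, for illustration only
CREATE UNIQUE INDEX my_table_col_a_ux ON my_table (Column_A);
CREATE UNIQUE INDEX my_table_col_b_ux ON my_table (Column_B);
-- or equivalently as constraints:
-- ALTER TABLE my_table ADD CONSTRAINT my_table_col_a_uk UNIQUE (Column_A);
-- ALTER TABLE my_table ADD CONSTRAINT my_table_col_b_uk UNIQUE (Column_B);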
For the cross-column case you don't need a trigger, you only need a simple WHERE condition:
where Column_A not in (select Column_B from table) and Column_B not in (select Column_A from table).
EDIT:
If it has to be done in a trigger, then:
create or replace trigger ... instead of insert or update on ...
Declare
dummy number;
Begin
-- count collisions between the :new values and the existing column values
select count(*) into dummy from dual
where :new.Column_A in (select Column_B from table) or :new.Column_B in (select Column_A from table);
if dummy = 0 THEN
INSERT -- placeholder for the actual insert of the :new values
END IF;
END;
EDIT2: If you don't want a unique index and want to do it only with a trigger, here is a solution:
create or replace trigger ... instead of insert or update on ...
Declare
dummy number;
Begin
select count(*) into dummy from(
SELECT COL1 FROM (
(select :new.Column_A col1 from dual
UNION
select :new.Column_B from dual))
INTERSECT
SELECT COL2 FROM (
( SELECT COLUMN_A COL2 from table
UNION
SELECT COLUMN_B from table)));
if dummy = 0 THEN
INSERT
END IF;
END;
I need to search a large DB for a table that has a column matching a given name, but this table must have more than 0 rows.
Here is the query by the way:
SELECT * FROM all_tab_columns WHERE column_name LIKE '%ID_SUPPORT%';
You could use a single query to filter the names and get the actual number of rows:
SELECT owner, table_name, cnt
FROM all_tab_columns, XMLTABLE('/ROWSET/ROW' passing
(dbms_xmlgen.getxmltype(REPLACE(REPLACE(
'select COUNT(*) AS cnt from <owner>.<table_name>', '<owner>', owner)
, '<table_name>', table_name))) COLUMNS cnt INT)
WHERE column_name LIKE '%ID_SUPPORT%' AND cnt > 0;
DBFiddle Demo
Any chance this can be expanded/tweaked to yield the values of the first few rows for all tables?
Yes, by flattening rows using JSON_ARRAYAGG(JSON_OBJECT(*)) in Oracle 19c:
-- generic approach Oracle 19c
SELECT owner, table_name, cnt, example
FROM all_tab_columns, XMLTABLE('/ROWSET/ROW' passing
(dbms_xmlgen.getxmltype(REPLACE(REPLACE(
'select COUNT(*) AS cnt,
MAX((SELECT JSON_ARRAYAGG(JSON_OBJECT(*))
FROM <owner>.<table_name>
WHERE rownum < 10) -- taking up to 10 rows as example
) as example
from <owner>.<table_name>', '<owner>', owner)
, '<table_name>', table_name)))
COLUMNS cnt INT
, example VARCHAR2(1000))
WHERE column_name LIKE '%ID_SUPPORT%'
AND cnt > 0;
The demo contains a hardcoded column list inside JSON_OBJECT; Oracle 19c and JSON_OBJECT(*) would allow any column list per table.
db<>fiddle demo
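For reference, a minimal sketch of the hardcoded form used in the demo, assuming a hypothetical table EMPLOYEES with columns EMP_ID and EMP_NAME (before 19c the column list has to be written out like this):
-- hypothetical table and columns, for illustration only
SELECT JSON_ARRAYAGG(JSON_OBJECT('emp_id' VALUE emp_id, 'emp_name' VALUE emp_name)) AS example
FROM employees
WHERE rownum < 10;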
How it works:
find all tables that have a column named like '%ID_SUPPORT%'
run a query per table using dbms_xmlgen.getxmltype
in the subquery, count the rows and flatten a few rows into JSON as an example
return only the rows for tables that have at least one record
One way:
SELECT * FROM all_tables WHERE num_rows > 0
AND table_name in (SELECT table_name FROM all_tab_columns WHERE column_name LIKE '%ID_SUPPORT%')
If your DB is periodically analyzed, the direct way is to use the following SQL:
SELECT *
FROM all_tables t
WHERE t.table_name LIKE '%ID_SUPPORT%'
and t.num_rows > 0;
A more precise way to determine it is to use the following:
declare
v_val pls_integer := 0;
begin
for c in (
SELECT *
FROM all_tables t
WHERE t.table_name LIKE '%ID_SUPPORT%'
)
loop
execute immediate 'select count(1) from '||c.owner||'.'||c.table_name into v_val;
if v_val > 0 then
dbms_output.put_line('Table Name : '||c.table_name||' with '||v_val||' rows ');
end if;
end loop;
end;
I'm confused by the word matches. If you mean a column, not a table, you may use the following routine to get the desired tables with columns whose names are like ID_SUPPORT:
declare
v_val pls_integer := 0;
begin
for c in (
SELECT t.*
FROM all_tab_columns c
JOIN all_tables t on ( c.table_name = t.table_name )
WHERE c.column_name LIKE '%ID_SUPPORT%'
)
loop
execute immediate 'select count(1) from '||c.owner||'.'||c.table_name into v_val;
if v_val > 0 then
dbms_output.put_line('Table Name : '||c.table_name||' with '||v_val||' rows ');
end if;
end loop;
end;