How to obtain primary key value in trigger function if primary key column name is unknown? - sql

I'm using postgres.
Let's say I'm going to create the following trigger:
CREATE OR REPLACE FUNCTION auditlogfunc() RETURNS TRIGGER AS $example_table$
BEGIN
    INSERT INTO AUDIT(EMP_ID, ENTRY_DATE) VALUES (new.ID, current_timestamp);
    RETURN NEW;
END;
$example_table$ LANGUAGE plpgsql;
I want to use this trigger with many tables, and not every table has a primary key named 'id' (a table may have no 'id' column at all).
So I need some way to reference the primary key in my trigger function no matter which column name it has.
How can I achieve this?

What's your PostgreSQL version? You must query the PostgreSQL catalog to achieve what you want. See the script below.
I'm assuming that your PostgreSQL has JSONB support (9.4+).
CREATE TABLE public.test_table
(
    id BIGINT NOT NULL,
    a_column TEXT NOT NULL,
    CONSTRAINT test_tablepkey PRIMARY KEY (id)
);
CREATE OR REPLACE FUNCTION auditlogfunc() RETURNS TRIGGER AS $example_table$
DECLARE
    reg_id JSONB;
    affected_row JSON;
BEGIN
    IF TG_OP IN ('INSERT', 'UPDATE') THEN
        affected_row := row_to_json(NEW);
    ELSE
        affected_row := row_to_json(OLD);
    END IF;

    --Get PK columns
    --You may want to extract this to a SQL function
    WITH pk_columns (attname) AS (
        SELECT CAST(a.attname AS TEXT)
        FROM pg_index i
        JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey)
        WHERE i.indrelid = TG_RELID
          AND i.indisprimary
    )
    SELECT json_object_agg(key, value) INTO reg_id
    FROM json_each_text(affected_row)
    WHERE key IN (SELECT attname FROM pk_columns);

    --Raise collected PK
    RAISE INFO 'PK: %', reg_id;

    --TODO: your insert into audit table goes here
    RETURN NEW;
END;
$example_table$ LANGUAGE plpgsql;
CREATE TRIGGER tg_audit_cadprodu_row AFTER INSERT OR UPDATE OR DELETE
ON public.test_table FOR EACH ROW EXECUTE PROCEDURE public.auditlogfunc();
--A simple test
INSERT INTO test_table VALUES (CAST((random() * 10000) AS INTEGER), 'Test');
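As the comment in the function above suggests, the primary-key lookup can be pulled out into its own SQL function. A minimal sketch (the name get_pk_columns is only illustrative):
CREATE OR REPLACE FUNCTION get_pk_columns(p_relid oid)
RETURNS SETOF text AS $$
    -- return the column name(s) making up the primary key of the given relation
    SELECT a.attname::text
    FROM pg_index i
    JOIN pg_attribute a ON a.attrelid = i.indrelid
                       AND a.attnum = ANY(i.indkey)
    WHERE i.indrelid = p_relid
      AND i.indisprimary;
$$ LANGUAGE sql STABLE;
Inside the trigger, the WITH pk_columns (attname) AS (...) block can then be replaced by key IN (SELECT * FROM get_pk_columns(TG_RELID)).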

I took Michel Milezzi's answer and cleaned it up so it only returns what we need, with better performance in a more concise statement. Tested on Postgres v12.1. I imagine it would work on 9.4+.
CREATE OR REPLACE FUNCTION auditlogfunc() RETURNS TRIGGER AS $example_table$
DECLARE
    col TEXT = (
        SELECT attname
        FROM pg_index
        JOIN pg_attribute ON attrelid = indrelid AND attnum = ANY(indkey)
        WHERE indrelid = TG_RELID AND indisprimary
    );
BEGIN
    INSERT INTO AUDIT(EMP_ID, ENTRY_DATE) VALUES ((row_to_json(NEW) ->> col), current_timestamp);
    RETURN NEW;
END;
$example_table$ LANGUAGE plpgsql;
First I'm querying for the name of the table's primary key column and storing it in the col variable. I filter the query by the special variable TG_RELID, which is the object ID of the table that caused the trigger to fire. (See the Postgres docs.)
Then we can insert into the audit table. All I've changed from the question is new.ID. Now that we know the name of the primary key column, we can convert the new row (the row that will be, or has been, inserted, updated, or deleted) to JSON and select the value with the col variable. This should work for both BEFORE and AFTER triggers. If triggering on DELETE, change row_to_json(NEW) to row_to_json(OLD); a variant that branches on TG_OP is sketched below.
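If the same function also has to cover DELETE, a variant can branch on TG_OP instead of hard-coding NEW. A sketch, still assuming a single-column primary key and the AUDIT table from the question:
CREATE OR REPLACE FUNCTION auditlogfunc() RETURNS TRIGGER AS $example_table$
DECLARE
    col TEXT := (
        SELECT attname
        FROM pg_index
        JOIN pg_attribute ON attrelid = indrelid AND attnum = ANY(indkey)
        WHERE indrelid = TG_RELID AND indisprimary
    );
    affected_row JSON;
BEGIN
    -- use OLD for DELETE, NEW otherwise
    IF TG_OP = 'DELETE' THEN
        affected_row := row_to_json(OLD);
    ELSE
        affected_row := row_to_json(NEW);
    END IF;

    INSERT INTO AUDIT(EMP_ID, ENTRY_DATE)
    VALUES ((affected_row ->> col), current_timestamp);

    IF TG_OP = 'DELETE' THEN
        RETURN OLD;
    END IF;
    RETURN NEW;
END;
$example_table$ LANGUAGE plpgsql;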

Related

How to impose this exclusion constraint?

I have a key-value table.
CREATE TABLE keyvalues (
    key TEXT NOT NULL,
    value TEXT
)
I want to impose a constraint that if a key has an entry with a NULL value, it cannot have any other entries.
How do I do that?
To clarify:
I want to allow ("key1", "value1"), ("key1", "value2"). But if I have ("key2", NULL), I want to not allow ("key2", "value3").
You can use a trigger, like this:
CREATE OR REPLACE FUNCTION trigger_function()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
begin
    if exists (select 1 from keyvalues where key = new.key and value is null) then
        RAISE EXCEPTION 'Key-value not allowed';
    end if;
    RETURN new;
end;
$function$;
Then you create the trigger on the table
CREATE TRIGGER trigger_on_table
BEFORE INSERT OR UPDATE
ON keyvalues
FOR EACH ROW
EXECUTE PROCEDURE trigger_function();
And test it:
insert INTO keyvalues
SELECT 'a','a'
OK
insert INTO keyvalues
SELECT 'a','b'
OK
insert INTO keyvalues
SELECT 'b',null
OK
insert INTO keyvalues
SELECT 'b','b'
ERROR: Key-value not allowed
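Note that, as written, the check only rejects a new row when the key already has a NULL entry; inserting ('a', NULL) after the two 'a' rows above would still be accepted. If both directions need to be enforced, the condition could be extended along these lines (a sketch; concurrent inserts would additionally need locking or a stricter isolation level to be fully safe):
CREATE OR REPLACE FUNCTION trigger_function()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
begin
    -- reject if the key already has a NULL entry,
    -- or if we are adding a NULL entry to a key that already has rows
    if exists (select 1 from keyvalues where key = new.key and value is null)
       or (new.value is null
           and exists (select 1 from keyvalues where key = new.key)) then
        RAISE EXCEPTION 'Key-value not allowed';
    end if;
    RETURN new;
end;
$function$;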

Return, but not write on INSERT Trigger Function with table split (postgres)

Good day,
I have the following problem:
I maintain a database with a huge table whose contents are split/clustered into daily tables.
To do so, I have a trigger function which inserts the data into the correct table.
This is my trigger:
CREATE OR REPLACE FUNCTION "public"."insert_example"()
RETURNS "pg_catalog"."trigger" AS $BODY$
DECLARE
_tabledate text;
_tablename text;
_start_date text;
_end_date text;
BEGIN
--Takes the current inbound "time" value and determines when midnight is for the given date
_tabledate := to_char(NEW."insert_date", 'YYYYMMDD');
_start_date := to_char(NEW."insert_date", 'YYYY-MM-DD 00:00:00');
_end_date := to_char(NEW."insert_date", 'YYYY-MM-DD 23:59:59');
_tablename := 'zz_example_'||_tabledate;
-- Check if the partition needed for the current record exists
PERFORM 1
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
AND c.relname = _tablename
AND n.nspname = 'public';
-- If the partition needed does not yet exist, then we create it:
-- Note that || is string concatenation (joining two strings to make one)
IF NOT FOUND THEN
EXECUTE '
CREATE TABLE IF NOT EXISTS "'||quote_ident(_tablename)||'" ( LIKE "example_table" INCLUDING ALL )
INHERITS ("example_table");
ALTER TABLE "'||quote_ident(_tablename)||'"
ADD FOREIGN KEY ("log_id") REFERENCES "foreign_example1" ("log_id") ON DELETE SET NULL,
ADD FOREIGN KEY ("detail_log_id") REFERENCES "foreign_example2" ("log_id") ON DELETE SET NULL,
ADD FOREIGN KEY ("image_id") REFERENCES "foreign_example3" ("image_id") ON DELETE SET NULL,
ADD CONSTRAINT date_check CHECK ("insert_date" >= timestamptz '||quote_literal(_start_date)||' and "insert_date" <= timestamptz '||quote_literal(_end_date)||');
CREATE TRIGGER "'||quote_ident(_tablename)||'_create_alarm" AFTER INSERT ON "'||quote_ident(_tablename)||'"
FOR EACH ROW
EXECUTE PROCEDURE "public"."external_trigger_example"();
';
END IF;
--
-- Insert the current record into the correct partition, which we are sure will now exist.
EXECUTE 'INSERT INTO public.' || quote_ident(_tablename) || ' VALUES ($1.*)' USING NEW;
RETURN NULL;
--RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100
This Trigger actually works as expected, but this part is a problem:
RETURN NULL;
--RETURN NEW;
Whatever I return here is written to the table, together with the content already written by the contained INSERT query!
So if I were to RETURN NEW, I would write duplicates.
This is no problem as long as I keep RETURN NULL, but then I am unable to do any INSERT ... RETURNING queries (as nothing is returned, of course).
So my question is:
How could I return the just-inserted ID without creating duplicates?
Or how could I write this trigger so that it returns something, but does not actually insert it (only using the INSERT contained in the trigger)?
Thanks for any help!
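For what it's worth, the generated key can at least be captured inside the trigger itself with EXECUTE ... RETURNING ... INTO, for logging or for writing it somewhere else; it still cannot surface through the client's INSERT ... RETURNING while the trigger returns NULL, which is a known limitation of this inheritance-based partitioning pattern. A minimal, self-contained sketch with made-up table names:
CREATE TABLE parent_demo (id bigserial PRIMARY KEY, payload text);
CREATE TABLE parent_demo_child () INHERITS (parent_demo);

CREATE OR REPLACE FUNCTION parent_demo_insert() RETURNS trigger AS $$
DECLARE
    _new_id bigint;
BEGIN
    -- capture the generated key inside the function via RETURNING ... INTO
    EXECUTE 'INSERT INTO parent_demo_child VALUES ($1.*) RETURNING id'
        INTO _new_id
        USING NEW;
    RAISE NOTICE 'routed row with id %', _new_id;  -- visible server-side only
    RETURN NULL;  -- suppress the insert into the parent; the client's RETURNING stays empty
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER tg_parent_demo_insert BEFORE INSERT ON parent_demo
FOR EACH ROW EXECUTE PROCEDURE parent_demo_insert();

INSERT INTO parent_demo (payload) VALUES ('x');  -- a RETURNING clause here would yield no rows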

PostgreSQL transactional DDL and to_regclass

Following the suggestion at this question, I'm using the to_regclass function to check if a table exists, creating it if it doesn't. However, it appears that if the table was created in the current transaction, to_regclass still returns null.
Is this behaviour expected? Or is this a bug?
Detail
Here's a short example of where this goes wrong:
begin;
create schema test;
create table test.test ( id serial, category integer );
create or replace function test.test_insert () returns trigger as $$
declare
    child_table_name text;
    table_id text;
begin
    child_table_name = concat('test.test_', text(new.category));
    table_id = to_regclass(child_table_name::cstring);
    if table_id is null then
        execute format('create table %I ( primary key (id), check ( category = %L ) ) inherits (test.test)', child_table_name, new.category);
    end if;
    execute format ('insert into %I values ($1.*)', child_table_name) using new;
    return null;
end;
$$ language plpgsql;
create trigger test_insert before insert on test.test for each row execute procedure test.test_insert();
insert into test.test (category) values (1);
insert into test.test (category) values (1);
insert into test.test (category) values (1);
commit;
You're using the %I format specifier incorrectly.
If your category is 1, then you end up calling to_regclass('test.test_1'), i.e. checking for the table test_1 in schema test.
However, format('create table %I', 'test.test_1') will treat the format argument as a single identifier and quote it accordingly, evaluating to 'create table "test.test_1"'. This will create a table called "test.test_1" in your default schema (probably public).
Instead, you need to treat your schema and table names as separate identifiers. Define your table name as:
child_table_name = format('test.%I', 'test_' || new.category);
... and when building your SQL strings, just substitute this value directly (i.e. with %s rather than %I).
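Putting both changes together, the trigger function from the question could look like this (a sketch; note that to_regclass accepts text directly since 9.6, while 9.4/9.5 needed the ::cstring cast used in the question):
create or replace function test.test_insert () returns trigger as $$
declare
    child_table_name text;
begin
    -- quote schema and table name separately
    child_table_name = format('test.%I', 'test_' || new.category);
    if to_regclass(child_table_name) is null then
        -- substitute the already-quoted name with %s, not %I
        execute format(
            'create table %s ( primary key (id), check ( category = %L ) ) inherits (test.test)',
            child_table_name, new.category);
    end if;
    execute format('insert into %s values ($1.*)', child_table_name) using new;
    return null;
end;
$$ language plpgsql;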

How can I modify this trigger to include column-name, old-value and new-value?

Suppose a trigger that keeps track of the AREA table and records the changes in AREA_LOGGING_TABLE.
CREATE TABLE AREA
( AREA_NUMBER NUMBER,
AREA_NAME VARCHAR(20)
)
CREATE TABLE AREA_LOGGING_TABLE
( WHO_MODIFIED VARCHAR(20),
WHEN_MODIFIED DATE,
OLD_VALUE BLOB,
NEW_VALUE BLOB,
COLUMN_NAME VARCHAR(30)
)
I want to record the username, date-time, column-name, old-data, and new-data.
How can I do that?
CREATE OR REPLACE TRIGGER AREA_MODIFY_LOGGER_COLUMN_LVL
AFTER INSERT or UPDATE or DELETE
ON AREA
REFERENCING OLD AS old_data NEW AS new_data
FOR EACH ROW
DECLARE
v_username varchar2(10);
BEGIN
-- Find username of person performing the DELETE on the table
SELECT user INTO v_username
FROM dual;
-- Insert record into audit table
INSERT INTO AREA_LOGGING_TABLE(who_modified, when_modified, old_value, new_value)
VALUES ( v_username, sysdate, :old_data.area_number, :new_data.area_number);
END;
This is not working.
Besides, I don't know how to include column-name here.
The utl_raw.cast_to_raw function can be used to convert your values to BLOB.
Regarding the column_name, I think you can hard-code it in the INSERT statement, just as you are already writing the column name after :NEW and :OLD.
The nvl function is used to handle NULLs in :NEW / :OLD.
CREATE OR REPLACE TRIGGER AREA_MODIFY_LOGGER_COLUMN_LVL
AFTER INSERT or UPDATE or DELETE
ON AREA
REFERENCING OLD AS Old NEW AS New
FOR EACH ROW
DECLARE
v_username varchar2(10);
BEGIN
-- Find username of person performing the DELETE on the table
SELECT user INTO v_username
FROM dual;
if nvl(:old.area_number, -1) <> nvl(:new.area_number, -1) then
-- Insert record into audit table
INSERT INTO AREA_LOGGING_TABLE(who_modified, when_modified, old_value, new_value, column_name)
VALUES ( v_username, sysdate, utl_raw.cast_to_raw(:Old.area_number), utl_raw.cast_to_raw(:New.area_number), 'AREA_NUMBER');
end if;
if nvl(:old.area_name , '-1') <> nvl(:new.area_name, '-1') then
-- Insert record into audit table
INSERT INTO AREA_LOGGING_TABLE(who_modified, when_modified, old_value, new_value, column_name)
VALUES ( v_username, sysdate, utl_raw.cast_to_raw(:Old.AREA_NAME), utl_raw.cast_to_raw(:New.AREA_NAME), 'AREA_NAME');
end if;
END;
Just to give you a brief overview to enhance your understanding of triggers: when a trigger fires, the following built-in pseudo-tables are available (note that this is SQL Server syntax):
- deleted (e.g. select @empid = d.Emp_ID from deleted d)
- inserted (e.g. select @empid = i.Emp_ID from inserted i) (can be used in INSERT/UPDATE operations)
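For completeness, a minimal SQL Server sketch using those pseudo-tables (table and column names are made up; this does not apply to the Oracle trigger above):
CREATE TRIGGER trg_emp_audit ON Employee
AFTER INSERT, DELETE
AS
BEGIN
    -- "inserted" holds the new rows, "deleted" the removed rows
    INSERT INTO Employee_Audit (Emp_ID, Action_Taken)
    SELECT i.Emp_ID, 'INSERT' FROM inserted i;

    INSERT INTO Employee_Audit (Emp_ID, Action_Taken)
    SELECT d.Emp_ID, 'DELETE' FROM deleted d;
END;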

Loop in function does not work as expected

Using PostgreSQL 9.0.4
Below is a very similar structure of my table:
CREATE TABLE departamento
(
    id bigserial NOT NULL,
    master_fk bigint,
    nome character varying(100) NOT NULL,
    CONSTRAINT departamento_pkey PRIMARY KEY (id),
    CONSTRAINT departamento_master_fk_fkey FOREIGN KEY (master_fk)
        REFERENCES departamento (id) MATCH SIMPLE
        ON UPDATE NO ACTION ON DELETE NO ACTION
)
And the function I created:
CREATE OR REPLACE FUNCTION fn_retornar_dptos_ate_raiz(bigint[])
RETURNS bigint[] AS
$BODY$
DECLARE
lista_ini_dptos ALIAS FOR $1;
dp_row departamento%ROWTYPE;
dpto bigint;
retorno_dptos bigint[];
BEGIN
BEGIN
PERFORM id FROM tbl_temp_dptos;
EXCEPTION
WHEN undefined_table THEN
EXECUTE 'CREATE TEMPORARY TABLE tbl_temp_dptos (id bigint NOT NULL) ON COMMIT DELETE ROWS';
END;
FOR i IN array_lower(lista_ini_dptos, 1)..array_upper(lista_ini_dptos, 1) LOOP
SELECT id, master_fk INTO dp_row FROM departamento WHERE id=lista_ini_dptos[i];
IF dp_row.id IS NOT NULL THEN
EXECUTE 'INSERT INTO tbl_temp_dptos VALUES ($1)' USING dp_row.id;
WHILE dp_row.master_fk IS NOT NULL LOOP
dpto := dp_row.master_fk;
SELECT id, master_fk INTO dp_row FROM departamento WHERE id=lista_ini_dptos[i];
EXECUTE 'INSERT INTO tbl_temp_dptos VALUES ($1)' USING dp_row.id;
END LOOP;
END IF;
END LOOP;
RETURN ARRAY(SELECT id FROM tbl_temp_dptos);
END;
$BODY$
LANGUAGE plpgsql VOLATILE
If you have any questions about the names, I can translate them.
What is the idea of the function? First I check whether the temporary table already exists (with PERFORM), and when the exception occurs I create the temporary table.
Then I take each element of the array and use it to fetch the id and master_fk of a department. If the search is successful (the check that id is not null is even unnecessary), I insert the id into the temporary table and start a new loop.
The second loop is intended to get all parents of the department found in the previous steps (i.e., pick a department and insert it into the temporary table).
At the end of the second loop it returns to the first. When that one ends, I return a bigint[] containing what was recorded in the temporary table.
My problem is that the function returns the same list I provide. What am I doing wrong?
There is a lot I would do differently, and to great effect.
Table definition
Starting with the table definition and naming conventions. These are mostly just opinions:
CREATE TEMP TABLE conta (conta_id bigint primary key, ...);
CREATE TEMP TABLE departamento (
dept_id serial PRIMARY KEY
, master_id int REFERENCES departamento (dept_id)
, conta_id bigint NOT NULL REFERENCES conta (conta_id)
, nome text NOT NULL
);
Major points
Are you sure you need a bigserial for departments? There are hardly that many on this planet. A plain serial should suffice.
I hardly ever use character varying with a length restriction. Unlike with some other RDBMS there is no performance gain whatsoever by using a restriction. Add a CHECK constraint if you really need to enforce a maximum length. I just use text, mostly and save myself the trouble.
I suggest a naming convention where the foreign key column shares the name with the referenced column, so master_id instead of master_fk, etc. This also allows the use of USING in joins.
And I rarely use the non-descriptive column name id. Using dept_id instead here.
PL/pgSQL function
It can be largely simplified to:
CREATE OR REPLACE FUNCTION f_retornar_plpgsql(lista_ini_depts VARIADIC int[])
RETURNS int[] AS
$func$
DECLARE
_row departamento; -- %ROWTYPE is just noise
BEGIN
IF NOT EXISTS ( -- simpler in 9.1+, see below
SELECT FROM pg_catalog.pg_class
WHERE relnamespace = pg_my_temp_schema()
AND relname = 'tbl_temp_dptos') THEN
CREATE TEMP TABLE tbl_temp_dptos (dept_id bigint NOT NULL)
ON COMMIT DELETE ROWS;
END IF;
FOR i IN array_lower(lista_ini_depts, 1) -- simpler in 9.1+, see below
.. array_upper(lista_ini_depts, 1) LOOP
SELECT * INTO _row -- since rowtype is defined, * is best
FROM departamento
WHERE dept_id = lista_ini_depts[i];
CONTINUE WHEN NOT FOUND;
INSERT INTO tbl_temp_dptos VALUES (_row.dept_id);
LOOP
SELECT * INTO _row
FROM departamento
WHERE dept_id = _row.master_id;
EXIT WHEN NOT FOUND;
INSERT INTO tbl_temp_dptos
SELECT _row.dept_id
WHERE NOT EXISTS (
SELECT FROM tbl_temp_dptos
WHERE dept_id = _row.dept_id);
END LOOP;
END LOOP;
RETURN ARRAY(SELECT dept_id FROM tbl_temp_dptos);
END
$func$ LANGUAGE plpgsql;
Call:
SELECT f_retornar_plpgsql(2, 5);
Or:
SELECT f_retornar_plpgsql(VARIADIC '{2,5}');
ALIAS FOR $1 is outdated syntax and discouraged. Use function parameters instead.
The VARIADIC parameter makes it more convenient to call. Related:
Pass multiple values in single parameter
You don't need EXECUTE for queries without dynamic elements. Nothing to gain here.
You don't need exception handling to create a table. Quoting the manual here:
Tip: A block containing an EXCEPTION clause is significantly more
expensive to enter and exit than a block without one. Therefore, don't
use EXCEPTION without need.
Postgres 9.1 or later has CREATE TEMP TABLE IF NOT EXISTS. I use a workaround for 9.0 to conditionally create the temp table.
Postgres 9.1 also offers FOREACH to loop through arrays; a minimal sketch follows below.
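For illustration, a minimal FOREACH loop (9.1+); in the function above it would replace the FOR i IN array_lower(..) .. array_upper(..) counter loop:
DO $$
DECLARE
    _ids int[] := '{2,5}';
    _id  int;
BEGIN
    FOREACH _id IN ARRAY _ids LOOP
        RAISE NOTICE 'dept_id: %', _id;  -- process each element here
    END LOOP;
END
$$;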
All that said, here comes the bummer: you don't need most of this.
SQL function with rCTE
Even in Postgres 9.0, a recursive CTE makes this a whole lot simpler:
CREATE OR REPLACE FUNCTION f_retornar_sql(lista_ini_depts VARIADIC int[])
RETURNS int[] AS
$func$
WITH RECURSIVE cte AS (
SELECT dept_id, master_id
FROM unnest($1) AS t(dept_id)
JOIN departamento USING (dept_id)
UNION ALL
SELECT d.dept_id, d.master_id
FROM cte
JOIN departamento d ON d.dept_id = cte.master_id
)
SELECT ARRAY(SELECT DISTINCT dept_id FROM cte) -- distinct values
$func$ LANGUAGE sql;
Same call.
Closely related answer with explanation:
Tree Structure and Recursion
SQL Fiddle demonstrating both.
I managed to fix my code. At the end of this response is its final form, but any suggestions for improvement are welcome. Here are the changes:
1 - I provided only the essential structure of my table, but in reality it is much bigger. Before the master_fk field there is a field called account_fk, and because the variable dp_row is declared as departamento%ROWTYPE, the entire structure of my table is copied into the variable; so if I fill only the first two fields, i.e. id and account_fk, then master_fk, which is the third field, will be null.
2 - @Nicolas was right: I ended up using the variable dpto for the second loop. I had forgotten to assign it inside the loop, and also to use it in the search done within the loop.
3 - I added an IF statement to make sure there would be no duplicates in the temporary table.
Correction in the structure of my table:
CREATE TABLE departamento
(
id bigserial NOT NULL,
account_fk bigint NOT NULL,
master_fk bigint,
nome character varying(100) NOT NULL,
CONSTRAINT departamento_pkey PRIMARY KEY (id),
CONSTRAINT departamento_account_fk_fkey FOREIGN KEY (account_fk)
REFERENCES conta (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION,
CONSTRAINT departamento_master_fk_fkey FOREIGN KEY (master_fk)
REFERENCES departamento (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION
)
My function as it is now:
CREATE OR REPLACE FUNCTION fn_retornar_dptos_ate_raiz(bigint[]) RETURNS bigint[] AS
$BODY$
DECLARE
lista_ini_dptos ALIAS FOR $1;
dp_row departamento%ROWTYPE;
dpto bigint;
BEGIN
BEGIN
PERFORM id FROM tbl_temp_dptos;
EXCEPTION
WHEN undefined_table THEN
EXECUTE 'CREATE TEMPORARY TABLE tbl_temp_dptos (id bigint NOT NULL) ON COMMIT DELETE ROWS';
END;
FOR i IN array_lower(lista_ini_dptos, 1)..array_upper(lista_ini_dptos, 1) LOOP
SELECT id, account_fk, master_fk INTO dp_row FROM departamento WHERE id=lista_ini_dptos[i];
EXECUTE 'INSERT INTO tbl_temp_dptos VALUES ($1)' USING dp_row.id;
dpto := dp_row.master_fk;
-- RAISE NOTICE 'dp_row: (%); ', dp_row.master_fk;
WHILE dpto IS NOT NULL LOOP
SELECT id, account_fk, master_fk INTO dp_row FROM departamento WHERE id=dpto;
IF NOT(select exists(select 1 from tbl_temp_dptos where id=dp_row.id limit 1)) THEN
EXECUTE 'INSERT INTO tbl_temp_dptos VALUES ($1)' USING dp_row.id;
END IF;
dpto := dp_row.master_fk;
-- RAISE NOTICE 'dp_row: (%); ', dp_row.master_fk;
END LOOP;
END LOOP;
RETURN ARRAY(SELECT id FROM tbl_temp_dptos);
END;
$BODY$
LANGUAGE plpgsql VOLATILE