PL/pgSQL: finding all groups a person belongs to (also indirectly) - sql

Simple intro:
I have a database with users and groups.
Every user might be a member of one or more groups.
Every group might have one or more parent groups.
Schema:
CREATE TABLE users(
    username VARCHAR(64) NOT NULL PRIMARY KEY,
    password VARCHAR(64) NOT NULL,
    enabled BOOLEAN NOT NULL);

CREATE TABLE groups (
    id bigserial NOT NULL PRIMARY KEY,
    group_name VARCHAR(64) NOT NULL);

CREATE TABLE groups_inheritance (
    group_id bigint NOT NULL,
    parent_group_id bigint NOT NULL,
    CONSTRAINT fk_group_inheritance_group FOREIGN KEY(group_id) REFERENCES groups(id),
    CONSTRAINT fk_group_inheritance_group_2 FOREIGN KEY(parent_group_id) REFERENCES groups(id),
    CONSTRAINT unique_uk_groups_inheritance UNIQUE(group_id, parent_group_id));

CREATE TABLE group_members (
    id bigint PRIMARY KEY,
    username VARCHAR(64) NOT NULL,
    group_id bigint NOT NULL,
    CONSTRAINT fk_group_members_username FOREIGN KEY(username) REFERENCES users(username),
    CONSTRAINT fk_group_members_group FOREIGN KEY(group_id) REFERENCES groups(id));
I'm looking for a PL/pgSQL function that finds all groups (their names) a particular user belongs to.
Example:
group name: People,
group parent: null
group name: Students,
group parent: People
group name: Football_players,
group parent: People
group name: Basketball_players,
group parent: People
user name: Maciej,
groups : Students, Football_players
f("Maciej") = {"Students", "People", "Football_players"}
He belongs to "People" only because he belongs to "Students" or "Football_players"; he is not a direct member of the "People" group.
Thanks in advance!

WITH RECURSIVE group_ancestry AS (
    SELECT group_id, username
    FROM group_members
  UNION
    SELECT groups_inheritance.parent_group_id, username
    FROM group_ancestry
    JOIN groups_inheritance ON groups_inheritance.group_id = group_ancestry.group_id
)
SELECT username, group_id
FROM group_ancestry;
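To get the result the question asks for (group names for a single user), the CTE above can be filtered and joined to groups. A sketch against the schema from the question; putting the WHERE clause in the non-recursive term keeps the recursion from walking every user's memberships:

WITH RECURSIVE group_ancestry AS (
    SELECT group_id
    FROM group_members
    WHERE username = 'Maciej'
  UNION
    SELECT gi.parent_group_id
    FROM group_ancestry ga
    JOIN groups_inheritance gi ON gi.group_id = ga.group_id
)
SELECT g.group_name
FROM group_ancestry ga
JOIN groups g ON g.id = ga.group_id;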

If you have just one level of inheritance (as in the example), then you could use a query like this:
WITH group_ids AS
(
    SELECT group_id
    FROM group_members
    WHERE username LIKE 'Maciej'
)
SELECT group_name
FROM
    (SELECT group_id FROM group_ids
     UNION
     SELECT DISTINCT parent_group_id
     FROM groups_inheritance INNER JOIN group_ids USING(group_id)) g
INNER JOIN groups ON id = group_id;
Result:
group_name
------------------
People
Students
Football_players
(3 rows)
PL/pgSQL function:
DROP FUNCTION IF EXISTS f(varchar(64));
CREATE FUNCTION f(usernameParam varchar(64)) -- parameter named so it does not shadow the username column
RETURNS text[] AS $$
DECLARE
    gId bigint;
    pgId bigint;
    gName text;
    result text[] = '{}';
BEGIN
    FOR gId IN SELECT group_id FROM group_members WHERE username LIKE usernameParam
    LOOP
        SELECT INTO gName group_name FROM groups WHERE id = gId;
        result := result || gName;
        FOR pgId IN SELECT parent_group_id FROM groups_inheritance WHERE group_id = gId
        LOOP
            SELECT INTO gName group_name FROM groups WHERE id = pgId;
            IF NOT (result @> ARRAY[gName]) THEN
                result := result || gName;
            END IF;
        END LOOP;
    END LOOP;
    RETURN result;
END $$
LANGUAGE plpgsql;
Result:
SELECT f('Maciej');
f
------------------------------------
{Students,People,Football_players}
(1 row)
However, for nested parent groups recursion is more suitable.
EDIT:
Here is a recursion-based variant for nested parent groups:
CREATE OR REPLACE FUNCTION f_recursive(gIdParam bigint, resultArrayParam bigint[])
RETURNS bigint[] AS $$
DECLARE
    pgId bigint;
    resultArray bigint[];
BEGIN
    FOR pgId IN SELECT parent_group_id FROM groups_inheritance WHERE group_id = gIdParam
    LOOP
        IF NOT (resultArrayParam @> ARRAY[pgId]) THEN
            resultArray := resultArray || pgId;
            resultArray := resultArray || f_recursive(pgId, resultArray);
        END IF;
    END LOOP;
    RETURN resultArray;
END $$
LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION f(usernameParam varchar(64))
RETURNS text[] AS $$
DECLARE
    gId bigint;
    resultArray bigint[];
BEGIN
    FOR gId IN SELECT group_id FROM group_members WHERE username LIKE usernameParam
    LOOP
        resultArray := resultArray || gId;
        resultArray := resultArray || f_recursive(gId, resultArray);
    END LOOP;
    RETURN (SELECT array_agg(group_name)
            FROM groups INNER JOIN (SELECT unnest(resultArray) AS id) u USING (id));
END $$
LANGUAGE plpgsql;
Example insert:
INSERT INTO groups (id, group_name) VALUES
(1, 'People'), (2, 'Workers'), (3, 'Programmers'),
(4, 'AI-Programmers'), (5, 'Administators'), (6, 'Managers');
INSERT INTO groups_inheritance (group_id, parent_group_id) VALUES
(2, 1), (3, 2), (4, 3), (5, 2), (6, 2);
INSERT INTO users (username, password, enabled) VALUES
('Maciej', '12345', true);
INSERT INTO group_members (id, username, group_id) VALUES
(1, 'Maciej', 4), (2, 'Maciej', 5);
Result:
SELECT f('Maciej');
f
-----------------------------------------------------------
{AI-Programmers,Programmers,Workers,People,Administators}
(1 row)
Another way is to use a WITH query with the RECURSIVE modifier, as @araqnid has shown.
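For example, the recursive CTE can be wrapped so it matches the f(username) signature the question asks for. A sketch only: the function name f_cte and LANGUAGE sql are my choices, and referencing parameters by name inside a SQL function needs PostgreSQL 9.2 or later:

CREATE OR REPLACE FUNCTION f_cte(usernameParam varchar(64))
RETURNS text[] AS $$
    WITH RECURSIVE group_ancestry AS (
        SELECT group_id
        FROM group_members
        WHERE username = usernameParam
      UNION
        SELECT gi.parent_group_id
        FROM group_ancestry ga
        JOIN groups_inheritance gi ON gi.group_id = ga.group_id
    )
    SELECT array_agg(g.group_name)
    FROM group_ancestry ga
    JOIN groups g ON g.id = ga.group_id;
$$ LANGUAGE sql STABLE;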

Related

How to check if exists a row before insert - PL/SQL

I'm new to SQL, and I want to know how I can add SQL to this structure to check if the row already exists before inserting. I tried this way, but I think it's not the best.
DECLARE
v_project_id NUMBER;
v_group_name VARCHAR2(100);
v_user_id NUMBER;
v_group_id NUMBER;
BEGIN
FOR project IN (
SELECT project_id
FROM JSON_TABLE(:p_request,
'$' COLUMNS(NESTED PATH '$.projects[*]'
COLUMNS(
project_id VARCHAR2(100) PATH '$.project.id'
)
)
)
)
LOOP
v_project_id := project.project_id;
v_group_name := json_value (:p_request,'$.group_name');
v_user_id := json_value (:p_request,'$.user_id');
v_group_id := s_project_group.NEXTVAL;
dbms_output.put_line(v_group_id||' - '||v_group_name||' - '||v_user_id||' - '||v_project_id);
--------- HERE, I WANT TO CHECK IF THE NEW ROW ALREADY EXISTS BEFORE INSERT, IM TRIED THIS WAY, BUT I THINK ITS NOT THE BEST------------
(select case
when NOT exists (select 1 FROM APP_PROJECT_GROUP WHERE GROUP_NAME = :v_group_name AND USER_ID = :v_user_id)
INSERT INTO APP_PROJECT_GROUP (GROUP_ID, GROUP_NAME, PROJECT_ID, USER_ID) VALUES (v_group_id, v_group_name, v_project_id, v_user_i)
);
----------------------------------------------------------------------------------------------------------------------------------------
COMMIT;
END LOOP;
END;
As I commented on your previous question, use a MERGE statement; then you can perform all the INSERTs in a single SQL statement (without having to use cursor loops and repeatedly context-switch from PL/SQL to SQL):
MERGE INTO app_project_group dst
USING (
SELECT group_id,
group_name,
TO_NUMBER(project_id) AS project_id,
user_id
FROM JSON_TABLE(
:p_request,
'$'
COLUMNS(
group_name VARCHAR2(150) PATH '$.group_name',
group_id NUMBER(15) PATH '$.group_id',
user_id NUMBER(15) PATH '$.user_id',
NESTED PATH '$.projects[*]'
COLUMNS (
project_id VARCHAR2(15) PATH '$.project.id'
)
)
)
) src
ON (src.group_name = dst.group_name AND src.user_id = dst.user_id)
WHEN NOT MATCHED THEN
INSERT (group_id, group_name, project_id, user_id)
VALUES (s_project_group.NEXTVAL, src.group_name, src.project_id, src.user_id);
db<>fiddle here
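For reference, a minimal payload matching the JSON_TABLE paths used above might look like the literal below. The values are hypothetical; the actual :p_request contents were not shown in the question:

SELECT *
FROM JSON_TABLE(
       '{"group_name": "Group A",
         "group_id": 1,
         "user_id": 42,
         "projects": [ {"project": {"id": "100"}},
                       {"project": {"id": "101"}} ]}',
       '$'
       COLUMNS(
         group_name VARCHAR2(150) PATH '$.group_name',
         group_id   NUMBER(15)    PATH '$.group_id',
         user_id    NUMBER(15)    PATH '$.user_id',
         NESTED PATH '$.projects[*]'
           COLUMNS (project_id VARCHAR2(15) PATH '$.project.id')
       )
     );
-- Returns one row per project: (Group A, 1, 42, 100) and (Group A, 1, 42, 101).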

How to insert foreign key from pre-generated table?

I have 3 tables:
create table customer
(
customer_id integer primary key,
customer_first_name varchar2(50) not null,
customer_surrname varchar2(50) not null,
phone_number varchar2(15) not null,
customer_details varchar2(200) default 'There is no special notes'
);
create table place
(
table_number integer primary key,
table_details varchar2(200) default 'There is no details'
);
create table booking
(
booking_id integer primary key,
date_of_booking date,
number_of_persons number(2) not null,
customer_id integer not null,
foreign key(customer_id) references customer(customer_id),
table_number integer not null,
foreign key(table_number) references place(table_number)
);
I have to populate the customer table using this kind of generator:
set SERVEROUTPUT on format wrapped;
set define off;
drop sequence customer_seq;
drop sequence place_seq;
--CUSTOMER TABLE INSERT ROW GENERATOR
create sequence customer_seq START WITH 1 INCREMENT BY 1 NOMAXVALUE;
CREATE OR REPLACE TRIGGER customer_id_trigger
BEFORE INSERT ON customer
FOR EACH ROW
BEGIN
SELECT customer_seq.nextval INTO :new.customer_id FROM dual;
END;
/
DELETE FROM customer;
DECLARE
TYPE TABSTR IS TABLE OF VARCHAR2(250);
first_name TABSTR;
surrname TABSTR;
qname number(5);
phonenum number(15);
details TABSTR;
BEGIN
first_name := TABSTR ('Jhon','Poul','Jesica','Arnold','Max','Teemo','Tim','Mikel','Michael',
'Kristian','Adela','Mari','Anastasia','Robert','Jim','Juana','Adward',
'Jana','Ola','Kristine','Natali','Corey','Chester','Naomi','Chin-Chou');
surrname := TABSTR ('Grey','Brown','Robins','Chiho','Lee','Das','Edwins','Porter','Potter',
'Dali','Jordan','Jordison','Fox','Washington','Bal','Pitney','Komarowski',
'Banks','Albra','Shwiger');
details := TABSTR ('Exellent Customer','Good Customer','Always drunked','Left big tips',
'Bad Customer','Did not pay last bill','New Customer','VIP client');
qname := 100; -- CHANGE THIS TO MANAGE HOW MANY ROWS YOU WANT TO BE ADDED
FOR i IN 1..qname LOOP
phonenum := dbms_random.value(111111111,999999999);
INSERT INTO customer VALUES (NULL, first_name(dbms_random.value(1,25)),
surrname(dbms_random.value(1,20)), phonenum, details(dbms_random.value(1,8)));
END LOOP;
DBMS_OUTPUT.put_line('Customers done!');
END;
/
--TABLE INSERT
DELETE FROM place;
create sequence place_seq start with 1 increment by 1;
insert into place values (place_seq.nextval, 'Near the window');
insert into place values (place_seq.nextval, default);
insert into place values (place_seq.nextval, 'Near the door');
insert into place values (place_seq.nextval, 'Near the window');
insert into place values (place_seq.nextval, 'Near the window');
insert into place values (place_seq.nextval, default);
insert into place values (place_seq.nextval, 'Near the door');
insert into place values (place_seq.nextval, 'Big table');
insert into place values (place_seq.nextval, default);
insert into place values (place_seq.nextval, 'Big table');
So the question is: how can I insert a customer_id into the "booking" table that matches one of the IDs in the "customer" table? Every time I regenerate the data in the "customer" table the IDs change, so I should somehow select them into an array and then randomly choose one of them from this array. The thing is, I don't really know how to select from a table into an array. Can anybody help?
For a PL/SQL version you can use BULK COLLECT and the standard sys.odcinumberlist collection type.
create sequence booking_seq start with 1 increment by 1;
declare
customerIds sys.odcinumberlist;
placeIds sys.odcinumberlist;
number_of_generated_records number := 150; -- number of records to be generated
begin
-- fill the array of customer_id values
select customer_id
bulk collect into customerIds
from customer;
-- fill the array of place numbers
select table_number
bulk collect into placeIds
from place;
for i in 1..number_of_generated_records loop
insert into booking(booking_id,date_of_booking,number_of_persons,customer_id,table_number)
values(
booking_seq.nextval, -- booking_id
trunc(sysdate) + round(dbms_random.value(1,365)), -- date_of_booking
round(dbms_random.value(1,99)), -- number_of_persons
customerIds(round(dbms_random.value(1,customerIds.count))), -- customer_id
placeIds(round(dbms_random.value(1,placeIds.count))) -- table_number
);
end loop;
end;
But for your case I would prefer pure SQL:
insert into booking(booking_id,date_of_booking,number_of_persons,customer_id,table_number)
with
customer_subq as (
select customer_id, row_number() over (order by customer_id) rn from customer
),
place_subq as (
select table_number, row_number() over (order by table_number) rn from place
),
params as (
select 1500 number_of_generated_records,
(select count(1) from customer) customer_count,
(select count(1) from place) place_count
from dual
),
random_numbers as (
select round(dbms_random.value(1,1000)) random_number1,
round(dbms_random.value(1,1000)) random_number2,
round(dbms_random.value(1,1000)) random_number3,
round(dbms_random.value(1,1000)) random_number4
from dual,params
connect by level <= number_of_generated_records
)
select booking_seq.nextval booking_id,
trunc(sysdate) + mod(random_number1,365) date_of_booking,
mod(random_number1,100) number_of_persons,
customer_id,
table_number
from random_numbers,
params,
customer_subq,
place_subq
where mod(random_number1,customer_count) + 1 = customer_subq.rn
and mod(random_number2,place_count) + 1 = place_subq.rn

Insert rows based on array of UUIDs

Looking for a way to insert a list of records based on an array of UUIDs. Here's my example code:
CREATE OR REPLACE FUNCTION "AddGroupUsers" (
"#OrganizationID" UUID,
"#GroupID" UUID,
"#UserIDs" UUID[]
)
RETURNS viud AS
$func$
BEGIN
FOR index IN "#UserIDs" LOOP
INSERT INTO
"UserGroups" (
"UserID",
"GroupID",
"OrganizationID"
)
VALUES (
"#UserID"[index],
"#GroupID",
"#OrganizationID"
);
END LOOP;
END;
$func$ LANGUAGE PLPGSQL;
Obviously doesn't work, lol.
I want to be able to call:
SELECT "AddGroupUsers"(
'cb6e96db-73db-4b07-811f-c54b61d09fa4',
'451a9ab7-02f6-4f63-bb87-80ad531ab490'
array(
'451a9ab7-02f6-4f63-bb87-80ad531ab490',
'451a9ab7-02f6-4f63-bb87-80ad531ab491',
'451a9ab7-02f6-4f63-bb87-80ad531ab492',
'451a9ab7-02f6-4f63-bb87-80ad531ab493',
'451a9ab7-02f6-4f63-bb87-80ad531ab494'
)::uuid[]
);
As a side note, I have a unique key constraint that ensures only one record for a given UserID and GroupID ever exists. If the second array value breaks that rule, will the whole query fail, and how can I ignore it to ensure the rest of the values get inserted?
Use unnest and plain SQL instead of PL/pgSQL. With this table:
create table user_groups (
org_id uuid, grp_id uuid, use_id uuid,
unique (grp_id, use_id)
);
This function will insert non existent:
create or replace function AddGroupUsers(
_org_id uuid, _grp_id uuid, _use_id uuid[]
) returns setof user_groups as $$
insert into user_groups (org_id, grp_id, use_id)
select s.org_id, grp_id, use_id
from
(
select
_org_id as org_id,
_grp_id as grp_id,
unnest(_use_id) as use_id
) s
left join
user_groups ug using (grp_id, use_id)
where ug.grp_id is null
returning *
;
$$ language sql;
Usage:
select *
from AddGroupUsers(
'cb6e96db-73db-4b07-811f-c54b61d09fa4'::uuid,
'451a9ab7-02f6-4f63-bb87-80ad531ab490'::uuid,
array[
'451a9ab7-02f6-4f63-bb87-80ad531ab490',
'451a9ab7-02f6-4f63-bb87-80ad531ab491',
'451a9ab7-02f6-4f63-bb87-80ad531ab492',
'451a9ab7-02f6-4f63-bb87-80ad531ab493',
'451a9ab7-02f6-4f63-bb87-80ad531ab494'
]::uuid[]
);
org_id | grp_id | use_id
--------------------------------------+--------------------------------------+--------------------------------------
cb6e96db-73db-4b07-811f-c54b61d09fa4 | 451a9ab7-02f6-4f63-bb87-80ad531ab490 | 451a9ab7-02f6-4f63-bb87-80ad531ab490
cb6e96db-73db-4b07-811f-c54b61d09fa4 | 451a9ab7-02f6-4f63-bb87-80ad531ab490 | 451a9ab7-02f6-4f63-bb87-80ad531ab491
cb6e96db-73db-4b07-811f-c54b61d09fa4 | 451a9ab7-02f6-4f63-bb87-80ad531ab490 | 451a9ab7-02f6-4f63-bb87-80ad531ab492
cb6e96db-73db-4b07-811f-c54b61d09fa4 | 451a9ab7-02f6-4f63-bb87-80ad531ab490 | 451a9ab7-02f6-4f63-bb87-80ad531ab493
cb6e96db-73db-4b07-811f-c54b61d09fa4 | 451a9ab7-02f6-4f63-bb87-80ad531ab490 | 451a9ab7-02f6-4f63-bb87-80ad531ab494
Based on this answer and the official documentation, you could declare a variable to store each user ID, like this:
CREATE OR REPLACE FUNCTION AddGroupUsers (
"#OrganizationID" UUID,
"#GroupID" UUID,
"#UserIDs" UUID[]
)
RETURNS void AS
$func$
DECLARE uID UUID;
BEGIN
FOREACH uID IN ARRAY "#UserIDs" LOOP
INSERT INTO
UserGroups (
UserID,
GroupID,
OrganizationID
)
VALUES (
uID,
"#GroupID",
"#OrganizationID"
);
END LOOP;
END;
$func$ LANGUAGE PLPGSQL;
And to actually call it:
SELECT AddGroupUsers(
'cb6e96db-73db-4b07-811f-c54b61d09fa4'::uuid,
'451a9ab7-02f6-4f63-bb87-80ad531ab490'::uuid,
array[
'451a9ab7-02f6-4f63-bb87-80ad531ab490',
'451a9ab7-02f6-4f63-bb87-80ad531ab491',
'451a9ab7-02f6-4f63-bb87-80ad531ab492',
'451a9ab7-02f6-4f63-bb87-80ad531ab493',
'451a9ab7-02f6-4f63-bb87-80ad531ab494'
]::uuid[]
);
(Note the square brackets instead of parentheses.)
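Regarding the side question about the unique key on (UserID, GroupID): on PostgreSQL 9.5 or later you can let the insert skip conflicting rows instead of failing the whole statement by using ON CONFLICT DO NOTHING. A sketch combining that with unnest, assuming the unique constraint really is on ("UserID", "GroupID"):

CREATE OR REPLACE FUNCTION "AddGroupUsers"(
    "#OrganizationID" UUID,
    "#GroupID"        UUID,
    "#UserIDs"        UUID[]
)
RETURNS void AS
$func$
    -- one set-based insert; rows that would violate the unique key are silently skipped
    INSERT INTO "UserGroups" ("UserID", "GroupID", "OrganizationID")
    SELECT t.uid, $2, $1
    FROM unnest($3) AS t(uid)
    ON CONFLICT ("UserID", "GroupID") DO NOTHING;
$func$ LANGUAGE sql;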

Optimize SQL query with 3 FOR loops

I have a fully working SQL query. However, it is very very slow. I am looking for a way to optimize it.
CREATE TABLE trajectory_geom (
id SERIAL PRIMARY KEY,
trajectory_id BIGINT,
user_id BIGINT,
geom GEOMETRY(Linestring, 4326)
);
INSERT INTO trajectory_geom (trajectory_id, user_id, geom)
SELECT
p.trajectory_id,
p.user_id,
ST_Transform(ST_MakeLine(p.geom), 4326)
FROM point p
GROUP BY p.trajectory_id
;
DO $$
DECLARE
urow record;
vrow record;
wrow record;
BEGIN
FOR wrow IN
SELECT DISTINCT(p.user_id) FROM point p
LOOP
raise notice 'User id: %', wrow.user_id;
FOR vrow IN
SELECT DISTINCT(p.trajectory_id) FROM point p WHERE p.user_id = wrow.user_id
LOOP
FOR urow IN
SELECT
analyzed_tr.*
FROM trajectory_start_end_geom analyzed_tr
WHERE
analyzed_tr.user_id = wrow.user_id
AND
ST_Intersects (
(
analyzed_tr.start_geom
)
,
(
SELECT g.geom
FROM trajectory_geom g
WHERE g.trajectory_id = vrow.trajectory_id
)
) = TRUE
LOOP
INSERT INTO trajectories_intercepting_with_starting_point (initial_trajectory_id, mathced_trajectory_id, user_id)
SELECT
vrow.trajectory_id,
urow.trajectory_id,
wrow.user_id
WHERE urow.trajectory_id <> vrow.trajectory_id
;
END LOOP;
END LOOP;
END LOOP;
END;
$$;
It has 3 loops... how can I avoid them?
Basically, I am looping over all user IDs, for each user looping over all of their trajectories, and checking whether each trajectory intersects any other trajectory of that user.
Schema:
CREATE TABLE public.trajectory_start_end_geom
(
id integer NOT NULL DEFAULT nextval('trajectory_start_end_geom_id_seq'::regclass),
trajectory_id bigint,
user_id bigint,
start_geom geometry(Polygon,4326),
end_geom geometry(Polygon,4326),
CONSTRAINT trajectory_start_end_geom_pkey PRIMARY KEY (id)
)
WITH (
OIDS=FALSE
);
CREATE TABLE public.trajectory_geom
(
id integer NOT NULL DEFAULT nextval('trajectory_geom_id_seq'::regclass),
trajectory_id bigint,
user_id bigint,
geom geometry(LineString,4326),
CONSTRAINT trajectory_geom_pkey PRIMARY KEY (id)
)
WITH (
OIDS=FALSE
);
CREATE TABLE public.point
(
id integer NOT NULL DEFAULT nextval('point_id_seq'::regclass),
user_id bigint,
date date,
"time" time without time zone,
lat double precision,
lon double precision,
trajectory_id integer,
geom geometry(Geometry,4326),
CONSTRAINT point_pkey PRIMARY KEY (id)
)
WITH (
OIDS=FALSE
);
Try this SQL query. Hope this helps.
INSERT INTO trajectories_intercepting_with_starting_point
(initial_trajectory_id, mathced_trajectory_id, user_id)
SELECT
TG.trajectory_id AS first_trajectory_id,
TG2.trajectory_id AS last_trajectory_id,
TG.user_id
FROM Trajectory_geom AS TG
JOIN Trajectory_geom AS TG2 ON TG.user_id = TG2.user_id
AND TG.trajectory_id < TG2.trajectory_id
JOIN Trajectory_start_end_geom AS TSE ON TSE.trajectory_id = TG.trajectory_id
WHERE ST_Intersects(TSE.start_geom, TG2.geom) = TRUE
This should do the trick:
WITH vrow AS(
INSERT INTO trajectory_geom (trajectory_id, user_id, geom)
SELECT
p.trajectory_id,
p.user_id,
ST_Transform(ST_MakeLine(p.geom), 4326) AS geom
FROM point p
GROUP BY p.trajectory_id
RETURNING trajectory_id, user_id, geom
)
INSERT INTO trajectories_intercepting_with_starting_point (initial_trajectory_id, mathced_trajectory_id, user_id)
SELECT
vrow.trajectory_id,
urow.trajectory_id,
vrow.user_id
FROM trajectory_start_end_geom AS urow
JOIN vrow
ON urow.user_id = vrow.user_id
AND urow.trajectory_id <> vrow.trajectory_id
AND ST_Intersects(urow.start_geom, vrow.geom)
If you don't need the insert into trajectory_geom, eliminating it (and the CTE) will speed this up.
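If trajectory_geom already holds the lines, the same set-based insert can read straight from the table with no CTE; a sketch of the variant just described:

INSERT INTO trajectories_intercepting_with_starting_point (initial_trajectory_id, mathced_trajectory_id, user_id)
SELECT
    tg.trajectory_id,
    tse.trajectory_id,
    tg.user_id
FROM trajectory_geom AS tg
JOIN trajectory_start_end_geom AS tse
  ON tse.user_id = tg.user_id
 AND tse.trajectory_id <> tg.trajectory_id
 AND ST_Intersects(tse.start_geom, tg.geom);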

SQL - Efficient versioning of DNS records

So far I have come up with this solution that needs further refinement (big thanks to #postgresql on freenode).
The problem I am trying to overcome is finding an efficient way of storing DNS records whilst maintaining some sort of history. The issue I am currently having is with the wCTE, which is inserting new records and deleting old records correctly. It isn't, however, re-adding records. The wCTE is:
WITH deltas AS (
SELECT o, n FROM (
SELECT id, name, domain_id, class_id, addr FROM record WHERE tld_id = $1
) AS o FULL OUTER JOIN record_temp n
ON (
o.name = n.name AND
o.domain_id = n.domain_id AND
o.class_id = n.class_id AND
o.addr = n.addr
)
WHERE (o.name, o.domain_id, o.class_id, o.addr)
IS DISTINCT FROM (n.name, n.domain_id, n.class_id, n.addr)
), mark_dead AS (
UPDATE record SET alive = FALSE
WHERE id IN (
SELECT (o).id FROM deltas WHERE (o).id IS NOT NULL
) RETURNING *
)
INSERT INTO record (name, domain_id, tld_id, class_id, addr)
SELECT (n).name, (n).domain_id, (n).tld_id, (n).class_id, (n).addr
FROM deltas WHERE
(n).name IS NOT NULL AND
(n).domain_id IS NOT NULL AND
(n).tld_id IS NOT NULL AND
(n).class_id IS NOT NULL AND
(n).addr IS NOT NULL
;
The o result has all the old records that do not exist in record_temp; n has all the records that are new and need to be inserted. I expect I need to add another join (an inner join?) which pulls in results that exist in both tables (and which, if currently marked as dead, need to be marked as alive).
The rest of the schema for reference is:
CREATE TABLE record (
id SERIAL,
name VARCHAR(255),
domain_id INT,
tld_id INT,
class_id INT,
addr INET,
alive BOOLEAN DEFAULT TRUE,
PRIMARY KEY (id),
CONSTRAINT fk1 FOREIGN KEY (domain_id) REFERENCES domain (id) MATCH SIMPLE,
CONSTRAINT fk2 FOREIGN KEY (tld_id) REFERENCES tld (id) MATCH SIMPLE,
UNIQUE(name, domain_id, class_id, addr)
);
CREATE TABLE record_history (
id SERIAL,
record_id INT,
history_type record_history_type,
stamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT fk1 FOREIGN KEY (record_id) REFERENCES record (id) MATCH SIMPLE,
PRIMARY KEY(id)
);
CREATE TEMP TABLE record_temp (
name VARCHAR(255),
domain_id INT,
tld_id INT,
class_id INT,
addr INET,
UNIQUE(name, domain_id, class_id, addr)
)
ON COMMIT DROP;
record_history is populated using functions and triggers and is populating how I expect it to; below are these triggers:
CREATE OR REPLACE FUNCTION add_insert_record_history()
RETURNS TRIGGER AS $$
BEGIN
    INSERT INTO record_history (record_id, history_type) VALUES (NEW.id, 'added');
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION add_update_record_history()
RETURNS TRIGGER AS $$
BEGIN
    IF NEW.alive = OLD.alive THEN
        RETURN NEW;
    END IF;
    IF NEW.alive THEN
        INSERT INTO record_history (record_id, history_type) VALUES (NEW.id, 'added');
    END IF;
    IF NOT NEW.alive THEN
        INSERT INTO record_history (record_id, history_type) VALUES (NEW.id, 'deleted');
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger names and AFTER timing are assumptions; the original post omitted the CREATE TRIGGER headers.
CREATE TRIGGER record_insert_history AFTER INSERT
ON record FOR EACH ROW EXECUTE PROCEDURE
add_insert_record_history();

CREATE TRIGGER record_update_history AFTER UPDATE
ON record FOR EACH ROW EXECUTE PROCEDURE
add_update_record_history();
I seem to have it working how I want with the following query, which I feel is incredibly unoptimized:
WITH deltas AS (
SELECT o, n FROM (
SELECT id, name, domain_id, class_id, addr FROM record WHERE tld_id = $1
) AS o FULL OUTER JOIN record_temp n
ON (
o.name = n.name AND
o.domain_id = n.domain_id AND
o.class_id = n.class_id AND
o.addr = n.addr
)
WHERE (o.name, o.domain_id, o.class_id, o.addr)
IS DISTINCT FROM (n.name, n.domain_id, n.class_id, n.addr)
), mark_dead AS (
UPDATE record SET alive = FALSE
WHERE id IN (
SELECT (o).id FROM deltas WHERE (o).id IS NOT NULL
) RETURNING *
), mark_alive AS (
UPDATE record SET alive = TRUE
WHERE alive = FALSE AND id IN (
SELECT id FROM (
SELECT id, name, domain_id, class_id, addr FROM record WHERE tld_id = $1
) AS o INNER JOIN record_temp n
ON (
o.name = n.name AND
o.domain_id = n.domain_id AND
o.class_id = n.class_id AND
o.addr = n.addr
)
) RETURNING *
)
INSERT INTO record (name, domain_id, tld_id, class_id, addr)
SELECT (n).name, (n).domain_id, (n).tld_id, (n).class_id, (n).addr
FROM deltas WHERE
(n).name IS NOT NULL AND
(n).domain_id IS NOT NULL AND
(n).tld_id IS NOT NULL AND
(n).class_id IS NOT NULL AND
(n).addr IS NOT NULL
;