PostgreSQL: Select field value and update with next value from second table - sql

-- Schema for the word-list example: each t_b row points at one t_a row.
-- Fixed from the original: removed the stray "(" before t_a(a_id),
-- dropped the dangling comma, and terminated both statements with ";".
CREATE TABLE t_a
(
a_id SERIAL PRIMARY KEY,
str VARCHAR(50)
);
CREATE TABLE t_b
(
b_id SERIAL PRIMARY KEY,
a_id_fk INTEGER REFERENCES t_a(a_id)
);
Using the above tables, I want to SELECT a_id_fk FROM t_b WHERE b_id = 1 and then update a_id_fk with the next a_id in the sequence, but if I'm at the end of the available a_id's I cycle back to the first one. All this with multiple people querying/updating that specific row from t_b.
If it helps, the scenario I'm working on is multiple sites share a common list of words, but as each user for each sites grabs a word that sites index within the word list is moved to the next word until it hits the end then it loops back to the beginning.
Is there a way to do this in a single query? If not, what would be the best way to handle this? I can handle most of the logic, it's looping back when I run out of ids that has me stumped.

You could use something complicated like
-- Advance t_b.a_id_fk to the next existing a_id, wrapping around to the
-- smallest a_id when the current value is already the largest.
UPDATE t_b
SET a_id_fk = COALESCE(
-- the next a_id after the current one, if any
(SELECT MIN(a_id) FROM t_a WHERE a_id > t_b.a_id_fk),
-- otherwise cycle back to the first a_id
(SELECT MIN(a_id) FROM t_a))
WHERE b_id = :b_id
but if I was given a requirement like that I'd probably maintain an auxiliary table that maps an a_id to the next a_id in the cycle...

This one is a bit more elegant (IMHO) than @pdw's solution:
-- Demo: rebuild a scratch schema, populate t_a/t_b with ten rows each,
-- then advance a_id_fk for two chosen t_b rows, cycling past the end.
DROP SCHEMA tmp CASCADE;
CREATE SCHEMA tmp ;
SET search_path=tmp;
CREATE TABLE t_a
( a_id SERIAL PRIMARY KEY
, str VARCHAR(50)
);
CREATE TABLE t_b
( b_id SERIAL PRIMARY KEY
, a_id_fk INTEGER REFERENCES t_a(a_id)
);
INSERT INTO t_a(str)
SELECT 'Str_' || gs::text
FROM generate_series(1,10) gs
;
INSERT into t_b(a_id_fk)
SELECT a_id FROM t_a
ORDER BY a_id
;
-- EXPLAIN ANALYZE
-- src pairs every a_id with its successor (nxt) and the first a_id (frst);
-- COALESCE(nxt, frst) below implements the wrap-around.
WITH src AS (
SELECT a_id AS a_id
-- running MIN over an ascending ORDER BY is always the global minimum
, min(a_id) OVER (order BY a_id) AS frst
-- NULL on the last row; COALESCE turns that into the wrap-around
, lead(a_id) OVER (order BY a_id) AS nxt
FROM t_a
)
UPDATE t_b dst
SET a_id_fk = COALESCE(src.nxt, src.frst)
FROM src
WHERE dst.a_id_fk = src.a_id
AND dst.b_id IN ( 3, 10)
;
SELECT * FROM t_b
ORDER BY b_id
;
Result:
DROP SCHEMA
CREATE SCHEMA
SET
CREATE TABLE
CREATE TABLE
INSERT 0 10
INSERT 0 10
UPDATE 2
b_id | a_id_fk
------+---------
1 | 1
2 | 2
3 | 4
4 | 4
5 | 5
6 | 6
7 | 7
8 | 8
9 | 9
10 | 1
(10 rows)

Related

How to insert long-format data into two separate tables using SQL?

I have selected the following data that I want to insert into the database.
Letter
Value
A
1
A
2
B
3
B
4
Since there is a repetition of "A" and "B" in this format, I want to split data into two separate tables: table1 and table2.
table1:
ID
Letter
1
A
2
B
ID here is automatically inserted by database (using a sequence).
table2:
table1_id
Value
1
1
1
2
2
3
2
4
In this particular example, I don't gain anything on storage but it illustrates in the best way what problem I have encountered.
How can I use SQL or PL/SQL to insert data into table1 and table2?
First populate table1 from the source
-- Populate the parent with distinct letters; Oracle requires INSERT INTO
-- (the original omitted the mandatory INTO keyword).
insert into table1(Letter)
select distinct Letter
from srcTable;
Then load data from the source decoding letter to id
-- Load the child table, translating each letter to its generated id;
-- INTO added (mandatory in Oracle INSERT syntax).
insert into table2(table1_id, Value)
select t1.id, src.value
from srcTable src
join table1 t1 on t1.Letter = src.Letter;
You may use a multitable insert with a workaround to get a stable nextval from the sequence. Since nextval is evaluated for each row regardless of the WHEN condition, it is not sufficient to use it directly inside VALUES.
-- Oracle multitable INSERT: the first row of each letter group (rn = 1)
-- creates the parent row in l; every row goes into child t. A single
-- sequence value per letter is shared via MAX(...) OVER (PARTITION BY letter).
insert all
when rn = 1 then into l(id, val) values(seq, letter)
when rn > 0 then into t(l_id, val) values(seq, val)
-- a: the raw (letter, value) source rows
with a(letter, val) as (
select 'A', 1 from dual union all
select 'A', 2 from dual union all
select 'B', 3 from dual union all
select 'B', 4 from dual union all
select 'C', 5 from dual
)
-- b: attach any pre-existing parent id and number the rows per letter
, b as (
select
a.*,
l.id as l_id,
row_number() over(partition by a.letter order by a.val asc) as rn
from a
left join l
on a.letter = l.val
)
select
b.*,
-- one stable sequence value per letter: reuse l_id when the parent already
-- exists, otherwise draw nextval exactly once (only on the rn = 1 row)
max(decode(rn, 1, coalesce(
l_id,
extractvalue(
/*Hide the reference to the sequence due to restrictions
of multitable insert*/
dbms_xmlgen.getxmltype('select l_sq.nextval as seq from dual')
, '/ROWSET/ROW/SEQ/text()'
) + 0
))
) over(partition by b.letter) as seq
from b
select *
from l
ID | VAL
-: | :--
1 | A
2 | B
3 | C
select *
from t
L_ID | VAL
---: | --:
1 | 1
1 | 2
2 | 3
2 | 4
3 | 5
db<>fiddle here
Principally you need to produce an ID value for the table1 row to be inserted into table2. For this, you can use an INSERT ... RETURNING ID INTO v_id statement after creating the tables with suitable constraints, especially unique ones such as PRIMARY KEY and UNIQUE:
-- Parent: one row per distinct letter; the UNIQUE constraint rejects repeats.
CREATE TABLE table1( ID INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, letter VARCHAR2(1) NOT NULL );
ALTER TABLE table1 ADD CONSTRAINT uk_tab1_letter UNIQUE(letter);
-- Child: each row references its parent letter through table1_id.
CREATE TABLE table2( ID INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, table1_id INT, value INT );
ALTER TABLE table2 ADD CONSTRAINT fk_tab2_tab1_id FOREIGN KEY(table1_id) REFERENCES table1 (ID);
and adding exception handling in order not to insert repeating letters to the first table. Then use the following code block ;
-- Insert one (letter, value) pair: the letter goes into table1 unless it
-- already exists, then the value is attached to that letter's id in table2.
DECLARE
v_id table1.id%TYPE;
v_letter table1.letter%TYPE := 'A';
v_value table2.value%TYPE := 1;
BEGIN
BEGIN
INSERT INTO table1(letter) VALUES(v_letter) RETURNING ID INTO v_id;
-- Only swallow the duplicate-letter violation; the original's
-- WHEN OTHERS THEN NULL silently hid every unrelated error as well.
EXCEPTION WHEN DUP_VAL_ON_INDEX THEN NULL;
END;
-- Resolve the id via a lookup, since v_id is unset when the letter existed.
INSERT INTO table2(table1_id,value) SELECT id,v_value FROM table1 WHERE letter = v_letter;
COMMIT;
END;
/
and run by changing the initialized values for v_letter&v_value as 'A'&2, 'B'&1,'B'&2 ..etc respectively.
Alternatively you can convert the code block to a stored procedure or function such as
-- Procedure form of the insert logic: adds v_letter to table1 when new,
-- then records (table1_id, v_value) in table2.
CREATE OR REPLACE PROCEDURE Pr_Ins_Tabs(
v_letter table1.letter%TYPE,
v_value table2.value%TYPE
) AS
v_id table1.id%TYPE;
BEGIN
BEGIN
INSERT INTO table1(letter) VALUES(v_letter) RETURNING ID INTO v_id;
-- Narrowed from WHEN OTHERS: only a duplicate letter is expected and safe
-- to ignore; any other failure should surface to the caller.
EXCEPTION WHEN DUP_VAL_ON_INDEX THEN NULL;
END;
INSERT INTO table2(table1_id,value) SELECT id,v_value FROM table1 WHERE letter = v_letter;
COMMIT;
END;
/
in order to invoke it conveniently, such as
-- Example invocation: letter 'A' with value 2.
BEGIN
Pr_Ins_Tabs('A',2);
END;
/
Demo
PS. If your DB version is prior to 12c, create sequences (seq1 and seq2) and use seq1.nextval and seq2.nextval within the INSERT statements, since the GENERATED ALWAYS AS IDENTITY clause is not available in table-creation DDL statements.

Make sure no two rows contain identical values in Postgresql

I have a table and I want to make sure that no two rows can be alike.
So, for example, this table would be valid:
user_id | printer
---------+-------------
1 | LaserWriter
4 | LaserWriter
1 | ThinkJet
2 | DeskJet
But this table would not be:
user_id | printer
---------+-------------
1 | LaserWriter
4 | LaserWriter
1 | ThinkJet <--error (duplicate row)
2 | DeskJet
1 | ThinkJet <--error (duplicate row)
This is because the last table has two instances of 1 | ThinkJet.
So, user_id can be repeated (i.e. 1) and printer can be repeated (i.e. LaserWriter) but once a record like 1 | ThinkJet is entered once that combination cannot be entered again.
How can I prevent such occurrences in a Postgresql 11.5 table?
I would try experimenting with SQL code but alas I am still new on the matter.
Please note this is for INSERTing data into the table, not SELECTing it. Like a constraint iirc.
Thanks
Here's your script
-- Composite primary key makes each (user_id, printer) pair unique;
-- ON CONFLICT ... DO NOTHING turns duplicate inserts into silent no-ops.
ALTER TABLE tableA ADD CONSTRAINT some_constraint PRIMARY KEY(user_id,printer);
INSERT INTO tableA(user_id, printer)
VALUES
(
1,
'LaserWriter'
)
ON CONFLICT (user_id, printer)
DO NOTHING;
You can use DISTINCT. For example:
SELECT user_id, DISTINCT printer FROM my_table;
That's all. Hope it helps!
You need a series of steps (assuming there is no already assigned unique key).
Add a temporary column to make each row unique.
Assign a value to the new columns.
Remove the already existing duplicates.
Create a Unique or Primary Key on the composite columns.
Remove the temporary column.
-- Step 1: temporary column to make every row addressable.
alter table your_table add temp_unique integer unique;
-- Step 2: number the rows via an updatable cursor.
do $$
declare
row_num integer := 1;
c_assign cursor for
select temp_unique
from your_table
for update;
begin
for rec in c_assign
loop
update your_table
set temp_unique = row_num
where current of c_assign;
row_num := row_num + 1;
end loop;
end;
$$;  -- terminating semicolon was missing after the dollar-quoted body
-- Step 3: keep only the lowest-numbered row of each duplicate group.
delete from your_table ytd
where exists ( select 1
from your_table ytk
where ytd.user_id = ytk.user_id
and ytd.printer = ytk.printer
and ytd.temp_unique > ytk.temp_unique
) ;
-- Step 4: enforce uniqueness going forward; Step 5: drop the scaffold.
alter table your_table add constraint id_prt_uk unique (user_id, printer);
alter table your_table drop column temp_unique;
I found the answer. When creating the table I needed to specify the two columns as UNIQUE. Observe:
CREATE TABLE foo (user_id INT, printer VARCHAR(20), UNIQUE (user_id, printer));
Now, here are my results:
=# INSERT INTO foo VALUES (1, 'LaserWriter');
INSERT 0 1
=# INSERT INTO foo VALUES (4, 'LaserWriter');
INSERT 0 1
=# INSERT INTO foo VALUES (1, 'ThinkJet');
INSERT 0 1
=# INSERT INTO foo VALUES (2, 'DeskJet');
INSERT 0 1
=# INSERT INTO foo VALUES (1, 'ThinkJet');
ERROR: duplicate key value violates unique constraint "foo_user_id_printer_key"
DETAIL: Key (user_id, printer)=(1, ThinkJet) already exists.
=# SELECT * FROM foo;
user_id | printer
---------+-------------
1 | LaserWriter
4 | LaserWriter
1 | ThinkJet
2 | DeskJet
(4 rows)

Create primary key with two columns

I have two tables, bank_data and sec_data. Table bank_data has the columns id, date, asset, and liability. The date column is divided into quarters.
id | date | asset | liability
--------+----------+--------------------
1 | 6/30/2001| 333860 | 308524
1 | 3/31/2001| 336896 | 311865
1 | 9/30/2001| 349343 | 308524
1 |12/31/2002| 353863 | 322659
2 | 6/30/2001| 451297 | 425156
2 | 3/31/2001| 411421 | 391846
2 | 9/30/2001| 430178 | 41356
2 |12/31/2002| 481687 | 46589
3 | 6/30/2001| 106506 | 104532
3 | 3/31/2001| 104196 | 102983
3 | 9/30/2001| 106383 | 104865
3 |12/31/2002| 107654 | 105867
Table sec_data has columns of id, date, and security. I combined the two tables into a new table named new_table in R using this code:
dbGetQuery(con, "CREATE TABLE new_table
AS (SELECT sec_data.id,
bank_data.date,
bank_data.asset,
bank_data.liability,
sec_data.security
FROM bank_data,bank_sec
WHERE (bank_data.id = sec_data.id) AND
(bank_data.date = sec_data.date)")
I would like to set two primary keys (id and date) in this R code without using pgAdmin. I want to use something like Constraint bankkey Primary Key (id, date) but the AS and SELECT functions are throwing me off.
First, your query is wrong: you say table sec_data but you reference table bank_sec. Here is your query rephrased:
-- Explicit INNER JOIN on both id and date replaces the question's
-- implicit comma join (and its bank_sec/sec_data name mix-up).
CREATE TABLE new_table AS
SELECT
sec_data.id,
bank_data.date,
bank_data.asset,
bank_data.liability,
sec_data.security
FROM bank_data
INNER JOIN sec_data on bank_data.id = sec_data.id
and bank_data.date = sec_data.date
Avoid using an implicit join and use an explicit join instead. And as stated by @a_horse_with_no_name, you can't define more than one primary key on a table. What you want is a composite primary key.
Define :
is a combination of two or more columns in a table that can be used to
uniquely identify each row in the table
So you need an ALTER TABLE statement, because your CREATE statement is based on another table:
-- CREATE TABLE ... AS cannot declare keys, so add the composite PK afterwards.
ALTER TABLE new_table
ADD PRIMARY KEY (id, date);
You may run these two separate statements ( create table and Insert into )
-- Create the table with its composite PK up front, then load it.
CREATE TABLE new_table (
id int, date date, asset int, liability int, security int,
CONSTRAINT bankkey PRIMARY KEY (id, date)
) ;
-- Fixed: the join table is sec_data; the original referenced the
-- non-existent bank_sec (the very mistake this answer criticizes).
INSERT INTO new_table (id,date,asset,liability,security)
SELECT s.id,
b.date,
b.asset,
b.liability,
s.security
FROM bank_data b JOIN sec_data s
ON b.id = s.id AND b.date = s.date;
Demo
To create the primary key you desire, run the following SQL statement after your CREATE TABLE ... AS statement:
-- Named constraint added after CREATE TABLE ... AS has populated the table,
-- so the PK index does not slow down the bulk load.
ALTER TABLE new_table
ADD CONSTRAINT bankkey PRIMARY KEY (id, date);
That has the advantage that the primary key index won't slow down the data insertion.

How to give a database constraint to ensure this behavior in a table?

I have a table with five columns: A, B, C, D and E.
And I need to comply with the following restrictions:
A is the primary key.
For a B there can only be one C, ie: 1-1 ; 2-1 ; 3-2 but not 1-2.
B-C and D can take any value but can not be repeated, ie: 1-1 1 ; 1-1 2 ; not 1-1 1 again.
E can take any value.
So, considering the following order
| A | B | C | D | E |
| 1 | 1 | 1 | 1 | 1 | -> OK
| 2 | 1 | 2 | 1 | 1 | -> Should fail, because there is a B with another C, 1-2 must be 1-1.
| 3 | 1 | 1 | 2 | 1 | -> OK
| 4 | 1 | 1 | 2 | 1 | -> Should fail, because relation between B-C and D is repeated.
| 5 | 2 | 1 | 1 | 1 | -> OK
Is there any way to comply with this behavior through some constraint in the database?
Thanks!
A and E are irrelevant to the question and can be ignored.
The BCD rule can be easily solved by creating a unique index on BCD.
If for every B there can be only one C then your DB is not normalized. Create a new table with B and C. Make B the primary key or create a unique index on B. Then remove C from the original table. (At which point the unique index on BCD becomes a unique index on BD.)
Without normalizing the tables, I don't think there's any way to do it with a constraint. You could certainly do it with a trigger or with code.
For B - C rule I would create a trigger
For the B - C - D rule looks like you want unique constraint
ALTER TABLE t ADD CONSTRAINT uni_BCD UNIQUE (B,C,D);
This condition is not trivial
For a B there can only be one C, ie: 1-1 ; 2-1 ; 3-2 but not 1-2.
, since Oracle does not support CREATE ASSERTION (soon, we hope!)
Therefore, you need to involve a second table to enforce this constraint, or else a statement-level AFTER INSERT/UPDATE trigger.
What I would do is create a second table and have it maintained via an INSTEAD OF trigger on a view, and ensure all my application DML happened via the view. (You could also just create a regular trigger on the table and have it maintain the second table. That's just not my preference. I find INSTEAD OF triggers to be more flexible and more visible.)
In case it's not clear, the purpose of the second table is that it allows you to enforce your constraint as a FOREIGN KEY constraint. The UNIQUE or PRIMARY KEY constraint on the second table ensures that each value of B appears only once.
Here's sample code for that approach:
--DROP TABLE table1_parent;
--DROP TABLE table1;
-- Parent table: PRIMARY KEY (b) guarantees a single c per b; the extra
-- UNIQUE (b, c) gives the child's composite FK a legal target.
CREATE TABLE table1_parent
( b number NOT NULL,
c number NOT NULL,
constraint table1_parent_pk PRIMARY KEY (b),
constraint table1_parent_u1 UNIQUE (b, c) );
CREATE TABLE table1
(
a NUMBER NOT NULL,
b NUMBER NOT NULL,
c NUMBER NOT NULL,
d NUMBER NOT NULL,
e NUMBER NOT NULL,
CONSTRAINT table1_pk PRIMARY KEY (a), -- "A is the primary key."
CONSTRAINT table1_fk FOREIGN KEY ( b, c ) REFERENCES table1_parent ( b, c ), -- "For a B there can only be one C, ie: 1-1 ; 2-1 ; 3-2 but not 1-2."
CONSTRAINT table1_u2 UNIQUE ( b, c, d ) -- "B-C and D can take any value but can not be repeated, ie: 1-1 1 ; 1-1 2 ; not 1-1 1 again."
);
CREATE INDEX table1_n1 ON table1 (b,c); -- Always index foreign keys
CREATE OR REPLACE VIEW table1_dml_v AS SELECT * FROM table1;
-- INSTEAD OF trigger: keeps table1_parent in sync so the FK on (b, c)
-- can enforce "one c per b"; all application DML goes through the view.
CREATE OR REPLACE TRIGGER table1_dml_v_trg INSTEAD OF INSERT OR UPDATE OR DELETE ON table1_dml_v
DECLARE
l_cnt NUMBER;
BEGIN
IF INSERTING THEN
BEGIN
INSERT INTO table1_parent (b, c) VALUES ( :new.b, :new.c );
EXCEPTION
WHEN dup_val_on_index THEN
NULL; -- parent already exists, no problem
END;
INSERT INTO table1 ( a, b, c, d, e ) VALUES ( :new.a, :new.b, :new.c, :new.d, :new.e );
END IF;
IF DELETING THEN
DELETE FROM table1 WHERE a = :old.a;
-- drop the parent row once no child references (b, c) any more
SELECT COUNT(*) INTO l_cnt FROM table1 WHERE b = :old.b AND c = :old.c;
IF l_cnt = 0 THEN
DELETE FROM table1_parent WHERE b = :old.b AND c = :old.c;
END IF;
END IF;
IF UPDATING THEN
BEGIN
INSERT INTO table1_parent (b, c) VALUES ( :new.b, :new.c );
EXCEPTION
WHEN dup_val_on_index THEN
NULL; -- parent already exists, no problem
END;
-- fixed: e must be set from :new.e (the original assigned :new.d to e)
UPDATE table1 SET a = :new.a, b = :new.b, c = :new.c, d = :new.d, e = :new.e WHERE a = :old.a;
SELECT COUNT(*) INTO l_cnt FROM table1 WHERE b = :old.b AND c = :old.c;
IF l_cnt = 0 THEN
DELETE FROM table1_parent WHERE b = :old.b AND c = :old.c;
END IF;
END IF;
END;
-- Smoke tests matching the question's table: rows 2 and 4 must fail.
insert into table1_dml_v ( a,b,c,d,e) VALUES (1,1,1,1,1);
insert into table1_dml_v ( a,b,c,d,e) VALUES (2,1,2,1,1); -- fails: b=1 is already paired with c=1
insert into table1_dml_v ( a,b,c,d,e) VALUES (3,1,1,2,1);
insert into table1_dml_v ( a,b,c,d,e) VALUES (4,1,1,2,1); -- fails: (b,c,d) repeated
insert into table1_dml_v ( a,b,c,d,e) VALUES (5,2,1,1,1);
If your system supports fast refreshed materialized views, please try the following.
Since I currently don't access to a this feature, I can't verify the solution.
-- Fast-refresh materialized view of the distinct (b, c) pairs; the UNIQUE
-- constraint on b then enforces "one c per b" when the MV refreshes.
-- NOTE(review): fast refresh of an aggregate MV has extra requirements
-- (e.g. a COUNT(*) column in some versions) — verify on the target release.
create materialized view log on t with primary key;
create materialized view t_mv
refresh fast
as
select b,c
from t
group by b,c
;
alter table t_mv add constraint t_mv_uq_b unique (b);
and of course:
alter table t add constraint t_uq_a_b_c unique (b,c,d);

How to do I update existing records using a conditional clause?

I'm new to Oracle SQL so I have a question .. I have two tables, Table A and Table B .. Now Table A and Table B have the same column names, but in table A, only one column (named 'tracker') actually has data in it .. The rest of the columns in Table A are empty ... What I need to do is update each record in Table A, so that values for other columns are copied over from Table B, with the condition that the the 'tracker' columns value from Table A is matched with the 'tracker' column in Table B ..
Any ideas ?
-- Copy the non-key columns from tableB into the matching tableA row,
-- matching on the shared tracker value.
MERGE INTO tableA a
USING tableB b
ON (a.tracker=b.tracker)
WHEN MATCHED THEN UPDATE SET
a.column1=b.column1,
a.column2=b.column2;
And if exist rows in B that does not exist in A:
MERGE INTO tableA a
USING tableB b
ON (a.tracker=b.tracker)
WHEN MATCHED THEN UPDATE SET
a.column1=b.column1,
a.column2=b.column2
-- Fixed: Oracle requires parenthesized column/value lists here, and the
-- inserted values must come from the source alias b, not the target a.
WHEN NOT MATCHED THEN INSERT (a.tracker, a.column1, a.column2)
VALUES (b.tracker, b.column1, b.column2); --all columns
-- Two keyed demo tables plus nine rows each, generated with CONNECT BY LEVEL.
create table a (somedata varchar2(50), tracker number , constraint pk_a primary key (tracker));
create table b (somedata varchar2(50), tracker number, constraint pk_b primary key (tracker));
/
--insert some data
insert into a (somedata, tracker)
select 'data-a-' || level, level
from dual
connect by level < 10;
insert into b (somedata, tracker)
select 'data-b-' || -level, level
from dual
connect by level < 10;
select * from a;
SOMEDATA TRACKER
-------------------------------------------------- -------
data-a-1 1
data-a-2 2
data-a-3 3
data-a-4 4
data-a-5 5
data-a-6 6
data-a-7 7
data-a-8 8
data-a-9 9
select * from b;
SOMEDATA TRACKER
-------------------------------------------------- -------
data-b--1 1
data-b--2 2
data-b--3 3
data-b--4 4
data-b--5 5
data-b--6 6
data-b--7 7
data-b--8 8
data-b--9 9
commit;
-- Updatable inline join view: legal because b.tracker is a primary key,
-- which makes table a key-preserved in the join.
update (select a.somedata a_somedata, b.somedata b_somedata
from a
inner join
b
on a.tracker = b.tracker)
set
a_somedata = b_somedata;
select * from a; --see below for results--
--or you can do it this way: (issuing rollback to get data back in previous state)
--for a one column update, either way will work, I would prefer the former in case there is a multi-column update necessary
-- merge *as posted by another person* will also work
-- Correlated-subquery form. NOTE(review): rows of a without a match in b
-- would be set to NULL by this form — harmless here since both tables
-- share the same tracker values 1..9.
update a
set somedata = (select somedata
from b
where a.tracker = b.tracker
);
select * from A; --see below for results--
-- clean up
-- drop table a;
-- drop table b;
this will give you the results:
SOMEDATA TRACKER
-------------------------------------------------- -------
data-b--1 1
data-b--2 2
data-b--3 3
data-b--4 4
data-b--5 5
data-b--6 6
data-b--7 7
data-b--8 8
data-b--9 9
here is a link to oracle's documentation on UPDATE