Reference lookup replace in Oracle - sql

I'm looking for an approach to a hash-and-replace algorithm in Oracle.
I have a customer table with a customer ID and customer phone number, and another table containing a list of reference phone number values. I want to use these reference values to replace the phone numbers consistently in the main tables.
The masking must be consistent when applying this replace logic, i.e. wherever the same customer appears in a different table, the phone number replacement should be the same.
Main table name 1 :- CUS_PHONE_NUM
Main table name 2 :- CUS_PHONE_NUM_2
Reference table name :- REF_PHONE_NUMBER
I tried the approach below, but performance is very poor — for 20,000 customers it took 18 minutes. Can you please suggest a better approach to meet this requirement?
-- Schema and sample data for the phone-number masking question (Oracle).
-- NOTE(review): phone numbers are stored as NUMBER(12), so literals such as
-- 0470134321 lose their leading zero (stored as 470134321). If leading zeros
-- matter, these columns should be VARCHAR2 — confirm against the real schema.
DROP TABLE CUS_PHONE_NUM ;
DROP TABLE CUS_PHONE_NUM_2 ;   -- was CUS_PHONE_NUM2: did not match the table created below
DROP TABLE REF_PHONE_NUMBER ;

-- Main table 1
CREATE TABLE CUS_PHONE_NUM (
    CUS_ID       VARCHAR2(09) ,
    PHONE_NUMBER NUMBER(12) );

-- Main table 2
CREATE TABLE CUS_PHONE_NUM_2 (
    CUS_ID       VARCHAR2(09) ,
    PHONE_NUMBER NUMBER(12) );

-- Reference table: the pool of replacement values
CREATE TABLE REF_PHONE_NUMBER ( REF_PHONE_NUMBER_VALUE NUMBER(12));

-- Explicit column lists so the inserts survive later schema changes
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134321) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134322) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134323) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134324) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134325) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134326) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134327) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134328) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134329) ;
INSERT INTO REF_PHONE_NUMBER (REF_PHONE_NUMBER_VALUE) VALUES ( 0470134320) ;

TRUNCATE TABLE CUS_PHONE_NUM ;
COMMIT;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795401' , 0426881030 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795402' , 0426881031 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795403' , 0426881032 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795404' , 0426881033 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795405' , 0426881034 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795406' , 0426881035 ) ;
INSERT INTO CUS_PHONE_NUM (CUS_ID, PHONE_NUMBER) VALUES ( '401795407' , 0426881036 ) ;

TRUNCATE TABLE CUS_PHONE_NUM_2 ;
COMMIT;
-- Overlapping customers, to demonstrate that masking is consistent across tables
INSERT INTO CUS_PHONE_NUM_2 (CUS_ID, PHONE_NUMBER) VALUES ( '401795401' , 0426881030 ) ;
INSERT INTO CUS_PHONE_NUM_2 (CUS_ID, PHONE_NUMBER) VALUES ( '401795403' , 0426881032 ) ;
INSERT INTO CUS_PHONE_NUM_2 (CUS_ID, PHONE_NUMBER) VALUES ( '401795405' , 0426881034 ) ;
INSERT INTO CUS_PHONE_NUM_2 (CUS_ID, PHONE_NUMBER) VALUES ( '401795407' , 0426881036 ) ;
COMMIT ;
-- SQL1: replace phone numbers with reference values for CUS_PHONE_NUM.
-- FIX: ORA_HASH(expr, max_bucket) returns bucket numbers 0..max_bucket
-- INCLUSIVE, so max_bucket must be count(*) - 1 to line up with the 0-based
-- row_number() below. With count(*) (as before), any row hashing into the
-- top bucket matched no reference row and silently dropped out of the join.
WITH REF_PHONE_NUMBER_HASH AS
(
    SELECT REF_PHONE_NUMBER_VALUE ,
           -- 0-based bucket id for each reference value
           ROW_NUMBER() OVER (ORDER BY REF_PHONE_NUMBER_VALUE) - 1 AS REF_PHONE_NUMBER_VALUE_HASH,
           COUNT(*) OVER () - 1 AS MAX_BUCKETS
    FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(
    SELECT CUS_ID,
           PHONE_NUMBER
    FROM CUS_PHONE_NUM
)
-- The same PHONE_NUMBER always hashes to the same bucket, so the replacement
-- is consistent wherever the customer appears.
SELECT c.*, n.* , ORA_HASH(c.PHONE_NUMBER, n.MAX_BUCKETS)
FROM CUS_PHONE_NUM_HASH c
JOIN REF_PHONE_NUMBER_HASH n
  ON ORA_HASH(c.PHONE_NUMBER, n.MAX_BUCKETS) = n.REF_PHONE_NUMBER_VALUE_HASH;
-- SQL2: the same masking query for CUS_PHONE_NUM_2.
-- FIX (same as SQL1): ORA_HASH(expr, max_bucket) returns 0..max_bucket
-- inclusive, so max_bucket must be count(*) - 1; otherwise rows hashing to
-- the top bucket are silently lost in the inner join.
WITH REF_PHONE_NUMBER_HASH AS
(
    SELECT REF_PHONE_NUMBER_VALUE ,
           ROW_NUMBER() OVER (ORDER BY REF_PHONE_NUMBER_VALUE) - 1 AS REF_PHONE_NUMBER_VALUE_HASH,
           COUNT(*) OVER () - 1 AS MAX_BUCKETS
    FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(
    SELECT CUS_ID,
           PHONE_NUMBER
    FROM CUS_PHONE_NUM_2
)
SELECT c.*, n.* , ORA_HASH(c.PHONE_NUMBER, n.MAX_BUCKETS)
FROM CUS_PHONE_NUM_HASH c
JOIN REF_PHONE_NUMBER_HASH n
  ON ORA_HASH(c.PHONE_NUMBER, n.MAX_BUCKETS) = n.REF_PHONE_NUMBER_VALUE_HASH;

Try moving max_buckets outside into its own CTE. It changed the explain plan on your test data, but I don't know how it will behave on your real data, and I don't know your indexes either.
-- Same query with the bucket count computed once in its own CTE (changes the
-- plan), plus the same off-by-one fix: ORA_HASH(expr, N) returns 0..N
-- inclusive, so N must be count(*) - 1 to cover exactly the reference rows.
WITH REF_PHONE_NUMBER_HASH AS
(
    SELECT REF_PHONE_NUMBER_VALUE ,
           ROW_NUMBER() OVER (ORDER BY REF_PHONE_NUMBER_VALUE) - 1 AS REF_PHONE_NUMBER_VALUE_HASH
    FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(
    SELECT CUS_ID,
           PHONE_NUMBER
    FROM CUS_PHONE_NUM
),
BUCKET AS
(
    SELECT COUNT(*) - 1 AS MAX_BUCKETS FROM REF_PHONE_NUMBER
)
SELECT c.*, n.* , ORA_HASH(c.PHONE_NUMBER, b.MAX_BUCKETS)
FROM CUS_PHONE_NUM_HASH c
CROSS JOIN BUCKET b
JOIN REF_PHONE_NUMBER_HASH n
  ON ORA_HASH(c.PHONE_NUMBER, b.MAX_BUCKETS) = n.REF_PHONE_NUMBER_VALUE_HASH;
Maybe you can also think about hard-coding the max_buckets value. After hard-coding it, you can create an index on the expression:
-- Function-based index on the hashed phone number (hard-coded bucket count)
CREATE INDEX idx_cus_phone_num_01
    ON cus_phone_num ( ORA_HASH(phone_number, 255) );

Related

Inserting values into a temp table in a stored procedure by reading from XML

I am working on a stored procedure in SQL Server and as part of it, I am trying to insert values into a table by reading from XML, and then fill one more column using a window function.
-- Step 1: load the raw rows from the XML; Rank is left NULL because the
-- column list omits it.
-- NOTE(review): '#idoc' is presumably '@idoc', the int document handle from
-- sp_xml_preparedocument, mangled by formatting — OPENXML requires an
-- @ variable, not a temp-table name; confirm against the original code.
INSERT INTO #Table (Id, LinkedId, OrderNbr)
SELECT Id, LinkedId, OrderNbr
FROM OPENXML (#idoc, 'Request/Details', 2)
WITH
(
Id BIGINT 'Id',
LinkedId BIGINT 'LinkedId',
OrderNbr INT 'OrderNbr'
)
--
-- Step 2: re-reads #Table and INSERTs every row again (now with Rank filled),
-- which is exactly why the rows are duplicated — this is the question's bug.
INSERT INTO #Table (Id, LinkedId, OrderNbr, Rank)
SELECT
Id,
LinkedId,
OrderNbr,
RANK() OVER (PARTITION BY LinkedId ORDER BY OrderNbr) AS Rank
FROM
#Table
If I do it this way, my rows get repeated: the first insert fills Id, LinkedId and OrderNbr with Rank as NULL, and the second insert repeats the rows of the first insert while also filling the Rank column. How do I avoid this?
You should combine the queries into a single INSERT, like this:
-- Single INSERT: compute the rank while the rows are read from the XML, so
-- the temp table is filled exactly once and no rows are duplicated.
with q as
(
    SELECT Id, LinkedId, OrderNbr
    -- @idoc is the int document handle returned by sp_xml_preparedocument
    -- (was '#idoc', which is a temp-table name and not valid here)
    FROM OPENXML (@idoc, 'Request/Details', 2)
    WITH
    (
        Id BIGINT 'Id',
        LinkedId BIGINT 'LinkedId',
        OrderNbr INT 'OrderNbr'
    )
)
INSERT INTO #Table (Id, LinkedId, OrderNbr, Rank)
SELECT
    Id,
    LinkedId,
    OrderNbr,
    RANK() OVER (PARTITION BY LinkedId ORDER BY OrderNbr) AS Rank
FROM q

How to select all records that do not have a *specific* record in a sub table

In SQL Server I am trying to select parcels that do not have a particular movement. Parcels can have zero or more movements. Movements are described in a third table.
I have some parcels
-- Fixture: parcels under test (ForeignBarcode is NULL for local-only parcels)
IF OBJECT_ID('tempdb..#Parcels', 'U') IS NOT NULL
    DROP TABLE #Parcels;

CREATE TABLE #Parcels (
    Id             int,
    LocalBarcode   nvarchar(50),
    ForeignBarcode nvarchar(50)
);

INSERT INTO #Parcels (Id, LocalBarcode, ForeignBarcode)
VALUES (1, 'Sabc1', NULL),
       (2, 'Sabc2', NULL),
       (3, 'Sabc3', 'def1'),
       (4, 'xabc',  NULL);
Associated with parcels are movements
-- Fixture: zero or more movements per parcel (parcel 4 has none)
IF OBJECT_ID('tempdb..#Movements', 'U') IS NOT NULL
    DROP TABLE #Movements;

CREATE TABLE #Movements (
    ParcelId     int,
    MovementCode nvarchar(3)
);

INSERT INTO #Movements (ParcelId, MovementCode)
VALUES (1, 'MV1'),
       (2, 'MV1'),
       (2, 'MV2'),
       (2, 'MV3'),
       (3, 'MV1'),
       (3, 'MV2'),
       (3, 'MV3');
The movements are described in detail in a third table
-- Fixture: lookup table describing each movement code
IF OBJECT_ID('tempdb..#MovementDescriptions', 'U') IS NOT NULL
    DROP TABLE #MovementDescriptions;

CREATE TABLE #MovementDescriptions (
    MovementCode nvarchar(3),
    MovementDesc nvarchar(4)
);

INSERT INTO #MovementDescriptions (MovementCode, MovementDesc)
VALUES ('MV1', 'Mov1'),
       ('MV2', 'Mov2'),
       ('MV3', 'Mov3');
I would like all parcels
that have no foreign barcode
and local barcode starts with S
and parcel has no Mov2 movement (the parcel can have other movements or no movements at all)
So for the above sample data I am expecting parcel Sabc1
Here is my attempt (which does not work)
-- The asker's failing attempt.
-- Why it fails: the LEFT JOIN to #MovementDescriptions is restricted to
-- MovementDesc = 'Mov2', so for any NON-Mov2 movement MD comes back NULL and
-- the row passes the "MD.MovementCode IS NULL" filter. A parcel that HAS a
-- Mov2 movement (e.g. parcel 2) still qualifies through its other movement
-- rows, so it is wrongly returned. Filtering per movement row cannot express
-- "no Mov2 movement exists for this parcel" — that needs NOT EXISTS.
WITH ParcelsWithNoForeignBarcodeAndNoMove2
AS (
SELECT
P.Id AS Id
,P.LocalBarcode AS LocalBarcode
,M.MovementCode
,MD.MovementDesc
-- numbering collapses the one-row-per-movement multiplication below
,ROW_NUMBER() OVER (
PARTITION BY P.Id
ORDER BY P.LocalBarcode
) AS [RowNumber]
FROM #Parcels P
LEFT JOIN #Movements M ON M.ParcelId = P.Id
LEFT JOIN #MovementDescriptions MD ON MD.MovementCode = M.MovementCode AND MD.MovementDesc = 'Mov2'
WHERE
UPPER(P.LocalBarcode) LIKE 'S%'
AND P.ForeignBarcode IS NULL
-- NULL here only means "THIS movement row is not Mov2", not
-- "the parcel has no Mov2 movement at all"
AND MD.MovementCode IS NULL
)
SELECT
S.Id
,S.LocalBarcode
FROM ParcelsWithNoForeignBarcodeAndNoMove2 S
WHERE
S.RowNumber = 1
You're looking for not exists. Presumably also from your use of upper your collation is case-sensitive. From your simple description, you need:
-- Parcels with no foreign barcode, a local barcode starting with 'S', and no
-- MV2 movement. NOT EXISTS stops at the first matching movement.
select *
from #Parcels p
where p.ForeignBarcode is null
and upper(p.LocalBarcode) like 'S%'
and not exists (
    select * from #Movements m
    -- was 'Mv2': the data stores 'MV2', so under the case-sensitive
    -- collation this answer assumes, 'Mv2' would never match and every
    -- parcel would be returned
    where m.ParcelId = p.Id and m.MovementCode = 'MV2'
)
-- NOTE(review): this alternative is flawed. It ignores the barcode filters,
-- excludes parcels with zero movements (the INNER JOIN drops them), and it
-- requires "exactly one non-MV2 movement" rather than "no MV2 movement" — a
-- parcel with MV2 plus one other movement would still be returned. It only
-- appears to work on this particular sample data.
SELECT P.* FROM
#Parcels P
INNER JOIN (
SELECT ParcelId, COUNT(MovementCode)MovementCode FROM #Movements WHERE MovementCode!='MV2' GROUP BY ParcelId HAVING COUNT(MovementCode)=1
)T ON P.ID=T.ParcelId

Rownumber to select non duplicate or distinct rows from a table. Invalid name ''

I am trying to select non-duplicate rows from emp_demo2 table:
Table:
-- Sample employee data; emps 5 and 3 are deliberately inserted twice
-- (with different joining dates) to create duplicates.
CREATE TABLE Emp_demo3 (
    emp_ID      INT,
    emp_Name    NVARCHAR(50),
    emp_sal_K   INT,
    emp_manager INT,
    joining_date date,
    last_time   date)
GO
INSERT INTO Emp_demo3 (emp_ID, emp_Name, emp_sal_K, emp_manager, joining_date, last_time)
VALUES (1, 'Ali',  200,  2,    '2010-01-28', '2015-05-09'),
       (2, 'Zaid', 770,  4,    '2008-01-28', '2015-05-09'),
       (3, 'Mohd', 1140, 2,    '2007-01-28', '2015-05-09'),
       (4, 'LILY', 770,  NULL, '2013-01-28', '2015-05-09'),
       (5, 'John', 1240, 6,    '2016-01-28', '2015-05-09'),
       (6, 'Mike', 1140, 4,    '2018-01-28', '2015-05-09'),
       (5, 'John', 1240, 6,    '2017-01-28', '2015-05-09'),
       (3, 'Mohd', 1140, 2,    '2010-01-28', '2015-05-09')
Code to add column date_difference
-- DATEDIFF(day, ...) returns an INT (a count of days), so the new column
-- must be an integer type. Declaring it DATE is what caused
-- "Operand type clash: int is incompatible with date".
alter table emp_demo3
add date_diff int
go
update emp_demo3 set date_diff = datediff(day, joining_date, last_time)
I am trying to calculate the difference in days between two dates. Please note that this is just a random table I created; I cannot change the date formats in my original table. So please tell me how to get the date difference with the existing date formats.
Error
Operand type clash: int is incompatible with date
You can use ROW_NUMBER to partition by the columns that are the same. Then it is only necessary to filter the rows which have rownum = 1:
An example:
-- Demo: deduplicate by (name, salary, manager) using ROW_NUMBER
DECLARE @Emp_demo2 TABLE (
    emp_ID      INT,
    emp_Name    NVARCHAR(50),
    emp_sal_K   INT,
    emp_manager INT)

INSERT INTO @Emp_demo2 (emp_ID, emp_Name, emp_sal_K, emp_manager)
VALUES (1, 'Ali',  200,  2),
       (2, 'Zaid', 770,  4),
       (3, 'Mohd', 1140, 2),
       (4, 'LILY', 770,  NULL),
       (5, 'John', 1240, 6),
       (6, 'Mike', 1140, 4),
       (5, 'John', 1240, 6),
       (3, 'Mohd', 1140, 2)

-- Number the rows inside each (name, salary, manager) group, then keep
-- only the first row of each group.
;WITH numbered AS (
    SELECT
        e.emp_ID
        , e.emp_Name
        , e.emp_sal_K
        , e.emp_manager
        , ROW_NUMBER() OVER (PARTITION BY e.emp_Name, e.emp_sal_K, e.emp_manager
                             ORDER BY e.emp_Name) AS RowNum
    FROM @Emp_demo2 AS e
)
SELECT *
FROM numbered
WHERE numbered.RowNum = 1
ORDER BY numbered.emp_ID
Following query is to find duplicates
-- Rows whose emp_ID occurs more than once
with counted as
(
    select e.*,
           count(*) over (partition by e.emp_id) as total
    from Emp_demo2 e
)
select distinct
    emp_ID,
    emp_Name,
    emp_sal_K,
    emp_manager
from counted
where total > 1
order by emp_ID
If you want only distinct values then you can use following
-- All distinct (id, name, salary, manager) combinations
select distinct emp_ID, emp_Name, emp_sal_K, emp_manager
from Emp_demo2
order by emp_ID
METHOD 1:
SELECT DISTINCT * FROM #Emp_demo2
METHOD 2:
-- Keep one row per EMP_ID via ROW_NUMBER
;WITH deduped AS (
    SELECT e.*, ROW_NUMBER() OVER (PARTITION BY EMP_ID ORDER BY EMP_ID) AS ROWNUM
    FROM #EMP_DEMO2 E
)
SELECT *
FROM deduped
WHERE ROWNUM = 1
METHOD 3:
By using GROUP BY we can also avoid these duplicates.
Hope this works fine for your case
you can use this query,
select dedup.*
from (
    select ROW_NUMBER() over (partition by emp_id order by emp_id) AS Rownum,
           e.*
    from Emp_demo2 as e
) as dedup
where dedup.Rownum = 1

How to secure table for avoid duplicate data

I can't work out how to protect my table against duplicate combinations of attributes_positions. The best way to show you what I mean is the following image.
The column id_combination represents the number of the combination. A combination consists of attributes_positions, so a combination is a sequence of attributes_positions.
I would now like to protect the table from inserting exactly the same sequence of attributes_positions.
Of course, if an already-inserted combination contains one additional attributes_positions, or one fewer than the combination being inserted, that is OK.
The image shows the difference between a duplicate and a non-duplicate combination.
Is there some way I can do that? Maybe something like a 'before update' trigger — but how would I implement it for this example? I'm not very good with advanced SQL.
The database where I am trying to protect the table is PostgreSQL 9.4.
I would be grateful for any help.
-- The data
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
);
INSERT INTO theset(set_id, set_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four');
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
INSERT INTO theitem(item_id, item_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four'), ( 5, 'five');
CREATE TABLE set_item (
set_id integer NOT NULL REFERENCES theset (set_id)
, item_id integer NOT NULL REFERENCES theitem(item_id)
, PRIMARY KEY (set_id,item_id)
);
-- swapped index is indicated for junction tables
CREATE UNIQUE INDEX ON set_item(item_id, set_id);
INSERT INTO set_item(set_id,item_id) VALUES
(1,1), (1,2), (1,3), (1,4),
(2,1), (2,2), (2,3), -- (2,4),
(3,1), (3,2), (3,3), (3,4), (3,5),
(4,1), (4,2), (4,4);
-- Trigger function: reject any change to set_item that would leave some OTHER
-- set with exactly the same members as the affected set. Two sets are equal
-- when their symmetric difference is empty, i.e. no item occurs in exactly
-- one of the two sets (GROUP BY item_id HAVING COUNT(*) = 1 finds such items).
CREATE FUNCTION set_item_unique_set( ) RETURNS TRIGGER AS
$func$
BEGIN
IF EXISTS ( -- other set
SELECT * FROM theset oth
-- WHERE oth.set_id <> NEW.set_id -- only for insert/update
-- pick the relevant row id: OLD for DELETE, NEW otherwise
WHERE TG_OP = 'DELETE' AND oth.set_id <> OLD.set_id
OR TG_OP <> 'DELETE' AND oth.set_id <> NEW.set_id
-- count (common) members in the two sets
-- items not in common will have count=1
AND NOT EXISTS (
-- NOTE(review): NEW.set_id is NULL when TG_OP = 'DELETE', so this inner
-- comparison likely misbehaves for deletes — confirm (see the note below
-- about the last-element-removed case).
SELECT item_id FROM set_item x1
WHERE (x1.set_id = NEW.set_id OR x1.set_id = oth.set_id )
GROUP BY item_id
HAVING COUNT(*) = 1
)
) THEN
RAISE EXCEPTION 'Not unique set';
RETURN NULL; -- unreachable: RAISE EXCEPTION aborts the function first
ELSE
RETURN NEW;
END IF;
END;
$func$ LANGUAGE 'plpgsql'
;
-- Fires AFTER the row change so the new/removed row is visible to the
-- set comparison above; CONSTRAINT TRIGGER allows deferral if needed.
CREATE CONSTRAINT TRIGGER check_item_set_unique
AFTER UPDATE OR INSERT OR DELETE
-- BEFORE UPDATE OR INSERT
ON set_item
FOR EACH ROW
EXECUTE PROCEDURE set_item_unique_set()
;
-- Test it: sets currently present are {1,2,3,4}, {1,2,3}, {1,2,3,4,5}, {1,2,4}
INSERT INTO set_item(set_id,item_id) VALUES(4,5); -- success: set 4 becomes {1,2,4,5}, still unique
INSERT INTO set_item(set_id,item_id) VALUES(2,4); -- failure: set 2 would equal set 1
DELETE FROM set_item WHERE set_id=1 AND item_id= 4; -- failure: set 1 would equal set 2
Note: There should also be a trigger for the DELETE case.
UPDATE: added handling of DELETE
(the handling of deletes is not perfect; imagine the case where the last element from a set is removed)
My answer assumes that the target is without dupes, and that we want to insert a new set - which happens to be a duplicate. I choose the group of 4 with the id_comb of 1.
You would have to put the group of 4 into a staging table. Then, you have to pivot both staging and target horizontally - so that you get 5 columns named attr_pos1 to attr_pos5 (the biggest group in your example is 5). To pivot, you need a sequence number, which we get by using ROW_NUMBER(). That's for both tables, staging and target. Then, you pivot both. Then, you try to join pivoted staging and target on all 5 attr_pos# columns, and count the rows. If you get 0, you have no duplicates. If you get 1, you have duplicates.
Here's the whole scenario:
-- Duplicate check by horizontal pivoting: number each group's members, pivot
-- both target and staging to one row of attr_pos1..attr_pos5, then join on
-- all five columns. COUNT(*) = 0 means no duplicate; 1 means the staged
-- group already exists in the target.
-- NOTE(review): the question targets PostgreSQL 9.4, where IFNULL() does not
-- exist — COALESCE() is the ANSI equivalent and works everywhere.
WITH
-- input section: a) target table, no dupes
target(id_comb,attr_pos) AS (
SELECT 2,1
UNION ALL SELECT 2,2
UNION ALL SELECT 2,3
UNION ALL SELECT 2,4
UNION ALL SELECT 3,1
UNION ALL SELECT 3,2  -- stray trailing backslash removed: it was a syntax error
UNION ALL SELECT 3,3
UNION ALL SELECT 3,4
UNION ALL SELECT 3,5
UNION ALL SELECT 4,1
UNION ALL SELECT 4,2
UNION ALL SELECT 4,3
)
,
-- input section: b) staging, input, would be a dupe
staging(id_comb,attr_pos) AS (
SELECT 1,1
UNION ALL SELECT 1,2
UNION ALL SELECT 1,3
UNION ALL SELECT 1,4
)
,
-- query section:
-- add sequence numbers to stage and target
target_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM target
)
,
staging_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM staging
)
,
-- horizontally pivot target; NULLs become -1 so the later join can match them
target_h AS (
SELECT
id_comb
, COALESCE(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, COALESCE(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, COALESCE(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, COALESCE(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, COALESCE(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM target_s
GROUP BY id_comb
)
,
-- horizontally pivot staging the same way
staging_h AS (
SELECT
id_comb
, COALESCE(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, COALESCE(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, COALESCE(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, COALESCE(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, COALESCE(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM staging_s
GROUP BY id_comb
)
SELECT
COUNT(*)
FROM target_h
JOIN staging_h USING (
attr_pos1
, attr_pos2
, attr_pos3
, attr_pos4
, attr_pos5
);
Hope this helps ----
Marco
An interesting but not very practical solution by @wildplasser. I created a script to insert sample data:
-- Benchmark generator: creates every non-empty subset of {1..max} as a set.
-- Each id in 1 .. 2^max - 1 is treated as a bit mask; bit b set means item
-- ma[b+1] belongs to set id, so every generated set is unique by construction.
WITH param AS (
SELECT 8 AS max
), maxarray AS (
-- the items 1..max collected into an array for positional lookup
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
-- emit item ma[mbit + 1] when bit mbit of the set id is set
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
INSERT INTO theset (set_id, set_name)
SELECT id, id::text FROM generate_series(1, (SELECT 2^max - 1 FROM param)::int8) as id
RETURNING *
), ins_set_item AS (
-- the WHERE references on ins_item/ins_set force those inserts to run first
INSERT INTO set_item (set_id, item_id)
SELECT id, item_id FROM pre WHERE (SELECT count(*) FROM ins_item) > 0 AND (SELECT count(*) FROM ins_set) > 0
RETURNING *
)
-- report how many rows each branch inserted
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
UNION ALL
SELECT
'sets_items', count(*)
FROM ins_set_item
;
When I call it with 8 (1024 rows for set_item, i.e. 2^8 - 1 sets), it runs for 21 seconds — very bad. When I turn the trigger off, it takes less than 1 millisecond.
My proposal
It is very interesting to use arrays in this case. Unfortunately, PostgreSQL does not support foreign keys for arrays, but that can be emulated with TRIGGERs. I removed the set_item table and added an items int[] column to theset:
-- The data: items, plus sets that store their members inline as an int[].
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
-- UNIQUE on the array enforces set uniqueness, provided the members are
-- always stored sorted (the BEFORE trigger below normalizes the order)
, items integer[] UNIQUE NOT NULL
);
-- GIN index supports the @>/ANY membership lookups used by the triggers
CREATE INDEX i1 ON theset USING gin (items);
-- BEFORE INSERT/UPDATE on theset: validate membership and normalize order.
CREATE OR REPLACE FUNCTION check_item_CU() RETURNS TRIGGER AS $sql$
BEGIN
    -- Reject the row if any member of NEW.items is not a known item.
    -- EXISTS stops at the first orphan instead of counting all of them.
    IF EXISTS (
        SELECT 1
        FROM unnest(NEW.items) AS u
        LEFT JOIN theitem ON (item_id = u)
        WHERE item_id IS NULL
    ) THEN
        -- NOTE(review): this silently drops the row (original behavior kept);
        -- consider RAISE EXCEPTION so callers see the rejection.
        RETURN NULL;
    END IF;
    -- Store the members sorted so the UNIQUE constraint on items treats any
    -- ordering of the same members as the same set.
    NEW.items = ARRAY(SELECT unnest(NEW.items) ORDER BY 1);
    RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_CU BEFORE INSERT OR UPDATE ON theset FOR EACH ROW EXECUTE PROCEDURE check_item_CU();
-- BEFORE DELETE/UPDATE on theitem: block the change while any set still
-- references the old item_id (emulates a foreign key into the array column).
CREATE OR REPLACE FUNCTION check_item_UD() RETURNS TRIGGER AS $sql$
BEGIN
    IF (TG_OP = 'DELETE' OR TG_OP = 'UPDATE' AND NEW.item_id != OLD.item_id)
       -- EXISTS stops at the first referencing set instead of counting them all
       AND EXISTS (SELECT 1 FROM theset WHERE OLD.item_id = ANY(items)) THEN
        RAISE EXCEPTION 'item_id % still used', OLD.item_id;
    END IF;
    -- NEW is NULL in DELETE triggers; the original "RETURN NEW" therefore
    -- returned NULL and silently cancelled every legitimate delete.
    IF TG_OP = 'DELETE' THEN
        RETURN OLD;
    END IF;
    RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_UD BEFORE DELETE OR UPDATE ON theitem FOR EACH ROW EXECUTE PROCEDURE check_item_UD();
-- Same bit-mask generator as above, now inserting each subset directly as an
-- integer[] on theset (max = 10 -> 1023 sets).
WITH param AS (
SELECT 10 AS max
), maxarray AS (
-- the items 1..max collected into an array for positional lookup
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
-- emit item ma[mbit + 1] when bit mbit of the set id is set
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), pre_arr AS (
-- collapse the (set id, item) pairs into one array per set
SELECT id, array_agg(item_id) AS items
FROM pre
GROUP BY 1
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
-- the WHERE reference on ins_item forces the item insert to run first
INSERT INTO theset (set_id, set_name, items)
SELECT id, id::text, items FROM pre_arr WHERE (SELECT count(*) FROM ins_item) > 0
RETURNING *
)
-- report how many rows each branch inserted
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
;
This variant run less than 1ms

generate custom id with conditions while inserting records to sql server

How can I generate custom ids upon inserting records, based on a condition? For example, I have a donor database program which accepts donations from non-alumni and alumni. The custom id for an alumnus should start at A00001, and for a non-alumnus at N00001. Note that the records to be inserted will come from another table, whose records were loaded from an Excel file.
Any help or clarifications would be appreciated.
Something like this perhaps?
-- NOTE(review): '#Table' here is presumably '@Table' (a table variable)
-- mangled by formatting — DECLARE requires an @ name; confirm before running.
DECLARE #Table TABLE ( ID VARCHAR(20) );
-- Non alumni
INSERT INTO #Table
( ID )
VALUES ( 'N00011' )
INSERT INTO #Table
( ID )
VALUES ( 'N00012' )
INSERT INTO #Table
( ID )
VALUES ( 'N00013' )
-- Alumni
INSERT INTO #Table
( ID )
VALUES ( 'A00011' )
INSERT INTO #Table
( ID )
VALUES ( 'A00012' )
INSERT INTO #Table
( ID )
VALUES ( 'A00013' )
-- Insert Alumni: strip the leading letter, take MAX + 1, left-pad to 5
-- digits, and re-prefix with 'A'.
-- NOTE(review): MAX-then-INSERT is race-prone under concurrent writers —
-- two sessions can read the same MAX and generate the same id; a SEQUENCE
-- or IDENTITY plus a computed prefix would be safer. Confirm concurrency
-- requirements before using this in production.
INSERT INTO #Table
( ID
)
SELECT 'A' + RIGHT('00000'
+ CAST(MAX(RIGHT(ID, LEN(ID) - 1)) + 1 AS VARCHAR(10)),
5)
FROM #Table
WHERE LEFT(ID, 1) = 'A'
-- Insert Non Alumni: same scheme with the 'N' prefix (same race caveat)
INSERT INTO #Table
( ID
)
SELECT 'N' + RIGHT('00000'
+ CAST(MAX(RIGHT(ID, LEN(ID) - 1)) + 1 AS VARCHAR(10)),
5)
FROM #Table
WHERE LEFT(ID, 1) = 'N'