Select all recursive values with one query in SQLite

I have another case.
First I created a table:
CREATE TABLE tree(
id_tree integer PRIMARY KEY AUTOINCREMENT,
id_boss TEXT,
id_child TEXT,
answ TEXT);
inserted some values:
INSERT INTO tree(id_boss,id_child,answ) VALUES('1','8','T');
INSERT INTO tree(id_boss,id_child,answ) VALUES('1',null,'F');
INSERT INTO tree(id_boss,id_child,answ) VALUES('8','P1','T');
INSERT INTO tree(id_boss,id_child,answ) VALUES('8','2','F');
INSERT INTO tree(id_boss,id_child,answ) VALUES('2','P2','T');
INSERT INTO tree(id_boss,id_child,answ) VALUES('2','P3','F');
and executed this query:
WITH RECURSIVE
ancestors(id, answ) AS (
VALUES('P3', 'T')
UNION ALL
SELECT tree.id_boss, tree.answ
FROM tree JOIN ancestors ON tree.id_child = ancestors.id
)
SELECT id FROM ancestors WHERE answ = 'T';
The result is:
P3
1
That is only for P3; I want to make a list with all recursive values, so it would look like this:
1 --- // P3
1 --- // P1
8 --- // P1
2 --- // P2
1 --- // P2

Is this somewhere near what you are looking for?
WITH seed (id, answ) as ( VALUES('P3', 'T') )
, ancestors1(id, answ) AS (
select * from seed
UNION ALL
SELECT tree.id_boss, tree.answ
FROM tree, ancestors1 where tree.id_child = ancestors1.id
)
, ancestors2(id, answ) AS (
select * from seed
UNION ALL
SELECT tree.id_boss, tree.answ
FROM tree, ancestors2 where tree.id_child = ancestors2.id
)
select id from (
select * from ancestors1
union all
select * from ancestors2
);
I had to rephrase certain things in order to get them to compile with DB2 (remove RECURSIVE, and use an implicit join).
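In SQLite itself, the recursion can also be seeded with every leaf at once and carry the leaf id along, so a single query yields the ancestor / leaf pairs listed above. This is only a sketch against the tree table from the question, and it assumes (as in the sample data) that every leaf id starts with 'P':
WITH RECURSIVE
ancestors(leaf, id, answ) AS (
-- seed: every leaf node, remembering itself in the leaf column
SELECT id_child, id_child, answ FROM tree WHERE id_child LIKE 'P%'
UNION ALL
SELECT ancestors.leaf, tree.id_boss, tree.answ
FROM tree JOIN ancestors ON tree.id_child = ancestors.id
)
SELECT id, leaf FROM ancestors WHERE answ = 'T' AND id NOT LIKE 'P%';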


How to store fetched bulk values into single variable and insert all values it on one variable using pl/sql

Note: Can somebody tell me how to store the values of multiple rows of a single column in one variable, and then, using that variable, insert all of those values into a table in a single click, with PL/SQL?
The query below returns a single column with multiple rows; I want to store those rows in some variable and insert all of the values in one single-click process. It generates an error (an image of the error is attached):
DECLARE
l_respondent_id VARCHAR(100);
BEGIN
FOR c IN (
SELECT
s.gr_number
FROM
student s
LEFT JOIN class_time ct ON ct.class_id = s.class_id
AND instr(s.class_time, ct.class_time) > 0
WHERE
upper(TRIM(ct.class_id)) = upper(TRIM(:app_user))
AND s.gr_number IS NOT NULL
AND is_active_flg = 'Y'
) LOOP
l_respondent_id := c.gr_number;
BEGIN
SELECT
s.gr_number
INTO l_respondent_id
FROM
student s
LEFT JOIN class_time ct ON ct.class_id = s.class_id
AND instr(s.class_time, ct.class_time) > 0
WHERE
upper(TRIM(ct.class_id)) = upper(TRIM(:app_user))
AND s.gr_number IS NOT NULL
AND is_active_flg = 'Y'
AND s.gr_number = c.gr_number;
EXCEPTION
WHEN no_data_found THEN
l_respondent_id := NULL;
END;
IF l_respondent_id IS NULL THEN
INSERT INTO student_class_attend ( gr_number ) VALUES ( c.gr_number ) RETURNING gr_number INTO l_respondent_id;
END IF;
END LOOP;
END;
You can probably use a PL/SQL collection for this, together with a BULK COLLECT and a FORALL. Simplified example:
Tables and data
create table students( id primary key, fname, lname )
as
select 1, 'fname_1', 'lname_1' from dual union all
select 2, 'fname_2', 'lname_2' from dual union all
select 3, 'fname_3', 'lname_3' from dual union all
select 4, 'fname_4', 'lname_4' from dual union all
select 5, 'fname_5', 'lname_5' from dual union all
select 6, 'fname_6', 'lname_6' from dual ;
-- empty
create table attendance( studentid number references students, when_ date ) ;
Anonymous block
declare
-- This will allow you to store several studentIDs:
type studentid_t is table of students.id%type index by pls_integer ;
-- The current attendance list. Notice the TYPE.
attendancelist studentid_t ;
begin
-- Your query (including all the necessary joins and conditions) here.
-- In this case, we will pick up 3 of the 6 IDs.
select id bulk collect into attendancelist
from students
where mod( id, 2 ) = 0 ;
-- Write a _single_ DML statement after FORALL (which is not a loop - see docs)
forall i in attendancelist.first .. attendancelist.last
insert into attendance( studentid, when_ )
values( attendancelist( i ) , sysdate ) ; -- use whatever you need here
end ;
/
Result (after executing the anonymous block)
SQL> select * from attendance ;
STUDENTID WHEN_
---------- ---------
2 07-JUN-20
4 07-JUN-20
6 07-JUN-20
DBfiddle here.
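Applied to the tables from the question, a hedged sketch of the same pattern could look like the following; the table and column names (student, class_time, student_class_attend, gr_number) and the :app_user bind are taken from the question and are assumptions about the real schema:
DECLARE
-- collection type that can hold every fetched gr_number
TYPE gr_number_t IS TABLE OF student.gr_number%TYPE INDEX BY PLS_INTEGER;
l_gr_numbers gr_number_t;
BEGIN
-- one query collects all matching values into the collection
SELECT s.gr_number
BULK COLLECT INTO l_gr_numbers
FROM student s
LEFT JOIN class_time ct ON ct.class_id = s.class_id
AND instr(s.class_time, ct.class_time) > 0
WHERE upper(TRIM(ct.class_id)) = upper(TRIM(:app_user))
AND s.gr_number IS NOT NULL
AND is_active_flg = 'Y';
-- one bulk INSERT instead of a row-by-row loop
FORALL i IN 1 .. l_gr_numbers.COUNT
INSERT INTO student_class_attend ( gr_number )
VALUES ( l_gr_numbers(i) );
END;
/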

How to secure table for avoid duplicate data

I can't figure out how to protect my table against duplicate combinations of attributes_positions. The best way to show what I mean is the following image:
The column id_combination represents the number of the combination. A combination consists of attributes_positions, so a combination is a sequence of attributes_positions.
I now want to protect the table from inserting exactly the same sequence of attributes_positions.
Of course, if an already inserted combination contains one additional attributes_positions, or one fewer, than the combination being inserted, that is OK.
The image shows the difference between a duplicate and a non-duplicate combination.
Is there some way I can do that? Maybe something like a 'before update' trigger, but how would I implement it for this example? I'm not very good with advanced SQL.
The database where I'm trying to secure the table is PostgreSQL 9.4.
I will be grateful for any help.
-- The data
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
);
INSERT INTO theset(set_id, set_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four');
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
INSERT INTO theitem(item_id, item_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four'), ( 5, 'five');
CREATE TABLE set_item (
set_id integer NOT NULL REFERENCES theset (set_id)
, item_id integer NOT NULL REFERENCES theitem(item_id)
, PRIMARY KEY (set_id,item_id)
);
-- swapped index is indicated for junction tables
CREATE UNIQUE INDEX ON set_item(item_id, set_id);
INSERT INTO set_item(set_id,item_id) VALUES
(1,1), (1,2), (1,3), (1,4),
(2,1), (2,2), (2,3), -- (2,4),
(3,1), (3,2), (3,3), (3,4), (3,5),
(4,1), (4,2), (4,4);
CREATE FUNCTION set_item_unique_set( ) RETURNS TRIGGER AS
$func$
BEGIN
IF EXISTS ( -- other set
SELECT * FROM theset oth
-- WHERE oth.set_id <> NEW.set_id -- only for insert/update
WHERE ( TG_OP = 'DELETE' AND oth.set_id <> OLD.set_id
OR TG_OP <> 'DELETE' AND oth.set_id <> NEW.set_id )
-- count (common) members in the two sets
-- items not in common will have count=1
AND NOT EXISTS (
SELECT item_id FROM set_item x1
-- COALESCE: NEW is NULL when the trigger fires for a DELETE
WHERE (x1.set_id = COALESCE(NEW.set_id, OLD.set_id) OR x1.set_id = oth.set_id )
GROUP BY item_id
HAVING COUNT(*) = 1
)
) THEN
RAISE EXCEPTION 'Not unique set';
RETURN NULL;
ELSE
RETURN NEW;
END IF;
END;
$func$ LANGUAGE 'plpgsql'
;
CREATE CONSTRAINT TRIGGER check_item_set_unique
AFTER UPDATE OR INSERT OR DELETE
-- BEFORE UPDATE OR INSERT
ON set_item
FOR EACH ROW
EXECUTE PROCEDURE set_item_unique_set()
;
-- Test it
INSERT INTO set_item(set_id,item_id) VALUES(4,5); -- success
INSERT INTO set_item(set_id,item_id) VALUES(2,4); -- failure
DELETE FROM set_item WHERE set_id=1 AND item_id= 4; -- failure
Note: There should also be a trigger for the DELETE case.
UPDATE: added handling of DELETE
(the handling of deletes is not perfect; imagine the case where the last element from a set is removed)
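A hedged illustration of that remaining gap (a hypothetical sequence, not part of the tests above): once a set loses its last member it has no rows left in set_item, so the trigger has nothing to compare.
-- remove every member of set 4 in one statement; the trigger sees the emptied set and finds no duplicate
DELETE FROM set_item WHERE set_id = 4;
-- set 4 now has no rows in set_item; a second emptied set would be a silent duplicate,
-- because neither empty set has any member row for the trigger to count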
My answer assumes that the target is without dupes, and that we want to insert a new set - which happens to be a duplicate. I choose the group of 4 with the id_comb of 1.
You would have to put the group of 4 into a staging table. Then, you have to pivot both staging and target horizontally - so that you get 5 columns named attr_pos1 to attr_pos5 (the biggest group in your example is 5). To pivot, you need a sequence number, which we get by using ROW_NUMBER(). That's for both tables, staging and target. Then, you pivot both. Then, you try to join pivoted staging and target on all 5 attr_pos# columns, and count the rows. If you get 0, you have no duplicates. If you get 1, you have duplicates.
Here's the whole scenario:
WITH
-- input section: a) target table, no dupes
target(id_comb,attr_pos) AS (
SELECT 2,1
UNION ALL SELECT 2,2
UNION ALL SELECT 2,3
UNION ALL SELECT 2,4
UNION ALL SELECT 3,1
UNION ALL SELECT 3,2
UNION ALL SELECT 3,3
UNION ALL SELECT 3,4
UNION ALL SELECT 3,5
UNION ALL SELECT 4,1
UNION ALL SELECT 4,2
UNION ALL SELECT 4,3
)
,
-- input section: b) staging, input, would be a dupe
staging(id_comb,attr_pos) AS (
SELECT 1,1
UNION ALL SELECT 1,2
UNION ALL SELECT 1,3
UNION ALL SELECT 1,4
)
,
-- query section:
-- add sequence numbers to stage and target
target_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM target
)
,
staging_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM staging
)
,
-- horizontally pivot target, NULLS as -1 for later join
target_h AS (
SELECT
id_comb
, IFNULL(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, IFNULL(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, IFNULL(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, IFNULL(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, IFNULL(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM target_s
GROUP BY id_comb ORDER BY id_comb
)
,
-- horizontally pivot staging, NULLS as -1 for later join
staging_h AS (
SELECT
id_comb
, IFNULL(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, IFNULL(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, IFNULL(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, IFNULL(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, IFNULL(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM staging_s
GROUP BY id_comb ORDER BY id_comb
)
SELECT
COUNT(*)
FROM target_h
JOIN staging_h USING (
attr_pos1
, attr_pos2
, attr_pos3
, attr_pos4
, attr_pos5
);
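A note on dialect: IFNULL is used above, but the question's database is PostgreSQL 9.4, which has no IFNULL; there the pivot lines would presumably be written with COALESCE, e.g. for target_h:
-- hypothetical PostgreSQL wording of the target_h pivot (COALESCE instead of IFNULL)
SELECT
id_comb
, COALESCE(MAX(CASE seq WHEN 1 THEN attr_pos END), -1) AS attr_pos1
, COALESCE(MAX(CASE seq WHEN 2 THEN attr_pos END), -1) AS attr_pos2
, COALESCE(MAX(CASE seq WHEN 3 THEN attr_pos END), -1) AS attr_pos3
, COALESCE(MAX(CASE seq WHEN 4 THEN attr_pos END), -1) AS attr_pos4
, COALESCE(MAX(CASE seq WHEN 5 THEN attr_pos END), -1) AS attr_pos5
FROM target_s
GROUP BY id_comb ORDER BY id_comb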
Hope this helps.
Marco
An interesting but not very useful solution by @wildplasser. I created a script to insert sample data:
WITH param AS (
SELECT 8 AS max
), maxarray AS (
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
INSERT INTO theset (set_id, set_name)
SELECT id, id::text FROM generate_series(1, (SELECT 2^max - 1 FROM param)::int8) as id
RETURNING *
), ins_set_item AS (
INSERT INTO set_item (set_id, item_id)
SELECT id, item_id FROM pre WHERE (SELECT count(*) FROM ins_item) > 0 AND (SELECT count(*) FROM ins_set) > 0
RETURNING *
)
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
UNION ALL
SELECT
'sets_items', count(*)
FROM ins_set_item
;
When I call it with 8 (1024 rows in set_item), it runs for 21 seconds, which is very bad. When I turn the trigger off, it takes less than 1 millisecond.
My proposal
It is very interesting to use arrays in this case. Unfortunately, PostgreSQL does not support foreign keys for arrays, but that can be emulated with TRIGGERs. I removed the set_item table and added an items int[] column to theset:
-- The data
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
, items integer[] UNIQUE NOT NULL
);
CREATE INDEX i1 ON theset USING gin (items);
CREATE OR REPLACE FUNCTION check_item_CU() RETURNS TRIGGER AS $sql$
BEGIN
IF (SELECT count(*) > 0 FROM unnest(NEW.items) AS u LEFT JOIN theitem ON (item_id = u) WHERE item_id IS NULL) THEN
RETURN NULL;
END IF;
NEW.items = ARRAY(SELECT unnest(NEW.items) ORDER BY 1);
RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_CU BEFORE INSERT OR UPDATE ON theset FOR EACH ROW EXECUTE PROCEDURE check_item_CU();
CREATE OR REPLACE FUNCTION check_item_UD() RETURNS TRIGGER AS $sql$
BEGIN
IF (TG_OP = 'DELETE' OR TG_OP = 'UPDATE' AND NEW.item_id != OLD.item_id) AND (SELECT count(*) > 0 FROM theset WHERE OLD.item_id = ANY(items)) THEN
RAISE EXCEPTION 'item_id % still used', OLD.item_id;
RETURN NULL;
END IF;
RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_UD BEFORE DELETE OR UPDATE ON theitem FOR EACH ROW EXECUTE PROCEDURE check_item_UD();
WITH param AS (
SELECT 10 AS max
), maxarray AS (
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), pre_arr AS (
SELECT id, array_agg(item_id) AS items
FROM pre
GROUP BY 1
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
INSERT INTO theset (set_id, set_name, items)
SELECT id, id::text, items FROM pre_arr WHERE (SELECT count(*) FROM ins_item) > 0
RETURNING *
)
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
;
This variant runs in less than 1 ms.
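A hedged quick check of this design (assuming the block above has already been run): the BEFORE trigger sorts the array, so the UNIQUE constraint on items catches reordered duplicates, and unknown items are silently rejected by the same trigger returning NULL.
-- {2,1} is sorted to {1,2}, which already exists -> unique_violation
INSERT INTO theset (set_id, set_name, items) VALUES (5000, 'dupe', ARRAY[2,1]);
-- 999 does not exist in theitem, so the trigger returns NULL and the row is skipped
INSERT INTO theset (set_id, set_name, items) VALUES (5001, 'bad', ARRAY[1,999]);
SELECT count(*) FROM theset WHERE set_id IN (5000, 5001); -- expected: 0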

SQL CTE Syntax to DELETE / INSERT rows

What's the CTE syntax to delete from a table, then insert to the same table and return the values of the insert?
Operating on 2 hours of sleep and something doesn't look right (besides the fact that this won't execute):
WITH delete_rows AS (
DELETE FROM <some_table> WHERE id = <id_value>
RETURNING *
)
SELECT * FROM delete_rows
UNION
(
INSERT INTO <some_table> ( id, text_field )
VALUES ( <id_value>, '<text_field_value>' )
RETURNING *
)
The expected behavior is to first clear all the records for an ID, then insert records for the same ID (intentionally not an upsert) and return those inserted records (not the deletions).
Your question update made clear that you cannot do this in a single statement.
Packed into CTEs of the same statement, both operations (INSERT and DELETE) would see the same snapshot of the table and execute virtually at the same time. I.e., the INSERT would still see all rows that you thought to be deleted already. The manual:
All the statements are executed with the same snapshot (see Chapter 13), so they cannot "see" one another's effects on the target tables.
You can wrap them as two independent statements into the same transaction - which doesn't seem strictly necessary either, but it would allow the whole operation to succeed / fail atomically:
BEGIN;
DELETE FROM <some_table> WHERE id = <id_value>;
INSERT INTO <some_table> (id, text_field)
VALUES ( <id_value>, '<text_field_value>')
RETURNING *;
COMMIT;
Now, the INSERT can see the results of the DELETE.
CREATE TABLE test_table (value TEXT UNIQUE);
INSERT INTO test_table SELECT 'value 1';
INSERT INTO test_table SELECT 'value 2';
WITH delete_row AS (DELETE FROM test_table WHERE value='value 2' RETURNING 0)
INSERT INTO test_table
SELECT DISTINCT 'value 2'
FROM (SELECT 'dummy') dummy
LEFT OUTER JOIN delete_row ON TRUE
RETURNING *;
The query above handles the situations when DELETE deletes 0/1/some rows.
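A minimal way to convince yourself, using the test_table created above (a sketch; the expected outcome follows from the claim above):
SELECT * FROM test_table ORDER BY value;
-- expected: 'value 1' and exactly one 'value 2', whether the DELETE removed 0 or 1 rows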
Elaborating on skif1979's "DelSert" CTE method, the "Logged DelSert:"
-- setups
DROP TABLE IF EXISTS _zx_t1 ;
CREATE TEMP TABLE
IF NOT EXISTS
_zx_t1
( id bigint
, fld2 bigint
, UNIQUE (id)
);
-- unique records
INSERT INTO _zx_t1 SELECT 1, 99;
INSERT INTO _zx_t1 SELECT 2, 98;
WITH
_cte_del_row AS
( DELETE
FROM _zx_t1
WHERE id = 2
RETURNING id as _b4_id, fld2 as _b4_fld2 -- returns complete deleted row
)
, _cte_delsert AS
( INSERT
INTO _zx_t1
SELECT DISTINCT
_cte_del_row._b4_id
, _cte_del_row._b4_fld2 + 1
from (SELECT null::integer AS _zunk) _zunk -- skif1979's trick here
LEFT OUTER JOIN _cte_del_row -- clever LOJ magic
ON TRUE -- LOJ cartesian product
RETURNING id as _aft_id , fld2 as _aft_fld2 -- return newly "delserted" rows
)
SELECT * -- returns before & after snapshots from CTE's
FROM
_cte_del_row
, _cte_delsert ;
RESULT:
_b4_id | _b4_fld2 | _aft_id | _aft_fld2
--------+----------+---------+-----------
2 | 98 | 2 | 99
AFAICT these all occur linearly w/in a unit of work, akin to a journaled or logged update.
Workable for
Child records
OR Schema w/ no FK
OR FK w/ cascading deletes
Not workable for
Parent records w/ FK & no cascading deletes
A related (and IMO better) answer, akin to the "Logged DelSert", is this, a logged "SelUp":
-- setups
DROP TABLE IF EXISTS _zx_t1 ;
CREATE TEMP TABLE
IF NOT EXISTS
_zx_t1
( id bigint
, fld2 bigint
, UNIQUE (id)
);
-- unique records
INSERT INTO _zx_t1 SELECT 1, 99;
INSERT INTO _zx_t1 SELECT 2, 98;
WITH
_cte_sel_row AS
( SELECT -- start unit of work with read
id as _b4_id -- fields need to be aliased
,fld2 as _b4_fld2 -- to prevent ambiguous column errors
FROM _zx_t1
WHERE id = 2
FOR UPDATE
)
, _cte_sel_up_ret AS -- we're in the same UOW
( UPDATE _zx_t1 -- actual table
SET fld2 = _b4_fld2 + 1 -- some actual work
FROM _cte_sel_row
WHERE id = _b4_id
AND fld2 < _b4_fld2 + 1 -- gratuitous but illustrates the point
RETURNING id as _aft_id, fld2 as _aft_fld2
)
SELECT
_cte_sel_row._b4_id
,_cte_sel_row._b4_fld2 -- before
,_cte_sel_up_ret._aft_id
,_cte_sel_up_ret._aft_fld2 -- after
FROM _cte_sel_up_ret
INNER JOIN _cte_sel_row
ON TRUE AND _cte_sel_row._b4_id = _cte_sel_up_ret._aft_id
;
RESULT:
_b4_id | _b4_fld2 | _aft_id | _aft_fld2
--------+----------+---------+-----------
2 | 98 | 2 | 99
See also:
https://rob.conery.io/2018/08/13/transactional-data-operations-in-postgresql-using-common-table-expressions/

Homogenous Containers with GROUP BY Part II

I have three tables, one of which I need to update:
CREATE TABLE Containers (
ID int PRIMARY KEY,
FruitType int
)
CREATE TABLE Contents (
ContainerID int,
ContainedFruit int
)
CREATE TABLE Fruit (
FruitID int,
Name VARCHAR(16)
)
INSERT INTO Fruit VALUES ( 0, 'Mixed' )
INSERT INTO Fruit VALUES ( 1, 'Apple' )
INSERT INTO Fruit VALUES ( 2, 'Banana' )
INSERT INTO Fruit VALUES ( 3, 'Cherry' )
INSERT INTO Fruit VALUES ( 4, 'Date' )
INSERT INTO Containers VALUES ( 101, 0 )
INSERT INTO Containers VALUES ( 102, 0 )
INSERT INTO Containers VALUES ( 103, 0 )
INSERT INTO Containers VALUES ( 104, 3 )
INSERT INTO Contents VALUES ( 101, 1 )
INSERT INTO Contents VALUES ( 101, 1 )
INSERT INTO Contents VALUES ( 102, 1 )
INSERT INTO Contents VALUES ( 102, 2 )
INSERT INTO Contents VALUES ( 102, 3 )
INSERT INTO Contents VALUES ( 103, 3 )
INSERT INTO Contents VALUES ( 103, 4 )
INSERT INTO Contents VALUES ( 104, 3 )
Let's assume this is the state of my database. Please note the Fruit table is used twice. Once to describe the contents of the container, and once to describe if the container is meant to contain only one type of fruit or if it can be mixed. Bad design IMO, but too late to change it.
The problem is that container 101 is incorrectly marked as MIXED when it should really be APPLE. Containers with multiple contents of the same type are still homogenous containers and should be marked as such.
I know how to do a query that finds the containers that are incorrectly marked as mixed:
SELECT Contents.ContainerID
FROM Contents
INNER JOIN Containers ON
Contents.ContainerID = Containers.ID AND
Containers.FruitType = 0
GROUP BY Contents.ContainerID
HAVING COUNT( DISTINCT Contents.ContainedFruit ) = 1
However, I don't know how to update every row in Container where this error has been made. That's my question.
This will do:
UPDATE Containers
SET Containers.FruitType = Proper.ProperType
FROM Containers
INNER JOIN
(
SELECT Contents.ContainerID,
MAX(Contents.ContainedFruit) ProperType
FROM Contents
INNER JOIN Containers ON
Contents.ContainerID = Containers.ID AND
Containers.FruitType = 0
GROUP BY Contents.ContainerID
HAVING COUNT( DISTINCT Contents.ContainedFruit ) = 1
) Proper
ON Containers.ID = Proper.ContainerID
SQL Fiddle
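For a quick sanity check after the UPDATE (a sketch against the sample data above):
SELECT ID, FruitType FROM Containers ORDER BY ID;
-- expected: 101 -> 1 (Apple, previously mislabelled as Mixed), 102 -> 0, 103 -> 0, 104 -> 3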

How to generate a new code

Using VB.Net and SQL Server
Table1
Id Value .....
1001P0010001 100
1001P0010002 200
1001P0010003 300
1001P0010004 400
...
I have n columns and rows in table1. From table1, I want to copy all the column details with a new id number.
The id number looks like 1001P0020001, 1001P0020002, ...
P002 is the next id, P003 the one after that, and so on.
The first 4 digits and the last 4 digits remain as read from table1; the middle 4 characters should change to the next series.
Expected output
Id Value .....
1001P0010001 100
1001P0010002 200
1001P0010003 300
1001P0010004 400
1001P0020001 100
1001P0020002 200
1001P0020003 300
1001P0020004 400
...
Which is the best way to do this?
I can do it in VB.Net or with a SQL query. Please suggest ways of doing this.
CREATE TABLE CocoJambo (
Id CHAR(12) NOT NULL,
Value INT NULL,
CHECK( Id LIKE '[0-9][0-9][0-9][0-9][A-Z][0-9][0-9][0-9][0-9][0-9][0-9][0-9]' )
);
GO
CREATE UNIQUE INDEX IUN_CocoJambo_Id
ON CocoJambo (Id);
GO
INSERT CocoJambo (Id, Value)
SELECT '1001P0010001', 100
UNION ALL SELECT '1001P0010002', 200
UNION ALL SELECT '1001P0010003', 300
UNION ALL SELECT '1001P0010004', 400
UNION ALL SELECT '1001P0020001', 100
UNION ALL SELECT '1001P0020002', 200
UNION ALL SELECT '1001P0020003', 300
UNION ALL SELECT '1001P0020004', 400;
GO
-- Test 1: generating a single Id
DECLARE @Prefix CHAR(5),
@Sufix CHAR(4);
SELECT @Prefix = '1001P',
@Sufix = '0001';
BEGIN TRAN
DECLARE @LastGeneratedMiddleValue INT,
@LastValue INT;
SELECT @LastGeneratedMiddleValue = y.MiddleValue,
@LastValue = y.Value
FROM
(
SELECT x.MiddleValue, x.Value,
ROW_NUMBER() OVER(ORDER BY x.MiddleValue DESC) AS RowNum
FROM
(
SELECT CONVERT(INT,SUBSTRING(a.Id,6,3)) AS MiddleValue, a.Value
FROM CocoJambo a WITH(UPDLOCK) -- It will lock the rows (U lock) during transaction
WHERE a.Id LIKE @Prefix+'%'+@Sufix
) x
) y
WHERE y.RowNum=1;
SELECT @LastGeneratedMiddleValue = ISNULL(@LastGeneratedMiddleValue, 0)
SELECT @Prefix
+RIGHT('00'+CONVERT(VARCHAR(3),@LastGeneratedMiddleValue +1),3)
+@Sufix AS MyNewId,
@LastValue AS Value
COMMIT TRAN;
GO
-- Test 2: generating many Id's
BEGIN TRAN
DECLARE @Results TABLE (
Prefix CHAR(5) NOT NULL,
Sufix CHAR(4) NOT NULL,
LastGeneratedMiddleValue INT NOT NULL,
LastValue INT NULL
);
INSERT @Results (Prefix, Sufix, LastGeneratedMiddleValue, LastValue)
SELECT y.Prefix, y.Sufix, y.MiddleValue, y.Value
FROM
(
SELECT x.Prefix, x.MiddleValue, x.Sufix, x.Value,
ROW_NUMBER() OVER(PARTITION BY x.Prefix, x.Sufix ORDER BY x.MiddleValue DESC) AS RowNum
FROM
(
SELECT SUBSTRING(a.Id,1,5) AS Prefix,
CONVERT(INT,SUBSTRING(a.Id,6,3)) AS MiddleValue,
SUBSTRING(a.Id,9,4) AS Sufix,
a.Value
FROM CocoJambo a WITH(UPDLOCK) -- It will lock the rows (U lock) during transaction
) x
) y
WHERE y.RowNum=1;
SELECT r.*,
r.Prefix
+RIGHT('00'+CONVERT(VARCHAR(3),r.LastGeneratedMiddleValue +1),3)
+r.Sufix AS MyNewId,
r.LastValue AS Value
FROM @Results r;
COMMIT TRAN;
GO
insert into table1 (id, value)
select
l +
replicate('0', 3 - len(m)) + m +
r,
value
from (
select
left(id, 5) l,
cast(cast(substring(id, 6, 3) as integer) + 1 as varchar(3)) m,
right(id, 4) r,
value
from table1
) s
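If the table can already contain more than one series (P001 and P002, say), the per-row increment above would turn P001 rows into P002 ids that may already exist. A hedged variant, sketched for SQL Server and assuming ids always follow the 4-digit + 'Pnnn' + 4-digit pattern from the question, computes the next middle number once and copies only the latest series:
DECLARE @maxmid int;
-- current highest middle number, e.g. 1 for P001
SELECT @maxmid = MAX(CAST(SUBSTRING(id, 6, 3) AS int)) FROM table1;
INSERT INTO table1 (id, value)
SELECT LEFT(id, 5)                                       -- first 4 digits plus 'P'
     + RIGHT('000' + CAST(@maxmid + 1 AS varchar(3)), 3) -- next series, zero-padded
     + RIGHT(id, 4),                                     -- last 4 digits unchanged
       value
FROM table1
WHERE CAST(SUBSTRING(id, 6, 3) AS int) = @maxmid;        -- copy only the latest series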