Formatting bigint in SQL Server

I have a table with the following example data (the table is defined as (id int, sn bigint)):
id sn
--------------------------
1 8921901414327625990
1 8921901414327625991
How can I remove the 892190 from sn?

You can do it this way, using modulo:
select sn % 10000000000000 from table1
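For the sample data this keeps the last 13 digits, which is exactly what remains after stripping the 6-digit 892190 prefix from the 19-digit values. A quick sanity check (a sketch, reusing the question's table1 and sn names):
select sn, sn % 10000000000000 as trimmed_sn
from table1
-- 8921901414327625990 -> 1414327625990
-- 8921901414327625991 -> 1414327625991
Note that this assumes every sn has the same number of digits: the modulo keeps a fixed-length suffix rather than removing a specific prefix string.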


How to get which range criteria a value falls in using a SQL query?

Code:
IF OBJECT_ID('tempdb..#TempMaster','U') IS NOT NULL
DROP TABLE #TempMaster
IF OBJECT_ID('tempdb..#TempTransaction','U') IS NOT NULL
DROP TABLE #TempTransaction
CREATE TABLE #TempMaster
(
Sno INT IDENTITY(1,1),
RangeDesc VARCHAR(100),
RangeFromValue INT,
RangeToValue INT
)
CREATE TABLE #TempTransaction
(
Sno INT IDENTITY(1,1),
[Values] INT
)
INSERT INTO #TempMaster(RangeDesc,RangeFromValue,RangeToValue)
SELECT * FROM (VALUES('Type A',1,10),('Type B',11,20),('Type C',21,30)) AS T(RangeDesc,RangeFromValue,RangeToValue)
INSERT INTO #TempTransaction([Values])
SELECT 1
UNION ALL
SELECT 15
UNION ALL
SELECT 5
SELECT * FROM #TempMaster
SELECT * FROM #TempTransaction
Can anyone help me find a solution that returns the range description based on which range each value falls into?
I want to write a query that produces the output below. Thanks in advance.
-------------------------------
sno Values RangeDesc
-------------------------------
1 1 Type A
2 15 Type B
3 5 Type A
-------------------------------
You can do a left join based on the range values:
select t1.Sno, t1.[Values], t2.RangeDesc
from #TempTransaction t1
left join #TempMaster t2 on t1.[Values] between t2.RangeFromValue and t2.RangeToValue
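Note that with the left join, a value falling outside every range still comes back, just with a NULL RangeDesc. If you would rather label such rows explicitly, a small variation (the 'Out of range' label is only a placeholder):
select t1.Sno, t1.[Values], ISNULL(t2.RangeDesc, 'Out of range') as RangeDesc
from #TempTransaction t1
left join #TempMaster t2 on t1.[Values] between t2.RangeFromValue and t2.RangeToValue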

Inserting an auto generated value into a column with specific pattern

I have a table named tblSample with columns ID, PID, etc. I want to auto-generate those two columns following a specific pattern.
For example:
ID PID
------ ------
ABC001 PAB001
ABC002 PAB002
ABC003 PAB003
ABC004 PAB004
| |
| |
ABC999 PAB999
As you can see, the 'ABC' prefix in ID and the 'PAB' prefix in PID stay the same. How can I insert these records automatically so that the three digits after 'ABC' or 'PAB' run from 001 to 999?
My suggestion is to create the table structure as below, with one identity column testID and the other columns (ID, and analogously PID) computed from it:
CREATE TABLE #tmpOne(testID INT IDENTITY(1,1),
-- RIGHT(0.001*testID, 3) zero-pads the counter: 1 -> '001', 12 -> '012', 123 -> '123'
ID AS ('ABC'+ (CASE WHEN LEN(testID) <= 3 THEN CAST(RIGHT(0.001*testID, 3) AS VARCHAR) ELSE CAST(testID AS VARCHAR) END)),
Ename VARCHAR(20))
INSERT INTO #tmpOne(Ename)
SELECT 'Test'
SELECT * FROM #tmpOne
CREATE TABLE #tt(ID VARCHAR(100),PID VARCHAR(100))
GO
INSERT INTO #tt(ID,PID)
SELECT 'ABC'+RIGHT('000'+LTRIM(a.ID),3),'PAB'+RIGHT('000'+LTRIM(a.ID),3) FROM (
SELECT ISNULL(MAX(CASE WHEN SUBSTRING(t.ID,4,LEN(t.ID)) > SUBSTRING(t.PID,4,LEN(t.PID)) THEN SUBSTRING(t.ID,4,LEN(t.ID)) ELSE SUBSTRING(t.PID,4,LEN(t.PID)) END)+1,1) AS id
FROM #tt AS t
) AS a
GO 999
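If the goal is simply to fill all 999 combinations at once, a set-based insert avoids re-running the batch with GO 999. A sketch against the same #tt table (sys.all_objects is used only as a convenient row source that has at least 999 rows):
;WITH nums AS (
SELECT TOP (999) ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) AS n
FROM sys.all_objects
)
INSERT INTO #tt(ID, PID)
SELECT 'ABC' + RIGHT('000' + CAST(n AS VARCHAR(3)), 3),
'PAB' + RIGHT('000' + CAST(n AS VARCHAR(3)), 3)
FROM nums;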

Postgresql - how to run a query on multiple tables with same schema

I have a postgres database that has several tables (a few hundreds). A subset Foo of the tables in the database have the same schema.
Ideally, I would like to create a stored procedure which can run a query against a single table, or against all tables in subset Foo.
Pseudocode:
CREATE TABLE tbl_a (id INTEGER, name VARCHAR(32), weight double, age INTEGER);
CREATE TABLE tbl_b (id INTEGER, name VARCHAR(32), weight double, age INTEGER);
CREATE TABLE tbl_c (id INTEGER, name VARCHAR(32), weight double, age INTEGER);
CREATE TABLE tbl_d (id INTEGER, name VARCHAR(32), weight double, age INTEGER);
CREATE TYPE person_info AS (id INTEGER, name VARCHAR(32), weight double, age INTEGER);
CREATE FUNCTION generic_func(ARRAY one_or_more_table_names)
RETURNS person_info
-- Run query on table or all specified tables
AS $$ $$
LANGUAGE SQL;
How could I implement this requirement in Postgresql 9.x ?
You should have a look at table inheritance in PostgreSQL; it allows exactly what you describe.
For example, you could create a table parent_tbl:
CREATE TABLE parent_tbl (id INTEGER, name VARCHAR(32), weight numeric, age INTEGER);
Then link your tables to this parent table:
ALTER TABLE tbl_a INHERIT parent_tbl;
ALTER TABLE tbl_b INHERIT parent_tbl;
ALTER TABLE tbl_c INHERIT parent_tbl;
ALTER TABLE tbl_d INHERIT parent_tbl;
Then a SELECT query on parent_tbl will query all of the tbl_x tables, while a query on a given tbl_x will query only that particular table.
INSERT INTO tbl_a VALUES (1, 'coucou', 42, 42);
SELECT * FROM tbl_a;
id | name | weight | age
----+--------+--------+-----
1 | coucou | 42 | 42
(1 row)
SELECT * FROM parent_tbl;
id | name | weight | age
----+--------+--------+-----
1 | coucou | 42 | 42
(1 row)
SELECT * FROM tbl_b;
id | name | weight | age
----+--------+--------+-----
(0 rows)
It is also possible to filter data coming from specific child tables. For example, if you are only interested in data from tbl_a and tbl_b, you can do:
select id, name, weight, age
from parent_tbl
left join pg_class on oid = parent_tbl.tableoid
where relname in ('tbl_a', 'tbl_b');
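Conversely, if rows are ever inserted directly into parent_tbl, the ONLY keyword restricts a query to the parent table itself, skipping all children:
SELECT * FROM ONLY parent_tbl;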
EDIT: I used numeric for weight instead of double, as that type is not supported on my server.
To build the select query dynamically from the table names in an array, you can use the following select statement:
SELECT string_agg(q, ' union all ')
FROM (
SELECT 'select * from ' || unnest(array ['tble_a','tble_b']) AS q
) t
Result:
string_agg
---------------------------------------------------
select * from tble_a union all select * from tble_b
You can create a function that returns a table with the columns
id INTEGER
,name VARCHAR(32)
,weight numeric
,age INTEGER
P.S.: I am avoiding the TYPE person_info.
function:
CREATE OR REPLACE FUNCTION generic_func(tbl varchar[])
RETURNS TABLE ( -- To store the output
id INTEGER
,name VARCHAR(32)
,weight numeric
,age INTEGER
) AS $BODY$
DECLARE qry text;
BEGIN
SELECT string_agg(q, ' union all ') --To create select query dynamically
INTO qry
FROM (
SELECT 'select * from ' || unnest(tbl) AS q
) t;
RAISE NOTICE 'qry %',qry; --optional
RETURN query --Executes the query to the defined table
EXECUTE qry;
END;$BODY$
LANGUAGE plpgsql VOLATILE;
Usage:
select * from generic_func(array['tbl_a','tbl_b','tbl_c','tbl_d'])
Result:
id name weight age
-- ---- ------ ---
2 ABC 11 112
2 CBC 11 112
2 BBC 11 112
2 DBC 11 112
and
select * from generic_func(array['tbl_a'])
Result:
id name weight age
-- ---- ------ ---
2 ABC 11 112

SQL Server 2008 R2 - Transforming a table with XML list columns to individual rows in a new table

I'm trying to write some SQL to help transition from one database to another. It's gone well so far, but I ran into a problem I can't wrap my brain around.
Original:
Id (bigint) | ColA (XML) | ColB (XML) | ... | RecordCreation
The XML for each column with XML looks like the following:
<ColA count="3"><int>3</int><int>9</int><int>6</int></ColA>
For any particular row, the "count" is the same for every list column (if ColA has 3 entries, ColB will also have 3, etc.), but some lists contain strings rather than ints.
In the new database:
Id (bigint) | Index (int) | ColA (int) | ColB (nvarchar(20)) | ... | RecordCreation
So if I start with
5 | <ColA count="3"><int>9</int><int>8</int><int>7</int></ColA> | <ColB count="3"><string>A</string><string>B</string><string>C</string></ColB> | ... | 2014-01-15 ...
I need out:
5 | 1 | 9 | A | ... | 2014-01-15 ...
5 | 2 | 8 | B | ... | 2014-01-15 ...
5 | 3 | 7 | C | ... | 2014-01-15 ...
For each row in the original DB, Index (the second column) is the position in the XML lists that the values for that row come from.
Any ideas?
Thanks.
Edit:
A colleague showed me a dirty way that looks like it might get me there. This is to transfer some existing data into the new database for testing purposes; it's not production and won't be used often; we're just starving for data to test on.
declare @count int
set @count = 0
declare @T1 table (Id bigint, [Index] int, ColA int, ColB nvarchar(20), ..., MaxIndex int)
while @count < 12 begin
Insert into @T1
select Id, @count,
CAST(CONVERT(nvarchar(max), ColA.query('/ColA/int[sql:variable("@count")]/text()')) as int),
CONVERT(nvarchar(20), ColB.query('/ColB/string[sql:variable("@count")]/text()')),
...,
CAST(CONVERT(nvarchar(max), ColA.query('data(/ColA/@count)')) as int)
From mytable
set @count = @count + 1
end
Then I can insert from the table variable where [Index] < MaxIndex. There'll never be more than 12 indices, and I think the index is 0-based; easy fix if not. And each row may have a different count in its lists (but all lists in the same row have the same count); that's why I went with MaxIndex and a holding table. And I may switch to a real table that I drop when I'm done if the performance is too bad.
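That final insert would look roughly like this (a sketch; NewTable and the elided column list are placeholders):
Insert into NewTable (Id, [Index], ColA, ColB, ...)
select Id, [Index], ColA, ColB, ...
from @T1
where [Index] < MaxIndex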
Try this query:
DECLARE @MyTable TABLE (
ID INT PRIMARY KEY,
ColA XML,
ColB XML
);
INSERT @MyTable (ID, ColA, ColB)
SELECT 5, N'<ColA count="3"><int>9</int><int>8</int><int>7</int></ColA>', N'<ColB count="3"><string>A</string><string>B</string><string>C</string></ColB>';
SELECT x.ID,
ab.*
FROM @MyTable x
CROSS APPLY (
SELECT a.IntValue, b.VarcharValue
FROM
(
SELECT ax.XmlCol.value('(text())[1]', 'INT') AS IntValue,
ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) AS RowNum -- xml values cannot be sorted directly; this relies on .nodes() emitting nodes in document order
FROM x.ColA.nodes('/ColA/int') ax(XmlCol)
) a INNER JOIN
(
SELECT bx.XmlCol.value('(text())[1]', 'VARCHAR(50)') AS VarcharValue,
ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) AS RowNum
FROM x.ColB.nodes('/ColB/string') bx(XmlCol)
) b ON a.RowNum = b.RowNum
) ab;
Output:
/*
ID IntValue VarcharValue
-- -------- ------------
5 9 A
5 8 B
5 7 C
*/
Note: very likely, the performance could be horrible (even for an ad-hoc task)
Assumption:
For any particular row, the "count" is the same for each list, ColB
will also have 3, etc., but some lists are of strings.

How to create dynamic columns using table data in SQL Server 2008?

I have four tables, shown below, in SQL Server 2008:
TABLE 1 -> Users
UserID UserName
-----------------------
1 Jhon
TABLE 2 -> PhoneBook
PhonebookID UserID Name MobileNumber
-------------------------------------------------------------------
1 1 MyBrother 252848
TABLE 3 -> PhonebookExtraField
PhonebookExtraFieldID UserID ExtraFieldName
-------------------------------------------------------------
1 1 Age
2 1 Job
3 1 Address
TABLE 4 -> phoneBookExtraFieldData
phoneBookExtraFieldDataID PhonebookExtraFieldID PhonebookID ExtraFieldValue
-----------------------------------------------------------------------------------------
101 1 1 30
102 2 1 Web Developer
103 3 1 A.V. Rose
How can I write a query for the output below?
I mean, I am looking for a way to create dynamic columns from the tables' data...
UserName Phonebook(Name) Phonebook(MobileNumber) Age Job Address
-------------------------------------------------------------------------------------
Jhon MyBrother 252848 30 Web Developer A.V. Rose
What is the best way to do this job?
How should I change my tables for this purpose?
Thanks for your attention, and thanks in advance.
Maybe something like this:
Test data:
CREATE TABLE #User
(
UserID INT,
UserName VARCHAR(100)
)
INSERT INTO #User
VALUES(1,'Jhon')
CREATE TABLE #PhoneBook
(
PhonebookID INT,
UserID INT,
Name VARCHAR(100),
MobileNumber INT
)
INSERT INTO #PhoneBook
VALUES(1,1,'MyBrother',252848)
CREATE TABLE #PhonebookExtraField
(
PhonebookExtraFieldID INT,
UserID INT,
ExtraFieldName VARCHAR(100)
)
INSERT INTO #PhonebookExtraField
VALUES(1,1,'Age'),(2,1,'Job'),(3,1,'Address')
CREATE TABLE #PhoneBookExtraFieldData
(
PhoneBookExtraFieldDataID INT,
PhonebookExtraFieldID INT,
PhonebookID INT,
ExtraFieldValue VARCHAR(100)
)
INSERT INTO #PhoneBookExtraFieldData
VALUES(101,1,1,'30'),(102,2,1,'Web Developer'),(103,3,1,'A.V. Rose')
Then find the dynamic columns:
DECLARE @cols VARCHAR(MAX)
SELECT @cols = COALESCE(@cols + ','+QUOTENAME(ExtraFieldName),
QUOTENAME(ExtraFieldName))
FROM
#PhonebookExtraField
Then execute some dynamic SQL with a PIVOT:
DECLARE @query NVARCHAR(4000)=
N'SELECT
*
FROM
(
SELECT
#User.UserName,
#PhoneBook.Name AS [Phonebook(Name)],
#PhoneBook.MobileNumber AS [Phonebook(MobileNumber)],
#PhonebookExtraField.ExtraFieldName,
#PhoneBookExtraFieldData.ExtraFieldValue
FROM
#User
JOIN #PhoneBook
ON #User.UserID=#PhoneBook.UserID
JOIN #PhonebookExtraField
ON #PhoneBook.UserID=#PhonebookExtraField.UserID
JOIN #PhoneBookExtraFieldData
ON #PhonebookExtraField.PhonebookExtraFieldID=#PhoneBookExtraFieldData.PhonebookExtraFieldID
) AS p
PIVOT
(
MAX(ExtraFieldValue)
FOR ExtraFieldName IN('+@cols+')
) AS pvt'
EXECUTE(@query)
Then I will clean up after myself:
DROP TABLE #User
DROP TABLE #PhoneBook
DROP TABLE #PhonebookExtraField
DROP TABLE #PhoneBookExtraFieldData
Select
*
from
Users as u,
PhoneBook as pb,
PhonebookExtraField as pbf,
phoneBookExtraFieldData as pbdf
where
u.userid=pb.userID
and pb.userID=pbf.userID
and pbf.PhonebookExtraFieldID = pbdf.PhonebookExtraFieldID
and u.Username='Jhon'
Replace * with required column names.
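The same query in explicit JOIN syntax (a sketch using the question's table names; note the extra PhonebookID condition, which ties each extra-field value to its own phonebook row):
SELECT u.UserName, pb.Name, pb.MobileNumber, pbf.ExtraFieldName, pbdf.ExtraFieldValue
FROM Users AS u
JOIN PhoneBook AS pb ON pb.UserID = u.UserID
JOIN PhonebookExtraField AS pbf ON pbf.UserID = pb.UserID
JOIN phoneBookExtraFieldData AS pbdf ON pbdf.PhonebookExtraFieldID = pbf.PhonebookExtraFieldID
AND pbdf.PhonebookID = pb.PhonebookID
WHERE u.UserName = 'Jhon'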