T-SQL cast and convert issues from source tables to destination - sql

I have a table as follows:
create table dbo.##Table_A00
(
RowNo int,
TRANSACTION_TYPE varchar(3),
ORGANISATION_ID numeric (10),
FILE_TYPE varchar(3),
CREATION_DATE datetime,
CREATION_TIME varchar(3),
GENERATION_NUMBER numeric (6)
)
However, the source I am loading from is a table used to capture flat files, so the data can be in any format.
In the source table the data types look like this:
CREATE TABLE ##Table_Alltextfiles
(
rowID int identity (1,1),
[Col1] varchar(50),
[Col2] varchar(250),
[Col3] varchar(50),
[Col4] varchar(50),
[Col5] varchar(50),
[Col6] varchar(50),
[Col7] varchar(50)
)
What I want to do is insert into ##Table_A00 (destination) all rows from ##Table_Alltextfiles (source)
However I am having issues doing this because the data types are mismatched, and I have tried casting without success.
What can I do to get the varchar values into their appropriate destination types, i.e. if it is a date field or a numeric one?
This is what I have been trying to do:
insert into dbo.##Table_A00
select
rowID,
col1, cast(col2 as numeric (10)),
col3, cast(col4 as date),
col5, cast(col6 as numeric (6))
from
##Table_Alltextfiles
where
col1 = 'A00'
Thank you

Try the query below.
insert into dbo.##Table_A00
select
rowID,
SUBSTRING(col1,1,3), case when ISNUMERIC(col2)=1 THEN cast(col2 as numeric (10)) ELSE NULL END,
SUBSTRING(col3,1,3), case when ISDATE(col4)=1 THEN cast(col4 as datetime) ELSE NULL END,
SUBSTRING(col5,1,3), case when ISNUMERIC(col6)=1 THEN cast(col6 as numeric (6)) ELSE NULL END
from
##Table_Alltextfiles
where
col1 = 'A00'
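If you are on SQL Server 2012 or later, TRY_CAST (or TRY_CONVERT) is a cleaner alternative: it returns NULL instead of raising an error whenever a value cannot be converted, and it sidesteps ISNUMERIC's edge cases (ISNUMERIC returns 1 for strings such as '.' that still fail to cast to numeric). A minimal sketch against the same two tables:
insert into dbo.##Table_A00
select
rowID,
SUBSTRING(col1,1,3),
TRY_CAST(col2 as numeric (10)),  -- NULL when col2 is not a valid numeric(10)
SUBSTRING(col3,1,3),
TRY_CAST(col4 as datetime),      -- NULL when col4 is not a valid date/time
SUBSTRING(col5,1,3),
TRY_CAST(col6 as numeric (6))    -- NULL when col6 is not a valid numeric(6)
from
##Table_Alltextfiles
where
col1 = 'A00'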

Related

Change row value to column in SQL

I have a table in SQL and I want to turn the fieldColumnName values into columns and the fieldValue values into rows.
This is my table image:
As per your question, you want to alter the table (change the column name as well as the data type):
ALTER TABLE tablename
CHANGE `fieldColumnName` `column` VARCHAR(255),
CHANGE `fieldValue` `row` VARCHAR(255)
In the above query, change the data type as you wish.
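Note that CHANGE is MySQL syntax. Since the rest of this page is T-SQL, a rough SQL Server equivalent (a sketch, assuming the table really is called tablename) uses sp_rename for the column names and ALTER COLUMN for the data types:
-- rename the columns ('COLUMN' tells sp_rename what kind of object to rename)
EXEC sp_rename 'tablename.fieldColumnName', 'column', 'COLUMN';
EXEC sp_rename 'tablename.fieldValue', 'row', 'COLUMN';
-- then change the data types as needed
ALTER TABLE tablename ALTER COLUMN [column] VARCHAR(255);
ALTER TABLE tablename ALTER COLUMN [row] VARCHAR(255);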
If you know all the possible values in the fieldColumnName column then you could use pivot like this:
declare @data table(fieldValue varchar(50), fieldColumnName varchar(50))
INSERT INTO @data
SELECT '1 - value', 'col1'
UNION
SELECT '2 - value', 'col2'
UNION
SELECT NULL, 'col6'
select *
from @data
select col1, col2, col3
from
(
select fieldValue, fieldColumnName
from @data
) d
pivot
(
max(fieldValue)
for fieldColumnName in (col1, col2, col3)
) piv;
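With the sample data above, the pivot should return a single row along these lines (the 'col6' row is dropped because col6 is not in the IN list):
col1        col2        col3
1 - value   2 - value   NULL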

Conversion failed when converting the nvarchar value 'ABC113973' to data type int

I am facing TWO MAJOR PROBLEMS!!
PROBLEM 1:
I have two tables and want to show the required data into a specific gridview by using UNION in SQL. Table 1 contains columns {[Date] datetime, [Head] nvarchar(50), [Details] nvarchar(360), [ExpId] int, [Amount] decimal(18,2)}
Table 2 contains columns {[Purchase_Date] datetime, [VendorName] nvarchar(50), [Remarks] nvarchar(50), [Bill_No] nvarchar(50), [AmountPaid] decimal(18,2) }
My stored procedure is:
DECLARE @Ledger TABLE
(
DATE DATETIME,
DESCRIPTION NVARCHAR(350),
REF_NO NVARCHAR (50),
AMOUNT INT
)
INSERT INTO @Ledger
SELECT
[Date], [Head] + ' - ' + [Details], [ExpId], [Amount]
FROM
[dbo].[Table1]
UNION
SELECT
[Purchase_Date], 'PURCHASE' + ' ' + [VendorName] + ' ' + [Remarks], [Bill_No], [AmountPaid]
FROM
[dbo].[Table2]
SELECT * FROM @Ledger
When I execute the query I get an error:
Conversion failed when converting the nvarchar value 'ABC113973' to data type int.
I wonder why it throws this error; when I execute it without Table1 it's fine. Is it due to the column ExpId with datatype int? If yes, then how do I deal with it?
PROBLEM 2:
In the above @Ledger table, when I change the Amount datatype to decimal(18,0) (as I want to show the result as a decimal figure), it throws a
Conversion failed when converting varchar into numeric
sort of error, even though the Amount columns of both the actual tables are decimal(18,2).
Can anyone tell me the solution and the reason for this problem? Thanks
Try this:
DECLARE @Ledger TABLE
(
DATE DATETIME,
DESCRIPTION NVARCHAR(350),
REF_NO NVARCHAR (50),
AMOUNT INT
)
INSERT INTO @Ledger
SELECT [Date], [Head] + ' - ' + [Details], CAST([ExpId] AS NVARCHAR(50)), [Amount] FROM [dbo].[Table1]
UNION
SELECT [Purchase_Date], 'PURCHASE' + ' ' + [VendorName] + ' ' + [Remarks], CAST([Bill_No] AS NVARCHAR(50)), [AmountPaid] FROM [dbo].[Table2]
SELECT * FROM @Ledger
You are getting the error because the third column of the UNION mixes ExpId (int) from Table1 with Bill_No (nvarchar) from Table2. Since int has higher data type precedence, SQL Server tries to convert the Bill_No values such as 'ABC113973' to int and fails. Cast ExpId to nvarchar so that both branches of the UNION produce an nvarchar value for the REF_NO column.
Edit:
If you want to store the amount in the Ledger table as a decimal, then change the data type of the AMOUNT column to decimal(18,2) and make sure that the Amount columns of both the actual tables are of the same data type.
DECLARE @Ledger TABLE
(
DATE DATETIME,
DESCRIPTION NVARCHAR(350),
REF_NO NVARCHAR (50),
AMOUNT DECIMAL(18,2)
)
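Putting both fixes together (cast ExpId so the third column of the UNION is nvarchar, and declare AMOUNT as decimal(18,2)), the whole batch would look roughly like this, assuming the column definitions from the question:
DECLARE @Ledger TABLE
(
DATE DATETIME,
DESCRIPTION NVARCHAR(350),
REF_NO NVARCHAR (50),
AMOUNT DECIMAL(18,2)
)
INSERT INTO @Ledger
SELECT [Date], [Head] + ' - ' + [Details], CAST([ExpId] AS NVARCHAR(50)), [Amount] FROM [dbo].[Table1]
UNION
SELECT [Purchase_Date], 'PURCHASE' + ' ' + [VendorName] + ' ' + [Remarks], [Bill_No], [AmountPaid] FROM [dbo].[Table2]
SELECT * FROM @Ledger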

Check Missing data when id is not null or blank

I am trying to check whether data is present for an ID but not for its corresponding value; if so, the entire row should be discarded from the output. If both the value and the ID are blank, that is OK, and those rows should be retained in the output.
DECLARE @TAB TABLE
(
ID VARCHAR (50),
SKU VARCHAR (50),
Resistor_ID1 VARCHAR (50),
Resistor_Value VARCHAR (50),
Capacitor_ID VARCHAR (50),
Capacitor_Value VARCHAR (50),
Inductor_ID VARCHAR (50),
Inductor_Value VARCHAR (50)
)
INSERT @TAB
SELECT '1', 'BPN1256', '1033', '' , 'RMA56', 'Ceramic', 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'SAN9286', '' , '' , 'TMA56', 'FILM' , '' , '' UNION ALL
SELECT '1', 'RJA1896', '3033', '35OHM', 'UMA56', 'Ceramic', 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'DNN5256', '4033', '45OHM', 'QMA56', '' , 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'LXA6556', '5033', '65OHM', 'ZMA56', 'FILM' , 'PVAN59', ''
Expected Output
1 SAN9286 TMA56 FILM
1 RJA1896 3033 35OHM UMA56 Ceramic PVAN59 Ferrite
Please share your expertise.
Thanks
DECLARE @TAB TABLE
(
ID VARCHAR (50),
SKU VARCHAR (50),
Resistor_ID1 VARCHAR (50),
Resistor_Value VARCHAR (50),
Capacitor_ID VARCHAR (50),
Capacitor_Value VARCHAR (50),
Inductor_ID VARCHAR (50),
Inductor_Value VARCHAR (50)
)
INSERT @TAB
SELECT '1', 'BPN1256', '1033', '','RMA56', 'Ceramic', 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'SAN9286', '', '', 'TMA56', 'FILM', '', '' UNION ALL
SELECT '1', 'RJA1896', '3033', '35OHM', 'UMA56', 'Ceramic', 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'DNN5256', '4033', '45OHM', 'QMA56', '', 'PVAN59', 'Ferrite' UNION ALL
SELECT '1', 'LXA6556', '5033', '65OHM', 'ZMA56', 'FILM', 'PVAN59', ''
SELECT * FROM @TAB t WHERE ((t.Resistor_ID1<>'' AND t.Resistor_Value<>'') OR (t.Resistor_ID1='' AND t.Resistor_Value=''))
AND ((t.Capacitor_ID<>'' AND t.Capacitor_Value<>'') OR (t.Capacitor_ID='' AND t.Capacitor_Value=''))
AND ((t.Inductor_ID<>'' AND t.Inductor_Value<>'') OR (t.Inductor_ID='' AND t.Inductor_Value=''))
But you should take care when using empty strings versus NULL; there are significant differences. In your example you used empty strings to show the absence of data, and the select above applies to empty strings.
Well, why not just do it directly? You need rows where both the ID and the value are blank, or where both the ID and the value are not blank. Put this into the where clause, repeat it for the 3 ID-value pairs, and you're done.
SELECT * FROM @TAB
WHERE (Resistor_ID1 != '' and Resistor_Value != '' or Resistor_ID1 = '' and Resistor_Value = '')
and (Capacitor_ID != '' and Capacitor_Value != '' or Capacitor_ID = '' and Capacitor_Value = '')
and (Inductor_ID != '' and Inductor_Value != '' or Inductor_ID = '' and Inductor_Value = '')
Important note: you didn't specify your DBMS, so you might need to alter the query syntax. For example, in Oracle a blank string ('') is treated as a null value and should be checked with value is null instead of value = ''. In MySQL, null and the empty string are different things and should be checked differently.
UPD: it should work fine using = and != for MS SQL Server, if you actually inserted blank strings and not nulls. If you're not sure, you might want to take a look at How do I check if a Sql server string is null or empty
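If the table might contain NULLs as well as empty strings, a defensive variant of the same filter (just a sketch, using ISNULL so that NULL and '' are treated the same way) would be:
SELECT * FROM @TAB t
WHERE (ISNULL(t.Resistor_ID1,'') <> '' AND ISNULL(t.Resistor_Value,'') <> '' OR ISNULL(t.Resistor_ID1,'') = '' AND ISNULL(t.Resistor_Value,'') = '')
AND (ISNULL(t.Capacitor_ID,'') <> '' AND ISNULL(t.Capacitor_Value,'') <> '' OR ISNULL(t.Capacitor_ID,'') = '' AND ISNULL(t.Capacitor_Value,'') = '')
AND (ISNULL(t.Inductor_ID,'') <> '' AND ISNULL(t.Inductor_Value,'') <> '' OR ISNULL(t.Inductor_ID,'') = '' AND ISNULL(t.Inductor_Value,'') = '')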

SQL-Multiple Insert into identity table

I need to do an insert from a table with the following structure:
Table A
Col1 Col2 Col3 Col4
intID1 intID2 intID3 intID4
I need to select the rows from the above table that are null
for col1, col2, col3 and insert those rows into a table that will generate an identity
value that I need to use to insert into another table. I am not sure of the
SQL statement or the general method used to select those rows, insert them in one go, and retrieve the identity IDs one by one to insert into the next table.
Any help is greatly appreciated!
Sample process:
Table A
Col1 Col2 Col3 Col4
1 3 7 null
null null null 45
null null null 67
1) Retrieve rows 2 and 3
2) Insert rows 2 and 3 into another table to retrieve the identity id for both rows
3) Insert the identities from step 2 into another table
Venk covered steps 1 and 2, I think. For step 3 you can use the OUTPUT clause to retrieve the identity values from the set operation.
Get Identity of multiple insertion in sql server 2008
INSERT INTO TABLEB(Col1,Col2,Col3,Col4)
SELECT * FROM TABLEA WHERE Col1 is NULL AND Col2 is NULL AND Col3 is NULL;
Sounds like you need the OUTPUT clause:
declare @TableA table(Col1 int, Col2 int, Col3 int, Col4 int);
declare @TableB table(id int identity(1,1), Col1 int, Col2 int, Col3 int, Col4 int);
declare @Audit table(id int);
insert into @TableA
select 1,3,7,null union all
select null, null, null, 45 union all
select null, null, null, 67;
-- copy null columns from @TableA to @TableB
-- and output id's to @Audit
insert into @TableB
output inserted.id
into @Audit
select *
from @TableA
where Col1 is null
and Col2 is null
and Col3 is null;
-- Copied @TableB values and @Audit values
select * from @TableB;
select * from @Audit;
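For step 3 of the question, the identity values captured in @Audit can then be pushed into the next table. A minimal sketch, using a hypothetical @TableC as a stand-in for that next table:
-- @TableC is hypothetical; it stands in for the 'another table' from step 3
declare @TableC table(TableB_id int);
insert into @TableC (TableB_id)
select id from @Audit;  -- the identity values captured by the OUTPUT clause
select * from @TableC;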

Dependent insert statements

I have a table with data about a customer, Customer(name, address), with rows like "John Doe", "Some Street 123". For each row in the table, I want to insert one row in the Person(id, name) table and also one row in the Address(id, person_id, address) table.
I can accomplish this by running two insert statements for each row in Customer:
insert into Person(name) values (@name);
insert into Address(person_id, address) values (scope_identity(), @address);
But this is inefficient. I want to do the inserts in a batch, kind of like this:
-- This works, the problem is with the Address table...
insert into Person(name)
select name from Customer
-- This looks good but does not work because name is not unique.
insert into Address(person_id, address)
select p.person_id, c.address
from Customer c join Person p on c.name = p.name
Leaving this here for the fellow Google traveler that finds this post like me.
I found this solution, and it seems to work great, and doesn't require any funky schema alterations:
https://dba.stackexchange.com/questions/160210/splitting-data-into-two-tables-in-one-go
They use a MERGE statement to perform the initial insert into the first table (the table that generates the identity to be used everywhere else). The reason for using MERGE is that it allows an OUTPUT clause that can output both the newly generated identity value and the identity value from the source table (whereas the OUTPUT clause of a standard INSERT cannot reference the source table's columns). You can insert this output data into a mapping table and use that mapping table to perform the second insert.
Here's my sample code for the solution:
------------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Set up sample schema and data
------------------------------------------------------------------------------
--Source Data
IF OBJECT_ID('dbo.tmp1') IS NOT NULL DROP TABLE dbo.tmp1 --SELECT * FROM dbo.tmp1
CREATE TABLE dbo.tmp1 (tmp1ID INT IDENTITY(1,1), Col1 CHAR(1) NOT NULL, Col2 CHAR(1) NOT NULL, Col3 CHAR(1) NOT NULL, Col4 CHAR(1) NOT NULL, Col5 CHAR(1) NOT NULL, Col6 CHAR(1) NOT NULL)
INSERT INTO dbo.tmp1 (Col1, Col2, Col3, Col4, Col5, Col6)
SELECT x.c1, x.c2, x.c3, x.c4, x.c5, x.c6
FROM (VALUES ('A','B','C','D','E','F'),
('G','H','I','J','K','L'),
('M','N','O','P','Q','R')
) x(c1,c2,c3,c4,c5,c6)
IF OBJECT_ID('dbo.tmp3') IS NOT NULL DROP TABLE dbo.tmp3 --SELECT * FROM dbo.tmp3
IF OBJECT_ID('dbo.tmp2') IS NOT NULL DROP TABLE dbo.tmp2 --SELECT * FROM dbo.tmp2
--Target tables to split into
CREATE TABLE dbo.tmp2 (
tmp2ID INT IDENTITY(1,1) NOT NULL CONSTRAINT PK_tmp2 PRIMARY KEY CLUSTERED (tmp2ID ASC)
, Col1 CHAR(1) NOT NULL
, Col2 CHAR(1) NOT NULL
, Col3 CHAR(1) NOT NULL
)
CREATE TABLE dbo.tmp3 (
tmp2ID INT NOT NULL
, Col4 CHAR(1) NOT NULL
, Col5 CHAR(1) NOT NULL
, Col6 CHAR(1) NOT NULL
, CONSTRAINT FK_tmp3_tmp2ID FOREIGN KEY(tmp2ID) REFERENCES dbo.tmp2 (tmp2ID)
)
------------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Split data into two tables
------------------------------------------------------------------------------
DECLARE @Mapping TABLE (tmp1ID INT NOT NULL, tmp2ID INT NOT NULL);
--Use merge statement to output the source data PK as well as the newly inserted identity to generate a mapping table
MERGE INTO dbo.tmp2 AS tgt
USING dbo.tmp1 AS src ON (1=0)
WHEN NOT MATCHED THEN
INSERT ( Col1, Col2, Col3)
VALUES (src.Col1, src.Col2, src.Col3)
OUTPUT src.tmp1ID, Inserted.tmp2ID INTO @Mapping (tmp1ID, tmp2ID);
--Use the mapping table to insert the split data into the second table
INSERT INTO dbo.tmp3 (tmp2ID, Col4, Col5, Col6)
SELECT t2.tmp2ID, t1.Col4, t1.Col5, t1.Col6
FROM dbo.tmp2 t2
JOIN @Mapping m ON m.tmp2ID = t2.tmp2ID
JOIN dbo.tmp1 t1 ON t1.tmp1ID = m.tmp1ID
SELECT tmp2ID, Col1, Col2, Col3 FROM dbo.tmp2
SELECT tmp2ID, Col4, Col5, Col6 FROM dbo.tmp3
------------------------------------------------------------------------------
------------------------------------------------------------------------------
-- Clean up
------------------------------------------------------------------------------
DROP TABLE dbo.tmp1
DROP TABLE dbo.tmp3
DROP TABLE dbo.tmp2
------------------------------------------------------------------------------
------------------------------------------------------------------------------
GO
There is no way to do this exactly as you describe, because you lose the scope_identity() value of each row of the first insert.
A workaround may be to add the Customer primary key field to the Person table and then join on that field in the second insert.
Before the insert, create a customerID field on Person:
alter table Person add customerID int null;
Then do the bulk inserts:
-- inserting customerID
insert into Person(name, customerID)
select name, customerID from Customer
-- joining on customerID.
insert into Address(person_id, address)
select p.person_id, c.address
from Customer c
join Person p on c.customerID = p.customerID
After that you can remove the customerID field from the Person table:
alter table Person drop column customerID
It's better to create a unique field in both tables to relate them; otherwise you cannot do the join, as you don't have a unique field for the join condition.