SQL Server replace function inside of merge operation - sql

I want to match the temp variable to my table column to merge its contents, but my temp variable contains a letter that I need to remove so the two columns match. I don't know the right code for this.
This is the content of the temp table
F20151103-1010|FSI|FRANCISCA MANALO BISCOCHO|DELIVERED|42317
F20151103-1019|FSI|NINA MARTINNE HAMOY CAJAYON|DELIVERED|42314
-- Staging table: one parsed row per line of the raw import held in #temp1.
CREATE TABLE #records(
[index] int PRIMARY KEY IDENTITY  -- surrogate row number used to drive the WHILE loop below
,refnum varchar(200)   -- e.g. 'F20151103-1010' (leading letter + date + '-' + counter)
,stat varchar(200)     -- delivery status, e.g. 'DELIVERED'
,statdate varchar(200) -- kept as varchar; appears to be a date serial like '42317' -- TODO confirm
)
-- Split each pipe-delimited WHOLEROW into the three columns (fields 0, 3 and 4).
insert into #records (refnum, stat, statdate)
select
dbo.fn_Parsename(WHOLEROW,'|',0),
dbo.fn_Parsename(WHOLEROW,'|',3),
dbo.fn_Parsename(WHOLEROW,'|',4)
from #temp1
-- '@' variable prefixes restored (the page scrape rendered '@' as '#').
-- @sql, @index and @fileDate were referenced but never declared; declared here
-- so the batch parses.
declare @refnum varchar(100)
declare @stat varchar(100)
declare @statdate varchar(100)
declare @sql varchar(max)
declare @index int = 1
declare @fileDate varchar(50)  -- NOTE(review): never assigned in this snippet -- confirm where it is set
WHILE (@index <= (SELECT MAX([index]) FROM #records))
BEGIN  -- BEGIN/END now wraps the whole loop body; originally only the SET repeated, so @index never advanced
set @sql = '
MERGE gen_048_MAR2016 target
USING #records source
-- Prefix ''F'' on the target side so both sides of the comparison stay varchar;
-- the original subtracted the two columns and failed converting ''F20160323-1000'' to int.
ON ''F''+target.refdate+''-''+target.refcount = source.refnum
WHEN MATCHED THEN
UPDATE
SET
target.stat = source.stat
WHEN NOT MATCHED BY TARGET THEN
INSERT (stat, statdate)
VALUES (source.stat, source.statdate)
;'
select @refnum, @stat, @statdate
print @sql
exec (@sql)
SELECT 'File has been successfully uploaded', @fileDate,'success' as msg
set @index = @index + 1
END
ERROR:
Conversion failed when converting the varchar value 'F20160323-1000' to data type int.

Could you try replacing
'+'-'+' with +''-''+
-- Escape the hyphen inside the dynamic string (''-'') so it stays part of the
-- generated SQL instead of terminating the literal and subtracting the columns.
-- '@' prefix restored (the page scrape rendered '@' as '#').
-- NOTE(review): replace(refnum, 'F', '') still references a column outside any
-- query here -- the second suggestion below avoids that entirely.
set @sql = '
MERGE gen_048_MAR2016 target
USING #records source
ON target.refdate+''-''+target.refcount = source.'+replace(refnum, 'F', '')+'
WHEN MATCHED THEN
UPDATE
SET
target.stat = source.stat
WHEN NOT MATCHED BY TARGET THEN
INSERT (stat, statdate)
VALUES (source.stat, source.statdate)
;'
EDIT:
target.refdate'+'-'+'target.refcount is not concatenation.
You are subtracting the 2 columns.
Why not just add the 'F' to the other side?
-- Simpler fix: add the 'F' prefix on the target side and compare refnum as a
-- whole string -- no per-row string surgery needed in the outer batch.
-- '@' prefix restored (the page scrape rendered '@' as '#').
set @sql = '
MERGE gen_048_MAR2016 target
USING #records source
ON ''F''+target.refdate+''-''+target.refcount = source.refnum
WHEN MATCHED THEN
UPDATE
SET
target.stat = source.stat
WHEN NOT MATCHED BY TARGET THEN
INSERT (stat, statdate)
VALUES (source.stat, source.statdate)
;'

Related

Divide using formula from different table in sql

I need to evaluate a formula in sql server 2008
Table 1 contains
Entity Value
A 2424053.500000
B 1151425.412500
C 484810.700000
Table 2 contains
Entity Formula
A (2100*(1-0.0668)*24*mday*10)
B (1000*(1-0.0575)*24*mday*10)
C (1260*(1-0.09)*24*mday*10)
Where mday is number of days taken from user. Data type of Formula is a string.
I need to calculate the output of value/formula for each entity
can you provide me the query for the same
Example solution for SQL Server 2008, adjust as required...
-- Example solution: substitute the per-entity value into each stored formula
-- string, then execute each result as dynamic SQL.
-- '@' variable prefixes restored (the page scrape rendered '@' as '#').
IF EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('EntityValue'))
BEGIN
DROP TABLE EntityValue;
END;
CREATE TABLE EntityValue
(
Id CHAR(1),
mdayValue DECIMAL(13, 6)
)
INSERT INTO EntityValue
VALUES ('1', 2424053.500000)
, ('2', 1151425.412500)
, ('3', 484810.700000)
IF EXISTS (SELECT * FROM sys.tables WHERE object_id = object_id('EntityFormula'))
BEGIN
DROP TABLE EntityFormula;
END;
CREATE TABLE EntityFormula
(
Id CHAR(1),
Formula NVARCHAR(MAX)
)
INSERT INTO EntityFormula
VALUES ('1', '(2100*(1-0.0668)*24*mday*10)')
, ('2', '(1000*(1-0.0575)*24*mday*10)')
, ('3', '(1260*(1-0.09)*24*mday*10)')
-- One pre-substituted formula per row, keyed by RowId to drive the loop.
DECLARE @FormulaTable AS TABLE
(
RowId INT IDENTITY(1,1)
,Formula NVarchar(max)
);
INSERT INTO @FormulaTable (Formula)
SELECT Formula = REPLACE(eFormula.Formula, 'mday', CAST(eValue.mdayValue AS NVARCHAR(MAX)))
FROM EntityFormula AS eFormula
INNER JOIN EntityValue AS eValue ON eValue.ID = eFormula.ID;
DECLARE @TSql NVarchar(max), @CurrentRowId INT;
SET @CurrentRowId = 1;
WHILE(1=1)
BEGIN
SELECT @TSql = 'SELECT ' + Formula
FROM @FormulaTable
WHERE RowID = @CurrentRowId
-- No row for this id: all formulas processed.
IF(@@ROWCOUNT = 0)
BEGIN
BREAK;
END
EXEC sp_executesql @TSql
SET @CurrentRowId = @CurrentRowId + 1;
END

SQL - Replacing all "ASCII/special characters" in a string

Edit: I have about 80 characters that are causing problems in my application so I don't want to hard code a REPLACE for every single character. I think it would be easier to create a separate table with two columns,"special characters" and "replacement characters", and I will remove those columns from the original table which contains the column "StringTest". My goal will be figuring out how to use the characters table to replace characters in the string table.
I am trying to replace all "special characters" (ie À, Æ, Ç) with "MappedCharacters" (A, AE, C) in SQL Server. I have tried two different techniques, one using a cursor, one without a cursor, to search through a string and replace all special characters with mapped characters. Each of my methods only replaces characters that are in the same row as the string.
Example before:
num SpecialCharacter MappedCharacter StringTest
1 À A StringÀÆ
2 Æ AE ÆStringÆ
3 Ç C StrÇÀing
Example after:
num SpecialCharacter MappedCharacter StringTest
1 À A StringAÆ
2 Æ AE AEStringAE
3 Ç C StrCÀing
Preferred Output:
num SpecialCharacter MappedCharacter StringTest
1 À A StringAAE
2 Æ AE AEStringAE
3 Ç C StrCAing
So you can see that I want to replace all "special characters" in StringTest but only characters that are in the same row are getting replaced.
I haven't quite figured out how to do that just yet.
Here are the two SQL code that I have been trying to modify (I only need one to work)
First Method:
-- First method (cursor). '@' prefixes restored (scraped as '#'). The original
-- never fetched inside the loop, so @@FETCH_STATUS stayed 0 and it spun
-- forever; the FETCH NEXT at the bottom of the loop fixes that.
DECLARE @cASCIINum INT;
DECLARE @cSpecialChar VARCHAR(50);
DECLARE @cMappedChar VARCHAR(50);
DECLARE @cStringTest VARCHAR(50);
DECLARE @mapCursor as CURSOR;
SET @mapCursor = CURSOR FOR
SELECT [ASCIINum]
,[SpecialChar]
,[MappedChar]
,[StringTest]
FROM [intranet].[dbo].[CharMapTestTab];
OPEN @mapCursor;
FETCH NEXT FROM @mapCursor INTO @cASCIINum,
@cSpecialChar,
@cMappedChar,
@cStringTest;
WHILE @@FETCH_STATUS = 0
BEGIN
-- NOTE(review): each row is replaced only with its own row's mapping --
-- the same-row limitation described in the question text.
UPDATE [intranet].[dbo].[CharMapTestTab]
SET StringTest = REPLACE(StringTest, SpecialChar, MappedChar)
WHERE SpecialChar <> MappedChar
FETCH NEXT FROM @mapCursor INTO @cASCIINum,
@cSpecialChar,
@cMappedChar,
@cStringTest;
END
CLOSE @mapCursor;
DEALLOCATE @mapCursor;
Second Method:
-- Second method (keyset loop). '@' prefixes restored (scraped as '#').
-- TOP (1) added: without it the variable assignment runs over every
-- qualifying row and ends up holding the last (maximum) ASCIINum, skipping
-- all intermediate rows.
DECLARE @ASCIINum INT = 0
WHILE (1 = 1)
BEGIN
-- Seek the next ASCIINum after the one just processed.
SELECT TOP (1) @ASCIINum = ASCIINum
FROM [intranet].[dbo].[CharMapTestTab]
WHERE ASCIINum > @ASCIINum
ORDER BY ASCIINum
IF @@ROWCOUNT = 0 BREAK;
UPDATE [intranet].[dbo].[CharMapTestTab]
SET StringTest = REPLACE(StringTest, SpecialChar, MappedChar)
WHERE SpecialChar <> MappedChar
SELECT TOP 1000 [ASCIINum]
,[SpecialChar]
,[MappedChar]
,[StringTest]
FROM [intranet].[dbo].[CharMapTestTab]
END
Try this, it works better than looping because there is only 1 update:
-- create test table vc
create table vc(StringTest varchar(20))
insert vc values('StringÀÆ'), ('ÆStringÆ')
go
-- create test table CharacterMapping
create table CharacterMapping(SpecialCharacter char(1), MappedCharacter varchar(2))
insert CharacterMapping values('À', 'A'),('Æ', 'AE'), ('Ç', 'C')
go
-- Build one nested replace(...) expression covering every mapping row, then
-- run it as a single UPDATE. '@' prefixes restored (scraped as '#').
declare @x varchar(max) = 'StringTest'
select @x = 'replace('+@x+', ''' + SpecialCharacter + ''','''+MappedCharacter+''')'
from CharacterMapping
set @x = 'update vc set StringTest=' + @x +' from vc'
exec (@x)
select * from vc
Result:
StringAAE
AEStringAE
I would make a separate mapping table which contains the bad character and its corresponding good character, one set per row. Then loop over that table and do a replace for each character set.
-- Mapping-table loop: one REPLACE pass over all strings per bad/good pair.
-- '@' prefixes restored (table variables and scalars were scraped as '#').
DECLARE @map TABLE (
id INT,
badChar CHAR,
goodChar CHAR
)
DECLARE @strings TABLE (
searchString VARCHAR(50)
)
INSERT INTO @map
VALUES
(1, 'y', 'a'),
(2, 'z', 'b')
DECLARE @curRow INT, @totalRows INT
SET @curRow = 1
SELECT @totalRows = COUNT(*) FROM @map
INSERT INTO @strings
VALUES
('zcccyccz'),
('cccyccz')
WHILE @curRow <= @totalRows
BEGIN
-- Apply the current mapping row to every string.
UPDATE @strings
SET searchString = REPLACE(searchString, badChar, goodChar)
FROM @map
WHERE id = @curRow
SET @curRow = @curRow + 1
END
SELECT * FROM @strings
--Output
--bcccaccb
--cccaccb
It would be helpful to know how many rows are in your table and how many you estimate to have "special characters". Also, are there only 3 special characters? if you have 40 or less special characters, it may look ridiculous, but I'd just nest as many REPLACE() calls as you have special characters, like:
-- Single-statement alternative: nest one REPLACE per special character.
-- The innermost replace runs first; order only matters if a mapped output
-- could itself match a later pattern.
UPDATE YourTable SET YourColumn = REPLACE(
REPLACE(
REPLACE(YourColumn,'Ç','C')
,'Æ','AE')
,'À','A')
if most rows have special characters, I'd skip any WHERE. if only a few rows have special characters, I'd use a CTE to identify them:
-- Restrict the UPDATE to rows containing at least one special character;
-- UNION (not UNION ALL) de-duplicates keys matching more than one pattern.
;WITH AllSpecialRows AS
(
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%À%'
UNION
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%Æ%'
UNION
SELECT PrimaryKey FROM YourTable WHERE YourColumn LIKE '%Ç%'
)
UPDATE y
SET YourColumn = REPLACE(
REPLACE(
REPLACE(YourColumn,'Ç','C')
,'Æ','AE')
,'À','A')
FROM YourTable y
INNER JOIN AllSpecialRows s ON y.PrimaryKey =s.PrimaryKey
-- One UPDATE per mapping; the LIKE filter skips rows with nothing to replace.
-- NOTE(review): 'table' and 'column' are placeholders -- both are reserved
-- words, so real identifiers (or brackets) are needed here.
update table
set column = REPLACE(column,'À','A')
where column like ('%À%')
update table
set column = REPLACE(column,'Æ','AE')
where column like ('%Æ%')
I will leave the 3rd to you
Or this might be more efficient
-- Combined form: one pass, filtered to rows containing either character.
update table
set column = REPLACE(REPLACE(column,'À','A'),'Æ','AE')
where column like ('%À%')
or column like ('%Æ%')
If you really want to process a list of mapped characters then this is not a proper answer
#t-clausen.dk answer with Table variables and temp tables, just to avoid people mess up their dev databases with additional tables.
TABLE Variables:
-- Create test table variable @CharacterMapping ('@' restored from the scrape's '#')
DECLARE @CharacterMapping TABLE (SpecialCharacter char(1), MappedCharacter varchar(2))
INSERT @CharacterMapping VALUES('À', 'A'), ('Æ', 'AE'), ('Ç', 'C')
--Build the varchar for updating
DECLARE @x varchar(max) = 'StringTest'
SELECT @x = 'replace('+@x+', ''' + SpecialCharacter + ''',''' + MappedCharacter + ''')'
FROM @CharacterMapping
-- The table variable @vc only exists inside the dynamic batch, so declare,
-- fill, update and select it all within the same EXEC.
SET @x = 'DECLARE @vc TABLE(StringTest varchar(20));'
+ ' insert @vc values(''StringÀÆ''), (''ÆStringÆ'');'
+ 'update @vc set StringTest=' + @x +' from @vc;'
+ 'SELECT * FROM @vc;'
Exec (@x)
GO
With Temp table:
-- Create test temp table #vc (a real temp table, so it IS visible inside EXEC)
CREATE TABLE #vc(StringTest varchar(20))
INSERT #vc VALUES('StringÀÆ'), ('ÆStringÆ')
-- Mapping rows as a table variable ('@' restored from the scrape's '#')
DECLARE @CharacterMapping TABLE (SpecialCharacter char(1), MappedCharacter varchar(2))
INSERT @CharacterMapping VALUES('À', 'A'), ('Æ', 'AE'), ('Ç', 'C')
--Build the varchar for updating
DECLARE @x varchar(max) = 'StringTest'
SELECT @x = 'replace('+@x+', ''' + SpecialCharacter + ''',''' + MappedCharacter + ''')'
FROM @CharacterMapping
SET @x = 'update #vc set StringTest=' + @x +' from #vc'
-- Execute
EXEC (@x)
-- Select the results
SELECT * FROM #vc;
-- Drop temp table
DROP TABLE #vc;
GO

Dynamically Create tables and Insert into it from another table with CSV values

Have a Table with the CSV Values in the columns as below
ID Name text
1 SID,DOB 123,12/01/1990
2 City,State,Zip NewYork,NewYork,01234
3 SID,DOB 456,12/21/1990
What is need to get is 2 tables in this scenario as out put with the corresponding values
ID SID DOB
1 123 12/01/1990
3 456 12/21/1990
ID City State Zip
2 NewYork NewYork 01234
Is there any way of achieving it using a Cursor or any other method in SQL server?
There are several ways that this can be done. One way that I would suggest would be to split the data from the comma separated list into multiple rows.
Since you are using SQL Server, you could implement a recursive CTE to split the data, then apply a PIVOT function to create the columns that you want.
-- Recursive CTE: each recursion peels the first comma-separated token off
-- both Name and text, pairing NameItem with textItem positionally.
;with cte (id, NameItem, Name, textItem, text) as
(
select id,
cast(left(Name, charindex(',',Name+',')-1) as varchar(50)) NameItem,
stuff(Name, 1, charindex(',',Name+','), '') Name,
cast(left(text, charindex(',',text+',')-1) as varchar(50)) textItem,
stuff(text, 1, charindex(',',text+','), '') text
from yt
union all
select id,
cast(left(Name, charindex(',',Name+',')-1) as varchar(50)) NameItem,
stuff(Name, 1, charindex(',',Name+','), '') Name,
cast(left(text, charindex(',',text+',')-1) as varchar(50)) textItem,
stuff(text, 1, charindex(',',text+','), '') text
from cte
-- Recurse while both strings still have unconsumed tokens.
where Name > ''
and text > ''
)
-- Keep only the SID/DOB pairs and pivot them into columns of a new table.
select id, SID, DOB
into table1
from
(
select id, nameitem, textitem
from cte
where nameitem in ('SID', 'DOB')
) d
pivot
(
max(textitem)
for nameitem in (SID, DOB)
) piv;
See SQL Fiddle with Demo. The recursive version will work great but if you have a large dataset, you could have some performance issues so you could also use a user defined function to split the data:
-- Splits two parallel delimited strings into (colName, colValue) pairs by
-- position. '@' prefixes restored (the page scrape rendered '@' as '#');
-- the unused @temp table variable was removed.
create FUNCTION [dbo].[Split](@String1 varchar(MAX), @String2 varchar(MAX), @Delimiter char(1))
returns @temptable TABLE (colName varchar(MAX), colValue varchar(max))
as
begin
declare @idx1 int
declare @slice1 varchar(8000)
declare @idx2 int
declare @slice2 varchar(8000)
select @idx1 = 1
-- Nothing to split: return the empty table.
if len(@String1)<1 or @String1 is null return
while @idx1 != 0
begin
set @idx1 = charindex(@Delimiter,@String1)
set @idx2 = charindex(@Delimiter,@String2)
if @idx1 !=0
begin
set @slice1 = left(@String1,@idx1 - 1)
set @slice2 = left(@String2,@idx2 - 1)
end
else
begin
-- Last (or only) token: take the remainder of each string.
set @slice1 = @String1
set @slice2 = @String2
end
if(len(@slice1)>0)
insert into @temptable(colName, colValue) values(@slice1, @slice2)
-- Consume the token just handled from both strings.
set @String1 = right(@String1,len(@String1) - @idx1)
set @String2 = right(@String2,len(@String2) - @idx2)
if len(@String1) = 0 break
end
return
end;
Then you can use a CROSS APPLY to get the result for each row:
-- Split each row's parallel CSV columns with the UDF, keep the SID/DOB pairs,
-- and pivot them into columns of a new table.
select id, SID, DOB
into table1
from
(
select t.id,
c.colname,
c.colvalue
from yt t
cross apply dbo.split(t.name, t.text, ',') c
where c.colname in ('SID', 'DOB')
) src
pivot
(
max(colvalue)
for colname in (SID, DOB)
) piv;
See SQL Fiddle with Demo
You'd need to approach this as a multi-step ETL project. I'd probably start with exporting the two types of rows into a couple staging tables. So, for example:
-- ETL stage 1: separate the two record shapes into their own result sets
-- before splitting the CSV columns.
select * from yourtable /* rows that start with a number */
where substring(text,1,1) in
('0','1','2','3','4','5','6','7','8','9')
select * from yourtable /* rows that don't start with a number */
where substring(text,1,1)
not in ('0','1','2','3','4','5','6','7','8','9')
/* or simply this to follow your example explicitly */
select * from yourtable where name like 'sid%'
select * from yourtable where name like 'city%'
Once you get the two types separated then you can split them out with one of the already written split functions found readily out on the interweb.
Aaron Bertrand (who is on here often) has written up a great post on the variety of ways to split comma delimted strings using SQL. Each of the methods are compared and contrasted here.
http://www.sqlperformance.com/2012/07/t-sql-queries/split-strings
If your row count is minimal (under 50k let's say) and it's going to be a one time operation than pick the easiest way and don't worry too much about all the performance numbers.
If you have a ton of rows or this is an ETL process that will run all the time then you'll really want to pay attention to that stuff.
A simple solution using cursors to build temporary tables. This has the limitation of making all columns VARCHAR and would be slow for large amounts of data.
--** Set up example data ('@' prefixes restored from the scrape's '#';
--** ##Table remains a GLOBAL temp table on purpose: a table created inside
--** EXEC() must survive that scope to be usable by the outer batch)
DECLARE @Source TABLE (ID INT, Name VARCHAR(50), [text] VARCHAR(200));
INSERT INTO @Source
(ID, Name, [text])
VALUES (1, 'SID,DOB', '123,12/01/1990')
, (2, 'City,State,Zip', 'NewYork,NewYork,01234')
, (3, 'SID,DOB', '456,12/21/1990');
--** Declare variables
DECLARE @Name VARCHAR(200) = '';
DECLARE @Text VARCHAR(1000) = '';
DECLARE @SQL VARCHAR(MAX);
--** Set up cursor for the tables (one per distinct column list)
DECLARE cursor_table CURSOR FAST_FORWARD READ_ONLY FOR
SELECT s.Name
FROM @Source AS s
GROUP BY Name;
OPEN cursor_table
FETCH NEXT FROM cursor_table INTO @Name;
WHILE @@FETCH_STATUS = 0
BEGIN
--** Dynamically create a temp table with the specified columns
SET @SQL = 'CREATE TABLE ##Table (' + REPLACE(@Name, ',', ' VARCHAR(50),') + ' VARCHAR(50));';
EXEC(@SQL);
--** Set up cursor to insert the rows
DECLARE row_cursor CURSOR FAST_FORWARD READ_ONLY FOR
SELECT s.Text
FROM @Source AS s
WHERE Name = @Name;
OPEN row_cursor;
FETCH NEXT FROM row_cursor INTO @Text;
WHILE @@FETCH_STATUS = 0
BEGIN
--** Dynamically insert the row
SELECT @SQL = 'INSERT INTO ##Table VALUES (''' + REPLACE(@Text, ',', ''',''') + ''');';
EXEC(@SQL);
FETCH NEXT FROM row_cursor INTO @Text;
END
--** Display the table
SELECT *
FROM ##Table;
--** Housekeeping
CLOSE row_cursor;
DEALLOCATE row_cursor;
DROP TABLE ##Table;
FETCH NEXT FROM cursor_table INTO @Name;
END
CLOSE cursor_table;
DEALLOCATE cursor_table;

Update multiple XML elements from SQL Variables

I need to update a value in an XML element with the contents of a TSQL variable. The added complication is that there can be multiple elements with the same name and all elements need to get updated. Here is a sample. Note that customer 1000000 has two customer_firstname elements.
-- Test fixture: single-column table holding one Customer XML document per row.
CREATE TABLE Customer_Test
(
[customer_data] [xml] NULL
)
-- populate statements
-- Customer 1000000 deliberately carries two customer_firstname elements.
insert into Customer_Test (customer_data) values ('<Customer Note="two customer_firstnames"><customer_id>1000000</customer_id><customer_firstname>Mary</customer_firstname><customer_firstname>Jane</customer_firstname><customer_lastname>Smith</customer_lastname></Customer>');
insert into Customer_Test (customer_data) values ('<Customer Note="normal, no problem"><customer_id>1000001</customer_id><customer_firstname>Joe</customer_firstname><customer_lastname>Bloggs</customer_lastname></Customer>');
The code below works just fine on xml structured like that in the customer_ID = '1000001' record but only the first customer_firstname element gets updated for situations like customer_ID = '1000000'
-- '@' prefixes restored (the page scrape rendered '@' as '#'); sql:variable()
-- requires the '@' form inside the XQuery string as well.
-- The [1] predicate means only the FIRST customer_firstname node is updated --
-- the limitation discussed below.
DECLARE @newName varchar(10) = 'xxx'
DECLARE @IDValue varchar(10) = '1000000'
UPDATE [Customer_Test]
SET customer_data.modify('replace value of (Customer/customer_firstname/text())[1] with sql:variable("@newName")')
WHERE customer_data.value('(/Customer/customer_id)[1]','varchar(50)') = @IDValue
I am really stuck on this - I need all values of the customer_firstname element to be set to the same value if they are present. I half suspect a CROSS APPLY is required but all my attempts to code one would not compile.
I would very much value any advice which might be provided. Thanks in advance.
It is not possible to update more than one value in the XML with one update statement.
You can do it in a while loop that iterates the number of first names you have in one XML.
-- '@' prefixes restored (the page scrape rendered '@' as '#').
DECLARE @newName varchar(10) = 'xxx'
DECLARE @IDValue varchar(10) = '1000000'
DECLARE @FirstNameCount INT
-- Get the max number of first names in one XML
SELECT @FirstNameCount = max(customer_data.value('count(Customer/customer_firstname)', 'int'))
FROM Customer_Test
WHERE customer_data.value('(/Customer/customer_id)[1]','varchar(50)') = @IDValue
-- Loop from the last positional occurrence down to the first; modify() can
-- only change one node per statement.
WHILE @FirstNameCount > 0
BEGIN
UPDATE [Customer_Test]
SET customer_data.modify('replace value of (Customer/customer_firstname[sql:variable("@FirstNameCount")]/text())[1] with sql:variable("@newName")')
WHERE customer_data.value('(/Customer/customer_id)[1]','varchar(50)') = @IDValue
SET @FirstNameCount = @FirstNameCount - 1
END
Isn't it worth deduplicating your XML at this point? If you are setting all values to the same thing then there is really no point in storing the second name. You can delete the second customer_firstname elements based on their position, eg
-- Deduplicate instead: delete every first-name element after the first, then
-- a single positional replace covers all rows. '@' prefixes restored
-- (the page scrape rendered '@' as '#').
SELECT 'before' s, DATALENGTH( customer_data ) dl, [customer_data] FROM Customer_Test
UPDATE [Customer_Test]
SET customer_data.modify('delete Customer/customer_firstname[position() > 1]')
SELECT 'after 1' s, DATALENGTH( customer_data ) dl, [customer_data] FROM Customer_Test
DECLARE @newName varchar(10) = 'xxx'
DECLARE @IDValue varchar(10) = '1000000'
UPDATE [Customer_Test]
SET customer_data.modify('replace value of (Customer/customer_firstname/text())[1] with sql:variable("@newName")')
WHERE customer_data.value('(/Customer/customer_id)[1]','varchar(50)') = @IDValue
SELECT 'after 2' s, DATALENGTH( customer_data ) dl, [customer_data] FROM Customer_Test

SQL: Retrieving Unique ID with Multiple Attributes

Ok, I had trouble describing this. I have:
material table (materialID, material, etc...)
ThicknessRange table (ThicknessRangeID, ThicknessRange)
MaterialThicknessRange table (MaterialID, ThicknessRangeID)
I am trying to retrieve all MaterialID's from the MaterialThicknessRange table that fit all required ThicknessRangeID's.
For example, any MaterialID with ThicknessRangeID 1 AND ThicknessRangeID 2, etc with a variable number of ThicknessRangeID's (selected from checkboxes by the user).
Thanks in advance.
Are you guaranteed to have only one entry in the MaterialThicknessRange table for a given Material/ThicknessRange combination?
-- Relational division: count how many of the required range IDs each material
-- matches. HAVING > 1 works for exactly two required IDs; with N required IDs
-- use COUNT(...) = N (assumes one row per material/range combination, per the
-- question above).
SELECT MaterialID, COUNT(MaterialID) As NumMaterialThicknesses
FROM MaterialThicknessRange
WHERE ThicknessRangeID IN (1, 2)
GROUP BY MaterialID
HAVING COUNT(MaterialID) > 1
I'm using something like this
-- Join against the CSV-to-table function's output to filter by the user's
-- selected IDs.
-- NOTE(review): 'ThiknessRangeID' looks like a typo for 'ThicknessRangeID' --
-- verify against the actual schema.
select MaterialID from MaterialThicknessRange MTR inner join
dbo.TransformCSVToTable('1,2,15') IDs on MTR.ThiknessRangeID = IDs.ID
where dbo.TransformCSVToTable is a user-defined function that transforms a CSV string into a one-column table. Below is one sample of such a function
-- Parses a comma-separated list of integers into a one-column table, skipping
-- non-numeric tokens. '@' prefixes restored (the page scrape rendered '@' as
-- '#'); the unused @temp table variable was removed.
ALTER FUNCTION [dbo].[fn_IntegerParameterListFromString]
(
@IntegerParameterList varchar(max)
)
RETURNS @result TABLE (IntegerID int)
AS
begin
declare @s varchar(max), @s1 varchar(10)
declare @len int
set @len =len(@IntegerParameterList)
set @s = @IntegerParameterList
-- Ensure the list is comma-terminated so every token is followed by ','.
if (right(@s,1)<>',') set @s = @s +','
while @s<>''
begin
set @s1 = substring(@s,1,charindex(',',@s)-1)
if (isnumeric(@s1)= 1)
insert @result (IntegerID) Values ( Cast(@s1 as int))
if (CHARINDEX(',',@s)>0)
begin
-- Drop the consumed token (and its comma) from the front of the list.
set @s = substring (@s, charindex(',',@s)+1, @Len)
end
else
begin
-- No comma left: handle the final token and stop.
if isnumeric(@s) = 1
insert @result (IntegerID) Values ( Cast(@s as int))
set @s = ''
end
end
return
end