How do I use the declared variable @CodeID inside the SQL string? When I run the following statement I get an "Invalid object name (..)" error.
WHILE @FolderID <= @FolderMaxID
BEGIN
    SELECT @Db = Db
    FROM #Folders
    WHERE ID = @FolderID

    SET @Sql = N'
        DECLARE @CodeID NVARCHAR(256)
        SELECT TOP(1) @CodeID = CodeType
        FROM ' + @Db + '.bla.Field
        WHERE Name = ''Example''

        SELECT DISTINCT C.Name
        FROM ' + @Db + '.Document
            INNER JOIN ' + @Db + '.bla.Code_@CodeID C ON D.ID = C.ID'

    EXEC ( @Sql )

    SET @FolderID = @FolderID + 1
END
It looks to me like you need two levels of dynamic SQL, with the first level inserting the database name (from #Folders) and the second level inserting a constructed table name (based on the CodeType column of the database-local bla.Field table).
I do not know of any way to parameterize database names or table names using sp_executesql, so I'm sticking with built-up dynamic SQL and EXEC (). (If someone makes a case for preferring sp_executesql over EXEC when not using parameters, then it may be worth the switch.)
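To illustrate that limitation, here is a sketch of the kind of thing that does not work (the names are borrowed from the question); sp_executesql only substitutes values, so @Db here is parsed as a table variable reference rather than a database name:
-- This does NOT work: identifiers cannot be passed as sp_executesql parameters
EXEC sp_executesql
    N'SELECT TOP(1) CodeType FROM @Db.bla.Field WHERE Name = @Name',
    N'@Db sysname, @Name NVARCHAR(256)',
    @Db = N'db1', @Name = N'Example';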
Try something like:
WHILE @FolderID <= @FolderMaxID
BEGIN
    SELECT @Db = Db
    FROM #Folders
    WHERE ID = @FolderID

    SET @Sql = N'
        DECLARE @CodeID NVARCHAR(256)
        SELECT TOP(1) @CodeID = CodeType
        FROM ' + QUOTENAME(@Db) + '.bla.Field
        WHERE Name = ''Example''

        DECLARE @Sql2 NVARCHAR(MAX) = N''
        SELECT DISTINCT C.Name
        FROM ' + QUOTENAME(@Db) + '.bla.Document D
            INNER JOIN ' + QUOTENAME(@Db) + '.bla.'' + QUOTENAME(''Code_'' + @CodeID) + '' C ON D.ID = C.ID
        ''

        EXEC ( @Sql2 )
    '

    EXEC ( @Sql )

    SET @FolderID = @FolderID + 1
END
This implements dynamic SQL within dynamic SQL. Doubled quotes in the outer SQL template become single quotes in the inner SQL. The originally posted code seemed to be missing a schema qualifier and an alias for the Document table, so I inserted them ("bla" and "D"). I also added QUOTENAME around the injected names, as suggested by Larnu.
The first level of dynamic SQL would generate something like:
DECLARE @CodeID NVARCHAR(256)
SELECT TOP(1) @CodeID = CodeType
FROM [db1].bla.Field
WHERE Name = 'Example'

DECLARE @Sql2 NVARCHAR(MAX) = N'
SELECT DISTINCT C.Name
FROM [db1].bla.Document D
    INNER JOIN [db1].bla.' + QUOTENAME('Code_' + @CodeID) + ' C ON D.ID = C.ID
'

EXEC ( @Sql2 )
The second level would generate something like:
SELECT DISTINCT C.Name
FROM [db1].bla.Document D
INNER JOIN [db1].bla.[Code_Table1] C ON D.ID = C.ID
Note that each loop iteration will generate a separate result. If you wish to combine results, you will need to define a #temp table, insert the individual results into that table, and then select the combined results at the end of your script.
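For example, a minimal sketch of that combine-results variant (the #Results table and its single Name column are assumptions here, matching what the inner SELECT returns):
-- Collect every iteration's rows into one temp table, then report once at the end.
CREATE TABLE #Results (Name NVARCHAR(256));   -- assumed shape of the inner SELECT

-- inside the loop, replace EXEC ( @Sql ) with:
INSERT INTO #Results (Name)
EXEC ( @Sql );    -- INSERT ... EXEC captures the result set produced by the dynamic batch

-- after the loop:
SELECT DISTINCT Name FROM #Results;
DROP TABLE #Results;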
Note that I haven't tested the specific code above, so it might need some debugging (add "PRINT @Sql2" before the EXEC) if it doesn't work straight away.
ADDENDUM
Per @trenton-ftw's comments below, an OUTPUT parameter can be used to capture the result of the first query so that it can be included in the second query without the need for nesting. Two executions are still required. Below is a revised example.
DECLARE @Folders TABLE (ID INT IDENTITY(1,1), Db sysname)
INSERT @Folders VALUES ('db1'), ('db2')

DECLARE @SearchName NVARCHAR(256) = 'Example'
DECLARE @Db sysname
DECLARE @Sql NVARCHAR(MAX)
DECLARE @CodeID NVARCHAR(256)

DECLARE @FolderMaxID INT = (SELECT MAX(ID) FROM @Folders)
DECLARE @FolderID INT = 1

WHILE @FolderID <= @FolderMaxID
BEGIN
    SELECT @Db = Db
    FROM @Folders
    WHERE ID = @FolderID

    SET @Sql = N'
        SET @CodeID = @SearchName + ''-Test''
        --SELECT TOP(1) @CodeID = CodeType
        --FROM ' + QUOTENAME(@Db) + '.bla.Field
        --WHERE Name = @SearchName'
    PRINT @Sql
    EXEC sp_executesql @Sql,
        N'@SearchName NVARCHAR(256), @CodeID NVARCHAR(256) OUTPUT',
        @SearchName, @CodeID OUTPUT

    SET @Sql = N'
        --SELECT DISTINCT C.Name
        --FROM ' + QUOTENAME(@Db) + '.bla.Document D
        --    INNER JOIN ' + QUOTENAME(@Db) + '.bla.' + QUOTENAME('Code_' + @CodeID) + ' C ON D.ID = C.ID'
    PRINT @Sql
    EXEC sp_executesql @Sql

    SET @FolderID = @FolderID + 1
END
For demo purposes, I also parameterized the search name as an input parameter and added some temporary code to make it stand-alone testable. A final version would uncomment the actual SQL and remove the PRINT statements and the test @CodeID assignment.
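For reference, once the test code is removed and the real statements are uncommented, the two batches inside the loop would look roughly like this:
SET @Sql = N'
    SELECT TOP(1) @CodeID = CodeType
    FROM ' + QUOTENAME(@Db) + N'.bla.Field
    WHERE Name = @SearchName'
EXEC sp_executesql @Sql,
    N'@SearchName NVARCHAR(256), @CodeID NVARCHAR(256) OUTPUT',
    @SearchName, @CodeID OUTPUT

SET @Sql = N'
    SELECT DISTINCT C.Name
    FROM ' + QUOTENAME(@Db) + N'.bla.Document D
        INNER JOIN ' + QUOTENAME(@Db) + N'.bla.' + QUOTENAME('Code_' + @CodeID) + N' C ON D.ID = C.ID'
EXEC sp_executesql @Sql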
I am trying to create new tables using identifiers taken from a list.
DECLARE @Counter INT, @TableName NVARCHAR(20)
SET @Counter = 1

WHILE (@Counter <= 20)
BEGIN
    SELECT @TableName = TableName FROM [dbo].[TableList] WHERE [index] = @Counter

    SELECT * INTO [dbo].[@TableName.combine] FROM [dbo].[@TableName] t
    LEFT JOIN [dbo].[cost] c ON t.x = c.y

    SET @Counter = @Counter + 1
END
And it keeps saying the object [dbo].[@TableName] is invalid, even though the table I am referring to already exists.
I looked at declaring a table variable, but the table already exists in the database, so how can I point to the table that I want?
You need to use dynamic SQL for this.
You can build one big query using STRING_AGG and then execute it
DECLARE @sql nvarchar(max);

SELECT @sql =
    STRING_AGG(CAST('
        SELECT *
        INTO dbo.' + QUOTENAME(tl.TableName + '.combine') + '
        FROM dbo.' + QUOTENAME(tl.TableName) + ' t
        LEFT JOIN dbo.cost c ON t.x = c.y;
    ' AS nvarchar(max)), '
    ')
FROM dbo.TableList tl;

EXEC sp_executesql @sql;
I would like to use a query to loop through tables that have similar names with a number appended (i.e. tableJan01, tableJan02, tableJan03, ..., tableJan30).
Is there a way in SQL Server to use the same query statement while varying the table name within it (similar to using parameter values)? I need this to add different input to each month's table.
declare @x nvarchar(50) = 'abc'
declare @z int = 1

while (@z < 30)
BEGIN
    SET @z = @z + 1;
    select * from (@x)
END;
This shows the error:
Must declare the scalar variable "@CharVariable".
This script shows a syntax error too:
declare @x nvarchar(50) = 'abc'
declare @z int = 1

while (@z < 30)
BEGIN
    SET @z = @z + 1;
    select * from (@x + @z)
END;
Also, even simple code like this doesn't work:
declare @x nvarchar(50) = 'abc'
select * from @x
I agree with John Cappelletti that this requirement feels like a design flaw; however, to get your list of table names you can do something like this:
declare @x nvarchar(50) = 'abc'
declare @z int = 1
declare @ListOfTableNames TABLE (TableName nvarchar(50));

while (@z < 30)
BEGIN
    SET @z = @z + 1;
    INSERT INTO @ListOfTableNames (TableName) VALUES (@x + CONVERT(NVARCHAR(20), @z))
END

SELECT * FROM @ListOfTableNames
To do dynamic SQL on these tables you could build a query string and then pass that string to the sp_executesql proc. You could put that logic in place of the line where we populate the table variable with the numbered table names. Like this:
declare @x nvarchar(50) = 'abc'
declare @z int = 1
declare @sql NVARCHAR(100)

while (@z < 30)
BEGIN
    SET @z = @z + 1;
    SET @sql = 'SELECT * FROM ' + (@x + CONVERT(NVARCHAR(20), @z))
    EXEC sp_executesql @sql
END
I would completely avoid a WHILE loop here and just use some pattern matching:
DECLARE @Prefix sysname = N'abc';
DECLARE @SQL nvarchar(MAX),
        @CRLF nchar(2) = NCHAR(13) + NCHAR(10);

SET @SQL = STUFF((SELECT @CRLF +
                         N'SELECT *' + @CRLF +
                         --N'      ,N' + QUOTENAME(t.[name],'''') + N' AS TableName' + @CRLF + --Uncomment if wanted
                         N'FROM ' + QUOTENAME(s.[name]) + N'.' + QUOTENAME(t.[name]) + N';'
                  FROM sys.schemas s
                       JOIN sys.tables t ON s.schema_id = t.schema_id
                  WHERE t.[name] LIKE @Prefix + '%'
                    AND t.[name] NOT LIKE @Prefix + N'%[^0-9]'
                  ORDER BY t.[name]
                  FOR XML PATH(N''),TYPE).value('.','nvarchar(MAX)'),1,2,N'');

--PRINT @SQL;

EXEC sp_executesql @SQL;
DB<>Fiddle
But John is right, you certainly have a design flaw here.
Using dynamic SQL, it would look something like this:
declare
    @Base_table_name nvarchar(50) = 'my_table'
    ,@Step int = 1
    ,@SQL nvarchar(max);

while (@Step < 30)
begin
    set @SQL = 'select * from ' + @Base_table_name + right('00' + cast(@Step as nvarchar(50)), 2);

    print(@SQL); --this displays the SQL that would be run
    --exec(@SQL) --uncomment this to run the dynamic SQL

    set @Step += 1;
end;
Alternatively, you can be more precise by using the sys.schemas and sys.tables catalog views, like so:
declare
    @Base_table_name sysname = 'my_table'
    ,@schema_name sysname = 'my_schema'
    ,@Step int = 1
    ,@StepCount int = 0
    ,@SQL nvarchar(max);

/* This will create a table variable and populate it with all the tables you'll want to query */
declare @tables_to_query table (Step int identity, SchemaName sysname, TableName sysname);

insert into @tables_to_query(SchemaName, TableName)
select
    s.name
    ,t.name
from
    sys.schemas s
    inner join
    sys.tables t on s.schema_id = t.schema_id
where
    s.name = @schema_name --this will limit the tables to this schema
    and t.name like @Base_table_name + '%' --this will look for any table that starts with the base table name

set @StepCount = @@ROWCOUNT; --remember how many tables were found so the loop below actually runs

/* this loops through all the tables in the table variable */
while (@Step <= @StepCount)
begin
    select
        @SQL = 'select * from ' + quotename(SchemaName) + '.' + quotename(TableName)
    from
        @tables_to_query
    where
        Step = @Step

    print(@SQL); --this displays the SQL that would be run
    --exec(@SQL) --uncomment this to run the dynamic SQL

    set @Step += 1;
end;
The dynamic SQL approaches laid out in other answers will certainly get the job done for you, but if you find you're querying all of these tables frequently, it might serve you well to build out a VIEW and query that as needed.
In keeping with Larnu's suggestion of putting the source table name into the result set, I'd probably do something like this:
CREATE VIEW dbo.vwJan
AS
SELECT
'tableJan01' AS SourceTable,
<Column List>
FROM dbo.tableJan01
UNION ALL
...<28 other iterations>
SELECT
'tableJan30' AS SourceTable,
<Column List>
FROM dbo.tableJan30;
From there, you can go ahead and query them all to your heart's content with a single statement.
SELECT
SourceTable,
<Any other columns you're interested in>
FROM
vwJan;
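If writing out the 30 UNION ALL branches by hand is a chore, the view body itself could be generated with the same dynamic SQL patterns shown in the other answers. This is only a sketch: it assumes SQL Server 2017+ for STRING_AGG, the tableJan naming from the question, and that all of the tables share the same columns (otherwise replace * with an explicit column list).
DECLARE @body nvarchar(max), @sql nvarchar(max);

-- Build one SELECT per matching table, tagged with its source table name.
SELECT @body = STRING_AGG(CAST(
           'SELECT ''' + t.name + ''' AS SourceTable, * FROM dbo.' + QUOTENAME(t.name)
           AS nvarchar(max)),
           CHAR(13) + CHAR(10) + 'UNION ALL' + CHAR(13) + CHAR(10))
FROM sys.tables AS t
WHERE t.name LIKE 'tableJan[0-9][0-9]';

SET @sql = 'CREATE VIEW dbo.vwJan AS' + CHAR(13) + CHAR(10) + @body + ';';

--PRINT @sql;   -- review the generated definition before running it
EXEC sp_executesql @sql;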
I need to insert an almost-duplicate row into a table while changing a few values.
For example, insert a duplicated row with a new id (I don't want an automatic id) and a different name, but all other values the same.
The problem is that I need to use a select *.
I know that there is a way to insert from a select while changing values, like this:
insert into Table1(id,name,surname) select newid(),'David',surname from Table1 where id=1
but I don't want to list all the fields; instead I want to use select *, so that if fields are added I won't have to change my stored procedure.
I want something like:
insert into Table1 (
update (SELECT *
FROM Table1
WHERE id= 1 ) t
set t.id= newid(),name='David')
Is there a way to do it?
You can use a temp (hash) table to accomplish this.
SELECT *
INTO #temp
FROM Table1
WHERE id= 1;
UPDATE #temp
SET ID = newid(),
Name='David'
INSERT INTO Table1 SELECT * FROM #temp;
Note that the #temp table is automatically dropped when the client disconnects from the DB server.
Also, as previously noted, I prefer to list the column names explicitly instead of using *.
Example: SQL Fiddle
The code I use:
declare @table sysname
declare @excludecols nvarchar(max)
declare @uniqueWhereToCopy nvarchar(max)
declare @valuesToChange nvarchar(max)

--copy settings
set @table = 'orsrg' --the table name
set @excludecols = '' --column names to exclude from the copy, separated by commas
set @uniqueWhereToCopy = 'ID=1188'
set @valuesToChange = 'regel='' 4''' --columnName=<value>,columnName2=<value2>, .... (needed for unique indexes)

set @excludecols = @excludecols + ','
set @valuesToChange = @valuesToChange + ','

--get the column names to copy
declare @sqlcolumns nvarchar(max)
set @sqlcolumns = ''

SELECT @sqlcolumns = @sqlcolumns + name from
(select '[' + c.name + '], ' as name FROM sys.columns c inner join sys.objects o
 on c.object_id = o.object_id
 WHERE o.name = @table
 and is_identity = 0 /*exclude identity*/
 and is_rowguidcol = 0 /*exclude rowguids*/
 and is_computed = 0 /*exclude computed columns*/
 and system_type_id <> 189 /*exclude timestamp*/
 and charindex(c.name, @excludecols, 1) = 0 /*exclude user specified columns*/) q

--get the select columns and values
declare @sqlselectvalues nvarchar(max)
set @sqlselectvalues = @sqlcolumns

while len(@valuesToChange) > 1
begin
    declare @colValueSet nvarchar(max)
    declare @colname sysname
    declare @value nvarchar(max)

    set @colValueSet = left(@valuesToChange, charindex(',', @valuesToChange, 1) - 1)
    set @valuesToChange = substring(@valuesToChange, charindex(',', @valuesToChange, 1) + 1, len(@valuesToChange))
    set @colname = '[' + left(@colValueSet, charindex('=', @colValueSet, 1) - 1) + ']'
    set @value = substring(@colValueSet, charindex('=', @colValueSet, 1) + 1, len(@colValueSet))
    set @sqlselectvalues = REPLACE(@sqlselectvalues, @colname, @value)
end

--remove the last comma
set @sqlcolumns = left(@sqlcolumns, len(@sqlcolumns) - 1)
set @sqlselectvalues = left(@sqlselectvalues, len(@sqlselectvalues) - 1)

--create the statement
declare @stmt nvarchar(max)
set @stmt = 'Insert into ' + @table + '(' + @sqlcolumns + ') select ' + @sqlselectvalues + ' from ' + @table + ' with (nolock) where ' + @uniqueWhereToCopy

--copy the row
exec sp_executesql @stmt
No, because a SELECT * will always contain the id column.
Generally, you should avoid SELECT * everywhere except interactive queries. When the stored procedure is compiled, the query text is parsed and the * is bound to the columns that exist at that point, so a change to the table structure effectively invalidates the stored procedure anyway.
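That said, if the only goal is to avoid maintaining the column list by hand, the run-time approach from the previous answer can be written more compactly on SQL Server 2017+. This is only a sketch: the id and name columns and the WHERE id = 1 filter come from the question, everything else is illustrative.
DECLARE @cols nvarchar(max), @sql nvarchar(max);

-- Build the column list from the catalog, skipping the columns we override below.
SELECT @cols = STRING_AGG(CAST(QUOTENAME(c.name) AS nvarchar(max)), ', ')
FROM sys.columns AS c
WHERE c.object_id = OBJECT_ID(N'dbo.Table1')
  AND c.name NOT IN (N'id', N'name');

SET @sql = N'INSERT INTO dbo.Table1 (id, name, ' + @cols + N')
             SELECT NEWID(), @newName, ' + @cols + N'
             FROM dbo.Table1
             WHERE id = 1;';   -- same filter as in the question

EXEC sp_executesql @sql, N'@newName nvarchar(100)', @newName = N'David';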
I am using a cursor in my stored procedure, and I am hoping to remove it. Please help me come up with a solution that replaces the cursor with a normal update statement built dynamically.
Example below:
Update Tablename set columnname(variable) = value from table A join Table B on A.condition = B.Condition where name = 'Test'(variable) and age = 18(variable)
Update Tablename set columnname(variable) = value from table A join Table B on A.condition = B.Condition where name = 'kumar'(variable) and age = 19(variable)
Update Tablename set columnname(variable) = value from table A join Table B on A.condition = B.Condition where name = 'babu'(variable) and age = 30(variable)
This is how my cursor works: 300 combinations dynamically pick the data from a table and update it into the main table.
I am trying to take out the cursor. Instead of writing 300 update statements, I want to write one update where all 300 combinations execute.
Below is my code which needs this solution:
BEGIN
    DECLARE @Type VARCHAR(100)
    DECLARE @TargetColumn VARCHAR(100)
    DECLARE @SourceColumn VARCHAR(100)
    DECLARE @SQL varchar(max)

    DECLARE a_cursor CURSOR STATIC
    FOR
    SELECT [Type],[SourceColumn],[TargetColumn] FROM ref.tblEdsMap
    GROUP BY [Type],[SourceColumn],[TargetColumn]

    OPEN a_cursor
    FETCH NEXT FROM a_cursor INTO @Type, @SourceColumn, @TargetColumn

    WHILE @@FETCH_STATUS = 0
    BEGIN
        SET @SQL = 'UPDATE GCT SET GCT.' + @TargetColumn + ' = map.[TargetValue]
            from EdsMap map
            JOIN Table GCT
            ON GCT.' + @SourceColumn + ' = map.[SourceValue]
            where map.[Type]=''' + @Type + ''' and map.SourceColumn=''' + @SourceColumn + ''''

        Exec (@SQL)
        PRINT @SQL

        FETCH NEXT FROM a_cursor INTO @Type, @SourceColumn, @TargetColumn
    END

    CLOSE a_cursor
    DEALLOCATE a_cursor
END
Rather than use an explicit cursor or a cursor cleverly disguised as a while loop, I prefer row concatenation operations for this type of problem.
DECLARE @cmd NVARCHAR(MAX) = N'';

SELECT @cmd += N'
UPDATE GCT
  SET GCT.' + QUOTENAME(TargetColumn) + ' = map.TargetValue
FROM dbo.EdsMap AS map
INNER JOIN dbo.Table AS GCT
    ON GCT.' + QUOTENAME(SourceColumn) + ' = map.SourceValue
WHERE map.[Type] = ''' + [Type] + '''
  AND map.SourceColumn = ''' + [SourceColumn] + ''';'
FROM ref.tblEdsMap
GROUP BY [Type], SourceColumn, TargetColumn;

EXEC sp_executesql @cmd;
When I've done these in the past, I usually make up a transaction to encompass every update that's needed. Something like this:
CREATE TABLE #targets ([Type] VARCHAR(255), [SourceColumn] VARCHAR(255), [TargetColumn] VARCHAR(255));

INSERT INTO #targets
    ( [Type], [SourceColumn], [TargetColumn] )
SELECT [Type],[SourceColumn],[TargetColumn] FROM ref.tblEdsMap
GROUP BY [Type],[SourceColumn],[TargetColumn];

DECLARE @sql VARCHAR(MAX);
SET @sql = 'BEGIN TRAN' + CHAR(10) + CHAR(13);

SELECT @sql = @sql +
    'UPDATE GCT SET GCT.' + [TargetColumn] + ' = map.[TargetValue]
     from EdsMap map
     JOIN Table GCT
       ON GCT.' + [SourceColumn] + ' = map.[SourceValue]
     where map.[Type]=''' + [Type] + ''' and map.SourceColumn=''' + [SourceColumn] + ''';' + CHAR(10) + CHAR(13)
FROM #targets

SELECT @sql = @sql + 'COMMIT TRAN'

PRINT @sql
Exec (@sql)
The update statements are still the same, i.e., you get one update per combination. But now you're running as one transaction batch. You could potentially be fancier with the dynamic SQL, so that you had just one update statement, but in my experience, it's too easy to get bad updates that way.
Doing it this way may not be any faster than a cursor. You'd have to test to be sure. With the examples where I've used this approach, it has generally been a faster approach.
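One caveat with the generated batch: by default, a runtime error in one UPDATE does not undo the ones that already ran before the COMMIT. If all-or-nothing behavior is wanted, a small tweak (not part of the original answer) is to prefix the batch with SET XACT_ABORT ON:
DECLARE @sql VARCHAR(MAX);

-- Prefix the generated batch so any runtime error aborts the batch and
-- rolls back every update in the transaction instead of committing the rest.
SET @sql = 'SET XACT_ABORT ON;' + CHAR(10)
         + 'BEGIN TRAN' + CHAR(10) + CHAR(13);

-- ...the SELECT @sql = @sql + ... concatenation over #targets and the final
-- 'COMMIT TRAN' stay exactly as in the answer above...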
Try using a table variable along with a WHILE loop instead, like so:
BEGIN
    DECLARE @Type VARCHAR(100)
    DECLARE @TargetColumn VARCHAR(100)
    DECLARE @SourceColumn VARCHAR(100)
    DECLARE @SQL varchar(max)

    DECLARE @SomeTable TABLE
    (
        ID int IDENTITY (1, 1) PRIMARY KEY NOT NULL,
        Type varchar(100),
        SourceColumn varchar(100),
        TargetColumn varchar(100)
    )

    DECLARE @Count int, @Max int

    INSERT INTO @SomeTable (Type, SourceColumn, TargetColumn)
    SELECT [Type],[SourceColumn],[TargetColumn]
    FROM ref.tblEdsMap
    GROUP BY [Type],[SourceColumn],[TargetColumn]

    SELECT @Count = 1, @Max = COUNT(ID)
    FROM @SomeTable

    WHILE @Count <= @Max
    BEGIN
        SELECT
            @Type = Type,
            @SourceColumn = SourceColumn,
            @TargetColumn = TargetColumn
        FROM @SomeTable
        WHERE ID = @Count

        -- Your code
        SET @SQL = 'UPDATE GCT SET GCT.' + @TargetColumn + ' = map.[TargetValue]
            from EdsMap map
            JOIN Table GCT
            ON GCT.' + @SourceColumn + ' = map.[SourceValue]
            where map.[Type]=''' + @Type + ''' and map.SourceColumn=''' + @SourceColumn + ''''

        Exec (@SQL)
        PRINT @SQL

        SET @Count = @Count + 1
    END -- while
END
I have multiple tables with names like g_str_a, g_str_ab, g_str_abc. I would like to drop all the tables that start with g_str on SQL Server 2008.
Will something like DROP TABLE ... LIKE 'g_str' help?
Please help me with the script.
SELECT
'DROP TABLE ' + QUOTENAME(SCHEMA_NAME(schema_id)) + '.' + QUOTENAME(name) + ';'
FROM sys.tables
WHERE name LIKE 'g\_str%' ESCAPE '\'
Then review the script and run it.
You can also concatenate the result into a single string and execute with EXEC if you need an entirely automated solution.
You could use dynamic SQL:
DECLARE @SQL NVARCHAR(MAX) = '';

SELECT @SQL = @SQL + 'DROP TABLE ' + QUOTENAME(SCHEMA_NAME([schema_id])) + '.' + QUOTENAME([name]) + ';'
FROM sys.tables
WHERE [name] LIKE 'g\_str%' ESCAPE('\');

EXEC sp_executesql @SQL;
The following query will drop the tables automatically:
BEGIN TRANSACTION

DECLARE @tmpTablesToDelete TABLE (
    RowNumber INT PRIMARY KEY
    ,Query NVARCHAR(MAX)
)

INSERT INTO
    @tmpTablesToDelete
SELECT
    RowNumber = ROW_NUMBER() OVER (ORDER BY (SELECT (0)))
    ,'DROP TABLE ' + schemas.name + '.' + objects.name AS Query
FROM
    sys.objects
INNER JOIN
    sys.schemas
ON
    schemas.schema_id = objects.schema_id
WHERE
    type = 'U' AND objects.name LIKE 'g_str%'

DECLARE @Counter INT
SELECT @Counter = MAX(RowNumber) FROM @tmpTablesToDelete

WHILE (@Counter > 0) BEGIN
    DECLARE @Query NVARCHAR(MAX)
    SELECT @Query = Query FROM @tmpTablesToDelete WHERE RowNumber = @Counter

    PRINT @Query
    EXEC sp_executesql @Query

    SET @Counter = @Counter - 1
END

COMMIT TRANSACTION