How do I create an index inside a stored procedure? - sql

How do I create an index inside a stored procedure? It complains
Msg 102, Level 15, State 1, Procedure createIndexModifiedOn, Line 12
Incorrect syntax near 'PRIMARY'.
But ON [PRIMARY] is what SQL Server itself uses if you create a new index and select Script As New Query.
If I remove ON [PRIMARY] then it gives this error
Msg 102, Level 15, State 1, Procedure createIndexModifiedOn, Line 12
Incorrect syntax near ')'.
Here is the procedure:
-- Question repro: tries to DROP/CREATE an index using variables directly.
-- NOTE: this cannot work -- T-SQL DDL does not accept variables for object
-- identifiers, and [@idx]/[@table] are parsed as literal bracketed names.
-- The working approach is to build the statement as a string and run it
-- with sp_executesql.
create proc [dbo].createIndexModifiedOn
@table char(256)
as begin
declare @idx char(256)
set @idx = 'idx_' + SUBSTRING(@table, 7, len(@table)-1) + '_modified_on';
IF EXISTS (SELECT * FROM sys.indexes WHERE object_id = OBJECT_ID(@table) AND name = @idx)
DROP INDEX [@idx] ON [@table]
CREATE NONCLUSTERED INDEX [@idx] ON [@table]
(
[modified_on] ASC
) ON [PRIMARY]
end
go
This ended up being the full query:
-- Working version: the DROP/CREATE INDEX statements are assembled as a
-- dynamic SQL string and executed via sp_executesql, because DDL does not
-- accept variables for object identifiers.
create proc [dbo].createIndexModifiedOn
@table varchar(256)
as
declare @idx varchar(256);
declare @sql nvarchar(999);
-- @table arrives as [dbo].[name] (from sp_MSforeachtable's "?"); start at
-- char 8 and take len-8 chars to strip "[dbo].[" and the trailing "]".
set @idx = 'idx_' + SUBSTRING(@table, 8, len(@table)-8) + '_modified_on';
set @sql = '
IF EXISTS (SELECT * FROM sys.indexes WHERE object_id = OBJECT_ID(''' + @table + ''') AND name = ''' + @idx + ''')
DROP INDEX [' + @idx + '] ON ' + @table + '
CREATE NONCLUSTERED INDEX [' + @idx + '] ON ' + @table + '
(
[modified_on] ASC
) ON [PRIMARY]
';
print @table + ', ' + @idx;
BEGIN TRY
EXEC sp_executesql @sql;
END TRY
BEGIN CATCH
PRINT 'errno: ' + ltrim(str(error_number()))
PRINT 'errmsg: ' + error_message()
END CATCH
GO
-- Apply to every user table; sp_MSforeachtable substitutes [schema].[table] for "?".
EXEC sp_MSforeachtable 'exec createIndexModifiedOn "?"'

You can't use variables in the CREATE INDEX statement as you have. To do this, you will need to generate a SQL string and execute it with sp_executesql.
Freehand example:
-- Build the CREATE INDEX statement as a string, then execute it.
DECLARE @sql NVARCHAR(1024);
SET @sql = 'CREATE NONCLUSTERED INDEX [' + @idx + '] ON [' + @table + ']
(
[modified_on] ASC
) ON [PRIMARY];';
EXEC sp_executesql @sql;

Related

Dynamic Table name being recognised as a column name

Is there any reason why the dynamic schema and table name in the below script are being recognised as column names? If I hard-code the schema and table names outside of the dynamic script, the statement executes with no issues.
Error = Msg 207, Level 16, State 1, Line 38
Invalid column name '55_Dataset'.
Msg 207, Level 16, State 1, Line 38
Invalid column name 'EPAPRIORITY'.
repeated for each iteration
Table example (screenshot omitted in this transcript)
DECLARE @Counter INT;
DECLARE @DATASET nvarchar(50);
DECLARE @STATE nvarchar(50);
DECLARE @sql nvarchar(max);
SET @Counter = 1;
WHILE @Counter <= 10
BEGIN
SET @DATASET = (Select [DATASET] FROM [xxx].[dbo].[EPA_Geocoding_Progress] Where [INT] = @Counter)
SET @STATE = (Select [STATE] FROM [xxx].[dbo].[EPA_Geocoding_Progress] Where [INT] = @Counter)
-- BUG (the subject of this question): in the WHERE clause below the values
-- are wrapped in [brackets], which makes SQL Server treat them as COLUMN
-- names (hence "Invalid column name '55_Dataset'"). They should be embedded
-- as quoted string literals instead.
SET @sql = '
UPDATE [xxx].[dbo].[EPA_Geocoding_Progress]
Set [Geocoded] = (Select COUNT (*) FROM [xxx].[' + @STATE + '].[' + @DATASET + '])
Where [STATE] = [' + @STATE + '] AND [DATASET] = [' + @DATASET + ']'
exec sp_executesql @sql;
SET @Counter = @Counter + 1;
END
If you will code with print like this:
-- Print the generated statement instead of executing it; the faulty
-- identifier quoting becomes visible in the output.
declare @sql varchar(max)
declare @STATE varchar(50)
declare @DATASET varchar(50)
set @STATE = 'dbo'
set @DATASET = 'test'
Set @sql = '
UPDATE [dbo].[EPA_Geocoding_Progress]
Set [Geocoded] = (Select COUNT (*) FROM [' + @STATE + '].[' + @DATASET + '])'
print @sql;
you will see what's wrong.
Which SQL Server version are you on?
There seems to be no problem;
check for a missing `;` before the `Set`.
I tried it:
-- Minimal repro: with proper @-variables and no brackets around the values
-- in the WHERE clause, the dynamic UPDATE executes successfully.
CREATE TABLE EPA_Geocoding_Progress(
Geocoded int
);
CREATE TABLE TEST(
ID int
);
Declare @sql nvarchar(max) = ''
,@STATE nvarchar(max) = 'dbo'
,@DATASET nvarchar(max) = 'test'
Set @sql = '
UPDATE [dbo].[EPA_Geocoding_Progress]
Set [Geocoded] = (Select COUNT (*) FROM [' + @STATE + '].[' + @DATASET + '])'
select @sql
exec sp_executesql @sql; -- Run Success
DEMO SQL Fiddle

SQL Server - Instead of Insert Trigger - override truncation error?

Scenario: using an "Instead of Insert" trigger to a different table (overriding inserting into the current table) throws a truncation error.
Try to insert data into TableA
Instead Of Trigger is setup to run on TableA
This trigger instead inserts into TableB
TableA is not written to
Issue:
TableA has a nvarchar(10) desc column and TableB has a nvarchar(200) desc column. An insert trigger is setup on TableA where the data for the desc column is 50 characters long.
With SET ANSI_WARNINGS ON (the default), TableA causes a truncation error.
With SET ANSI_WARNINGS OFF (dangerous), truncation is ignored on TableB. So if the desc came in at 400 characters, it would be truncated to 200 characters with no errors.
Setup
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- "TableB" of the scenario: the redirect target. Its [description] is
-- nvarchar(200), wide enough for the test values; [causefailure] is only
-- nvarchar(5) and is what the trigger overflows to provoke truncation.
CREATE TABLE [dbo].[results]
(
[id] [int] IDENTITY(1,1) NOT NULL,
[causefailure] [nvarchar](5) NULL,
[type] [nvarchar](50) NOT NULL,
[description] [nvarchar](200) NULL,
[rundate] [datetime] NOT NULL DEFAULT (getdate())
) ON [PRIMARY]
GO
-- "TableA" of the scenario: the table the INSERTs actually target; the
-- INSTEAD OF trigger prevents any rows from ever being written here.
CREATE TABLE [dbo].[test_table]
(
[id] [int] IDENTITY(1,1) NOT NULL,
[description] [nvarchar](10) NULL,
[rundate] [datetime] NOT NULL DEFAULT (getdate())
) ON [PRIMARY]
GO
-- INSTEAD OF INSERT on test_table: redirects every insert into [results]
-- instead. A description starting with 'a' also produces a causefailure
-- marker longer than nvarchar(5), triggering a truncation error when
-- ANSI_WARNINGS is ON.
-- (Transcription fixes: closed the CASE expression's parenthesis and
-- restored [causefailure] in the INSERT column list so the three selected
-- expressions match three target columns.)
CREATE TRIGGER [dbo].[InsteadTrigger]
ON [dbo].[test_table]
INSTEAD OF INSERT
AS
SET NOCOUNT ON
SET ANSI_WARNINGS ON
BEGIN
INSERT INTO results([causefailure], [type], [description])
SELECT
(CASE SUBSTRING([description], 1, 1)
WHEN 'a'
THEN 'causes failure or truncation'
ELSE ''
END) AS [causefailure],
'Instead Of Trigger' AS [type],
[description]
FROM inserted
END;
GO
ALTER TABLE [dbo].[test_table] ENABLE TRIGGER [InsteadTrigger]
GO
First attempt:
-- With ANSI_WARNINGS OFF the overlong causefailure value is silently
-- truncated inside the trigger -- no error is raised.
SET ANSI_WARNINGS OFF
INSERT INTO [dbo].[test_table]([description])
SELECT 'atest12345678910' AS [description]
Second attempt:
-- With ANSI_WARNINGS ON the 'b...' row inserts fine (empty causefailure),
-- but the 'a...' row raises the truncation error from inside the trigger.
SET ANSI_WARNINGS ON
INSERT INTO [dbo].[test_table]([description])
SELECT 'btest12345678910' AS [description]
INSERT INTO [dbo].[test_table]([description])
SELECT 'atest12345678910' AS [description]
But when I run the first code snippet, causefailure gets truncated with no error. Running the second snippet inserts one record into [results], but it throws an exception on the second insert.
Is there a way to ignore: constraints, conversion issues, etc., exceptions with the initial write to [test_table]? But have exceptions for any work done in the trigger (e.g. inserting into [results] and maybe even the actual work to [test_tables])?
Edit: I do not want to change the column size of nvarchar(10) desc
on TableA. The ultimate goal might be if the "instead of trigger"
fails to insert on TableA, it saves to TableB. Or it might even change
the size of desc on failure dynamically inside the trigger and retry
the insert.
Hopefully helpful!
Below is an incomplete idea for working around the above problem. It is somewhat evasive, but it is an interesting way to test how far you can rework a database.
The below changes the schema around when a table is created or altered. On creating or altering a table, it will be swapped out for an indexed view after the table gets renamed.
-- Database-scoped DDL trigger: whenever a table is created or altered, drop
-- any schema-bound views that depend on it, rename the table to a prefixed
-- name, and put a schema-bound indexed view with the original name in front
-- of it.
ALTER TRIGGER trigger_CreateTable
ON DATABASE
AFTER CREATE_TABLE, ALTER_TABLE
AS
BEGIN
--SELECT EVENTDATA()
DECLARE @Prefix AS nvarchar(256) = N'PleaseUseView_'
DECLARE @Event AS XML = EVENTDATA()
DECLARE @SchemaName AS nvarchar(255) = (@Event.value('(/EVENT_INSTANCE/SchemaName)[1]', 'NVARCHAR(255)'))
DECLARE @TableName AS nvarchar(255) = (@Event.value('(/EVENT_INSTANCE/ObjectName)[1]', 'NVARCHAR(255)'))
DECLARE @ObjectType AS nvarchar(255) = (@Event.value('(/EVENT_INSTANCE/ObjectType)[1]', 'NVARCHAR(255)'))
DECLARE @TableWithSchema AS nvarchar(512) = '[' + @SchemaName + '].[' + @TableName + ']'
-- #SchemaBindingDependencies is a genuine temp table (not a variable):
-- collects the schema-bound views that reference the affected table.
CREATE TABLE #SchemaBindingDependencies
(
[id] int NOT NULL IDENTITY,
[schema] nvarchar(256) NOT NULL,
[name] nvarchar(256) NOT NULL
)
INSERT INTO #SchemaBindingDependencies([schema], [name])
SELECT DISTINCT s.name AS [schema], o.name
FROM sys.objects AS o
INNER JOIN sysdepends AS d
ON d.id = o.[object_id]
INNER JOIN sys.schemas AS s
ON s.[schema_id] = o.[schema_id]
WHERE o.type ='V' AND d.depid = OBJECT_ID(@TableWithSchema)
AND SUBSTRING(@TableName, LEN(@Prefix) + 1, 256) LIKE o.[name]
IF (EXISTS(SELECT 1 FROM #SchemaBindingDependencies))
BEGIN
DECLARE @Index AS int = (SELECT MAX(id) FROM #SchemaBindingDependencies)
WHILE (@Index > 0)
BEGIN
DECLARE @ViewName1 AS nvarchar(256) = (SELECT [name] FROM #SchemaBindingDependencies WHERE id = @Index)
IF (@ViewName1 IS NOT NULL)
BEGIN
DECLARE @SchemaName1 AS nvarchar(256) = (SELECT [schema] FROM #SchemaBindingDependencies WHERE id = @Index)
-- NOTE(review): this uses @SchemaName (the table's schema), not
-- @SchemaName1 (the dependent view's schema) -- looks like @SchemaName1
-- was intended; confirm before relying on cross-schema dependencies.
DECLARE @DropSchemaBoundViewQuery AS nvarchar(1000) = 'DROP VIEW [' + @SchemaName + '].[' + @ViewName1 + ']'
EXEC(@DropSchemaBoundViewQuery)
END
SET @Index = @Index - 1
END
END
-- Rename only when the table does not already carry the prefix.
IF (SUBSTRING(@TableName, 1, LEN(@Prefix)) <> @Prefix)
BEGIN
DECLARE @NewTableName AS nvarchar(512) = @Prefix + @TableName + ''
DECLARE @NewTableWithSchema AS nvarchar(512) = '[' + @SchemaName + '].[' + @NewTableName + ']'
EXEC sp_rename @TableWithSchema, @NewTableName
SET @TableName = @NewTableName
SET @TableWithSchema = '[' + @SchemaName + '].[' + @NewTableName + ']'
END
-- Comma-separated, bracketed column list of the (renamed) table.
DECLARE @Columns AS nvarchar(max) = (STUFF((SELECT ',[' + x.[name] + ']' FROM (
SELECT c.[name]
FROM sys.columns AS c
INNER JOIN sys.tables AS t
ON t.[object_id] = c.[object_id]
INNER JOIN sys.schemas AS s
ON s.[schema_id] = t.[schema_id]
WHERE t.[name] = @TableName AND s.[name] = @SchemaName) AS x FOR XML PATH('')), 1, 1, ''))
DECLARE @ViewName AS nvarchar(256) = SUBSTRING(@TableName, LEN(@Prefix) + 1, 256)
DECLARE @ViewWithSchema AS nvarchar(512) = '[' + @SchemaName + '].[' + @ViewName + ']'
DECLARE @Query AS nvarchar(max) =
N'CREATE VIEW ' + @ViewWithSchema + N' ' + CHAR(10) + CHAR(13) +
N'WITH SCHEMABINDING ' + CHAR(10) + CHAR(13) +
N'AS ' + CHAR(10) + CHAR(13) +
N' SELECT ' + @Columns + ' ' + CHAR(10) + CHAR(13) +
N' FROM ' + @TableWithSchema + N' '
--SELECT @Query
EXEC(@Query)
SET @Query =
N'CREATE UNIQUE CLUSTERED INDEX [CIX_' + @ViewName + N'] ' + CHAR(10) + CHAR(13) +
N'ON ' + @ViewWithSchema + N'(' + @Columns + N')'
EXEC(@Query)
-- TODO: Use the below double commented to build a variable insert statement for the "Instead of TRIGGER"
--
----DECLARE @tv_source TABLE (id int)
----declare @XML xml;
----set @XML =
---- (
---- select top(0) *
---- from @tv_source
---- for XML RAW, ELEMENTS, XMLSCHEMA
---- );
----SELECT T.c.query('.'), T.c.value('@name', 'nvarchar(256)')
----FROM @XML.nodes('/*/*/*/*/*') AS T(c)
--
--SET @Query =
-- N'CREATE TRIGGER [Trigger_' + @ViewName + N'] ' + CHAR(10) + CHAR(13) +
-- N'ON ' + @ViewWithSchema + N' ' + CHAR(10) + CHAR(13) +
-- N'INSTEAD OF INSERT ' + CHAR(10) + CHAR(13) +
-- N'AS BEGIN ' + CHAR(10) + CHAR(13) +
-- N'BEGIN TRY ' + CHAR(10) + CHAR(13) +
-- N' INSERT INTO ' + @TableWithSchema + N'(' + @Columns + N')'
-- N' SELECT ' + @Columns +
--EXEC(@Query)
END
Ideally, you would use a different schema for tables. And use dbo or
the default for the view.
Once the Instead of TRIGGER is working, you could wrap a TRY/CATCH around it. On the catch, check schema for truncation. And expand the column sizes if needed.
Incomplete solution. But it is the answer I will stick with for now.
If anyone has any better answers or a complete solution, please add it!
tl;dr
One interesting thing is the below query
-- TOP(0) ... FOR XML RAW, ELEMENTS, XMLSCHEMA returns an inline XSD schema
-- describing the (empty) result set -- i.e. the table's shape as metadata.
DECLARE @tv_source TABLE (id int)
declare @XML xml;
select top(0) *
from @tv_source
for XML RAW, ELEMENTS, XMLSCHEMA
You can return the schema: XSD for the above XML, or JSON Schema or Avro if using the SQL Server 2016+ JSON features to build RESTful APIs with schema awareness. With schema awareness, an application gateway could automatically aggregate many dispersed micro REST APIs into one seemingly large REST API.

Sql server using a built int function in a stored procedure

I am trying to write a SP that uses the substring function inside a stored procedure where I use the column name as input. However when I run
exec sp_addCountySchoolDistrict N'table', N'districtCountySchoolCode'
It uses the 'districtCountySchoolCode' for the substring and not the value from the row...
For example
create table [dbo].[test] (districtCountySchoolCode nvarchar(100))
insert into dbo.test values ('1234567891234')
go
-- Question repro. BUG (the subject of this question): substring() executes
-- HERE, at string-build time, on the column NAME passed in -- not on the
-- column's stored value. The substring call has to live inside the dynamic
-- SQL text (or the value must be fetched first, as in the answer below).
CREATE PROCEDURE sp_addCountySchoolDistrict
@tableName nvarchar(100),
@colName nvarchar(100)
AS
BEGIN
SET NOCOUNT ON;
DECLARE @Sql NVARCHAR(MAX);
SET @Sql= N'alter table ' + replace(replace(quotename(@tableName),']',''),'[','') +
N' add countyCode as ''' + substring(@colName, 1,2) + N',
districtCode as ''' + substring(@colName, 3,5) + N',
schoolCode as ''' + substring(@colName, 8,7) + N';'
select @sql;
EXECUTE sp_executesql @Sql
END
GO
exec sp_addCountySchoolDistrict N'[dbo].[test]', N'districtCountySchoolCode'
select * from test_copy
Coder Wall
Put the substring in the literal part of the dynamic sql, like this:
N' add countyCode as substring(''' + @colName+ N', 1,2) ,
try this
-- Pass the actual value (not the column name) so the build-time substrings work:
exec sp_addCountySchoolDistrict N'[dbo].[test]', N'1234567891234'
Your mistake is that you pass the "column name" as a string, but your code never selects the value from the test table for the specified column. You have to perform a SELECT statement first to get the value, and then use that value in the rest of the code.
This should work
create table [dbo].[test] (districtCountySchoolCode nvarchar(100))
insert into dbo.test values ('1234567891234')
go
-- Fixed version: first SELECT the column's VALUE via an OUTPUT parameter,
-- then build the computed columns from that value.
CREATE PROCEDURE sp_addCountySchoolDistrict
@tableName nvarchar(100),
@colName nvarchar(100)
AS
BEGIN
declare @val nvarchar(100);
declare @initialQuery nvarchar(100);
SET NOCOUNT ON;
DECLARE @Sql NVARCHAR(MAX);
-- Fetch the stored value of the named column into @val.
SET @initialQuery = 'SELECT @value=' + quotename(@colName) + ' FROM ' + replace(replace(quotename(@tableName),']',''),'[','')
EXECUTE sp_executesql @initialQuery, N'@value nvarchar(100) OUTPUT', @value=@val OUTPUT
SET @Sql= N'alter table ' + replace(replace(quotename(@tableName),']',''),'[','') +
N' add countyCode as ''' + substring(@val, 1,2) + N''',
districtCode as ''' + substring(@val, 3,5) + N''',
schoolCode as ''' + substring(@val, 8,7) + N''';'
select @sql;
EXECUTE sp_executesql @Sql
END
GO
exec sp_addCountySchoolDistrict N'[dbo].[test]', N'districtCountySchoolCode'
select * from test
But I suppose you will have to add a WHERE condition if you have more than one value in the test table.

Dynamic, cross database trigger creation

I am trying to dynamically create a number of triggers across two databases but I am having problems with the Create Trigger statement when switching databases
-- Drop the trigger in the target database if it exists, then try to
-- recreate it. (The CREATE below runs in the CURRENT database, which is
-- the problem being asked about.)
SET @SQL = 'SELECT Name FROM ' + @DB + '.sys.triggers WHERE Name = ' + ''''
+ @Table + '_DELTATrigger' + ''''
EXEC(@SQL)
IF @@RowCount > 0
BEGIN
SET @SQL = 'USE ' + @DB + '; DROP TRIGGER [dbo].[' + @Table + '_DELTATrigger]'
EXEC(@SQL)
END
SET @SQL = 'CREATE TRIGGER [dbo].[' + @Table + '_DELTATrigger]
ON [dbo].[' + @Table + ']
AFTER INSERT, UPDATE
AS
BEGIN
SET NOCOUNT ON
UPDATE [' + @Table + ']
SET [Delta] = 1
FROM inserted
WHERE inserted.[ID] = [' + @Table + '].[ID]
END;'
EXEC (@SQL)
If I run this statement I get the following issue
Error 8197, Level 16, State 4, Procedure tblBuild_DataPlate_DELTATrigger, Line 1
Message: The object 'dbo.tblBuild_DataPlate' does not exist or is
invalid for this operation.
Changing the dynamic create trigger to include the USE statement does not work as the Create Trigger has to be the first statement
-- Fails: CREATE TRIGGER must be the first statement in its batch, so it
-- cannot be preceded by USE within the same dynamic batch.
SET @SQL = 'USE ' + @DB + '; CREATE TRIGGER [dbo].[' + @Table + '_DELTATrigger]
ON [dbo].[' + @Table + ']
AFTER INSERT, UPDATE
AS
BEGIN
SET NOCOUNT ON
UPDATE [' + @Table + ']
SET [Delta] = 1
FROM inserted
WHERE inserted.[ID] = [' + @Table + '].[ID]
END;'
EXEC (@SQL)
Error 111, Level 15, State 1, Procedure -,
Line 1, Message: 'CREATE TRIGGER' must be the first
statement in a query batch.
You can not fully qualify the object with the database in this case as you get the following error
Error 166, Level 15, State 1, Procedure -, Line 1,
Message: 'CREATE/ALTER TRIGGER' does not allow
specifying the database name as a prefix to the object name.
Is there a dynamic way around this?
The code below will run sp_executesql on the selected database (@DB), so with this you can create the trigger in the right place:
-- Calling the target database's own sp_executesql (DB..sp_executesql) makes
-- the inner batch run in that database, so CREATE TRIGGER lands in the
-- right place and is the first statement of its batch.
SET @SQL = 'EXEC ' + @DB + '..sp_executesql N''
CREATE TRIGGER [dbo].[' + @Table + '_DELTATrigger]
ON [dbo].[' + @Table + ']
AFTER INSERT, UPDATE
AS
BEGIN
SET NOCOUNT ON
END;'''
EXEC (@SQL)

Creating table with the same columns as in a csv

I am writing a stored procedure which is supposed to take data from a CSV file and insert it into a table. My problem is that the number of columns in the CSV file is not fixed (i.e. the number of columns is variable). So I need some way to create a temporary table with exactly the same number of columns as in the CSV file, so that I can use BULK INSERT.
I tried solving the issue by writing an SP which takes the CSV file path as a parameter and creates a table named temptable with one column per column of the CSV. The CSV file looks like
eid,ename,esalary,etemp
1,Mark,1000,
2,Peter,1000,
Stored Proc script
-- Read a CSV's header row, build a table (temptable) with one varchar(100)
-- column per header field, then BULK INSERT the data rows into it.
create proc createtable
@path nvarchar(50)
as
begin
declare @execSQL nvarchar(1000)
declare @tempstr varchar(1000)
declare @col varchar(1000)
declare @table nvarchar(1000)
-- Create a temp table with one column to hold the first row of the csv file
CREATE TABLE #tbl (line VARCHAR(1000))
-- Header read: FIELDTERMINATOR='\n' deliberately makes the whole line one
-- field; LASTROW=1 stops after the header.
SET @execSQL =
'BULK INSERT #tbl
FROM ''' + @path + '''
WITH (
FIELDTERMINATOR =''\n'',
FIRSTROW = 1,
ROWTERMINATOR = ''\n'',
LASTROW = 1
)
'
EXEC sp_executesql @stmt=@execSQL
SET @col = ''
SET @tempstr = (SELECT TOP 1 RTRIM(REPLACE(Line, CHAR(9), ',')) FROM #tbl)
DROP TABLE #tbl
-- Turn each comma-separated header field into "<name> varchar(100),".
WHILE CHARINDEX(',',@tempstr) > 0
BEGIN
SET @col=@col + LTRIM(RTRIM(SUBSTRING(@tempstr, 1, CHARINDEX(',',@tempstr)-1))) + ' varchar(100),'
SET @tempstr = SUBSTRING(@tempstr, CHARINDEX(',',@tempstr)+1, len(@tempstr))
END
SET @col = @col + @tempstr + ' varchar(100)'
if object_id('temptable') is not null
drop table temptable
SET @table = 'create table temptable (' + @col + ')'
EXEC sp_executesql @stmt=@table
-- Load data from csv (skip the header via FIRSTROW = 2)
SET @execSQL =
'BULK INSERT temptable
FROM ''' + @path + '''
WITH (
FIELDTERMINATOR ='','',
FIRSTROW = 2,
ROWTERMINATOR = ''\n''
)
'
EXEC sp_executesql @stmt=@execSQL
end
An improved version of nadeem's script — a little bit more robust.
This code is excellent for loading multiple CSV files without using the default wizards.
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
-- Import a CSV into a newly created table whose columns come from the CSV
-- header row. Field/row terminators are parameters; an existing table with
-- the target name is dropped first. Column names are bracketed and typed
-- varchar(max).
CREATE proc [dbo].[importeer_csv_as_table]
@path nvarchar(255),
@new_table_name varchar(255),
@field_terminator varchar(255),
@row_terminator varchar(255)
as
begin
declare @execsql nvarchar(max)
declare @tempstr varchar(max)
declare @col varchar(max)
declare @table nvarchar(max)
declare @drop_table varchar(max)
-- Create a temp table with one column to hold the first row of the csv file
create table #tbl (line varchar(1000))
-- Header read: using the ROW terminator as the field terminator keeps the
-- whole header line in a single column; lastrow = 1 stops after it.
set @execsql =
'bulk insert #tbl
from ''' + @path + '''
with (
fieldterminator =''' + @row_terminator + ''',
firstrow = 1,
rowterminator = ''' + @row_terminator + ''',
lastrow = 1
)
'
exec sp_executesql @stmt=@execsql
--replace field terminator with comma
update #tbl set line = replace(line, @field_terminator, ',')
set @col = ''
set @tempstr = (select top 1 rtrim(replace(line, char(9), ',')) from #tbl)
drop table #tbl
-- Turn each header field into "[name] varchar(max),".
while charindex(',',@tempstr) > 0
begin
set @col=@col + '[' + ltrim(rtrim(substring(@tempstr, 1, charindex(',',@tempstr)-1))) + '] varchar(max),'
set @tempstr = substring(@tempstr, charindex(',',@tempstr)+1, len(@tempstr))
end
set @col = @col + '[' + @tempstr + '] varchar(max)'
if object_id(@new_table_name) is not null
begin
set @drop_table = 'drop table [' + @new_table_name + ']'
exec sp_executesql @stmt= @drop_table
end
set @table = 'create table [' + @new_table_name + '] (' + @col + ')'
--select @table
exec sp_executesql @stmt=@table
--Load data from csv (skip the header via firstrow = 2)
set @execsql =
'bulk insert [' + @new_table_name + ']
from ''' + @path + '''
with (
fieldterminator =''' + @field_terminator + ''',
firstrow = 2,
rowterminator = ''' + @row_terminator + '''
)
'
exec sp_executesql @stmt=@execsql
end
GO
You could use Powershell to process the CSV file, there is an example here which you could probably adapt to take account of the variable number of fields. You can build the SQL to create a table and then issue a bulk load.