SQL script executes forever - possible loop?

I hope that someone can help me with this script.
I wanted to create a SQL script that rebuilds all non-clustered indexes with the ONLINE = ON option, to prevent locking the tables while the indexes are rebuilt, and without using a cursor (which slows things down). For that reason I used a WHILE loop (at least, that is what most SQL developers suggest), and wrote a SQL script.
What I noticed is that the script executes forever. On one test machine I left it running for 12 minutes, after which I aborted the operation...
Can some SQL guru please tell me what is causing the bottleneck?
Is there a better way to do this, or perhaps a way to "fine-tune" the current script?
Sample of old code:
USE MASTER
GO
DECLARE @DbName AS VARCHAR(50),
    @DBIndexName AS VARCHAR(250),
    @DBTableName AS VARCHAR(100),
    @AlterCommand AS VARCHAR(500),
    @SwitchDB AS NVARCHAR(50)
SELECT name INTO #DatabaseList FROM master..sysdatabases
WHILE EXISTS (SELECT * FROM #DatabaseList)
BEGIN
    SELECT TOP 1 @DbName = name FROM #DatabaseList
    ORDER BY name ASC
    SET @SwitchDB = 'USE ' + @DbName
    EXEC (@SwitchDB)
    -- find all indexes with fragmentation greater than 10% and store them in a table
    SELECT object_name(dt.object_id) Tablename,
        si.name IndexName,
        dt.avg_fragmentation_in_percent AS ExternalFragmentation,
        dt.avg_page_space_used_in_percent AS InternalFragmentation
    INTO #FragmIndex
    FROM
    (
        SELECT object_id, index_id, avg_fragmentation_in_percent, avg_page_space_used_in_percent
        FROM sys.dm_db_index_physical_stats (db_id(@DbName), null, null, null, 'DETAILED')
        WHERE index_id <> 0
    ) AS dt INNER JOIN sys.indexes si ON si.object_id = dt.object_id
        AND si.index_id = dt.index_id AND dt.avg_fragmentation_in_percent > 10
        AND dt.avg_page_space_used_in_percent < 75 AND si.type_desc = 'NONCLUSTERED'
    ORDER BY avg_fragmentation_in_percent DESC
    -- loop over the table and rebuild all of the indexes
    SET @SwitchDB = 'USE master'
    EXEC (@SwitchDB)
    WHILE EXISTS (SELECT * FROM #FragmIndex)
    BEGIN
        SELECT TOP 1 @DBIndexName = IndexName, @DBTableName = Tablename FROM #FragmIndex
        ORDER BY IndexName ASC
        -- set the rebuild command to ONLINE = ON to avoid locking the table
        SET @AlterCommand = 'ALTER INDEX ' + @DBIndexName + ' ON ' + @DbName + '.dbo.' + @DBTableName + ' REBUILD WITH (FILLFACTOR = 80, STATISTICS_NORECOMPUTE = ON, ONLINE = ON)'
        EXEC (@AlterCommand)
        DELETE #FragmIndex
        WHERE IndexName = @DBIndexName
    END
    DELETE #DatabaseList
    WHERE name = @DbName
END
DROP TABLE #DatabaseList
GO
Thank you very much, and best regards.
Just to add the new "corrected code" - for all of you who wish to use it, modify it, or whatever... :)
USE MASTER
GO
DECLARE @DbName AS VARCHAR(50),
    @DBIndexName AS VARCHAR(250),
    @DBTableName AS VARCHAR(100),
    @AlterCommand AS VARCHAR(500),
    @SwitchDB AS NVARCHAR(50),
    @numEntries AS BIGINT,
    @numIndexEntries AS BIGINT
SELECT name INTO #DatabaseList FROM master..sysdatabases
SET @numEntries = (SELECT COUNT(*) FROM #DatabaseList)
WHILE @numEntries > 0 --EXISTS (SELECT * FROM #DatabaseList)
BEGIN
    SELECT TOP 1 @DbName = name FROM #DatabaseList
    ORDER BY name ASC
    SET @SwitchDB = 'USE ' + @DbName
    EXEC (@SwitchDB)
    -- find all indexes with fragmentation greater than 10% and store them in a table
    SELECT object_name(dt.object_id) Tablename,
        si.name IndexName,
        dt.avg_fragmentation_in_percent AS ExternalFragmentation,
        dt.avg_page_space_used_in_percent AS InternalFragmentation
    INTO #FragmIndex
    FROM
    (
        SELECT object_id, index_id, avg_fragmentation_in_percent, avg_page_space_used_in_percent
        FROM sys.dm_db_index_physical_stats (db_id(@DbName), null, null, null, 'DETAILED')
        WHERE index_id <> 0
    ) AS dt INNER JOIN sys.indexes si ON si.object_id = dt.object_id
        AND si.index_id = dt.index_id AND dt.avg_fragmentation_in_percent > 10
        AND dt.avg_page_space_used_in_percent < 75 AND si.type_desc = 'NONCLUSTERED'
    ORDER BY avg_fragmentation_in_percent DESC
    -- loop over the table and rebuild all of the indexes
    SET @SwitchDB = 'USE master'
    EXEC (@SwitchDB)
    SET @numIndexEntries = (SELECT COUNT(*) FROM #FragmIndex)
    WHILE @numIndexEntries > 0 --EXISTS (SELECT * FROM #FragmIndex)
    BEGIN
        SELECT TOP 1 @DBIndexName = IndexName, @DBTableName = Tablename FROM #FragmIndex
        ORDER BY IndexName ASC
        -- set the rebuild command to ONLINE = ON to avoid locking the table
        SET @AlterCommand = 'ALTER INDEX ' + @DBIndexName + ' ON ' + @DbName + '.dbo.' + @DBTableName + ' REBUILD WITH (FILLFACTOR = 80, STATISTICS_NORECOMPUTE = ON, ONLINE = ON)'
        EXEC (@AlterCommand)
        DELETE #FragmIndex WHERE IndexName = @DBIndexName
        SET @numIndexEntries = (SELECT COUNT(*) FROM #FragmIndex)
    END
    DROP TABLE #FragmIndex -- drop the work table so SELECT ... INTO succeeds for the next database
    DELETE #DatabaseList WHERE name = @DbName
    SET @numEntries = (SELECT COUNT(*) FROM #DatabaseList)
END
DROP TABLE #DatabaseList
GO
P.S.: if you have any other suggestions for improving the code, or if you find a bug in the script, please let me know.
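One caveat worth flagging, since bug reports were invited: EXEC(@SwitchDB) runs the USE in a child batch, so the database context reverts as soon as the EXEC returns, and object_name() in the outer batch still resolves names in the database you started from. A minimal sketch of the workaround, assuming the same @DbName variable as above, is to put the USE and the dependent query into one dynamic batch:
DECLARE @sql NVARCHAR(MAX)
-- QUOTENAME guards against odd database names; everything in @sql runs in @DbName's context
SET @sql = N'USE ' + QUOTENAME(@DbName) + N';
SELECT object_name(i.object_id) AS TableName, i.name AS IndexName
FROM sys.indexes i
WHERE i.type_desc = ''NONCLUSTERED'';'
EXEC (@sql)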

Two possible problems might cause that effect.
First of all, the DELETE statement may not really delete anything. You can easily check this by PRINTing the number of entries in #DatabaseList after every DELETE statement.
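For that check, a quick sketch using the question's own names (placed right after the DELETE):
DELETE #DatabaseList WHERE name = @DbName
DECLARE @remaining INT
SET @remaining = (SELECT COUNT(*) FROM #DatabaseList)
-- if this number never decreases, the DELETE is the culprit
PRINT 'Rows left in #DatabaseList: ' + CAST(@remaining AS VARCHAR(20))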
Secondly, the EXISTS (SELECT * FROM #DatabaseList) could just be evaluated once, not upon every iteration of the loop.
I'd change this as follows:
...
DECLARE @numEntries BIGINT
SET @numEntries = (SELECT COUNT(*) FROM #DatabaseList)
WHILE @numEntries > 0
BEGIN
...
DELETE #DatabaseList WHERE name = @DbName
SET @numEntries = (SELECT COUNT(*) FROM #DatabaseList)
END
...

Related

SQL Server Performance

I have a database with more than 30 tables and more than 270k records in the most important table, and I created a view that gets data from this table and the others.
When I run the code below on my machine it takes less than 4 seconds to get the data from the view.
select * from view
My problem is that when I run the same database script on another machine and run the same query against the view, it takes a very long time.
Code for the view:
SELECT
dbo.UserSite.UserId,
dbo.UserSite.Name,
dbo.Site.RootPageURL,
dbo.PDFDocument.DocumentId,
dbo.RunDocumentVerificationResult.Status,
dbo.UserSite.UserSiteId,
dbo.Systemcode.Value,
dbo.RunDocumentVerificationResult.PageNumber,
dbo.RunDocumentVerificationResult.TestNameID,
dbo.RunDocumentVerificationResult.VerificationResultID,
dbo.TaskRun.VerificationEndDate,
dbo.TaskRun.RunId,
dbo.RunDocument.IsTagged,
dbo.RunDocument.IsProtected,
dbo.RunDocument.IsCorrupted
FROM
dbo.UserSite
INNER JOIN dbo.Site ON dbo.UserSite.SiteId = dbo.Site.SiteId
INNER JOIN dbo.TaskUserSites ON dbo.UserSite.UserSiteId = dbo.TaskUserSites.UserSiteId
INNER JOIN dbo.Task ON dbo.TaskUserSites.TaskId = dbo.Task.TaskId
INNER JOIN dbo.TaskRun ON dbo.Task.TaskId = dbo.TaskRun.TaskId
INNER JOIN dbo.RunDocument ON dbo.TaskRun.RunId = dbo.RunDocument.RunId
INNER JOIN dbo.PDFDocument ON dbo.PDFDocument.DocumentId = dbo.RunDocument.DocumentId
INNER JOIN dbo.RunDocumentVerificationResult ON dbo.RunDocument.RunDocumentId = dbo.RunDocumentVerificationResult.RunDocumentID
INNER JOIN dbo.Systemcode ON dbo.RunDocumentVerificationResult.Status = dbo.Systemcode.ID
Procedure code:
ALTER proc [dbo].[status]
as
begin
begin transaction
declare @usersiteid bigint
declare @runid bigint
declare @TestedFiles int
declare @TaggedFiles int
declare @UnTaggedFiles int
declare @PassedFiles int
declare @FaildFiles int
declare @Name varchar(500)
declare @VerificationEndDate datetime
declare @RootPageURL varchar (1024)
declare @status table ( Name varchar(1000), Urlrootpage varchar(2000), Testedfile int, TaggedFiles int, Untaggedfile int, passedfiles int, faildfiles int, VerificationEndDate datetime, rootpageurl varchar(1024) )
declare @domain table ( name varchar(1000), urlrootpage varchar(2000) )
if (1=2)
begin
select 'n' Name, 'r' Urlrootpage, 1 Testedfile, 1 TaggedFiles, 0 Untaggedfile, 0 passedfiles, 0 faildfiles, GETDATE() VerificationEndDate, 'r' rootpageurl where 1=2
end
create table #status ( Name varchar(1000), Urlrootpage varchar(2000), Testedfile int, TaggedFiles int, Untaggedfile int, passedfiles int, faildfiles int, VerificationEndDate datetime, rootpageurl varchar(1024) )
set @usersiteid = (select min(UserSiteId) from vw)
set @runid = (select max(runid) from vw where usersiteid = @usersiteid)
while @usersiteid is not null
begin
set @TestedFiles = (select count(distinct documentid) from vw where UserSiteId = @usersiteid and runid = @runid)
set @TaggedFiles = (select count(distinct documentid) from vw where istagged = 1 and UserSiteId = @usersiteid and runid = @runid)
set @UnTaggedFiles = (select count(distinct documentid) from vw where istagged = 0 and UserSiteId = @usersiteid and runid = @runid)
set @PassedFiles = (select count(distinct documentid) from vw where Status <> 1 and DocumentId not in (select DocumentId from vw where status = 1) and UserSiteId = @usersiteid and runid = @runid)
set @FaildFiles = (select count(distinct documentid) from vw where Status = 1 and UserSiteId = @usersiteid and runid = @runid)
set @Name = (select distinct name from vw where UserSiteId = @usersiteid)
set @RootPageURL = (select distinct RootPageURL from vw where UserSiteId = @usersiteid)
set @VerificationEndDate = (select max(distinct VerificationEndDate) from vw where UserSiteId = @usersiteid and RunId = @runid)
insert into #status ( Name, Urlrootpage, Testedfile, TaggedFiles, Untaggedfile, passedfiles, faildfiles, VerificationEndDate ) values
(@Name, @RootPageURL, @TestedFiles, @TaggedFiles, @UnTaggedFiles, @PassedFiles, @FaildFiles, @VerificationEndDate)
set @usersiteid = (select min(UserSiteId) from vw where UserSiteId > @usersiteid)
set @runid = (select max(runid) from vw where usersiteid = @usersiteid)
end
insert into @domain select UserSite.Name, Site.RootPageURL from UserSite inner join Site on UserSite.SiteId = Site.SiteId where UserSiteId not in (select UserSiteId from vw)
insert into #status select name, urlrootpage, 0, 0, 0, 0, 0, null, 0 from @domain
select Name, Urlrootpage, Testedfile, TaggedFiles, Untaggedfile, passedfiles, faildfiles from #status
end
If (@@ERROR <> 0) -- Check if any error
Begin
rollback transaction
End
else
commit transaction
return
I would do a little test to find out whether it really is, as suggested, the network bandwidth that makes your query slow, or, better said, makes it look slow. Restrict the query to its first few rows and run it - in SQL Server that is SELECT TOP 10 * (the equivalent of LIMIT 10). The whole query will still execute, but only the first 10 rows will be sent, so if the network is your bottleneck, it should now be very fast. If it is still that slow, the other machine's SQL Server probably has very little memory to use, so it can't fit the whole result in, while your local SQL Server is probably configured to use more memory and therefore executes faster. In this case, giving that SQL Server more memory should fix the problem. That should be no problem at all since, as already mentioned in the comments, your database is actually very small, so the memory currently used will be very small too.
If your network connection turns out to be the bottleneck, you need to decide whether, and why, you need all the results to be sent at once. I can't really help you on that one, since I don't know what the application is supposed to do with the data. But you should probably either do some aggregation in the database, or send only a small part of the data over the network.
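To make the test concrete, a sketch ("vw" standing in for your view's name):
-- compare elapsed times; if TOP 10 is instant but the full select is slow,
-- the time is going into transferring rows, not executing the query
SET STATISTICS TIME ON
SELECT TOP (10) * FROM vw
SELECT * FROM vw
SET STATISTICS TIME OFF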

optimizing the code

I have written this code for a small database, but now the database size has increased and it is showing a timeout error. Please help me optimize it.
Below is the code:
IF OBJECT_ID('Temp_expo') is not null
begin
drop table Temp_expo
end
set @query3 = 'SELECT SPCT_ID_REL_LOW,SPCT_ID_REL_HIGH,ROW_NUMBER() over (order by PDBC_PFX) as TempId
INTO Temp_expo
FROM ['+ @FCTServer +'].['+@FCTDBName+'].dbo.CMC_SPCT_SUPP_CONV
where SPCT_ID_REL_LOW <> '''' and SPCT_ID_REL_HIGH <> '''''
exec (@query3)
Select @minCount = min(TempId) from Temp_expo
Select @maxCount = max(TempId) from Temp_expo
create table #ICD9SPCT
(
ICD9Code varchar(200)
)
while @minCount <= @maxCount
begin
select @low = SPCT_ID_REL_LOW, @high = SPCT_ID_REL_HIGH
from Temp_expo
where TempId = @minCount
group by SPCT_ID_REL_LOW, SPCT_ID_REL_HIGH
set @loworder = (select ISNULL(OrderId,0) from FCT_ICD9_Diag_ORDER where ICD9 = @low)
set @highorder = (select ISNULL(OrderId,0) from FCT_ICD9_Diag_ORDER where ICD9 = @high)
insert into #ICD9SPCT
select ICD9 from FCT_ICD9_Diag_ORDER ordert
left join #ICD9SPCT icdorder on ordert.ICD9 = icdorder.ICD9Code
where OrderId between @loworder and @highorder and icdorder.ICD9Code is null
set @minCount = @minCount + 1;
end
If this is for SQL SERVER, there are some basic tips you can try:
Use WITH (NOLOCK) after every table you select from, i.e.
select ICD9 from FCT_ICD9_Diag_ORDER ordert WITH (NOLOCK)
left join #ICD9SPCT icdorder on ordert.ICD9 = icdorder.ICD9Code
where OrderId between @loworder and @highorder and icdorder.ICD9Code is null
You can also try to change your temp tables to table variables, by just changing the # for an @, like:
declare @ICD9SPCT table
(
ICD9Code varchar(200)
)
Still, the WHILE loop you are using may be the primary cause of your problem.
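Building on that last point, a set-based rewrite (just a sketch, reusing the table and column names from your code, and assuming OrderId is what defines the low-to-high range) would replace the whole loop with a single insert:
-- expand every low/high range in one pass instead of one TempId at a time
insert into #ICD9SPCT (ICD9Code)
select distinct o.ICD9
from Temp_expo t
join FCT_ICD9_Diag_ORDER lo on lo.ICD9 = t.SPCT_ID_REL_LOW
join FCT_ICD9_Diag_ORDER hi on hi.ICD9 = t.SPCT_ID_REL_HIGH
join FCT_ICD9_Diag_ORDER o on o.OrderId between lo.OrderId and hi.OrderId
One pass over FCT_ICD9_Diag_ORDER usually beats thousands of loop iterations, each doing its own lookups.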

Efficient SQL Server stored procedure

I am using SQL Server 2008 and running the following stored procedure, which needs to "clean" a 70 million row table by moving about 50 million rows to another table; id_col is an integer (primary identity key).
According to the last run I made, it works correctly, but it is expected to last for about 200 days:
SET NOCOUNT ON
-- define the last ID handled
DECLARE @LastID integer
SET @LastID = 0
declare @tempDate datetime
set @tempDate = dateadd(dd,-20,getdate())
-- define the ID to be handled now
DECLARE @IDToHandle integer
DECLARE @iCounter integer
DECLARE @watch1 nvarchar(50)
DECLARE @watch2 nvarchar(50)
set @iCounter = 0
-- select the next to handle
SELECT TOP 1 @IDToHandle = id_col
FROM MAIN_TABLE
WHERE id_col > @LastID and DATEDIFF(DD,someDateCol,otherDateCol) < 1
and datediff(dd,someDateCol,@tempDate) > 0 and (some_other_int_col = 1745 or some_other_int_col = 1548 or some_other_int_col = 4785)
ORDER BY id_col
-- as long as we have s......
WHILE @IDToHandle IS NOT NULL
BEGIN
IF ((select count(1) from SOME_OTHER_TABLE_THAT_CONTAINS_20k_ROWS where some_int_col = @IDToHandle) = 0 and (select count(1) from A_70k_rows_table where some_int_col = @IDToHandle) = 0)
BEGIN
INSERT INTO SECONDERY_TABLE
SELECT col1,col2,col3.....
FROM MAIN_TABLE WHERE id_col = @IDToHandle
EXEC [dbo].[DeleteByID] @ID = @IDToHandle --deletes the row from 2 other tables that are related to the MAIN_TABLE and then from the MAIN_TABLE
set @iCounter = @iCounter + 1
END
IF (@iCounter % 1000 = 0)
begin
set @watch1 = 'iCounter - ' + CAST(@iCounter AS VARCHAR)
set @watch2 = 'IDToHandle - ' + CAST(@IDToHandle AS VARCHAR)
raiserror (@watch1, 10, 1) with nowait
raiserror (@watch2, 10, 1) with nowait
end
-- set the last handled to the one we just handled
SET @LastID = @IDToHandle
SET @IDToHandle = NULL
-- select the next to handle
SELECT TOP 1 @IDToHandle = id_col
FROM MAIN_TABLE
WHERE id_col > @LastID and DATEDIFF(DD,someDateCol,otherDateCol) < 1
and datediff(dd,someDateCol,@tempDate) > 0 and (some_other_int_col = 1745 or some_other_int_col = 1548 or some_other_int_col = 4785)
ORDER BY id_col
END
Any ideas or directions for improving this procedure's run time are welcome.
Yes, try this:
Declare @Ids Table (id int Primary Key not Null)
Insert @Ids(id)
Select id_col
From MAIN_TABLE m
Where someDateCol >= otherDateCol
And someDateCol < @tempDate -- If there are times in these datetime fields,
-- then you may need to modify this condition.
And some_other_int_col In (1745, 1548, 4785)
And Not Exists (Select * From SOME_OTHER_TABLE_THAT_CONTAINS_20k_ROWS
Where some_int_col = m.id_col)
And Not Exists (Select * From A_70k_rows_table
Where some_int_col = m.id_col)
Select id from @Ids -- this to confirm above code generates the correct list of Ids
return -- this line to stop (Not do insert/deletes) until you have verified @Ids is correct
-- Once you have verified that above @Ids is correctly populated,
-- then delete or comment out the select and return lines above so the insert runs.
Begin Transaction
Delete ot -- eliminate row-by-row call to second stored proc
From OtherTable ot
Join MAIN_TABLE m On m.id_col = ot.FKCol
Join @Ids i On i.Id = m.id_col
Insert SECONDERY_TABLE(col1, col2, etc.)
Select col1, col2, col3.....
FROM MAIN_TABLE m Join @Ids i On i.Id = m.id_col
Delete m -- eliminate row-by-row call to second stored proc
FROM MAIN_TABLE m
Join @Ids i On i.Id = m.id_col
Commit Transaction
Explanation:
You had numerous filtering conditions that were not SARGable, i.e., they would force a complete table scan for every iteration of your loop, instead of being able to use any existing index. Always try to avoid filter conditions that apply processing logic to a table column value before comparing it to some other value. This eliminates the opportunity for the query optimizer to use an index.
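For example, taking one predicate from the original loop (a sketch; the rewrite assumes someDateCol carries no time-of-day component you care about):
-- non-SARGable: wrapping the column in DATEDIFF forces a full scan
WHERE datediff(dd, someDateCol, @tempDate) > 0
-- SARGable: the bare column compared to a value can use an index on someDateCol
WHERE someDateCol < @tempDate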
You were executing the inserts one at a time... Way better to generate a list of PK Ids that need to be processed (all at once) and then do all the inserts at once, in one statement.

SQL: Query timeout expired

I have a simple query that updates a table (30 columns and about 150,000 rows).
For example:
UPDATE tblSomeTable set F3 = @F3 where F1 = @F1
This query affects about 2,500 rows.
tblSomeTable has a trigger:
ALTER TRIGGER [dbo].[trg_tblSomeTable]
ON [dbo].[tblSomeTable]
AFTER INSERT,DELETE,UPDATE
AS
BEGIN
declare @operationType nvarchar(1)
declare @createDate datetime
declare @UpdatedColumnsMask varbinary(500) = COLUMNS_UPDATED()
-- detect operation type
if not exists(select top 1 * from inserted)
begin
-- delete
SET @operationType = 'D'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM deleted
end
else if not exists(select top 1 * from deleted)
begin
-- insert
SET @operationType = 'I'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM inserted
end
else
begin
-- update
SET @operationType = 'U'
SELECT @createDate = dbo.uf_DateWithCompTimeZone(CompanyId) FROM inserted
end
-- log data to tmp table
INSERT INTO tbl1
SELECT
@createDate,
@operationType,
@status,
@updatedColumnsMask,
d.F1,
i.F1,
d.F2,
i.F2,
d.F3,
i.F3,
d.F4,
i.F4,
d.F5,
i.F5,
...
FROM (Select 1 as temp) t
LEFT JOIN inserted i on 1=1
LEFT JOIN deleted d on 1=1
END
And when I execute the update query, I get a timeout.
How can I optimize this logic to avoid the timeout?
Thank you.
This query:
SELECT *
FROM (
SELECT 1 AS temp
) t
LEFT JOIN
INSERTED i
ON 1 = 1
LEFT JOIN
DELETED d
ON 1 = 1
will yield 2500 ^ 2 = 6250000 records from a cartesian product of INSERTED and DELETED (that is all possible combinations of all records in both tables), which will be inserted into tbl1.
Is that what you wanted to do?
Most probably, you want to join the tables on their PRIMARY KEY:
INSERT
INTO tbl1
SELECT #createDate,
#operationType,
#status,
#updatedColumnsMask,
d.F1,
i.F1,
d.F2,
i.F2,
d.F3,
i.F3,
d.F4,
i.F4,
d.F5,
i.F5,
...
FROM INSERTED i
FULL JOIN
DELETED d
ON i.id = d.id
This will treat update to the PK as deleting a record and inserting another, with a new PK.
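A quick way to see the difference in row counts (a sketch you could run inside the trigger; it assumes id is the primary key, as above):
-- for an UPDATE touching n rows:
SELECT COUNT(*) FROM inserted i LEFT JOIN deleted d ON 1 = 1 -- n * n logged rows
SELECT COUNT(*) FROM inserted i FULL JOIN deleted d ON i.id = d.id -- n logged rows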
Thanks Quassnoi, the "FULL JOIN" idea is a good one. It helped me.
I also tried updating the table in portions (1,000 rows at a time) to make my code run faster, because for some companyId values I need to update more than 160,000 rows.
Instead of the old code:
UPDATE tblSomeTable set someVal = @someVal where companyId = @companyId
I now use the one below:
declare @rc integer = 0
declare @parts integer = 0
declare @index integer = 0
declare @portionSize int = 1000
-- select Ids for update
declare @tempIds table (id int)
insert into @tempIds
select id from tblSomeTable where companyId = @companyId
-- calculate amount of iterations
set @rc = @@rowcount
set @parts = @rc / @portionSize + 1
-- update table in portions
WHILE (@parts > @index)
begin
UPDATE TOP (@portionSize) t
SET someVal = @someVal
FROM tblSomeTable t
JOIN @tempIds t1 on t1.id = t.id
WHERE companyId = @companyId
delete top (@portionSize) from @tempIds
set @index += 1
end
What do you think about this? Does it make sense? If yes, how do I choose the correct portion size?
Or is a simple update also a good solution? I just want to avoid locks in the future.
Thanks

Execute Stored Procedure in a Cursor

I need to execute the stored procedure sp_spaceused for all the tables in my database.
I have used a cursor for this; please find the query below. The thing is, I need to generate the report as a single result set.
With the query below I get a separate result set per table.
Declare @Name Varchar(500)
Declare @GetName Cursor
Set @GetName = Cursor for
select name from sys.tables
Open @GetName
Fetch Next From @GetName into @Name
While @@Fetch_Status = 0
Begin
exec sp_spaceused @Name
Fetch Next From @GetName into @Name
End
Close @GetName
Deallocate @GetName
You can use something like the below (the data types may well need tweaking).
Edit: Please see Joe's answer for the correct data types to use!
create table #t
(
name sysname,
rows bigint,
reserved varchar(50),
data varchar(50),
index_size varchar(50),
unused varchar(50)
)
EXEC sp_MSForEachtable 'insert into #t EXEC sp_spaceused ''?'''
select name,rows,reserved,data,index_size,unused
from #t
create table #Temp (
name nvarchar(128),
[rows] char(11),
reserved varchar(18),
data varchar(18),
index_size varchar(18),
unused varchar(18)
)
insert into #Temp
exec sp_msforeachtable 'sp_spaceused [?]'
select * from #Temp
A faster, set-based solution to this problem is to join sys.dm_db_partition_stats and sys.internal_tables, just like sp_spaceused does. The code below is what runs when you generate the "Disk Usage By Table" report in Management Studio.
In my database, the set-based solution returned in 60 ms, while the cursor ran for 22 seconds.
begin try
SELECT
(row_number() over(order by a3.name, a2.name))%2 as l1,
a3.name AS [schemaname],
a2.name AS [tablename],
a1.rows as row_count,
(a1.reserved + ISNULL(a4.reserved,0))* 8 AS reserved,
a1.data * 8 AS data,
(CASE WHEN (a1.used + ISNULL(a4.used,0)) > a1.data THEN (a1.used + ISNULL(a4.used,0)) - a1.data ELSE 0 END) * 8 AS index_size,
(CASE WHEN (a1.reserved + ISNULL(a4.reserved,0)) > a1.used THEN (a1.reserved + ISNULL(a4.reserved,0)) - a1.used ELSE 0 END) * 8 AS unused
FROM
(SELECT
ps.object_id,
SUM (
CASE
WHEN (ps.index_id < 2) THEN row_count
ELSE 0
END
) AS [rows],
SUM (ps.reserved_page_count) AS reserved,
SUM (
CASE
WHEN (ps.index_id < 2) THEN (ps.in_row_data_page_count + ps.lob_used_page_count + ps.row_overflow_used_page_count)
ELSE (ps.lob_used_page_count + ps.row_overflow_used_page_count)
END
) AS data,
SUM (ps.used_page_count) AS used
FROM sys.dm_db_partition_stats ps
GROUP BY ps.object_id) AS a1
LEFT OUTER JOIN
(SELECT
it.parent_id,
SUM(ps.reserved_page_count) AS reserved,
SUM(ps.used_page_count) AS used
FROM sys.dm_db_partition_stats ps
INNER JOIN sys.internal_tables it ON (it.object_id = ps.object_id)
WHERE it.internal_type IN (202,204)
GROUP BY it.parent_id) AS a4 ON (a4.parent_id = a1.object_id)
INNER JOIN sys.all_objects a2 ON ( a1.object_id = a2.object_id )
INNER JOIN sys.schemas a3 ON (a2.schema_id = a3.schema_id)
WHERE a2.type <> N'S' and a2.type <> N'IT'
ORDER BY a3.name, a2.name
end try
begin catch
select
-100 as l1
, 1 as schemaname
, ERROR_NUMBER() as tablename
, ERROR_SEVERITY() as row_count
, ERROR_STATE() as reserved
, ERROR_MESSAGE() as data
, 1 as index_size
, 1 as unused
end catch
Try this:
Create a table (temp or otherwise) that mirrors the result set of the sproc. Then, in the body of your cursor, run this:
INSERT INTO <tablename> EXEC sp_spaceused @Name
After you close/deallocate the cursor, select from the table.
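Put together with the cursor from the question, a complete sketch (reusing the column types from the answers above) looks like this:
-- staging table that mirrors sp_spaceused's result set
create table #SpaceUsed (
name sysname,
[rows] char(11),
reserved varchar(18),
data varchar(18),
index_size varchar(18),
unused varchar(18)
)
Declare @Name sysname
Declare @GetName Cursor
Set @GetName = Cursor for select name from sys.tables
Open @GetName
Fetch Next From @GetName into @Name
While @@Fetch_Status = 0
Begin
-- each call appends one row instead of returning its own result set
insert into #SpaceUsed exec sp_spaceused @Name
Fetch Next From @GetName into @Name
End
Close @GetName
Deallocate @GetName
-- one result set for the whole database
select * from #SpaceUsed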