SQL insert into suspended - Read uncommitted or split query? - sql

I have a stored procedure that contains two insert into queries. While running the sp I often see these two as being suspended. The sp has been tested on a test server with almost no traffic but now as we moved it on to the production server these suspended states have appeared with 1-2 deadlocks.
I assume that SQL Server creates table locks while running these queries but I don't know what is the preferred way to solve this?
The insert into queries are moving 30000 records in one iteration into another database. These are archive data, so queries coming from the normal production processes have nothing to do with the data being archived; the records are 2-3 years old.
Can I add WITH NOLOCK to the selects to avoid suspended states and deadlocks?
Or should I set ISOLATION LEVEL to READ UNCOMMITTED? (these records are old, they won't change)
What other options do I have? Cursors to run through the ids it has to archive one by one? (I tried not to use cursors until now.)
These are the two queries. #workitemids and #workstepids are table variables containing one int field.
-- Archive copy #1: copy the WorkItems rows listed in #workitemIds into the archive DB.
-- NOTE(review): "select *" relies on both tables having identical column order;
-- an explicit column list would fail loudly on schema drift instead of silently misaligning.
insert into archive_****.archive.workitems
select * from ****.dbo.WorkItems where ****.dbo.workitems.Id in (select Id from #workitemIds);
-- Archive copy #2: copy the worksteps listed in #workstepIds, folding each step's
-- workstepdatas rows into a single XML column (Workstepdatas) via a correlated subquery.
insert into archive_****.archive.worksteps([Id], [Timestamp], [Description], [WorkPlace_Id], [WorkItemState_Id], [UserId], [WorkItem_Id], [Technology_Id], [Failcodes_Id], [DrawingNo], [ManualData], [Deleted], [WorkItemState_Arrival_Id], Workstepdatas)
select [Id], [Timestamp], [Description], [WorkPlace_Id], [WorkItemState_Id], [UserId], [WorkItem_Id], [Technology_Id], [Failcodes_Id], [DrawingNo], [ManualData], [Deleted], [WorkItemState_Arrival_Id],
-- correlated subquery: one XML fragment per workstep (runs once per outer row)
(select Fieldname Field, Value [Value], Unit [Unit] from ****.dbo.workstepdatas wsd
left join ****.dbo.technologydatafields tdf on tdf.Id = wsd.TechnologyDatafields_Id
where tdf.fieldname is not null and wsd.WorkStep_Id = ws.Id
-- exclude values containing control characters 0x00-0x1F, which are illegal in XML
and value NOT LIKE '%[' + CHAR(0)+ '-' +CHAR(31)+']%' COLLATE Latin1_General_100_BIN2
for xml auto,type)
from ****.dbo.worksteps ws
where ws.Id in (select Id from #workstepIds);

Please attempt to rewrite the nested query as a CTE, as shown below, and advise whether it makes any progress. You will need to change the database names.
-- Bulk-copy the selected WorkItems into the archive with a table lock
-- (TABLOCK enables minimal logging on the target under the right recovery model).
-- fixed: the hint must be written WITH (TABLOCK); "(with tablock)" is a syntax error.
insert into archive_db.archive.workitems with (tablock)
select w.*  -- fixed: a bare "select *" would also emit wi.id and break the column count
from db.dbo.WorkItems as w
inner join #workitemIds as wi
    on w.Id = wi.id;
-- Build the per-workstep XML payload once in a CTE, then insert the archived rows.
with xmlcte ([ID], [xmlRow])
as
(
    select ws.id
         , (
               select Fieldname as Field
                    , [Value]
                    , Unit
               from db.dbo.workstepdatas wsd
               left join db.dbo.technologydatafields tdf
                 on tdf.Id = wsd.TechnologyDatafields_Id
               where tdf.fieldname is not null
                 and wsd.WorkStep_Id = ws.Id
                 -- exclude values containing control chars 0x00-0x1F (illegal in XML)
                 and [value] not like '%[' + char(0) + '-' + char(31) + ']%' collate Latin1_General_100_BIN2
               for xml auto, type
           ) as xmlRow
    from db.dbo.worksteps as ws
    -- fixed: restrict the XML work to the rows being archived; the original CTE
    -- computed the XML fragment for EVERY workstep in the table.
    where ws.Id in (select id from #workstepIds)
)
-- fixed: the hint must be written WITH (TABLOCK); "(with tablock)" is a syntax error.
insert into archive_db.archive.worksteps with (tablock)
(
    [Id]
  , [Timestamp]
  , [Description]
  , [WorkPlace_Id]
  , [WorkItemState_Id]
  , [UserId]
  , [WorkItem_Id]
  , [Technology_Id]
  , [Failcodes_Id]
  , [DrawingNo]
  , [ManualData]
  , [Deleted]
  , [WorkItemState_Arrival_Id]
  , Workstepdatas
)
select ws.[Id]
     , [Timestamp]
     , [Description]
     , [WorkPlace_Id]
     , [WorkItemState_Id]
     , [UserId]
     , [WorkItem_Id]
     , [Technology_Id]
     , [Failcodes_Id]
     , [DrawingNo]
     , [ManualData]
     , [Deleted]
     , [WorkItemState_Arrival_Id]
     , [xmlRow]
from db.dbo.worksteps ws
inner join #workstepIds as wsi
  on ws.Id = wsi.id
inner join xmlcte -- inner join is safe: xmlcte covers every id in #workstepIds
  on ws.id = xmlcte.id;

Related

How to remove duplicates from a stored procedure without using DISTINCT

A stored procedure has been written that includes duplicates. ROW_NUMBER was tried but did not work. DISTINCT has worked but was unable to retrieve the large number of records required (about 700,000). Is there another way of using RANK or GROUP BY to remove duplicates?
I have used DISTINCT and this does not retrieve enough records. I have not successfully used GROUP BY.
I have attempted to use ROW_NUMBER but this did not work either (you can see where it's commented out).
-- Returns up to @maxrows food detail rows for a food group, via the
-- report.GetOrderList table-valued function joined to food_group.
-- NOTE(review): parameters shown as "#NAME" in the post are assumed to be
-- "@NAME" (site rendering artifact).
CREATE PROCEDURE [report].[get_foodDetails]
    @foodgroup_id INT,
    @shop_id INT = 0,
    @product_id INT = 0,
    @maxrows INT = 600,
    @expiry INT = 1,
    @productactive INT = 1,
    @expiryPeriod DATETIME = '9999-12-31 23:59:59'
AS
BEGIN  -- fixed: the trailing END had no matching BEGIN
    -- The sentinel default means "no period supplied": fall back to now.
    IF (@expiryPeriod >= '9999-12-31')
    BEGIN
        SET @expiryPeriod = GETDATE()
    END
    SELECT
        -- dp.RowNumber
        ISNULL([FoodType], '') AS [Foodtype],
        ISNULL([FoodColour], '') AS [FoodColour],
        ISNULL([FoodBarcode], '') AS [FoodBarcode],
        ISNULL([FoodArticleNum], 0) AS [FoodArticleNum],
        -- fixed: missing "]" after FoodShelfLife and invalid month in '9999-21-31';
        -- the [FoodShelfLIFe] alias typo is kept so callers' column name is unchanged
        ISNULL([FoodShelfLife], '9999-12-31') AS [FoodShelfLIFe],
        -- carried through so the final ORDER BY has a column to sort on
        -- NOTE(review): original ordered by this column without selecting it,
        -- which fails at runtime — confirm it exists in GetOrderList's output
        dp.[device_packet_created_date]
    INTO
        #devfood
    FROM
        report.[GetOrderList] (@foodgroup_id, @product_id, @productactive, @expiry, @expiryPeriod, @shop_id, @maxrows ) dp
    INNER JOIN
        -- NOTE(review): original used undefined alias "it"; assuming dp — confirm
        food_group fg ON fg.food_group_id = dp.item_FK_item_group_id
    SELECT TOP(@maxrows) *
    FROM #devfood
    ORDER BY [device_packet_created_date]
END
Around 700,000 records retrieved. This is currently achieved although there are duplicates. There are only 20,000 retrieved when using DISTINCT (but no duplicates).
The sample code below is from a presentation I've used to demonstrate CTE's. This is a common mechanism for removing duplicates and is very fast. In this case the duplicates are removed directly from the table. If that is not your objective you could use a temp table or a prior chained CTE. Note that the important thing is what columns you partition by. If, in the example, you partitioned by only [name] you would not see both the red rose and the white rose.
-------------------------------------------------
-- Demo setup: rebuild [flower].[order] with deliberate duplicate (flower, color) rows.
IF OBJECT_ID(N'[flower].[order]', N'U') IS NOT NULL
    DROP TABLE [flower].[order];
GO
CREATE TABLE [flower].[order]
(
    [id] INT IDENTITY(1, 1) NOT NULL CONSTRAINT [flower.order.id.clustered_primary_key] PRIMARY KEY CLUSTERED,
    [flower] NVARCHAR(128),
    [color] NVARCHAR(128),
    [count] INT
);
GO
-- Seed data: several (flower, color) pairs appear more than once on purpose.
INSERT INTO [flower].[order] ([flower], [color], [count])
VALUES (N'rose', N'red', 5),
       (N'rose', N'red', 3),
       (N'rose', N'white', 2),
       (N'rose', N'red', 1),
       (N'rose', N'red', 9),
       (N'marigold', N'yellow', 2),
       (N'marigold', N'yellow', 9),
       (N'marigold', N'yellow', 4),
       (N'chamomile', N'amber', 9),
       (N'chamomile', N'amber', 4),
       (N'lily', N'white', 12);
GO
-- Show the data, duplicates included.
SELECT [flower],
       [color]
FROM [flower].[order];
GO
go
--
-------------------------------------------------
-- Number each (flower, color) group; every row beyond the first in a group is a duplicate.
WITH [duplicate_finder] ([name], [color], [sequence]) AS
(
    SELECT [flower],
           [color],
           ROW_NUMBER() OVER (PARTITION BY [flower], [color]
                              ORDER BY [flower] DESC) AS [sequence]
    FROM [flower].[order]
)
-- Deleting through the CTE removes the duplicate rows from the base table itself.
DELETE FROM [duplicate_finder]
WHERE [sequence] > 1;
--
-- no duplicates remain
-------------------------------------------------
SELECT [flower],
       [color]
FROM [flower].[order];
I know you said you tried ROW_NUMBER, but did you try it either of these ways?
First, a CTE. The CTE here is just your existing query, but with a ROW_NUMBER windowing function attached. For each duplicate iteration of a record, it will add one to RowNumber. With the next unique group of records, RowNumber resets to 1.
After the pull, only take the records with a RowNumber = 1. I use this all the time for deleting dupes out of the underlying record set, but it works well to just identify them as well.
-- De-duplicate the GetOrderList output: number identical rows within each
-- partition, then keep only RowNumber = 1.
-- NOTE(review): parameters shown as "#NAME" are assumed to be "@NAME".
WITH NoDupes AS
(
    SELECT
        ROW_NUMBER() OVER (PARTITION BY
                               ISNULL(FoodType, '')
                               ,ISNULL(FoodColour, '')
                               ,ISNULL(FoodBarcode, '')
                               ,ISNULL(FoodArticleNum, 0)            -- fixed: '' default on a numeric column
                               ,ISNULL(FoodShelfLife, '9999-12-31')  -- fixed: '9999-21-31' is not a valid date
                           ORDER BY
                               (SELECT 0)  -- arbitrary tie-break: any one duplicate may be kept
                          ) AS RowNumber
        ,ISNULL(FoodType, '') AS Foodtype
        ,ISNULL(FoodColour, '') AS FoodColour
        ,ISNULL(FoodBarcode, '') AS FoodBarcode
        ,ISNULL(FoodArticleNum, 0) AS FoodArticleNum
        ,ISNULL(FoodShelfLife, '9999-12-31') AS FoodShelfLIFe  -- alias typo kept for caller compatibility
    FROM
        report.GetOrderList(@foodgroup_id, @product_id, @productactive, @expiry, @expiryPeriod, @shop_id, @maxrows) AS dp
    INNER JOIN
        food_group AS fg
        ON
        -- NOTE(review): original referenced undefined alias "it"; assuming dp — confirm
        fg.food_group_id = dp.item_FK_item_group_id
)
SELECT
    nd.Foodtype
    ,nd.FoodColour
    ,nd.FoodBarcode
    ,nd.FoodArticleNum
    ,nd.FoodShelfLIFe
INTO
    #devfood
FROM
    NoDupes AS nd
WHERE
    nd.RowNumber = 1;  -- fixed: original used "NoDupes.RowNumber" despite aliasing as nd
Alternatively (and shorter) you could try SELECT TOP (1) WITH TIES, using that same ROW_NUMBER function to order the record set. The TOP (1) WITH TIES part functionally does the same thing as the CTE, returning only the first record of each set of duplicates.
-- Same de-duplication as the CTE version, but via TOP (1) WITH TIES: ordering
-- by ROW_NUMBER keeps exactly the first row of every duplicate group.
-- NOTE(review): parameters shown as "#NAME" are assumed to be "@NAME".
SELECT
    TOP (1) WITH TIES
    ISNULL(FoodType, '') AS Foodtype
    ,ISNULL(FoodColour, '') AS FoodColour
    ,ISNULL(FoodBarcode, '') AS FoodBarcode
    ,ISNULL(FoodArticleNum, 0) AS FoodArticleNum
    ,ISNULL(FoodShelfLife, '9999-12-31') AS FoodShelfLIFe  -- fixed: invalid date '9999-21-31'; alias typo kept for caller compatibility
INTO
    #devfood
FROM
    report.GetOrderList(@foodgroup_id, @product_id, @productactive, @expiry, @expiryPeriod, @shop_id, @maxrows) AS dp
INNER JOIN
    food_group AS fg
    ON
    -- NOTE(review): original referenced undefined alias "it"; assuming dp — confirm
    fg.food_group_id = dp.item_FK_item_group_id
ORDER BY
    ROW_NUMBER() OVER (PARTITION BY
                           ISNULL(FoodType, '')
                           ,ISNULL(FoodColour, '')
                           ,ISNULL(FoodBarcode, '')
                           ,ISNULL(FoodArticleNum, 0)            -- fixed: '' default on a numeric column
                           ,ISNULL(FoodShelfLife, '9999-12-31')  -- fixed: invalid date '9999-21-31'
                       ORDER BY
                           (SELECT 0));
The CTE is maybe a little clearer in its intention for the next person who looks at the code, but the TOP might perform a little better.

Stored procedure becomes slow every couple of days

I am facing an issue on SQL Server in which my stored procedure becomes slow after couple of days.
Below is the sample of my stored procedure.
Could this be a caching issue on the server side? Can I increase the server's cache size to resolve the problem?
Normally the stored procedure returns data in one second.
-- Pages TABLE1 rows (plus looked-up category/type text and an XML-built image
-- list) between @START_VALUE and @END_VALUE by ROW_NUMBER.
-- NOTE(review): the CREATE PROCEDURE header is missing from the posted sample,
-- and "#NAME"/"##" are assumed to be "@NAME"/"@@" (site rendering artifact).
@START_VALUE int = null,
@END_VALUE int = null,
@UID NVARCHAR(MAX) = null  -- fixed: comma was missing after @END_VALUE and a trailing comma followed @UID
AS
BEGIN
DECLARE @count int;  -- fixed: @count was used without being declared
SELECT
dbo.TABLE1.ID,
ROW_NUMBER() OVER (ORDER BY TABLE1.UPDATED_ON desc) AS RN,
CONVERT(VARCHAR(10), dbo.TABLE1.DATE, 101) AS TDATE,
-- correlated scalar lookups run once per row; consider OUTER APPLY or joins if slow
CATEGORY = (
SELECT TOP 1 COLUMN1
FROM TABLE5 CT1
WHERE TABLE1.CATEGORY = CT1.CATEGORY_ID
),
TYPETEXT = (
SELECT TOP 1 COLUMN1
FROM TABLE6 CT1
WHERE TABLE1.TYPE = CT1.TYPE_ID
),
-- comma-separated C1 list per ID, built with FOR XML PATH; STUFF strips the leading comma
IMAGE = STUFF(( SELECT DISTINCT ',' + CAST(pm.C1 AS varchar(12))
FROM TABLE2 pm
WHERE pm.ID = TABLE1.ID AND pm.C1 IS NOT NULL AND pm.C1 <> ''
FOR XML PATH('')),
1, 1, '' ) INTO #tempRecords
FROM dbo.TABLE1
-- fixed: unbalanced parenthesis. NOTE(review): the "@UID is null OR ..." pattern
-- produces one shared plan for both cases; consider OPTION (RECOMPILE) or dynamic SQL.
WHERE (@UID is null OR dbo.TABLE1.ID = @UID)
-- NOTE(review): ROW_NUMBER orders by UPDATED_ON but this orders by UPDATED — confirm which is intended
ORDER BY TABLE1.UPDATED DESC
SELECT @count = COUNT(*) FROM #tempRecords;
SELECT *, CONVERT([int], @count) AS 'TOTAL_RECORDS'
FROM #tempRecords
WHERE #tempRecords.RN BETWEEN CONVERT([bigint], @START_VALUE) AND CONVERT([bigint], @END_VALUE)
END
GO
'
A few performance tips:
1) #UID is null OR dbo.TABLE1.ID = #UID --> this is bad because you'll have one execution plan when UID is null and when it's not. Build a dynamic sql query and you'll get 2 execution plans.
2) Update stats in a maintenance plan.
3) Check index fragmentation.
4) Try to do the same thing without using a temp table.
5) Try to avoid castings.

Merge - Only update if values have changed

I am running a merge in SQL Server. In my update, I want to only update the row if the values have changed. There is a version row that increments on each update. Below is an example:
-- Sync Employee from Employee_History, bumping VERSION on every update.
MERGE Employee as tgt USING
(SELECT Employee_History.Emp_ID
, Employee_History.First_Name
, Employee_History.Last_Name
FROM Employee_History)
as src (Emp_ID,First_Name,Last_Name)
ON tgt.Emp_ID = src.Emp_ID
WHEN MATCHED THEN
UPDATE SET
-- fixed: removed "Emp_ID = src.Emp_ID," (updating the join key is a no-op at
-- best) and the doubled comma it left behind
[VERSION] = tgt.VERSION + 1
,First_Name = src.First_Name
,Last_Name = src.Last_Name
WHEN NOT MATCHED BY target THEN
-- fixed: the column list contained the literal 0 and VALUES contained
-- [VERSION] — they were swapped; new rows start at version 0
INSERT (Emp_ID, [VERSION], First_Name, Last_Name)
VALUES
(src.Emp_ID, 0, src.First_Name, src.Last_Name);
Now, if I only wanted to update the row, and thus increment version, ONLY if the name has changed.
WHEN MATCHED can have AND . Also, no need to update EMP_ID .
...
-- Only touch the row (and bump VERSION) when a name actually differs.
WHEN MATCHED AND (tgt.First_Name <> src.First_Name  -- fixed: alias was "trg"; the MERGE defines "tgt"
OR tgt.Last_Name <> src.Last_Name) THEN UPDATE
SET
[VERSION] = tgt.VERSION + 1
,First_Name = src.First_Name
,Last_Name = src.Last_Name
...
If Last_Name or First_Name are nullable, you need to take care of NULL values while comparing trg.Last_Name <> src.Last_Name , for instance ISNULL(trg.Last_Name,'') <> ISNULL(src.Last_Name,'')
The answer provided by a1ex07 is the right answer, but i just wanted to expand on the difficulty in comparing a large number of columns, watching for nulls, etc.
I found that I could generate a checksum in some CTE's with hashbytes, target those CTEs in the merge, and then use the "update and...." condition specified above to compare the hashes:
-- Inline source rows: the desired permission catalog, one row per permission.
with SourcePermissions as (
SELECT 1 as Code, 1013 as ObjectTypeCode, 'Create Market' as ActionName, null as ModuleCode, 1 as AssignableTargetFlags
union all SELECT 2, 1013, 'View Market', null, 1
union all SELECT 3, 1013, 'Edit Market', null, 1
--...shortened....
)
-- Add a SHA2-256 hash over each whole source row: the row is serialized to XML
-- via CROSS APPLY ... FOR XML RAW, then hashed, so any column change changes the hash.
,SourcePermissions2 as (
select sp.*, HASHBYTES('sha2_256', xmlcol) as [Checksum]
from SourcePermissions sp
cross apply (select sp.* for xml raw) x(xmlcol)
)
-- Same row-hash computed over the live Permission table.
,TargetPermissions as (
select p.*, HASHBYTES('sha2_256', xmlcol) as [Checksum]
from Permission p
cross apply (select p.* for xml raw) x(xmlcol)
) --select * from SourcePermissions2 sp join TargetPermissions tp on sp.code=tp.code where sp.Checksum = tp.Checksum
-- Merging through the TargetPermissions CTE updates the underlying Permission table.
MERGE TargetPermissions AS target
USING (select * from SourcePermissions2) AS source ([Code] , [ObjectTypeCode] , [ActionName] , [ModuleCode] , [AssignableTargetFlags], [Checksum])
ON (target.Code = source.Code)
-- Comparing the two hashes stands in for comparing every column (with NULL handling).
WHEN MATCHED and source.[Checksum] != target.[Checksum] then
UPDATE SET [ObjectTypeCode] = source.[ObjectTypeCode], [ActionName]=source.[ActionName], [ModuleCode]=source.[ModuleCode], [AssignableTargetFlags] = source.[AssignableTargetFlags]
WHEN NOT MATCHED THEN
INSERT ([Code] , [ObjectTypeCode] , [ActionName] , [ModuleCode] , [AssignableTargetFlags])
VALUES (source.[Code] , source.[ObjectTypeCode] , source.[ActionName] , source.[ModuleCode] , source.[AssignableTargetFlags])
OUTPUT deleted.*, $action, inserted.[Code]
--only minor issue is that you can no longer do a inserted.* here since it gives error 404 (sql, not web), complaining about returning checksum which is included in the target cte but not the underlying table
,inserted.[ObjectTypeCode] , inserted.[ActionName] , inserted.[ModuleCode] , inserted.[AssignableTargetFlags]
;
Couple of notes: I could have simplified greatly with checksum or binary_checksum, but I always get collisions with those.
As to the 'why', this is part of an automated deployment to keep a lookup table up to date. The problem with the merge though is there is an indexed view that is complex and heavily used, so updates to the related tables are quite expensive.
Rather than avoiding an update altogether, you could change your [VERSION] + 1 code to add zero when names match:
-- Bump VERSION only when a name actually changed; otherwise add zero.
-- (Condition kept exactly as written: with NULLable names the <>/OR form's
-- three-valued logic falls through to the ELSE branch.)
[VERSION] = tgt.VERSION + (CASE
                               WHEN tgt.First_Name <> src.First_Name
                                 OR tgt.Last_Name <> src.Last_Name
                                   THEN 1
                               ELSE 0
                           END)
@a1ex07 thanks for the answer — a slight correction. I am not sure which SQL dialect/version this applies to, so this could be a dialect difference:
WHEN MATCHED AND CONDITION THEN UPDATE
The above is not valid syntax in Oracle; there, the following is valid instead:
WHEN MATCHED THEN UPDATE SET ... WHERE CONDITION WHEN NOT MATCHED THEN INSERT...
(Note: in SQL Server, `WHEN MATCHED AND <condition>` IS valid; the WHERE form shown here is Oracle's MERGE syntax — see the linked Oracle documentation.)
so would change it to
-- NOTE(review): this WHERE-on-UPDATE form is Oracle's MERGE syntax (per the
-- linked Oracle docs); SQL Server does NOT accept it — use WHEN MATCHED AND there.
WHEN MATCHED THEN UPDATE
SET
[VERSION] = tgt.VERSION + 1
,First_Name = src.First_Name
,Last_Name = src.Last_Name
WHERE
tgt.First_Name <> src.First_Name  -- fixed: alias was "trg" but the statement defines "tgt"
OR tgt.Last_Name <> src.Last_Name
https://docs.oracle.com/cd/B28359_01/server.111/b28286/statements_9016.htm#SQLRF01606

How to insert unique value into non-identity field

I'm trying to do an insert into an established table which has a primary key fields and another field (call it field1) that is unique (this other unique field has a unique constraint preventing my inserts). Field1 is not an identity field, so it does NOT autonumber. Unfortunately I can't change the table. Existing inserts are made using code to increment and all involve looping/cursors. Something like SELECT MAX(field1) + 1
So, is there anyway to do this insert without looping/cursor? This field means nothing to me, but there are already 500,000+ records using their silly numbering scheme, so I must respect that.
This is simplified (ReceiptNumber is the field I want to insert unique), but:
-- Inserts receipt rows, allocating ReceiptNumber as MAX()+1 inside a transaction.
-- NOTE(review): "#closePO"/"#employeeCode" are assumed to be @closePO/@employeeCode
-- (site rendering artifact); they must be declared or passed as parameters.
SET XACT_ABORT ON
Begin Transaction TransMain
Declare @nvErrMsg nvarchar(4000)
Declare @errNum int  -- fixed: @@ERROR resets after every statement; the original read it twice
--Insert inventory receipts
Insert Into Avanti_InventoryReceipts (
ReceiptNumber , ItemNumber , ReceiptDate , OrderNumber , JobNumber , Supplier ,
LineNumber , MultiLineNumber , [Status] , QtyOrdered , QtyReceived , QtyToReceive ,
QtyBackOrdered , Cost , Wholesale , LastCost , QtyToInvoice , QtyUsed ,
ReferenceNumber , [Description] , SupplierType , Processed , DateExpected , DateReceived ,
AccountNumber , Reference2 , EmployeeCode , ExtraCode , Location , RollNumber ,
QtyIssues , Notes , NumPackages , BundleSize , ConsignmentUnitPrice , RecFromProduction ,
QtyCommitted )
-- UPDLOCK + HOLDLOCK serializes concurrent MAX()+1 readers so two sessions
-- cannot allocate the same ReceiptNumber and hit the unique constraint
SELECT ( SELECT MAX(ReceiptNumber) + 1 FROM Avanti_inventoryReceipts WITH (UPDLOCK, HOLDLOCK) ) , CR.ItemNumber , Convert(char(8), GETDATE(), 112) , PONum , 'FL-INV' , PH.POVendor ,
0 , 0 , 'O' , CR.QtyOrdered , QtyReceivedToday , QtyReceivedToday ,
Case @closePO
When 'N' Then Case When ( QtyOrdered - QtyReceivedToday ) < 0 Then 0 Else ( QtyOrdered - QtyReceivedToday) End
When 'Y' Then 0
Else 0 End
, PD.TransCost * QtyReceivedToday , IH.PriceWholeSale , IH.CostLast , QtyReceivedToday , 0 ,
'' , PODetailDescription , '' , '' , '' , Convert(char(8), GETDATE(), 112) ,
'' , '' , @employeeCode , '' , 'F L E X O' , '' ,
0 , 'Flexo Materials' , 0 , 0 , 0 , '' , 0
FROM FI_CurrentReceiptData CR
LEFT JOIN Avanti_PODetails PD ON CR.PONum = PD.PONumber
LEFT JOIN Avanti_POHeader PH ON CR.PONum = PH.PONumber
LEFT JOIN Avanti_InventoryHeader IH ON CR.ItemNumber = IH.ItemNumber
SET @errNum = @@ERROR  -- capture immediately; any later statement clears it
IF @errNum <> 0
Begin
Select @nvErrMsg = 'Error entering into [InventoryReceipts] -' + [description]
From master..sysmessages
Where [error] = @errNum  -- fixed: original re-read @@ERROR here, which was already reset to 0 by the IF
RAISERROR ( @nvErrMsg , 16, 1 )
Goto Err_
End
Commit Transaction TransMain
Goto Exit_
Err_:
Rollback Transaction TransMain
Exit_:
SET XACT_ABORT OFF
You could do this:
-- UPDLOCK + HOLDLOCK keeps MAX(field1) stable until the insert commits, so two
-- concurrent sessions cannot both read the same MAX and collide on the unique key.
insert into mytable (field1, field2, ...)
values (( SELECT MAX(field1) + 1 from mytable WITH (UPDLOCK, HOLDLOCK)), 'value2', ...);
Why not looping? It should be quite efficient.
Since you already have a UNIQUE constraint on the field, you can:
Simply try to insert MAX(field1) + 1. Since there is index on UNIQUE field, MAX is fast.
If its passes, great you are done.
If it fails (which will typically be manifested as an exception in your client code), just try again until you succeed.
Most of the time, the INSERT will succeed right away. In rare instances where a concurrent user tries to insert the same value, you'll handle that gracefully by trying the "next" value.
I added an autonumber starting from 0 in client code and passed that in. Now I'm adding that value to the max receiptnumber to get a unique one. Also, I realized I already had an identity column in FI_CurrentReceiptData, but I didn't want to use that one because it won't start at 0 for each receipt set, and reseeding the identity each time seems like a waste of processor time.

Retrieving the Name of Running Stored Procedures Across Multiple Databases

I'm trying to write a query that reports the current database activity. The query links together various DMV's like sys.dm_exec_connections, sys.dm_exec_sessions, sys.dm_exec_requests, etc. The query also pulls the actual queries being run via the sys.dm_exec_sql_text function.
(I'm aware of Activity Monitor and SQL Profiler. I need to gather this information up in a query, so neither of these programs are relevant here.)
Much of the activity in our systems takes place in stored procedures and functions. It would be nice to see the names of these procedures in this query.
My question is:
How do I reliably display the name of the stored procedures or functions being executed?
I'm aware that the sys.dm_exec_sql_text function returns an objectid, and that I can join this objectid to sys.objects. The problem is, there are multiple databases on this server, and sys.objects only applies to the current database. I want this query to be able to show the running object name no matter what database the query happened to be run against.
So far the only solution I have is to use sp_msforeachdb create a temp table containing all the object IDs and names from all databases and join to this table from the result of the dm_exec_sql_text function.
Is there a better solution to the temp table approach? I feel like I'm missing something.
I would recommend Adam Machanic's excellent sp_WhoISActive. It doesn't return the exact object name, but does return the sql command being executed in a nice clickable form.
--I use the following proc:
USE [master]
GO
-- Activity snapshot: result set 1 lists sessions from sysprocesses (worst
-- offenders first, with ready-made KILL / DBCC INPUTBUFFER commands); result
-- set 2 lists the statements currently executing, via the DMVs.
-- NOTE(review): "##" in the post is assumed to be "@@" (site rendering artifact).
CREATE PROC [dbo].[sp_who3]
AS
SET NOCOUNT ON
-- fixed: removed DECLARE @LoginName / @AppName — they were never used
SELECT [SPID] = s.[spid]
, [CPU] = s.[cpu]
, [Physical_IO] = s.[physical_io]
, [Blocked] = s.[blocked]
, [LoginName] = CONVERT([sysname], RTRIM(s.[Loginame]))
, [Database] = d.[name]
, [AppName] = s.[program_name]
, [HostName] = s.[hostname]
, [Status] = s.[Status]
, [Cmd] = s.[cmd]
, [Last Batch] = s.[last_batch]
, [Kill Command] = 'Kill ' + CAST(s.[spid] AS varchar(10))
, [Buffer Command] = 'DBCC InputBuffer(' + CAST(s.[spid] AS varchar(10))
+ ')'
FROM [master].[dbo].[sysprocesses] s WITH(NOLOCK)
JOIN [master].[sys].[databases] d WITH(NOLOCK)
ON s.[dbid] = d.[database_id]
WHERE s.[Status] <> 'background' -- fixed: comparison operator was missing
AND s.[spid] <> @@SPID -- fixed: operator was missing; exclude the current session
ORDER BY s.[blocked] DESC, s.[physical_io] DESC, s.[cpu] DESC, CONVERT([sysname], RTRIM(s.[Loginame]))
BEGIN
-- Dirty reads are acceptable for monitoring and avoid blocking real work.
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
SELECT [Spid] = er.[session_Id]
, [ECID] = sp.[ECID]
, [Database] = DB_NAME(sp.[dbid])
, [User] = [nt_username]
, [Status] = er.[status]
, [Wait] = [wait_type]
-- Extract just the currently-executing statement from the batch text
-- (offsets are byte offsets into an nvarchar, hence the /2; -1 means "to end").
, [Individual Query] = SUBSTRING(qt.[text], er.[statement_start_offset] / 2, (CASE WHEN er.[statement_end_offset] = - 1 THEN LEN(CONVERT(VARCHAR(MAX), qt.[text])) * 2
ELSE er.[statement_end_offset] END - er.[statement_start_offset]) / 2)
, [Parent Query] = qt.[text]
, [Program] = sp.[program_name]
, [Hostname] = sp.[Hostname]
, [Domain] = sp.[nt_domain]
, [Start_time] = er.[Start_time]
FROM [sys].[dm_exec_requests] er WITH(NOLOCK)
INNER JOIN [sys].[sysprocesses] sp WITH(NOLOCK)
ON er.[session_id] = sp.[spid]
CROSS APPLY [sys].[dm_exec_sql_text](er.[sql_handle]) qt
WHERE er.[session_Id] > 50 -- Ignore system spids.
AND er.[session_Id] NOT IN (@@SPID) -- Ignore the current statement.
ORDER BY er.[session_Id], sp.[ECID]
END
GO