SQL Server: CASE WHEN in WHERE condition takes more time

I have the following SQL query, which takes a long time to run (more than 4 minutes), while the same query with a static '1' or '0' value executes in less than 3-4 seconds:
select
column1, column2
from
tablename
where
bitColumn_1 = (case when @bitColumn_1_param is null
then bitColumn_1
else @bitColumn_1_param
end)
and bitColumn_2 = (case when @bitColumn_2_param is null
then bitColumn_2
else @bitColumn_2_param
end)
and bitColumn_3 = (case when @bitColumn_3_param is null
then bitColumn_3
else @bitColumn_3_param
end)
and bitColumn_4 = (case when @bitColumn_4_param is null
then bitColumn_4
else @bitColumn_4_param
end)
and bitColumn_5 = (case when @bitColumn_5_param is null
then bitColumn_5
else @bitColumn_5_param
end)
and bitColumn_6 = (case when @bitColumn_6_param is null
then bitColumn_6
else @bitColumn_6_param
end)
and bitColumn_7 = (case when @bitColumn_7_param is null
then bitColumn_7
else @bitColumn_7_param
end)
Any help in improving the query would be appreciated.
Thanks in advance.

You can try the query below; it may help you:
select column1,column2 from tablename
where bitColumn_1 = ISNULL(@bitColumn_1_param, bitColumn_1)
AND bitColumn_2 = ISNULL(@bitColumn_2_param, bitColumn_2)
AND bitColumn_3 = ISNULL(@bitColumn_3_param, bitColumn_3)
AND bitColumn_4 = ISNULL(@bitColumn_4_param, bitColumn_4)
AND bitColumn_5 = ISNULL(@bitColumn_5_param, bitColumn_5)
AND bitColumn_6 = ISNULL(@bitColumn_6_param, bitColumn_6)
AND bitColumn_7 = ISNULL(@bitColumn_7_param, bitColumn_7)
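Note that ISNULL(@param, column) in the WHERE clause is still not sargable, so the optimizer may keep scanning the whole table. A commonly suggested alternative, sketched here on the assumption that the query runs as a parameterized batch or stored procedure, is the optional-parameter pattern combined with OPTION (RECOMPILE), so each execution is compiled for the parameter values actually supplied:
select column1, column2
from tablename
where (@bitColumn_1_param is null or bitColumn_1 = @bitColumn_1_param)
  and (@bitColumn_2_param is null or bitColumn_2 = @bitColumn_2_param)
  and (@bitColumn_3_param is null or bitColumn_3 = @bitColumn_3_param)
  and (@bitColumn_4_param is null or bitColumn_4 = @bitColumn_4_param)
  and (@bitColumn_5_param is null or bitColumn_5 = @bitColumn_5_param)
  and (@bitColumn_6_param is null or bitColumn_6 = @bitColumn_6_param)
  and (@bitColumn_7_param is null or bitColumn_7 = @bitColumn_7_param)
option (recompile)
The recompile adds a small compilation cost per execution, which is usually negligible for a query that otherwise runs for minutes.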

create table #test
(bitcolumn1 nchar(10),
bitcolumn2 nchar(10),
bitcolumn3 nchar(10),
bitcolumn4 nchar(10),
bitcolumn5 nchar(10),
bitcolumn6 nchar(10),
bitcolumn7 nchar(10)
)
insert #test
values
('x1',null,'x3','x4',null,'x6','x7')
declare
@bitcolumn1 nchar(10) = null,
@bitcolumn2 nchar(10) = 'z2',
@bitcolumn3 nchar(10) = 'z3',
@bitcolumn4 nchar(10) = null,
@bitcolumn5 nchar(10) = 'z5',
@bitcolumn6 nchar(10) = 'z6',
@bitcolumn7 nchar(10) = 'z7'
select * from #test
where bitcolumn1 = coalesce(@bitcolumn1, bitcolumn1) and
bitcolumn2 = coalesce(@bitcolumn2, bitcolumn2) and
bitcolumn3 = coalesce(@bitcolumn3, bitcolumn3) and
bitcolumn4 = coalesce(@bitcolumn4, bitcolumn4) and
bitcolumn5 = coalesce(@bitcolumn5, bitcolumn5) and
bitcolumn6 = coalesce(@bitcolumn6, bitcolumn6) and
bitcolumn7 = coalesce(@bitcolumn7, bitcolumn7)


Aggregate function in update query with case statement in sql server

I am trying to write an UPDATE query using an aggregate function and a CASE statement, but I am stuck.
Initially I wrote the following query, which gave me the error that an aggregate function cannot be used in an UPDATE statement:
UPDATE report
SET report.LoadDischargeQty =
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' AND cargo.CRG_Quantity is NOT NULL THEN cargo.CRG_Quantity
ELSE
CASE WHEN report.PlaId = 'LP' THEN
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' THEN ISNULL(SUM(CRG_SFgrMT),0) END -
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'A' THEN ISNULL(SUM(CRG_SFgrMT),0)END
WHEN report.PlaId = 'DP' THEN
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'A' THEN ISNULL(SUM(CRG_SFgrMT),0) END-
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' THEN ISNULL(SUM(CRG_SFgrMT),0)END
ELSE 0 END
END
from #CargoPerformanceReport report
INNER JOIN POSCARGO cargo ON cargo.POS_ID = report.PositionId AND ISNULL(cargo.CRG_Deleted,0)=0
So I refactored it as follows:
UPDATE report
SET report.LoadDischargeQty =
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' AND cargo.CRG_Quantity is NOT NULL THEN cargo.CRG_Quantity
ELSE
select quantity.dischargeQuantity from (SELECT CASE WHEN report.PlaId = 'LP' THEN
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' THEN ISNULL(SUM(CRG_SFgrMT),0) END -
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'A' THEN ISNULL(SUM(CRG_SFgrMT),0)END
WHEN report.PlaId = 'DP' THEN
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'A' THEN ISNULL(SUM(CRG_SFgrMT),0) END-
CASE WHEN SUBSTRING(CRG_ArrDep,1,1) = 'D' THEN ISNULL(SUM(CRG_SFgrMT),0)END
ELSE 0 END dischargeQuantity ) quantity
END
from #CargoPerformanceReport report
INNER JOIN POSCARGO cargo ON cargo.POS_ID = report.PositionId AND ISNULL(cargo.CRG_Deleted,0)=0
Table structure:
CREATE TABLE #CargoPerformanceReport
(
PositionId VARCHAR(12),
PortAndActivityName VARCHAR(100),
PlaId VARCHAR(12),
LoadDischargeQty REAL
);
Insert into #CargoPerformanceReport
Values('100',null,'LP',null)
CREATE TABLE #Poscargo
(
POS_ID VARCHAR(12),
CRG_ArrDep VARCHAR(3),
CRG_SFgrMT REAL,
CRG_Quantity REAL,
CRG_Deleted BIT
);
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT,CRG_Quantity,CRG_Deleted)
Values ('100','DD',100,null,0)
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT)
Values ('100','AD',100)
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT)
Values ('100','DD',200)
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT)
Values ('100','AD',50)
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT)
Values ('101','DL',200)
Insert Into #Poscargo(POS_ID,CRG_ArrDep,CRG_SFgrMT)
Values ('101','AL',200)
SELECT * FROM #Poscargo
SELECT * FROM #CargoPerformanceReport
Expected result:
PositionId | PlaId | LoadDischargeQty
100 | LP | 150
However, this is not the right way to do it, and it also has an error.
Does anyone have an optimized solution for this?
Put all that stuff into a subquery:
SELECT LoadDischargeQty =
CASE
WHEN report.PlaId = 'LP'
THEN 1
ELSE -1
END * cargo.qty
FROM CargoPerformanceReport report
CROSS APPLY(
SELECT SUM(
CASE
WHEN CRG_ArrDep LIKE 'D%'
THEN 1
WHEN CRG_ArrDep LIKE 'A%'
THEN -1
ELSE 0
END * ISNULL(CRG_SFgrMT, 0)
) qty
FROM POSCARGO cargo
WHERE cargo.POS_ID = report.PositionId
AND ISNULL(cargo.CRG_Deleted,0)=0
) cargo
http://sqlfiddle.com/#!18/648d0/7
I don't understand how the CRG_Quantity column is supposed to be used, and you did not provide any row with that data, so I removed it to show the aggregation itself. It works fine; you'll get your 150. You can easily convert it into an UPDATE statement.
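For example, the conversion might look like this (a sketch only, reusing the temp tables from the question and still ignoring CRG_Quantity for the reason above):
UPDATE report
SET LoadDischargeQty =
    CASE WHEN report.PlaId = 'LP' THEN 1 ELSE -1 END * cargo.qty
FROM #CargoPerformanceReport report
CROSS APPLY (
    SELECT SUM(
        CASE
            WHEN CRG_ArrDep LIKE 'D%' THEN 1
            WHEN CRG_ArrDep LIKE 'A%' THEN -1
            ELSE 0
        END * ISNULL(CRG_SFgrMT, 0)
    ) qty
    FROM #Poscargo cargo
    WHERE cargo.POS_ID = report.PositionId
      AND ISNULL(cargo.CRG_Deleted, 0) = 0
) cargo;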
Perhaps something like this? It uses a CASE expression over summed CASE expressions.
You can test it on SQL Fiddle.
UPDATE t
SET LoadDischargeQty = q.CalcDischargeQty
FROM #CargoPerformanceReport t
JOIN
(
SELECT report.PositionId, report.PlaId,
CASE
WHEN SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'D' THEN cargo.CRG_Quantity END) IS NOT NULL
THEN SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'D' THEN cargo.CRG_Quantity END)
ELSE CASE
WHEN report.PlaId = 'LP'
THEN SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'D' THEN cargo.CRG_SFgrMT END) -
SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'A' THEN cargo.CRG_SFgrMT END)
WHEN report.PlaId = 'DP'
THEN SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'A' THEN cargo.CRG_SFgrMT END) -
SUM(CASE WHEN LEFT(cargo.CRG_ArrDep,1) = 'D' THEN cargo.CRG_SFgrMT END)
ELSE 0
END
END AS CalcDischargeQty
FROM #CargoPerformanceReport report
INNER JOIN #Poscargo cargo
ON cargo.POS_ID = report.PositionId AND (cargo.CRG_Deleted = 0 OR cargo.CRG_Deleted IS NULL)
GROUP BY report.PositionId, report.PlaId
) q ON t.PositionId = q.PositionId;
Sample Data
IF OBJECT_ID('tempdb..#CargoPerformanceReport') IS NOT NULL DROP TABLE #CargoPerformanceReport;
CREATE TABLE #CargoPerformanceReport
(
PositionId VARCHAR(12) PRIMARY KEY,
PortAndActivityName VARCHAR(100),
PlaId VARCHAR(12),
LoadDischargeQty REAL
);
IF OBJECT_ID('tempdb..#Poscargo') IS NOT NULL DROP TABLE #Poscargo;
CREATE TABLE #Poscargo
(
POS_ID VARCHAR(12),
CRG_ArrDep VARCHAR(3),
CRG_SFgrMT REAL,
CRG_Quantity REAL,
CRG_Deleted BIT
);
Insert into #CargoPerformanceReport Values
('100','name1','LP',null),
('101','name2','DP',null),
('102','name3','DP',null);
Insert Into #Poscargo(POS_ID, CRG_ArrDep, CRG_SFgrMT, CRG_Quantity, CRG_Deleted) Values
('100','DD',100,null,0)
,('100','AD',100,null,0)
,('100','DD',200,null,0)
,('100','AD',50,null,0)
,('101','DL',100,null,0)
,('101','AL',200,null,0)
,('102','DL',100,500,0)
,('102','AL',200,null,0);
Result
PositionId PortAndActivityName PlaId LoadDischargeQty
---------- ------------------- ----- ----------------
100 name1 LP 150
101 name2 DP 100
102 name3 DP 500

How do I hash a column of a table in SQL Server?

I receive raw data files from external sources and need to provide analysis on them. I load the files into a table & set the fields as varchars, then run a complex SQL script that does some automated analysis. One issue I've been trying to resolve is: How to tell if a column of data is duplicated with 1 or more other columns in that same table?
My goal is to have, for every column, a hash, checksum, or something similar that looks at a column's values in every row in the order they come in. I have dynamic SQL that loops through every field (different tables will have a variable number of columns) based on the fields listed in INFORMATION_SCHEMA.COLUMNS, so no concerns on how to accomplish that part.
I've been researching this all day but can't seem to find any sensible way to hash every row of a field. Google & StackOverflow searches return how to do various things to rows of data, but I couldn't find much on how to do the same thing vertically on a field.
So, I considered 2 possibilities & hit 2 roadblocks:
HASHBYTES - Use 'FOR XML PATH' (or similar) to grab every row & use a delimiter between each row, then use HASHBYTES to hash the long string. Unfortunately, this won't work for me since I'm running SQL Server 2014, and HASHBYTES is limited to an input of 8000 characters. (I can also imagine performance would be abysmal on tables with millions of rows, looped for 200+ columns).
CHECKSUM + CHECKSUM_AGG - Get the CHECKSUM of each value, turning it into an integer, then use CHECKSUM_AGG on the results (since CHECKSUM_AGG needs integers). This looks promising, but the order of the data is not considered, returning the same value on different rows. Plus the risk of collisions is higher.
The second looked promising but doesn't work as I had hoped...
declare @t1 table
(col_1 varchar(5)
, col_2 varchar(5)
, col_3 varchar(5));
insert into @t1
values ('ABC', 'ABC', 'ABC')
, ('ABC', 'ABC', 'BCD')
, ('BCD', 'BCD', NULL)
, (NULL, NULL, 'ABC');
select * from @t1;
select cs_1 = CHECKSUM(col_1)
, cs_2 = CHECKSUM(col_2)
, cs_3 = CHECKSUM(col_3)
from @t1;
select csa_1 = CHECKSUM_AGG(CHECKSUM([col_1]))
, csa_2 = CHECKSUM_AGG(CHECKSUM([col_2]))
, csa_3 = CHECKSUM_AGG(CHECKSUM([col_3]))
from @t1;
In the last result set, all 3 columns bring back the same value: 2147449198.
Desired results: My goal is to have some code where csa_1 and csa_2 bring back the same value, while csa_3 brings back a different value, indicating that it's its own unique set.
You could compare every column combo in this way, rather than using hashes:
select case when count(case when column1 = column2 then 1 else null end) = count(1) then 1 else 0 end Column1EqualsColumn2
, case when count(case when column1 = column3 then 1 else null end) = count(1) then 1 else 0 end Column1EqualsColumn3
, case when count(case when column1 = column4 then 1 else null end) = count(1) then 1 else 0 end Column1EqualsColumn4
, case when count(case when column1 = column5 then 1 else null end) = count(1) then 1 else 0 end Column1EqualsColumn5
, case when count(case when column2 = column3 then 1 else null end) = count(1) then 1 else 0 end Column2EqualsColumn3
, case when count(case when column2 = column4 then 1 else null end) = count(1) then 1 else 0 end Column2EqualsColumn4
, case when count(case when column2 = column5 then 1 else null end) = count(1) then 1 else 0 end Column2EqualsColumn5
, case when count(case when column3 = column4 then 1 else null end) = count(1) then 1 else 0 end Column3EqualsColumn4
, case when count(case when column3 = column5 then 1 else null end) = count(1) then 1 else 0 end Column3EqualsColumn5
, case when count(case when column4 = column5 then 1 else null end) = count(1) then 1 else 0 end Column4EqualsColumn5
from myData a
Here's the setup code:
create table myData
(
id integer not null identity(1,1)
, column1 nvarchar (32)
, column2 nvarchar (32)
, column3 nvarchar (32)
, column4 nvarchar (32)
, column5 nvarchar (32)
)
insert myData (column1, column2, column3, column4, column5)
values ('hello', 'hello', 'no', 'match', 'match')
,('world', 'world', 'world', 'world', 'world')
,('repeat', 'repeat', 'repeat', 'repeat', 'repeat')
,('me', 'me', 'me', 'me', 'me')
And here's the obligatory SQL Fiddle.
Also, to save you having to write this out, here's some code to generate the above. This version also includes logic to handle the scenario where both columns' values are null:
declare @tableName sysname = 'myData'
, @sql nvarchar(max)
;with cte as (
select name, row_number() over (order by column_id) r
from sys.columns
where object_id = object_id(@tableName, 'U') --filter on our table
and name not in ('id') --only process for the columns we're interested in
)
select @sql = coalesce(@sql + char(10) + ', ', 'select') + ' case when count(case when ' + quotename(a.name) + ' = ' + quotename(b.name) + ' or (' + quotename(a.name) + ' is null and ' + quotename(b.name) + ' is null) then 1 else null end) = count(1) then 1 else 0 end ' + quotename(a.name + '_' + b.name)
from cte a
inner join cte b
on b.r > a.r
order by a.r, b.r
set @sql = @sql + char(10) + 'from ' + quotename(@tableName)
print @sql
NB: That's not to say you should run it as dynamic SQL; rather you can use this to generate your code (unless you need to support the scenario where the number or name of columns may vary at runtime, in which case you'd obviously want the dynamic option).
NEW SOLUTION
EDIT: Based on some new information, namely that there may be more than 200 columns, my suggestion is to compute hashes for each column, but perform it in the ETL tool.
Essentially, feed your data buffer through a transformation that computes a cryptographic hash of the previously-computed hash concatenated with the current column value. When you reach the end of the stream, you will have serially-generated hash values for each column, that are a proxy for the content and order of each set.
Then, you can compare each to all of the others almost instantly, as opposed to running 20,000 table scans.
OLD SOLUTION
Try this. Basically, you'll need a query like this to analyze each column against the others. There is not really a feasible hash-based solution. Just compare each set by its insertion order (some sort of row sequence number). Either generate this number during ingestion, or project it during retrieval, if you have a computationally-feasible means of doing so.
NOTE: I took liberties with the NULL here, comparing it as an empty string.
declare @t1 table
(
rownum int identity(1,1)
, col_1 varchar(5)
, col_2 varchar(5)
, col_3 varchar(5));
insert into @t1
values ('ABC', 'ABC', 'ABC')
, ('ABC', 'ABC', 'BCD')
, ('BCD', 'BCD', NULL)
, (NULL, NULL, 'ABC');
with col_1_sets as
(
select
t1.rownum as col_1_rownum
, CASE WHEN t2.rownum IS NULL THEN 1 ELSE 0 END AS col_2_miss
, CASE WHEN t3.rownum IS NULL THEN 1 ELSE 0 END AS col_3_miss
from
@t1 as t1
left join @t1 as t2 on
t1.rownum = t2.rownum
AND isnull(t1.col_1, '') = isnull(t2.col_2, '')
left join @t1 as t3 on
t1.rownum = t3.rownum
AND isnull(t1.col_1, '') = isnull(t3.col_3, '')
),
col_1_misses as
(
select
SUM(col_2_miss) as col_2_misses
, SUM(col_3_miss) as col_3_misses
from
col_1_sets
)
select
'col_1' as column_name
, CASE WHEN col_2_misses = 0 THEN 1 ELSE 0 END AS is_col_2_match
, CASE WHEN col_3_misses = 0 THEN 1 ELSE 0 END AS is_col_3_match
from
col_1_misses
Results:
+-------------+----------------+----------------+
| column_name | is_col_2_match | is_col_3_match |
+-------------+----------------+----------------+
| col_1 | 1 | 0 |
+-------------+----------------+----------------+
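If a hash-style screen is still preferred inside SQL Server, here is one hedged sketch built on the @t1 table variable above, which already carries a rownum identity column: folding the row number into each per-row CHECKSUM makes CHECKSUM_AGG order-sensitive. CHECKSUM collisions remain possible, so treat equal values as candidate matches to verify rather than as proof.
select csa_1 = CHECKSUM_AGG(CHECKSUM(rownum, col_1))
     , csa_2 = CHECKSUM_AGG(CHECKSUM(rownum, col_2))
     , csa_3 = CHECKSUM_AGG(CHECKSUM(rownum, col_3))
from @t1;
-- with the sample rows above, csa_1 and csa_2 come back equal and csa_3 should differ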

SQL CASE wrong output

I have run into this weird behavior using CASE in SQL Server 2014.
This is my query:
SELECT (CASE WHEN dbo.GetFunctionAge(C.Birthdate) = 0
THEN '' ELSE dbo.GetFunctionAge(C.Birthdate)
END) AS Age
,dbo.GetFunctionAge(C.Birthdate)
,c.Birthdate
FROM Client C
WHERE ClientID = '34d0d845-e3a6-4078-8936-953ff3378eac'
This is the output: the Age column shows 0, and the extra unaliased dbo.GetFunctionAge column also shows 0.
Here is the GetFunctionAge function, in case you're wondering:
IF EXISTS (
SELECT *
FROM dbo.sysobjects
WHERE ID = OBJECT_ID(N'[dbo].[GetFunctionAge]') AND
xtype in (N'FN', N'IF', N'TF'))
DROP FUNCTION [dbo].[GetFunctionAge]
GO
CREATE FUNCTION [dbo].[GetFunctionAge](@BirthDate DATETIME)
RETURNS INT
AS
BEGIN
DECLARE @Age INT
IF(@BirthDate = '1753-01-01 00:00:00.000')
BEGIN
SET @Age = 0
END
ELSE
BEGIN
SET @Age = DATEDIFF(hour,@BirthDate,GETDATE())/8766
END
RETURN @Age
END
GO
Question:
Why is the Age column in my output 0 when it should be ''?
I added the unaliased (No column name) column to show that the function's output is 0, so based on my CASE condition the expected Age output is '', not 0.
I didn't receive any error about inconsistent data types, so why is CASE behaving like that?
Thanks to anyone who can clarify this for me.
SELECT
(CASE
WHEN a.ageint = 0 THEN ''
ELSE cast(a.ageint as varchar(3))
END) AS Age
, a.ageint
, c.Birthdate
FROM Client as C
CROSS APPLY (
SELECT
ISNULL(dbo.GetFunctionAge(C.Birthdate), 0) AS ageint
) AS a
WHERE ClientID = '34d0d845-e3a6-4078-8936-953ff3378eac'
;
You can cast the result to varchar so you can return ''. A CASE expression returns a single data type, determined by data type precedence; since dbo.GetFunctionAge returns INT, which outranks VARCHAR, the '' branch is implicitly converted to INT and comes out as 0.
SELECT (CASE WHEN dbo.GetFunctionAge(C.Birthdate) = 0
THEN '' ELSE Cast(dbo.GetFunctionAge(C.Birthdate) as varchar(5))
END) AS Age
,dbo.GetFunctionAge(C.Birthdate)
,c.Birthdate
FROM Client C
WHERE ClientID = '34d0d845-e3a6-4078-8936-953ff3378eac'
But if you wish to keep your Age column as data type INT,
you could just use NULL instead of ''.
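A quick standalone example (not tied to the function above) that shows the data type precedence rule in isolation:
select case when 1 = 1 then '' else 5 end as result;
-- returns 0, not '': int outranks varchar, so the empty string is implicitly converted to int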

Execution of sql query takes a long time

I have a problem with my query: it is really slow. I am using SQL Server 2008 and have 3 databases with hundreds of sample rows in them. The query returns a name, a value, and a computed percentage based on the 3 databases, but it takes almost 10 minutes to execute, which is far too long. I am still learning SQL, so I think my query does not follow best practices and is not well organized. Can anybody point out where or how I can improve it to run faster?
SELECT data.Ret,
case
when @group_by = 'site' OR (@group_by = 'attribute' AND @attribute_id = '5') and (data.rowid % 50) = 0 then (data.rowid / 50)-1
when @group_by = 'site' OR (@group_by = 'attribute' AND @attribute_id = '5') then (data.rowid / 50)
else 0 end as batchStore
,data.MajorName,data.MinorName,data.MajorVal,data.MinorVal,data.Version
,data.A_Percent,data.T_Percent,data.F_Percent
from
(
SELECT report.Ret,
CASE when @group_by = 'site' OR (@group_by = 'attribute' AND @attribute_id = '5')
then row_number() over (PARTITION BY report.Ret,report.Version order by report.Ret, report.MajorName)
else 0 end as rowid
,report.MajorName,report.MinorName,report.MajorVal,report.MinorVal,report.Version
,report.GTotal_A,report.GTotal_T,report.GTotal_F
,ISNULL(sum(report.Abn) / NULLIF(cast(report.GTotal_A as decimal),0),0) * 100 as A_Percent
,ISNULL(sum(report.Trn) / NULLIF(cast(report.GTotal_T as decimal),0),0) * 100 as T_Percent
,ISNULL(sum(report.Fld)/ NULLIF(cast(report.GTotal_F as decimal),0) * 100,0) as F_Percent
From
(
Select
CASE @group_by
WHEN 'object' THEN csl.s_name
WHEN 'site' THEN csl.s_name
WHEN 'year' THEN CAST(YEAR(dy.Day_Stamp) AS VARCHAR(50))
WHEN 'attribute' THEN CAST(coalesce(attrib.AttributeName,'') AS VARCHAR(50))
ELSE ''
END as MajorName,
CASE @group_by
WHEN 'object' THEN l.l_name
WHEN 'site' THEN ''
WHEN 'attribute' THEN CAST(coalesce(attrib.AttributeName,'') AS VARCHAR(50))
ELSE ''
END as MinorName,
CASE @group_by
WHEN 'object' THEN csl.s_name
WHEN 'site' THEN csl.s_name
WHEN 'year' THEN CAST(YEAR(dy.Day_Stamp) AS VARCHAR(50))
WHEN 'attribute' THEN CAST(coalesce(attrib.AttributeValue,'') AS VARCHAR(50))
ELSE ''
END as MajorVal,
CASE @group_by
WHEN 'object' THEN l.l_name
WHEN 'site' THEN ''
WHEN 'attribute' THEN CAST(coalesce(attrib.AttributeValue,'') AS VARCHAR(50))
ELSE ''
END as MinorVal,
csl.Cust_Name as Ret,l.SWVersion as Version
,d.Abn,d.Trn,d.Fld,data.GTotal_A ,data.GTotal_T,data.GTotal_F
From db_mon.dbo.CustSL csl
join db_tax.dbo.vwLane l
on (l.externalid = csl.custsl_id)
join db_mon.dbo.DaySummary dy
on (dy.Str = csl.s_name and dy.Lne = csl.l_name and year(dy.day_stamp) = year(@time_start_date) and year(dy.day_stamp) = year(@time_end_date))
Left Outer Join
(
Select a.id As AttributeId, a.attribute_name As AttributeName,
(Case When a.attribute_value_type = 'string' Then ea.string_value
Else (Case When a.attribute_value_type = 'integer' Then cast(ea.integer_value as nvarchar(100))
Else (Case When a.attribute_value_type = 'date' Then cast(ea.date_value as nvarchar(100))
Else (Case When a.attribute_value_type = 'boolean' Then cast(ea.boolean_value as nvarchar(100))
Else (Case When a.attribute_value_type = 'entity' Then cast(ea.ref_entity_id as nvarchar(100)) Else null End)
End)
End)
End)
End) As AttributeValue,
e.id As EntityId
From db_tax.dbo.entity_type et
Inner Join db_tax.dbo.entity As e on et.id = e.entity_type_id
Inner Join db_tax.dbo.entity_attribute As ea on e.id = ea.entity_id
Inner Join db_tax.dbo.attribute As a on ea.attribute_id = a.id
WHERE et.entity_type_name in ('Sticker','Label') And
a.id = (case WHEN @attribute_id = '' then 1 else cast(@attribute_id as int) end)
) AS attrib
On attrib.EntityId = l.L_Id
inner join db_mon.dbo.DaySummary d
on (csl.Cust_Name = d.Ret and csl.s_name = d.stckr and csl.l_name = d.label and l.SWVersion = d.Version)
join (
SELECT Ret,version,sum(Abn) as GTotal_A,sum(Trn) as GTotal_T,sum(Fld) as GTotal_F
from db_mon.dbo.DaySummary
where day_stamp >= @time_start_date and day_stamp <= @time_end_date
GROUP BY Ret,version
) data
on (d.Ret = data.Ret and l.SWVersion = data.Version)
WHERE (CHARINDEX(',' + CONVERT(VARCHAR,l.S_Id) + ',','xxx,' + @entities + ',xxx')>0 OR CHARINDEX(',' + CONVERT(VARCHAR,l.L_Id) + ',','xxx,' + @entities + ',xxx')>0)
and d.day_stamp >= @time_start_date
and d.day_stamp <= @time_end_date
) As report
Group By report.Ret,report.Version,report.MajorName,report.MinorName,report.MajorVal,report.MinorVal
,report.GTotal_A,report.GTotal_T,report.GTotal_F
)data
order By data.Ret,data.Version,batchStore,data.MajorName,data.MinorName,data.MajorVal,data.MinorVal
Does using a lot of joins cause the slow execution?
Subselect queries will generally be slower than plain joins.
You are nesting subselects three levels deep, and that will hurt your performance regardless of index changes.
You need to rewrite the query to reduce that nesting of subselects.
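As one incremental step in that direction (a sketch only, assuming the same db_mon.dbo.DaySummary table and date parameters as in the question), you could materialize the grand-total derived table once into an indexed temp table and join to it instead of computing it inline:
select Ret, version,
       sum(Abn) as GTotal_A, sum(Trn) as GTotal_T, sum(Fld) as GTotal_F
into #GrandTotals
from db_mon.dbo.DaySummary
where day_stamp >= @time_start_date and day_stamp <= @time_end_date
group by Ret, version;
create clustered index IX_GrandTotals on #GrandTotals (Ret, version);
-- then join the main query to #GrandTotals where the inline "data" derived table is used today
Splitting the comma-separated @entities parameter into a temp table and joining on it, instead of the CHARINDEX checks, is another common improvement, because string matching against a delimited list prevents index seeks on l.S_Id and l.L_Id.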

How to Convert Centesimal to Sexagesimal

I have this query, and I need to convert the centesimal value from the database to sexagesimal (hours and minutes):
SELECT
DISTINCT RD_MAT,
SUM(CASE WHEN RD_PD IN ('421') THEN RD_HORAS ELSE 0 END) AS 'ATRASOS',
SUM(CASE WHEN RD_PD IN ('420') THEN RD_HORAS ELSE 0 END) AS 'FALTAS',
SUM(CASE WHEN RD_PD IN ('084') THEN RD_HORAS ELSE 0 END) AS 'H.EXTRA 100%',
SUM(CASE WHEN RD_PD IN ('080') THEN RD_HORAS ELSE 0 END) AS 'H.EXTRA 50% ',
SUM(CASE WHEN RD_PD IN ('082') THEN RD_HORAS ELSE 0 END) AS 'H.EXTRA NOTURNA'
FROM SRD020
INNER JOIN SRV020 ON RD_PD = RV_COD
AND SRV020.D_E_L_E_T_ <> '*'
AND SRD020.D_E_L_E_T_ <> '*'
AND RD_MAT = '000123'
WHERE LEFT(RD_DATPGT,6) = '201304'
GROUP BY RD_MAT
The value returned for H.EXTRA 100% in centesimal is 16:53, but in sexagesimal it should be 16:49.
For example: 40 + 40 = 80 in centesimal, but 40 + 40 = 1:20 in sexagesimal. That is the value I need.
So, it looks like you're going for something like this. I'm casting to VARCHAR for output purposes, which you of course do not have to do.
DECLARE @value1 INT, @value2 INT
SET @value1 = 40
SET @value2 = 50
SELECT CAST(FLOOR((@value1+@value2)/60) AS VARCHAR(50)) + ':' + CAST((@value1+@value2)%60 AS VARCHAR(50))
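The same idea applied to centesimal hours, as a small hedged sketch (assuming a value like 16.53 means 16 hours plus 53 hundredths of an hour):
DECLARE @centesimal DECIMAL(10,2) = 16.53
SELECT CAST(FLOOR(@centesimal) AS VARCHAR(10)) + ':' +
       RIGHT('0' + CAST(CAST(ROUND((@centesimal - FLOOR(@centesimal)) * 60, 0) AS INT) AS VARCHAR(2)), 2) AS sexagesimal
-- returns 16:32 (0.53 of an hour is roughly 32 minutes)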
Declare @h Varchar(5) = '16:53'
Select Cast(DateAdd(SS, CAST(REPLACE(@h,':','.') as Float)*60, 0) as Time)
>>> 00:16:31.0000000