/*
Stored Procedure
April 30, 2021
Mohamad Chaker
*/
USE CIS111_BookStoreMC
GO
--Drop any existing copy first so the script can be re-run (idempotent deploy)
IF OBJECT_ID('spAssetInfo') IS NOT NULL
DROP PROC spAssetInfo
GO
--Create the stored procedure: lists the asset inventory with each asset's
--current value (20% depreciation per year) and its full-depreciation date.
CREATE PROC spAssetInfo
AS
--Copy the asset inventory into a temporary working table
SELECT AssetID, Description, Cost, PurchaseDate
INTO #temptable
FROM Assets
--Column for the current value of each asset in the current year
ALTER TABLE #temptable ADD CurrentValue MONEY
DECLARE @years INT;        --whole years elapsed since purchase
DECLARE @currentYear INT;
SET @currentYear = YEAR(GETDATE());
DECLARE @cost MONEY;       --running depreciated cost of the asset being processed
--Column for the date an asset is completely depreciated
ALTER TABLE #temptable ADD CompleteDepreciationYear DATE
--@value holds the cost of an asset and is used to check when the value drops below 0.1
DECLARE @value MONEY;
SET @value = 0.00;
--@counter iterates over AssetID values
--NOTE(review): assumes AssetIDs are contiguous 1..COUNT(*) -- confirm against the Assets table
DECLARE @counter INT;
--@depreciationNum holds the number of years until an item is completely depreciated, used in DATEADD()
DECLARE @depreciationNum INT;
SET @counter = 1;
DECLARE @assetsTableSize INT;
SET @assetsTableSize = (SELECT COUNT(*) FROM Assets);
WHILE (@counter <= @assetsTableSize)
BEGIN
    --Current value: apply 20% depreciation once per elapsed year
    SET @years = @currentYear - (SELECT YEAR(PurchaseDate) FROM Assets WHERE AssetID = @counter);
    SET @cost = (SELECT Cost FROM Assets WHERE AssetID = @counter);
    WHILE (@years > 0)
    BEGIN
        SET @cost = @cost * 0.8;
        SET @years = @years - 1;
    END
    --store the current value on the matching row
    UPDATE #temptable
    SET CurrentValue = @cost
    WHERE AssetID = @counter;
    --Depreciation year: count the years until the value falls to 0.1
    SET @depreciationNum = 0;
    SET @value = (SELECT Cost FROM Assets WHERE AssetID = @counter);
    WHILE (@value > 0.1)
    BEGIN
        SET @value = @value * 0.8;
        SET @depreciationNum = @depreciationNum + 1;
    END
    --store the full-depreciation date on the matching row
    UPDATE #temptable
    SET CompleteDepreciationYear = CAST(DATEADD(year, @depreciationNum, (SELECT PurchaseDate FROM Assets WHERE AssetID = @counter)) AS DATE)
    WHERE AssetID = @counter;
    --increment the counter
    SET @counter = @counter + 1;
END
--display the assets inventory
SELECT * FROM #temptable
Prompt: Show an asset inventory along with the current value (minus 20% depreciation per year). Also show the year when each item will be
completely depreciated.
basically I am trying to show an Asset Inventory with PurchaseDate and the date the item is completely depreciated, the asset depreciates 20% per year. I tried to do a temporary table and copy some of the assets table columns to it then adding a column for the date when the asset completely depreciates.
I implemented this using an iterative solution but I was advised to post on SO to try and do this using a set-based implementation. I am new to SQL and newly learned that it's a set-based language and that it isn't very good with iterative solutions.
Thank you in advance!
The formula to calculate the current value is:
CurrentValue = InitialValue * (1 - 0.2)^YearsElapsed, where YearsElapsed = YEAR(Today) - YEAR(InitialDate)
To find the NumberOfYears that the asset will reach 0.1:
0.1 = CurrentValue * (1-0.2)^NumberOfYears. Taking the log from both sides:
Log(0.1) = Log(CurrentValue * 0.8^NumberOfYears)
Log(0.1) = Log(CurrentValue) + Log(0.8^NumberOfYears)
Log(0.1) = Log(CurrentValue) + NumberOfYears * Log(0.8)
[Log(0.1) - Log(CurrentValue)] / Log(0.8) = NumberOfYears
You can create a function that use this formula and returns the NumberOfYears:
-- Returns the (fractional) number of years of 20%-per-year depreciation
-- until a value of @InitialValue falls to 0.1:
--   0.1 = @InitialValue * 0.8^n  =>  n = (LOG(0.1) - LOG(@InitialValue)) / LOG(0.8)
-- Pass the asset's Cost for years-from-purchase, or an already-depreciated
-- current value for years-from-today.
CREATE FUNCTION GetNumberOfYears(
    @InitialValue FLOAT
)
RETURNS FLOAT
AS
BEGIN
    DECLARE @YearsToZero FLOAT;
    -- NumberOfYears formula (derived by taking logs of both sides)
    SELECT @YearsToZero = (LOG(0.1) - LOG(@InitialValue)) / LOG(0.8);
    RETURN @YearsToZero;
END
It will give you the number of years (e.g. 54.6724159 years).
Then use it like SELECT GetNumberOfYears(Cost) FROM Assets
Or, you can use directly from your SELECT:
SELECT
    -- 20% depreciation per elapsed calendar year (factor 0.8, not 0.85)
    Cost * POWER(CAST(0.8 AS FLOAT), YEAR(GETDATE()) - YEAR(PurchaseDate)) AS CurrentValue,
    -- years until the depreciated value reaches 0.1
    (LOG(0.1) - LOG(Cost * POWER(CAST(0.8 AS FLOAT), YEAR(GETDATE()) - YEAR(PurchaseDate)))) / LOG(0.8) AS YearsToZero
FROM Assets
Related
I have a stored procedure where a populate a base table with key fields and then loop through that table to get those key fields to requery a source data table to get detailed counts. The issue I am having is that when there are a lot of rows in the base table, the SP takes a long time to run. I've loaded the source data into temp tables and created index's and made the base table a temp table with an index as well.
-- Working table: one row per (LOB, Program, Project, Container, week),
-- to be filled in with detail counts.
CREATE TABLE #SupplementalData1
(
    ROWID int IDENTITY(1, 1),
    LOB varchar(100),
    Program varchar(100),
    Project varchar(100),
    Container varchar(255),
    RPTNG_Week date,
    Scheduled_Open int,
    Still_Open int,
    Scheduled_Closed int,
    Actual_Closed int
);
CREATE INDEX t1
ON #SupplementalData1 (LOB, Program, Project, Container, RPTNG_Week);
-- Seed the working table: every distinct key combination crossed with every
-- reporting Monday in range (deliberate cross join - no column links
-- #data to Schedule_Date_Lookup).
INSERT INTO #SupplementalData1 (LOB, Program, Project, Container, RPTNG_Week)
SELECT DISTINCT
    a.LOB_CODE,
    a.PRGRM_NAME,
    a.PRJCT_NAME,
    a.CNTNR_NAME,
    b.Monday
FROM
    #data a
    CROSS JOIN Schedule_Date_Lookup b
WHERE
    b.Monday >= @MinMonday
    AND b.Monday <= @MaxMonday
ORDER BY
    a.LOB_CODE,
    a.PRGRM_NAME,
    a.PRJCT_NAME,
    b.Monday;
-- Trim weeks past the reporting window
DELETE FROM #SupplementalData1
WHERE RPTNG_Week > @EndDate;
-- Get the number of rows in the looping table
DECLARE @RowCount int;
SET @RowCount = (SELECT COUNT(ROWID) FROM #SupplementalData1);
-- Declare an iterator
DECLARE @I int;
-- Initialize the iterator
SET @I = 1;
--Declare Common Variables
DECLARE @iLOB varchar(MAX),
        @iProgram varchar(MAX),
        @iProject varchar(MAX),
        @iContainer varchar(MAX),
        @iRPTNG_Week date,
        @Value int;
-- Loop through the rows of #SupplementalData1 one ROWID at a time
-- (the set-based CROSS APPLY rewrite below replaces this whole loop)
WHILE (@I <= @RowCount)
BEGIN
    -- Fetch the key columns of the current row into variables
    SELECT @iLOB = LOB,
           @iProgram = Program,
           @iProject = Project,
           @iContainer = Container,
           @iRPTNG_Week = RPTNG_Week
    FROM #SupplementalData1
    WHERE ROWID = @I;
    -- Count matching detail rows still scheduled open as of this week
    SET @Value = (SELECT COUNT(CNTNR_NAME) AS Scheduled_Open_Sum
                  FROM #data c
                  WHERE (c.NEED_DATE >= @iRPTNG_Week)
                      AND c.LOB_CODE = @iLOB
                      AND c.PRGRM_NAME = @iProgram
                      AND c.PRJCT_NAME = @iProject
                      AND c.CNTNR_NAME = @iContainer);
    -- Write the count back onto the matching working-table row
    UPDATE #SupplementalData1
    SET Scheduled_Open = @Value
    WHERE LOB = @iLOB
        AND Program = @iProgram
        AND Project = @iProject
        AND Container = @iContainer
        AND RPTNG_Week = @iRPTNG_Week;
    -- Increment the iterator
    SET @I = @I + 1;
END;
Is there an alternative way that would improve speed?
Without sample data, desired output and your logic the following wasn't tested, but should get you moving in the right direction.
Do away with the entire while statement and go with a set based approach.
Here is the while loop rewritten as a SELECT. I will usually do that first to double check and validate data.
-- Set-based equivalent of the row-by-row loop: CROSS APPLY computes, for each
-- working-table row, the same count the loop stored in a variable.
-- Run this first to validate the numbers before converting it to an UPDATE.
SELECT *
FROM [#SupplementalData1] [supdata]
CROSS APPLY (
SELECT COUNT([CNTNR_NAME]) AS [Scheduled_Open_Sum]
FROM [#data] [c]
WHERE [c].[NEED_DATE] >= [supdata].[RPTNG_Week]
AND [c].[LOB_CODE] = [supdata].[LOB]
AND [c].[PRGRM_NAME] = [supdata].[Program]
AND [c].[PRJCT_NAME] = [supdata].[Project]
AND [c].[CNTNR_NAME] = [supdata].[Container]
) AS [cd];
Then once you have validated that it is correct, you can easily rewrite it as an UPDATE, which replaces your while loop.
-- The validated SELECT rewritten as a single set-based UPDATE:
-- replaces the entire WHILE loop with one statement.
UPDATE [supdata]
SET [Scheduled_Open] = [cd].[Scheduled_Open_Sum]
FROM [#SupplementalData1] [supdata]
CROSS APPLY (
SELECT COUNT([CNTNR_NAME]) AS [Scheduled_Open_Sum]
FROM [#data] [c]
WHERE [c].[NEED_DATE] >= [supdata].[RPTNG_Week]
AND [c].[LOB_CODE] = [supdata].[LOB]
AND [c].[PRGRM_NAME] = [supdata].[Program]
AND [c].[PRJCT_NAME] = [supdata].[Project]
AND [c].[CNTNR_NAME] = [supdata].[Container]
) AS [cd];
USE CIS111_BookStoreMC
GO
--Drop any existing copy so the script is re-runnable
IF OBJECT_ID('spAssetInfo') IS NOT NULL
DROP PROC spAssetInfo
GO
--Asset inventory plus the date each asset is completely depreciated (20%/year)
CREATE PROC spAssetInfo
AS
SELECT AssetID, Description, Cost, PurchaseDate
INTO #temptable
FROM Assets
ALTER TABLE #temptable ADD CompleteDepreciationYear DATE
DECLARE @value MONEY;
SET @value = 0.00;
DECLARE @j INT;
DECLARE @depreciationNum INT;
--derive the row count instead of hard-coding 14
DECLARE @assetCount INT;
SET @assetCount = (SELECT COUNT(*) FROM Assets);
SET @j = 1;
WHILE (@j <= @assetCount)
BEGIN
    SET @depreciationNum = 0;
    SET @value = (SELECT Cost FROM Assets WHERE AssetID = @j);
    --count years until the value drops to 0.1; the original "WHILE(@value > 0)"
    --never terminated because repeated 20% reductions never reach exactly 0
    WHILE (@value > 0.1)
    BEGIN
        SET @value = @value - (@value * 0.2);
        SET @depreciationNum = @depreciationNum + 1;
    END
    --UPDATE the existing row; the original INSERT appended new mostly-NULL rows
    UPDATE #temptable
    SET CompleteDepreciationYear = DATEADD(year, @depreciationNum, CAST((SELECT PurchaseDate FROM Assets WHERE AssetID = @j) AS DATE))
    WHERE AssetID = @j;
    SET @j = @j + 1;
END
SELECT * FROM #temptable
I have been trying to figure this out for hours, basically I am trying to show an Asset Inventory with PurchaseDate and the date the item is completely depreciated, the asset depreciates 20% per year. I tried to do a temporary table and copy some of the assets table columns to it then adding a column for the date when the asset completely depreciates.
For some reason, when I try executing the procedure as such 'EXEC spAssetInfo', the query runs forever
I forgot to include it in the screenshot but I also have
SELECT * FROM #temptable
to show the table when the procedure is executed
The problem is this part:
Trace through it and you will see that the loop conditions run indefinitely (the first is never false and the second is never true).
Money = 5
Money = 5 * 0.8 = 4
Money = 4 * 0.8 = 3.2
Money = 3.2 * 0.8 = 2.56 ... the value keeps shrinking but never reaches exactly 0, so the inner WHILE never exits ;)
I am using this SQL code but it works on the discount only. I want to make it in the event that a positive value is entered in the sales table, it is deducted from the inventory table, and in the case of entering a negative value, it increases in the inventory table
-- Adjust stock after a sale-detail insert. Batch-aware: joins to the
-- "inserted" pseudo-table so multi-row INSERTs update every affected item
-- (the original copied "inserted" into scalar variables, silently handling
-- only one row per statement). A positive Qty reduces stock; a negative Qty
-- increases it, because it is subtracted.
CREATE TRIGGER [dbo].[TryCutStock_By_Insert]
ON [dbo].[SaleDetail]
FOR INSERT
AS
BEGIN
    SET NOCOUNT ON;
    -- CONVERTs retained because the columns are stored as varchar;
    -- the proper fix is to store quantities as INT in the schema.
    UPDATE ic
    SET Qty = CONVERT(INT, ic.Qty) - CONVERT(INT, i.Qty)
    FROM ItemCard AS ic
    INNER JOIN inserted AS i
        ON i.Code = ic.Code;
END
Few things.
Don't use VARCHAR datatype for storing integers. Use right datatype.
As suggested by @Mitch-wheat, design the trigger to be batch aware
Don't have PRINT statement in the trigger
BEGIN
    -- One set-based UPDATE handles every row in "inserted",
    -- so multi-row inserts are adjusted correctly.
    UPDATE [stock]
    SET [Qty] = CONVERT(INT, [stock].[Qty]) - CONVERT(INT, [ins].[Qty])
    FROM ItemCard AS [stock]
    INNER JOIN inserted AS [ins]
        ON [ins].[Code] = [stock].[Code]
END
I use following sql query to update MyTable. the code take between 5 to 15 min. to update MyTabel as long as ROWS <= 100000000 but when Rows > 100000000 it take exponential time to update MYTable. How can I change this code to use set-base instead of while loop?
DECLARE @startTime DATETIME
DECLARE @batchSize INT
DECLARE @iterationCount INT
DECLARE @i INT
DECLARE @from INT
DECLARE @to INT
SET @batchSize = 10000
SET @i = 0
-- Number of @batchSize batches needed to cover the qualifying rows
SELECT @iterationCount = COUNT(*) / @batchSize
FROM MyTable
WHERE LitraID = 8175
AND id BETWEEN 100000000 AND 300000000
WHILE @i <= @iterationCount BEGIN
BEGIN TRANSACTION T
SET @startTime = GETDATE()
SET @from = @i * @batchSize
SET @to = (@i + 1) * @batchSize - 1
-- NOTE(review): this CTE re-numbers ALL qualifying rows on every iteration
-- and only then filters to the batch window - the main cause of the
-- slowdown on large id ranges (see the rewrites below).
;WITH data
AS (
SELECT DoorsReleased, ROW_NUMBER() OVER (ORDER BY id) AS Row
FROM MyTable
WHERE LitraID = 8175
AND id BETWEEN 100000000 AND 300000000
)
UPDATE data
SET DoorsReleased = ~DoorsReleased
WHERE row BETWEEN @from AND @to
SET @i = @i + 1
COMMIT TRANSACTION T
END
One of your issues is that your select statement in the loop fetches all records for LitraID = 8175, sets row numbers, then filters in the update statement. This happens on every iteration.
One way round this would be to get all ids for the update before entering the loop and storing them in a temporary table. Then you can write a similar query to the one you have, but joining to this table of ids.
However, there is an even easier way if you know approximately how many records have LitraID = 8175 and if they are spread throughout the table, not bunched together with similar ids.
DECLARE @batchSize INT
DECLARE @minId INT
DECLARE @maxId INT
SET @batchSize = 10000 --adjust according to how frequently LitraID = 8175, larger numbers if infrequent
SET @minId = 100000000
-- Walk the id range in fixed windows; short transactions avoid long locks
-- and let the log clear between batches.
WHILE @minId <= 300000000 BEGIN
SET @maxId = @minId + @batchSize - 1
IF @maxId > 300000000 BEGIN
SET @maxId = 300000000
END
BEGIN TRANSACTION T
UPDATE MyTable
SET DoorsReleased = ~DoorsReleased
WHERE id BETWEEN @minId AND @maxId
AND LitraID = 8175 --without this filter the batch flips rows of every LitraID
COMMIT TRANSACTION T
SET @minId = @maxId + 1
END
This will use the value of id to control the loop, meaning you don't need the extra step to calculate #iterationCount. It uses small batches so that the table isn't locked for long periods. It doesn't have any unnecessary SELECT statements and the WHERE clause in the update is efficient assuming id has an index.
It won't have exactly the same number of records updated in every transaction, but there's no reason it needs to.
This will eliminate the loop
-- Single set-based statement replacing the whole batching loop.
UPDATE MyTable
SET DoorsReleased = ~DoorsReleased
WHERE LitraID = 8175
AND id BETWEEN 100000000 AND 300000000
AND DoorsReleased IS NOT NULL -- if DoorsReleased is nullable (~NULL stays NULL, so skip those rows)
if you are set on looping
below will NOT work
I thought ~ was part of the column name, but it is the bitwise NOT (complement) operator
select 1; -- primes @@ROWCOUNT so the loop body executes at least once
WHILE (@@ROWCOUNT > 0)
BEGIN
-- NOTE(review): as the author states above, this does NOT work:
-- x <> ~x is true for every non-NULL value, so the filter never shrinks
-- and the loop keeps re-flipping the same rows forever.
UPDATE top (100000) MyTable
set DoorsReleased = ~DoorsReleased
WHERE LitraID = 8175
AND id BETWEEN 100000000 AND 300000000
AND ( DoorsReleased <> ~DoorsReleased
or ( DoorsReleased is null and ~DoorsReleased is not null )
)
END
Inside a transaction I don't think looping would have value, as the transaction log cannot clear. And a batch size of 10,000 is small.
As stated in a comment, if you want to loop then try batching on id rather than row_number(); all those loops are expensive.
you might be able to use OFFSET
I have the following TSQL Statement, I am trying to figure out how I can keep getting the results (100 rows at a time), store them in a variable (as I will have to add the totals after each select) and continue to select in a while loop until no more records are found and then return the variable totals to the calling function.
-- Totals per OrderUser; RIGHT OUTER JOIN rewritten as the equivalent
-- LEFT JOIN with the tables reordered (same result, easier to read).
-- NOTE(review): TOP(100) has no ORDER BY, so WHICH 100 rows are summed is
-- nondeterministic - confirm the intended ordering before paging with it.
SELECT [OrderUser].OrderUserId, ISNULL(SUM(total.FileSize), 0), ISNULL(SUM(total.CompressedFileSize), 0)
FROM [OrderUser] WITH (NOLOCK)
LEFT OUTER JOIN
(
    SELECT DISTINCT TOP(100) ProductSize.OrderUserId, ProductSize.FileInfoId,
    CAST(ProductSize.FileSize AS BIGINT) AS FileSize,
    CAST(ProductSize.CompressedFileSize AS BIGINT) AS CompressedFileSize
    FROM ProductSize WITH (NOLOCK)
    INNER JOIN [Version] ON ProductSize.VersionId = [Version].VersionId
) AS total ON total.OrderUserId = [OrderUser].OrderUserId
WHERE NOT ([OrderUser].isCustomer = 1 AND [OrderUser].isEndOrderUser = 0 OR [OrderUser].isLocation = 1)
AND [OrderUser].OrderUserId = 1
GROUP BY [OrderUser].OrderUserId
Depending on the clustered index, if its by numbered id, then use the code below. If its by date, go in 10 - 60 minute increments. keep an eye on performance of other things, but the lovely part of this code is you can start and stop at anytime if you push the results to permanent temp table (real table, just temp)
Here's a sample:
declare @count int
Declare @batch int
declare @max int
-- Scratch table keyed by batch; you can stop and restart the loop at any
-- time because completed batches are already persisted here.
create table #temp (id int identity(1,1) primary key, Batch int, value int)
-- "table" and "stuffs" are placeholders for your source table/columns
select @max = max(OrderUserId), @count = 0, @batch = 1000 from table
while (@count < @max)
begin
insert into #temp (batch,value)
select @count, Sum(stuffs)
from table
where orderId >= @count
and orderid < @count + @batch
Set @count = @count + @batch
waitfor delay ('00:00:01') -- throttle so concurrent workloads are not starved
Raiserror('On Batch %d',0,1,@Count) with nowait /* Will print progress */
end