Histogram: Counting orders with variable bins in SQL

I have a table containing orders, items, and prices. I am trying to generate histograms for each item based on the prices.
Create Table #Customer_Pricing
(
customer_id int,
item_id VARCHAR(10),
qty DECIMAL(5,2),
price DECIMAL(5,2)
)
;
GO
-- Insert Statements
Insert into #Customer_Pricing values(128456, 'SOM 555', 8, 2.50)
Insert into #Customer_Pricing values(123856, 'SOM 554', 1, 2.50)
Insert into #Customer_Pricing values(123456, 'SOM 554', 55, 2.00)
Insert into #Customer_Pricing values(123556, 'SOM 555', 2, 2.20)
Insert into #Customer_Pricing values(123456, 'SOM 553', 12, 2.13)
;
For each item, I wanted 3 bins, so I determined the bin size by dividing the difference between the MAX and MIN by 3, then adding that value to the MIN.
WITH Stats_Table_CTE (item_id2,max_p, min_p, int_p, r1_upper, r2_lower, r2_upper, r3_lower)
AS
( SELECT item_id
,max(price)
,min(price)
,(max(price) - min(price))/3
,min(price)+(max(price) - min(price))/3-0.01
,min(price)+(max(price) - min(price))/3
,min(price)+((max(price) - min(price))/3)*2-0.01
,min(price)+((max(price) - min(price))/3)*2
FROM #Customer_Pricing
GROUP BY item_id)
Now, I need to count the frequencies for each range and each item. I have attempted to do so by using SUM(CASE...) but was unsuccessful.
SELECT item_id
,SUM(CASE WHEN price <= r1_upper, THEN 1 ELSE 0 END) AS r1_count
,SUM(CASE WHEN price >= r2_lower AND <= r2_upper, THEN 1 ELSE 0 END) AS r2_count
,SUM(CASE WHEN price >= r3_lower, THEN 1 ELSE 0 END) AS r3_count
FROM Stats_Table_CTE
GROUP BY item_id
I also attempted to use COUNT in the form
SELECT item_id, price
count(price <= r1_upper) AS r1_count.... but I got stuck
In one attempt, I INNER JOINed the #Customer_Pricing table and Stats_Table_CTE, but didn't know where to go from there.
Ideally, I would like the output table to appear as follows. *This is not the actual data, but I included it to show the desired format of the output.
Item ID   min_p   r1_upper   r3_lower   max_p   r1_count   r2_count ...
SOM 553   2.00    2.16       2.33       2.50    2          1
SOM 554   2.13    2.48       2.88       3.25    1          0
SOM 555   2.31    2.51       2.72       2.92    3          2
*The r2 bin-boundary columns are omitted above to save space, but I have the item ID, the bins, and the counts across the top, grouped by item.

Here is my recommendation:
WITH Stats_Table_CTE AS (
      SELECT item_id, max(price) as maxprice, min(price) as minprice,
             (max(price) - min(price))/3 as binsize
      FROM #Customer_Pricing
      GROUP BY item_id
     )
SELECT cp.item_id,
       SUM(CASE WHEN price < minprice + binsize THEN 1 ELSE 0
           END) AS r1_count,
       SUM(CASE WHEN price >= minprice + binsize AND price < minprice + 2*binsize
                THEN 1 ELSE 0
           END) AS r2_count,
       SUM(CASE WHEN price >= minprice + 2*binsize
                THEN 1 ELSE 0
           END) AS r3_count
FROM #Customer_Pricing cp JOIN
     Stats_Table_CTE st
     ON st.item_id = cp.item_id
GROUP BY cp.item_id
The important part is the join back to #Customer_Pricing. Also important is the simplification of the logic -- you can define the bounds for the bins and use <, rather than having a lower and upper bound for each one. Also, your query had some syntax errors in it.
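If you also want the bin boundaries and the min/max in the output, as in your desired table, you can carry them through from the same CTE. A sketch extending the query above:
SELECT cp.item_id,
       st.minprice AS min_p,
       st.minprice + st.binsize AS r1_upper,
       st.minprice + 2*st.binsize AS r3_lower,
       st.maxprice AS max_p,
       SUM(CASE WHEN price < st.minprice + st.binsize THEN 1 ELSE 0 END) AS r1_count,
       SUM(CASE WHEN price >= st.minprice + st.binsize AND price < st.minprice + 2*st.binsize THEN 1 ELSE 0 END) AS r2_count,
       SUM(CASE WHEN price >= st.minprice + 2*st.binsize THEN 1 ELSE 0 END) AS r3_count
FROM #Customer_Pricing cp JOIN
     Stats_Table_CTE st
     ON st.item_id = cp.item_id
GROUP BY cp.item_id, st.minprice, st.maxprice, st.binsize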
Note that in many databases, the CTE would not be necessary because you could just use window functions. Your question is not tagged with the database (although I could guess what it is), so that change seems unwarranted.
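For illustration only, a window-function version might look like this (a sketch, assuming a dialect that supports MIN/MAX as window functions):
SELECT item_id,
       SUM(CASE WHEN price < minprice + binsize THEN 1 ELSE 0 END) AS r1_count,
       SUM(CASE WHEN price >= minprice + binsize AND price < minprice + 2*binsize THEN 1 ELSE 0 END) AS r2_count,
       SUM(CASE WHEN price >= minprice + 2*binsize THEN 1 ELSE 0 END) AS r3_count
FROM (SELECT item_id, price,
             MIN(price) OVER (PARTITION BY item_id) AS minprice,
             (MAX(price) OVER (PARTITION BY item_id) -
              MIN(price) OVER (PARTITION BY item_id)) / 3 AS binsize
      FROM #Customer_Pricing) t
GROUP BY item_id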

Related

Get bins range from temporary table SQL

I have a question related to my previous one.
What I have is a database that looks like:
category price date
-------------------------
Cat1 37 2019-03
Cat2 65 2019-03
Cat3 34 2019-03
Cat1 45 2019-03
Cat2 100 2019-03
Cat3 60 2019-03
This db has hundreds of categories and comes from another one that has different attributes for each observation.
With this code:
WITH table AS
(
SELECT
category, price, date,
substring(date, 1, 4) AS year,
substring(date, 6, 2) as month
FROM
original_table
WHERE
(year = "2019" or year = "2020")
AND (month = "03")
AND product = "XXXXX"
ORDER BY
anno
)
-- I get this from a bigger table, but prefer to make small steps
-- so that anyone in the future can understand where this comes from, as
-- the original table is expected to grow fast
SELECT
category,
ROUND(1.0 * next_price/ price - 1, 2) Pct_change,
SUBSTR(Date, 1, 4) || '-' || SUBSTR(next_date, 1, 4) Period,
tipo_establecimiento
FROM
(SELECT
*,
LEAD(Price) OVER (PARTITION BY category ORDER BY year) next_price,
LEAD(year) OVER (PARTITION BY category ORDER BY year) next_date,
CASE
WHEN (category_2>= 35) AND (category_2 <= 61)
THEN 'S'
ELSE 'N'
END 'tipo_establecimiento'
FROM
table)
WHERE
next_date IS NOT NULL AND Pct_change >= 0
ORDER BY
Pct_change DESC
This code gets me a view of the data that looks like:
category   Pct_change   period
cat1       0.21         2019-2020
cat2       0.53         2019-2020
cat3       0.76         2019-2020
This is great! But my next view has to take this one and provide me with a range that shows how many categories are in each range.
It should look like:
range avg num_cat_in
[0.1- 0.4] 0.3 3
This last table is just an example of what I expect
I have been trying with code that looks like this, but I get nothing:
WITH table AS (
SELECT category, price, date, substring(date, 1, 4) AS year, substring(date, 6, 2) as month
FROM original_table
WHERE (year= "2019" or year= "2020") and (month= "03") and product = "XXXXX"
order by anno
)
-- I get this from a bigger table, but prefer to make small steps that anyone in the future can understand where this comes from as the original table is expected to grow fast
SELECT category,
ROUND(1.0 * next_price/ price - 1, 2) Pct_change,
SUBSTR(Date, 1, 4) || '-' || SUBSTR(next_date, 1, 4) Period,
tipo_establecimiento
FROM (
SELECT *,
LEAD(Price) OVER (PARTITION BY category ORDER BY year) next_price,
LEAD(year) OVER (PARTITION BY category ORDER BY year) next_date,
CASE
WHEN (category_2>= 35) AND (category_2 <= 61)
THEN 'S'
ELSE 'N'
END 'tipo_establecimiento'
FROM table
)
WHERE next_date IS NOT NULL AND Pct_change>=0
ORDER BY Pct_change DESC
WHERE next_date IS NOT NULL AND Pct_change>=0
)
SELECT
count(CASE WHEN Pct_change> 0.12 AND Pct_change <= 0.22 THEN 1 END) AS [12 - 22],
count(CASE WHEN Pct_change> 0.22 AND Pct_change <= 0.32 THEN 1 END) AS [22 - 32],
count(CASE WHEN Pct_change> 0.32 AND Pct_change <= 0.42 THEN 1 END) AS [32 - 42],
count(CASE WHEN Pct_change> 0.42 AND Pct_change <= 0.52 THEN 1 END) AS [42 - 52],
count(CASE WHEN Pct_change> 0.52 AND Pct_change <= 0.62 THEN 1 END) AS [52 - 62],
count(CASE WHEN Pct_change> 0.62 AND Pct_change <= 0.72 THEN 1 END) AS [62 - 72],
count(CASE WHEN Pct_change> 0.72 AND Pct_change <= 0.82 THEN 1 END) AS [72 - 82]
Thank you!!!
cf. my comment, I'm first assuming that your ranges are not hard-coded and that you wish to split your data evenly across quantiles of Pct_change. This means the calculation will figure out the ranges that split your sample as uniformly as possible. In this case, the following would work (where theview is the name of your previous view which calculates percentages):
select
concat('[',min(Pct_change),'-',max(Pct_change),']') as `range`
, avg(Pct_change) as `avg`
, count(*) as num_cat_in
from(
select *
, ntile(5)over(order by Pct_change) as bin
from theview
) t
group by bin
order by bin;
Here is a fiddle.
If on the other hand your ranges are hard-coded, I assume the ranges are in a table such as the one I create:
create table theranges (lower DOUBLE, upper DOUBLE);
insert into theranges values (0,0.2),(0.2,0.4),(0.4,0.6),(0.6,0.8),(0.8,1);
(You have to make sure that the ranges are non-overlapping. By convention I include percentages in the range from the lower bound included to the upper bound excluded, except for the upper bound of 1 which is included.) It is then a matter of left-joining the tables:
select
concat('[',lower,'-',upper,']') as `range`
, avg(Pct_change) as `avg`
, sum(if(Pct_change is null, 0, 1)) as num_cat_in
from theranges left join theview on (Pct_change>=lower and if(upper=1,true,Pct_change<upper))
group by lower, upper
order by lower;
(Note that in the bit that says upper=1, you must change 1 to whatever your highest hard-coded range is; here I am assuming your percentages are between 0 and 1.)
Here is the second fiddle.

Modifying an Aggregate

I am trying to get my surgeries to calculate at different rates and I am struggling with it. For example, patient 58903 has 4 total surgeries as shown below. However, I would like the first surgery to calculate at 100% of the PPO SURG rate (so $4232), the second one at 50%, and all remaining surgeries at 25% of the main PPO SURG rate. My current code returns $16,929 for patient 58903, which is just $4232*4. My desired output for the SURG Total below is $8,464 (4232+2116+1058+1058).
My Current Code:
SELECT
DISTINCT PATNO,
SUM( PPOSURG) AS 'Surg Total',
SUM( PPONONSURG) AS 'Non Surg Total',
SUM( PPODRUG) AS 'Drug Total',
INSNME,
IIF( SUM( PPOSURG) IS NOT NULL,
SUM( PPOSURG) + SUM(CASE WHEN REV=278 THEN (AMT1)*0.446 END),
ISNULL(SUM( PPODRUG),0)+ISNULL( SUM( PPONONSURG),0)) AS 'Claim Total'
FROM
[OPGRACE$] AS GR --Main Table
LEFT JOIN [BCSURGOP$] AS SRG ON GR.CPTCDA=SRG.[CPTCODESURG] --SURG joined on cpt
LEFT JOIN [BCBSDRUG$] AS DRUG ON GR.CPTCDA=DRUG.[CPT CODE] --DRUG Schedules joined on cpt
LEFT JOIN [BCBSNONSURGOP$] AS NSRG ON GR.CPTCDA=NSRG.[CPT CODE] --Non-SURG joined on cpt
WHERE QTY>0 AND PATNO>0
GROUP BY PATNO,INSNME
ORDER BY PATNO ASC
I tried doing something like this, but you can't nest an aggregate inside a SUM:
SUM(CASE WHEN COUNT(CPTCODESURG)=1 THEN PPOSURG ELSE PPOSURG*0.5 END) + SUM(CASE WHEN REV=278 THEN (AMT1)*0.446 END),
Here is my output for just patient 58903
SURG Total   Non Surg Total   Drug Total   CLAIM Total
16929.472    3103             179          22598.84
Here is a blurb of my joined table showing how patient 58903 shows up:
PName   Rev   CPT     PPOSURG
58903   360   29882   4232.368
58903   360   29882   4232.368
58903   360   29882   4232.368
58903   360   29882   4232.368
Would a PARTITION be the way to go here? A subquery? Can I somehow use a case statement? I don't expect anyone to write my code but literally any ideas would be extremely helpful, I have been really stuck on this.
If I understand you correctly you just need a row number partitioned by the patient and then a CASE expression to convert that into a multiplier. I've added an id column to the sample data to allow for an order by (which you need for a row number).
declare #Test table (id int identity(1,1), PName int, Rev int, CPT int, PPOSURG money);
insert into #Test (PName, Rev, CPT, PPOSURG)
values
(58903, 360, 29882, 4232.368),
(58903, 360, 29882, 4232.368),
(58903, 360, 29882, 4232.368),
(58903, 360, 29882, 4232.368);
with cte as (
select *
, row_number() over (partition by PName order by id) rn
from #Test
)
select PName, Rev, CPT
, cast(sum(PPOSURG * case rn when 1 then 1.00 when 2 then 0.50 else 0.25 end) as decimal(9,2)) Total
from cte
group by PName, Rev, CPT;
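Folding this back into your own query, one option is to compute the row number in a derived table first and aggregate afterwards. A sketch using your table and column names, focused only on the SURG total (the ROW_NUMBER ordering column is an assumption; your table needs some column that fixes the order of the surgeries):
SELECT PATNO, INSNME,
       SUM(PPOSURG * CASE rn WHEN 1 THEN 1.00 WHEN 2 THEN 0.50 ELSE 0.25 END) AS 'Surg Total'
FROM (
    SELECT GR.PATNO, GR.INSNME, SRG.PPOSURG,
           ROW_NUMBER() OVER (PARTITION BY GR.PATNO
                              ORDER BY GR.CPTCDA) AS rn -- ordering column is an assumption
    FROM [OPGRACE$] AS GR
    JOIN [BCSURGOP$] AS SRG ON GR.CPTCDA = SRG.[CPTCODESURG] -- surgery rows only
    WHERE GR.QTY > 0 AND GR.PATNO > 0
) t
GROUP BY PATNO, INSNME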

Query to show stock based on previous transactions

Please, I need your help with the following objective:
Match SO (Sales Order) quantities to PO (Purchase Order) quantities based on FIFO (First In, First Out), where the first stock items purchased must be the first items sold.
I have a table Stock which I use to track the movement of stock in and out of an imaginary warehouse. The warehouse is initially empty; stock then moves into the warehouse as a result of a stock purchase ('IN'), and stock moves out of the warehouse when it is sold ('OUT'). Each type of stock item is identified by an ItemID. Each movement of stock in or out of the warehouse, due to a purchase or sale of a given item, results in a row being added to the Stock table, uniquely identified by the value in the StockID identity column, and describing how many items were added or removed and the date of the transaction.
Table Stock:
StockId DocumentID ItemID TranDate TranCode Quantity
------------------------------------------------------------
1 PO001 A021 2016.01.01 IN 3
4 SO010 A021 2016.01.02 OUT 2
2 PO002 A021 2016.01.10 IN 7
3 PO003 A021 2016.02.01 IN 9
5 SO011 A021 2016.02.11 OUT 8
6 SO012 A021 2016.02.12 OUT 6
How could I write a query to give output like the table below?
SOID POID Quantity
------------------------
SO010 PO001 2
SO011 PO001 1
SO011 PO002 7
SO012 PO003 6
So, seeing as no one else has given this a go, I figure I'll post something that resembles an answer (I believe).
Essentially, what you want to do is keep track of the number of things you have in stock and the number of things that have gone out, based on the date (I haven't accounted for multiple things coming in or going out on the same date, though).
DECLARE #Table TABLE
(
DocumentID VARCHAR(10) NOT NULL,
TranCode VARCHAR(3) NOT NULL,
TranDate DATE NOT NULL,
Quantity INT NOT NULL
); -- I'm ignoring the other columns here because they don't seem important to your overall needs.
INSERT #Table (DocumentID, TranCode, TranDate, Quantity)
VALUES
('PO001', 'IN', '2016-01-01', 3),
('SO010', 'OUT', '2016-01-02', 2),
('PO002', 'IN', '2016-01-10', 7),
('PO003', 'IN', '2016-02-01', 9),
('SO011', 'OUT', '2016-02-11', 8),
('SO012', 'OUT', '2016-02-12', 6);
WITH CTE AS
(
SELECT DocumentID,
TranCode,
TranDate,
Quantity,
RunningQuantity = -- Determine the current IN/OUT totals.
(
SELECT SUM(Quantity)
FROM #Table
WHERE TranCode = T.TranCode
AND TranDate <= T.TranDate
),
PrevQuantity = -- Keep track of the previous IN/OUT totals.
(
SELECT ISNULL(SUM(Quantity), 0)
FROM #Table
WHERE TranCode = T.TranCode
AND TranDate < T.TranDate
)
FROM #Table T
)
SELECT SOID = Outgoing.DocumentID,
       POID = Incoming.DocumentID,
Quantity =
CASE WHEN Outgoing.RunningQuantity <= Incoming.RunningQuantity AND Outgoing.PrevQuantity >= Incoming.PrevQuantity
THEN Outgoing.RunningQuantity - Outgoing.PrevQuantity
WHEN Outgoing.RunningQuantity <= Incoming.RunningQuantity AND Outgoing.PrevQuantity < Incoming.PrevQuantity
THEN Outgoing.RunningQuantity - Incoming.PrevQuantity
ELSE Incoming.RunningQuantity - Outgoing.PrevQuantity
END
FROM CTE Outgoing
JOIN CTE Incoming ON
Incoming.TranCode = 'IN'
AND Incoming.RunningQuantity > Outgoing.PrevQuantity
AND Incoming.PrevQuantity < Outgoing.RunningQuantity
WHERE Outgoing.TranCode = 'OUT'
ORDER BY Outgoing.TranDate;
Note: I would highly recommend you keep track of the information in a better way. For example, create a table that actually details which orders took what from which other orders (an order transaction table or something), because while it's not impossible to achieve what you want with the way your data is structured, it's much less complicated if you just store more helpful data.
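For example, a minimal sketch of such an allocation table (the names here are hypothetical, not from your schema):
-- One row per (sale, purchase) pairing, written when the stock is allocated.
CREATE TABLE StockAllocation
(
    AllocationID INT IDENTITY(1,1) PRIMARY KEY,
    SaleDocumentID VARCHAR(10) NOT NULL,     -- e.g. 'SO010'
    PurchaseDocumentID VARCHAR(10) NOT NULL, -- e.g. 'PO001'
    ItemID VARCHAR(10) NOT NULL,
    Quantity INT NOT NULL
);
With that in place, the report you want becomes a plain SELECT instead of the running-total gymnastics above.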

SQL failing to add value from previous row into the next

I am trying to add the value of the previous row to the current row in the Cumulative column.
Select
Ddate as Date, etype, Reference, linkacc as ContraAcc,
Description,
sum(case when amount > 0 then amount else 0 end) as Debits,
sum(case when amount < 0 then amount else 0 end) as Credits,
sum(amount) as Cumulative
from
dbo.vw_LT
where
accnumber ='8400000'
and [DDate] between '2016-04-01 00:00:00' and '2016-04-30 00:00:00'
and [DataSource] = 'PAS11CEDCRE17'
group by
Ddate, etype, Reference, linkacc, Description, Amount
Output (what I am getting):
Date Reference ContraAcc Description Debits Credits Cumulative
--------------------------------------------------------------------------
2016-04-01 CC007 8000000 D/CC007 0 -39.19 -39.19
2016-04-01 CC007 8000000 D/CC007 1117.09 0 1117.09
2016-04-01 CC009 8000000 CC009 2600 0 2600
The Cumulative column should look like below (what I need):
Date Reference ContraAcc Description Debits Credits Cumulative
--------------------------------------------------------------------------
2016-04-01 CC007 8000000 D/CC007 0 -39.19 -39.19
2016-04-01 CC007 8000000 D/CC007 1117.09 0 1077.9
2016-04-01 CC009 8000000 CC009 2600 0 3677.9
Before we delve into the solution, let me tell you that if you are using SQL Server 2012 or later, there are LAG and LEAD functions, which can help you solve this.
I am not giving you an exact query to solve your problem (as we don't know what the primary key for that table is), but you can get the idea from the example below:
DECLARE @t TABLE
(
accountNumber VARCHAR(50)
,dt DATETIME
,TransactedAmt BIGINT
)
INSERT INTO @t VALUES ('0001','7/20/2016',1000)
INSERT INTO @t VALUES ('0001','7/21/2016',-1000)
INSERT INTO @t VALUES ('0001','7/22/2016',2000)
INSERT INTO @t VALUES ('0002','7/20/2016',500)
INSERT INTO @t VALUES ('0002','7/21/2016',-500)
INSERT INTO @t VALUES ('0002','7/22/2016',2000)
;WITH CTE AS
(
SELECT ROW_NUMBER() OVER(PARTITION BY accountNumber ORDER BY dt) AS RN, *
FROM @t
), CTE1 AS
(
SELECT RN, accountNumber, dt, TransactedAmt, TransactedAmt AS TotalBalance
FROM CTE WHERE RN = 1
UNION ALL
SELECT T1.RN, T1.accountNumber, T1.dt, T1.TransactedAmt,
       T0.TotalBalance + T1.TransactedAmt AS TotalBalance -- recurse on the running balance, not just the previous raw amount
FROM CTE T1
JOIN CTE1 T0
ON T1.accountNumber = T0.accountNumber
AND T1.RN = T0.RN + 1
)
SELECT * FROM CTE1 ORDER BY accountNumber, RN
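For reference, on SQL Server 2012 and later the same running balance can be computed without the recursion, using a windowed SUM. A minimal sketch against the same @t data:
SELECT accountNumber, dt, TransactedAmt,
       SUM(TransactedAmt) OVER (PARTITION BY accountNumber
                                ORDER BY dt
                                ROWS UNBOUNDED PRECEDING) AS TotalBalance -- running total per account
FROM @t
ORDER BY accountNumber, dt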

SQL Deduct value from multiple rows

I would like to apply a total $10.00 discount for each customer. The discount should be applied across multiple transactions until the whole $10.00 is used.
Example:
CustomerID   Transaction Amount   Discount   TransactionID
1            $8.00                $8.00      1
1            $6.00                $2.00      2
1            $5.00                $0.00      3
1            $1.00                $0.00      4
2            $5.00                $5.00      5
2            $2.00                $2.00      6
2            $2.00                $2.00      7
3            $45.00               $10.00     8
3            $6.00                $0.00      9
The query below keeps track of the running sum and calculates the discount depending on whether the running sum is greater than or less than the discount amount.
select
customerid, transaction_amount, transactionid,
(case when 10 > (sum_amount - transaction_amount)
then (case when transaction_amount >= 10 - (sum_amount - transaction_amount)
then 10 - (sum_amount - transaction_amount)
else transaction_amount end)
else 0 end) discount
from (
select customerid, transaction_amount, transactionid,
sum(transaction_amount) over (partition by customerid order by transactionid) sum_amount
from Table1
) t1 order by customerid, transactionid
http://sqlfiddle.com/#!6/552c2/7
The same query with a self join, which should work on most DBs, including MSSQL 2008:
select
customerid, transaction_amount, transactionid,
(case when 10 > (sum_amount - transaction_amount)
then (case when transaction_amount >= 10 - (sum_amount - transaction_amount)
then 10 - (sum_amount - transaction_amount)
else transaction_amount end)
else 0 end) discount
from (
select t1.customerid, t1.transaction_amount, t1.transactionid,
sum(t2.transaction_amount) sum_amount
from Table1 t1
join Table1 t2 on t1.customerid = t2.customerid
and t1.transactionid >= t2.transactionid
group by t1.customerid, t1.transaction_amount, t1.transactionid
) t1 order by customerid, transactionid
http://sqlfiddle.com/#!3/552c2/2
You can do this with recursive common table expressions, although it isn't particularly pretty. SQL Server struggles to optimize these types of query. See Sum of minutes between multiple date ranges for some discussion.
If you wanted to go further with this approach, you'd probably need to make a temporary table of x, so you can index it on (customerid, rn); a sketch of that follows the query below.
;with x as (
select
tx.*,
row_number() over (
partition by customerid
order by transaction_amount desc, transactionid
) rn
from
tx
), y as (
select
x.transactionid,
x.customerid,
x.transaction_amount,
case
when 10 >= x.transaction_amount then x.transaction_amount
else 10
end as discount,
case
when 10 >= x.transaction_amount then 10 - x.transaction_amount
else 0
end as remainder,
x.rn as rn
from
x
where
rn = 1
union all
select
x.transactionid,
x.customerid,
x.transaction_amount,
case
when y.remainder >= x.transaction_amount then x.transaction_amount
else y.remainder
end,
case
when y.remainder >= x.transaction_amount then y.remainder - x.transaction_amount
else 0
end,
x.rn
from
y
inner join
x
on y.rn = x.rn - 1 and y.customerid = x.customerid
where
y.remainder > 0
)
update
tx
set
discount = y.discount
from
tx
inner join
y
on tx.transactionid = y.transactionid;
Example SQLFiddle
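For the indexing idea mentioned above, a minimal sketch (assuming tx has the columns used in the CTE) is to materialize x into a temporary table first:
-- Materialize the numbered rows so the recursive join can seek on (customerid, rn).
select
    tx.*,
    row_number() over (
        partition by customerid
        order by transaction_amount desc, transactionid
    ) rn
into #x
from tx;
create clustered index ix_x on #x (customerid, rn);
-- ...then reference #x instead of x in the CTEs above.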
I usually like to set up a test environment for such questions. I will use a local temporary table. Please note, I made the data unordered, since ordering is not guaranteed in real life.
-- play table
if exists (select 1 from tempdb.sys.tables where name like '%transactions%')
drop table #transactions
go
-- play table
create table #transactions
(
trans_id int identity(1,1) primary key,
customer_id int,
trans_amt smallmoney
)
go
-- add data
insert into #transactions
values
(1,$8.00),
(2,$5.00),
(3,$45.00),
(1,$6.00),
(2,$2.00),
(1,$5.00),
(2,$2.00),
(1,$1.00),
(3,$6.00);
go
I am going to give you two answers.
First, SQL Server 2012 and later have window functions with frames for rows preceding. These allow us to get a running total (rt) and a rt adjusted by one entry. Given these two values, we can determine whether the maximum discount has been exceeded or not.
-- Two running totals using window frames (SQL Server 2012+)
;
with cte_running_total
as
(
select
*,
SUM(trans_amt)
OVER (PARTITION BY customer_id
ORDER BY trans_id
ROWS BETWEEN UNBOUNDED PRECEDING AND
0 PRECEDING) as running_tot_p0,
SUM(trans_amt)
OVER (PARTITION BY customer_id
ORDER BY trans_id
ROWS BETWEEN UNBOUNDED PRECEDING AND
1 PRECEDING) as running_tot_p1
from
#transactions
)
select
*
,
case
when coalesce(running_tot_p1, 0) <= 10 and running_tot_p0 <= 10 then
trans_amt
when coalesce(running_tot_p1, 0) <= 10 and running_tot_p0 > 10 then
10 - coalesce(running_tot_p1, 0)
else 0
end as discount_amt
from cte_running_total;
Again, the above version is using a common table expression and advanced windowing to get the totals.
Do not fret! The same can be done all the way down to SQL 2000.
For the second solution, I am just going to use ORDER BY, sub-queries, and a temporary table to store the information that is normally in the CTE. You can switch the temporary table for a CTE in SQL 2008 if you want.
-- w/o any fancy functions - save to temp table
select *,
(
select count(*) from #transactions i
where i.customer_id = o.customer_id
and i.trans_id <= o.trans_id
) as sys_rn,
(
select sum(trans_amt) from #transactions i
where i.customer_id = o.customer_id
and i.trans_id <= o.trans_id
) as sys_tot_p0,
(
select sum(trans_amt) from #transactions i
where i.customer_id = o.customer_id
and i.trans_id < o.trans_id
) as sys_tot_p1
into #results
from #transactions o
order by customer_id, trans_id
go
-- report off temp table
select
trans_id,
customer_id,
trans_amt,
case
when coalesce(sys_tot_p1, 0) <= 10 and sys_tot_p0 <= 10 then
trans_amt
when coalesce(sys_tot_p1, 0) <= 10 and sys_tot_p0 > 10 then
10 - coalesce(sys_tot_p1, 0)
else 0
end as discount_amt
from #results
order by customer_id, trans_id
go
In short, your answer is shown in the result set of the queries above. Cut and paste the code into SSMS and have some fun.