Calculating means for Columns based on data in another data set - indexing

I have two data sets, let's call them A and B (dput of the first 5 rows of each below):
`A: structure(list(Location = c(3960.82823, 3923.691, 3919.40593,
3907.97909, 3886.55377), Height = c(0.163744751, 0.231555472,
0.232150996, 0.192475738, 0.162966924), Start = c(3963.68494,
3946.54468, 3920.83429, 3909.40745, 3895.1239), End = c(3953.68645,
3920.83429, 3909.40745, 3895.1239, 3883.69706)), row.names = c(NA,
5L), class = "data.frame")
`
`B:structure(list(Wavenumber..cm.1. = c(3997.96546, 3996.5371, 3995.10875,
3993.68039, 3992.25204), M100 = c(0.00106, 0.00105, 0.00095,
0.00075, 0.00053), M101 = c(0.00081, 0.00092, 0.00102, 0.001,
0.00082), M102 = c(0.00099, 0.00109, 0.00105, 9e-04, 0.00072),
M103 = c(0.00101, 0.00111, 0.0012, 0.00129, 0.00133), M104 = c(0.00081,
0.00083, 0.00084, 0.00086, 0.00089), M105 = c(0.00139, 0.00113,
0.00092, 0.00089, 0.00102), M106 = c(0.00095, 0.00103, 0.00095,
0.00074, 0.00058), M107 = c(0.00054, 0.00058, 0.00059, 0.00049,
0.00032), M108 = c(0.00042, 5e-04, 5e-04, 0.00034, 0.00011
), M109 = c(0.00069, 0.00051, 0.00043, 0.00051, 0.00065),
M110 = c(0.00113, 0.00121, 0.00124, 0.00116, 0.00099), M111 = c(0.00039,
0.00056, 0.00068, 0.00068, 0.00056), M112 = c(0.0011, 0.00112,
0.00112, 0.00108, 0.00099), M113 = c(3e-04, 3e-04, 3e-04,
0.00027, 0.00019), M114 = c(0.00029, 6e-05, -2e-05, 9e-05,
0.00028), M115 = c(0.00091, 0.00079, 0.00061, 0.00038, 2e-04
), M116 = c(0.00117, 0.00105, 0.00096, 0.00092, 0.00092),
M117 = c(0.00039, 2e-04, 6e-05, 6e-05, 0.00018), M118 = c(0.00096,
0.00073, 0.00055, 0.00047, 0.00049), M119 = c(0.00037, 0.00031,
0.00024, 0.00018, 0.00018), M120 = c(0.00116, 0.00098, 0.00084,
0.00076, 0.00067), M121 = c(0.00039, 0.00024, 0.00011, 7e-05,
0.00011), M122 = c(0.00032, 0.00038, 0.00045, 0.00044, 0.00035
), M123 = c(9e-04, 0.00097, 0.00108, 0.0012, 0.00128), M124 = c(-0.00082,
-0.00065, -0.00049, -0.00037, -0.00036), M125 = c(0.00053,
0.00054, 0.00055, 6e-04, 0.00071), M126 = c(7e-05, 0.00022,
0.00022, 0.00011, 2e-05), M127 = c(0.00086, 9e-04, 0.00086,
0.00073, 0.00058), M128 = c(0.00089, 0.00078, 0.00069, 0.00057,
0.00043), M129 = c(0.00094, 0.00097, 0.00106, 0.00114, 0.00105
), M130 = c(0.0013, 0.00118, 0.00115, 0.00116, 0.00111),
M131 = c(0.00029, 0.00033, 0.00033, 3e-04, 0.00022), M132 = c(0,
0.00026, 0.00048, 6e-04, 0.00063), M133 = c(3e-05, -6e-05,
-6e-05, 5e-05, 0.00019), M134 = c(0.00056, 0.00054, 0.00052,
0.00054, 0.00057), M135 = c(2e-05, -4e-05, 6e-05, 0.00031,
0.00057), M136 = c(0.00083, 0.00075, 0.00068, 0.00068, 0.00073
), M137 = c(0.00064, 0.00074, 0.00084, 0.00095, 0.00105),
M139 = c(0.00044, 0.00044, 0.00042, 0.00043, 0.00047), M140 = c(0.00138,
0.00113, 0.00102, 0.0011, 0.00121), M141 = c(0.00062, 0.00043,
2e-04, 2e-05, 0), M142 = c(-0.00022, -0.00017, -0.00014,
-1e-04, 0), M143 = c(0.00109, 0.00108, 0.00103, 0.00093,
0.00087), M144 = c(0.00104, 0.00116, 0.00117, 0.00105, 0.00085
), M145 = c(7e-04, 0.00096, 0.00109, 0.00098, 0.00069), M146 = c(0.0014,
0.00158, 0.00165, 0.00154, 0.0013), M147 = c(6e-04, 0.00071,
0.00075, 0.00072, 0.00065), M148 = c(0.00098, 0.00093, 0.00091,
9e-04, 0.00088), M149 = c(0.00055, 0.00058, 0.00054, 0.00037,
0.00017), M150 = c(7e-04, 0.00068, 8e-04, 0.00107, 0.00132
), M151 = c(0.00037, 0.00042, 0.00046, 0.00047, 0.00046),
M152 = c(0.00047, 0.00042, 0.00043, 0.00045, 0.00045), M153 = c(0.00095,
0.00088, 0.00083, 8e-04, 0.00072), M154 = c(6e-05, 0.00013,
0.00032, 0.00054, 0.00062), M155 = c(0.00061, 0.00057, 0.00043,
0.00022, 4e-05), M156 = c(0.00077, 0.00078, 0.00071, 0.00052,
0.00025), M157 = c(0.00088, 0.00078, 0.00069, 0.00063, 0.00058
), M158 = c(0.00091, 0.00085, 0.00082, 0.00081, 8e-04), M159 = c(0.00078,
0.00076, 0.00073, 0.00074, 0.00079), M160 = c(0.00068, 7e-04,
0.00075, 8e-04, 0.00079), M161 = c(0.00055, 0.00073, 0.00082,
0.00085, 9e-04), M162 = c(0.00104, 0.00111, 0.0011, 0.00104,
0.00102), M163 = c(0.00076, 0.00071, 0.00069, 0.00068, 0.00067
), M164 = c(0.0012, 0.00133, 0.00154, 0.00174, 0.00177),
M165 = c(0.00072, 0.00073, 0.00072, 0.00074, 0.00083), M166 = c(0.00067,
0.00055, 0.00035, 0.00012, -2e-05), M167 = c(0.00068, 0.00053,
0.00047, 0.00051, 0.00059), M168 = c(0.00067, 0.00092, 0.001,
0.00087, 0.00067), M169 = c(0.00124, 0.00107, 0.00101, 0.00108,
0.00118), M170 = c(0.00054, 0.00064, 0.00069, 0.00066, 0.00053
), M171 = c(0.00029, 3e-04, 3e-04, 0.00031, 3e-04), M172 = c(0.00085,
0.00091, 0.00082, 0.00063, 0.00052), M173 = c(0.00022, 0.00036,
0.00053, 0.00061, 0.00056), M174 = c(5e-04, 0.00031, 0.00021,
0.00023, 0.00031), M175 = c(0.00074, 0.00066, 0.00059, 0.00051,
0.00043), M176 = c(9e-04, 0.00062, 0.00044, 0.00039, 0.00039
), M177 = c(0.00045, 0.00038, 0.00033, 0.00035, 0.00043),
M178 = c(0.00075, 0.00092, 0.00097, 0.00086, 0.00067), M179 = c(0.00047,
0.00033, 0.00026, 3e-04, 0.00037), M180 = c(0.00083, 0.00077,
0.00074, 0.00074, 7e-04), M181 = c(0.0013, 0.00138, 0.00137,
0.00127, 0.00109), M182 = c(0.00062, 0.00049, 0.00043, 0.00042,
0.00038), M183 = c(0.00056, 4e-04, 0.00034, 0.00046, 0.00065
), M184 = c(0.00122, 0.00116, 0.00096, 0.00067, 0.00039),
M185 = c(0.00045, 0.00026, 0.00012, 1e-04, 0.00024), M187 = c(0.00078,
0.00038, 8e-05, 0, 0.00014)), row.names = c(NA, 5L), class = "data.frame")
`
I want to calculate the means of the M columns in data set B based on the Start and End columns in data set A (which correspond to the Wavenumber cm-1 column in data set B), so that each Start/End pair of values has a corresponding mean for each M column in data set B.
For example, for the Start and End values in the first row of data set A:
Start: 3963.68494, End: 3953.68645, you would calculate the mean of each M column in data set B using the absorbance values whose Wavenumber cm-1 falls in the range 3963.68494 to 3953.68645, and the results would then be stored in a separate data frame (with all the M column names) called meanData or something similar.
I can't quite figure out how to write a function/loop that would do that: take the Start and End values in data set A, look up the absorbance values in data set B that fall within that Start-End range, calculate their mean, write it into a new data frame under the corresponding M column name, and repeat this for each row of Start and End values in data set A. I know you would likely do it with an index, but I'm not sure how to write it exactly. Any help would be very much appreciated!
I tried creating different indexes for the Start and End columns and using them with [] to specify the values I want in data set B, but I was unsuccessful:
`test<-mean(B$M100[which(B$Wavenumber..cm.1.[index2[i] to B$Wavenumber..cm.1.index3[i]])`
where index2 is the Start values in data set A and index3 is the End values in data set A; this did not work.
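One straightforward approach is to loop over the rows of A and build a logical index on the wavenumber column of B, taking colMeans of the M columns for each range. A minimal sketch along those lines, assuming the column names from the dputs above (note that with only the five sample rows shown, some ranges contain no wavenumbers and will return NaN):
mCols <- grep("^M", names(B), value = TRUE)
meanData <- do.call(rbind, lapply(seq_len(nrow(A)), function(i) {
  lo <- min(A$Start[i], A$End[i])   # Start is larger than End here, so order the bounds explicitly
  hi <- max(A$Start[i], A$End[i])
  inRange <- B$Wavenumber..cm.1. >= lo & B$Wavenumber..cm.1. <= hi
  colMeans(B[inRange, mCols, drop = FALSE])   # one mean per M column for this Start/End pair
}))
meanData <- as.data.frame(meanData)   # one row per row of A, one column per M column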

Related

Colors don't stick when lollipop plot is run

I have created a lollipop chart that I love. However, when the code runs to create the plot, the colors of the lines, segments, and points all change from what they were set to. Everything else runs great, so this isn't the end of the world, but I am trying to stick with a color palette throughout a report.
The colors should be "#9a0138" (for the lines) and "#000775" (for the points and segments), but they come out differently when the plot renders. Any ideas?
Here is the data:
TabPercentCompliant <- structure(list(Provider_ShortName = c("ProviderA", "ProviderA", "ProviderA", "ProviderB",
"ProviderB", "ProviderB", "ProviderC", "ProviderC", "ProviderC", "ProviderD"), SubMeasureID = c("AMM2", "FUH7", "HDO", "AMM2", "FUH7", "HDO", "AMM2", "FUH7", "HDO", "AMM2"), AdaptedCompliant = c(139, 2, 117, 85, 1, 33, 36, 2, 22, 43), TotalEligible = c(238, 27, 155, 148, 10, 34, 61, 3, 24, 76), PercentCompliant = c(0.584033613445378, 0.0740740740740741, 0.754838709677419, 0.574324324324324, 0.1, 0.970588235294118, 0.590163934426229, 0.666666666666667, 0.916666666666667, 0.565789473684211 ), PercentTotalEligible = c(0.00516358587173479, 0.00058578495183546, 0.00336283953831467, 0.00321096936561659, 0.000216957389568689, 0.000737655124533542, 0.001323440076369, 6.50872168706066e-05, 0.000520697734964853, 0.00164887616072203), ClaimsAdjudicatedThrough = structure(c(19024, 19024, 19024, 19024, 19024, 19024, 19024, 19024, 19024, 19024 ), class = "Date"), AdaptedNCQAMean = c(0.57, 0.39, 0.93, 0.57, 0.39, 0.93, 0.57, 0.39, 0.93, 0.57), PerformanceLevel = c(0.0140336134453782, -0.315925925925926, -0.175161290322581, 0.00432432432432439, -0.29, 0.0405882352941176, 0.0201639344262295, 0.276666666666667, -0.0133333333333334, -0.00421052631578944)), row.names = c(NA, -10L), class = c("tbl_df", "tbl", "data.frame"))
VBP_Report_Date = "2022-09-01"
And the code for the plot:
Tab_PercentCompliant %>%
  filter(ClaimsAdjudicatedThrough == VBP_Report_Date) %>%
  ggplot(aes(x = Provider_ShortName,
             y = PercentCompliant)) +
  geom_line(aes(x = Provider_ShortName,
                y = AdaptedNCQAMean,
                group = SubMeasureID,
                color = "#9a0138",
                size = .001)) +
  geom_point(aes(color = "#000775",
                 size = (PercentTotalEligible))) +
  geom_segment(aes(x = Provider_ShortName,
                   xend = Provider_ShortName,
                   y = 0,
                   yend = PercentCompliant,
                   color = "#000775")) +
  facet_grid(cols = vars(SubMeasureID),
             scales = "fixed",
             space = "fixed") +
  theme_classic() +
  theme(legend.position = "none") +
  theme(panel.spacing = unit(.5, "lines"),
        panel.border = element_rect(
          color = "black",
          fill = NA,
          linewidth = .5),
        panel.grid.major.y = element_line(
          color = "gray",
          linewidth = .5),
        axis.text.x = element_text(
          angle = 65,
          hjust = 1),
        axis.title.x = element_blank(),
        axis.line = element_blank(),
        strip.background = element_rect(
          color = NULL,
          fill = "#e1e7fa")) +
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Test",
       subtitle = "Test",
       caption = "Test")
If you have an aesthetic constant, it is often easier/better to have it "outside" your aes() call. If you want a legend for your color, then you need to keep it "inside", but you will then need to set the colors manually with + scale_color_manual() / scale_fill_manual().
I've had to cut your code down quite a lot to make it work, and I've removed bits that are extraneous to the problem: the line size = 0.001 (or the line wasn't visible) and the filter step (or the plot wasn't possible with the sample data).
Tip: when you define a global aesthetic with ggplot(aes(x = ..., ...)), you don't need to repeat that aesthetic in each geom layer (it will be inherited), which makes for more concise, readable code.
library(ggplot2)
ggplot(TabPercentCompliant, aes(x = Provider_ShortName, y = PercentCompliant)) +
  geom_line(aes(y = AdaptedNCQAMean, group = SubMeasureID),
            color = "#9a0138") +
  geom_point(aes(size = PercentTotalEligible), color = "#000775") +
  geom_segment(aes(xend = Provider_ShortName, y = 0, yend = PercentCompliant),
               color = "#000775") +
  facet_grid(~SubMeasureID) +
  theme(strip.background = element_rect(color = NULL, fill = "#e1e7fa"))
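For example, a legend version could look something like this (labels inside aes() create the legend keys, and scale_color_manual assigns the actual colours):
ggplot(TabPercentCompliant, aes(x = Provider_ShortName, y = PercentCompliant)) +
  geom_line(aes(y = AdaptedNCQAMean, group = SubMeasureID, color = "NCQA mean")) +
  geom_point(aes(size = PercentTotalEligible, color = "Compliant")) +
  scale_color_manual(values = c("NCQA mean" = "#9a0138", "Compliant" = "#000775")) +
  facet_grid(~SubMeasureID)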
Here is the final code. Thanks again tjebo!
# Lollipop Chart ----------------------------------------------------------
Tab_PercentCompliant %>%
  filter(ClaimsAdjudicatedThrough == VBP_Report_Date) %>%
  ggplot(aes(x = Provider_ShortName,
             y = PercentCompliant)) +
  geom_line(aes(y = AdaptedNCQAMean,
                group = SubMeasureID),
            color = "#9a0138") +
  geom_point(aes(size = PercentTotalEligible),
             color = "#000775") +
  geom_segment(aes(xend = Provider_ShortName,
                   y = 0,
                   yend = PercentCompliant),
               color = "#000775") +
  facet_grid(cols = vars(SubMeasureID)) +
  theme_bw() +
  theme(legend.position = "none",
        axis.text.x = element_text(
          angle = 65,
          hjust = 1),
        axis.title.x = element_blank(),
        axis.line = element_blank(),
        strip.background = element_rect(
          fill = "#e1e7fa")) +
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Test",
       subtitle = "Test",
       caption = "Test")

How to optimize for a variable that goes into the argument of a function in pyomo?

I am trying to code a first order plus dead time (FOPDT) model and use it
for PID tuning. The inspiration for the work is the scipy code from: https://apmonitor.com/pdc/index.php/Main/FirstOrderOptimization
When I use model.Thetam() in the ODE constraint, it does not optimize Thetam; it stays at its initial value. When I use only model.Thetam (dropping the call) in the uf argument, i.e. model.Km * (uf(tt - model.Thetam) - model.U0), the code throws ValueError: object arrays are not supported,
and if I also drop it from the if statement (if tt > model.Thetam), then the error is ERROR:pyomo.core:Rule failed when generating expression for Constraint ode with index 0.0: PyomoException: Cannot convert non-constant Pyomo expression (Thetam < 0.0) to bool. This error is usually caused by using a Var, unit, or mutable Param in a Boolean context such as an "if" statement, or when checking container membership or equality.
Code:
`# Imports assumed from context (not shown in the original post):
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from pyomo.environ import (ConcreteModel, Var, Param, Constraint, Objective,
                           TransformationFactory, SolverFactory)
from pyomo.dae import ContinuousSet, DerivativeVar

url = 'http://apmonitor.com/pdc/uploads/Main/data_fopdt.txt'
data = pd.read_csv(url)
data = data.iloc[1:]
t = data['time'].values - data['time'].values[0]
u = data['u'].values
yp = data['y'].values
u0 = u[0]
yp0 = yp[0]
yf = interp1d(t, yp)

# specify number of steps
ns = len(t)
delta_t = t[1] - t[0]

# create linear interpolation of the u data versus time
uf = interp1d(t, u, fill_value="extrapolate")

model = ConcreteModel()
model.T = ContinuousSet(initialize=t)
model.Y = Var(model.T)
model.dYdT = DerivativeVar(model.Y, wrt=(model.T))
model.Y[0].fix(yp0)
model.Yp0 = Param(initialize=yp0)
model.U0 = Param(initialize=u0)
model.Km = Var(initialize=2, bounds=(0.1, 10))
model.Taum = Var(initialize=3, bounds=(0.1, 10))
model.Thetam = Var(initialize=0, bounds=(0, 10))
model.ode = Constraint(
    model.T,
    rule=lambda model, tt: model.dYdT[tt] == (-(model.Y[tt] - model.Yp0) + model.Km * (uf(tt - model.Thetam()) - model.U0)) / model.Taum
    if tt > model.Thetam()
    else model.dYdT[tt] == -(model.Y[tt] - model.Yp0) / model.Taum)

def obj_rule(m):
    return sum((m.Y[i] - yf(i))**2 for i in m.T)

model.obj = Objective(rule=obj_rule)

discretizer = TransformationFactory('dae.finite_difference')
discretizer.apply_to(model, nfe=500, wrt=model.T, scheme='BACKWARD')
opt = SolverFactory('ipopt', executable='/content/ipopt')
opt.solve(model)  # , tee = True
model.pprint()

model2 = ConcreteModel()
model2.T = ContinuousSet(initialize=t)
model2.Y = Var(model2.T)
model2.dYdT = DerivativeVar(model2.Y, wrt=(model2.T))
model2.Y[0].fix(yp0)
model2.Yp0 = Param(initialize=yp0)
model2.U0 = Param(initialize=u0)
model2.Km = Param(initialize=3.0145871)      # 3.2648
model2.Taum = Param(initialize=1.85862177)   # 5.2328
model2.Thetam = Param(initialize=0)          # 2.936839032, 0.1
model2.ode = Constraint(
    model2.T,
    rule=lambda model, tt: model.dYdT[tt] == (-(model.Y[tt] - model.Yp0) + model.Km * (uf(tt - model.Thetam()) - model.U0)) / model.Taum)

discretizer2 = TransformationFactory('dae.finite_difference')
discretizer2.apply_to(model2, nfe=500, wrt=model2.T, scheme='BACKWARD')
opt2 = SolverFactory('ipopt', executable='/content/ipopt')
opt2.solve(model2)  # , tee = True
# model.pprint()

t = [i for i in model.T]
ypred = [model.Y[i]() for i in model.T]
ytrue = [yf(i) for i in model.T]
yoptim = [model2.Y[i]() for i in model2.T]
plt.plot(t, ypred, 'r-')
plt.plot(t, ytrue)
plt.plot(t, yoptim)
plt.legend(['pred', 'true', 'optim'])
`
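Both errors quoted above come from handing a Pyomo Var to code that expects a plain number: scipy's interp1d cannot evaluate a symbolic expression (hence the object-array ValueError), and Python's if needs a concrete True/False rather than a Pyomo relational expression. A minimal standalone illustration of the second point (a made-up variable, not the model above):
from pyomo.environ import ConcreteModel, Var, value

m = ConcreteModel()
m.theta = Var(initialize=0.5)

expr = m.theta > 0.2        # builds a Pyomo relational expression, not True/False
# if expr: ...              # raises: Cannot convert non-constant Pyomo expression to bool
if value(m.theta) > 0.2:    # evaluates the current numeric value instead
    print("branches on the initial value only; the solver never sees this logic")
Branching on value() (or on Thetam(), as in the first model) bakes the initial guess into the constraint structure when the rule is evaluated, which is why Thetam stays at its starting value; making the dead time a genuine decision variable generally needs a reformulation rather than a Python-level if.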

PLOTLY tracegroupgap

Where do I insert tracegroupgap in this code? I have tried it inside legend = dict(...) without success.
layout = go.Layout(
    title = 'IGS',
    xaxis = dict(title = "Point", tickmode='linear', tick0=0, dtick=10),
    yaxis = dict(title = "sp, mv"),
    hovermode = 'closest',
    legend = dict(font=dict(family="Courier",
                            size=10,
                            color="black"),
                  tracegroupgap = 5  # attempted here, without success
                  )
)
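For reference, tracegroupgap is a property of layout.legend, so the legend dict above is the right place for it; it sets the vertical gap between legend groups, so it generally only shows a visible effect once the traces are assigned legendgroup values. A small self-contained sketch (made-up traces, not the original figure):
import plotly.graph_objects as go

fig = go.Figure(
    data=[
        go.Scatter(y=[1, 3, 2], name="sp A", legendgroup="A"),
        go.Scatter(y=[2, 1, 3], name="sp B", legendgroup="B"),
    ],
    layout=go.Layout(
        title="IGS",
        legend=dict(font=dict(family="Courier", size=10, color="black"),
                    tracegroupgap=25),  # gap in pixels between legend groups
    ),
)
fig.show()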

Appending tables generated from a loop

I am a new Python user and am trying to append together data that I have pulled from a PDF using Camelot, but I am having trouble getting the tables to join together.
Here is my code:
import camelot  # assumed import, per the question text

url = 'https://www.fhfa.gov/DataTools/Downloads/Documents/HPI/HPI_AT_Tables.pdf'
tables = camelot.read_pdf(url, flavor='stream', edge_tol = 500, pages = '1-end')
i = 0
while i in range(0, tables.n):
    header = tables[i].df.index[tables[i].df.iloc[:,0]=='Metropolitan Statistical Area'].to_list()
    header = str(header)[1:-1]
    header = (int(header))
    tables[i].df = tables[i].df.rename(columns = tables[i].df.iloc[header])
    tables[i].df = tables[i].df.drop(columns = {'': 'Blank'})
    print(tables[i].df)
    #appended_data.append(tables[i].df)
    #if i > 0:
    #    dfs = tables[i-1].append(tables[i], ignore_index = True)
    #pass
    i = i + 1
Any help would be much appreciated.
You can use pandas.concat() to concatenate a list of DataFrames.
import pandas as pd

i = 0
while i in range(0, tables.n):
    header = tables[i].df.index[tables[i].df.iloc[:,0]=='Metropolitan Statistical Area'].to_list()
    header = str(header)[1:-1]
    header = (int(header))
    tables[i].df = tables[i].df.rename(columns = tables[i].df.iloc[header])
    tables[i].df = tables[i].df.drop(columns = {'': 'Blank'})
    i = i + 1

# concatenate all cleaned tables into a single DataFrame
df_ = pd.concat([table.df for table in tables])

Syntax for wildcard SQL Server Merge statement

This SQL Server MERGE statement works, but it is clumsy. Is there any syntax to merge these two tables, given that they have the exact same structure? I am trying to update the Score_Data table from the Score_Import table. I have many tables to do and do not want to type them all out. Thanks.
MERGE INTO [Score_Data].[dbo].Product as Dp
USING [Score_import].[dbo].Product as Ip
ON Dp.part_no = Ip.part_no
WHEN MATCHED THEN
UPDATE
SET Dp.total = Ip.total
,Dp.description = Ip.description
,Dp.family = Ip.family
,DP.um = IP.um
,DP.new_part_no = IP.new_part_no
,DP.prod_code = IP.prod_code
,DP.sub1 = IP.sub1
,DP.sub2 = IP.sub2
,DP.ven_no = IP.ven_no
,DP.no_sell = IP.no_sell
,DP.rp_dns = IP.rp_dns
,DP.nfa = IP.nfa
,DP.loc = IP.loc
,DP.cat_desc = IP.cat_desc
,DP.cat_color = IP.cat_color
,DP.cat_size = IP.cat_size
,DP.cat_fits = IP.cat_fits
,DP.cat_brand = IP.cat_brand
,DP.cat_usd1 = IP.cat_usd1
,DP.cat_usd2 = IP.cat_usd2
,DP.cat_usd3 = IP.cat_usd3
,DP.cat_usd4 = IP.cat_usd4
,DP.cat_usd5 = IP.cat_usd5
,DP.cat_usd6 = IP.cat_usd6
,DP.cat_usd7 = IP.cat_usd7
,DP.cat_usd8 = IP.cat_usd8
,DP.cat_usd9 = IP.cat_usd9
,DP.cat_usd10 = IP.cat_usd10
,DP.cat_usd11 = IP.cat_usd11
,DP.cat_usd12 = IP.cat_usd12
,DP.cat_usd13 = IP.cat_usd13
,DP.cat_usd14 = IP.cat_usd14
,DP.cat_usd15 = IP.cat_usd15
,DP.buy = IP.buy
,DP.price_1 = IP.price_1
,DP.price_2 = IP.price_2
,DP.price_3 = IP.price_3
,DP.price_4 = IP.price_4
,DP.price_5 = IP.price_5
,DP.price_6 = IP.price_6
,DP.price_7 = IP.price_7
,DP.price_8 = IP.price_8
,DP.price_9 = IP.price_9
,DP.create_date = IP.create_date
,DP.barcode = IP.barcode
,DP.check_digit = IP.check_digit
,DP.supplier = IP.supplier
,DP.prc_fam_code = IP.prc_fam_code
,DP.note = IP.note
,DP.mfg_part_no = IP.mfg_part_no
,DP.special = IP.special
,DP.spc_price = IP.spc_price
,DP.firm = IP.firm
,DP.box = IP.box
,DP.no_split = IP.no_split
,DP.drop_ship = IP.drop_ship
,DP.case_pack = IP.case_pack
,DP.inner_pack = IP.inner_pack
WHEN NOT MATCHED BY TARGET THEN
INSERT (part_no
,description
,family
,Total
,um
,new_part_no
,prod_code
,sub1
,sub2
,ven_no
,no_sell
,rp_dns
,nfa
,loc
,cat_desc
,cat_color
,cat_size
,cat_fits
,cat_brand
,cat_usd1
,cat_usd2
,cat_usd3
,cat_usd4
,cat_usd5
,cat_usd6
,cat_usd7
,cat_usd8
,cat_usd9
,cat_usd10
,cat_usd11
,cat_usd12
,cat_usd13
,cat_usd14
,cat_usd15
,buy
,price_1
,price_2
,price_3
,price_4
,price_5
,price_6
,price_7
,price_8
,price_9
,create_date
,barcode
,check_digit
,supplier
,prc_fam_code
,note
,mfg_part_no
,special
,spc_price
,firm
,box
,no_split
,drop_ship
,case_pack
,inner_pack)
VALUES
(Ip.Part_no
,Ip.description
,Ip.family
,Ip.Total
,Ip.um
,Ip.new_part_no
,Ip.prod_code
,Ip.sub1
,Ip.sub2
,Ip.ven_no
,Ip.no_sell
,Ip.rp_dns
,Ip.nfa
,Ip.loc
,Ip.cat_desc
,Ip.cat_color
,Ip.cat_size
,Ip.cat_fits
,Ip.cat_brand
,Ip.cat_usd1
,Ip.cat_usd2
,Ip.cat_usd3
,Ip.cat_usd4
,Ip.cat_usd5
,Ip.cat_usd6
,Ip.cat_usd7
,Ip.cat_usd8
,Ip.cat_usd9
,Ip.cat_usd10
,Ip.cat_usd11
,Ip.cat_usd12
,Ip.cat_usd13
,Ip.cat_usd14
,Ip.cat_usd15
,Ip.buy
,Ip.price_1
,Ip.price_2
,Ip.price_3
,Ip.price_4
,Ip.price_5
,Ip.price_6
,Ip.price_7
,Ip.price_8
,Ip.price_9
,Ip.create_date
,Ip.barcode
,Ip.check_digit
,Ip.supplier
,Ip.prc_fam_code
,Ip.note
,Ip.mfg_part_no
,Ip.special
,Ip.spc_price
,Ip.firm
,Ip.box
,Ip.no_split
,Ip.drop_ship
,Ip.case_pack
,Ip.inner_pack)
WHEN NOT MATCHED BY SOURCE THEN
DELETE
OUTPUT $action, Inserted.*, Deleted.*;
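There is no wildcard column syntax in the MERGE statement itself; a common workaround when source and target share the same structure is to build the statement dynamically from the catalog views, once per table. A rough sketch under that assumption (STRING_AGG needs SQL Server 2017+; table and key names are taken from the question):
DECLARE @set  nvarchar(max),
        @cols nvarchar(max),
        @vals nvarchar(max),
        @sql  nvarchar(max);

-- UPDATE SET list: every column except the join key
SELECT @set = STRING_AGG(CAST('Dp.' + QUOTENAME(c.name) + ' = Ip.' + QUOTENAME(c.name) AS nvarchar(max)), ', ')
              WITHIN GROUP (ORDER BY c.column_id)
FROM Score_Data.sys.columns AS c
WHERE c.object_id = OBJECT_ID(N'Score_Data.dbo.Product')
  AND c.name <> 'part_no';

-- INSERT column list
SELECT @cols = STRING_AGG(CAST(QUOTENAME(c.name) AS nvarchar(max)), ', ')
               WITHIN GROUP (ORDER BY c.column_id)
FROM Score_Data.sys.columns AS c
WHERE c.object_id = OBJECT_ID(N'Score_Data.dbo.Product');

-- matching VALUES list, in the same column order
SELECT @vals = STRING_AGG(CAST('Ip.' + QUOTENAME(c.name) AS nvarchar(max)), ', ')
               WITHIN GROUP (ORDER BY c.column_id)
FROM Score_Data.sys.columns AS c
WHERE c.object_id = OBJECT_ID(N'Score_Data.dbo.Product');

SET @sql = N'
MERGE INTO [Score_Data].[dbo].Product AS Dp
USING [Score_import].[dbo].Product AS Ip
   ON Dp.part_no = Ip.part_no
WHEN MATCHED THEN
    UPDATE SET ' + @set + N'
WHEN NOT MATCHED BY TARGET THEN
    INSERT (' + @cols + N') VALUES (' + @vals + N')
WHEN NOT MATCHED BY SOURCE THEN
    DELETE
OUTPUT $action;';

EXEC sys.sp_executesql @sql;
The same pattern can be pointed at each table pair (or driven by a loop over the table names) instead of hand-typing every column list.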