How can I use fmincon() for different input parameters without using for loop? - optimization

I want to run the optimization function fmincon() over thousands of different input parameters. Briefly, the aim of the optimization is to find the optimal consumption and investment strategy that give the highest utility for a given wealth. The basic set up and functions are given as follows:
library(pracma)
library(NlcOptim)
# individual preference parameters
gamma <- 5
beta <- 0.02
Y <- 1
# financial market parameters
r <- 0.02
mu <- 0.06
sigma <- 0.2
lambda <- (mu-r)/sigma
# Merton fraction
w_star <- lambda / (gamma*sigma)
# fix random seed
set.seed(85)
scenarios <- 1000
Z_omega <- array(rnorm(scenarios,0,1), dim=c(scenarios,1)) # Brownian motion vector for E[J(W)]
# J multiple
multiple <- 1000000000
fineness <- 0.01
# define utility function
u <- function(C) {
C^(1-gamma)/(1-gamma)
}
# wealth scenario at t+1 for a given W_t
W.next <- function(W,C,fstar) {
W.tplus1 <- exp(r + fstar*sigma*lambda - 0.5*fstar^2*sigma^2 + fstar*sigma*Z_omega) * (W + Y - C)
return(W.tplus1)
}
J.simulate <- function(W.tplus1) {
floor.number <- floor((round_any(W.tplus1, fineness, f=floor) * 1/fineness)) + 1
ceiling.number <- ceiling((round_any(W.tplus1, fineness, f=ceiling) * 1/fineness)) + 1
x1 <- G_T[floor.number]
x2 <- G_T[ceiling.number]
y1 <- J_WT[floor.number]
y2 <- J_WT[ceiling.number]
# linear interpolation for J
J.tplus1.simulate <- y1 + ((W.tplus1-x1)/(x2-x1) * (y2-y1))
return(J.tplus1.simulate)
}
# define h(C,f|W)
h_t <- function(Cfstar) {
C <- Cfstar[1]
fstar <- Cfstar[2]
# wealth scenario at t+1 for a given W_t
W.tplus1 <- W.next(W,C,fstar)
# compute indirect utility for simulated W_t+1 using already compute J_WT
J.tplus1.simulate <- J.simulate(W.tplus1) # ignore wealth less than 0.001 (it can never be optimal)
# expectation of all J(W_t+1)
J_t_plus_1 <- mean(J.tplus1.simulate, na.rm=TRUE) # ignore NAs
# function h_t
indirect_utility <- log(-(u(C) + exp(-beta) * J_t_plus_1)*multiple)
return(indirect_utility)
}
For the sake of simplicity, I generated 10 wealth levels, W, to be optimized:
# wealth grid at T
G_T <- c(0.001, seq(0.01, 3, by=0.01))
J_1T <- -291331.95
J_WT <- G_T^(1-gamma) * J_1T
# wealth to be optimized
W_optim <- seq(0.01, 0.1, by=0.01)
What I did using the for loop is as follows:
# number of loop
wealth.loop <- length(W_optim)
# result vectors
C_star <- numeric(wealth.loop)
f_star <- numeric(wealth.loop)
J <- numeric(wealth.loop)
# lowerbound is fixed
lowerbound <- c(0.01,0.0001)
# optimize!
for (g in 1:wealth.loop) {
W <- W_optim[g]
x0 <- c((W+Y)*0.05,w_star) # initial input vector
upperbound <- c(W+Y-0.01,1) # upperbound depending on W
optimization <- fmincon(x0=x0, fn=h_t, lb=lowerbound, ub=upperbound, tol=1e-10)
C_star[g] <- optimization$par[1]
f_star[g] <- optimization$par[2]
J[g] <- optimization$value
print(c(g,optimization$par[1],optimization$par[2]))
}
This works well, but it takes hours to optimize over more than hundred of thousands set of different parameters. Hence, I was looking for some smarter ways of doing this, like using apply-related functions. For instance, I tried:
W <- W_optim
# input matrix
x0 <- matrix(0, nrow=length(W), ncol=2)
x0[,1] <- (W+Y)*0.05
x0[,2] <- w_star
# lowerbound the same
lowerbound <- c(0.01,0.0001)
# upperbound matrix
upperbound <- matrix(0, nrow=length(W), ncol=2)
upperbound[,1] <- W+Y-0.01
upperbound[,2] <- 1
# optimize using mapply
mapply(fmincon, x0=x0, fn=h_t, lb=lowerbound, up=upperbound)
But obviously it doesn't work. I'm not sure whether the problem is using matrix as input parameters, not vector, or I'm just using a wrong function. Is there any way to solve this problem with an efficient & smart coding?
I tried to optimize over the different parameters at once using mapply, but apparently it didn't work. Maybe I should have used another apply-related function or I should make a different structure for the input matrix?

Related

Portfolio Optimization Using Quadprog Gives the Same Result for Every time even after changing variables

I have a task to construct the efficient frontier using 25 portfolios (monthly data). I tired writing a quadprog code for calculating minimum variance portfolio weights for a given expected rate of return. However, regardless of the expected return, the solver values give me the same set weights and variance, which the global minimum variance portfolio. I found the answer using an analytical solution. Attached are the codes:
basedf <- read.csv("test.csv", header = TRUE, sep = ",")
data <- basedf[,2:26]
ret <- as.data.frame(colMeans(data))
variance <- diag(var(data))
covmat <-as.matrix(var(data))
###minimum variance portfolio calculation
Q <- 2*cov(data)
A <- rbind(rep(1,25))
a <- 1
result <- solve.QP(Dmat = Q,
dvec = rep(0,25),
Amat = t(A),
bvec = a,
meq = 1)
w <-result$solution
w
var <- result$value
var
sum(w)
this is another set of codes giving the me same value::
mvp <- function(e,ep){
Dmat <- 2*cov(e)
dvec <- rep(0, ncol(e))
Amat <- cbind(rep(1, ncol(e)), colMeans(e))
bvec <- c(1, ep)
result <- solve.QP(Dmat = Dmat, dvec = dvec, Amat = Amat, bvec = bvec, meq=1)
wp <- result$solution
varP <- result$value
ret_values <- list(wp, varP)
names(ret_values) <- c("wp", "VarP")
return(ret_values)
}
z <- mvp(data, -.005)
z$wp
sum(z$wp)
z$VarP
ef <- function(e, min_e, max_e){
list_e <- seq(min_e,max_e, length=50)
loop <- sapply(list_e, function(x) mvp(e, x)$VarP)
effF <- as.data.frame(cbind(list_e,loop))
minvar <- min(effF$loop)
L <- effF$loop==minvar
minret <- effF[L,]$list_e
minpoint <- as.data.frame(cbind(minret,minvar))
minvarwp <- mvp(e, min_e)$wp
rlist <- list(effF, minpoint, minvarwp)
names(rlist) <- c( "eFF", "minPoint", "wp")
return(rlist)
}
in the efficient frontier, all the 50 portfolios have same level of variance. can anyone tell me whats wrong with solver equation??? thanks.
I tried quadprog but couldnt solve it.

Solve MLE for Vasicek Interest model but constantly run into an error "Error in if (!all(lower[isfixed] <= fixed[isfixed] & fixed[isfixed]..."

I tried to obtain MLEs of the Vasicek function using the following function.
I am running into into the following error constantly and I have no way to solve it. Please help me. Thanks!
Error in if (!all(lower[isfixed] <= fixed[isfixed] & fixed[isfixed] <= :
missing value where TRUE/FALSE needed
Here is the background:
Likelihood function
likehood.Vasicek<-function (theta, kappa, sigma, rt){
n <- NROW(rt)
y <- rt[2:n,] # Take rates other than r0
dt <- 1/12 # Simulated data is monthly
mu <- rt[1:(n-1),]* exp(-kappa*dt) + theta* (1- exp(-kappa*dt)) #Take prior rates for mu calculation
sd <- sqrt((sigma^2)*(1-exp(-2*kappa*dt))/(2*kappa))
pdf_yt <- dnorm(y, mu, sd, log = FALSE)
- sum(log(pdf_yt))
}
Simulating scenarios
IRModeling.Vasicek = function(r0, theta, kappa, sigma, T, N){
M <- T*12 # monthly time step
t <- 1/12 # time interval is monthly
rt = matrix(0, M+1, N) # N sets of scenarios with M months of time steps
rt[1,] <- r0 # set the initial value for each of the N scenarios
for (i in 1:N){
for (j in 1:M){
rt[j+1,i] = rt[j,i] + kappa*(theta - rt[j,i])*t + sigma*rnorm(1,mean=0,sd=1)*sqrt(t)
}
}
rt # Return the values
}
MLE
r0 = 0.03
theta = 0.03
kappa = 0.3
sigma = 0.03
T = 5 # years
N = 500
rt = IRModeling.Vasicek (r0, theta, kappa, sigma, T, N)
theta.est <- 0.04
kappa.est <- 0.5
sigma.est <- 0.02
parameters.est <- c(theta.est, kappa.est, sigma.est)
library(stats4)
bound.lower <- parameters.est*0.1
bound.upper <- parameters.est*2
est.mle<-mle(likelihood.Vasicek, start= list(theta = theta.est, kappa = kappa.est, sigma = sigma.est),
method="L-BFGS-B", lower=bound.lower, upper= bound.upper, fixed = list(rt = rt))
summary(est.mle)
Error
Error in if (!all(lower[isfixed] <= fixed[isfixed] & fixed[isfixed] <= :
missing value where TRUE/FALSE needed

How to plot two different color scales for two geom_points from two different dataframes in ggplot2?

I am trying to plot two datasets on the same graph. Both data are plotted using geom_point, and I want to separately represent the sizes and color by the z values.
x <- c(2,3,4,5)
y <- c(1.1,1.2,1.3,1.4)
z <- c(1,2,2,3)
x3 <- c(4,5,6,7)
y3 <- c(3.1,3.2,3.3,3.2)
z3<- c(1,2,3,4)
p1 <- data.frame(x=x,y=y,z=z)
p3 <- data.frame(x=x3,y=y3,z=z3)
s <- ggplot()+
geom_point(data= p1, aes(x=x,y=y, color=z, size=z))+
geom_point(data=p3, aes(x3,y=y3, color=z, size=z3))
How to I get continuous scale of colors and sizes separately to both geom_point? For example, z is scale_colour_gradient(low = "black", high = "red") and z3 is scale_colour_gradient(low = "light blue", high = "purple"). Similarly for sizes.
Thank you!
One of the easy ways would be with the ggnewscale package:
library(ggplot2)
library(ggnewscale)
x <- c(2,3,4,5)
y <- c(1.1,1.2,1.3,1.4)
z <- c(1,2,2,3)
x3 <- c(4,5,6,7)
y3 <- c(3.1,3.2,3.3,3.2)
z3<- c(1,2,3,4)
p1 <- data.frame(x=x,y=y,z=z)
p3 <- data.frame(x=x3,y=y3,z=z3)
s <- ggplot()+
geom_point(data= p1, aes(x=x,y=y, color=z, size=z))+
scale_colour_gradient(low = "black", high = "red") +
new_scale_colour() + # Define scales before initiating a new one
scale_size() +
new_scale("size") +
geom_point(data=p3, aes(x3,y=y3, color=z, size=z3)) +
scale_colour_gradient(low = "dodgerblue", high = "purple") +
scale_size()
s
Created on 2020-05-28 by the reprex package (v0.3.0)

rjags error Invalid vector argument to ilogit

I'd like to compare a betareg regression vs. the same regression using rjags
library(betareg)
d = data.frame(p= sample(c(.1,.2,.3,.4),100, replace= TRUE),
id = seq(1,100,1))
# I am looking to reproduce this regression with jags
b=betareg(p ~ id, data= d,
link = c("logit"), link.phi = NULL, type = c("ML"))
summary(b)
Below I am trying to do the same regression with rjags
#install.packages("rjags")
library(rjags)
jags_str = "
model {
#model
y ~ dbeta(alpha, beta)
alpha <- mu * phi
beta <- (1-mu) * phi
logit(mu) <- a + b*id
#priors
a ~ dnorm(0, .5)
b ~ dnorm(0, .5)
t0 ~ dnorm(0, .5)
phi <- exp(t0)
}"
id = d$id
y = d$p
model <- jags.model(textConnection(jags_str),
data = list(y=y,id=id)
)
update(model, 10000, progress.bar="none"); # Burnin for 10000 samples
samp <- coda.samples(model,
variable.names=c("mu"),
n.iter=20000, progress.bar="none")
summary(samp)
plot(samp)
I get an error on this line
model <- jags.model(textConnection(jags_str),
data = list(y=y,id=id)
)
Error in jags.model(textConnection(jags_str), data = list(y = y, id = id)) :
RUNTIME ERROR:
Invalid vector argument to ilogit
Can you advise
(1) how to fix the error
(2) how to set priors for the beta regression
Thank you.
This error occurs because you have supplied the id vector to the scalar function logit. In Jags inverse link functions cannot be vectorized. To address this, you need to use a for loop to go through each element of id. To do this I would probably add an additional element to your data list that denotes how long id is.
d = data.frame(p= sample(c(.1,.2,.3,.4),100, replace= TRUE),
id = seq(1,100,1), len_id = length(seq(1,100,1)))
From there you just need to make a small edit to your jags code.
for(i in 1:(len_id)){
y[i] ~ dbeta(alpha[i], beta[i])
alpha[i] <- mu[i] * phi
beta[i] <- (1-mu[i]) * phi
logit(mu[i]) <- a + b*id[i]
}
However, if you track mu it is going to be a matrix that is 20000 (# of iterations) by 100 (length of id). You are likely more interested in the actual parameters (a, b, and phi).

Spline in JAGS mixing badly

I have a model that calculates a spline for Mark-recapture data with survival data. The model is working fine, but the parameters that calculates the spline are mixing super badly.
mean 2.5% 97.5% That n.eff
...
m[1] 1.667899656 -0.555606 4.18479 2.8829 4
m[2] 1.293023680 -0.951046 3.90294 2.8476 4
m[3] 1.717855378 -0.484097 4.23105 2.8690 4
m[4] 1.723899423 -0.474260 4.23869 2.8686 4
m[5] 1.747050770 -0.456455 4.26314 2.8578 4
...
Basically, I'm calculating a recapture rate p composed of a species specific effect p.sp and the sampling effort p.effort. I also calculate a fitness component phi with a species specific term phi.sp, the effect of year phi.year, a climate factor phi.sum.preci and the spline m.
run.model <- function(d, ## incoming data (packaged up in src/analyses.R)
ni=1100, ## number of iterations to run ## number of draws per chain
nt=10, ## thinning rate ##to save space on computer disk space see p.61 Kéry
nb=100, ## burn in ## should be large enough to discard initial part of Markov chains that have not yet converged
nc=3, ## number of chains to run ## multiple chain to check the convergence
n.cluster = 3) {
model.jags <- function() {
## Priors ------------------------------------------------------------------
## Random effect species-specific intercept (survival)
mu.phi.sp ~ dnorm(0,0.01)
sigma.phi.sp ~ dunif(0,10)
tau.phi.sp <- 1/(sigma.phi.sp)^2
## Random effect for recapture rate
mu.p.sp ~ dnorm(0,0.01)
## Random effect of year and fixed effect of precipitation & abundance
sigma.phi.year ~ dunif(0,10)
tau.phi.year <- 1/(sigma.phi.year)^2
## fixed effect of effort
p.effort ~ dnorm(0, 0.01) ## fixed effect
## Fixed precipitation per year
phi.sum.preci ~ dnorm(0, 0.01) ## fixed effect
# Prior spline ------------------------------------------------------------
###BEGIN SPLINE###
# prior distribution for the fixed effects parameters
for (l in 1:3) {
beta[l] ~ dnorm(0,0.1)
}
prior.scaleeps <- 1
xi ~ dnorm(0, tau.xi)
tau.xi <- pow(prior.scaleeps, -2)
for (k in 1:nknotsb) {
b[k] <- yi*etab[k]
etab[k] ~ dnorm(0, tau.etab) # hierarchical model for theta
} # closing k
prior.scaleb <- 1
yi ~ dnorm (0, tau.yi)
tau.yi <- pow(prior.scaleb, -2)
tau.etab ~ dgamma(.5, .5) # chi^2 with 1 d.f.
sigmab <- abs(xi)/sqrt(tau.etab) # cauchy = normal/sqrt(chi^2)
###END SPLINE###
for(sp in 1:nsp) {
## Random species-specific intercept
phi.sp[sp] ~ dnorm(mu.phi.sp, tau.phi.sp)
## Random recapture rate
p.sp[sp] <- mu.p.sp # Changed from a comment from Luke Jan. 9 2017
}
for (yr in 1:nyear) {
## random year
phi.year[yr] ~ dnorm(0, tau.phi.year)
}
## Likelihood!
for(sp in 1:nsp) { ## per species
## Rates -------------------------------------------------------------------
## recapture rate
for (yr in 1:nyear) {
logit(p[sp,yr]) <- # added logit here
p.sp[sp] +
p.effort*effort[yr]
} ## closing for (year in 1:nyear)
} ## closing for (sp in 1:nsp)
## Each ID ----------------------------------------------------------------
## Likelihood!
for(ind in 1:nind) { ## nind = nrow(d$X)
### BEGIN SPLINE ###
## mean function model
m[ind] <-mfe[ind] + mre1[ind] + mre2[ind]
# fixed effect part
mfe[ind] <- beta[1] * Xfix[ind,1] +beta[2] * Xfix[ind,2] + beta[3] * Xfix[ind,3]
mre1[ind] <- b[1]*Z[ind,1] + b[2]*Z[ind,2] + b[3]*Z[ind,3] + b[4]*Z[ind,4] + b[5]*Z[ind,5] + b[6]*Z[ind,6] + b[7]*Z[ind,7] + b[8]*Z[ind,8] + b[9]*Z[ind,9] + b[10]*Z[ind,10]
mre2[ind] <- b[11]*Z[ind,11] + b[12]*Z[ind,12] + b[13]*Z[ind,13] + b[14]*Z[ind,14] + b[15]*Z[ind,15]
###END SPLINE###
}
## for each individual
for(ind in 1:nind) { ## nind = nrow(d$X)
for(yr in 1:nyear) {
logit(phi[ind,yr]) <-
phi.sp[species[ind]] + ## effect of species
phi.year[yr] + ## effect of year
# Effect of the traits on survival values
m[ind]+ # spline
phi.sum.preci*sum.rainfall[yr] # effect of precipitation per sampling event
} ## (yr in 1:nyear)
## First occasion
for(yr in 1:first[ind]) {
z[ind,yr] ~ dbern(1)
} ## (yr in 1:first[ind])
## Subsequent occasions
for(yr in (first[ind]+1):nyear) { # (so, here, we're just indexing from year "first+1" onwards).
mu.z[ind,yr] <- phi[ind,yr-1]*z[ind,yr-1]
z[ind,yr] ~ dbern(mu.z[ind,yr])
## Observation process
sight.p[ind,yr] <- z[ind,yr]*p[species[ind],yr] ## sightp probability of something to be seen
X[ind,yr] ~ dbern(sight.p[ind,yr]) ## X matrix : ind by years
} ## yr
} ## closing for(ind in 1:nind)
} ## closing model.jags function
## Calling Jags ------------------------------------------------------------
jags.parallel(data = d$data,
inits = d$inits,
parameters.to.save = d$params,
model.file = model.jags,
n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb,
working.directory = NULL,
n.cluster = n.cluster)
} ## closing the run.model function
# Monitored parameters ----------------------------------------------------
get.params <- function()
c('phi.sp','mu.phi.sp','sigma.phi.sp','mu.p.sp','sigma.p.sp','phi.year','phi','p', 'phi.sum.preci','p.sp','p.effort','z',
# Spline parameters
"m","sigmab","b","beta")