OMPR - Writing scalable constraints elegantly

Just some brief background:
There are 10 cement plants producing two kinds of product (OPC, PPC) and supplying material to d demand locations by road or by rail (rail shipments are loaded in multiples of x tonnes). The cost matrix LC has sources as rows and demand destinations as columns. Since a source is defined as a combination of plant, product, and transport mode, while the capacity constraint applies at the plant-product level, I came up with the hack below to combine the constraints accordingly.
Below is the working code.
library(ompr)
library(ompr.roi)
library(ROI.plugin.glpk)
library(listcomp)
is_connected <- function(i, j) {
LC[i, j] > 0
}
railroutes = grep(":W:",rownames(LC))
d = demand[demand$DPP %in% colnames(LC),"DEMAND"]
rakemultiplier = capacity[capacity$SOURCE %in% rownames(LC),"Multiplier"]
p1OPC = which(grepl(Plants[1],rNames)&grepl("OPC",rNames))
p2OPC = which(grepl(Plants[2],rNames)&grepl("OPC",rNames))
p3OPC = which(grepl(Plants[3],rNames)&grepl("OPC",rNames))
p4OPC = which(grepl(Plants[4],rNames)&grepl("OPC",rNames))
# p5OPC = which(grepl(Plants[5],rNames)&grepl("OPC",rNames))
# p6OPC = which(grepl(Plants[6],rNames)&grepl("OPC",rNames))
p7OPC = which(grepl(Plants[7],rNames)&grepl("OPC",rNames))
p8OPC = which(grepl(Plants[8],rNames)&grepl("OPC",rNames))
p9OPC = which(grepl(Plants[9],rNames)&grepl("OPC",rNames))
p10OPC = which(grepl(Plants[10],rNames)&grepl("OPC",rNames))
p1PPC = which(grepl(Plants[1],rNames)&grepl("PPC",rNames))
p2PPC = which(grepl(Plants[2],rNames)&grepl("PPC",rNames))
p3PPC = which(grepl(Plants[3],rNames)&grepl("PPC",rNames))
p4PPC = which(grepl(Plants[4],rNames)&grepl("PPC",rNames))
p5PPC = which(grepl(Plants[5],rNames)&grepl("PPC",rNames))
p6PPC = which(grepl(Plants[6],rNames)&grepl("PPC",rNames))
p7PPC = which(grepl(Plants[7],rNames)&grepl("PPC",rNames))
p8PPC = which(grepl(Plants[8],rNames)&grepl("PPC",rNames))
p9PPC = which(grepl(Plants[9],rNames)&grepl("PPC",rNames))
p10PPC = which(grepl(Plants[10],rNames)&grepl("PPC",rNames))
set.seed(40)
a = Sys.time()
model <- MIPModel() |>
add_variable(x[i, j], i = 1:nrow(LC), j = 1:ncol(LC), is_connected(i, j), lb = 0, type = "continuous") |>
add_variable(y[i], lb = 0, type = "integer", i = railroutes) |>
add_variable(z1[j],lb = 0,ub = pub,type = "continuous", j = 1:ncol(LC)) |>
add_variable(z2[j],lb = 0,ub = nub,type = "continuous", j = 1:ncol(LC)) |>
add_constraint(sum_over(x[i,j],i = 1:nrow(LC),is_connected(i, j))+z1[j]-z2[j] == d[j],j = 1:ncol(LC)) |>
add_constraint(sum_expr(x[i,j],j = 1:ncol(LC),is_connected(i,j))/rakemultiplier[i] == y[i], i = railroutes) |>
add_constraint(sum_expr(x[i,j],j = 1:ncol(LC),is_connected(i,j)) == y[i]*rakemultiplier[i], i = railroutes) |>
add_constraint(sum_over(x[i,j], i = p1OPC, j = 1:ncol(LC),is_connected(i,j)) <= 120000) |>
add_constraint(sum_over(x[i,j], i = p2OPC, j = 1:ncol(LC),is_connected(i,j)) <= 120000) |>
add_constraint(sum_over(x[i,j], i = p3OPC, j = 1:ncol(LC),is_connected(i,j)) <= 120000) |>
add_constraint(sum_over(x[i,j], i = p4OPC, j = 1:ncol(LC),is_connected(i,j)) <= 120000) |>
# add_constraint(sum_over(x[i,j], i = p5OPC, j = 1:ncol(LC),is_connected(i,j)) <= 0) |>
# add_constraint(sum_over(x[i,j], i = p6OPC, j = 1:ncol(LC),is_connected(i,j)) <= 0) |>
add_constraint(sum_over(x[i,j], i = p7OPC, j = 1:ncol(LC),is_connected(i,j)) <= 175000) |>
add_constraint(sum_over(x[i,j], i = p8OPC, j = 1:ncol(LC),is_connected(i,j)) <= 175000) |>
add_constraint(sum_over(x[i,j], i = p9OPC, j = 1:ncol(LC),is_connected(i,j)) <= 175000) |>
add_constraint(sum_over(x[i,j], i = p10OPC, j = 1:ncol(LC),is_connected(i,j)) <= 175000) |>
add_constraint(sum_over(x[i,j], i = p1PPC, j = 1:ncol(LC),is_connected(i,j)) <= 75000) |>
add_constraint(sum_over(x[i,j], i = p2PPC, j = 1:ncol(LC),is_connected(i,j)) <= 75000) |>
add_constraint(sum_over(x[i,j], i = p3PPC, j = 1:ncol(LC),is_connected(i,j)) <= 75000) |>
add_constraint(sum_over(x[i,j], i = p4PPC, j = 1:ncol(LC),is_connected(i,j)) <= 75000) |>
add_constraint(sum_over(x[i,j], i = p5PPC, j = 1:ncol(LC),is_connected(i,j)) <= 75000) |>
add_constraint(sum_over(x[i,j], i = p6PPC, j = 1:ncol(LC),is_connected(i,j)) <= 150000) |>
add_constraint(sum_over(x[i,j], i = p7PPC, j = 1:ncol(LC),is_connected(i,j)) <= 150000) |>
add_constraint(sum_over(x[i,j], i = p8PPC, j = 1:ncol(LC),is_connected(i,j)) <= 150000) |>
add_constraint(sum_over(x[i,j], i = p9PPC, j = 1:ncol(LC),is_connected(i,j)) <= 150000) |>
add_constraint(sum_over(x[i,j], i = p10PPC, j = 1:ncol(LC),is_connected(i,j)) <= 150000) |>
set_objective(sum_over(x[i,j]*LC[i,j]+z1[j]+z2[j],i = 1:nrow(LC),j = 1:ncol(LC),is_connected(i, j)) ,"min")
model
print(Sys.time() - a)
a = Sys.time()
solution1 = model %>% solve_model(with_ROI(solver = "glpk",verbose=T))
print(Sys.time() - a)
rm(a)
This works perfectly fine, but one thing I am unhappy about is that the code has to be changed by hand whenever the original data is subsetted. Also, when a plant does not produce a certain product, I have to comment out the corresponding constraint because the index vector comes back empty, i.e. integer(0) (see p5OPC and p6OPC above).
Is there a clever way to combine these expressions into something that scales to any number of plants and handles the empty (integer(0)) cases automatically, so the code becomes more readable?
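For illustration, here is an untested sketch of one way to generate the plant-product capacity constraints in a loop. capacity_limit is a hypothetical lookup (plants in rows, products in columns) holding the 120000/175000/75000/150000 caps used above; empty index vectors are simply skipped, so nothing has to be commented out:
products <- c("OPC", "PPC")
for (p in seq_along(Plants)) {
  for (prod in products) {
    # rows of LC belonging to this plant/product combination
    idx <- which(grepl(Plants[p], rNames) & grepl(prod, rNames))
    if (length(idx) == 0) next  # plant does not produce this product
    model <- model |>
      add_constraint(
        sum_over(x[i, j], i = idx, j = 1:ncol(LC), is_connected(i, j)) <=
          capacity_limit[p, prod]
      )
  }
}
With something like this, subsetting the data only changes Plants, rNames and capacity_limit; the model-building code itself stays the same.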


Used MLESAC but got a poor result

MLESAC is supposed to improve on RANSAC by scoring hypotheses with a likelihood rather than by counting inliers (Torr and Zisserman 2000), so in principle there is no reason to use RANSAC if MLESAC is available. But when I applied it to a plane-fitting problem, I got a worse result than with RANSAC: the p_i values come out nearly identical when I substitute the distance error of each datum into equation 19, which leads to a wrong negative log-likelihood.
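For reference, the mixture model implemented by the Gaussian and Uniform terms in the code below (which is what I take equation 19 to be) is, for distance error e_i, mixing coefficient gamma, noise scale sigma and uniform-outlier constant nu (the bounding-box diagonal in the code):
-L = -\sum_i \log\!\left( \gamma\,\frac{1}{\sqrt{2\pi}\,\sigma}\exp\!\left(-\frac{e_i^{2}}{2\sigma^{2}}\right) + \frac{1-\gamma}{\nu} \right)
with gamma re-estimated by the short EM loop before -L is evaluated.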
%% MLESAC (REF.PCL)
% data
clc;clear; close all;
f = @(a_hat,b_hat,c_hat,x,y) a_hat.*x + b_hat.*y + c_hat; % z
a = 1;
b = 1;
c = 20;
width = 10;
range = (-width:0.01:width)'; % different from linspace
x = -width+(width-(-width))*rand(length(range),1); % r = a + (b-a).*rand(N,1)
y = -width+(width-(-width))*rand(length(range),1);
X = (-width:0.5:width)';
Y = (-width:0.5:width)';
[X,Y] = meshgrid(X,Y); % for drawing surf
Z = f(a/c,b/c,c/c,X,Y);
z_n = f(a/c,b/c,c/c,x,y); % z/c
% add noise
r = 0.3;
noise = r*randn(size(x));
z_n = z_n + noise;
% add outliers
out_rng = find(y>=8,200);
out_udel = 5;
z_n(out_rng) = z_n(out_rng) + out_udel;
plot3(x,y,z_n,'b.');hold on;
surf(X,Y,Z);hold on;grid on ;axis equal;
p_n = [x y z_n];
num_pt = size(p_n,1);
% compute sigma = median(dist (x - median (x)))
threshold = 0.3; %%%%%%%%% user-defined
medianx = median(p_n(:,1));
mediany = median(p_n(:,2));
medianz = median(p_n(:,3));
medianp = [medianx mediany medianz];
mediadist = median(sqrt(sum((p_n - medianp).*(p_n - medianp),2)));
sigma = mediadist * threshold;
% compute the bounding box diagonal
maxx = max(p_n(:,1));
maxy = max(p_n(:,2));
maxz = max(p_n(:,3));
minx = min(p_n(:,1));
miny = min(p_n(:,2));
minz = min(p_n(:,3));
bound = [maxx maxy maxz]-[minx miny minz];
v = sqrt(sum(bound.*bound,2));
%% iteration
iteration = 0;
num_inlier = 0;
max_iteration = 10000;
max_num_inlier = 0;
k = 1;
s = 5; % number of sample point
probability = 0.99;
d_best_penalty = 100000;
dist_scaling_factor = -1 / (2.0*sigma*sigma);
normalization_factor = 1 / (sqrt(2*pi)*sigma);
Gaussian = @(gamma,disterr,sig) gamma * normalization_factor * exp(disterr.^2*dist_scaling_factor);
Uniform = @(gamma,v) (1-gamma)/v;
while(iteration < k)
% get sample
rand_var = randi([1 length(x)],s,1);
% find coeff. & inlier
A_rand = [p_n(rand_var,1:2) ones(size(rand_var,1),1)];
y_est = p_n(rand_var,3);
Xopt = pinv(A_rand)*y_est;
disterr = abs(sum([p_n(:,1:2) ones(size(p_n,1),1)].*Xopt',2) - p_n(:,3))./sqrt(dot(Xopt',Xopt'));
inlier = find(disterr <= threshold);
outlier = find(disterr >= threshold);
num_inlier = size(inlier,1);
outlier_num = size(outlier,1);
% EM
gamma = 0.5;
iterations_EM = 3;
for i = 1:iterations_EM
% Likelihood of a datum given that it is an inlier
p_i = Gaussian(gamma,disterr,sigma);
% Likelihood of a datum given that it is an outlier
p_o = Uniform(gamma,v);
zi = p_i./(p_i + p_o);
gamma = sum(zi)/num_pt;
end
% Find the log likelihood of the mode -L
d_cur_penalty = -sum(log(p_i+p_o));
if(d_cur_penalty < d_best_penalty)
d_best_penalty = d_cur_penalty;
% record inlier
best_inlier = p_n(inlier,:);
max_num_inlier = num_inlier;
best_model = Xopt;
% Adapt k
w = max_num_inlier / num_pt;
p_no_outliers = 1 - w^s;
k = log(1-probability)/log(p_no_outliers);
end
% RANSAC
% if (num_inlier > max_num_inlier)
% max_num_inlier = num_inlier;
% best_model = Xopt;
%
% % Adapt k
% w = max_num_inlier / num_pt;
% p_no_outliers = 1 - w^s;
% k = log(1-probability)/log(p_no_outliers);
% end
iteration = iteration + 1;
if iteration > max_iteration
break;
end
end
a_est = best_model(1,:);
b_est = best_model(2,:);
c_est = best_model(3,:);
Z_opt = f(a_est,b_est,c_est,X,Y);
new_sur = mesh(X,Y,Z_opt,'edgecolor', 'r','FaceAlpha',0.5); % estimate
title('MLESAC',sprintf('original: a/c = %.2f, b/c = %.2f, c/c = %.2f\n new: a/c = %.2f, b/c = %.2f, c/c = %.2f',a/c,b/c,c/c,a_est,b_est,c_est));
My code follows the PCL MLESAC implementation as a reference; I wrote it in MATLAB.

echarts4r: multiple y axes on different grids

I would like to get a chart with two grids, one of which has two y axes: a and b on one grid with two y axes, and c, d, and e on the other grid.
I tried numbering the axes 0, 1 and 2, but it doesn't work. I would need to add a second y axis, as in the picture below.
Thanks
df <- data.frame(
  x = 1:20,
  a = runif(20, 10, 100),
  b = runif(20, 10, 1000),
  c = runif(20, 10, 300),
  d = runif(20, 10, 300),
  e = runif(20, 10, 300)
)
df |>
  e_charts(x) |>
  e_line(a, x_index = 0, y_index = 0) |>
  e_line(b, x_index = 0, y_index = 0) |>
  e_bar(c, stack = "grp", x_index = 1, y_index = 1) |>
  e_bar(d, stack = "grp", x_index = 1, y_index = 1) |>
  e_bar(e, stack = "grp", x_index = 1, y_index = 1) |>
  # e_bar(H1N1pdm, stack = "grp", x_index = 1, y_index = 1) |>
  # e_bar(Flu_B_Victoria, stack = "grp", x_index = 1, y_index = 1) |>
  # e_bar(H1N1pdm_Flu_B, stack = "grp", x_index = 1, y_index = 1) |>
  e_grid(height = "25%") |>
  e_grid(height = "25%", top = "50%") |>
  e_y_axis(gridIndex = 1) |>
  e_x_axis(gridIndex = 1) |>
  e_tooltip(trigger = "axis") |>
  e_datazoom(x_index = c(0, 1))
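An untested sketch of the indexing I would try, building on the attempt above: give b its own y axis (index 1) on the top grid, move c, d and e to y axis index 2 on the second grid, and declare each extra axis explicitly with e_y_axis(index = ..., gridIndex = ...). I am assuming here that e_y_axis()/e_x_axis() forward index and gridIndex to the underlying ECharts axis options:
df |>
  e_charts(x) |>
  e_line(a, x_index = 0, y_index = 0) |>
  e_line(b, x_index = 0, y_index = 1) |>                 # second y axis on the top grid
  e_bar(c, stack = "grp", x_index = 1, y_index = 2) |>
  e_bar(d, stack = "grp", x_index = 1, y_index = 2) |>
  e_bar(e, stack = "grp", x_index = 1, y_index = 2) |>
  e_grid(height = "25%") |>
  e_grid(height = "25%", top = "50%") |>
  e_y_axis(index = 1, gridIndex = 0) |>                  # declare the extra axis on grid 0
  e_y_axis(index = 2, gridIndex = 1) |>                  # y axis for the bottom grid
  e_x_axis(index = 1, gridIndex = 1) |>
  e_tooltip(trigger = "axis") |>
  e_datazoom(x_index = c(0, 1))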

TensorFlow: slice into blocks instead of reshape

I have a 3D voxel shape (a chair) of size (64, 64, 64). When I reshape it with a tf operation to (8, 32, 32, 32), run my deep-learning model on it, and then reshape the output back to (64, 64, 64), the result looks very bad: there is no recognisable shape at all, certainly not a chair.
However, if I use a function I wrote to slice the volume into 32x32x32 blocks and stack them as (8, 32, 32, 32), feed that to my model, and recombine the (8, 32, 32, 32) output with a companion function that reverses the slicing, I get a good-looking shape.
The issue is that both functions, slice and combine, are NumPy, not TensorFlow. I have to train the model end-to-end, so I need equivalent slice and combine operations in TensorFlow.
def slice(self, size, obj):
    # print('inside')
    oldi = 0
    newi = 0
    oldj = 0
    newj = 0
    oldk = 0
    newk = 0
    lst = []
    s = obj.shape[0]
    s += 1
    for i in range(size, s, size):
        if (newi == s - 1):
            oldi = 0
        else:
            oldi = newi
        for j in range(size, s, size):
            if (newj == s - 1):
                oldj = 0
            else:
                oldj = newj
            for k in range(size, s, size):
                newi = i
                newj = j
                newk = k
                slc = obj[oldi:newi, oldj:newj, oldk:newk]
                # print(oldi, ':', newi, ',', oldj, ':', newj, ',', oldk, ':', newk)
                # print(slc.shape)
                lst.append(slc)
                if (newk == s - 1):
                    oldk = 0
                else:
                    oldk = newk
            # print(slc.shape)
    return lst
def combine(self, lst, shape, size):
    oldi = 0
    newi = 0
    oldj = 0
    newj = 0
    oldk = 0
    newk = 0
    obj = np.zeros((shape, shape, shape))
    s = shape
    s += 1
    counter = 0
    for i in range(size, s, size):
        if (newi == s - 1):
            oldi = 0
        else:
            oldi = newi
        for j in range(size, s, size):
            if (newj == s - 1):
                oldj = 0
            else:
                oldj = newj
            for k in range(size, s, size):
                newi = i
                newj = j
                newk = k
                obj[oldi:newi, oldj:newj, oldk:newk] = lst[counter]
                counter += 1
                # print(oldi, ':', newi, ',', oldj, ':', newj, ',', oldk, ':', newk)
                # print(slc.shape)
                if (newk == s - 1):
                    oldk = 0
                else:
                    oldk = newk
    return obj
In other words, I want TensorFlow operations that mimic the slice and combine functions above.
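For what it's worth, here is a sketch of how the same blocking can be done with differentiable TensorFlow ops, so it can sit inside an end-to-end model (function names are mine; it assumes TF 2.x eager execution, a cubic volume whose side is an exact multiple of the block size, and static shapes). A plain tf.reshape to (8, 32, 32, 32) only regroups elements in memory order, which is why the chair gets scrambled; a reshape-transpose-reshape pattern gathers each 32x32x32 block contiguously. The block ordering matches the i, j, k loops of the NumPy functions above:
import tensorflow as tf

def tf_slice_blocks(vol, size=32):
    # vol: (D, H, W) -> (num_blocks, size, size, size)
    d, h, w = vol.shape
    x = tf.reshape(vol, (d // size, size, h // size, size, w // size, size))
    x = tf.transpose(x, (0, 2, 4, 1, 3, 5))   # bring the three block indices to the front
    return tf.reshape(x, (-1, size, size, size))

def tf_combine_blocks(blocks, shape=64, size=32):
    # blocks: (num_blocks, size, size, size) -> (shape, shape, shape)
    n = shape // size
    x = tf.reshape(blocks, (n, n, n, size, size, size))
    x = tf.transpose(x, (0, 3, 1, 4, 2, 5))   # interleave block and within-block axes again
    return tf.reshape(x, (shape, shape, shape))

# quick self-check: combining the slices should give back the original volume
vol = tf.random.uniform((64, 64, 64))
blocks = tf_slice_blocks(vol)                # shape (8, 32, 32, 32)
restored = tf_combine_blocks(blocks)         # shape (64, 64, 64)
print(bool(tf.reduce_all(tf.equal(vol, restored))))   # True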

NameError when running GMRes following FEniCS discretisation

I've discretised a diffusion equation with FEniCS as follows:
def DiscretiseEquation(h):
    mesh = UnitSquareMesh(h, h)
    V = FunctionSpace(mesh, 'Lagrange', 1)

    def on_boundary(x, on_boundary):
        return on_boundary

    bc_value = Constant(0.0)
    boundary_condition = DirichletBC(V, bc_value, on_boundary)

    class RandomDiffusionField(Expression):
        def __init__(self, m, n, element):
            self._rand_field = np.exp(-np.random.randn(m, n))
            self._m = m
            self._n = n
            self._ufl_element = element

        def eval(self, value, x):
            x_index = np.int(np.floor(self._m * x[0]))
            y_index = np.int(np.floor(self._n * x[1]))
            i = min(x_index, self._m - 1)
            j = min(y_index, self._n - 1)
            value[0] = self._rand_field[i, j]

        def value_shape(self):
            return (1, )

    class RandomRhs(Expression):
        def __init__(self, m, n, element):
            self._rand_field = np.random.randn(m, n)
            self._m = m
            self._n = n
            self._ufl_element = element

        def eval(self, value, x):
            x_index = np.int(np.floor(self._m * x[0]))
            y_index = np.int(np.floor(self._n * x[1]))
            i = min(x_index, self._m - 1)
            j = min(y_index, self._n - 1)
            value[0] = self._rand_field[i, j]

        def value_shape(self):
            return (1, )

    u = TrialFunction(V)
    v = TestFunction(V)
    random_field = RandomDiffusionField(100, 100, element=V.ufl_element())
    zero = Expression("0", element=V.ufl_element())
    one = Expression("1", element=V.ufl_element())
    diffusion = as_matrix(((random_field, zero), (zero, one)))
    a = inner(diffusion * grad(u), grad(v)) * dx
    L = RandomRhs(h, h, element=V.ufl_element()) * v * dx
    A = assemble(a)
    b = assemble(L)
    boundary_condition.apply(A, b)
    A = as_backend_type(A).mat()
    (indptr, indices, data) = A.getValuesCSR()
    mat = csr_matrix((data, indices, indptr), shape=A.size)
    rhs = b.array()
    # Solving
    x = spsolve(mat, rhs)
    # Conversion to a FEniCS function
    u = Function(V)
    u.vector()[:] = x
I am running the GMRES solver as normal. The callback argument is a separate iteration counter I've defined.
DiscretiseEquation(100)
A = mat
b = rhs
x, info = gmres(A, b, callback = IterCount())
The call raises a NameError, saying that 'mat' is not defined:
NameError Traceback (most recent call last)
<ipython-input-18-e096b2eea097> in <module>()
1 DiscretiseEquation(200)
----> 2 A = mat
3 b = rhs
4 x_200, info_200 = gmres(A, b, callback = IterCount())
5 gmres_res = closure_variables["residuals"]
NameError: name 'mat' is not defined
As far as I'm aware, it should be defined when I call the DiscretiseEquation function?
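A sketch of what seems to be going on: mat and rhs are local variables inside DiscretiseEquation, so they disappear when the function returns, and calling the function does not create them in the calling scope. Below is a minimal, self-contained illustration of returning them instead (the 4x4 identity system is only a stand-in for the FEniCS assembly):
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import gmres

def discretise_equation_sketch(h):
    # ... the FEniCS assembly from DiscretiseEquation would go here ...
    mat = csr_matrix(np.eye(4))   # stand-in for the assembled CSR matrix
    rhs = np.ones(4)              # stand-in for b.array()
    return mat, rhs               # local names must be returned to be usable outside

mat, rhs = discretise_equation_sketch(100)
x, info = gmres(mat, rhs)         # add callback=IterCount() as in the question
print(info)                       # 0 means converged
With the real assembly in place, gmres would then receive the CSR matrix and right-hand side directly from the function's return value.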

Create a ggplot2 survival curve with censored table

I am trying to create a Kaplan-Meier plot with 95% confidence bands and a table of the censored data beneath it. I can create the plot, but not the table. I get the error message: Error in grid.draw(both) : object 'both' not found.
library(survival)
library(ggplot2)
library(GGally)
library(gtable)
data(lung)
sf.sex <- survfit(Surv(time, status) ~ sex, data = lung)
pl.sex <- ggsurv(sf.sex) +
geom_ribbon(aes(ymin=low,ymax=up,fill=group),alpha=0.3) +
guides(fill=guide_legend("sex"))
pl.sex
tbl <- ggplot(df_nums, aes(x = Time, y = factor(variable), colour = variable,
                           label = value)) +
  geom_text() +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        legend.position = "none",
        plot.background = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        legend.position = "none",
        axis.line = element_blank(),
        axis.text.x = element_blank(),
        axis.text.y = element_text(size = 15, face = "bold", color = 'black'),
        axis.ticks = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        plot.title = element_blank()) +
  scale_y_discrete(breaks = c("Group.A", "Group.B"), labels = c("Group A", "Group B"))
both = rbind(ggplotGrob(g), ggplotGrob(tbl), size="last")
panels <- both$layout$t[grep("panel", both$layout$name)]
both$heights[panels] <- list(unit(1,"null"), unit(2, "lines"))
both <- gtable_add_rows(both, heights = unit(1,"line"), 8)
both <- gtable_add_grob(both, textGrob("Number at risk", hjust=0, x=0), t=9, l=2, r=4)
grid.newpage()
grid.draw(both)
I solved the problem by using the Rcmdr plugin KMggplot2. The code below was generated by the plugin after selecting the data and variables.
library(survival, pos=18)
data(lung, package="survival")
lung <- within(lung, {
sex <- factor(sex, labels=c('male','female'))
})
ggthemes_data <- ggthemes::ggthemes_data
require("ggplot2")
.df <- na.omit(data.frame(x = lung$time, y = lung$status, z = lung$sex))
.df <- .df[do.call(order, .df[, c("z", "x"), drop = FALSE]), , drop = FALSE]
.fit <- survival::survfit(survival::Surv(time = x, event = y, type = "right") ~ z,
.df)
.pval <- plyr::ddply(.df, plyr::.(),
function(x) {
data.frame(
x = 0, y = 0, df = 1,
chisq = survival::survdiff(
survival::Surv(time = x, event = y, type = "right") ~ z, x
)$chisq
)})
.pval$label <- paste0(
"paste(italic(p), \" = ",
signif(1 - pchisq(.pval$chisq, .pval$df), 3),
"\")"
)
.fit <- data.frame(x = .fit$time, y = .fit$surv, nrisk = .fit$n.risk, nevent =
.fit$n.event, ncensor= .fit$n.censor, upper = .fit$upper, lower = .fit$lower)
.df <- .df[!duplicated(.df[,c("x", "z")]), ]
.df <- .fit <- data.frame(.fit, .df[, c("z"), drop = FALSE])
.med <- plyr::ddply(.fit, plyr::.(z), function(x) {
data.frame(
median = min(subset(x, y < (0.5 + .Machine$double.eps^0.5))$x)
)})
.df <- .fit <- rbind(unique(data.frame(x = 0, y = 1, nrisk = NA, nevent = NA,
ncensor = NA, upper = 1, lower = 1, .df[, c("z"), drop = FALSE])), .fit)
.cens <- subset(.fit, ncensor == 1)
.tmp1 <- data.frame(as.table(by(.df, .df[, c("z"), drop = FALSE], function(d)
max(d$nrisk, na.rm = TRUE))))
.tmp1$x <- 0
.nrisk <- .tmp1
for (i in 1:9) {.df <- subset(.fit, x < 100 * i); .tmp2 <-
data.frame(as.table(by(.df, .df[, c("z"), drop = FALSE], function(d) if
(all(is.na(d$nrisk))) NA else min(d$nrisk - d$nevent - d$ncensor, na.rm = TRUE))));
.tmp2$x <- 100 * i; .tmp2$Freq[is.na(.tmp2$Freq)] <- .tmp1$Freq[is.na(.tmp2$Freq)];
.tmp1 <- .tmp2; .nrisk <- rbind(.nrisk, .tmp2)}
.nrisk$y <- rep(seq(0.075, 0.025, -0.05), 10)
.plot <- ggplot(data = .fit, aes(x = x, y = y, colour = z)) +
RcmdrPlugin.KMggplot2::geom_stepribbon(data = .fit, aes(x = x, ymin = lower, ymax =
upper, fill = z), alpha = 0.25, colour = "transparent", show.legend = FALSE, kmplot
= TRUE) + geom_step(size = 1.5) +
geom_linerange(data = .cens, aes(x = x, ymin = y,
ymax = y + 0.02), size = 1.5) +
geom_text(data = .pval, aes(y = y, x = x, label =
label), colour = "black", hjust = 0, vjust = -0.5, parse = TRUE, show.legend =
FALSE, size = 14 * 0.282, family = "sans") +
geom_vline(data = .med, aes(xintercept
= median), colour = "black", lty = 2) + scale_x_continuous(breaks = seq(0, 900, by
= 100), limits = c(0, 900)) +
scale_y_continuous(limits = c(0, 1), expand = c(0.01,0)) + scale_colour_brewer(palette = "Set1") + scale_fill_brewer(palette = "Set1") +
xlab("Time from entry") + ylab("Proportion of survival") + labs(colour = "sex") +
ggthemes::theme_calc(base_size = 14, base_family = "sans") + theme(legend.position
= c(1, 1), legend.justification = c(1, 1))
.nrisk$y <- ((.nrisk$y - 0.025) / (max(.nrisk$y) - 0.025) + 0.5) * 0.5
.plot2 <- ggplot(data = .nrisk, aes(x = x, y = y, label = Freq, colour = z)) +
geom_text(size = 14 * 0.282, family = "sans") + scale_x_continuous(breaks = seq(0,900, by = 100), limits = c(0, 900)) +
scale_y_continuous(limits = c(0, 1)) +
scale_colour_brewer(palette = "Set1") + ylab("Proportion of survival") +
RcmdrPlugin.KMggplot2::theme_natrisk(ggthemes::theme_calc, 14, "sans")
.plot3 <- ggplot(data = subset(.nrisk, x == 0), aes(x = x, y = y, label = z, colour = z)) +
geom_text(hjust = 0, size = 14 * 0.282, family = "sans") +
scale_x_continuous(limits = c(-5, 5)) + scale_y_continuous(limits = c(0, 1)) +
scale_colour_brewer(palette = "Set1") +
RcmdrPlugin.KMggplot2::theme_natrisk21(ggthemes::theme_calc, 14, "sans")
.plotb <- ggplot(.df, aes(x = x, y = y)) + geom_blank() +
RcmdrPlugin.KMggplot2::theme_natriskbg(ggthemes::theme_calc, 14, "sans")
grid::grid.newpage(); grid::pushViewport(grid::viewport(layout =
grid::grid.layout(2, 2, heights = unit(c(1, 3), c("null", "lines")), widths =
unit(c(4, 1), c("lines", "null")))));
print(.plotb, vp =
grid::viewport(layout.pos.row = 1:2, layout.pos.col = 1:2));
print(.plot , vp =
grid::viewport(layout.pos.row = 1 , layout.pos.col = 1:2));
print(.plot2, vp =
grid::viewport(layout.pos.row = 2 , layout.pos.col = 1:2));
print(.plot3, vp =
grid::viewport(layout.pos.row = 2 , layout.pos.col = 1 ));
.plot <- recordPlot()
print(.plot)
Here's a start (code below). I guess you can create the table you need and substitute it for random.table.
# install.packages("ggplot2", dependencies = TRUE)
# install.packages("RGraphics", dependencies = TRUE)
# install.packages("gridExtra", dependencies = TRUE)
# install.packages("survival", dependencies = TRUE)
require(ggplot2)
library(RGraphics)
library(gridExtra)
library(survival)
# Plot
data(lung)
sf.sex <- survfit(Surv(time, status) ~ sex, data = lung)
pl.sex <- ggsurv(sf.sex) +
geom_ribbon(aes(ymin=low,ymax=up,fill=group),alpha=0.3) +
guides(fill=guide_legend("sex"))
# Table
random.table <- data.frame("CL 95"=rnorm(5),n=runif(5,1,3))
pl.table <- tableGrob(random.table)
# Arrange the plots on the same page
grid.arrange(pl.sex, pl.table, ncol=1)