Comparison of the results of four methods

The following is the explanation and procedure of the whole simulation program:
Load the required packages and generate the required data: the target variable follows a Weibull distribution, and the truncation variable follows an exponential distribution.

# One-time installation of the two required packages; comment these lines out
# after the first run to avoid reinstalling on every execution.
install.packages("rootSolve")
install.packages("Rsolnp")
# Rsolnp: nonlinear optimization; rootSolve: multiroot() nonlinear equation solver.
library(Rsolnp)
library(rootSolve)
#################################################################
#####  define a new log function  log_n ###
# Smoothed logarithm used throughout the likelihood code.
# For z >= 1/n^2 this is exactly log(z); for smaller z the log is replaced by
# its second-order Taylor expansion around 1/n^2, which keeps the value finite
# when a density or probability underflows toward zero.
log_n <- function(z, n) {
  an <- n^2
  small <- z < 1 / an
  # Taylor branch, evaluated everywhere then masked to the small-z entries.
  taylor <- log(1 / an) - 1.5 + 2 * an * z - (an * z)^2 / 2
  taylor[!small] <- 0
  # Neutralize the small entries so log(z) contributes 0 there.
  z[small] <- 1
  taylor + log(z)
}
################################################################################
####   Function used to calculate    log( gamma(N+1)/gamma(N-n+1) )
# Computes log( gamma(n_big + 1) / gamma(n_big - n_small + 1) ),
# i.e. the log of the falling factorial n_big * (n_big-1) * ... * (n_big-n_small+1).
# Returns 0 when n_small <= 0 or n_big < n_small.
digam <- function(n_big, n_small) {
  if (n_small > 0 && n_big >= n_small) {
    sum(log(n_big + 1 - seq_len(n_small)))
  } else {
    0
  }
}

###########################################################
################  Function used to generate data
# Simulates one left-truncated sample.
# Draws n_big Weibull(wshape, wscale) target values and n_big Exp(erate)
# truncation values, then keeps only the pairs where the target exceeds the
# truncation point. Returns a data.frame with columns x (target) and y (truncation).
gen_obs <- function(wshape, wscale, erate, n_big) {
  target <- rweibull(n_big, shape = wshape, scale = wscale)
  trunc_pt <- rexp(n_big, rate = erate)
  observed <- target > trunc_pt
  data.frame(x = target[observed], y = trunc_pt[observed])
}

1. Direct maximum likelihood function

The max.like function is used to solve the optimal solution of the full likelihood function:

#########################################################
######      full log-likelihood function
###
###   Function max.like is to calculate the estimator of [N,alpha,theta]; N is n_big
###           para= [N,alpha,theta] in max.like , corresponding estimate is par_hat
###
###   Before the function log_likelihood , define a data frame-obs
###
##################################################################
# Direct maximization of the full (empirical) log-likelihood.
#
# obs : data.frame with columns x (observed target) and y (truncation point).
# Returns par_hat = c(N_hat, alpha_hat, theta_hat).
#
# The original source had three statements fused onto one line (a copy/paste
# extraction artifact) which made the function syntactically invalid; the code
# below restores the intended structure without changing the computation.
max.like <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]

  # Negative profile log-likelihood in para = c(N, alpha, theta);
  # the inner optimize() profiles out the Lagrange multiplier lambda.
  log_likelihood <- function(para) {
    fn <- function(lambda) {
      ter_log <- log_n(dexp(y, para[3]), n) -
        log_n(1 + lambda * (pexp(x, para[3]) - para[2]), n)
      sum(ter_log) + (para[1] - n) * log(1 - para[2]) + digam(para[1], n)
    }
    # optimize() minimizes fn over lambda by default (as in the original code).
    (-1) * optimize(fn, interval = c(-1000, 1000), tol = 0.0001)$objective
  }

  par_hat <- nlminb(c(n, 0.5, 1), log_likelihood,
                    lower = c(n, 0.0001, 0.001),
                    upper = c(100 * n, 0.999, 100))$par
  return(par_hat)
}


The max.like2 function computes the maximum likelihood estimates of α and θ when N is fixed at its true value N0, then evaluates the likelihood-ratio function of N, from which the coverage of the full-likelihood method is constructed. When N is set to a different value in a simulation, N0 in max.like2 must be adjusted to that same value, otherwise the results deviate substantially.

###################################################################################################
########  calculate full likelihood confidence interval for parameter  par
###
###   max.like2 is to calculate the estimator of [alpha,theta] when given true n_big
###             para = [alpha,theta] in max.like2,  corresponding estimate is par_Nhat
###
###   con_int is to calculate (1-a) Interval lower bound , para_star is a parameter vector
###           para_star = [par_hat,par_Nhat,N0,1-a]
###
###
####################################################################
# Maximum likelihood estimation of (alpha, theta) with N fixed at N0.
#
# obs : data.frame with columns x (observed target) and y (truncation point).
# N0  : the value at which N is fixed. Previously hard-coded to 300 inside the
#       body; the surrounding text notes it must always match the true N of the
#       simulation, so it is now a parameter (default 300 preserves the old
#       behavior for existing callers).
# Returns par_Nhat = c(alpha_hat, theta_hat).
#
# The original source had three statements fused onto one line; the structure
# below restores the intended code.
max.like2 <- function(obs, N0 = 300) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]

  # Negative log-likelihood in para = c(alpha, theta) with N fixed at N0;
  # the inner optimize() profiles out the Lagrange multiplier lambda.
  log_likelihood.N0 <- function(para) {
    fn <- function(lambda) {
      ter_log <- log_n(dexp(y, para[2]), n) -
        log_n(1 + lambda * (pexp(x, para[2]) - para[1]), n)
      sum(ter_log) + digam(N0, n) + (N0 - n) * log(1 - para[1])
    }
    (-1) * optimize(fn, interval = c(-500, 500), tol = 0.0001)$objective
  }

  par_Nhat <- nlminb(c(0.5, 1), log_likelihood.N0,
                     lower = c(0.0001, 0.001),
                     upper = c(0.999, 100))$par
  return(par_Nhat)
}

Calculate the coverage of the full likelihood method:

###########################################################################################
#####   Construct (1-a) confidence interval lower bound  for full likelihood
###
# Likelihood-ratio test indicator for the full-likelihood confidence interval.
#
# obs       : data.frame with columns x and y.
# para_star : c(par_hat (3), par_Nhat (2), N0, 1-a) — the unconstrained estimate,
#             the constrained (N fixed) estimate, the true N used upstream, and
#             the confidence level.
# N_true    : true population size N.
# Returns 1 if 2*(l_full - l_constrained) <= chi-square(1-a, df=1), else 0,
# i.e. whether N_true lies inside the (1-a) interval.
#
# The original source fused the tails of both inner closures onto one invalid
# line; the structure below restores the intended code unchanged.
con_int <- function(obs, para_star, N_true) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]

  # Profile log-likelihood at para = c(N, alpha, theta).
  log_likelihood <- function(para) {
    fn <- function(lambda) {
      ter_log <- log_n(dexp(y, para[3]), n) -
        log_n(1 + lambda * (pexp(x, para[3]) - para[2]), n)
      sum(ter_log) + (para[1] - n) * log(1 - para[2]) + digam(para[1], n)
    }
    optimize(fn, interval = c(-500, 500), tol = 0.001)$objective
  }

  # Constrained version: alpha, theta fixed at para_star[4:5], N is the scalar argument.
  clog_likelihood <- function(N) {
    fn <- function(lambda) {
      ter_log <- log_n(dexp(y, para_star[5]), n) -
        log_n(1 + lambda * (pexp(x, para_star[5]) - para_star[4]), n)
      sum(ter_log) + (N - n) * log(1 - para_star[4]) + digam(N, n)
    }
    optimize(fn, interval = c(-500, 500), tol = 0.001)$objective
  }

  R2N <- 2 * (log_likelihood(para = para_star[1:3]) - clog_likelihood(N = N_true))

  # tail(para_star, 1) is the confidence level 1-a supplied by the caller.
  ifelse(R2N <= qchisq(tail(para_star, 1), df = 1), 1, 0)
}
#####################################################################################
#########   calculate Coverage percentage for full likelihood
##   INPUT:
##         int : Vector 0 and 1 to determine if the true value is in the confidence interval
##               1 represents the truth value in the confidence interval
##         K :  Number of simulations
##
##   OUTPUT:
##         P_f : Coverage percentage for full likelihood
##
######################################################
# Empirical coverage of the full-likelihood interval: the fraction of the K
# simulation replicates whose indicator in `int` equals 1.
p_f <- function(K, int) {
  covered <- sum(int)
  covered / K
}

Maximum likelihood estimation of conditional likelihood (Wang):

######################################################################
##########     Wang: condition likelihood
###
###   Function max.clike is to calculate the estimator of [N,alpha,theta]; N is n_big
###           para = [N,alpha,theta] in max.clike , corresponding estimate is par_tilde
###
###   sigma : the variance of the asymptotic distribution
###
###
###
###############################################################
# Conditional likelihood estimation (Wang's method).
#
# obs : data.frame with columns x (observed target) and y (truncation point).
# Returns par_tilde = c(N_tilde, alpha_tilde, theta_tilde):
#   theta_tilde maximizes the conditional likelihood,
#   N_tilde is the Horvitz-Thompson-type estimate sum(1/G(x; theta_tilde)),
#   alpha_tilde = n / N_tilde.
#
# The original source contained this function TWICE: the extraction fused the
# end of the first copy, an intervening prose sentence, and a verbatim second
# definition onto a single line. The duplicate has been removed; the single
# definition below is the original computation.
max.clike <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]

  # Negative conditional log-likelihood in theta.
  llc <- function(theta) {
    ter_y <- log_n(dexp(y, theta), n) - log_n(pexp(x, theta), n)
    -sum(ter_y)
  }

  theta_tilde <- nlminb(1, llc, lower = c(0.001), upper = c(100))$par
  N_tilde <- sum(1 / pexp(x, theta_tilde))
  alpha_tilde <- n / N_tilde
  par_tilde <- c(N_tilde, alpha_tilde, theta_tilde)
  return(par_tilde)
}
#########################################################################
########################  Estimates of sigma #######
# Plug-in estimate of sigma, the asymptotic variance appearing in the
# conditional-likelihood (Wang) confidence intervals.
#
# obs         : data.frame with columns x and y.
# theta_tilde : conditional MLE of theta.
# N_tilde     : conditional estimate of N.
# Returns the scalar estimate beta - 1 - V23 * V33^{-1} * V23.
#
# The original source fused the tail of this function, a comment banner, the
# whole p_c function, and a prose heading onto one invalid line; both
# functions are reconstructed below.
sigma_cest <- function(obs, theta_tilde, N_tilde) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]
  # Estimated density of the truncation variable (Exp(theta_tilde)).
  g_tilde <- function(t) {
    ifelse(t >= 0, theta_tilde * exp(-theta_tilde * t), 0)
  }
  # NOTE(review): labelled "first derivative" in the original comments, but
  # d/dtheta [theta*exp(-theta*t)] = (1 - theta*t)*exp(-theta*t), not
  # (1 + theta^2)*exp(-theta*t). Kept as written — confirm against the paper.
  g1_tilde <- function(t) {
    ifelse(t >= 0, (1 + theta_tilde^2) * exp(-theta_tilde * t), 0)
  }
  # Estimated distribution function and its theta-derivative.
  G_tilde <- function(t) {
    ifelse(t >= 0, 1 - exp(-theta_tilde * t), 0)
  }
  G1_tilde <- function(t) {
    ifelse(t >= 0, t * exp(-theta_tilde * t), 0)
  }
  # Integrand g1^2 / g used in the V33 term.
  g <- function(z) {
    g1_tilde(z)^2 / g_tilde(z)
  }
  # integral of g over [0, x_i] for each observation (preallocated via vapply;
  # the original grew the vector with c() inside a loop).
  integ <- vapply(x, function(xi) integrate(g, 0, xi)$value, numeric(1))

  beta_tilde <- (1 / N_tilde) * sum(G_tilde(x)^(-2))
  V23_tilde <- (1 / N_tilde) * sum(G1_tilde(x) / (G_tilde(x)^2))
  V33_tilde <- (1 / N_tilde) *
    (sum((G1_tilde(x)^2) / (G_tilde(x)^2)) - sum(integ / G_tilde(x)))
  return(beta_tilde - 1 - V23_tilde * (1 / V33_tilde) * V23_tilde)
}

#####################################################################################
#########   calculate Coverage percentage for conditional likelihood
##   INPUT:
##         K           : Number of simulations
##         sigma_tilde : conditional estimates of sigma (vector over replicates)
##         N_tilde     : conditional estimates of N (vector over replicates)
##         N_true      : true n_big
##         a           : Significance level
##   OUTPUT:
##         list(p2, p3): two coverage percentages for the conditional likelihood,
##                       based on the plain and the log-scale normal intervals.
######################################################
p_c <- function(K, sigma_tilde, N_tilde, N_true, a) {
  I2 <- ifelse(abs(N_tilde^(-1/2) * (N_tilde - N_true)) <=
                 sqrt(sigma_tilde) * qnorm(1 - a/2), 1, 0)
  I3 <- ifelse(abs(N_tilde^(1/2) * log(N_tilde / N_true)) <=
                 sqrt(sigma_tilde) * qnorm(1 - a/2), 1, 0)
  list(p2 = sum(I2) / K, p3 = sum(I3) / K)
}
#######################################################################################################
#########################   EM Algorithm for full likelihood   #####################################
####  k1     : number of unique values of observed x
####  par    : (k1+2)-dim parameter vector c(p_1..p_k1, alpha, theta)
####  ep     : convergence precision
####  it_max : maximum number of iterations
##################################################################################
# The original source collapsed this whole section (comment banners, EM_it, the
# See function, and two prose headings) onto two enormous fused lines; the code
# below restores the intended structure. Two corrections:
#  * alpha_1 now uses tilde_x (length k1, matching p_1) instead of x (length n);
#    the original recycled p_1 against pexp(x, ...), which is only coincidentally
#    correct when all x are distinct, and inconsistent with how alpha_0 is built.
#  * a dead `break` after `return(par)` was removed.
EM_it <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]
  tilde_x <- unique(x)
  k1 <- length(tilde_x)
  # Initial values: uniform masses, moment-style theta, implied alpha.
  p_0 <- 1 / k1
  theta_0 <- sum(y) / n
  alpha_0 <- sum(pexp(tilde_x, theta_0) * p_0)
  par_0 <- c(rep(p_0, k1), alpha_0, theta_0)

  # Exponential density (kept from the original; unused helper `indicator` dropped).
  g <- function(x, theta) {
    theta * exp(-1 * theta * x)
  }

  # One full EM run; returns the parameter vector at convergence.
  # NOTE(review): if it_max is exceeded without convergence this returns NULL,
  # as in the original — callers should be aware.
  my.em <- function(obs, par = par_0, ep = 0.001, it_max = 50000) {
    n <- nrow(obs)
    x <- obs[, 1]
    y <- obs[, 2]
    tilde_x <- unique(x)
    k1 <- length(tilde_x)
    it_k <- 0
    while (it_k <= it_max) {
      par0 <- par
      ### E-step: expected counts at each support point tilde_x[j].
      a <- NULL
      for (j in 1:k1) {
        aj <- sum(ifelse(x == tilde_x[j], 1, 0)) +
          (n / par0[k1 + 1]) * par0[j] * (1 - pexp(tilde_x[j], par0[k1 + 2]))
        a <- c(a, aj)
      }
      ### M-step objective in theta: observed part b1 plus the expected
      ### contribution b2 of the unobserved (truncated-away) units.
      fn <- function(theta) {
        b1 <- sum(log_n(dexp(y, theta), n))
        log_dexp <- function(x) {
          (log(theta) - theta * x) * par0[k1 + 2] * exp(-1 * par0[k1 + 2] * x)
        }
        intb <- NULL
        for (j in 1:k1) {
          intbj <- integrate(f = log_dexp, tilde_x[j], Inf)$value
          intb <- c(intb, intbj)
        }
        b2 <- (n / par0[k1 + 1]) * sum(par0[1:k1] * intb)
        return(b1 + b2)
      }

      p_1 <- a / sum(a)
      theta_1 <- optimize(f = fn, interval = c(0.0001, 100),
                          tol = 0.0001, maximum = TRUE)$maximum
      # Fixed: use tilde_x (length k1) so lengths match p_1; see header note.
      alpha_1 <- sum(pexp(tilde_x, theta_1) * p_1)
      par <- c(p_1, alpha_1, theta_1)
      ## Stop when the parameter change is below ep.
      norm <- sqrt(t(par - par0) %*% (par - par0))
      if (norm < ep) {
        return(par)
      } else {
        it_k <- it_k + 1
      }
    } # while
  } # my.em

  par <- my.em(obs, par = par_0, ep = 0.001, it_max = 50000)
  p_em <- par[1:k1]
  alp_em <- par[k1 + 1]
  the_em <- par[k1 + 2]
  N_em <- n / alp_em
  par_em <- data.frame(N_em = N_em, alp_em = alp_em, the_em)
  return(par_em)
} # EM_it
## Coverage cannot be calculated by the EM algorithm.

#########################################################################
#####################   3. Method of solving the estimating equations ###
# Solves the four score equations in par = c(N, alpha, theta, lambda)
# simultaneously with rootSolve::multiroot.
See <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]
  ## Exponential density.
  g <- function(x, theta) {
    ifelse(x >= 0, theta / exp(theta * x), 0)
  }
  ## First derivative of g with respect to theta.
  g1 <- function(x, theta) {
    ifelse(x >= 0, (1 - theta * x) / exp(theta * x), 0)
  }
  ## Exponential distribution function.
  G <- function(x, theta) {
    ifelse(x >= 0, 1 - 1 / exp(theta * x), 0)
  }
  ## First derivative of G with respect to theta.
  G1 <- function(x, theta) {
    ifelse(x >= 0, x / exp(theta * x), 0)
  }
  ### Score equations; par = c(N, alpha, theta, lambda).
  model <- function(par) {
    c(F1 = sum(1 / ((par[1] - n) + (1:n))) + log_n(1 - par[2], n),
      F2 = (par[1] - n) / (1 - par[2]) -
        par[4] * sum(1 / (1 + par[4] * (G(x, par[3]) - par[2]))),
      F3 = sum(g1(y, par[3]) / g(y, par[3])) -
        par[4] * sum(G1(x, par[3]) / (1 + par[4] * (G(x, par[3]) - par[2]))),
      F4 = sum((G(x, par[3]) - par[2]) / (1 + par[4] * (G(x, par[3]) - par[2]))))
  }
  # Initial value; the original notes it affects the simulation results.
  par0 <- c(n + 10, 0.5, 1, 1)
  par_See <- multiroot(f = model, start = par0,
                       rtol = 1e-8, atol = 1e-8, ctol = 1e-8)$root
  return(par_See)
}

4.1. Profile likelihood for N (inner equations solved with the multiroot function)

###########################################################################
###################  the fourth method ###################################
# Profile-likelihood estimation of N; the nuisance parameters
# (alpha, theta, lambda) are profiled out by solving their score equations
# with rootSolve::multiroot for each candidate N.
#
# obs : data.frame with columns x and y.
# Returns c(N_est, alpha_est, theta_est, lambda_est).
#
# Bug fix: the original took optimize(f, ..., maximum = TRUE)$objective as
# N_est, i.e. the maximized log-likelihood VALUE, not the maximizing N.
# The maximizer is $maximum; $objective is the function value at it.
# (The extraction had also fused several statements onto single lines.)
profile_max.like1 <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]
  ## Exponential density.
  g <- function(x, theta) {
    ifelse(x >= 0, theta / exp(theta * x), 0)
  }
  ## First derivative of g with respect to theta.
  g1 <- function(x, theta) {
    ifelse(x >= 0, (1 - theta * x) / exp(theta * x), 0)
  }
  ## Exponential distribution function.
  G <- function(x, theta) {
    ifelse(x >= 0, 1 - 1 / exp(theta * x), 0)
  }
  ## First derivative of G with respect to theta.
  G1 <- function(x, theta) {
    ifelse(x >= 0, x / exp(theta * x), 0)
  }

  ### Profile log-likelihood of N: solve the score equations in
  ### par = c(alpha, theta, lambda) given N, then evaluate the log-likelihood.
  f <- function(N) {
    model0 <- function(par) {
      c(F1 = (N - n) / (1 - par[1]) -
          par[3] * sum(1 / (1 + par[3] * (G(x, par[2]) - par[1]))),
        F2 = sum(g1(y, par[2]) / g(y, par[2])) -
          par[3] * sum(G1(x, par[2]) / (1 + par[3] * (G(x, par[2]) - par[1]))),
        F3 = sum((G(x, par[2]) - par[1]) / (1 + par[3] * (G(x, par[2]) - par[1]))))
    }
    par0 <- c(0.5, 1, 1) # initial value of (alpha, theta, lambda)
    atl <- multiroot(f = model0, start = par0,
                     rtol = 1e-8, atol = 1e-8, ctol = 1e-8)$root
    sum(log_n((N - n) + (1:n), n)) + (N - n) * log_n(1 - atl[1], n) +
      sum(log_n(g(y, atl[2]), n)) -
      sum(log_n(1 + atl[3] * (G(x, atl[2]) - atl[1]), n))
  }
  # $maximum = the argmax N (bug fix; was $objective).
  N_est <- optimize(f, interval = c(n, 400), tol = 0.001, maximum = TRUE)$maximum

  ## Re-solve the score equations at the estimated N.
  model <- function(par) {
    c(F1 = (N_est - n) / (1 - par[1]) -
        par[3] * sum(1 / (1 + par[3] * (G(x, par[2]) - par[1]))),
      F2 = sum(g1(y, par[2]) / g(y, par[2])) -
        par[3] * sum(G1(x, par[2]) / (1 + par[3] * (G(x, par[2]) - par[1]))),
      F3 = sum((G(x, par[2]) - par[1]) / (1 + par[3] * (G(x, par[2]) - par[1]))))
  }
  par0 <- c(0.5, 1, 1)
  atl_est <- multiroot(f = model, start = par0,
                       rtol = 1e-8, atol = 1e-8, ctol = 1e-8)$root
  ## profile_par.est = c(N_est, alpha_est, theta_est, lambda_est)
  profile_par.est <- c(N_est, atl_est)
  return(profile_par.est)
}

#### 4.2. Profile likelihood for N (inner maximization done with nlminb) ####
############################################################################
# Same profile-likelihood idea as profile_max.like1, but the nuisance
# parameters are obtained by directly maximizing the log-likelihood with
# nlminb instead of solving the score equations.
# Returns c(N_est, alpha_est, theta_est, lambda_est).
# Same $objective -> $maximum bug fix as above.
profile_max.like2 <- function(obs) {
  n <- nrow(obs)
  x <- obs[, 1]
  y <- obs[, 2]
  ## Exponential density.
  g <- function(x, theta) {
    ifelse(x >= 0, theta / exp(theta * x), 0)
  }
  ## Exponential distribution function.
  G <- function(x, theta) {
    ifelse(x >= 0, 1 - 1 / exp(theta * x), 0)
  }

  ### Profile log-likelihood of N; par = c(alpha, theta, lambda).
  f <- function(N) {
    fun <- function(par) {
      (-1) * (sum(log_n((N - n) + (1:n), n)) + (N - n) * log_n(1 - par[1], n) +
                sum(log_n(g(y, par[2]), n)) -
                sum(log_n(1 + par[3] * (G(x, par[2]) - par[1]), n)))
    }
    atl <- nlminb(start = c(0.5, 5, 5), objective = fun,
                  lower = c(0.001, 0.001, -500),
                  upper = c(0.9999, 100, 500))$par
    sum(log_n((N - n) + (1:n), n)) + (N - n) * log_n(1 - atl[1], n) +
      sum(log_n(g(y, atl[2]), n)) -
      sum(log_n(1 + atl[3] * (G(x, atl[2]) - atl[1]), n))
  }
  # $maximum = the argmax N (bug fix; was $objective).
  N_est <- optimize(f, interval = c(n, 500), tol = 0.001, maximum = TRUE)$maximum

  ### Replace N with N_est and recover the nuisance parameters.
  fun <- function(par) {
    (-1) * (sum(log_n((N_est - n) + (1:n), n)) + (N_est - n) * log_n(1 - par[1], n) +
              sum(log_n(g(y, par[2]), n)) -
              sum(log_n(1 + par[3] * (G(x, par[2]) - par[1]), n)))
  }
  atl_est <- nlminb(start = c(0.5, 5, 5), objective = fun,
                    lower = c(0.001, 0.001, -500),
                    upper = c(0.9999, 100, 500))$par
  ## par_full.est = c(N_est, alpha_est, theta_est, lambda_est)
  par_full.est <- c(N_est, atl_est)
  return(par_full.est)
}

5. Four simulation results

5.1 simulation parameter setting

#################################################################################
####             Simulation in one scenario
#################################################################################
# Simulation settings for one scenario.
N_true <- 200   # true population size N
K <- 2000       # number of simulation replicates
a <- 0.05       # significance level
wshape <- 5     # Weibull shape of the target variable
wscale <- 2     # Weibull scale of the target variable
erate  <- 2     # exponential rate of the truncation variable = true theta
theta_true <- erate
# Integrand for the true sampling probability alpha = P(X > Y).
comf <- function(x) {
  pexp(x, rate = erate) * dweibull(x, shape = wshape, scale = wscale)
}
# Bug fix: the source had "\$value" (a markup-escaping artifact), which is a
# syntax error in R; the intended accessor is $value.
alpha_true <- integrate(f = comf, lower = 0, upper = Inf)$value; alpha_true  # true alpha
par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true

The value of N here is 200, so N0 in the max.like2 function above should be set to 200. To consider different sampling probabilities, the parameter settings in the following cases are used (α is the sampling probability):

parameter wshape wscale erate(θ) α
Set 1 5 2 2 0.963
Set 2 1 2 2 0.8
Set 3 1 2 1 0.667
Set 4 1 1 1 0.5

5.2 simulation results and time

All K=2000,N=200

1. Direct maximum likelihood method

#######################  simulating Result ################
### Method1
# Simulation driver for Method 1 (direct full likelihood) and the conditional
# likelihood, accumulating estimates and coverage indicators over K replicates.
#
# Performance fix: the original called max.like(obs) three times and
# max.clike(obs) four times per iteration; each call runs a full nested
# optimization on the same data and is deterministic, so each estimator is now
# computed exactly once per replicate with identical results.
par_est <- NULL
N_hat <- NULL
N_tilde <- NULL
sigma_tilde <- NULL    # sigma estimates from the conditional likelihood method
int <- NULL
t0 <- proc.time()
for (i in 1:K) {
  obs <- gen_obs(wshape, wscale, erate, N_true)
  par_hat <- max.like(obs)     # full-likelihood estimate c(N, alpha, theta)
  par_tilde <- max.clike(obs)  # conditional estimate c(N, alpha, theta)
  par_Nhat <- max.like2(obs)   # constrained estimate c(alpha, theta) at N = N0
  par_est <- rbind(par_est, c(par_hat, par_tilde))
  ter_para_star <- c(par_hat, par_Nhat, N_true, 1 - a)
  N_hat <- rbind(N_hat, par_hat[1])
  N_tilde <- rbind(N_tilde, par_tilde[1])
  sigma_tilde <- rbind(sigma_tilde,
                       sigma_cest(obs = obs,
                                  theta_tilde = par_tilde[3],
                                  N_tilde = par_tilde[1]))
  ter_int <- con_int(obs = obs, para_star = ter_para_star, N_true = N_true)
  int <- rbind(int, ter_int)
  print(i)
}
t <- proc.time() - t0
print(paste0("Direct maximum likelihood method execution time:", t[3][[1]], "second"))

par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true
m    <- apply(par_est, 2, mean); m
sd   <- apply(par_est, 2, sd); sd
bias <- abs(apply(par_est, 2, mean) - par_true); bias
percentage_f <- p_f(K, int = int); percentage_f
percentage_c <- p_c(K, sigma_tilde, N_tilde, N_true, a); percentage_c

Table of simulation results

In the table of simulation results, parameter estimation, standard deviation and deviation of parameter estimation are in order.

parameter estimation N_f α\alphaα_f θ\thetaθ_f percentage_f N_c α\alphaα_c θ\thetaθ_c percentage_c2 percentage_c3 Running time
Set 1 199.3616 3.6174 0.6384 0.9635 0.0108 0.0004 2.0320 0.1888 0.0320 94.05% 200.1870 3.6645 0.1870 0.9620 0.0111 0.0011 2.0065 0.1869 0.0065 88.15% 87.9% 1416 seconds
Set 2 198.2373 17.6154 1.7627 0.8093 0.0533 0.0093 2.0228 0.2109 0.0228 91% 200.2741 19.3338 0.2741 0.8037 0.0563 0.0037 2.0092 0.2097 0.0092 85.8% 86.1% 1424.87 seconds
Set 3 198.9731 32.7968 1.0269 0.6793 0.0749 0.0127 1.0237 0.1430 0.0237 91.7% 201.5994 34.7736 1.5994 0.6730 0.0764 0.0063 1.0139 0.1420 0.0139 83.05% 83.4% 1182.45 seconds
Set 4 201.2625 45.9229 1.2625 0.5156 0.0918 0.0156 1.0285 0.2315 0.0285 93.75% 205.3044 47.6765 5.3044 0.5074 0.0920 0.0074 1.0104 0.2299 0.0104 77.35% 76.3% 918.92 seconds

2, the EM method.

# Simulation driver for the EM method: run EM_it on K simulated samples and
# summarize mean, standard deviation, and absolute bias of the estimates.
em_results <- vector("list", K)
t0 <- proc.time()
for (i in 1:K) {
  obs <- gen_obs(wshape, wscale, erate, N_true)
  em_results[[i]] <- EM_it(obs)
  print(i)
}
par_em <- do.call(rbind, em_results)
t <- proc.time() - t0
print(paste0("EM Method execution time:", t[3][[1]], "second"))
par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true
m_em    <- apply(par_em, 2, mean); m_em
sd_em   <- apply(par_em, 2, sd); sd_em
bias_em <- abs(apply(par_em, 2, mean) - par_true[1:3]); bias_em

EM method simulation results table:
The results are parameter estimation, standard deviation and deviation.

parameter estimation N_em α\alphaα_em θ\thetaθ_em Running time
Set 1 200.2807 3.5829 0.2807 0.9623 0.0108 0.0008 2.0119 0.1832 0.0119 3809.43 seconds
Set 2 198.9456 14.6940 1.0544 0.8070 0.0492 0.0070 2.0127 0.2060 0.0127 3711.39 seconds
Set 3 199.8875 25.5943 0.1125 0.6768 0.0688 0.0101 1.0105 0.1425 0.0105 3803.96 seconds
Set 4 202.7574 44.7711 2.7574 0.5103 0.0916 0.0103 1.0161 0.2407 0.0161 7032.74 seconds

3. Method of solving equations

# Simulation driver for the estimating-equation method: run See on K simulated
# samples; each row of par_See is [N_est, alpha_est, theta_est, lambda_est].
see_results <- vector("list", K)
t0 <- proc.time()
for (i in 1:K) {
  obs <- gen_obs(wshape, wscale, erate, N_true)
  see_results[[i]] <- See(obs)
  print(i)
}
par_See <- do.call(rbind, see_results)
t <- proc.time() - t0
print(paste0("Execution time of solving equation method:", t[3][[1]], "second"))
par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true
m_See    <- apply(par_See[, 1:3], 2, mean); m_See
sd_See   <- apply(par_See[, 1:3], 2, sd); sd_See
bias_See <- abs(apply(par_See[, 1:3], 2, mean) - par_true[1:3]); bias_See


A warning appears while the program is running,

4. The method of constructing section likelihood equation

# Profile-likelihood method, variant 4.1 (inner equations solved with multiroot).
# Each row of par_profile1 is [N_est, alpha_est, theta_est, lambda_est].
profile1_results <- vector("list", K)
t0 <- proc.time()
for (i in 1:K) {
  obs <- gen_obs(wshape, wscale, erate, N_true)
  profile1_results[[i]] <- profile_max.like1(obs)
  print(i)
}
par_profile1 <- do.call(rbind, profile1_results)
t <- proc.time() - t0
print(paste0("Build section likelihood(Internal solution equation)Method execution time:", t[3][[1]], "second"))
par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true
m_profile1    <- apply(par_profile1[, 1:3], 2, mean); m_profile1
sd_profile1   <- apply(par_profile1[, 1:3], 2, sd); sd_profile1
bias_profile1 <- abs(apply(par_profile1[, 1:3], 2, mean) - par_true[1:3]); bias_profile1

# Profile-likelihood method, variant 4.2 (inner maximization via nlminb).
# Each row of par_profile2 is [N_est, alpha_est, theta_est, lambda_est].
par_profile2 <- NULL
t0 <- proc.time()
for (i in 1:K) {
  obs <- gen_obs(wshape, wscale, erate, N_true)
  par_profile2 <- rbind(par_profile2, profile_max.like2(obs))
  print(i)
}
t <- proc.time() - t0
# Bug fix: the message previously said "Internal solution equation", copied
# verbatim from the 4.1 loop; this loop uses the nlminb-based variant.
print(paste0("Build section likelihood(Internal direct maximum likelihood nlminb)Method execution time:", t[3][[1]], "second"))
par_true <- rep(c(N_true, alpha_true, theta_true), 2); par_true
m_profile2    <- apply(par_profile2[, 1:3], 2, mean); m_profile2
sd_profile2   <- apply(par_profile2[, 1:3], 2, sd); sd_profile2
bias_profile2 <- abs(apply(par_profile2[, 1:3], 2, mean) - par_true[1:3]); bias_profile2


The last two methods are very fast, but there are some problems in the results of the R function used. See the document for specific problems.

Published 2 original articles · praised 2 · visited 63

Keywords: Lambda

Added by IAK on Mon, 27 Jan 2020 14:23:57 +0200