
Binary Logistic Regression with BFGS using package maxLik

I am trying to fit a binary logistic regression with BFGS using maxLik. I wrote the function following the syntax attached below, but I get output like this:

Maximum Likelihood estimation

BFGS maximization, 0 iterations

Return code 100: Initial value out of range.

https://docs.google.com/spreadsheets/d/1fVLeJznB9k29FQ_BdvdCF8ztkOwbdFpx/edit?usp=sharing&ouid=109040212946671424093&rtpof=true&sd=true (this is my data)

library(maxLik)
library(optimx)
library(readxl)   # read_excel() comes from readxl
data <- read_excel("Book2.xlsx")
data$JKLaki = ifelse(data$JK==1,1,0) 
data$Daerah_Samarinda<- ifelse(data$Daerah==1,1,0)
data$Prodi2 = ifelse(data$Prodi==2,1,0)
data$Prodi3 = ifelse(data$Prodi==3,1,0)
data$Prodi4 = ifelse(data$Prodi==4,1,0)
str(data)
attach(data)

ll<- function(param){
  mu <- param[1]
  beta <- param[-1]
  y<- as.vector(data$Y)
  x<- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda, data$Prodi2, data$Prodi3, data$Prodi4)
  xb<- x%*%beta
  pi<- exp(xb)
  val <- -sum(y * log(pi) + (1 - y) * log(1 - pi),log=TRUE)
  return(val)
}  
gl <- function(param){
  mu <- param[1]
  beta <- param[-1]
  y <- as.vector(data$Y)
  x <- cbind(0, data$JKLaki,data$IPK,data$Daerah_Samarinda,data$Prodi2,data$Prodi3,data$Prodi4)
  sigma <- x*beta
  pi<- exp(sigma)/(1+exp(sigma))
  v= y-pi
  vx=as.matrix(x)%*%as.vector(v)
  gg= colSums(vx)
  return(-gg)}

mle<-maxLik(logLik=ll, grad=gl,hess=NULL,
            start=c(mu=1, beta1=0, beta2=0, beta3=0, beta4=0, beta5=0, beta6=0,beta7=0), method="BFGS")  
summary(mle)

Could I get some help with this, please? I am worn out trying to solve it.
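For reference, maxLik's return code 100 ("Initial value out of range") means the log-likelihood is not finite at the starting values. In the code above, pi <- exp(xb) is not a probability: at the zero start values pi equals exactly 1, so log(1 - pi) is -Inf. The stray log=TRUE inside sum() is simply summed as the value 1 rather than taking logs, and since maxLik maximizes, logLik should return the log-likelihood itself, not its negative. A minimal corrected sketch, assuming the same data preparation as in the question (the names ll_fix and gl_fix are placeholders):

x <- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda,
           data$Prodi2, data$Prodi3, data$Prodi4)
y <- as.vector(data$Y)

# Log-likelihood of the logistic model, returned as-is because maxLik maximizes
ll_fix <- function(beta) {
  pi <- 1 / (1 + exp(-x %*% beta))   # logistic link keeps pi in (0, 1)
  sum(y * log(pi) + (1 - y) * log(1 - pi))
}

# Analytic gradient (score vector) of the log-likelihood
gl_fix <- function(beta) {
  pi <- as.vector(1 / (1 + exp(-x %*% beta)))
  as.vector(t(x) %*% (y - pi))
}

mle <- maxLik(logLik = ll_fix, grad = gl_fix,
              start = rep(0, ncol(x)), method = "BFGS")
summary(mle)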

I have been able to optimize the log-likelihood using the following code:

library(DEoptim)
library(readxl)

data <- read_excel("Book2.xlsx")
data$JKLaki <- ifelse(data$JK == 1, 1, 0) 
data$Daerah_Samarinda <- ifelse(data$Daerah == 1, 1, 0)
data$Prodi2 <- ifelse(data$Prodi == 2, 1, 0)
data$Prodi3 <- ifelse(data$Prodi == 3, 1, 0)
data$Prodi4 <- ifelse(data$Prodi == 4, 1, 0)

# Negative log-likelihood of the logistic model; parameter values that make
# the likelihood NaN are penalized with a huge value so DEoptim keeps searching
ll <- function(param, data)
{
  mu <- param[1]
  beta <- param[-1]
  y <- as.vector(data$Y)
  x <- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda, data$Prodi2, data$Prodi3, data$Prodi4)
  xb <- x %*% beta
  pi <- exp(mu + xb)
  val <- -sum(y * log(pi) + (1 - y) * log(1 - pi))
  
  if (is.nan(val))
  {
    return(10 ^ 30)
    
  } else
  {
    return(val)
  }
}  

# Stage 1: global search for the minimum over a wide box
lower <- rep(-500, 8)
upper <- rep(500, 8)
obj_DEoptim_Iter1 <- DEoptim(fn = ll, lower = lower, upper = upper,
                             control = list(itermax = 5000), data = data)

# Stage 2: restart the search in a tighter box around the stage-1 optimum
lower <- obj_DEoptim_Iter1$optim$bestmem - 0.25 * abs(obj_DEoptim_Iter1$optim$bestmem)
upper <- obj_DEoptim_Iter1$optim$bestmem + 0.25 * abs(obj_DEoptim_Iter1$optim$bestmem)

obj_DEoptim_Iter2 <- DEoptim(fn = ll, lower = lower, upper = upper,
                             control = list(itermax = 5000), data = data)

# Final local refinement with optim (default Nelder-Mead), started at the DE solution
obj_Optim <- optim(par = obj_DEoptim_Iter2$optim$bestmem, fn = ll, data = data)

$par
         par1          par2          par3          par4          par5          par6          par7 
-350.91045436  347.79576145    0.05337466    0.69032735   -0.01089112    0.47465162    0.38284804 
         par8 
   0.42125664 

$value
[1] 95.08457

$counts
function gradient 
     501       NA 

$convergence
[1] 1

$message
NULL
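The $convergence code of 1 means optim stopped because it hit its iteration limit (the default Nelder-Mead maxit is 500, matching the 501 function evaluations above), so the parameters may not be fully converged. Note also that mu (param[1]) and the leading column of 1s in x are two intercepts for the same model, which is why par1 and par2 are huge and nearly cancel; only their sum is identified. A possible refinement, assuming the objects above are still in the workspace: give optim more iterations, or let a gradient-based method finish the polish.

# Hypothetical refinement step, not part of the original answer:
# raise the iteration limit and use BFGS near the optimum
obj_Optim2 <- optim(par = obj_DEoptim_Iter2$optim$bestmem, fn = ll,
                    data = data, method = "BFGS",
                    control = list(maxit = 1000))
obj_Optim2$convergence   # 0 indicates successful convergence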

