Why doesn't the AIC change with different values of alpha in the BigVAR package?

I currently use R v.4.2.0, BigVAR package v.1.1.0, and Windows 11 home version 21H2.

I'm using the constructModel function to generate a penalized VAR model with the 'HLAGELEM' structure; however, the dual argument does not seem to run cross-validation to choose the best alpha. Instead, I assigned different values of alpha manually in order to compare, via information criteria, which alpha is associated with the best model. The problem is that no matter which alpha I choose, the AIC is the same for every model. Why is this happening, and how can I properly use the dual argument to choose the best alpha automatically?

# Load dependencies with library() so a missing package fails loudly.
# (require() only returns FALSE on failure, which can silently hide a
# broken setup until a later call errors out.)
library(quantmod)
library(zoo)
library(vars)
library(expm)
library(BigVAR)
# get GDP, Federal Funds Rate, CPI from FRED
# Gross Domestic Product (relative to 2000)
getSymbols('GDP', src = 'FRED', type = 'xts')
#> [1] "GDP"
GDP <- aggregate(GDP, as.yearqtr, mean)
GDP <- GDP / mean(GDP["2000"]) * 100
# Transformation code: first difference of logged variables
GDP <- diff(log(GDP))
index(GDP) <- as.yearqtr(index(GDP))
# Federal Funds Rate
getSymbols('FEDFUNDS', src = 'FRED', type = 'xts')
#> [1] "FEDFUNDS"
FFR <- aggregate(FEDFUNDS, as.yearqtr, mean)
# Transformation code: first difference
FFR <- diff(FFR)
# CPI, all urban consumers, relative to 1983
getSymbols('CPIAUCSL', src = 'FRED', type = 'xts')
#> [1] "CPIAUCSL"
CPI <- aggregate(CPIAUCSL, as.yearqtr, mean)
CPI <- CPI / mean(CPI['1983']) * 100
# Transformation code: difference of logged variables
CPI <- diff(log(CPI))
# Seasonally adjusted M1
getSymbols('M1SL', src = 'FRED', type = 'xts')
#> [1] "M1SL"
M1 <- aggregate(M1SL, as.yearqtr, mean)
# Transformation code: difference of logged variables
M1 <- diff(log(M1))
# Combine the four series and drop rows lost to differencing
Y <- cbind(CPI, FFR, GDP, M1)
names(Y) <- c("CPI", "FFR", "GDP", "M1")
Y <- na.omit(Y)
k <- ncol(Y)
# NOTE(review): `T` masks the built-in shorthand for TRUE; kept for
# compatibility with the original script, but prefer a name like n_obs.
T <- nrow(Y)
# Start/end of the rolling validation window
T1 <- which(index(Y) == "1985 Q1")
T2 <- which(index(Y) == "2005 Q1")

# Demean and standardize each column using statistics computed on the
# pre-T1 (training) sample only, so the validation period does not leak
# into the scaling. The training means and sds are hoisted out of the
# loop (the original recomputed apply(Y[1:T1, ], 2, sd) on every
# iteration). Note sd is invariant to demeaning, so combining both
# steps in one pass gives the same result as the original two-step code.
train_means <- apply(Y[1:T1, ], 2, mean)
train_sds   <- apply(Y[1:T1, ], 2, sd)
for (i in seq_len(k)) {
  Y[, i] <- (Y[, i] - train_means[i]) / train_sds[i]
}

# Fit an Elementwise HLAG model

# NOTE(review): with struct = "HLAGELEM" the penalty is a hierarchical
# group lasso; the elastic-net mixing parameter alpha is not part of it,
# so model.controls = list(alpha = ...) has no effect on this structure.
# Per the BigVAR documentation, alpha (and dual = TRUE, which
# cross-validates over alpha as well as lambda) only applies to the
# elastic-net-type structures "SparseLag" and "SparseOO". This is why
# the AIC below is identical across every manually chosen alpha.
Model1=constructModel(as.matrix(Y),p=4,struct="HLAGELEM", 

gran=c(25,10),verbose=FALSE,VARX=list(),T1=T1,T2=T2)

# Rolling cross-validation: selects the penalty parameter lambda only.
Model1Results=cv.BigVAR(Model1)

# Inspect the slots of the fitted BigVAR results object.
str(Model1Results)

Then I manually tried different values of alpha:

  # One model specification per candidate alpha, compared later via
  # information criteria.
  # NOTE(review): alpha is the elastic-net mixing weight; the "HLAGELEM"
  # structure ignores it, so all of these fit the identical penalty and
  # produce the same AIC. To actually tune alpha, use an elastic-net
  # structure and let cv.BigVAR search over alpha and lambda jointly:
  #   constructModel(as.matrix(Y), p = 4, struct = "SparseLag",
  #                  gran = c(25, 10), dual = TRUE)
  model_0   <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0))
  model_0.1 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.1))
  model_0.2 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.2))
  model_0.3 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.3))
  model_0.4 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.4))
  model_0.5 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.5))
  model_0.6 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.6))
  model_0.7 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.7))
  model_0.8 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.8))
  model_0.9 <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 0.9))
  # BUG FIX: the original passed alpha = 0 here, silently duplicating
  # model_0 instead of fitting the alpha = 1 (pure lasso) endpoint.
  model_1   <- constructModel(as.matrix(Y), p = 4, struct = "HLAGELEM", gran = c(25, 10), verbose = FALSE, IC = TRUE, model.controls = list(alpha = 1))

# Cross Validation

# Run rolling cross-validation for each manually specified alpha,
# producing one fitted BigVAR results object per candidate model.
Results_0   <- cv.BigVAR(model_0)
Results_0.1 <- cv.BigVAR(model_0.1)
Results_0.2 <- cv.BigVAR(model_0.2)
Results_0.3 <- cv.BigVAR(model_0.3)
Results_0.4 <- cv.BigVAR(model_0.4)
Results_0.5 <- cv.BigVAR(model_0.5)
Results_0.6 <- cv.BigVAR(model_0.6)
Results_0.7 <- cv.BigVAR(model_0.7)
Results_0.8 <- cv.BigVAR(model_0.8)
Results_0.9 <- cv.BigVAR(model_0.9)
Results_1   <- cv.BigVAR(model_1)
  
# Best AIC
# Print the AIC standard deviation slot for each candidate alpha.
# NOTE(review): alpha is ignored by the "HLAGELEM" structure, so these
# values are expected to be identical across all of the models.
  print(Results_0@AICSD, quote = TRUE)
  print(Results_0.1@AICSD, quote = TRUE)
  print(Results_0.2@AICSD, quote = TRUE)
  print(Results_0.3@AICSD, quote = TRUE)
  print(Results_0.4@AICSD, quote = TRUE)
  print(Results_0.5@AICSD, quote = TRUE)
  print(Results_0.6@AICSD, quote = TRUE)
  print(Results_0.7@AICSD, quote = TRUE)
  print(Results_0.8@AICSD, quote = TRUE)
  print(Results_0.9@AICSD, quote = TRUE)
  # BUG FIX: the original re-printed Results_0.1 here, so the alpha = 1
  # model's AIC was never actually displayed.
  print(Results_1@AICSD, quote = TRUE)

These are the results I get.

[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829
[1] 284.9829

I tried this with the AICMSFE, AICSD, AICpvec, AICsvec, BICMSFE, BICSD, BICpvec, and BICsvec slots from the model results object, but still, no change was shown. I'd really appreciate your help solving this issue.



Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source