library(OpenMx)
# Use one fifth of the on-load default value of 'mvnRelEps':
mxOption(NULL, "mvnRelEps", 0.001)
library(polycor)

### Generate data --------------------------------------------------------
# Four binary (0/1) indicators, built from explicit cell counts (note the
# very large all-zero cell of 1.6 million rows). mxFactor() marks them as
# ordered factors with levels 0 < 1, as OpenMx requires for ordinal data.
datS <- mxFactor(
  data.frame(
    x1 = c(rep(0, 1600000), rep(1, 800), rep(0, 810), rep(1, 195),
           rep(0, 810), rep(1, 3), rep(0, 9), rep(1, 2), rep(0, 780),
           rep(1, 6), rep(0, 6), rep(1, 2), rep(0, 200), rep(1, 2),
           rep(1, 1)),
    x2 = c(rep(0, 1600000), rep(0, 800), rep(1, 810), rep(1, 195),
           rep(0, 810), rep(0, 3), rep(1, 9), rep(1, 2), rep(0, 780),
           rep(0, 6), rep(1, 6), rep(1, 2), rep(0, 200), rep(0, 2),
           rep(1, 1)),
    x3 = c(rep(0, 1600000), rep(0, 800), rep(0, 810), rep(0, 195),
           rep(1, 810), rep(1, 3), rep(1, 9), rep(1, 2), rep(0, 780),
           rep(0, 6), rep(0, 6), rep(0, 2), rep(1, 200), rep(1, 2),
           rep(1, 1)),
    x4 = c(rep(0, 1600000), rep(0, 800), rep(0, 810), rep(0, 195),
           rep(0, 810), rep(0, 3), rep(0, 9), rep(0, 2), rep(1, 780),
           rep(1, 6), rep(1, 6), rep(1, 2), rep(1, 200), rep(1, 2),
           rep(1, 1))
  ),
  levels = c(0, 1)
)
varNames <- names(datS)

### Observed tetrachoric correlations ------------------------------------
# Fill the symmetric 4x4 matrix of pairwise ML tetrachoric correlations.
obsMat <- matrix(1, 4, 4)
for (i in 1:3) {
  for (j in (i + 1):4) {
    print(table(datS[, c(i, j)]))  # Print each 2x2 contingency table
    # ML (not two-step) estimate of the tetrachoric correlation:
    obsMat[i, j] <- obsMat[j, i] <- polychor(table(datS[, c(i, j)]), ML = TRUE)
  }
}
round(obsMat, 3)

### Observed prevalences
colMeans(1 * (datS == 1))

### Problem inverting observed correlation matrix?
round(solve(obsMat), 4)

### Very simple model ----------------------------------------------------
model <- mxModel(
  'SimpMod',
  ### Expected covariance matrix (variance==1 due to binary variables).
  # Type 'Stand' = standardized (unit diagonal); only the off-diagonal
  # correlations are free, started at the observed tetrachorics.
  mxMatrix(type = 'Stand', nrow = 4, ncol = 4, free = TRUE,
           values = obsMat[lower.tri(obsMat)], name = 'eC',
           lbound = 0, ubound = 0.99),
  ### Expected means (fixed to zero; binary variables are threshold-scaled)
  mxMatrix(type = 'Full', nrow = 1, ncol = 4, free = FALSE, values = 0,
           name = 'eM'),
  ### Expected thresholds, started from the observed prevalences
  mxMatrix(type = 'Full', nrow = 1, ncol = 4, free = TRUE,
           values = qnorm(colMeans(1 * (datS == 1)), lower.tail = FALSE),
           name = 'eT'),
  ### Data and likelihood
  mxModel(
    'Datmod',
    mxData(datS, type = 'raw'),
    mxExpectationNormal(means = 'SimpMod.eM', covariance = 'SimpMod.eC',
                        thresholds = 'SimpMod.eT', threshnames = varNames,
                        dimnames = varNames),
    mxFitFunctionML()
  ),
  ### Add the likelihoods (only one)
  mxFitFunctionMultigroup(c('Datmod.fitfunction'))
)

### Fit model
set.seed(190219)
# We don't want mxTryHard* to add large-magnitude perturbations between
# attempts. That's because we're parameterizing the model's tetrachoric
# correlation matrix directly as a standardized matrix, so it's not
# guaranteed to always be positive-definite. With large perturbations, it
# is more likely that the matrix will be non-PD at the start values,
# causing the fit attempt to end with an error. Hence, we use a small
# value for argument 'scale':
modelOut <- mxTryHardOrdinal(model, silent = FALSE, scale = 0.05,
                             extraTries = 30)
summary(modelOut)

######################################################
# Create a compute plan, to be modified:
plan <- omxDefaultComputePlan()
# If you run a model with the default compute plan, the following two
# changes happen automatically during mxRun():
plan$steps$ND$stepSize <- 1.0e-5
plan$steps$ND$iterations <- 3L
# Substitute the gradient-descent compute step with a Nelder-Mead compute
# step. We will use an initial-simplex edge that is smaller than the
# default, to try to avoid a non-PD correlation matrix at the start values.
# Note that the initial simplex will be generated randomly:
plan$steps$GD <- mxComputeNelderMead(
  xTolProx = 1e-7, fTolProx = 1e-7,
  iniSimplexType = "random", iniSimplexEdge = 0.5,
  doPseudoHessian = TRUE
)
m2 <- mxModel(model, plan)
set.seed(190219)  # <-- Same seed as with CSOLNP.
# Again, use a small value for argument 'scale':
m2o <- mxTryHardOrdinal(m2, silent = FALSE, scale = 0.05, extraTries = 30)
summary(m2o)
### Diagnostics from the Nelder-Mead run:
m2o$output$gradient
m2o$compute$steps$GD$output$simplexGradient
eigen(m2o$compute$steps$GD$output$pseudoHessian)$values