Can't get the fit indices for a DoC model, as the Saturated DZ covariance is non-positive definite

Posted by lf-araujo (Joined: 11/25/2020)
I am unable to figure out how to fix this. Check out this error:

Running Saturated DoC with 26 parameters
Error in summary.MxModel(out, refModels = mxRefModels(out, run = T)) :
Error : The job for model 'Saturated DoC' exited abnormally with the error message: fit is not finite (The continuous part of the model implied covariance (loc1) is not positive definite in data 'Saturated DZ.data' row 879. Detail:
covariance = matrix(c( # 2x2
-4.20388567580781, 0.587702305205191
, 0.587702305205191, 0.955870642918316), byrow=TRUE, nrow=2, ncol=2)
)
In addition: Warning message:
In model 'Saturated DoC' Optimizer returned a non-zero status code 10. Starting values are not feasible. Consider mxTryHard()
>

The expected covariance for that particular DZ model is mostly zeros, which is what we want, no?


> mxGetExpected(satur$Saturated$`Saturated DZ`, "covariances")
       obese1 ht1 obese2 ht2
obese1      1   0      0   0
ht1         0   0      1   1
obese2      0   1      0   0
ht2         0   1      0   1
>

Finally, one of the eigenvalues is negative:


> mxGetExpected(satur$Saturated$`Saturated DZ`, "covariances") |> eigen()
eigen() decomposition
$values
[1]  1.80194  1.00000  0.44504 -1.24698

$vectors
         [,1] [,2]     [,3]     [,4]
[1,]  0.00000    1  0.00000  0.00000
[2,] -0.59101    0 -0.32799  0.73698
[3,] -0.32799    0 -0.73698 -0.59101
[4,] -0.73698    0  0.59101 -0.32799
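
As a quick check (reusing the satur object from the MWE below), any non-positive eigenvalue means the implied covariance is not positive definite:

# Sanity check: a covariance matrix is positive definite only if all eigenvalues are > 0
expCovDZ <- mxGetExpected(satur$Saturated$`Saturated DZ`, "covariances")
any(eigen(expCovDZ, only.values = TRUE)$values <= 0)  # TRUE here, hence the ML error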

Here is the MWE:


library(umx)
library(dplyr)
library(tidyr)
options(mxByrow = TRUE) # I know, sorry

# Generating data
data(twinData)
glimpse(twinData)

# Make "obese" variable with ~20% subjects categorised as overweight and 30% as obese
obesityLevels = c('normal', 'overweight', 'obese', 'obese1')
cutPoints = quantile(twinData[, "bmi1"], probs = c(.1, .2, .3), na.rm = TRUE)
twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels)
# Step 2: Make the ordinal variables into umxFactors (ordered, with the levels found in the data)
selVars = c("obese1", "obese2")
twinData <- mutate(twinData, obese1 = factor(obese1, ordered = T),
obese2 = factor(obese2, ordered = T),
)

mzData = subset(twinData, zygosity %in% c("MZFF", "MZMM"))
dzData = subset(twinData, zygosity %in% c("DZFF", "DZMM"))

pheno = c("obese", "ht")
name = "DoC"
sep = ""
covar = NULL
vnames = tvars(pheno, sep = sep)

# Find ordinal variables
colTypes = umx_is_ordered(xmu_extract_column(mzData, vnames), summaryObject = TRUE)

if (colTypes$nOrdVars > 0) {
  ty = umxThresholdMatrix(rbind(mzData, dzData), fullVarNames = colTypes$ordVarNames,
                          sep = sep, method = "Mehta")
  ordinalPresent = TRUE
}

xmu_twin_check(selDVs = pheno,
               sep = sep, dzData = dzData, mzData = mzData, enforceSep = TRUE,
               nSib = 2, optimizer = "CSOLNP")

top = mxModel("top",
mxMatrix(name = "I", type = "Iden", nrow = 2, ncol = 2),
umxMatrixFree("B",
type = "Full", nrow = 2, ncol = 2,
labels = c(
NA, "g2",
"g1", NA
)
),
umxMatrix("psi_a", type = "Symm", ncol = 2, nrow = 2, byrow = TRUE,
labels = c(
"ax2",
"ra", "ay2"
),
free = c(
T,
T, T
),
values = c(
0.1,
0.05, 0.1
),
),
umxMatrixFree("psi_c", type = "Symm", ncol = 2, nrow = 2,
labels = c(
"cx2",
"rc", "cy2"
),
),
umxMatrixFree("psi_e", type = "Symm", ncol = 2, nrow = 2,
labels = c(
"ex2",
"re", "ey2"
),
),
umxMatrix("lamba", type = "Diag", nrow = 2, ncol = 2, byrow = TRUE,
free = c(F, F), labels = c("σ1", "σ2"),
values = c(1, 1)
),
mxAlgebra("A", expression = lamba %&% (solve(I - B) %&% psi_a)),
mxAlgebra("C", expression = lamba %&% (solve(I - B) %&% psi_c)),
mxAlgebra("E", expression = lamba %&% (solve(I - B) %&% psi_e)),
mxAlgebra("CovMZ", expression = rbind(
cbind(A + C + E, A + C),
cbind(A + C, A + C + E)
)),
mxAlgebra(name = "CovDZ", expression = rbind(
cbind(A + C + E, 0.5 %x% A + C),
cbind(0.5 %x% A + C, A + C + E)
)),
mxAlgebra("VC", expression = cbind(A, C, E, A / (A + C + E),
C / (A + C + E), E / (A + C + E)),
dimnames = list(
rep("VC", 2),
rep(c("A", "C", "E", "SA", "SC", "SE"), each = 2)
)
) ,

# THE SECTION BELOW IS TO CALCULATE THE MEANS, THE OVERCOMPLICATED APPROACH IS LEFT OVER OF ADDING COVARS
# BUT SHOULDNT AFFECT THE ISSUE
# create algebra for expected mean & threshold matrices
mxMatrix( type="Full", nrow=1, ncol=2, free= TRUE,
labels=c("mean_Ph1","mean_Ph2"),
name="meanT1" ),
# in mz twins, prs_twin1 == prs_twin2
# so, twin2 in mz pairs does not have the prs variable
mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE,
labels=c("mean_Ph1","mean_Ph2"),
name="meanT2MZ"),
# in dz twins, prs_twin1 != prs_twin2
mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE,
labels=c("mean_Ph1","mean_Ph2"),
name="meanT2DZ" )
)

# This is left over from adding covariates, but shouldn't affect the issue
expMeanMZ <- mxAlgebra(name = "expMeanMZ", expression = cbind(top.meanT1, top.meanT2MZ))

expMeanDZ <- mxAlgebra(name = "expMeanDZ", expression = cbind(top.meanT1, top.meanT2DZ))

# Adding thresholds
top = top + ty

expMZ = mxExpectationNormal("top.CovMZ", means = "expMeanMZ", dimnames = vnames,
                            thresholds = "top.threshMat", threshnames = colTypes$ordVarNames)

expDZ = mxExpectationNormal("top.CovDZ", means = "expMeanDZ", dimnames = vnames,
                            thresholds = "top.threshMat", threshnames = colTypes$ordVarNames)

MZ = mxModel("MZ", expMeanMZ, expMZ, mxFitFunctionML())
DZ = mxModel("DZ", expMeanDZ, expDZ, mxFitFunctionML())

# Adding data
MZ = MZ + mxData(mzData, type = "raw")
DZ = DZ + mxData(dzData, type = "raw")

model = mxModel(name, top, MZ, DZ, mxFitFunctionMultigroup(c("MZ", "DZ")))
# Drop g2 and re to identify the DoC model
model = umxModify(model, update = c("g2", "re"), autoRun = FALSE)
model = omxSetParameters(model, labels = c("obese_dev1", "obese_dev2", "obese_dev3"),
                         free = c(FALSE, FALSE, TRUE), values = c(0.1, 0.1, 0.2))

out <- mxRun(model)
summary(out, refModels = mxRefModels(out, run = T))

satur <- mxRefModels(out)
mxGetExpected(satur$Saturated$`Saturated DZ`, "covariances") |> eigen()

Replied on Wed, 10/04/2023 - 15:17 by lf-araujo (Joined: 11/25/2020)

As I hit the submit button I got a suggestion from Mike. Copying it here:

Ok, you could get a sort of saturated model fit manually instead. I am assuming that the way OpenMx does it is to fit with a symmetric matrix, which could generate positive definite / singular / negative definite matrices. If it fits instead with a lower triangular matrix multiplied by its transpose, then only positive definite or singular matrices can be generated (you can't get a negative definite one because of all the squaring, basically). To get rid of the singular solutions, one can bound only the diagonal elements of the lower triangular matrix to be greater than zero, .001 for example.

The error basically says that an eigenvalue of the continuous part is negative. And it is: there's a weird specification of their covariances, which is

.1  1
 1  1

Can you figure out how you managed to give a variance of .1 to one variable, a variance of 1 to the other variable, and a covariance of 1 as well? Better to have a matrix like this:

 1 .1
.1  1

for starting values.

Cheers
Mike
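
To make the suggestion concrete, here is a minimal sketch along those lines (names, start values, and dimensions are illustrative, not taken from the models above):

# 1) Mike's point about the start values: the quoted matrix is indefinite,
#    the suggested alternative is positive definite.
bad  <- matrix(c(.1, 1, 1, 1), 2, 2)
good <- matrix(c(1, .1, .1, 1), 2, 2)
eigen(bad,  only.values = TRUE)$values   # one eigenvalue is negative
eigen(good, only.values = TRUE)$values   # both eigenvalues are positive

# 2) A lower-triangular (Cholesky-style) saturated covariance: L %*% t(L) can only
#    be positive (semi-)definite, and bounding the diagonal of L away from zero
#    rules out the singular case.
nv <- 4                                  # 2 phenotypes x 2 twins
lb <- matrix(NA, nv, nv)
diag(lb) <- .001                         # bound only the diagonal elements
lowDZ <- mxMatrix(type = "Lower", nrow = nv, ncol = nv, free = TRUE,
                  values = diag(nv), lbound = lb, name = "lowDZ")
covDZ <- mxAlgebra(lowDZ %*% t(lowDZ), name = "covDZ")

In a saturated model like the ones below, covDZ (and a matching covMZ) would then stand in for the free Symm covariance matrices.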

Replied on Thu, 10/05/2023 - 11:04 by lf-araujo (Joined: 11/25/2020)

I get the same error using Prof. Maes' example for a bivariate (two-phenotype) model, so I suppose the problem lies in the use of mxRefModels()?

The error:


> sumSAT <- summary( fitSAT , refModels = mxRefModels(fitSAT, run = T))
Running Saturated twoSATj with 26 parameters
Error in summary.MxModel(fitSAT, refModels = mxRefModels(fitSAT, run = T)) :
Error : The job for model 'Saturated twoSATj' exited abnormally with the error message: fit is not finite (Ordinal covariance is not positive definite in data 'Saturated DZ.data' row 171 (loc1))
In addition: Warning message:
In model 'Saturated twoSATj' Optimizer returned a non-zero status code 10. Starting values are not feasible. Consider mxTryHard()

How can I obtain the fit indices (CFI, RMSEA) in these cases?


# ----------------------------------------------------------------------------------------------------------------------
# Program: twoSATj.R
# Author: Hermine Maes
# Date: 10 22 2018
#
# Twin Bivariate Saturated model to estimate means & variances, thresholds & correlations across multiple groups
# Matrix style model - Raw data - Joint Continuous Ordinal data
# -------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
# Load Libraries & Options
rm(list=ls())
library(OpenMx)
library(psych); library(polycor)
source("miFunctions.R")
# Create Output
filename <- "twoSATj"
sink(paste(filename,".Ro",sep=""), append=FALSE, split=TRUE)
# ----------------------------------------------------------------------------------------------------------------------
# PREPARE DATA
# Load Data
data(twinData)
dim(twinData)
describe(twinData[,1:12], skew=F)
twinData[,'ht1'] <- twinData[,'ht1']*10
twinData[,'ht2'] <- twinData[,'ht2']*10
twinData[,'wt1'] <- twinData[,'wt1']/10
twinData[,'wt2'] <- twinData[,'wt2']/10
# Select Continuous Variables
vars <- c('ht') # list of continuous variables names
nvc <- 1 # number of continuous variables
ntvc <- nvc*2 # number of total continuous variables
conVars <- paste(vars,c(rep(1,nvc),rep(2,nvc)),sep="")
# Select Ordinal Variables
nth <- 3 # number of thresholds
vars <- c('wt') # list of ordinal variables names
nvo <- 1 # number of ordinal variables
ntvo <- nvo*2 # number of total ordinal variables
ordVars <- paste(vars,c(rep(1,nvo),rep(2,nvo)),sep="")
ordData <- twinData
wtquant <- quantile(ordData[,c('wt1','wt2')],(0:(nth+1))/(nth+1),na.rm=TRUE)
for (i in c('wt1','wt2')) { ordData[,i] <- cut(ordData[,i], breaks=wtquant, labels=c(0:nth)) }
# Select Variables for Analysis
vars <- c('ht','wt') # list of variables names
nv <- nvc+nvo # number of variables
ntv <- nv*2 # number of total variables
selVars <- paste(vars,c(rep(1,nv),rep(2,nv)),sep="")
# Select Data for Analysis
mzData <- subset(ordData, zyg==1, selVars)
dzData <- subset(ordData, zyg==3, selVars)
mzDataF <- cbind(mzData[,conVars],mxFactor( x=mzData[,ordVars], levels=c(0:nth)) )
dzDataF <- cbind(dzData[,conVars],mxFactor( x=dzData[,ordVars], levels=c(0:nth)) )
# Generate Descriptive Statistics
apply(mzData,2,myMean)
apply(dzData,2,myMean)
sapply(mzData[,ordVars],table)
sapply(dzData[,ordVars],table)
hetcor(mzData)$cor
hetcor(dzData)$cor
# Set Starting Values
frMV <- c(TRUE,FALSE) # free status for variables
frCvD <- valDiagLU(frMV,T,T,ntv) # free status for diagonal, lower & upper elements of covariance matrix
frCv <- matrix(as.logical(frCvD),4)
svMe <- c(15,0) # start value for means
svVa <- c(.5,1) # start value for variances
lbVa <- .0001 # lower bound for variances
svLTh <- 0 # start value for first threshold
svITh <- 1 # start value for increments
svTh <- matrix(rep(c(svLTh,(rep(svITh,nth-1)))),nrow=nth,ncol=nv) # start value for thresholds
lbTh <- matrix(rep(c(-3,(rep(0.001,nth-1))),nv),nrow=nth,ncol=nv) # lower bound for thresholds
# Create Labels
labMeMZ <- labVars("meanMZ",selVars)
labMeDZ <- labVars("meanDZ",selVars)
labMeZ <- labVars("meanZ",selVars)
labCvMZ <- labLower("covMZ",ntv)
labCvDZ <- labLower("covDZ",ntv)
labCvZ <- labLower("covZ",ntv)
labVaMZ <- labDiag("covMZ",ntv)
labVaDZ <- labDiag("covDZ",ntv)
labVaZ <- labDiag("covZ",ntv)
labThMZ <- labTh("MZ",ordVars,nth)
labThDZ <- labTh("DZ",ordVars,nth)
labThZ <- labTh("Z",ordVars,nth)
# ----------------------------------------------------------------------------------------------------------------------
# PREPARE MODEL
# Create Algebra for expected Mean & Threshold Matrices
meanMZ <- mxMatrix( type="Full", nrow=1, ncol=ntv, free=frMV, values=svMe, labels=labMeMZ, name="meanMZ" )
meanDZ <- mxMatrix( type="Full", nrow=1, ncol=ntv, free=frMV, values=svMe, labels=labMeDZ, name="meanDZ" )
thinMZ <- mxMatrix( type="Full", nrow=nth, ncol=ntvo, free=TRUE, values=svTh, lbound=lbTh, labels=labThMZ, name="thinMZ" )
thinDZ <- mxMatrix( type="Full", nrow=nth, ncol=ntvo, free=TRUE, values=svTh, lbound=lbTh, labels=labThDZ, name="thinDZ" )
inc <- mxMatrix( type="Lower", nrow=nth, ncol=nth, free=FALSE, values=1, name="inc" )
threMZ <- mxAlgebra( expression= inc %*% thinMZ, name="threMZ" )
threDZ <- mxAlgebra( expression= inc %*% thinDZ, name="threDZ" )
# Create Algebra for expected Covariance Matrices
covMZ <- mxMatrix( type="Symm", nrow=ntv, ncol=ntv, free=frCv, values=valDiag(svVa,ntv), lbound=valDiag(lbVa,ntv),
labels=labCvMZ, name="covMZ" )
covDZ <- mxMatrix( type="Symm", nrow=ntv, ncol=ntv, free=frCv, values=valDiag(svVa,ntv), lbound=valDiag(lbVa,ntv),
labels=labCvDZ, name="covDZ" )
# Create Data Objects for Multiple Groups
dataMZ <- mxData( observed=mzDataF, type="raw" )
dataDZ <- mxData( observed=dzDataF, type="raw" )
# Create Expectation Objects for Multiple Groups
expMZ <- mxExpectationNormal( covariance="covMZ", means="meanMZ", dimnames=selVars, thresholds="threMZ", threshnames=ordVars )
expDZ <- mxExpectationNormal( covariance="covDZ", means="meanDZ", dimnames=selVars, thresholds="threDZ", threshnames=ordVars )
funML <- mxFitFunctionML()
# Create Model Objects for Multiple Groups
modelMZ <- mxModel( meanMZ, covMZ, thinMZ, inc, threMZ, dataMZ, expMZ, funML, name="MZ" )
modelDZ <- mxModel( meanDZ, covDZ, thinDZ, inc, threDZ, dataDZ, expDZ, funML, name="DZ" )
multi <- mxFitFunctionMultigroup( c("MZ","DZ") )
# Create Confidence Interval Objects
ciCor <- mxCI( c('MZ.covMZ','DZ.covDZ' ))
ciThre <- mxCI( c('MZ.threMZ','DZ.threDZ' ))
# Build Saturated Model with Confidence Intervals
modelSAT <- mxModel( "twoSATj", modelMZ, modelDZ, multi, ciCor, ciThre )
# ----------------------------------------------------------------------------------------------------------------------
# RUN MODEL
# Run Saturated Model
fitSAT <- mxRun( modelSAT, intervals=F )
sumSAT <- summary( fitSAT )
# Print Goodness-of-fit Statistics & Parameter Estimates
fitGofs(fitSAT)
fitEsts(fitSAT)
mxGetExpected( fitSAT, c("means","thresholds","covariance"))
# ----------------------------------------------------------------------------------------------------------------------
# RUN SUBMODELS
# Constrain expected Thresholds to be equal across Twin Order
modelEMTVO <- mxModel( fitSAT, name="twoEMTVOj" )
svMe <- c(15,0); svVa <- c(.5,1); svLTh <- 0; svITh <- 1;
for (i in 1:nv) {
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labMeMZ[nv+i],labMeMZ[i]), free=frMV[i], values=svMe[i],
newlabels=labMeMZ[i] )
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labMeDZ[nv+i],labMeDZ[i]), free=frMV[i], values=svMe[i],
newlabels=labMeDZ[i] )
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labVaMZ[nv+i],labVaMZ[i]), free=frMV[i], values=svVa[i],
newlabels=labVaMZ[i] )
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labVaDZ[nv+i],labVaDZ[i]), free=frMV[i], values=svVa[i],
newlabels=labVaDZ[i] ) }
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labThMZ[nvo*nth+1],labThMZ[1]), free=TRUE, values=svLTh,
newlabels=labThMZ[1] )
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labThDZ[nvo*nth+1],labThDZ[1]), free=TRUE, values=svLTh,
newlabels=labThDZ[1] )
for (i in 2:nth) {for (j in seq(i,nvo*nth,nth)) {
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labThMZ[nvo*nth+j],labThMZ[j]), free=TRUE, values=svITh,
newlabels=labThMZ[j] )
modelEMTVO <- omxSetParameters( modelEMTVO, label=c(labThDZ[nvo*nth+j],labThDZ[j]), free=TRUE, values=svITh,
newlabels=labThDZ[j] ) }}
fitEMTVO <- mxRun( modelEMTVO, intervals=F )
fitGofs(fitEMTVO); fitEsts(fitEMTVO)
# Constrain expected Thresholds to be equal across Twin Order and Zygosity
modelEMTVZ <- mxModel( fitEMTVO, name="twoEMTVZj" )
for (i in 1:nv) {
modelEMTVZ <- omxSetParameters( modelEMTVZ, label=c(labMeMZ[i],labMeDZ[i]), free=frMV[i], values=svMe[i], newlabels=labMeZ[i] )
modelEMTVZ <- omxSetParameters( modelEMTVZ, label=c(labVaMZ[i],labVaDZ[i]), free=frMV[i], values=svVa[i], newlabels=labVaZ[i] ) }
modelEMTVZ <- omxSetParameters( modelEMTVZ, label=c(labThMZ[1],labThDZ[1]), free=TRUE, values=svLTh, newlabels=labThZ[1] )
for (i in 2:nth) {for (j in seq(i,nvo*nth,nth)) {
modelEMTVZ <- omxSetParameters( modelEMTVZ, label=c(labThMZ[j],labThDZ[j]), free=TRUE, values=svITh, newlabels=labThZ[j] ) }}
fitEMTVZ <- mxRun( modelEMTVZ, intervals=F )
fitGofs(fitEMTVZ); fitEsts(fitEMTVZ)
# Print Comparative Fit Statistics
mxCompare( fitSAT, subs <- list(fitEMTVO, fitEMTVZ) )
# ----------------------------------------------------------------------------------------------------------------------
sink()
save.image(paste(filename,".Ri",sep=""))

Replied on Thu, 10/05/2023 - 15:36 by lf-araujo (Joined: 11/25/2020)

I can't edit the post above; the correct code from Prof. Maes, which gives the error, is:


# ----------------------------------------------------------------------------------------------------------------------
# Program: twoSATj.R
# Author: Hermine Maes
# Date: 10 22 2018
#
# Twin Bivariate Saturated model to estimate means & variances, thresholds & correlations across multiple groups
# Matrix style model - Raw data - Joint Continuous Ordinal data
# -------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
# Load Libraries & Options
# rm(list=ls())
library(OpenMx)
library(psych); library(polycor)
source("miFunctions.R")
# Create Output
filename <- "twoSATj"
sink(paste(filename,".Ro",sep=""), append=FALSE, split=TRUE)
# ----------------------------------------------------------------------------------------------------------------------
# PREPARE DATA
# Load Data
data(twinData)
dim(twinData)
describe(twinData[,1:12], skew=F)
twinData[,'ht1'] <- twinData[,'ht1']*10
twinData[,'ht2'] <- twinData[,'ht2']*10
twinData[,'wt1'] <- twinData[,'wt1']/10
twinData[,'wt2'] <- twinData[,'wt2']/10
# Select Continuous Variables
vars <- c('ht') # list of continuous variables names
nvc <- 1 # number of continuous variables
ntvc <- nvc*2 # number of total continuous variables
conVars <- paste(vars,c(rep(1,nvc),rep(2,nvc)),sep="")
# Select Ordinal Variables
nth <- 3 # number of thresholds
vars <- c('wt') # list of ordinal variables names
nvo <- 1 # number of ordinal variables
ntvo <- nvo*2 # number of total ordinal variables
ordVars <- paste(vars,c(rep(1,nvo),rep(2,nvo)),sep="")
ordData <- twinData
wtquant <- quantile(ordData[,c('wt1','wt2')],(0:(nth+1))/(nth+1),na.rm=TRUE)
for (i in c('wt1','wt2')) { ordData[,i] <- cut(ordData[,i], breaks=wtquant, labels=c(0:nth)) }
# Select Variables for Analysis
vars <- c('ht','wt') # list of variables names
nv <- nvc+nvo # number of variables
ntv <- nv*2 # number of total variables
selVars <- paste(vars,c(rep(1,nv),rep(2,nv)),sep="")
# Select Data for Analysis
mzData <- subset(ordData, zyg==1, selVars)
dzData <- subset(ordData, zyg==3, selVars)
mzDataF <- cbind(mzData[,conVars],mxFactor( x=mzData[,ordVars], levels=c(0:nth)) )
dzDataF <- cbind(dzData[,conVars],mxFactor( x=dzData[,ordVars], levels=c(0:nth)) )
# Generate Descriptive Statistics
apply(mzData,2,myMean)
apply(dzData,2,myMean)
sapply(mzData[,ordVars],table)
sapply(dzData[,ordVars],table)
hetcor(mzData)$cor
hetcor(dzData)$cor
# Set Starting Values
frMV <- c(TRUE,FALSE) # free status for variables
frCvD <- valDiagLU(frMV,T,T,ntv) # free status for diagonal, lower & upper elements of covariance matrix
frCv <- matrix(as.logical(frCvD),4)
svMe <- c(15,0) # start value for means
svVa <- c(.5,1) # start value for variances
lbVa <- .0001 # lower bound for variances
svLTh <- 0 # start value for first threshold
svITh <- 1 # start value for increments
svTh <- matrix(rep(c(svLTh,(rep(svITh,nth-1)))),nrow=nth,ncol=nv) # start value for thresholds
lbTh <- matrix(rep(c(-3,(rep(0.001,nth-1))),nv),nrow=nth,ncol=nv) # lower bound for thresholds
# Create Labels
labMeMZ <- labVars("meanMZ",selVars)
labMeDZ <- labVars("meanDZ",selVars)
labMeZ <- labVars("meanZ",selVars)
labCvMZ <- labLower("covMZ",ntv)
labCvDZ <- labLower("covDZ",ntv)
labCvZ <- labLower("covZ",ntv)
labVaMZ <- labDiag("covMZ",ntv)
labVaDZ <- labDiag("covDZ",ntv)
labVaZ <- labDiag("covZ",ntv)
labThMZ <- labTh("MZ",ordVars,nth)
labThDZ <- labTh("DZ",ordVars,nth)
labThZ <- labTh("Z",ordVars,nth)
# ----------------------------------------------------------------------------------------------------------------------
# PREPARE MODEL
# Create Algebra for expected Mean & Threshold Matrices
meanMZ <- mxMatrix( type="Full", nrow=1, ncol=ntv, free=frMV, values=svMe, labels=labMeMZ, name="meanMZ" )
meanDZ <- mxMatrix( type="Full", nrow=1, ncol=ntv, free=frMV, values=svMe, labels=labMeDZ, name="meanDZ" )
thinMZ <- mxMatrix( type="Full", nrow=nth, ncol=ntvo, free=TRUE, values=svTh, lbound=lbTh, labels=labThMZ, name="thinMZ" )
thinDZ <- mxMatrix( type="Full", nrow=nth, ncol=ntvo, free=TRUE, values=svTh, lbound=lbTh, labels=labThDZ, name="thinDZ" )
inc <- mxMatrix( type="Lower", nrow=nth, ncol=nth, free=FALSE, values=1, name="inc" )
threMZ <- mxAlgebra( expression= inc %*% thinMZ, name="threMZ" )
threDZ <- mxAlgebra( expression= inc %*% thinDZ, name="threDZ" )
# Create Algebra for expected Covariance Matrices
covMZ <- mxMatrix( type="Symm", nrow=ntv, ncol=ntv, free=frCv, values=valDiag(svVa,ntv), lbound=valDiag(lbVa,ntv),
labels=labCvMZ, name="covMZ" )
covDZ <- mxMatrix( type="Symm", nrow=ntv, ncol=ntv, free=frCv, values=valDiag(svVa,ntv), lbound=valDiag(lbVa,ntv),
labels=labCvDZ, name="covDZ" )
# Create Data Objects for Multiple Groups
dataMZ <- mxData( observed=mzDataF, type="raw" )
dataDZ <- mxData( observed=dzDataF, type="raw" )
# Create Expectation Objects for Multiple Groups
expMZ <- mxExpectationNormal( covariance="covMZ", means="meanMZ", dimnames=selVars, thresholds="threMZ", threshnames=ordVars )
expDZ <- mxExpectationNormal( covariance="covDZ", means="meanDZ", dimnames=selVars, thresholds="threDZ", threshnames=ordVars )
funML <- mxFitFunctionML()
# Create Model Objects for Multiple Groups
modelMZ <- mxModel( meanMZ, covMZ, thinMZ, inc, threMZ, dataMZ, expMZ, funML, name="MZ" )
modelDZ <- mxModel( meanDZ, covDZ, thinDZ, inc, threDZ, dataDZ, expDZ, funML, name="DZ" )
multi <- mxFitFunctionMultigroup( c("MZ","DZ") )
# Create Confidence Interval Objects
ciCor <- mxCI( c('MZ.covMZ','DZ.covDZ' ))
ciThre <- mxCI( c('MZ.threMZ','DZ.threDZ' ))
# Build Saturated Model with Confidence Intervals
modelSAT <- mxModel( "twoSATj", modelMZ, modelDZ, multi, ciCor, ciThre )
# ----------------------------------------------------------------------------------------------------------------------
# RUN MODEL
# Run Saturated Model
fitSAT <- mxRun( modelSAT, intervals=F )
sumSAT <- summary( fitSAT , refModels = mxRefModels(fitSAT, run = T))

Replied on Fri, 11/17/2023 - 11:43 by AdminNeale (Joined: 03/01/2013)

To fix it, you can run the optimization of the reference models manually with mxTryHard(), like this:

myrefModels <- mxRefModels(fitSAT, run = FALSE)
myrefModels$Saturated    <- mxTryHard(myrefModels$Saturated)
myrefModels$Independence <- mxTryHard(myrefModels$Independence)
summary(fitSAT, refModels = myrefModels)

However, I note that it would be easier if mxRefModels() took a method= argument to use mxTryHard() or mxTryHardOrdinal() to fit the saturated and independence models in cases of poor starting values.
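
In the meantime, something along those lines can be done by hand; a minimal sketch (runRefModels is just an illustrative helper, not an OpenMx function):

# Illustrative helper: build the reference models unrun, then fit each one with
# mxTryHardOrdinal(), which copes better with thresholds and poor start values.
runRefModels <- function(fit) {
  refs <- mxRefModels(fit, run = FALSE)
  lapply(refs, mxTryHardOrdinal)
}
# summary(fitSAT, refModels = runRefModels(fitSAT))
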
Replied on Fri, 11/17/2023 - 14:01 by lf-araujo (Joined: 11/25/2020)

Thanks.

This particular issue was resolved; sorry for not updating here. The culprit was this line:

options(mxByrow = TRUE)

This changes the default fill order for every matrix created after it, not just the ones in my script, so it should never be set globally like this. Such a noob error.
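
For anyone hitting the same thing, the row-major layout can be requested per matrix instead of globally; a sketch based on the psi_a matrix from the MWE above:

# Pass byrow to the one matrix that needs it, instead of options(mxByrow = TRUE)
psi_a <- umxMatrix("psi_a", type = "Symm", nrow = 2, ncol = 2, byrow = TRUE,
                   labels = c("ax2",
                              "ra", "ay2"),
                   free   = TRUE,
                   values = c(0.1,
                              0.05, 0.1))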