Attachment | Size |
---|---|
Ade fit summary | 5.68 KB |
Hi.
I am running a four-variate Cholesky model in which the first and last variables are ordinal and dichotomous, respectively, and the second and third are continuous. I include an mxConstraint in the script in order to fix the variance of the non-continuous variables to be equal to one, and I expected the summary output to contain no standard errors, but I still get them — although some of them are missing. Why is this happening? Is there something wrong with my script, or did I miss a change such that mxConstraint no longer suppresses the SEs? If so, is the output valid, and can I ignore the missing SEs by estimating confidence intervals with mxCI? I would greatly appreciate any insight!
# ------------------------------------------------------------------------------
# Four-variate Cholesky (ADE) twin model.
# Variables 1 and 4 are categorical (4-category ordinal and binary); variables
# 2 and 3 are continuous. Age and sex enter as definition-variable covariates
# on the means (continuous variables) and thresholds (categorical variables).
# The total variance of each categorical variable is constrained to 1.
# Expects a data frame `df` with twin-pair columns suffixed "_1"/"_2" and a
# zygosity column `zyg3_s`; requires the OpenMx package.
# ------------------------------------------------------------------------------
library(OpenMx)  # library() fails loudly if OpenMx is absent; require() does not
mxOption(NULL, "Default optimizer", "NPSOL")

# Section: variable selection ----

# Continuous variables
varsc   <- c('varCon1', 'varCon2')              # continuous variable names
nvc     <- length(varsc)                        # number of continuous variables
ntvc    <- nvc * 2                              # total continuous variables (both twins)
conVars <- paste(varsc, c(rep(1, nvc), rep(2, nvc)), sep = "_")

# Categorical (ordinal/binary) variables
nth1 <- 3                 # thresholds for the 4-category ordinal variable
nth2 <- 1                 # thresholds for the binary variable
nth  <- max(nth1, nth2)   # max thresholds across categorical variables
ninc <- nth - 1           # max threshold increments
varso   <- c('varOrd', 'varDich')               # categorical variable names
nvo     <- length(varso)                        # number of categorical variables
ntvo    <- nvo * 2                              # total categorical variables (both twins)
# Recycling gives the order: varOrd_1, varDich_1, varOrd_2, varDich_2
ordVars <- paste(varso, c(rep(1, nvo), rep(2, nvo)), sep = "_")

# All analysis variables; the order here fixes the meaning of `oc` and `selVars`
vars <- c('varOrd', 'varCon1', 'varCon2', 'varDich')
nv   <- length(vars)      # number of variables per twin
nind <- 2                 # individuals (twins) per family
ntv  <- nv * nind         # total variables per family
oc   <- c(1, 0, 0, 1)     # 1 = categorical, 0 = continuous  # TODO: verify coding
selVars <- paste(vars, c(rep(1, nv), rep(2, nv)), sep = "_")

# Definition variables (covariates); q_age_s is carried along but only
# Zage_s and sex_s are used in the model below
def     <- c("sex_s", "Zage_s", "q_age_s")
ndef    <- length(def)
defVars <- paste(def, c(rep(1, ndef), rep(2, ndef)), sep = "_")

# Section: data preparation ----

# Split by zygosity; rows with missing definition variables must be dropped
# because OpenMx cannot evaluate a model with NA definition variables
mzDataBin <- subset(df, zyg3_s == 'Mz',  c(selVars, defVars))
dzDataBin <- subset(df, zyg3_s == 'DzL', c(selVars, defVars))
mzDataBin <- mzDataBin[complete.cases(mzDataBin[, defVars]), ]
dzDataBin <- dzDataBin[complete.cases(dzDataBin[, defVars]), ]

# Declare the categorical columns as ordered factors for OpenMx
mzDataBinF <- mzDataBin
dzDataBinF <- dzDataBin
mzDataBinF[, ordVars[c(1, 3)]] <- mxFactor(x = mzDataBin[, ordVars[c(1, 3)]],
                                           levels = c(1, 2, 3, 4))
dzDataBinF[, ordVars[c(1, 3)]] <- mxFactor(x = dzDataBin[, ordVars[c(1, 3)]],
                                           levels = c(1, 2, 3, 4))
mzDataBinF[, ordVars[c(2, 4)]] <- mxFactor(x = mzDataBin[, ordVars[c(2, 4)]],
                                           levels = c(0, 1))
dzDataBinF[, ordVars[c(2, 4)]] <- mxFactor(x = dzDataBin[, ordVars[c(2, 4)]],
                                           levels = c(0, 1))

# Section: labels ----

# e.g. varOrd_thresh_1, varOrd_inc_1, varOrd_inc_2, varDich_thresh_1, ...
threshLabs <- paste(rep(varso, each = nth),
                    c("thresh", rep("inc", ninc)), c(1, 1:ninc), sep = '_')
meanLabs         <- paste(vars, "mean", sep = "_")
betaLabs_age     <- paste("beta", "Age", vars, sep = "_")
betaLabs_sex     <- paste("beta", "Sex", vars, sep = "_")
betaLabsThre_age <- paste("beta", "Age", threshLabs, sep = "_")
betaLabsThre_sex <- paste("beta", "Sex", threshLabs, sep = "_")

# Column-major labels for the lower-triangular Cholesky matrices:
# a11, a21, a31, a41, a22, a32, a42, a33, a43, a44 (likewise d*, e*)
aLabs <- paste("a", rev(nv + 1 - sequence(1:nv)), rep(1:nv, nv:1), sep = "")
dLabs <- paste("d", rev(nv + 1 - sequence(1:nv)), rep(1:nv, nv:1), sep = "")
eLabs <- paste("e", rev(nv + 1 - sequence(1:nv)), rep(1:nv, nv:1), sep = "")

# Section: start values, free-status patterns, bounds ----

frMV  <- c(FALSE, TRUE, TRUE, FALSE)  # means free only for continuous variables
frCvD <- diag(frMV, nv, nv)
frCvD[lower.tri(frCvD)] <- TRUE       # free below-diagonal elements
frCvD[upper.tri(frCvD)] <- TRUE       # free above-diagonal elements
frCv  <- matrix(as.logical(frCvD), 4)
# 3x2 column-major fill: all 3 threshold rows free for the ordinal variable,
# only the first row free for the binary variable
frTh  <- c(TRUE, TRUE, TRUE, TRUE, FALSE, FALSE)
svMe  <- c(0, 0, 0, 0)                # start values for means
svPa  <- .4                           # start value for a/d path coefficients
svPaD <- vech(diag(svPa, nv, nv))     # start values, lower-triangular vectorized
svPe  <- .8                           # start value for e path coefficients
svPeD <- vech(diag(svPe, nv, nv))
lbPa  <- 0                            # lower bound for diagonal path elements
lbPaD <- diag(lbPa, nv, nv)
lbPaD[lower.tri(lbPaD)] <- -10        # lower bounds for below-diagonal elements
lbPaD[upper.tri(lbPaD)] <- NA         # no bounds above the diagonal (fixed)
# svTh <- c(-1,.1,.1,0,NA,NA)         # (previous per-element start values)
svTh  <- 0                            # start value for the first threshold

# ------------------------------------------------------------------
# PREPARE MODEL

# Definition-variable matrices: per-twin standardized age and sex
defAge1 <- mxMatrix(type = "Full", nrow = 1, ncol = 1, free = FALSE,
                    labels = "data.Zage_s_1", name = "Age1")
defAge2 <- mxMatrix(type = "Full", nrow = 1, ncol = 1, free = FALSE,
                    labels = "data.Zage_s_2", name = "Age2")
defSex1 <- mxMatrix(type = "Full", nrow = 1, ncol = 1, free = FALSE,
                    labels = "data.sex_s_1", name = "Sex1")
defSex2 <- mxMatrix(type = "Full", nrow = 1, ncol = 1, free = FALSE,
                    labels = "data.sex_s_2", name = "Sex2")

# Regression effects of age and sex on means and thresholds
B_Age <- mxMatrix(type = "Full", nrow = 1, ncol = nv, free = frMV,
                  values = c(0, .1, .1, 0), labels = betaLabs_age, name = "bAge")
B_Age_Thre <- mxMatrix(type = "Full", nrow = nth, ncol = nvo, free = frTh,
                       values = c(rep(.2, nth), 0.2, NA, NA),
                       labels = betaLabsThre_age, name = "bAgeThre")
B_Sex <- mxMatrix(type = "Full", nrow = 1, ncol = nv, free = frMV,
                  values = c(0, .1, .1, 0), labels = betaLabs_sex, name = "bSex")
B_Sex_Thre <- mxMatrix(type = "Full", nrow = nth, ncol = nvo, free = frTh,
                       values = c(rep(.2, nth), 0.2, NA, NA),
                       labels = betaLabsThre_sex, name = "bSexThre")

# Regression of the means: intercept + age and sex effects, per twin
intercept <- mxMatrix(type = "Full", nrow = 1, ncol = nv, free = frMV,
                      values = svMe, labels = meanLabs, name = "intercept")
expMean <- mxAlgebra(expression = cbind(
  intercept + (bAge %x% Age1) + (bSex %x% Sex1),
  intercept + (bAge %x% Age2) + (bSex %x% Sex2)), name = "expMean")

# Lower-triangular matrix of ones: premultiplying the increment vector
# accumulates increments into ordered, strictly increasing thresholds
inc <- mxMatrix(type = "Lower", nrow = nth, ncol = nth, free = FALSE,
                values = 1, name = "Low")
# NOTE(review): these hard-coded labels ("healthp_s_*", "ibs_gen_s_*") do not
# match the generated threshLabs stems ("varOrd_*", "varDich_*") used in the
# bAgeThre/bSexThre labels above — confirm which naming is intended.
# Start values for the increment rows were raised from 0 to .1 so they do not
# fall below their own lbound of .001.
threVar1 <- mxMatrix(type = "Full", nrow = nth1, ncol = 1, free = TRUE,
                     values = c(svTh, rep(.1, ninc)),
                     lbound = c(-3, rep(.001, ninc)), ubound = 3,
                     labels = c("healthp_s_thresh_1", "healthp_s_inc_1",
                                "healthp_s_inc_2"),
                     name = "ThresholdVar1")
threVar2 <- mxMatrix(type = "Full", nrow = nth1, ncol = 1,
                     free = c(rep(TRUE, nth2), rep(FALSE, nth1 - nth2)),
                     values = c(rep(svTh, nth2), rep(NA, nth1 - nth2)),
                     lbound = c(-3, rep(.001, ninc)), ubound = 3,
                     labels = c("ibs_gen_s_thresh_1", "ibs_gen_s_inc_1",
                                "ibs_gen_s_inc_2"),
                     name = "ThresholdVar2")
threMatVar1 <- mxAlgebra(expression = cbind(Low %*% ThresholdVar1),
                         name = 'ThresholdMatvar1')
# Only the first nth2 rows of the binary column are used by the expectation
thresholds <- mxAlgebra(cbind(ThresholdMatvar1, ThresholdVar2),
                        name = 'ThresholdsInc')
expThre <- mxAlgebra(expression = cbind(
  ThresholdsInc + (bAgeThre %x% Age1) + (bSexThre %x% Sex1),
  ThresholdsInc + (bAgeThre %x% Age2) + (bSexThre %x% Sex2)),
  name = "expThreshold")
inclusions <- list(B_Age, B_Sex, B_Age_Thre, B_Sex_Thre, defAge1, defAge2,
                   defSex1, defSex2, intercept, expMean, inc, threVar1,
                   threVar2, threMatVar1, thresholds, expThre)

# ------------------------------------------------------------------------------
# ADE Model

# Cholesky path-coefficient matrices for A, D, and E
pathA <- mxMatrix(type = "Lower", nrow = nv, ncol = nv, free = TRUE,
                  values = svPaD, label = aLabs, lbound = lbPaD, name = "a")
pathD <- mxMatrix(type = "Lower", nrow = nv, ncol = nv, free = TRUE,
                  values = svPaD, label = dLabs, lbound = lbPaD, name = "d")
pathE <- mxMatrix(type = "Lower", nrow = nv, ncol = nv, free = TRUE,
                  values = svPeD, label = eLabs, lbound = lbPaD, name = "e")

# Variance components implied by the Cholesky factors
covA <- mxAlgebra(expression = a %*% t(a), name = "A")
covD <- mxAlgebra(expression = d %*% t(d), name = "D")
covE <- mxAlgebra(expression = e %*% t(e), name = "E")

# Expected covariance matrices for MZ and DZ pairs
covMZ <- mxAlgebra(expression = rbind(cbind(A + D + E, A + D),
                                      cbind(A + D, A + D + E)),
                   name = "expCovMZ")
covDZ <- mxAlgebra(expression = rbind(
  cbind(A + D + E, 0.5 %x% A + 0.25 %x% D),
  cbind(0.5 %x% A + 0.25 %x% D, A + D + E)),
  name = "expCovDZ")

# Total phenotypic variance
covP <- mxAlgebra(expression = A + D + E, name = "V")

# Constrain the total variance of EACH categorical variable to 1.
# The original form diag2vec(Oc %&% V) == Unv1 with a 1 x nv Oc evaluates to
# the scalar V[1,1] + V[4,4] + 2*V[1,4] == 1, i.e. it constrained a weighted
# sum rather than each variance. Here Oc is an nvo x nv selection matrix
# (one indicator row per categorical variable, built from `oc`), so
# Oc %*% diag2vec(V) pulls out exactly the categorical diagonal elements.
matUnv <- mxMatrix(type = "Unit", nrow = nvo, ncol = 1, name = "Unv1")
matOc  <- mxMatrix(type = "Full", nrow = nvo, ncol = nv, free = FALSE,
                   values = diag(oc)[oc == 1, ], name = "Oc")
var1   <- mxConstraint(expression = Oc %*% diag2vec(V) == Unv1, name = "Var1")

# Standardization algebra
matI  <- mxMatrix(type = "Iden", nrow = nv, ncol = nv, name = "I")
invSD <- mxAlgebra(expression = solve(sqrt(I * V)), name = "iSD")
stA <- mxAlgebra(A / V, name = 'stA')  # standardized variance components
stD <- mxAlgebra(D / V, name = 'stD')
stE <- mxAlgebra(E / V, name = 'stE')

# Genetic, environmental, and phenotypic correlations (cov2cor equivalents)
corA <- mxAlgebra(expression = solve(sqrt(I * A)) %&% A, name = "rA")
corD <- mxAlgebra(expression = solve(sqrt(I * D)) %&% D, name = "rD")
corE <- mxAlgebra(expression = solve(sqrt(I * E)) %&% E, name = "rE")
corP <- mxAlgebra(expression = solve(sqrt(I * V)) %&% V, name = "rP")

# Collected estimates and derived quantities, with readable dimnames
rowVars <- rep('vars', nv)
colVars <- rep(c('A', 'D', 'E', 'stA', 'stD', 'stE',
                 'rA', 'rD', 'rE', 'rP'), each = nv)
estVars <- mxAlgebra(
  expression = cbind(A, D, E, stA, stD, stE, rA, rD, rE, rP),
  name = "Vars", dimnames = list(rowVars, colVars))

# Data objects for the two zygosity groups
dataMZ <- mxData(observed = mzDataBinF, type = "raw")
dataDZ <- mxData(observed = dzDataBinF, type = "raw")

# Normal-theory expectations with thresholds for the categorical variables
expMZ <- mxExpectationNormal(covariance = "expCovMZ", means = "expMean",
                             dimnames = selVars, thresholds = "expThreshold",
                             threshnames = ordVars)
expDZ <- mxExpectationNormal(covariance = "expCovDZ", means = "expMean",
                             dimnames = selVars, thresholds = "expThreshold",
                             threshnames = ordVars)
funML <- mxFitFunctionML()

# Combine groups into the full multigroup ADE model
pars <- list(pathA, pathD, pathE, covA, covD, covE, stA, stD, stE, covP,
             corA, corD, corE, corP, matI, invSD, matUnv, matOc, estVars)
modelMZ <- mxModel(pars, inclusions, covMZ, dataMZ, expMZ, funML, name = "MZ")
modelDZ <- mxModel(pars, inclusions, covDZ, dataDZ, expDZ, funML, name = "DZ")
multi   <- mxFitFunctionMultigroup(c("MZ", "DZ"))
AdeModel <- mxModel("ADE", pars, var1, modelMZ, modelDZ, funML, multi)
# , ciThre, ciInterc, ciBeta, ciVars, ciStVars, ciCorr

# ------------------------------------------------------------------------------
# RUN MODEL

# Run ADE model
AdeFit <- mxRun(AdeModel)
The latter. As of version 2.13.x, OpenMx reports standard errors in the presence of MxConstraints. Those standard errors are adjusted appropriately to reflect the influence the MxConstraints have on the sampling distribution of the free-parameter estimates (see references in the man page of
mxComputeStandardError()
). If the model converges, the standard errors will generally be valid — unless you're running OpenMx with only one thread (which is always the case under Windows), in which case a known issue affects them. Yes, confidence intervals work the way they always have. I would recommend basing inference on
mxCI()
rather than the standard errors, since profile-likelihood intervals have superior theoretical properties compared to Wald-style intervals. Plus, the SEs are calculated from the matrix of second partial derivatives of the fit function, and numerical derivatives of the fit function aren't always well-behaved when you're analyzing ordinal variables (I suspect that's why some of the SEs are missing). Great! Thanks a lot for your reply! Then I will proceed with mxCI.