Commit 3.3-6

alexanderrobitzsch committed Feb 27, 2019
1 parent 76a5334 commit 30f928b
Showing 21 changed files with 227 additions and 218 deletions.
4 changes: 2 additions & 2 deletions DESCRIPTION
@@ -1,8 +1,8 @@
 Package: sirt
 Type: Package
 Title: Supplementary Item Response Theory Models
-Version: 3.3-1
-Date: 2019-02-08 14:12:25
+Version: 3.3-6
+Date: 2019-02-27 22:42:09
 Author: Alexander Robitzsch [aut,cre] (<https://orcid.org/0000-0002-8226-3132>)
 Maintainer: Alexander Robitzsch <[email protected]>
 Description:
10 changes: 0 additions & 10 deletions R/MAR_normalized.R

This file was deleted.

2 changes: 1 addition & 1 deletion R/RcppExports.R
@@ -1,5 +1,5 @@
 ## File Name: RcppExports.R
-## File Version: 3.003001
+## File Version: 3.003006
 # Generated by using Rcpp::compileAttributes() -> do not edit by hand
 # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
39 changes: 20 additions & 19 deletions R/Rhat_sirt.R
@@ -1,19 +1,20 @@
 ## File Name: Rhat_sirt.R
-## File Version: 1.08
+## File Version: 1.09


-####################################
-# auxiliary functions for Rhat statistic
-############################################################################
-# Code from rube package
-# Source: http://www.stat.cmu.edu/~hseltman/rube/rube0.2-16/R/Rhat.R
-# Inference from Iterative Simulation Using Multiple Sequences
-# Author(s): Andrew Gelman and Donald B. Rubin
-# Source: Statistical Science, Vol. 7, No. 4 (Nov., 1992), pp. 457-472
-# Stable URL: http://www.jstor.org/stable/2246093
-## Matches gelman.diag() from package "coda", but not WinBUGS() "summary" component.
-## Better than gelman.diag() because multivariate stat is not bothered to be calculated
-Rhat1 <- function(mat) {
+############################################################################
+# Code from rube package
+# Source: http://www.stat.cmu.edu/~hseltman/rube/rube0.2-16/R/Rhat.R
+# Inference from Iterative Simulation Using Multiple Sequences
+# Author(s): Andrew Gelman and Donald B. Rubin
+# Source: Statistical Science, Vol. 7, No. 4 (Nov., 1992), pp. 457-472
+# Stable URL: http://www.jstor.org/stable/2246093
+## Matches gelman.diag() from package "coda", but not WinBUGS() "summary" component.
+## Better than gelman.diag() because multivariate stat is not bothered to be calculated
+Rhat1 <- function(mat)
+{
     m <- ncol(mat)
     n <- nrow(mat)
     b <- apply(mat,2,mean)
@@ -32,11 +33,11 @@ Rhat1 <- function(mat) {
 }


-
-Rhat <- function(arr) {
-    dm <- dim(arr)
-    if (length(dm)==2) return(Rhat1(arr))
-    if (dm[2]==1) return(NULL)
-    if (dm[3]==1) return(Rhat1(arr[,,1]))
-    return(apply(arr,3,Rhat1))
-}
+Rhat <- function(arr)
+{
+    dm <- dim(arr)
+    if (length(dm)==2) return(Rhat1(arr))
+    if (dm[2]==1) return(NULL)
+    if (dm[3]==1) return(Rhat1(arr[,,1]))
+    return(apply(arr,3,Rhat1))
+}
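
Rhat1 computes the Gelman-Rubin potential scale reduction factor from a matrix whose columns are MCMC chains; Rhat applies it over the third dimension of an array of sampled parameters. A minimal usage sketch with simulated chains (not part of the commit; the functions are internal, so they are accessed via the package namespace):

    # two well-mixed chains of 1000 draws each; Rhat should be close to 1
    set.seed(1)
    chains <- cbind( stats::rnorm(1000), stats::rnorm(1000) )
    sirt:::Rhat1(chains)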
14 changes: 6 additions & 8 deletions R/data.wide2long.R
@@ -1,8 +1,8 @@
 ## File Name: data.wide2long.R
-## File Version: 0.24
+## File Version: 0.25

-#####################################################################
-# converts a data frame in wide format into long format
-
+#--- converts a data frame in wide format into long format
 data.wide2long <- function( dat, id=NULL, X=NULL, Q=NULL)
 {
     items <- setdiff( colnames(dat), id )
@@ -12,14 +12,12 @@ data.wide2long <- function( dat, id=NULL, X=NULL, Q=NULL)
     dat2 <- data.frame( dat1 )
     colnames(dat2) <- "resp"
     dat1 <- data.frame( "id_index"=rep( 1:N, each=I ) )
-
     if ( ! is.null(id) ){
         dat1 <- cbind( dat1, rep( dat[, id], each=I ) )
         colnames(dat1)[2] <- id
     }
-    dat1 <- cbind( dat1, "item"=rep( items, N ),
-                "item_index"=rep(1:I, N ),
-                "resp"=dat2$resp )
+    dat1 <- cbind( dat1, item=rep( items, N ), item_index=rep(1:I, N),
+                resp=dat2$resp )
     if ( ! is.null(X) ){
         dat1 <- cbind( dat1, X[ rep(1:N, each=I ), ] )
     }
@@ -38,4 +36,4 @@ data.wide2long <- function( dat, id=NULL, X=NULL, Q=NULL)
     dat1 <- data.frame( "rowindex"=1:(N*I), dat1 )
     return(dat1)
 }
-#####################################################################
+

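data.wide2long turns an item-response data frame with one row per person into a long data frame with one row per person-item pair. A small sketch with made-up data (the function is exported by sirt):

    library(sirt)
    dat <- data.frame( idstud=c("p1","p2"), I1=c(1,0), I2=c(0,1) )
    long <- data.wide2long( dat=dat, id="idstud" )
    long   # columns rowindex, id_index, idstud, item, item_index, resp
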
11 changes: 8 additions & 3 deletions R/invariance_alignment_cfa_config.R
@@ -1,8 +1,8 @@
 ## File Name: invariance_alignment_cfa_config.R
-## File Version: 0.12
+## File Version: 0.16


-invariance_alignment_cfa_config <- function(dat, group, ...)
+invariance_alignment_cfa_config <- function(dat, group, weights=NULL, ...)
 {
     groups <- unique(group)
     G <- length(groups)
@@ -15,12 +15,17 @@ invariance_alignment_cfa_config <- function(dat, group, ...)
     colnames(nu) <- items
     lambda <- nu
     err_var <- nu
+    weights_gg <- NULL
     for (gg in 1:G){
         dat_gg <- dat[ group==groups[gg], ]
         dat_gg <- dat_gg[, colMeans(is.na(dat_gg)) < 1 ]
         items_gg <- colnames(dat_gg)
         ind_gg <- match(items_gg, items)
-        res <- invariance_alignment_cfa_config_estimate(dat_gg=dat_gg, ...)
+        if (!is.null(weights)){
+            weights_gg <- weights[ind_gg]
+        }
+        res <- invariance_alignment_cfa_config_estimate(dat_gg=dat_gg,
+                    weights=weights_gg, ...)
         nu[gg, ind_gg] <- res$nu
         lambda[gg, ind_gg] <- res$lambda
         err_var[gg, ind_gg] <- res$err_var
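
With this change, invariance_alignment_cfa_config gains an optional weights argument that is forwarded to the group-wise CFA fits. A hedged usage sketch of the basic call, assuming the data.inv4gr example dataset shipped with sirt (a group identifier in the first column followed by item columns):

    library(sirt)
    data(data.inv4gr)
    dat <- data.inv4gr
    mod <- invariance_alignment_cfa_config( dat=dat[,-1], group=dat$group )
    mod$lambda   # per-group loadings, usable as input for invariance.alignment()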
12 changes: 9 additions & 3 deletions R/invariance_alignment_cfa_config_estimate.R
@@ -1,13 +1,19 @@
 ## File Name: invariance_alignment_cfa_config_estimate.R
-## File Version: 0.04
+## File Version: 0.08

-invariance_alignment_cfa_config_estimate <- function(dat_gg, ...)
+invariance_alignment_cfa_config_estimate <- function(dat_gg, weights_gg=NULL, ...)
 {
     I_gg <- ncol(dat_gg)
     items_gg <- colnames(dat_gg)
     lavmodel <- paste0("F=~", paste0(items_gg, collapse="+") )
+    if (is.null(weights_gg)){
+        weights_name <- NULL
+    } else {
+        dat_gg$weights <- weights_gg
+        weights_name <- "weights"
+    }
     mod <- lavaan::cfa(data=dat_gg, model=lavmodel, std.lv=TRUE,
-            meanstructure=TRUE, ...)
+            meanstructure=TRUE, sampling.weights=weights_name, ...)
     partable <- lavaan::parameterTable(object=mod)
     lambda <- partable[ partable$op=="=~", "est"]
     nu <- partable[ partable$op=="~1", "est"][1:I_gg]
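
The weights plumbing works by storing the weights as an extra data column and passing that column's name to lavaan's sampling.weights argument. A standalone sketch of the same mechanism with hypothetical data (estimator MLR is one documented choice for sampling weights in lavaan):

    library(lavaan)
    set.seed(42)
    f <- rnorm(200)
    dat <- data.frame( y1=f+rnorm(200), y2=f+rnorm(200), y3=f+rnorm(200) )
    dat$w <- runif(200, 0.5, 1.5)    # made-up person weights
    mod <- lavaan::cfa( model="F=~y1+y2+y3", data=dat, std.lv=TRUE,
                meanstructure=TRUE, estimator="MLR", sampling.weights="w" )
    lavaan::parameterTable(mod)[, c("lhs","op","rhs","est")]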
23 changes: 8 additions & 15 deletions R/invgamma2.R
@@ -1,39 +1,32 @@
 ## File Name: invgamma2.R
-## File Version: 0.18
+## File Version: 0.19


-###################################################################
-# inverse gamma distribution for variance
-rinvgamma2 <- function( n, n0, var0 ){
-    # INPUT:
-    # N ... number of random draws
-    # n0 ... sample size prior
-    # var0 ... prior variance
-    # res <- 1/ stats::rgamma( N, n0 / 2, n0 * var0 / 2 )
-
+#---- inverse gamma distribution for variance
+rinvgamma2 <- function( n, n0, var0 )
+{
     res <- sirt_rinvgamma( n, shape=n0 / 2, scale=n0 * var0 / 2 )
     return(res)
 }
-#####################################################################
 dinvgamma2 <- function( x, n0, var0 ){
     res <- sirt_dinvgamma( x, shape=n0 / 2, scale=n0 * var0 / 2 )
     return(res)
 }

-#-----------------------------------------------------------------------
-# copied from MCMCpack package
+#--- copied from MCMCpack package
 sirt_dinvgamma <- function (x, shape, scale=1)
 {
     if (shape <=0 | scale <=0) {
         stop("Shape or scale parameter negative in dinvgamma().\n")
     }
     alpha <- shape
     beta <- scale
-    log.density <- alpha * log(beta) - lgamma(alpha) - (alpha +
-        1) * log(x) - (beta/x)
+    log.density <- alpha * log(beta) - lgamma(alpha) - (alpha + 1) * log(x) - (beta/x)
     return(exp(log.density))
 }

 sirt_rinvgamma <- function (n, shape, scale=1)
 {
-    return(1/rgamma(n=n, shape=shape, rate=scale))
+    return(1/stats::rgamma(n=n, shape=shape, rate=scale))
 }
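
rinvgamma2 parameterizes the inverse gamma prior for a variance by a prior sample size n0 and a prior variance var0, which map to shape n0/2 and scale n0*var0/2. A quick sketch with illustrative values (assuming the two helpers are available at user level in sirt):

    library(sirt)
    set.seed(7)
    v <- rinvgamma2( n=100000, n0=30, var0=2 )
    mean(v)    # about n0*var0/(n0-2)=2.14; concentrates near var0 as n0 grows
    dinvgamma2( x=2, n0=30, var0=2 )    # prior density evaluated at var0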
4 changes: 2 additions & 2 deletions R/linking_haberman_als_residual_weights.R
@@ -1,5 +1,5 @@
 ## File Name: linking_haberman_als_residual_weights.R
-## File Version: 0.370
+## File Version: 0.371


 linking_haberman_als_residual_weights <- function( logaj, logaAt,
@@ -28,7 +28,7 @@ linking_haberman_als_residual_weights <- function( logaj, logaAt,
     #-- estimation BSQ or HUB
     if (estimation %in% c("BSQ","HUB") ){
         if (cutoff==Inf ){
-            k <- k_fac*MAR_normalized(x=loga_resid)
+            k <- k_fac*mad_normalized(x=loga_resid)
             k_estimate <- TRUE
             cutoff <- k + eps_k
         }
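
In the robust BSQ/HUB estimation step, when no explicit cutoff is supplied, the tuning constant k is set from the normalized MAD of the current log-discrimination residuals (mad_normalized is added in the next file of this commit). An illustrative sketch of that computation with made-up residuals and constants:

    loga_resid <- c(-0.21, 0.05, 0.02, -0.04, 0.35, 0.01)
    k_fac <- 1.96     # assumed value of the tuning factor
    eps_k <- 1e-4     # assumed small offset
    k <- k_fac * stats::median( abs(loga_resid - stats::median(loga_resid)) ) / 0.6745
    cutoff <- k + eps_k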
10 changes: 10 additions & 0 deletions R/mad_normalized.R
@@ -0,0 +1,10 @@
+## File Name: mad_normalized.R
+## File Version: 0.03
+
+mad_normalized <- function(x)
+{
+    x <- stats::na.omit(x)
+    res <- stats::median( abs( x - stats::median(x) ) )
+    res <- res/0.6745
+    return(res)
+}
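
The new helper is the median absolute deviation rescaled by 0.6745, so that for normally distributed data it consistently estimates the standard deviation; it matches stats::mad up to the choice of constant. A short check:

    set.seed(3)
    x <- c( rnorm(500), 10, -12 )    # normal data plus two gross outliers
    res <- stats::median( abs( x - stats::median(x) ) ) / 0.6745
    all.equal( res, stats::mad( x, constant=1/0.6745 ) )   # TRUE
    stats::sd(x)    # inflated by the outliers, unlike the robust estimate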
17 changes: 8 additions & 9 deletions R/read.fwf2.R
@@ -1,11 +1,10 @@
 ## File Name: read.fwf2.R
-## File Version: 1.08
+## File Version: 1.11


-
-############################################################################
-# This function reads fwf files
-read.fwf2 <- function( file, format, variables=NULL){
+# This function reads fwf files
+read.fwf2 <- function( file, format, variables=NULL)
+{
     ff <- readLines( file )
     ind.ff1 <- c( 1, cumsum(format)[- length(format) ] + 1 )
     ind.ff2 <- cumsum(format)
@@ -14,10 +13,10 @@ read.fwf2 <- function( file, format, variables=NULL){
     dfr <- data.frame( matrix(0, nrow=n, ncol=I ) )
     for (ii in 1:I){
         dfr[,ii ] <- as.numeric( substring( ff, ind.ff1[ii], ind.ff2[ii] ) )
-        }
+    }
     if (!is.null(variables)){
         colnames(dfr) <- variables
     }
-    return(dfr)
-}
-############################################################################
+    return(dfr)
+}
+
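read.fwf2 reads a fixed-width file by cutting each line at the cumulative positions given in format and coercing every column to numeric. A small sketch with a temporary, made-up file:

    tmp <- tempfile(fileext=".txt")
    writeLines( c("011023", "121104"), tmp )
    dat <- sirt::read.fwf2( file=tmp, format=c(2,2,2), variables=c("a","b","c") )
    dat    # rows parse to a=1, b=10, c=23 and a=12, b=11, c=4
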
10 changes: 10 additions & 0 deletions R/sirt_trim_increment.R
@@ -0,0 +1,10 @@
+## File Name: sirt_trim_increment.R
+## File Version: 0.02
+
+sirt_trim_increment <- function(increment, max_increment)
+{
+    eps <- 1E-10
+    ci <- ceiling( abs(increment) / ( max_increment + eps ) )
+    change <- ifelse( abs(increment) > max_increment, increment/(2*ci), increment )
+    return(change)
+}
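
The new helper damps Newton-type parameter updates: any increment whose absolute value exceeds max_increment is divided by twice the rounded-up ratio to the bound, so larger steps shrink more aggressively. Worked through with illustrative numbers:

    increment <- c(0.05, -0.40, 0.90)
    max_increment <- 0.25
    ci <- ceiling( abs(increment) / ( max_increment + 1e-10 ) )   # 1 2 4
    ifelse( abs(increment) > max_increment, increment/(2*ci), increment )
    #> 0.05 -0.10 0.1125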
59 changes: 15 additions & 44 deletions R/truescore.irt.R
@@ -1,11 +1,11 @@
 ## File Name: truescore.irt.R
-## File Version: 0.22
+## File Version: 0.24

-###############################################################
-truescore.irt <- function( A, B, c=NULL, d=NULL,
-            theta=seq(-3,3,len=21), error=NULL, pid=NULL,
-            h=.001 ){
-    #**********
+
+#--- true score item response function
+truescore.irt <- function( A, B, c=NULL, d=NULL, theta=seq(-3,3,len=21),
+            error=NULL, pid=NULL, h=.001 )
+{
     if ( is.vector(B) ){ B <- matrix( B, ncol=1 ) }
     if ( is.vector(A) ){ A <- matrix( A, ncol=1 ) }
     nB <- ncol(B)
@@ -15,18 +15,18 @@ if ( is.null(d) ){ d <- rep(1,I ) }
     if ( is.null(d) ){ d <- rep(1,I ) }
     if ( is.null(pid) ){ pid <- 1:(length(theta)) }
     # true score
-    truescore <- .ts.irf( A, B, c, d, theta )
+    truescore <- truescore_irt_irf( A=A, B=B, c=c, d=d, theta=theta )
     # calculate standard error of true score
     if ( is.null( error ) ){
         truescore.error <- NULL } else {
-        truescore1 <- .ts.irf( A, B, c, d, theta + h )
+        truescore1 <- truescore_irt_irf( A=A, B=B, c=c, d=d, theta=theta + h )
         truescore.error <- sqrt( ( ( truescore1 - truescore ) / h )^2 ) * error
-        }
+    }
     percscore <- truescore / sum( maxK )
     percscore.error <- NULL
     if ( ! is.null( error ) ){
         percscore.error <- truescore.error / sum( maxK )
-        }
+    }
     # optimization function values
     ind <- intersect( which( !is.na( theta ) ), which( !is.na(percscore) ) )
     x0 <- theta[ind]
@@ -39,44 +39,15 @@ truescore.irt <- function( A, B, c=NULL, d=NULL,
         # define fit function
         sum( ( y0 - minf - (maxf - minf) * stats::plogis( a*x0 + b ) )^2 )
     }
-    # m1 <- glm( y0 ~ x0, family="binomial", control=list(maxit=4) )
-    # m1c <- coef(m1)
-    # h1 <- optim( c(m1c[2], -m1c[1] ), critfct)
    h1 <- stats::optim( c( .5, 0 ), critfct )
-    # fitval <- c( h1$par[1], h1$par[2] )
-    # names(fitval) <- c("a", "b" )
-    #*****
-    # OUTPUT
-    res <- data.frame( "pid"=pid,"truescore"=truescore )
+
+    #--- OUTPUT
+    res <- data.frame( "pid"=pid, "truescore"=truescore )
     res$truescore.error <- truescore.error
     res2 <- data.frame( "percscore"=percscore )
     res2$percscore.error <- percscore.error
     res3 <- data.frame( "lower"=minf, "upper"=maxf, "a"=h1$par[1], "b"=h1$par[2] )
     res <- cbind( res, res2, res3 )
     return(res)
-    }
-####################################################
-.ts.irf <- function( A, B, c, d, theta ){
-    TP <- length(theta)
-    truescore <- rep(0, length(theta))
-    # extract maximum item categories from B parameters
-    nB <- ncol(B)
-    maxK <- nB - rowSums( is.na( B ))
-    I <- nrow(B)
-    scoreM <- matrix( 1:max(maxK), nrow=TP, ncol=max(maxK), byrow=TRUE )
-    for (ii in 1:I){
-        # ii <- 21
-        prob.ii <- matrix( NA, TP, maxK[ii] )
-        for (kk in seq(1, maxK[ii] ) ){
-            # kk <- 1
-            prob.ii[,kk] <- exp( theta * A[ii,kk] + B[ii,kk] )
-        }
-        prob.ii <- prob.ii / ( rowSums( prob.ii ) + 1 )
-        if ( ( maxK[ii]==1 ) & ( abs(c[ii])+abs(1-d[ii]) > 0 ) ){
-            prob.ii <- c[ii] + ( d[ii] - c[ii] ) * prob.ii
-        }
-        truescore <- truescore + rowSums( prob.ii * scoreM[, seq( 1, maxK[ii] ), drop=FALSE] )
-    }
-    return(truescore)
-}
-############################################################
+}
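truescore.irt maps theta values to the expected (true) score of an item set; the refactor moves the item response function from the internal .ts.irf into the helper truescore_irt_irf without changing the computation. A usage sketch for five hypothetical 2PL items in intercept-slope form, where P(X=1)=plogis(A*theta + B):

    library(sirt)
    a <- c(1.0, 1.2, 0.8, 1.5, 1.0)     # discriminations
    b <- c(-1, -0.5, 0, 0.5, 1)         # difficulties, so B=-a*b
    res <- truescore.irt( A=a, B=-a*b, theta=seq(-3, 3, len=21) )
    res[ c(1, 11, 21), c("truescore","percscore") ]   # expected score at theta=-3, 0, 3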