[med-svn] [r-bioc-multtest] 08/10: New upstream version 2.32.0

Andreas Tille tille at debian.org
Wed Nov 8 09:14:31 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-bioc-multtest.

commit 1737406a3645677dd1daa8ebfbf82534c6bc7b15
Author: Andreas Tille <tille at debian.org>
Date:   Wed Nov 8 10:12:53 2017 +0100

    New upstream version 2.32.0
---
 DESCRIPTION                 |  33 ++
 NAMESPACE                   |  40 ++
 R/EBMTP.R                   | 550 ++++++++++++++++++++++++++
 R/EBzzz.R                   | 627 +++++++++++++++++++++++++++++
 R/ICQTNullDist.R            | 321 +++++++++++++++
 R/mt.basic.R                | 418 ++++++++++++++++++++
 R/mt.func.R                 | 375 ++++++++++++++++++
 R/nulldistn_c.R             | 198 ++++++++++
 R/statistics.R              | 635 ++++++++++++++++++++++++++++++
 R/test.R                    | 712 +++++++++++++++++++++++++++++++++
 R/zzz.R                     | 692 ++++++++++++++++++++++++++++++++
 data/golub.RData            | Bin 0 -> 304276 bytes
 debian/README.source        |   8 -
 debian/changelog            |  27 --
 debian/compat               |   1 -
 debian/control              |  41 --
 debian/copyright            |  27 --
 debian/rules                |  16 -
 debian/source/format        |   1 -
 debian/upstream/metadata    |   8 -
 debian/watch                |   3 -
 inst/CITATION               |  14 +
 inst/otherDocs/MTP.Rnw      | 908 ++++++++++++++++++++++++++++++++++++++++++
 inst/otherDocs/MTPALL.Rnw   | 493 +++++++++++++++++++++++
 inst/otherDocs/multtest.Rnw | 394 +++++++++++++++++++
 man/EBMTP-class.Rd          | 150 +++++++
 man/EBMTP.Rd                | 113 ++++++
 man/Hsets.Rd                | 103 +++++
 man/MTP-class.Rd            | 133 +++++++
 man/MTP-methods.Rd          | 114 ++++++
 man/MTP.Rd                  | 219 +++++++++++
 man/boot.null.Rd            | 144 +++++++
 man/corr.null.Rd            |  90 +++++
 man/fwer2gfwer.Rd           |  77 ++++
 man/get.index.Rd            |  45 +++
 man/golub.Rd                |  32 ++
 man/meanX.Rd                | 114 ++++++
 man/mt.internal.Rd          |  60 +++
 man/mt.maxT.Rd              | 142 +++++++
 man/mt.plot.Rd              |  89 +++++
 man/mt.rawp2adjp.Rd         | 155 ++++++++
 man/mt.reject.Rd            |  48 +++
 man/mt.sample.teststat.Rd   | 100 +++++
 man/mt.teststat.Rd          |  93 +++++
 man/ss.maxT.Rd              |  85 ++++
 man/wapply.Rd               |  50 +++
 src/Makevars                |   2 +
 src/Makevars.win            |   2 +
 src/Rpack.c                 | 316 +++++++++++++++
 src/VScount.c               |  40 ++
 src/block_sampling_fixed.c  |  98 +++++
 src/bootloop.c              |  55 +++
 src/mt.c                    | 580 +++++++++++++++++++++++++++
 src/mt.h                    | 201 ++++++++++
 src/pairt_sampling.c        | 171 ++++++++
 src/pairt_sampling_fixed.c  |  63 +++
 src/random.c                | 144 +++++++
 src/sampling.c              | 225 +++++++++++
 src/sampling_fixed.c        |  83 ++++
 src/stat_func.c             | 857 ++++++++++++++++++++++++++++++++++++++++
 src/stat_order.c            | 142 +++++++
 vignettes/MTP.pdf           | Bin 0 -> 271306 bytes
 vignettes/MTP.tex           | 933 ++++++++++++++++++++++++++++++++++++++++++++
 vignettes/MTPALL.pdf        | Bin 0 -> 280159 bytes
 vignettes/golub.R           |  48 +++
 vignettes/multtest.bib      | 293 ++++++++++++++
 vignettes/multtest.pdf      | Bin 0 -> 1699943 bytes
 67 files changed, 12819 insertions(+), 132 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..72ba442
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,33 @@
+Package: multtest
+Title: Resampling-based multiple hypothesis testing
+Version: 2.32.0
+Author: Katherine S. Pollard, Houston N. Gilbert, Yongchao Ge, Sandra
+        Taylor, Sandrine Dudoit
+Description: Non-parametric bootstrap and permutation resampling-based
+        multiple testing procedures (including empirical Bayes methods)
+        for controlling the family-wise error rate (FWER), generalized
+        family-wise error rate (gFWER), tail probability of the
+        proportion of false positives (TPPFP), and false discovery rate
+        (FDR).  Several choices of bootstrap-based null distribution
+        are implemented (centered, centered and scaled,
+        quantile-transformed). Single-step and step-wise methods are
+        available. Tests based on a variety of t- and F-statistics
+        (including t-statistics based on regression parameters from
+        linear and survival models as well as those based on
+        correlation parameters) are included.  When probing hypotheses
+        with t-statistics, users may also select a potentially faster
+        null distribution which is multivariate normal with mean zero
+        and variance covariance matrix derived from the vector
+        influence function.  Results are reported in terms of adjusted
+        p-values, confidence regions and test statistic cutoffs. The
+        procedures are directly applicable to identifying
+        differentially expressed genes in DNA microarray experiments.
+Maintainer: Katherine S. Pollard <katherine.pollard at gladstone.ucsf.edu>
+Depends: R (>= 2.10), methods, BiocGenerics, Biobase
+Imports: survival, MASS, stats4
+Suggests: snow
+License: LGPL
+LazyLoad: yes
+biocViews: Microarray, DifferentialExpression, MultipleComparison
+NeedsCompilation: yes
+Packaged: 2017-04-24 22:24:45 UTC; biocbuild
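
For orientation, a minimal session with the package might look like the sketch
below. Argument names follow the EBMTP signature shown later in this diff; MTP
is assumed here to accept the same core arguments, and the bundled golub
dataset is assumed to provide the expression matrix golub with class labels
golub.cl. B is kept deliberately small for speed.

    library(multtest)
    data(golub)                      # golub matrix plus golub.cl class labels
    m1 <- MTP(X=golub, Y=golub.cl, test="t.twosamp.unequalvar",
              typeone="fwer", alpha=0.05, B=100, seed=99)
    summary(m1)                      # rejections at nominal level 0.05
    e1 <- EBMTP(X=golub, Y=golub.cl, typeone="fdr", B=100, seed=99)
    sum(e1@adjp <= 0.05)             # number of EB FDR rejections at 0.05
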
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..f71fe05
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,40 @@
+useDynLib(multtest)
+useDynLib(multtest, VScount=VScount)
+
+import(methods, BiocGenerics, Biobase)
+
+importFrom(graphics, plot)
+importFrom(stats, update)
+
+importFrom(survival, is.Surv)
+importFrom(survival, coxph.control)
+importFrom(survival, coxph.fit)
+
+importFrom(MASS, mvrnorm)
+importFrom(MASS, rlm)
+
+importMethodsFrom(stats4, plot, summary)
+#snow does not have a NAMESPACE
+#importFrom(snow, clusterApply)
+#importFrom(snow, LBclusterApply)
+#importFrom(snow, clusterEvalQ)
+#importFrom(snow, makeCluster)
+#importFrom(snow, stopCluster)
+
+exportClasses(MTP, EBMTP)
+
+exportMethods(as.list, plot, summary, update, EBupdate, "[")
+
+export(MTP, EBMTP, as.list, mt.maxT, mt.minP, mt.plot, mt.rawp2adjp,
+  mt.reject, mt.sample.label, mt.sample.rawp, mt.sample.teststat,
+  mt.teststat, mt.teststat.num.denum, meanX, diffmeanX, FX, blockFX, 
+  twowayFX, lmX, lmY, coxY, get.Tn, boot.null, boot.resample, 
+  center.only, center.scale, quant.trans, fwer2gfwer, fwer2tppfp, 
+  fwer2fdr, get.index, ss.maxT, ss.minP, sd.maxT, sd.minP, wapply, 
+  corr.Tn, corr.null, IC.Cor.NA, IC.CorXW.NA, insert.NA, diffs.1.N, 
+  marg.samp, tQuantTrans, G.VS, ABH.h0, dens.est, Hsets, VScount,
+  mtp2ebmtp, ebmtp2mtp)
+  
+S3method(print, MTP)
+S3method(print, EBMTP)
+
diff --git a/R/EBMTP.R b/R/EBMTP.R
new file mode 100644
index 0000000..5b9abd1
--- /dev/null
+++ b/R/EBMTP.R
@@ -0,0 +1,550 @@
+#main user-level function for empirical Bayes multiple hypothesis testing
+
+EBMTP<-function(X,W=NULL,Y=NULL,Z=NULL,Z.incl=NULL,Z.test=NULL,na.rm=TRUE,test="t.twosamp.unequalvar",robust=FALSE,standardize=TRUE,alternative="two.sided",typeone="fwer",method="common.cutoff",k=0,q=0.1,alpha=0.05,smooth.null=FALSE,nulldist="boot.cs",B=1000,psi0=0,marg.null=NULL,marg.par=NULL,ncp=NULL,perm.mat=NULL,ic.quant.trans=FALSE,MVN.method="mvrnorm",penalty=1e-6,prior="conservative",bw="nrd",kernel="gaussian",seed=NULL,cluster=1,type=NULL,dispatch=NULL,keep.nulldist=TRUE,keep.raw [...]
+  ##sanity checks / formatting
+  #X
+  if(missing(X)) stop("Argument X is missing")
+  if(inherits(X,"eSet")){ 
+    if(is.character(Y)) Y<-pData(X)[,Y]
+    if(is.character(Z)){
+      if(Z%in%Y){
+        Z<-Z[!(Z%in%Y)]
+	warning(paste("Outcome Y=",Y," should not be included in the covariates Z=",Z,". Removing Y from Z.",sep=""))
+	}
+      Z<-pData(X)[,Z]
+    }
+    X<-exprs(X)
+  }
+  X<-as.matrix(X)
+  dx<-dim(X)
+  if(length(dx)==0) stop("dim(X) must have positive length")
+  p<-dx[1]
+  n<-dx[2]
+  #W
+  if(!is.null(W)){
+    W[W<=0]<-NA
+    if(is.vector(W) & length(W)==n) W <- matrix(rep(W,p),nrow=p,ncol=n,byrow=TRUE)
+    if(is.vector(W) & length(W)==p) W <- matrix(rep(W,n),nrow=p,ncol=n)
+    if(test%in%c("f","f.block","f.twoway","t.cor","z.cor")){
+      warning("Weights cannot be used with F-tests or tests of correlation parameters; arg W is being ignored.")
+      W<-NULL
+    }
+  }
+  #Y
+  if(!is.null(Y)){
+    if(is.Surv(Y)){
+      if(test!="coxph.YvsXZ") stop(paste("Test ",test," does not work with a survival object Y",sep=""))
+    }
+    else{
+      Y<-as.matrix(Y)
+      if(ncol(Y)!=1) stop("Argument Y must be a vector")
+    }
+    if(nrow(Y)!=n) stop("Outcome Y has length ",nrow(Y),", not equal to n=",n)
+  }
+  #Z
+  if(!is.null(Z)){
+    Z<-as.matrix(Z)
+    if(nrow(Z)!=n) stop("Covariates in Z have length ",nrow(Z),", not equal to n=",n,"\n")
+    #Z.incl tells which columns of Z to include in model
+    if(is.null(Z.incl)) Z.incl<-(1:ncol(Z))
+    if(length(Z.incl)>ncol(Z)) stop("Number of columns in Z.incl ",length(Z.incl)," exceeds ncol(Z)=",ncol(Z))
+    if(is.logical(Z.incl)) Z.incl<-(1:ncol(Z))[Z.incl]
+    if(is.character(Z.incl) & length(Z.incl)!=sum(Z.incl%in%colnames(Z))) stop(paste("Z.incl=",Z.incl," names columns not in Z",sep=""))
+    Za<-Z[,Z.incl]
+    #Z.test tells which column of Z to test for an association
+    if(test=="lm.XvsZ"){
+      if(is.null(Z.test)){
+        warning(paste("Z.test not specified, testing for association with variable in first column of Z:",colnames(Z)[1],sep=""))
+	Z.test<-1
+      }
+      if(is.logical(Z.test)) Z.test<-(1:ncol(Z))[Z.test]
+      if(is.character(Z.test) & !(Z.test%in%colnames(Z))) stop(paste("Z.test=",Z.test," names a column not in Z",sep=""))
+      if(is.numeric(Z.test) & !(Z.test%in%(1:ncol(Z)))) stop("Value of Z.test must be an integer between 1 and ",ncol(Z))
+      if(Z.test%in%Z.incl){
+        Z.incl<-Z.incl[!(Z.incl%in%Z.test)]
+	Za<-Z[,Z.incl]
+      }
+      Za<-cbind(Z[,Z.test],Za)
+    }
+    Z<-Za
+    rm(Za)
+  }
+  #test
+  TESTS<-c("t.onesamp","t.twosamp.equalvar","t.twosamp.unequalvar","t.pair","f","f.block","f.twoway","lm.XvsZ","lm.YvsXZ","coxph.YvsXZ","t.cor","z.cor")
+  test<-TESTS[pmatch(test,TESTS)]
+  if(is.na(test)) stop(paste("Invalid test, try one of ",TESTS,sep=""))
+
+  #robust + see below with choice of nulldist
+  if(test=="coxph.YvsXZ" & robust==TRUE)
+    warning("No robust version of coxph.YvsXZ, proceeding with usual version")
+  #temp until fix
+  if((test=="t.onesamp" | test=="t.pair") & robust==TRUE)
+    stop("Robust test statistics currently not available for one-sample or two-sample paired test statistics.")
+
+  #alternative
+  ALTS<-c("two.sided","less","greater")
+  alternative<-ALTS[pmatch(alternative,ALTS)]
+  if(is.na(alternative)) stop(paste("Invalid alternative, try one of ",ALTS,sep=""))
+
+  #null values
+  if(length(psi0)>1) stop(paste("In current implementation, all hypotheses must have the same null value. Number of null values: ",length(psi0),">1",sep=""))
+  ERROR<-c("fwer","gfwer","tppfp","fdr")
+  typeone<-ERROR[pmatch(typeone,ERROR)]
+  if(is.na(typeone)) stop(paste("Invalid typeone, try one of ",ERROR,sep=""))
+  if(any(alpha<0) | any(alpha>1)) stop("Nominal level alpha must be between 0 and 1.")
+  nalpha<-length(alpha)
+  reject<-
+    if(test=="z.cor" | test=="t.cor") matrix(nrow=0,ncol=0) # deprecated for correlations, rownames now represent p choose 2 edges - too weird and clunky in current state for output.
+    else if(nalpha) array(dim=c(p,nalpha),dimnames=list(rownames(X),paste("alpha=",alpha,sep="")))
+    else matrix(nrow=0,ncol=0)
+
+  if(typeone=="fwer"){
+    if(length(k)>1) k<-k[1]
+    if(sum(k)!=0) stop("FWER control, by definition, requires k=0.  To control k false positives, please select typeone='gfwer'.")
+  }
+
+  if(typeone=="gfwer"){
+    if(length(k)>1){
+      k<-k[1]
+      warning("Can only compute gfwer adjp for one value of k at a time (using first value). Use EBupdate() to get results for additional values of k.")
+    }
+    if(k==0) warning("gfwer(0) is the same as fwer.")
+    if(k<0) stop("Number of false positives cannot be negative.")
+    if(k>=p) stop(paste("Number of false positives must be less than number of tests=",p,sep=""))
+  }
+
+  if(typeone=="tppfp"){
+    if(length(q)>1){
+      q<-q[1]
+      warning("Can only compute tppfp adjp for one value of q at a time (using first value). Use EBupdate() to get results for additional values of q.")
+    }
+    if(q<0) stop("Proportion of false positives, q, cannot be negative.")
+    if(q>1) stop("Proportion of false positives, q, must be less than 1.")
+  }
+
+  #null distribution
+  NULLS<-c("boot","boot.cs","boot.ctr","boot.qt","ic","perm")
+  nulldist<-NULLS[pmatch(nulldist,NULLS)]
+  if(is.na(nulldist)) stop(paste("Invalid nulldist, try one of ",NULLS,sep=""))
+  if(nulldist=="perm") stop("EBMTP is currently available only with bootstrap-based and influence curve null distribution methods.  One can, however, supply an externally created perm.mat for boot.qt marginal null distributions.")
+  if(nulldist=="boot"){
+    nulldist <- "boot.cs"
+    warning("nulldist='boot' is deprecated and now corresponds to 'boot.cs'. Proceeding with default centered and scaled null distribution.")
+  }
+  if(nulldist!="perm" & test=="f.block") stop("f.block test only available with permutation null distribution. Try test=f.twoway")
+  if(nulldist=="ic" & keep.rawdist==TRUE) stop("Test statistics distribution estimation using keep.rawdist=TRUE is only available with a bootstrap-based null distribution")
+  if(nulldist=="boot.qt" & robust==TRUE) stop("Quantile transform method requires parametric marginal nulldist.  Set robust=FALSE")
+  if(nulldist=="boot.qt" & standardize==FALSE) stop("Quantile transform method requires standardized test statistics.  Set standardize=TRUE")
+  if(nulldist=="ic" & robust==TRUE) stop("Influence curve null distributions available only for (parametric) t-statistics.  Set robust=FALSE")
+  if(nulldist=="ic" & standardize==FALSE) stop("Influence curve null distributions available only for (standardized) t-statistics.  Set standardize=TRUE")
+  if(nulldist=="ic" & (test=="f" | test=="f.twoway" | test=="f.block" | test=="coxph.YvsXZ")) stop("Influence curve null distributions available only for tests of mean, regression and correlation parameters. Cox PH also not yet implemented.")
+  if(nulldist!="ic" & (test=="t.cor" | test=="z.cor")) stop("Tests of correlation parameters currently only implemented for influence curve null distributions")
+  if((test!="t.cor" & test!="z.cor") & keep.index) warning("Matrix of indices only returned for tests of correlation parameters")
+
+  ### specifically for sampling null test statistics with IC nulldist
+  MVNS <- c("mvrnorm","Cholesky")
+  MVN.method <- MVNS[pmatch(MVN.method,MVNS)]
+  if(is.na(MVN.method)) stop("Invalid sampling method for IC-based MVN null test statistics.  Try either 'mvrnorm' or 'Cholesky'")
+
+  #methods
+  METHODS<-c("common.cutoff","common.quantile")
+  method<-METHODS[pmatch(method,METHODS)]
+  if(is.na(method)) stop(paste("Invalid method, try one of ",METHODS,sep=""))
+  if(method=="common.quantile") stop("Common quantile procedure not currently implemented.  Common cutoff is pretty good, though.")
+
+  #prior
+  PRIORS<-c("conservative","ABH","EBLQV")
+  prior<-PRIORS[pmatch(prior,PRIORS)]
+  if(is.na(prior)) stop(paste("Invalid prior, try one of ",PRIORS,sep=""))
+
+  #estimate 
+  ftest<-FALSE
+  if(test=="f" | test=="f.block"){
+    ftest<-TRUE
+    if(!is.null(W)) warning("Weighted F tests not yet implemented, proceeding with unweighted version")
+  }
+
+    ##making a closure for the particular test
+    theta0<-0
+    tau0<-1
+    stat.closure<-switch(test,
+                         t.onesamp=meanX(psi0,na.rm,standardize,alternative,robust),
+                         t.twosamp.equalvar=diffmeanX(Y,psi0,var.equal=TRUE,na.rm,standardize,alternative,robust),
+                         t.twosamp.unequalvar=diffmeanX(Y,psi0,var.equal=FALSE,na.rm,standardize,alternative,robust),
+                         t.pair={
+                           uY<-sort(unique(Y))
+                           if(length(uY)!=2) stop("Must have two class labels for this test")
+                           if(trunc(n/2)!=n/2) stop("Must have an even number of samples for this test")
+                           X<-X[,Y==uY[2]]-X[,Y==uY[1]]
+                           meanX(psi0,na.rm,standardize,alternative,robust)
+                         },
+                         f={
+                           theta0<-1
+                           tau0<-2/(length(unique(Y))-1)
+                           FX(Y,na.rm,robust)
+                         },
+                         f.twoway={
+                           theta0<-1
+                           tau0 <- 2/((length(unique(Y))*length(gregexpr('12', paste(Y, collapse=""))[[1]]))-1)
+                           twowayFX(Y,na.rm,robust)
+                         },
+                         lm.XvsZ=lmX(Z,n,psi0,na.rm,standardize,alternative,robust),
+                         lm.YvsXZ=lmY(Y,Z,n,psi0,na.rm,standardize,alternative,robust),
+                         coxph.YvsXZ=coxY(Y,Z,psi0,na.rm,standardize,alternative),
+                         t.cor=NULL,
+                         z.cor=NULL)
+
+    ##computing observed test statistics
+    if(test=="t.cor" | test=="z.cor") obs<-corr.Tn(X,test=test,alternative=alternative,use="pairwise")
+    else obs<-get.Tn(X,stat.closure,W)
+    statistic <- (obs[3,]*obs[1,]/obs[2,]) #observed, with sign
+    Tn <- obs[1,]/obs[2,]  # for sidedness, matching with nulldistn
+
+      #Begin nulldists.  Permutation no longer included.
+  if(nulldist=="boot.qt"){
+  if(!is.null(marg.par)){
+      if(is.matrix(marg.par)) marg.par <- marg.par
+      if(is.vector(marg.par)) marg.par <- matrix(rep(marg.par,p),nrow=p,ncol=length(marg.par),byrow=TRUE)
+        }
+      if(is.null(ncp)) ncp = 0
+      if(!is.null(perm.mat)){ 
+        if(dim(X)[1]!=dim(perm.mat)[1]) stop("perm.mat must have the same number of rows as X.")
+        }
+    
+      nstats <- c("t.twosamp.unequalvar","z.cor","lm.XvsZ","lm.YvsXZ","coxph.YvsXZ")
+      tstats <- c("t.onesamp","t.twosamp.equalvar","t.pair","t.cor")
+      fstats <- c("f","f.block","f.twoway")
+      
+      # If default, set values of marg.null to pass on. 
+      if(is.null(marg.null)){
+	  if(any(nstats == test)) marg.null="normal"
+	  if(any(tstats == test)) marg.null="t"
+	  if(any(fstats == test)) marg.null="f"
+        }
+      else{ # Check to see that user-supplied entries make sense.  
+        MARGS <- c("normal","t","f","perm")
+        marg.null <- MARGS[pmatch(marg.null,MARGS)]
+        if(is.na(marg.null)) stop("Invalid marginal null distribution. Try one of: normal, t, f, or perm")
+        if(any(tstats==test) & marg.null == "f") stop("Choice of test stat and marginal nulldist do not match")
+        if(any(fstats==test) & (marg.null == "normal" | marg.null=="t")) stop("Choice of test stat and marginal nulldist do not match")
+        if(marg.null=="perm" & is.null(perm.mat)) stop("Must supply a matrix of permutation test statistics if marg.null='perm'")
+        if(marg.null=="f" & ncp < 0) stop("Cannot have negative noncentrality parameter with F distribution.")
+      }
+    
+      # If default (=NULL), set values of marg.par. Return as m by 1 or 2 matrix.
+      if(is.null(marg.par)){
+		marg.par <- switch(test,
+                          t.onesamp = n-1,
+                          t.twosamp.equalvar = n-2,
+                          t.twosamp.unequalvar = c(0,1),
+                          t.pair = floor(n/2-1),
+                          f = c(length(is.finite(unique(Y)))-1,dim(X)[2]- length(is.finite(unique(Y))) ),
+                          f.twoway = {
+                            c(length(is.finite(unique(Y)))-1, dim(X)[2]-(length(is.finite(unique(Y)))*length(gregexpr('12', paste(Y, collapse=""))[[1]]))-2)
+                            },
+                          lm.XvsZ = c(0,1),
+                          lm.YvsXZ = c(0,1),
+                          coxph.YvsXZ = c(0,1),
+                          t.cor = n-2,
+                          z.cor = c(0,1)
+                          )
+      marg.par <- matrix(rep(marg.par,dim(X)[1]),nrow=dim(X)[1],ncol=length(marg.par),byrow=TRUE)
+              }
+     else{ # Check that user-supplied values of marg.par make sense (marg.par != NULL)
+       if((marg.null=="t" | marg.null=="f") & any(marg.par[,1]==0)) stop("Cannot have zero df with t or F distributions. Check marg.par settings")
+       if(marg.null=="t" & dim(marg.par)[2]>1) stop("Too many parameters for t distribution.  marg.par should have length 1.")
+       if((marg.null=="f" | marg.null=="normal") & dim(marg.par)[2]!=2) stop("Incorrect number of parameters defining marginal null distribution.  marg.par should have length 2.")
+     }
+}
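+  # Illustrative defaults (comment only, not executed): with
+  # test="t.twosamp.equalvar" and n=38 samples, the logic above selects
+  # marg.null="t" and fills marg.par as a p x 1 matrix of n-2 = 36 df.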
+  
+    ##or computing influence curves
+    if(nulldist=="ic"){
+      rawdistn <- matrix(nrow=0,ncol=0)
+      nulldistn<-switch(test,
+                        t.onesamp=corr.null(X,W,Y,Z,test="t.onesamp",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.pair=corr.null(X,W,Y,Z,test="t.pair",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.twosamp.equalvar=corr.null(X,W,Y,Z,test="t.twosamp.equalvar",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.twosamp.unequalvar=corr.null(X,W,Y,Z,test="t.twosamp.unequalvar",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        lm.XvsZ=corr.null(X,W,Y,Z,test="lm.XvsZ",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        lm.YvsXZ=corr.null(X,W,Y,Z,test="lm.YvsXZ",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.cor=corr.null(X,W,Y,Z,test="t.cor",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        z.cor=corr.null(X,W,Y,Z,test="z.cor",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat)
+                        )
+    }
+
+    ## Cluster Checking
+    if ((!is.numeric(cluster))&(!inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster"))))
+       stop("Cluster argument must be integer or cluster object")
+    ## Create cluster if cluster > 1 and load required packages on nodes
+    if(is.numeric(cluster)){
+      if(cluster>1){
+    ## Check installation of packages
+      have_snow <- qRequire("snow")
+      if(!have_snow) stop("The package snow is required to use a cluster. Either snow is not installed or it is not in the standard library location.")
+      if (is.null(type))
+         stop("Must specify type argument to use a cluster. Alternatively, provide a cluster object as the argument to cluster.")
+      if (type=="SOCK")
+         stop("Create desired cluster and specify cluster object as the argument to cluster directly.")
+      if ((type!="PVM")&(type!="MPI"))
+         stop("Type must be MPI or PVM")
+      else if (type=="MPI"){
+         have_rmpi <- qRequire("Rmpi")
+         if(!have_rmpi) stop("The package Rmpi is required for the specified type. Either Rmpi is not installed or it is not in the standard library location.")
+       }
+      else if (type=="PVM"){
+         have_rpvm <- qRequire("rpvm")
+         if(!have_rpvm) stop("The package rpvm is required for the specified type. Either rpvm is not installed or it is not in the standard library location.")
+      }
+      cluster <- makeCluster(cluster, type)
+      clusterEvalQ(cluster, {library(Biobase); library(multtest)})
+      if (is.null(dispatch)) dispatch=0.05
+      }
+    }
+    else if(inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster"))){
+      clusterEvalQ(cluster, {library(Biobase); library(multtest)})
+      if (is.null(dispatch)) dispatch=0.05
+    }
+
+    ##computing the nonparametric bootstrap (null) distribution
+    if(nulldist=="boot.cs" | nulldist=="boot.ctr" | nulldist=="boot.qt"){
+      nulldistn<-boot.null(X,Y,stat.closure,W,B,test,nulldist,theta0,tau0,marg.null,marg.par,ncp,perm.mat,alternative,seed,cluster,dispatch,keep.nulldist,keep.rawdist)
+     if(inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster")))  stopCluster(cluster)
+    rawdistn <- nulldistn$rawboot
+    nulldistn <- nulldistn$muboot
+    }
+
+
+    ##performing multiple testing
+    #rawp values
+    rawp<-apply((obs[1,]/obs[2,])<=nulldistn,1,mean)
+    if(smooth.null & (min(rawp,na.rm=TRUE)==0)){
+      zeros<-(rawp==0)
+      if(sum(zeros)==1){
+        den<-density(nulldistn[zeros,],to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+	rawp[zeros]<-sum(den$y[den$x>=(obs[1,zeros]/obs[2,zeros])])/sum(den$y)
+      }
+      else{
+        den<-apply(nulldistn[zeros,],1,density,to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+	newp<-NULL
+	stats<-obs[1,zeros]/obs[2,zeros]
+	for(i in 1:length(den)){
+          newp[i]<-sum(den[[i]]$y[den[[i]]$x>=stats[i]])/sum(den[[i]]$y)
+	}
+        rawp[zeros]<-newp		
+      }
+      rawp[rawp<0]<-0
+    }
+    #c, cr, adjp - this is where the function gets a lot different from MTP.
+    ### Begin nuts and bolts of EB here.
+
+    ### Set G function of type I error rates
+#MOVED BELOW SO V,S DEFINED
+#    error.closure <- switch(typeone, fwer=G.VS(V,S=NULL,tp=TRUE,bound=0),
+#                                     gfwer=G.VS(V,S=NULL,tp=TRUE,bound=k),
+#                                     tppfp=G.VS(V,S,tp=TRUE,bound=q),
+#                                     fdr=G.VS(V,S,tp=FALSE,bound=NULL)
+#                            )
+
+    ### Generate guessed sets of true null hypotheses
+    ### This function relates null and full densities.
+    ### Sidedness should be accounted for above.
+    H0.sets <- Hsets(Tn, nullmat=nulldistn, bw, kernel, prior, B, rawp=rawp) 
+    EB.h0M <- H0.sets$EB.h0M
+    prior.type <- prior
+    prior.val <- H0.sets$prior
+    lqv <- H0.sets$pn.out
+    H0.sets <- H0.sets$Hsets.mat
+
+    m <- length(Tn)
+
+    ### B is defined in global environment.
+    ### For adjusted p-values, just sort now and be able to get the index.
+    ### We want to sort the test statistics in terms of their evidence against the null
+    ### i.e., from largest to smallest.
+    ord.Tn <- order(Tn,decreasing=TRUE)
+    sort.Tn <- Tn[ord.Tn]
+    Z.nulls <- nulldistn[ord.Tn,]*H0.sets[ord.Tn,]
+    Tn.mat <- (1-H0.sets[ord.Tn,])*matrix(rep(sort.Tn,B),nrow=m,ncol=B)
+
+    ### Rather than using a sieve of candidate cutoffs, for adjp, test statistics
+    ### are used as cutoffs themselves.
+    cutoffs <- sort.Tn
+    clen <- m
+    cat("counting guessed false positives...", "\n")
+    Vn <- .Call(VScount,as.numeric(Z.nulls),as.numeric(cutoffs),as.integer(m),as.integer(B),as.integer(clen),NAOK=TRUE)
+    cat("\n")
+    Vn <- matrix(Vn, nrow=clen, ncol=B)
+
+    if(typeone=="fwer" | typeone=="gfwer") Sn <- NULL
+    else{
+      cat("counting guessed true positives...", "\n")
+      Sn <- .Call(VScount,as.numeric(Tn.mat),as.numeric(cutoffs),as.integer(m),as.integer(B),as.integer(clen),NAOK=TRUE)
+      cat("\n")
+      Sn <- matrix(Sn, nrow=clen, ncol=B)
+    }
+
+    ### Set G function of type I error rates
+    G <- switch(typeone, fwer=G.VS(Vn,Sn,tp=TRUE,bound=0),
+                         gfwer=G.VS(Vn,Sn,tp=TRUE,bound=k),
+                         tppfp=G.VS(Vn,Sn,tp=TRUE,bound=q),
+                         fdr=G.VS(Vn,Sn,tp=FALSE,bound=NULL)
+                )
+
+    Gmeans <- rowSums(G,na.rm=TRUE)/B
+
+    ### Now get adjps and rejection indicators.
+    adjp <- rep(0,m)
+    for(i in 1:m){
+      adjp[i] <- min(Gmeans[i:m])
+    }
+
+    ### Now reverse order to go back to original order of test statistics.
+    rev.order <- rep(0,m)
+    for(i in 1:m){
+      rev.order[i] <- which(sort.Tn==Tn[i])
+    }
+    adjp <- adjp[rev.order]
+    if(keep.falsepos) Vn <- Vn[rev.order,]
+    else Vn <- matrix(0,nrow=0,ncol=0)
+    if(keep.truepos) Sn <- Sn[rev.order,]
+    else Sn <- matrix(0,nrow=0,ncol=0)
+    if(typeone=="fwer" | typeone=="gfwer") Sn <- matrix(0,nrow=0,ncol=0)
+    if(keep.errormat) G <- G[rev.order,]
+    else G <- matrix(0,nrow=0,ncol=0)
+    if(!keep.Hsets) H0.sets <- matrix(0,nrow=0,ncol=0)
+  
+    # No confidence regions, but vector of rejections logicals, and cutoff, if applicable
+    ### Generate matrix of rejection logicals.
+    EB.reject <- reject
+    if(test!="z.cor" & test!="t.cor") for(a in 1:nalpha) EB.reject[,a]<-adjp<=alpha[a]
+    else EB.reject <- matrix(0,nrow=0,ncol=0)
+
+    ### Grab test statistics corresponding to cutoff, based on adjp.
+    #Leave out.
+    #cutoff <- rep(0,nalpha)
+    #for(a in 1:nalpha){
+    #  if(sum(adjp<=alpha[a])>0){
+    #   temp <- max(adjp[adjp<=alpha[a]])
+    #   cutoff.ind <- which(adjp==temp)
+    #   cutoff[a] <- max(Tn[cutoff.ind])
+    # }
+    #  else cutoff[a] <- NA
+    #}
+
+    #output results
+    if(!keep.nulldist) nulldistn<-matrix(nrow=0,ncol=0)
+    if(keep.rawdist==FALSE) rawdistn<-matrix(nrow=0,ncol=0)
+    if(is.null(Y)) Y<-matrix(nrow=0,ncol=0)
+    if(nulldist!="boot.qt"){  
+      marg.null <- vector("character")
+      marg.par <- matrix(nrow=0,ncol=0)
+    }
+    if(!keep.label) label <- vector("numeric",0)
+    if(!keep.index) index <- matrix(nrow=0,ncol=0)
+    if(test!="z.cor" & test !="t.cor") index <- matrix(nrow=0,ncol=0)
+    if(keep.index & (test=="z.cor" | test=="t.cor")){
+      index <- t(combn(p,2))
+      colnames(index) <- c("Var1","Var2")
+    }
+    names(adjp)<-names(rawp)
+    estimates <- obs[3,]*obs[1,]
+    if(ftest) estimates <- vector("numeric",0)
+    if(test=="t.onesamp" | test=="t.pair") estimates <- obs[3,]*obs[1,]/sqrt(n)
+    out<-new("EBMTP",statistic=statistic,
+      estimate=estimates,
+      sampsize=n,rawp=rawp,adjp=adjp,reject=EB.reject,
+      rawdist=rawdistn,nulldist=nulldistn,nulldist.type=nulldist,
+      marg.null=marg.null,marg.par=marg.par,
+      label=label,falsepos=Vn,truepos=Sn,errormat=G,EB.h0M=EB.h0M,
+      prior=prior.val,prior.type=prior.type,lqv=lqv,Hsets=H0.sets,
+      index=index,call=match.call(),seed=as.integer(seed))
+
+  return(out)
+}
+
+
+
+######################################################
+######################################################
+######################################################
+
+### Function closure for different error rates.
+# CHANGE G.VS to function, not closure
+#G.VS <- function(V,S=NULL,tp=TRUE,bound){
+#  function(V,S){
+#    if(is.null(S)) g <- V     #FWER, GFWER
+#    else g <- V/(V+S)         #TPPFP, FDR
+#    if(tp==TRUE) {
+#      temp <- matrix(0,dim(g)[1],dim(g)[2])
+#      temp[g>bound] <- 1      #FWER, GFWER, TPPFP
+#      g <- temp
+#    }
+#    g
+#  }
+#}
+G.VS <- function(V,S=NULL,tp=TRUE,bound){
+   if(is.null(S)) g <- V     #FWER, GFWER
+   else g <- V/(V+S)         #TPPFP, FDR
+   if(tp==TRUE) {
+     temp <- matrix(0,dim(g)[1],dim(g)[2])
+     temp[g>bound] <- 1      #FWER, GFWER, TPPFP
+     g <- temp
+   }
+   g
+}
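+# Illustrative values (comment only, not executed): for FWER (S=NULL, bound=0),
+# a guessed false-positive count matrix V = matrix(c(0,2,1,0),2,2) maps to the
+# indicator matrix 1{V>0} = matrix(c(0,1,1,0),2,2); row means of G over the B
+# columns then estimate P(V>0) at each cutoff, as used for Gmeans above.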
+
+### Adaptive BH estimate of the number of true null hypotheses.
+ABH.h0 <- function(rawp){
+  sortrawp <- sort(rawp)
+  m <- length(rawp)
+  ho.m <- rep(0,m)
+  for(k in 1:length(rawp)){
+    ho.m[k] <- (m+1-k)/(1-sortrawp[k])
+  }
+  grab <- min(which(diff(ho.m)>0))
+  ho.hat <- ceiling(min(ho.m[grab],m))
+  ho.hat
+}
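+# Illustrative use (comment only, not executed; toy inputs):
+#   rawp <- c(runif(50, 0, 0.01), runif(50))  # 50 strong alternatives, 50 nulls
+#   ABH.h0(rawp)                              # estimate of h0, roughly 50
+# This estimate feeds prior="ABH" via priorval <- ABH.h0(rawp)/length(Tn).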
+
+### Function for generating guessed sets via kernel density estimation.
+### The marginal null is specified, although if boot.cs or boot.ctr is true,
+### we can pool over the matrix of centered and scaled test statistics to
+### estimate the null density.
+### Also want the user to be able to set values of bw and kernel like they could
+### using the density() function in R.
+dens.est <- function(x,t,bw,kernel){
+  dg <- density(t, from=x, to=x, bw=bw, kernel=kernel)
+  dg$y[1]
+}
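+# Illustrative use (comment only, not executed): the kernel density estimate
+# built from the null statistics t, evaluated at the single point x:
+#   dens.est(1.96, t=rnorm(1e4), bw="nrd", kernel="gaussian")  # ~ dnorm(1.96)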
+
+Hsets <- function(Tn, nullmat, bw, kernel, prior, B, rawp){
+### Full density estimation over vector of observed test statistics,
+### saves on time and is asymptotic bootstrap distribution anyway.
+### (As opposed to pooling over the whole matrix of raw tstats)
+  f.Tn.est <- apply(as.matrix(Tn),1,dens.est,t=Tn, bw=bw, kernel=kernel)
+
+  ### Obtain null density - use matrix of null test statistics...
+  ### Ensures sidedness maintained more generally, especially in common-cutoff scenario
+  dens.est.null <- approxfun(density(nullmat, bw=bw, kernel=kernel))
+    f.Tn.null <- dens.est.null(Tn)
+
+  ### pn represent local q-values obtained by density estimation
+  ### The density ratio can be so small that NaN is returned; such hypotheses
+  ### are effectively certain true alternatives, so their pn is set to 0 below.
+  pn <- pmin(1, f.Tn.null/f.Tn.est)
+  pn[is.na(pn)] <- 0
+
+  ### Do you want to relax the prior?
+  if(prior=="conservative") priorval <- 1
+  if(prior=="ABH")          priorval <- ABH.h0(rawp)/length(Tn)
+  if(prior=="EBLQV")        priorval <- sum(pn,na.rm=TRUE)/length(Tn)
+
+  pn.out <- pmin(1, priorval*pn)
+  pn.out[is.na(pn.out)] <- 0
+
+  # Draw Bernoullis for Ho matrix (Guessed sets of true null and true alternative hypotheses).
+  # 1 = guessed true null, 0 = guessed true alternative hypotheses
+  Hsets.mat <- matrix(rbinom(length(Tn)*B,1,pn.out),nrow=length(Tn),ncol=B)
+  out <- list(Hsets.mat=Hsets.mat, EB.h0M=sum(pn,na.rm=TRUE)/length(Tn), prior=priorval, pn.out=pn.out)
+  out
+}
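+# Illustrative use (comment only, not executed): with Tn the m observed test
+# statistics, nulldistn the m x B null matrix, and rawp as computed in EBMTP():
+#   H0 <- Hsets(Tn, nullmat=nulldistn, bw="nrd", kernel="gaussian",
+#               prior="conservative", B=ncol(nulldistn), rawp=rawp)
+#   dim(H0$Hsets.mat)  # m x B matrix of 0/1 guessed-true-null indicators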
+
diff --git a/R/EBzzz.R b/R/EBzzz.R
new file mode 100644
index 0000000..df1b23b
--- /dev/null
+++ b/R/EBzzz.R
@@ -0,0 +1,627 @@
+setClass("EBMTP",representation(statistic="numeric",
+                              estimate="numeric",
+                              sampsize="numeric",
+                              rawp="numeric",
+                              adjp="numeric",
+                              reject="matrix",
+                              rawdist="matrix",
+                              nulldist="matrix",
+                              nulldist.type="character",
+                              marg.null="character",
+                              marg.par="matrix",
+                              label="numeric",
+                              falsepos="matrix",
+                              truepos="matrix",
+                              errormat="matrix",  
+                              EB.h0M="numeric",
+                              prior="numeric",
+                              prior.type="character",
+                              lqv="numeric",
+                              Hsets="matrix",
+                              index="matrix",
+                              call="call",
+                              seed="integer"),
+         prototype=list(statistic=vector("numeric",0),
+         estimate=vector("numeric",0),
+         sampsize=vector("numeric",0),
+         rawp=vector("numeric",0),
+         adjp=vector("numeric",0),
+         reject=matrix(nrow=0,ncol=0),
+         rawdist=matrix(nrow=0,ncol=0),
+         nulldist=matrix(nrow=0,ncol=0),
+         nulldist.type=vector("character",0),
+         marg.null=vector("character",0),
+         marg.par=matrix(nrow=0,ncol=0),
+         label=vector("numeric",0),
+         falsepos=matrix(nrow=0,ncol=0),
+         truepos=matrix(nrow=0,ncol=0),
+         errormat=matrix(nrow=0,ncol=0),
+         EB.h0M=vector("numeric",0),
+         prior=vector("numeric",0),
+         prior.type=vector("character",0),
+         lqv=vector("numeric",0),
+         Hsets=matrix(nrow=0,ncol=0),
+         index=matrix(nrow=0,ncol=0),
+         call=NULL,
+         seed=vector("integer",0)))
+
+print.EBMTP<-function(x,...){
+  call.list<-as.list(x@call)
+  cat("\n")
+  writeLines(strwrap("Multiple Testing Procedure",prefix="\t"))
+  cat("\n")
+  cat(paste("Object of class: ",class(x)))
+  cat("\n")
+  cat(paste("sample size =",x@sampsize,"\n"))
+  cat(paste("number of hypotheses =",length(x@statistic),"\n"))
+  cat("\n")
+  cat(paste("test statistics =",ifelse(is.null(call.list$test),"t.twosamp.unequalvar",call.list$test),"\n"))
+  cat(paste("type I error rate =",ifelse(is.null(call.list$typeone),"fwer",call.list$typeone),"\n"))
+  nominal<-eval(call.list$alpha)
+  if(is.null(nominal)) nominal<-0.05
+  cat("nominal level alpha = ")
+  cat(nominal,"\n")
+  cat(paste("multiple testing procedure =",ifelse(is.null(call.list$method),"common.cutoff",call.list$method),"\n"))
+  cat("\n")
+  cat("Call: ")
+  print(x@call)
+  cat("\n")
+  cat("Slots: \n")
+  snames<-slotNames(x)
+  n<-length(snames)
+  out<-matrix(nrow=n,ncol=4)
+  dimnames(out)<-list(snames,c("Class","Mode","Length","Dimension"))
+  for(s in snames) out[s,]<-c(class(slot(x,s)),mode(slot(x,s)),length(slot(x,s)),paste(dim(slot(x,s)),collapse=","))
+  out<-data.frame(out)
+  print(out)
+  invisible(x)
+}
+
+### Put EBupdate last, since it is such a pain.
+### Start with the rest of the other methods, and see what
+### we want to keep/change from the MTP methods
+# plot, EBMTP currently does not return cutoffs or confidence regions, so, leave 5 and 6 from MTP
+# blank
+if( !isGeneric("plot") ) setGeneric("plot", function(x, y, ...) standardGeneric("plot"))
+
+setMethod("plot","EBMTP",
+	function(x,y="missing",which=1:4,caption=c("Rejections vs. Error Rate",
+                                           "Ordered Adjusted p-values","Adjusted p-values vs. Statistics",
+                                           "Unordered Adjusted p-values","Estimates & Confidence Regions",
+                                           "Test Statistics & Cut-offs"),sub.caption = deparse(x@call,width.cutoff=500),
+                   ask = prod(par("mfcol"))<length(which)&&dev.interactive(),
+                   logscale=FALSE,top=10,...){
+          call.list<-as.list(x@call)
+          if(!inherits(x,"EBMTP")) stop("Use only with 'EBMTP' objects")
+          if(is.null(which)) which<-1:4
+          if(length(caption)==1) caption<-rep(caption,4)
+          if(length(x@adjp)==0 & any(which)) stop("plot methods require adjusted p-values")
+          #if(length(x@conf.reg)==0 & any(which==5)) stop("plot method 5 requires confidence regions")
+          #if(length(x@cutoff)==0 & any(which==6)) stop("plot method 6 requires cut-offs")
+          #go back to MTP method if we eventually want to put these in once cut-offs and conf reg
+          #added into functionality, more for these below was deleted out. 
+          if(!is.numeric(which) || any(which<1) || any(which>4)) stop("which must be in 1:4")
+          show<-rep(FALSE,4)
+          show[which]<-TRUE
+          m<-length(x@adjp)
+          if(top>m){
+            warning("number of top hypotheses to plot exceeds total number of hypotheses - plotting fewer than requested")
+            top<-m
+          }
+          ord<-order(x@adjp)
+          if(any(show[2:4]) & logscale){
+            pv<-(-log(x@adjp,10))
+            pvlab<-"-log (base 10) Adjusted p-values"
+          }
+          else{
+            pv<-x@adjp
+            pvlab<-"Adjusted p-values"
+          }
+          one.fig<-prod(par("mfcol"))==1
+          if(ask){
+            op<-par(ask=TRUE)
+            on.exit(par(op))
+          }
+          if(show[1]){
+            nominal<-seq(0,1,by=0.05)
+            r<-mt.reject(x@adjp,nominal)$r
+            matplot(nominal,r,xlab="Type I error rate",
+                    ylab="Number of rejected hypotheses",
+                    type="l",...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[1],3,0.25)
+          }
+          if(show[2]){
+            spval<-sort(pv)
+            matplot(1:m,spval,xlab="Number of rejected hypotheses",
+                    ylab=paste("Sorted",pvlab,sep=" "),type="l",...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[2],3,0.25)
+          }
+          if(show[3]){
+            symb<-ifelse(length(pv)<100,"o",".")
+            matplot(x@statistic,pv,xlab="Test statistics",
+                    ylab=pvlab,type="p",pch=symb,...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[3],3,0.25)
+          }
+          if(show[4]){
+            matplot(1:m,pv,xlab="Index",ylab=pvlab,type = "l", ...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[4],3,0.25)
+          }
+          if(!one.fig && par("oma")[3]>=1) mtext(sub.caption,outer=TRUE,cex=0.8)
+          invisible()
+          })
+
+
+#summary
+if( !isGeneric("summary") )
+    setGeneric("summary", function(object, ...) standardGeneric("summary"))
+
+setMethod("summary","EBMTP",
+          function(object,...){
+            call.list<-as.list(object@call)
+            #cat(paste("EBMTP: ",ifelse(is.null(call.list$method),"common.cutoff",call.list$method),"\n"))
+            cat("EBMTP: common.cutoff","\n") # always common.cutoff, even when being updated from MTP object
+            err<-ifelse(is.null(call.list$typeone),"fwer",call.list$typeone)
+            if(err=="gfwer") err<-paste(err," (k=",ifelse(is.null(call.list$k),0,call.list$k),")",sep="")
+            if(err=="tppfp") err<-paste(err," (q=",ifelse(is.null(call.list$q),0.1,call.list$q),")",sep="")
+	    cat(paste("Type I error rate: ",err,"\n"))
+            cat(paste("prior: ",ifelse(is.null(call.list$prior),"conservative",call.list$prior),"\n\n"))
+            nominal<-eval(call.list$alpha)
+            if(is.null(nominal)) nominal<-0.05
+            if(is.null(call.list$test)) test <- "t.twosamp.unequalvar"
+            else test <- call.list$test
+            if(test!="t.cor" & test!="z.cor") out1<-data.frame(Level=nominal,Rejections=apply(object@reject,2,sum),row.names=NULL)
+            else{
+              tmp <- rep(0,length(nominal))
+              for(i in 1:length(nominal)) tmp[i] <- sum(object@adjp < nominal[i])
+              out1 <- data.frame(Level=nominal,Rejections=tmp,row.names=NULL)
+            }
+            print(out1)
+            cat("\n")
+            out2<-get.index(object@adjp,object@rawp,abs(object@statistic))
+            out3<-rn<-NULL
+            if(!is.null(object@adjp)){
+              out3<-rbind(out3,c(summary(object@adjp[!is.na(object@adjp)]),sum(is.na(object@adjp))))
+              rn<-c(rn,"adjp")
+            }
+            if(!is.null(object@rawp)){
+              out3<-rbind(out3,c(summary(object@rawp[!is.na(object@rawp)]),sum(is.na(object@rawp))))
+              rn<-c(rn,"rawp")
+            }
+            if(!is.null(object@statistic)){
+              out3<-rbind(out3,c(summary(object@statistic[!is.na(object@statistic)]),sum(is.na(object@statistic))))
+              rn<-c(rn,"statistic")
+            }
+            if(!is.null(object@estimate)){
+              out3<-rbind(out3,c(summary(object@estimate[!is.na(object@estimate)]),sum(is.na(object@estimate))))
+              rn<-c(rn,"estimate")
+            }
+            rownames(out3)<-rn
+            colnames(out3)[ncol(out3)]<-"NA's"
+            print(out3)
+            invisible(list(rejections=out1,index=out2,summaries=out3))
+          })
+
+
+
+if( !isGeneric("ebmtp2mtp") )
+    setGeneric("ebmtp2mtp", function(object, ...) standardGeneric("ebmtp2mtp"))
+
+setMethod("ebmtp2mtp","EBMTP",
+          function(object,...){
+            y<-new("MTP")
+            slot(y,"statistic") <- object@statistic
+            slot(y,"estimate") <- object@estimate
+            slot(y,"sampsize") <- object@sampsize
+            slot(y,"rawp") <- object@rawp
+            slot(y,"adjp") <- object@adjp
+            slot(y,"reject") <- object@reject
+            slot(y,"rawdist") <- object@rawdist
+            slot(y,"nulldist") <- object@nulldist
+            slot(y,"nulldist.type") <- object@nulldist.type
+            slot(y,"marg.null") <- object@marg.null
+            slot(y,"marg.par") <- object@marg.par
+            slot(y,"label") <- object@label
+            slot(y,"index") <- object@index
+            slot(y,"call") <- object@call
+            slot(y,"seed") <- object@seed
+            invisible(y)
+          }
+          )
+            
+setMethod("[","EBMTP",
+          function(x,i,j=NULL,...,drop=FALSE){
+            if(missing(i))
+            i<-TRUE
+            newx<-x
+            slot(newx,"statistic")<-x@statistic[i]
+            slot(newx,"estimate")<-x@estimate[i]
+            slot(newx,"rawp")<-x@rawp[i]
+            if(sum(length(x@adjp))) slot(newx,"adjp")<-x@adjp[i]
+            if(sum(length(x@label))) slot(newx,"label")<-x@label[i]
+            d<-dim(x@reject)
+            dn<-dimnames(x@reject)
+            if(sum(d)) slot(newx,"reject")<-matrix(x@reject[i,],nrow=ifelse(i[1]==TRUE & !is.numeric(i),d[1],length(i)),ncol=d[-1],dimnames=list(dn[[1]][i],dn[[2]]))
+            if(sum(dim(x@nulldist))) slot(newx,"nulldist")<-x@nulldist[i,]
+            if(sum(dim(x@marg.par))) slot(newx,"marg.par")<-x@marg.par[i,]
+            if(sum(dim(x@rawdist))) slot(newx,"rawdist")<-x@rawdist[i,]
+            if(sum(dim(x@falsepos))) slot(newx,"falsepos")<-x@falsepos[i,]
+            if(sum(dim(x@truepos))) slot(newx,"truepos")<-x@truepos[i,]
+            if(sum(dim(x@errormat))) slot(newx,"errormat")<-x@errormat[i,]
+            slot(newx,"lqv")<-x@lqv[i]
+            if(sum(dim(x@index))) slot(newx,"index")<-x@index[i,]
+	    invisible(newx)
+          })
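+# Illustrative use of "[" (comment only, not executed; ebmtp.obj is a
+# hypothetical EBMTP fit): keep the 10 hypotheses with smallest adjusted p:
+#   top10 <- ebmtp.obj[order(ebmtp.obj@adjp)[1:10]]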
+
+setMethod("as.list","EBMTP",
+          function(x,...){
+            snames<-slotNames(x)
+            n<-length(snames)
+            lobj<-list()
+            for(i in 1:n) lobj[[i]]<-slot(x,snames[i])
+            names(lobj)<-snames
+            invisible(lobj)
+          })
+
+
+if( !isGeneric("EBupdate") )
+    setGeneric("EBupdate", function(object, ...) standardGeneric("EBupdate"))
+
+setMethod("EBupdate","EBMTP",
+          function(object,formula.="missing",alternative="two.sided",typeone="fwer",
+          k=0,q=0.1,alpha=0.05,smooth.null=FALSE,
+          method="common.cutoff",prior="conservative",bw="nrd",kernel="gaussian",
+          get.adjp=TRUE,nulldist="boot.cs",keep.rawdist=FALSE,keep.nulldist=TRUE,
+          keep.falsepos=FALSE,keep.truepos=FALSE,keep.errormat=FALSE,keep.Hsets=FALSE,
+          marg.null=object@marg.null,marg.par=object@marg.par,ncp=NULL,
+          keep.label=TRUE,...,evaluate=TRUE){
+            p <- length(object@statistic)
+            m <- length(object@statistic)
+            B <- dim(object@nulldist)[2]
+            if(sum(object@rawdist)!=0) B <- dim(object@rawdist)[2]
+            ## checking
+            #Error rate
+            ERROR<-c("fwer","gfwer","tppfp","fdr")
+            typeone<-ERROR[pmatch(typeone,ERROR)]
+            if(is.na(typeone)) stop(paste("Invalid typeone, try one of ",ERROR,sep=""))
+            if(any(alpha<0) | any(alpha>1)) stop("Nominal level alpha must be between 0 and 1.")
+            nalpha<-length(alpha)
+            reject<-
+              if(nalpha) array(dim=c(p,nalpha),dimnames=list(names(object@rawp),paste("alpha=",alpha,sep="")))
+              else matrix(nrow=0,ncol=0)
+
+            if(typeone=="fwer"){
+              if(length(k)>1) k<-k[1]
+              if(sum(k)!=0) stop("FWER control, by definition, requires k=0.  To control k false positives, please select typeone='gfwer'.")
+            }
+     
+            if(typeone=="gfwer"){
+              if(length(k)>1){
+                k<-k[1]
+                warning("Can only compute gfwer adjp for one value of k at a time (using first value). Use EBupdate() to get results for other values of k.")
+              }
+              if(k<0) stop("Number of false positives cannot be negative.")
+              if(k>=p) stop(paste("Number of false positives must be less than number of tests=",p,sep=""))
+            }
+
+            if(typeone=="tppfp"){
+              if(length(q)>1){
+                q<-q[1]
+                warning("Can only compute tppfp adjp for one value of q at a time (using first value). Use EBupdate() to get results for other values of q.")
+              }
+              if(q<0) stop("Proportion of false positives, q, cannot be negative.")
+              if(q>1) stop("Proportion of false positives, q, must be less than 1.")
+            }
+            
+            #methods
+            METHODS<-c("common.cutoff","common.quantile")
+            method<-METHODS[pmatch(method,METHODS)]
+            if(is.na(method)) stop(paste("Invalid method, try one of ",METHODS,sep=""))
+            if(method=="common.quantile") stop("Common quantile procedure not currently implemented.  Common cutoff is pretty good, though.")
+            
+            #prior
+            PRIORS<-c("conservative","ABH","EBLQV")
+            prior<-PRIORS[pmatch(prior,PRIORS)]
+            if(is.na(prior)) stop(paste("Invalid prior, try one of ",PRIORS,sep=""))
+
+            #get args from previous call
+            call.list<-as.list(object@call)
+
+            if(is.null(call.list$test)) test<-"t.twosamp.unequalvar" #default
+            else test<-call.list$test
+            ### nulldistn
+            ### Preserve the old null dist, if kept (i.e., could have alternatively kept raw dist)
+            nulldistn <- object@nulldist
+            if(object@nulldist.type=="perm") stop("No way to update objects which originally used the permutation distribution. No available options for storing nulldist.  Rawdist can only be stored for bootstrap distribution.")
+            ### For boot.qt, make sure values of marg.null and marg.par, if set previously, are kept.
+            ### Otherwise, these become null, but the original values are set here before proceeding.
+            prev.marg.null <- object@marg.null
+            prev.marg.par <- object@marg.par
+
+            if(!ncol(object@nulldist) & !ncol(object@rawdist)) stop("Update method requires either keep.raw and/or keep.null=TRUE in original call to MTP")
+            nulldist<- # just setting character value of what nulldist should be
+               if(is.null(call.list$nulldist)) "boot.cs"
+               else call.list$nulldist
+
+         ## new call
+               newcall.list<-as.list(match.call())
+               changed<-names(call.list)[names(call.list)%in%names(newcall.list)]
+               changed<-changed[changed!=""]
+               added<-names(newcall.list)[!(names(newcall.list)%in%names(call.list))]
+               added<-added[added!="x"]
+               for(n in changed) call.list[[n]]<-newcall.list[[n]]
+               for(n in added) call.list[[n]]<-newcall.list[[n]]
+               newcall<-as.call(call.list)
+               ### NB can still use "call.list" to help with what has been changed.
+               df <- marg.par
+               call.list$marg.par <- df
+               
+         ## return call if evaluate is false
+               if(!evaluate) return(newcall)
+
+         ## else redo MTP
+            else{
+              num<-object@estimate
+              snum<-1
+              if(alternative=="two.sided"){
+                snum<-sign(num)
+                num<-abs(num)
+              }
+              if(alternative=="less"){
+                snum<-(-1)
+                num<-(-num)
+              }
+
+              if(object@nulldist.type!="boot.qt"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+              }
+              if("alternative" %in% changed | "alternative" %in% added) alternative <- call.list$alternative
+              if("marg.null" %in% changed | "marg.null" %in% added) marg.null <- call.list$marg.null
+              if("marg.par" %in% changed | "marg.par" %in% added){
+                  marg.par <- call.list$marg.par
+                  if(is.numeric(marg.par) & !is.matrix(marg.par)) marg.par <- matrix(rep(marg.par,length(object@statistic)),nrow=length(object@statistic),ncol=length(marg.par),byrow=TRUE)
+                }
+              if("perm.mat" %in% changed | "perm.mat" %in% added) perm.mat <- call.list$perm.mat
+              if("ncp" %in% changed | "ncp" %in% added) ncp <- call.list$ncp
+              if("MVN.method" %in% changed | "MVN.method" %in% added | "penalty" %in% changed | "penalty" %in% added |"ic.quant.trans" %in% changed | "ic.quant.trans" %in% added) stop("Changing 'MVN.method', 'ic.quant.trans' or 'penalty' requires new calculation of null distribution using nulldist='ic'.  Please use a new call to EBMTP.")
+         ### Check value of nulldist in this case
+              if("nulldist" %in% changed | "nulldist" %in% added) {
+                nulldist <- call.list$nulldist
+         ### Otherwise, nulldist keeps the old/default value in the original call.list, not the updated one.
+                if(nulldist=="perm") stop("Calls to update() cannot include changes involving the permutation distribution. Please try a separate call to MTP() with nulldist='perm'")
+                if(object@nulldist.type=="ic") stop("You cannot update an influence curve null distribution to another choice of null distribution.  Valid only for changes in the bootstrap distribution when keep.rawdist=TRUE.  Please try a separate call to MTP() if nulldist='boot' or 'perm' desired. Changing 'MVN.method', 'ic.quant.trans' or 'penalty' also requires new calculation of null distribution using nulldist='ic'")
+                if(nulldist=="ic") stop("Calls to update() cannot include changes involving the influence curve null distribution. Please try a separate call to MTP() with nulldist='ic'")
+                if(!ncol(object@rawdist)) stop("Calls to update() involving changes in bootstrap-based null distributions require keep.rawdist=TRUE")
+
+    ### Just recompute (bootstrap-based) nulldistn - way easier this way (with keep.raw=TRUE)
+    ### "Easy" ones first.  Need to get tau0 and theta0.
+              if(nulldist=="ic"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+              }
+                
+              if(nulldist=="boot" | nulldist=="boot.cs" | nulldist=="boot.ctr"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+                tau0<-1
+                theta0<-0
+                if(test=="f"){
+                  theta0<-1
+                  tau0<-2/(length(unique(object@label))-1)
+                }
+                if(test=="f.twoway"){
+                  theta0<-1
+                  tau0 <- 2/((length(unique(object@label))*length(gregexpr('12', paste(object@label, collapse=""))[[1]]))-1)
+                }
+                if(nulldist=="boot") nulldistn <- center.scale(object@rawdist, theta0, tau0, alternative)
+                if(nulldist=="boot.cs") nulldistn <- center.scale(object@rawdist, theta0, tau0, alternative)
+                if(nulldist=="boot.ctr") nulldistn <- center.only(object@rawdist, theta0, alternative)
+              }
+
+              if(nulldist=="boot.qt"){
+                if("marg.null" %in% changed | "marg.null" %in% added) marg.null <- call.list$marg.null
+                else marg.null <- NULL
+                if("marg.par" %in% changed | "marg.par" %in% added){
+                  marg.par <- call.list$marg.par
+                  if(is.numeric(marg.par) & !is.matrix(marg.par)) marg.par <- matrix(rep(marg.par,length(object@statistic)),nrow=length(object@statistic),ncol=length(marg.par),byrow=TRUE)
+                }
+                else marg.par <- NULL
+      
+        ### If these additional args are changed or added, these will be the new defaults, but they will not be NULL
+                ### Cannot be NULL for object defn.
+                ncp <- if(is.null(call.list$ncp)) 0 else call.list$ncp
+                perm.mat <- if(is.null(call.list$perm.mat)) NULL else call.list$perm.mat
+                if(!is.null(perm.mat)){
+                  if(length(object@statistic)!=dim(perm.mat)[1]) stop("Permutation and bootstrap matrices must have same number of rows (hypotheses).")
+                }
+
+                nstats <- c("t.twosamp.unequalvar","z.cor","lm.XvsZ","lm.YvsXZ","coxph.YvsXZ")
+                tstats <- c("t.onesamp","t.twosamp.equalvar","t.pair","t.cor")
+                fstats <- c("f","f.block","f.twoway")
+         # If default (=NULL), set values of marg.null to pass on.
+                if(is.null(marg.null)){
+                  if(any(nstats == test)) marg.null="normal"
+                  if(any(tstats == test)) marg.null="t"
+                  if(any(fstats == test)) marg.null="f"
+                }
+                else{ # Check to see that user-supplied entries make sense.  
+                  MARGS <- c("normal","t","f","perm")
+                  marg.null <- MARGS[pmatch(marg.null,MARGS)]
+                  if(is.na(marg.null)) stop("Invalid marginal null distribution. Try one of: normal, t, f, or perm")
+                  if(any(tstats==test) & marg.null == "f") stop("Choice of test stat and marginal nulldist do not match")
+                  if(any(fstats==test) & (marg.null == "normal" | marg.null=="t")) stop("Choice of test stat and marginal nulldist do not match")
+                  if(marg.null=="perm" & is.null(perm.mat)) stop("Must supply a matrix of permutation test statistics if marg.null='perm'")
+                  if(marg.null=="f" & ncp < 0) stop("Cannot have negative noncentrality parameter with F distribution.")
+                }
+    
+        # If default (=NULL), set values of marg.par. Return as m by 1 or 2 matrix.
+                if(is.null(marg.par)){
+                  marg.par <- switch(test,
+                          t.onesamp = object@sampsize-1,
+                          t.twosamp.equalvar = object@sampsize-2,
+                          t.twosamp.unequalvar = c(0,1),
+                          t.pair = object@sampsize-2,
+                          f = c(sum(is.finite(unique(object@label)))-1,object@sampsize-sum(is.finite(unique(object@label)))),
+                          f.twoway = {
+                            c(sum(is.finite(unique(object@label)))-1,object@sampsize-(sum(is.finite(unique(object@label)))*length(gregexpr('12', paste(object@label, collapse=""))[[1]]))-2)
+                            },
+                          lm.XvsZ = c(0,1),
+                          lm.YvsXZ = c(0,1),
+                          coxph.YvsXZ = c(0,1),
+                          t.cor = object@sampsize-2,
+                          z.cor = c(0,1)
+                          )
+                  marg.par <- matrix(rep(marg.par,length(object@statistic)),nrow=length(object@statistic),ncol=length(marg.par),byrow=TRUE)
+        }
+                else{ # Check that user-supplied values of marg.par make sense (marg.par != NULL)
+                  if((marg.null=="t" | marg.null=="f") & any(marg.par[,1]==0)) stop("Cannot have zero df with t or F distributions. Check marg.par settings")
+                  if(marg.null=="t" & dim(marg.par)[2]>1) stop("Too many parameters for t distribution.  marg.par should have length 1.")
+                  if((marg.null=="f" | marg.null=="normal") & dim(marg.par)[2]!=2) stop("Incorrect number of parameters defining marginal null distribution.  marg.par should have length 2.")
+                }
+                nulldistn <- quant.trans(object@rawdist, marg.null, marg.par, ncp, alternative, perm.mat)
+              }
+              }
+
+              ### Cool. Now pick up where we left off.
+              ##performing multiple testing
+              #rawp values
+              obs<-rbind(num,object@estimate/object@statistic,sign(object@estimate))
+              rawp<-apply((obs[1,]/obs[2,])<=nulldistn,1,mean)
+		     if(smooth.null & min(rawp,na.rm=TRUE)==0){
+                       zeros<-rawp==0
+                       if(sum(zeros)==1){
+                         den<-density(nulldistn[zeros,],to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+                         rawp[zeros]<-sum(den$y[den$x>=(obs[1,zeros]/obs[2,zeros])])/sum(den$y)
+                       }
+                       else{
+                         den<-apply(nulldistn[zeros,],1,density,to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+                         newp<-NULL
+                         stats<-obs[1,zeros]/obs[2,zeros]
+                         for(i in 1:length(den)) newp[i]<-sum(den[[i]]$y[den[[i]]$x>=stats[i]])/sum(den[[i]]$y)
+                         rawp[zeros]<-newp
+                       }
+                       rawp[rawp<0]<-0
+                     }
+
+              #c, cr, adjp - this is where the function gets a lot different from MTP.
+              ### Begin nuts and bolts of EB here.
+              ### Set G function of type I error rates
+              #REMOVED CLOSURE SO V,S DEFINED
+              #error.closure <- switch(typeone, fwer=G.VS(V,S=NULL,tp=TRUE,bound=0),
+              #                        gfwer=G.VS(V,S=NULL,tp=TRUE,bound=k),
+              #                        tppfp=G.VS(V,S,tp=TRUE,bound=q),
+              #                        fdr=G.VS(V,S,tp=FALSE,bound=NULL)
+              #                        )
+
+              ### Generate guessed sets of true null hypotheses
+              ### This function relates null and full densities.  Sidedness should be accounted for above.
+              statistic <- (obs[3,]*obs[1,]/obs[2,]) #observed, with sign
+              Tn <- obs[1,]/obs[2,]  # for sidedness, matching with nulldistn
+              
+              H0.sets <- Hsets(Tn, nullmat=nulldistn, bw, kernel, prior=prior, B=dim(object@nulldist)[2], rawp=object@rawp)
+              EB.h0M <- H0.sets$EB.h0M
+              prior.type <- prior
+              prior.val <- H0.sets$prior
+              lqv <- H0.sets$pn.out
+              H0.sets <- H0.sets$Hsets.mat
+              
+              m <- length(Tn)
+              ### B defined in global environment
+              ### For adjusted p-values, just sort now and be able to get the index.
+              ### We want to sort the test statistics in terms of their evidence against the null
+              ### i.e., from largest to smallest.
+              ord.Tn <- order(Tn,decreasing=TRUE)
+              sort.Tn <- Tn[ord.Tn]
+              Z.nulls <- nulldistn[ord.Tn,]*H0.sets[ord.Tn,]
+              Tn.mat <- (1-H0.sets[ord.Tn,])*matrix(rep(sort.Tn,B),nrow=m,ncol=B)
+
+              ### Rather than using a sieve of candidate cutoffs, for adjp, test statistics
+              ### are used as cutoffs themselves.
+              cutoffs <- sort.Tn
+              clen <- m
+              cat("counting guessed false positives...", "\n")
+              Vn <- .Call(VScount,as.numeric(Z.nulls),as.numeric(cutoffs),as.integer(m),
+                as.integer(B),as.integer(clen),NAOK=TRUE)
+              cat("\n")
+              Vn <- matrix(Vn, nrow=clen, ncol=B)
+              
+              if(typeone=="fwer" | typeone=="gfwer") Sn <- NULL
+              else{
+                cat("counting guessed true positives...", "\n")
+                Sn <- .Call(VScount,as.numeric(Tn.mat),as.numeric(cutoffs),as.integer(m),
+                  as.integer(B),as.integer(clen),NAOK=TRUE)
+                cat("\n")
+                Sn <- matrix(Sn, nrow=clen, ncol=B)
+              }
+
+              ### Set G function of type I error rates
+              #REMOVED CLOSURE: G <-  error.closure(Vn,Sn)
+              
+              G <- switch(typeone, fwer=G.VS(Vn,Sn,tp=TRUE,bound=0),
+                         gfwer=G.VS(Vn,Sn,tp=TRUE,bound=k),
+                         tppfp=G.VS(Vn,Sn,tp=TRUE,bound=q),
+                         fdr=G.VS(Vn,Sn,tp=FALSE,bound=NULL)
+                )
+
+              Gmeans <- rowSums(G,na.rm=TRUE)/B
+
+              ### Now get adjps and rejection indicators.
+              adjp <- rep(0,m)
+              for(i in 1:m){
+                adjp[i] <- min(Gmeans[i:m])
+              }
+
+              ### Now reverse order to go back to original order of test statistics.
+              rev.order <- rep(0,m)
+              for(i in 1:m){
+                rev.order[i] <- which(sort.Tn==Tn[i])
+              }
+              adjp <- adjp[rev.order]
+              if(keep.falsepos) Vn <- Vn[rev.order,]
+              else Vn <- matrix(0,nrow=0,ncol=0)
+              if(keep.truepos) Sn <- Sn[rev.order,]
+              else Sn <- matrix(0,nrow=0,ncol=0)
+              if(keep.errormat) G <- G[rev.order,]
+              else G <- matrix(0,nrow=0,ncol=0)
+              if(!keep.Hsets) H0.sets <- matrix(0,nrow=0,ncol=0)
+              
+              # No confidence regions, but a vector of rejection logicals and the cutoff, if applicable
+              ### Generate matrix of rejection logicals.
+              EB.reject <- matrix(rep(0,m),nrow=m,ncol=length(alpha))
+              dimnames(EB.reject) <- list(rownames(object@nulldist),paste("alpha", alpha, sep=""))
+              if(nalpha) for(a in 1:nalpha) EB.reject[,a]<-adjp<=alpha[a]
+              else EB.reject <- matrix(0,nrow=0,ncol=0)
+
+              ### Grab test statistics corresponding to cutoff, based on adjp.
+              #Leave out.
+              #cutoff <- rep(0,nalpha)
+              #for(a in 1:nalpha){
+              #  if(sum(adjp<=alpha[a])>0){
+              #   temp <- max(adjp[adjp<=alpha[a]])
+              #   cutoff.ind <- which(adjp==temp)
+              #   cutoff[a] <- max(Tn[cutoff.ind])
+              # }
+              #  else cutoff[a] <- NA
+              #}
+
+              #output results
+              if(!keep.nulldist) nulldistn <- matrix(nrow=0,ncol=0)
+              if(!keep.rawdist) object@rawdist <- matrix(nrow=0,ncol=0)
+              out<-new("EBMTP",statistic=object@statistic,estimate=object@estimate,
+                sampsize=object@sampsize,rawp=rawp,adjp=adjp,
+                reject=EB.reject,rawdist=object@rawdist,nulldist=nulldistn,
+                nulldist.type=nulldist,marg.null=marg.null,marg.par=marg.par,label=object@label,
+                falsepos=Vn,truepos=Sn,errormat=G,Hsets=H0.sets,EB.h0M=EB.h0M,
+                prior=prior.val,prior.type=prior.type,lqv=lqv,
+                index=object@index,call=newcall,seed=object@seed)
+		return(out)
+               } #re else redo MTP
+             } # re function
+             ) # re set method
diff --git a/R/ICQTNullDist.R b/R/ICQTNullDist.R
new file mode 100644
index 0000000..4f006e7
--- /dev/null
+++ b/R/ICQTNullDist.R
@@ -0,0 +1,321 @@
+
+# No robust correlation test statistics.
+# Want to return a 3 by M matrix of observations.
+corr.Tn <- function(X,test,alternative,use="pairwise"){
+  P <- dim(X)[1]
+  M <- P*(P-1)/2
+  N <- dim(X)[2]
+  VCM <- cov(t(X),use=use)
+  Cor <- cov2cor(VCM)
+  Cov.v <- VCM[lower.tri(VCM)] # vectorize.
+  Cor.v <- Cor[lower.tri(Cor)] # vectorize.
+  if(test=="t.cor") num <- sqrt(N-2)*Cor.v/sqrt(1-Cor.v^2)
+  if(test=="z.cor") num <- sqrt(N-3)*0.5*log((1+Cor.v)/(1-Cor.v))
+  denom <- 1
+  if(alternative=="two.sided"){
+    snum<-sign(num)
+    num<-abs(num)
+  }
+  else{
+    if(alternative=="less"){
+      snum<-(-1)
+      num<-(-num)
+    }
+    else snum<-1
+  }
+  rbind(num,denom,snum)
+}
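+
+# A minimal usage sketch (illustrative, not part of the upstream code): with
+# 10 variables and 20 samples, corr.Tn() returns a 3 x choose(10,2) matrix
+# holding (numerator, denominator, sign) for all pairwise correlations.
+#   X <- matrix(rnorm(200), nrow=10)
+#   Tn <- corr.Tn(X, test="t.cor", alternative="two.sided")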
+
+ic.tests <- c("t.onesamp","t.pair","t.twosamp.equalvar","t.twosamp.unequalvar","lm.XvsZ","lm.YvsXZ","t.cor","z.cor")
+
+corr.null <- function(X,W=NULL,Y=NULL,Z=NULL,test="t.twosamp.unequalvar",alternative="two.sided",use="pairwise",B=1000,MVN.method="mvrnorm",penalty=1e-6,ic.quant.trans=FALSE,marg.null=NULL,marg.par=NULL,perm.mat=NULL){
+  # Most sanity checks conducted already...
+  p <- dim(X)[1]
+  m <- dim(X)[1] 
+  n <- dim(X)[2] 
+  cat("calculating vector influence curve...", "\n")
+
+  if(test=="t.onesamp" | test=="t.pair"){
+    #t.pair sanity checks and formatting done in stat.closure section
+    #in test.R
+    if(is.null(W)) IC.Cor <- cor(t(X),use=use)
+    else IC.Cor <- IC.CorXW.NA(X,W,N=n,M=p,output="cor")
+  }
+
+  if(test=="t.twosamp.equalvar" | test=="t.twosamp.unequalvar"){
+    uY<-sort(unique(Y))
+    if(length(uY)!=2) stop("Must have two class labels for this test")
+    n1 <- sum(Y==uY[1])
+    n2 <- sum(Y==uY[2])
+    if(is.null(W)){
+      cov1 <- cov(t(X[,Y==uY[1]]),use=use)
+      cov2 <- cov(t(X[,Y==uY[2]]),use=use)
+    }
+    else{
+      cov1 <- IC.CorXW.NA(X[,Y==uY[1]],W[,Y==uY[1]],N=n1,M=p,output="cov")
+      cov2 <- IC.CorXW.NA(X[,Y==uY[2]],W[,Y==uY[2]],N=n2,M=p,output="cov")
+    }
+    newcov <- cov1/n1 + cov2/n2
+    IC.Cor <- cov2cor(newcov)
+  }
+
+  # Regression ICs written to automatically incorporate weights.
+  # If W=NULL, then give equal weights.
+  if(test=="lm.XvsZ"){
+    if(is.null(Z)) Z <- matrix(1,nrow=n,ncol=1)
+    else Z <- cbind(Z,1)
+    if(is.null(W)) W <- matrix(1/n,nrow=p,ncol=n)
+    IC.i <- matrix(0,nrow=m,ncol=n)
+    for(i in 1:m){
+      drop <- is.na(X[i,]) | is.na(rowSums(Z)) | is.na(W[i,])
+      x <- as.numeric(X[i,!drop])
+      z <- Z[!drop,]
+      w <- W[i,!drop]
+      nn <- n-sum(drop)
+      EXtWXinv <- solve(t(z)%*%(w*diag(nn))%*%z)*sum(w)
+      res.m <- lm.wfit(z,x,w)$res
+      if(sum(drop)>0) res.m <- insert.NA(which(drop==TRUE),res.m)
+      EXtWXinvXt <- rep(0,n)
+      for(j in 1:n){
+        EXtWXinvXt[j] <- (EXtWXinv%*%(t(Z)[,j]))[1]
+      }
+      IC.i[i,] <- res.m * EXtWXinvXt
+    }
+    IC.Cor <- IC.Cor.NA(IC.i,W,N=n,M=p,output="cor")
+  }
+  
+  if(test=="lm.YvsXZ"){
+    if(is.null(Y)) stop("An outcome variable is needed for this test")
+    if(length(Y)!=n) stop(paste("Dimension of outcome Y=",length(Y),", not equal dimension of data=",n,sep=""))
+    if(is.null(Z)) Z <- matrix(1,n,1)
+    else Z <- cbind(Z,1)
+    if(is.null(W)) W <- matrix(1,nrow=p,ncol=n)
+    IC.i <- matrix(0,nrow=m,ncol=n)
+    for(i in 1:m){
+      drop <- is.na(X[i,]) | is.na(rowSums(Z)) | is.na(W[i,])
+      x <- as.numeric(X[i,!drop])
+      z <- Z[!drop,]
+      w <- W[i,!drop]
+      y <- Y[!drop]
+      nn <- n-sum(drop)
+      xz <- cbind(x,z)
+      XZ <- cbind(X[i,],Z)
+      EXtWXinv <- solve(t(xz)%*%(w*diag(nn))%*%xz)*sum(w)
+      res.m <- lm.wfit(xz,y,w)$res
+      if(sum(drop)>0) res.m <- insert.NA(which(drop==TRUE),res.m)
+      EXtWXinvXt <- rep(0,n)
+      for(j in 1:n){
+        EXtWXinvXt[j] <- (EXtWXinv%*%(t(XZ)[,j]))[1]
+      }
+      IC.i[i,] <- res.m * EXtWXinvXt
+    }
+    IC.Cor <- IC.Cor.NA(IC.i,W,N=n,M=p,output="cor")
+  }
+  
+  if(test=="t.cor" | test=="z.cor"){
+    if(!is.null(W)) warning("Weights not currently implemented for tests of correlation parameters.  Proceeding with unweighted version")
+    # Change of dimension
+    P <- dim(X)[1] -> p # Number of variables.
+    M <- P*(P-1)/2 -> m # Actual number of pairwise hypotheses.
+    N <- dim(X)[2] -> n # Number of samples.
+    ind <- t(combn(P,2))
+    VCM <- cov(t(X),use="pairwise")
+    Cor <- cov2cor(VCM)
+    Vars <- diag(VCM)
+    Cov.v <- VCM[lower.tri(VCM)] # vectorize.
+    Cor.v <- Cor[lower.tri(Cor)] # vectorize.
+    X2 <- X*X
+    EX <- rowMeans(X,na.rm=TRUE)
+    E2X <- rowMeans(X2,na.rm=TRUE)
+    Var1.v <- Vars[ind[,1]]
+    Var2.v <- Vars[ind[,2]]
+    EX1.v <- EX[ind[,1]]
+    EX2.v <- EX[ind[,2]]
+    E2X1.v <- E2X[ind[,1]]
+    E2X2.v <- E2X[ind[,2]]
+    X.vec1 <- X[ind[,1],]
+    X.vec2 <- X[ind[,2],]
+    X.vec12 <- X.vec1*X.vec2
+    EX1X2.v <- rowMeans(X.vec12,na.rm=TRUE)
+
+    cons <- 1/sqrt(Var1.v*Var2.v)
+    gradient <- matrix(1,nrow=M,ncol=5)
+    gradient[,1] <- EX1.v*Cov.v/Var1.v - EX2.v
+    gradient[,2] <- EX2.v*Cov.v/Var2.v - EX1.v
+    gradient[,3] <- -0.5*Cov.v/Var1.v
+    gradient[,4] <- -0.5*Cov.v/Var2.v
+
+    IC.i <- matrix(0, nrow=M, ncol=N)
+    for(i in 1:N){
+      diffs.i <- diffs.1.N(X[ind[,1],i], X[ind[,2],i], EX1.v, EX2.v, E2X1.v, E2X2.v, EX1X2.v)
+      IC.M <- rep(0,M)
+      for(j in 1:M){
+        IC.M[j] <- gradient[j,]%*%diffs.i[,j]
+      }
+      IC.i[,i] <- IC.M
+    }
+    IC.i <- cons * IC.i
+    IC.Cor <- IC.Cor.NA(IC.i,W=NULL,N=n,M=M,output="cor")
+  }
+
+  if(ic.quant.trans==FALSE) cat("sampling null test statistics...", "\n\n")
+  else cat("sampling null test statistics...", "\n")
+  
+  if(MVN.method=="mvrnorm") nulldist <- t(mvrnorm(n=B,mu=rep(0,dim(IC.Cor)[1]),Sigma=IC.Cor))
+  if(MVN.method=="Cholesky"){
+    IC.chol <- t(chol(IC.Cor+penalty*diag(dim(IC.Cor)[1])))
+    norms <- matrix(rnorm(B*dim(IC.Cor)[1]),nrow=dim(IC.Cor)[1],ncol=B)
+    nulldist <- IC.chol%*%norms
+  }
+  if(ic.quant.trans==TRUE){
+    cat("applying quantile transform...", "\n\n")
+    if(is.null(marg.null)){
+      marg.null <- "t"
+      if(test=="t.cor" | test=="z.cor" | test=="t.twosamp.equalvar") marg.par <- matrix(rep(dim(X)[2]-2,dim(IC.Cor)[1]),nrow=dim(IC.Cor)[1],ncol=1)
+      else if(test=="lm.XvsZ") marg.par <- matrix(rep(dim(X)[2]-dim(Z)[2],dim(IC.Cor)[1]),nrow=dim(IC.Cor)[1],ncol=1)
+      else if(test=="lm.YvsXZ") marg.par <- matrix(rep(dim(X)[2]-dim(Z)[2]-1,dim(IC.Cor)[1]),nrow=dim(IC.Cor)[1],ncol=1)
+      else marg.par <- matrix(rep(dim(X)[2]-1,dim(IC.Cor)[1]),nrow=dim(IC.Cor)[1],ncol=1)
+    }
+    if(test=="z.cor" & marg.null=="t") warning("IC nulldist for z.cor already MVN. Transforming to N-2 df t marginal distribution not advised.")
+    if(marg.null!="t" & marg.null!="perm") stop("IC nulldists can only be quantile transformed to a marginal t-distribution or user-supplied marginal permutation distribution")
+    if(marg.null=="t") nulldist <- tQuantTrans(nulldist,marg.null="t",marg.par,ncp=0,perm.mat=NULL)
+    if(marg.null=="perm") nulldist <- tQuantTrans(nulldist,marg.null="perm",marg.par=NULL,ncp=NULL,perm.mat=perm.mat)
+  }
+  if(alternative=="greater") nulldist <- nulldist
+  else if(alternative=="less") nulldist <- -nulldist
+  else nulldist <- abs(nulldist)
+  nulldist
+}
+
+# Function, given ICs for each individual, returns variance covariance
+# matrix or corresponding correlation matrix.
+IC.Cor.NA <- function(IC,W,N,M,output){
+  n <- dim(IC)[2]
+  m <- dim(IC)[1]
+  if(is.null(W)){
+    W <- matrix(1,nrow=dim(IC)[1],ncol=dim(IC)[2])
+    Wnew <- W/rowSums(W,na.rm=TRUE) # Equal weight, NA handling.
+  }
+  else Wnew <- W/rowSums(W,na.rm=TRUE)
+  IC.VC <- matrix(0,nrow=m,ncol=m)
+  for(i in 1:n){
+    temp <- crossprod(t(sqrt(Wnew[,i])*IC[,i]))
+    temp[is.na(temp)] <- 0
+    IC.VC <- IC.VC + temp
+  }
+  if(output=="cov") out <- IC.VC
+  if(output=="cor") out <- cov2cor(IC.VC)
+  out
+}
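+
+# Usage sketch (illustrative; ICs in rows, samples in columns): with equal
+# weights this yields the ordinary correlation of the influence curves, up
+# to the NA handling described above.
+#   IC <- matrix(rnorm(5*30), nrow=5)
+#   IC.Cor.NA(IC, W=NULL, N=30, M=5, output="cor")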
+ 
+# Weighted correlation. Generalizes cov.wt() to account for a matrix
+# of weights. Uses IC formulation instead of sweep() and crossprod().
+# May be slower/clunkier, but pretty transparent, and allows for NA
+# handling much like cor(...,use="pairwise") would.  That is, each
+# element of the correlation matrix returned uses the maximum amount
+# of information possible in obtaining individual elements of that
+# matrix.
+IC.CorXW.NA <- function(X,W,N,M,output){
+  n <- dim(X)[2]
+  m <- dim(X)[1]
+  XW <- X*W
+  EXW <- rowSums(XW,na.rm=TRUE)/rowSums(W,na.rm=TRUE)
+  ICW.i <- X-EXW
+  Wnew <- W/rowSums(W,na.rm=TRUE)
+  IC.VC <- matrix(0,nrow=m,ncol=m)
+  for(i in 1:n){
+    temp <- crossprod(t(sqrt(Wnew[,i])*ICW.i[,i]))
+    temp[is.na(temp)] <- 0
+    IC.VC <- IC.VC + temp
+  }
+  if(output=="cov") out <- IC.VC
+  if(output=="cor") out <- cov2cor(IC.VC)
+  out
+}
+
+# For regression ICs, a function to insert NAs into appropriate locations
+# of a vector of returned residuals.
+insert.NA <- function(orig.NA, res.vec){
+  for(i in 1:length(orig.NA)){
+    res.vec <- append(res.vec, NA, after=orig.NA[i]-1)
+  }
+  res.vec
+}
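+
+# Worked example (hypothetical values): residuals c(0.1,0.2,0.3) with NAs
+# originally at positions 2 and 5 are restored to c(0.1, NA, 0.2, 0.3, NA):
+#   insert.NA(c(2,5), c(0.1,0.2,0.3))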
+
+# For correlation ICs, a function to get diff vectors for all M.
+# This is the difference between estimates for
+# a sample size of one and a sample of size n.
+diffs.1.N <- function(vec1, vec2, e1, e2, e21, e22, e12){
+  diff.mat.1.N <- matrix(0,nrow=5,ncol=length(vec1))
+  diff.mat.1.N[1,] <- vec1 - e1
+  diff.mat.1.N[2,] <- vec2 - e2
+  diff.mat.1.N[3,] <- vec1*vec1 - e21
+  diff.mat.1.N[4,] <- vec2*vec2 - e22
+  diff.mat.1.N[5,] <- vec1*vec2 - e12
+  diff.mat.1.N
+}
+
+### For quantile transform, take a sample from the marginal null distribution.
+marg.samp <- function(marg.null,marg.par,m,B,ncp){
+out <- matrix(0,m,B)
+for(i in 1:m){
+  if(marg.null=="normal") out[i,] <- rnorm(B,mean=marg.par[i,1],sd=marg.par[i,2])
+  if(marg.null=="t") out[i,] <- rt(B,df=marg.par[i,1],ncp)
+  if(marg.null=="f") out[i,] <- rf(B,df1=marg.par[i,1],df2=marg.par[i,2],ncp)
+}
+out
+}
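+
+# Usage sketch: three rows of B=1000 draws from a t(5) marginal (marg.par is
+# an m x 1 matrix here; the df value is illustrative only).
+#   marg.samp(marg.null="t", marg.par=matrix(5,nrow=3,ncol=1), m=3, B=1000, ncp=0)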
+
+### Quantile transform streamlined for IC nulldists.
+tQuantTrans <- function(rawboot, marg.null, marg.par, ncp, perm.mat=NULL){
+  m <- dim(rawboot)[1]
+  B <- dim(rawboot)[2] 
+  ranks <- t(apply(rawboot,1,rank,ties.method="random"))
+  if(marg.null=="t") Z.quant <- marg.samp(marg.null="t",marg.par,m,B,ncp)
+  if(marg.null=="perm") Z.quant <- perm.mat
+  Z.quant <- t(apply(Z.quant,1,sort))
+  if(marg.null!="perm"){                   
+      for(i in 1:m){                         
+        Z.quant[i,] <- Z.quant[i,][ranks[i,]]
+      }
+    }
+  else{
+    Z.quant <- t(apply(Z.quant,1,quantile,probs=seq(0,1,length.out=B),na.rm=TRUE))
+      for(i in 1:m){                         
+        Z.quant[i,] <- Z.quant[i,][ranks[i,]]
+      }
+    }
+  Z.quant
+}
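+
+# Sketch of the rank-preserving quantile transform: each row of the result
+# acquires (approximately) the requested t margin while keeping the ordering
+# of the raw bootstrap row. With an illustrative df of 5:
+#   raw <- matrix(rnorm(300), nrow=3)
+#   Z   <- tQuantTrans(raw, marg.null="t", marg.par=matrix(5,3,1), ncp=0)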
+
+### Effective df for two sample test of means, unequal var.
+t.effective.df <- function(X,Y){
+  uY<-sort(unique(Y))
+  X1 <- X[Y==uY[1]]
+  X2 <- X[Y==uY[2]]
+  mu <- var(X2)/var(X1)
+  n1 <- length(Y[Y==uY[1]])
+  n2 <- length(Y[Y==uY[2]])
+  df <- (((1/n1)+(mu/n2))^2)/(1/((n1^2)*(n1-1)) + (mu^2)/((n2^2)*(n2-1)))
+  df
+}
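+
+# Welch-Satterthwaite sketch (toy data, illustrative): with equal variances
+# and n1=n2=10 the effective df should come out close to n1+n2-2 = 18.
+#   set.seed(1)
+#   t.effective.df(rnorm(20), rep(0:1, each=10))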
+
+
+
+
+
+
+    
+
+
+
+
+
+
+
+
+
+
+
+
+  
diff --git a/R/mt.basic.R b/R/mt.basic.R
new file mode 100755
index 0000000..5c58c10
--- /dev/null
+++ b/R/mt.basic.R
@@ -0,0 +1,418 @@
+mt.rawp2adjp<-function(rawp,proc=c("Bonferroni","Holm","Hochberg","SidakSS","SidakSD","BH","BY","ABH",
+                            "TSBH"), alpha=0.05, na.rm=FALSE)
+{
+
+    m<-length(rawp)
+    if(na.rm){
+        mgood<-sum(!is.na(rawp))
+    }else{
+        mgood<-m
+    }
+    n<-length(proc)
+    a<-length(alpha)
+    index<-order(rawp)
+    h0.ABH<-NULL
+    h0.TSBH<-NULL
+    spval<-rawp[index]
+
+    adjp<-matrix(0,m,n+1)
+    dimnames(adjp)<-list(NULL,c("rawp",proc))
+    adjp[,1]<-spval
+
+    if(is.element("TSBH",proc))
+    {
+    #N.B.: This method performed first in order to handle a potential $adjp
+    #dimension change in the case that length(alpha)>1.
+    #Could also be possibly done using more append() functions, should more
+    #alpha-dependent procedures be developed/included later.
+        TS.spot <- which(proc=="TSBH")
+        TSBHs<-paste("TSBH",alpha,sep="_")
+        newprocs<-append(proc,TSBHs,after=TS.spot)
+        newprocs<-newprocs[newprocs!="TSBH"]
+        adjp<-matrix(0,m,n+a)
+        dimnames(adjp)<-list(NULL,c("rawp",newprocs))
+        adjp[,1]<-spval
+
+                                        # Apply first-pass BH.
+        tmp<-spval
+        for(i in (m-1):1){
+            tmp[i]<-min(tmp[i+1],min((mgood/i)*spval[i],1,na.rm=TRUE),na.rm=TRUE)
+            if(is.na(spval[i])) tmp[i]<-NA
+        }
+                                        # Now use first-pass results to estimate h_0, the number of true nulls.
+                                        # These results depend on the nominal testing level, alpha.
+        h0.TSBH <- rep(0,length(alpha))
+        names(h0.TSBH) <- paste("h0.TSBH",alpha,sep="_")
+        for(i in 1:length(alpha)){
+            h0.TSBH[i] <- mgood - sum(tmp < alpha[i]/(1+alpha[i]),na.rm=TRUE)
+            adjp[,TS.spot+i]<-tmp*h0.TSBH[i]/mgood
+        }
+    }
+
+    if(is.element("Bonferroni",proc))
+    {
+        tmp<-mgood*spval
+        tmp[tmp>1]<-1
+        adjp[,"Bonferroni"]<-tmp
+    }
+
+    if(is.element("Holm",proc))
+    {
+        tmp<-spval
+        tmp[1]<-min(mgood*spval[1],1)
+        for(i in 2:m)
+            tmp[i]<-max(tmp[i-1],min((mgood-i+1)*spval[i],1))
+        adjp[,"Holm"]<-tmp
+    }
+
+    if(is.element("Hochberg",proc))
+    {
+        tmp<-spval
+        for(i in (m-1):1){
+            tmp[i]<-min(tmp[i+1],min((mgood-i+1)*spval[i],1,na.rm=TRUE),na.rm=TRUE)
+            if(is.na(spval[i])) tmp[i]<-NA
+        }
+        adjp[,"Hochberg"]<-tmp
+    }
+
+    if(is.element("SidakSS",proc))
+        adjp[,"SidakSS"]<-1-(1-spval)^mgood
+
+    if(is.element("SidakSD",proc))
+    {
+        tmp<-spval
+        tmp[1]<-1-(1-spval[1])^mgood
+        for(i in 2:m)
+            tmp[i]<-max(tmp[i-1],1-(1-spval[i])^(mgood-i+1))
+        adjp[,"SidakSD"]<-tmp
+    }
+
+    if(is.element("BH",proc))
+    {
+        tmp<-spval
+        for(i in (m-1):1){
+            tmp[i]<-min(tmp[i+1],min((mgood/i)*spval[i],1,na.rm=TRUE),na.rm=TRUE)
+            if(is.na(spval[i])) tmp[i]<-NA
+        }
+        adjp[,"BH"]<-tmp
+    }
+
+    if(is.element("BY",proc))
+    {
+        tmp<-spval
+        a<-sum(1/(1:mgood))
+        tmp[m]<-min(a*spval[m], 1)
+        for(i in (m-1):1){
+            tmp[i]<-min(tmp[i+1],min((mgood*a/i)*spval[i],1,na.rm=TRUE),na.rm=TRUE)
+            if(is.na(spval[i])) tmp[i]<-NA
+        }
+        adjp[,"BY"]<-tmp
+    }
+
+    if(is.element("ABH",proc))
+    {
+        ## First obtain estimate of h_0, the number of true null hypotheses.
+        tmp<-spval
+        h0.m <- rep(0,mgood)
+        for(k in 1:mgood){
+            h0.m[k] <- (mgood+1-k)/(1-spval[k])
+        }
+        grab <- min(which(diff(h0.m)>0),na.rm=TRUE)
+        h0.ABH <- ceiling(min(h0.m[grab],mgood))
+        ## Now apply BH procedure with adaptive correction.
+        for(i in (m-1):1){
+            tmp[i]<-min(tmp[i+1],min((mgood/i)*spval[i],1,na.rm=TRUE),na.rm=TRUE)
+            if(is.na(spval[i])) tmp[i]<-NA
+        }
+        adjp[,"ABH"]<-tmp*h0.ABH/mgood
+    }
+
+    list(adjp=adjp,index=index,h0.ABH=h0.ABH[1],h0.TSBH=h0.TSBH[1:length(alpha)])
+}
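+
+# Typical call (sketch, toy p-values): adjust with BH and Holm, then use
+# $index to map the sorted rows back to the original hypothesis order.
+#   rawp <- runif(100)^2
+#   res  <- mt.rawp2adjp(rawp, proc=c("BH","Holm"))
+#   adjp <- res$adjp[order(res$index), ]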
+
+###########################################################################
+
+mt.reject<-function(adjp,alpha)
+{
+  which<-adjp<=alpha[1]
+  dimnames(which)<-dimnames(adjp)
+
+  if(is.matrix(adjp))
+  {
+    r<-matrix(0,length(alpha),ncol(adjp))
+    for(i in 1:length(alpha))
+        r[i,] <- colSums(adjp<=alpha[i])
+    dimnames(r)<-list(alpha,dimnames(adjp)[[2]])
+  }
+
+  if(!is.matrix(adjp))
+  {
+    r<-rep(0,length(alpha))
+    for(i in 1:length(alpha))
+      r[i]<-sum(adjp<=alpha[i])
+  }
+
+  list(r=r,which=which)
+}
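+
+# Sketch: count rejections per procedure at several nominal levels, using
+# the reordered $adjp matrix from mt.rawp2adjp() above.
+#   mt.reject(adjp, alpha=c(0.01,0.05,0.1))$r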
+
+
+###########################################################################
+
+#need ... arg to legend to use with ... in mt.plot
+mt.legend<-function(x, y = NULL, legend, fill = NULL, col = "black", lty,
+    lwd, pch, angle = 45, density = NULL, bty = "o", bg = par("bg"),
+    pt.bg = NA, cex = 1, pt.cex = cex, pt.lwd = lwd, xjust = 0,
+    yjust = 1, x.intersp = 1, y.intersp = 1, adj = c(0, 0.5),
+    text.width = NULL, text.col = par("col"), merge = do.lines &&
+        has.pch, trace = FALSE, plot = TRUE, ncol = 1, horiz = FALSE,...)
+{
+    if (missing(legend) && !missing(y) && (is.character(y) ||
+        is.expression(y))) {
+        legend <- y
+        y <- NULL
+    }
+    mfill <- !missing(fill) || !missing(density)
+    xy <- xy.coords(x, y)
+    x <- xy$x
+    y <- xy$y
+    nx <- length(x)
+    if (nx < 1 || nx > 2)
+        stop("invalid coordinate lengths")
+    xlog <- par("xlog")
+    ylog <- par("ylog")
+    rect2 <- function(left, top, dx, dy, density = NULL, angle,
+        ...) {
+        r <- left + dx
+        if (xlog) {
+            left <- 10^left
+            r <- 10^r
+        }
+        b <- top - dy
+        if (ylog) {
+            top <- 10^top
+            b <- 10^b
+        }
+        rect(left, top, r, b, angle = angle, density = density,
+            ...)
+    }
+    segments2 <- function(x1, y1, dx, dy, ...) {
+        x2 <- x1 + dx
+        if (xlog) {
+            x1 <- 10^x1
+            x2 <- 10^x2
+        }
+        y2 <- y1 + dy
+        if (ylog) {
+            y1 <- 10^y1
+            y2 <- 10^y2
+        }
+        segments(x1, y1, x2, y2, ...)
+    }
+    points2 <- function(x, y, ...) {
+        if (xlog)
+            x <- 10^x
+        if (ylog)
+            y <- 10^y
+        points(x, y, ...)
+    }
+    text2 <- function(x, y, ...) {
+        if (xlog)
+            x <- 10^x
+        if (ylog)
+            y <- 10^y
+        text(x, y, ...)
+    }
+    if (trace)
+        catn <- function(...) do.call(cat, c(lapply(list(...),
+            formatC), list("\n")))
+    cin <- par("cin")
+    Cex <- cex * par("cex")
+    if (is.null(text.width))
+        text.width <- max(strwidth(legend, units = "user", cex = cex))
+    else if (!is.numeric(text.width) || text.width < 0)
+        stop("text.width must be numeric, >= 0")
+    xc <- Cex * xinch(cin[1], warn.log = FALSE)
+    yc <- Cex * yinch(cin[2], warn.log = FALSE)
+    xchar <- xc
+    yextra <- yc * (y.intersp - 1)
+    ymax <- max(yc, strheight(legend, units = "user", cex = cex))
+    ychar <- yextra + ymax
+    if (trace)
+        catn("  xchar=", xchar, "; (yextra,ychar)=", c(yextra,
+            ychar))
+    if (mfill) {
+        xbox <- xc * 0.8
+        ybox <- yc * 0.5
+        dx.fill <- xbox
+    }
+    do.lines <- (!missing(lty) && (is.character(lty) || any(lty >
+        0))) || !missing(lwd)
+    n.leg <- if (is.call(legend))
+        1
+    else length(legend)
+    n.legpercol <- if (horiz) {
+        if (ncol != 1)
+            warning("horizontal specification overrides: Number of columns := ",
+                n.leg)
+        ncol <- n.leg
+        1
+    }
+    else ceiling(n.leg/ncol)
+    if (has.pch <- !missing(pch) && length(pch) > 0) {
+        if (is.character(pch) && !is.na(pch[1]) && nchar(pch[1]) >
+            1) {
+            if (length(pch) > 1)
+                warning("Not using pch[2..] since pch[1] has multiple chars")
+            np <- nchar(pch[1])
+            pch <- substr(rep.int(pch[1], np), 1:np, 1:np)
+        }
+        if (!merge)
+            dx.pch <- x.intersp/2 * xchar
+    }
+    x.off <- if (merge)
+        -0.7
+    else 0
+    if (xlog)
+        x <- log10(x)
+    if (ylog)
+        y <- log10(y)
+    if (nx == 2) {
+        x <- sort(x)
+        y <- sort(y)
+        left <- x[1]
+        top <- y[2]
+        w <- diff(x)
+        h <- diff(y)
+        w0 <- w/ncol
+        x <- mean(x)
+        y <- mean(y)
+        if (missing(xjust))
+            xjust <- 0.5
+        if (missing(yjust))
+            yjust <- 0.5
+    }
+    else {
+        h <- n.legpercol * ychar + yc
+        w0 <- text.width + (x.intersp + 1) * xchar
+        if (mfill)
+            w0 <- w0 + dx.fill
+        if (has.pch && !merge)
+            w0 <- w0 + dx.pch
+        if (do.lines)
+            w0 <- w0 + (2 + x.off) * xchar
+        w <- ncol * w0 + 0.5 * xchar
+        left <- x - xjust * w
+        top <- y + (1 - yjust) * h
+    }
+    if (plot && bty != "n") {
+        if (trace)
+            catn("  rect2(", left, ",", top, ", w=", w, ", h=",
+                h, ", ...)", sep = "")
+        rect2(left, top, dx = w, dy = h, col = bg, density = NULL)
+    }
+    xt <- left + xchar + (w0 * rep.int(0:(ncol - 1), rep.int(n.legpercol,
+        ncol)))[1:n.leg]
+    yt <- top - (rep.int(1:n.legpercol, ncol)[1:n.leg] - 1) *
+        ychar - 0.5 * yextra - ymax
+    if (mfill) {
+        if (plot) {
+            fill <- rep(fill, length.out = n.leg)
+            rect2(left = xt, top = yt + ybox/2, dx = xbox, dy = ybox,
+                col = fill, density = density, angle = angle,
+                border = "black")
+        }
+        xt <- xt + dx.fill
+    }
+    if (plot && (has.pch || do.lines))
+        col <- rep(col, length.out = n.leg)
+    if (missing(lwd))
+        lwd <- par("lwd")
+    if (do.lines) {
+        seg.len <- 2
+        if (missing(lty))
+            lty <- 1
+        ok.l <- !is.na(lty) & (is.character(lty) | lty > 0)
+        lty <- rep(lty, length.out = n.leg)
+        lwd <- rep(lwd, length.out = n.leg)
+        if (trace)
+            catn("  segments2(", xt[ok.l] + x.off * xchar, ",",
+                yt[ok.l], ", dx=", seg.len * xchar, ", dy=0, ...)")
+        if (plot)
+            segments2(xt[ok.l] + x.off * xchar, yt[ok.l], dx = seg.len *
+                xchar, dy = 0, lty = lty[ok.l], lwd = lwd[ok.l],
+                col = col[ok.l])
+        xt <- xt + (seg.len + x.off) * xchar
+    }
+    if (has.pch) {
+        pch <- rep(pch, length.out = n.leg)
+        pt.bg <- rep(pt.bg, length.out = n.leg)
+        pt.cex <- rep(pt.cex, length.out = n.leg)
+        pt.lwd <- rep(pt.lwd, length.out = n.leg)
+        ok <- !is.na(pch) & (is.character(pch) | pch >= 0)
+        x1 <- (if (merge)
+            xt - (seg.len/2) * xchar
+        else xt)[ok]
+        y1 <- yt[ok]
+        if (trace)
+            catn("  points2(", x1, ",", y1, ", pch=", pch[ok],
+                ", ...)")
+        if (plot)
+            points2(x1, y1, pch = pch[ok], col = col[ok], cex = pt.cex[ok],
+                bg = pt.bg[ok], lwd = pt.lwd[ok])
+        if (!merge)
+            xt <- xt + dx.pch
+    }
+    xt <- xt + x.intersp * xchar
+    if (plot)
+        text2(xt, yt, labels = legend, adj = adj, cex = cex,
+            col = text.col)
+    invisible(list(rect = list(w = w, h = h, left = left, top = top),
+        text = list(x = xt, y = yt)))
+}
+
+mt.plot<-function(adjp,teststat, plottype="rvsa",logscale=FALSE,
+                  alpha=seq(0,1,length=100), proc="",leg=c(0,0),...)
+{
+  m<-nrow(adjp)
+  n<-ncol(adjp)
+  a<-length(alpha)
+
+  if(plottype=="rvsa")
+  {
+    r<-mt.reject(adjp,alpha)$r
+    matplot(alpha,r,xlab="Type I error rate",
+            ylab="Number of rejected hypotheses", type="l", ...)
+    mt.legend(leg[1],leg[2],proc,...)
+  }
+
+  if(plottype=="pvsr")
+  {
+    spval<-apply(adjp,2,sort)
+    matplot(1:m,spval,xlab="Number of rejected hypotheses",
+            ylab="Sorted adjusted p-values", type="l", ...)
+    mt.legend(leg[1],leg[2],proc,...)
+  }
+
+  if(plottype=="pvst")
+  {
+    if(!logscale)
+      matplot(teststat,adjp,xlab="Test statistics",
+              ylab="Adjusted p-values", type="p", ...)
+    if(logscale)
+      matplot(teststat,-log(adjp,10),xlab="Test statistics",
+              ylab="-log(adjusted p-values,10)", type="p", ...)
+    mt.legend(leg[1],leg[2],proc,...)
+  }
+  if(plottype=="pvsi")
+  {
+    if(!logscale)
+      matplot(1:m,adjp,xlab="index",ylab="Adjusted p-values", type="l", ...)
+    if(logscale)
+      matplot(1:m,-log(adjp,10),xlab="index",
+              ylab="-log(adjusted p-values,10)", type="l", ...)
+    mt.legend(leg[1],leg[2],proc,...)
+  }
+}
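+
+# Usage sketch (assumes adjp and teststat as produced above; leg places the
+# legend in user coordinates):
+#   mt.plot(adjp, teststat, plottype="rvsa", proc=c("rawp","BH","Holm"),
+#           leg=c(0.05,80), col=1:3, lty=1:3)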
+
+
+
diff --git a/R/mt.func.R b/R/mt.func.R
new file mode 100755
index 0000000..1e525d9
--- /dev/null
+++ b/R/mt.func.R
@@ -0,0 +1,375 @@
+.mt.BLIM<-2^30
+.mt.naNUM<- -93074815
+.mt.RandSeed<-3455660
+#the maximum number of permutation settings; it is not resettable in the
+#current version. The number comes from the largest integer being 2^32:
+#we exclude one sign bit, and exclude another bit for safety.
+
+
+#dyn.load("multtest.so")
+#X is a matrix data
+#classlabel is a vector
+mt.teststat<-function(X,classlabel,test="t",na=.mt.naNUM,nonpara="n")
+{
+    if(is.factor(classlabel)) classlabel<-unclass(classlabel)-1
+    extra<-max(classlabel)+1
+    mt.checkothers(na=na,nonpara=nonpara)
+    tmp<-mt.transformX(X,classlabel,test,na,nonpara)
+    options<-c(test,"abs","y"); #"abs" and "y" have no meaning here
+    res<-.C("get_stat",as.double(tmp$X),as.integer(tmp$m),
+               as.integer(tmp$n),as.integer(tmp$classlabel),as.double(na),
+               teststat=double(tmp$m),as.character(options),
+               as.integer(extra), PACKAGE="multtest")$teststat
+    res[abs(res)>=0.9*1e20]<-NA
+    res
+}
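+
+# Sketch using the golub data shipped with the package (see man/mt.teststat.Rd):
+#   data(golub)
+#   teststat <- mt.teststat(golub, golub.cl, test="t")
+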
+mt.teststat.num.denum<-function(X,classlabel,test="t",na=.mt.naNUM,nonpara="n")
+{
+    extra<-max(classlabel)+1
+    mt.checkothers(na=na,nonpara=nonpara)
+    tmp<-mt.transformX(X,classlabel,test,na,nonpara)
+    options<-c(test,"abs","y"); #"abs" and "y" have no meaning here
+    teststat<-.C("get_stat_num_denum",as.double(tmp$X),as.integer(tmp$m),
+	       as.integer(tmp$n),as.integer(tmp$classlabel),as.double(na),
+	       t.num=double(tmp$m),t.denum=double(tmp$m),as.character(options),
+               as.integer(extra), PACKAGE="multtest")
+
+    res<-cbind(teststat.num=teststat$t.num,teststat.denum=teststat$t.denum)
+    mt.niceres(res,X)
+}
+mt.maxT<-function(X,classlabel,test="t",side="abs",
+                  fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+{
+    if(is.factor(classlabel)) classlabel<-unclass(classlabel)-1
+    extra<-max(classlabel)+1
+    mt.checkothers(side=side,fixed.seed.sampling=fixed.seed.sampling,B=B,na=na,nonpara=nonpara)
+    tmp<-mt.transformX(X,classlabel,test,na,nonpara)
+    newB<-mt.getmaxB(classlabel,test,B)
+    if(B==0||newB<B)
+      fixed.seed.sampling<-"n" #as we're doing complete permutation
+    options<-c(test,side,fixed.seed.sampling);
+    res<-.C("get_maxT",as.double(tmp$X),as.integer(tmp$m),
+	    as.integer(tmp$n),as.integer(tmp$classlabel),as.double(na),
+	    t=double(tmp$m),p=double(tmp$m),adjP=double(tmp$m),
+	    as.integer(newB),index=integer(tmp$m),as.character(options),
+               as.integer(extra), PACKAGE="multtest")
+
+    res<-cbind(index=res$index,teststat=res$t,rawp=res$p,adjp=res$adjP)
+    mt.niceres(res,X,res[,1])
+}
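+
+# Sketch: maxT step-down adjusted p-values for the golub data with 1000
+# random permutations (a B above the maximum triggers complete enumeration):
+#   res <- mt.maxT(golub, golub.cl, test="t", B=1000)
+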
+mt.minP<-function(X,classlabel,test="t",side="abs",
+                  fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+{
+    if(is.factor(classlabel)) classlabel<-unclass(classlabel)-1
+    extra<-max(classlabel)+1
+    mt.checkothers(side=side,fixed.seed.sampling=fixed.seed.sampling,B=B,na=na,nonpara=nonpara)
+    tmp<-mt.transformX(X,classlabel,test,na,nonpara)
+    newB<-mt.getmaxB(classlabel,test,B)
+    if(B==0||newB<B)
+      fixed.seed.sampling<-"n" #as we're doing complete permutation
+    options<-c(test,side,fixed.seed.sampling);
+    res<-.C("get_minP",as.double(tmp$X),as.integer(tmp$m),
+	    as.integer(tmp$n),as.integer(tmp$classlabel),as.double(na),
+	    t=double(tmp$m),p=double(tmp$m),adjP=double(tmp$m),
+            plower=double(tmp$m),as.integer(newB),index=integer(tmp$m),
+            as.character(options),as.integer(extra), PACKAGE="multtest")
+
+    res<-cbind(index=res$index,teststat=res$t,rawp=res$p,adjp=res$adjP,plower=res$plower)
+    mt.niceres(res,X,res[,1])
+}
+mt.sample.teststat<-function(V,classlabel,test="t",fixed.seed.sampling="y",
+                       B=10000,na=.mt.naNUM,nonpara="n")
+{
+  extra<-max(classlabel)+1
+  mt.checkothers(fixed.seed.sampling=fixed.seed.sampling,B=B,na=na,nonpara=nonpara)
+  tmp<-mt.transformV(V,classlabel,test,na,nonpara)
+  newB<-mt.getmaxB(classlabel,test,B)
+  if(B==0||newB<B)
+    fixed.seed.sampling<-"n" #as we're doing complete permutation
+  options<-c(test,"abs",fixed.seed.sampling);#the "abs" has no meaning here.
+  res<-t(.C("get_samples_T",as.double(tmp$V),as.integer(tmp$n),
+          as.integer(tmp$classlabel),T=double(newB),as.double(na),
+          as.integer(newB),as.character(options),as.integer(extra),
+            PACKAGE="multtest")$T)
+  res[abs(res)>=0.9*1e20]<-NA
+  res
+}
+mt.sample.rawp<-function(V,classlabel,test="t",side="abs",
+                       fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+{
+  extra<-max(classlabel)+1
+  mt.checkothers(side=side,fixed.seed.sampling=fixed.seed.sampling,B=B,na=na,nonpara=nonpara)
+  tmp<-mt.transformV(V,classlabel,test,na,nonpara)
+  newB<-mt.getmaxB(classlabel,test,B)
+  if(B==0||newB<B)
+    fixed.seed.sampling<-"n" #as we're doing complete permutation
+  options<-c(test,side,fixed.seed.sampling);
+  res<-.C("get_samples_P",as.double(tmp$V),as.integer(tmp$n),
+          as.integer(tmp$classlabel), P=double(newB),as.double(na),
+          as.integer(newB),
+          as.character(options),as.integer(extra), PACKAGE="multtest")$P
+  res[abs(res)>=0.9*1e20]<-NA
+  res
+}
+mt.sample.label<-function(classlabel,test="t",
+                            fixed.seed.sampling="y",B=10000)
+{
+  extra<-max(classlabel)+1
+  tmp<-mt.transformL(classlabel,test)
+  mt.checkothers(fixed.seed.sampling=fixed.seed.sampling,B=B)
+  newB<-mt.getmaxB(classlabel,test,B)
+  if(B==0||newB<B)
+    fixed.seed.sampling<-"n" #as we're doing complete permutation
+  options<-c(test,"abs",fixed.seed.sampling); #the "abs" has no meaning here
+  res<-.C("get_sample_labels",as.integer(tmp$n),as.integer(tmp$classlabel),
+          as.integer(newB), S=integer(tmp$n*newB),as.character(options),
+          as.integer(extra), PACKAGE="multtest")$S
+  resl<-matrix(res,nrow=tmp$n)
+  if(test=="pairt"){
+    #restore the original classlabelling
+    resn<-matrix(0,nrow=2*tmp$n,ncol=newB)
+    for(i in c(1:tmp$n))
+      for(j in c(1:newB)){
+        if(resl[i,j])
+          resn[2*i,j]<-1
+        else resn[2*i-1,j]<-1
+      }
+    resl<-resn
+  }
+  t(resl)
+}
+#used for private function
+mt.checkclasslabel<-function(classlabel,test)
+{
+  classlabel<-as.integer(classlabel)
+  if((!is.character(test))||(!is.vector(test))||(length(test)>1)
+      ||(!any(test==c("t","f","blockf","pairt","wilcoxon","t.equalvar"))))
+     stop(paste("your setting of test is",test,"\nthe test needs to be a single character from c('t','f','blockf','pairt','wilcoxon','t.equalvar')"))
+  if((!is.integer(as.integer(classlabel))) ||(!is.vector(classlabel)))
+     stop("classlabel needs to be just a vector of integers")
+  if(any(test==c("t","wilcoxon","t.equalvar"))){
+    x<-sum(classlabel==0)
+    y<-sum(classlabel==1)
+    if((x==0)||(y==0)||(x+y<length(classlabel)))
+      stop(paste("in the t test, every class label needs to be 0 or 1, and neither the 0 group nor the 1 group can be empty\n",
+                 "The following is your setting of classlabel:",classlabel,"\n"))
+  }
+  if(test=="f"){
+      tab <- table(classlabel)
+      tab <- tab[tab>0]
+      if(length(tab)<2)
+          stop(paste("in F test, we need at least two groups\n",
+                     "Your setting of classlabel is", classlabel,
+                   "\n"))
+      if(sum(tab)-length(tab)<2)
+          stop(paste("Insufficient df for denominator of F",
+                     "the settings are", classlabel, "\n"))
+  }
+  if(test=="pairt"){
+    K<-max(classlabel)
+    if(K!=1)
+      stop(paste("in paired t test, we only handle two groups\n",
+                 "your classlabel=",classlabel,"\n"))
+    if(length(classlabel)%%2==1)
+      stop(paste("the classlabel length must be an even number in the paired t\n","your classlabel=",classlabel,"\n"))
+    halfn<-length(classlabel)%/%2
+    for(i in c(1:halfn)){
+      cur<-classlabel[(2*i-1):(2*i)]
+      if((sum(cur==0)==0)||(sum(cur==1)==0))
+        stop(paste("Some errors in specifying classlabel for the paired t test for the block",i,"located at","(",2*i-1,2*i,")\n",
+                   "your classlabel=",classlabel,"\n"))
+
+    }
+  }
+  if(test=="blockf"){
+    K<-max(classlabel)
+    if(K<1)
+      stop(paste("in blockF test, we need at least two groups\n",
+                 "your classlabel=",classlabel,"\n"))
+    if(length(classlabel)%%(K+1)>0)
+      stop(paste("the classlabel length must be the multiple of the number of treatments in the block test\n","your classlabel=",classlabel,"\n"))
+     B<-length(classlabel)%/%(K+1)
+    for(i in c(1:B)){
+      cur<-classlabel[c((K+1)*(i-1)+1):((K+1)*i)]
+      #to check if cur is a permutation of c(0,1,..,K)
+      for(j in c(0:K))
+        if(sum(cur==j)==0)
+          stop(paste("the classlabel has some errors for the blockf test at block",i,"located at",
+               "(",(K+1)*(i-1)+1,(K+1)*i,")","There is no element =",j,"within this block\n","your classlabel=",classlabel,"\n"))
+    }
+  }
+}
+mt.checkX<-function(X,classlabel,test){
+  if((!is.matrix(X)) || !(is.numeric(X)))
+     stop(paste("X needs to be a matrix\n","your X=",X,"\n"))
+  if(ncol(X)!=length(classlabel))
+    stop(paste("the number of columns of X needs to be the same as the length of classlabel\n","your X=",X,"\n your classlabel is",classlabel,"\n"))
+  mt.checkclasslabel(classlabel,test)
+}
+mt.checkV<-function(V,classlabel,test){
+  if((!is.vector(V)) || !(is.numeric(V)))
+     stop(paste("V needs to be a vector\n","your V=",V,"\n"))
+  if(length(V)!=length(classlabel))
+    stop("the length of V needs to be the same as the length of classlabel\n",
+         "your V=",V,"\n your classlabel=",classlabel,"\n")
+  mt.checkclasslabel(classlabel,test)
+}
+
+mt.checkothers<-function(side="abs",fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+{
+  if((length(B)>1) || !(is.integer(as.integer(B))) ||(!is.vector(B)))
+     stop(paste("B needs to be just an integer\n","your B=",B,"\n"))
+   if(B<0)
+     stop(paste("the number of permutations (B) needs to be nonnegative.\n If you want complete permutation, specify B as any number greater than the maximum number of permutations\n","your B=",B))
+  if((length(na)>1) || !(is.numeric(na)) ||(!is.vector(na)))
+     stop(paste("na needs to be just a number\n","your na=",na,"\n"))
+  if((!is.character(side))||(!is.vector(side))||(length(side)>1)
+     ||(!any(side==c("upper","abs","lower"))))
+    stop(paste("the side needs to be a single character from c('upper','abs','lower')\n","your side=",side,"\n"))
+  if((!is.character(fixed.seed.sampling))||(!is.vector(fixed.seed.sampling))||(length(fixed.seed.sampling)>1)
+     ||(!any(fixed.seed.sampling==c("y","n"))))
+    stop(paste("the fixed.seed.sampling needs to be a single character from c('y','n')\n","your fixed.seed.sampling=",fixed.seed.sampling,"\n"))
+  if((!is.character(nonpara))||(!is.vector(nonpara))||(length(nonpara)>1)
+     ||(!any(nonpara==c("y","n"))))
+    stop(paste("the nonpara needs to be a single character from c('y','n')\n","your nonpara=",nonpara,"\n"))
+}
+mt.transformX<-function(X,classlabel,test,na,nonpara)
+{
+  X<-mt.number2na(data.matrix(X),na)
+  mt.checkX(X,classlabel,test)
+  n<-ncol(X)
+  if(test=="pairt"){
+    if(n%%2==1)
+      stop(paste("the number of columns for X must be an even number in the paired t  test\n","your X=",X,"\n your classlabel=",classlabel,
+                 "\n your test=",test,"\n"))
+    halfn<-n%/%2;
+    evendata<-X[,c(1:halfn)*2]
+    odddata<-X[,c(1:halfn)*2-1]
+    vecX<-(evendata-odddata)
+    vecX<-data.matrix(vecX)
+  }else{
+    vecX<-data.matrix(X)
+  }
+  if(test=="wilcoxon"||nonpara=="y"){
+    for(i in c(1:nrow(vecX))){
+      vecX[i,]<-rank(vecX[i,])
+    }
+  }
+  vecX<-mt.na2number(c(vecX),na)
+  newL<-mt.transformL(classlabel,test)
+  list(X=vecX,m=nrow(X),n=newL$n,classlabel=newL$classlabel)
+}
+mt.transformV<-function(V,classlabel,test,na,nonpara)
+{
+  V<-mt.number2na(as.double(V),na)
+  mt.checkV(V,classlabel,test)
+  n<-length(classlabel)
+  if(test=="pairt"){
+    if(n%%2==1)
+      stop(paste("the number of columns for V must be an even number in the paired t test\n","your V=",V,"\n your classlabel=",classlabel,
+                 "\n your test=",test,"\n"))
+    halfn<-n%/%2
+    evendata<-V[c(1:halfn)*2]
+    odddata<-V[c(1:halfn)*2-1]
+    newV<-c(evendata-odddata)
+  }
+  else{
+    newV<-V
+  }
+  if(test=="wilcoxon"||nonpara=="y"){
+    newV<-rank(newV)
+    }
+  newL<-mt.transformL(classlabel,test)
+  list(V=mt.na2number(newV,na),n=newL$n,classlabel=newL$classlabel)
+}
+mt.transformL<-function(classlabel,test)
+{
+  classlabel<-as.integer(classlabel)
+  mt.checkclasslabel(classlabel,test)
+  n<-length(classlabel)
+  newL<-classlabel
+  if(test=="pairt"){
+    if(n%%2==1)
+      stop(paste("the length of classlabel must be an even number in the paired t\n","your classlabel=",classlabel,"\n your test=",test,"\n"))
+    halfn<-n%/%2;
+    n<-halfn
+    newL<-rep(0,n);
+    for(i in c(1:n)){
+      newL[i]<-classlabel[2*i]
+    }
+  }
+  list(classlabel=newL,n=n)
+}
+#this function finds the maximum number of permutations.
+#if the initial B=0, or the initial B is greater than the maximum number of
+#permutations maxB, it returns the total number of possible permutations.
+mt.getmaxB<-function(classlabel,test,B, verbose=FALSE)
+{
+  if(B>.mt.BLIM)
+    stop(paste("The setting of B=",B,"is too large. Please set B<",.mt.BLIM,"\n"))
+  n<-length(classlabel)
+  if(test=="pairt"){
+    maxB<-2^(n%/%2)
+  }
+  if(any(test==c("t","f","wilcoxon","t.equalvar"))){
+    k<-max(classlabel)
+    maxB<-1
+    curn<-n
+    for(i in c(0:k)){
+      nk<-sum(classlabel==i)
+      for(j in c(1:nk)){
+        maxB<-maxB*curn/j
+        curn<-curn-1
+      }
+    }
+  }
+  if(test=="blockf"){
+    k<-max(classlabel)
+    maxB<-1
+    for(i in c(1:(k+1))){
+      maxB<-maxB*i
+    }
+    maxB<-maxB^(n%/%(k+1))
+  }
+  #finished computing maxB
+  if((B==0)&(maxB>.mt.BLIM)){
+    stop(paste("Complete enumeration would require",maxB,
+               "permutations, which is too large. Please use random permutations by setting B>0\n"))
+  }
+  if((B>maxB)||(B==0)){
+    if(verbose) cat("We'll do complete enumerations\n")
+    return(maxB)
+  }
+  return(B)
+}
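+
+# Worked example: a two-sample t test with labels c(0,0,0,1,1,1) has
+# choose(6,3) = 20 distinct permutations, so
+#   mt.getmaxB(c(0,0,0,1,1,1), "t", B=0)
+# returns 20 (B=0 requests complete enumeration).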
+
+mt.na2number<-function(x,na){
+  y<-x
+  y[is.na(y)]<-na
+  y
+}
+mt.number2na<-function(x,na){
+  y<-x
+  y[y==na]<-NA
+  y
+}
+#patched from the new version
+mt.niceres<-function(res,X,index){
+  newres<-res
+  name<-rownames(X,do.NULL=FALSE,prefix="")
+  if(missing(index)) {
+    rownames(newres)<-name
+  }else {
+    rownames(newres)<-name[index]
+  }
+  newres[abs(newres)>=0.9*1e20]<-NA
+  data.frame(newres)
+}
+
+
+
+
+
+
+
diff --git a/R/nulldistn_c.R b/R/nulldistn_c.R
new file mode 100755
index 0000000..88cfe84
--- /dev/null
+++ b/R/nulldistn_c.R
@@ -0,0 +1,198 @@
+#functions to generate the bootstrap null distribution
+#theta0 is the value of the test statistics under the complete null hypothesis
+#tau0 is the scaling parameter (upper bound on the variance of the test statistics)
+
+boot.null <- function(X,label,stat.closure,W=NULL,B=1000,test,nulldist,theta0=0,tau0=1,marg.null=NULL,marg.par=NULL,ncp=0,perm.mat,alternative="two.sided",seed=NULL,cluster=1,dispatch=0.05,keep.nulldist,keep.rawdist){
+  cat("running bootstrap...\n")
+  X<-as.matrix(X)
+  n<-ncol(X)
+  p<-nrow(X)
+  if(!(is.vector(W) | is.matrix(W) | is.null(W))) stop("W must be a vector or a matrix")
+  if(is.null(W))W<-matrix(1,nrow=p,ncol=n)
+  if(is.vector(W)){
+    if(length(W)==n) W<-matrix(W,nrow=p,ncol=n,byrow=TRUE)
+    if(length(W)==p) W<-matrix(W,nrow=p,ncol=n)
+    if(length(W)!=n & length(W)!=p) stop("Length of W does not match dim(X)")
+  }
+  if(is.matrix(W) & (dim(W)[1]!=p | dim(W)[2]!=n)) stop("W and X must have same dimension")
+
+  # Dispatch to cluster
+  if (is.numeric(cluster)) {
+    if(!is.null(seed)) set.seed(seed)
+    muboot <- boot.resample(X,label,p,n,stat.closure,W,B,test)
+  }
+  else {
+    autoload("clusterApply","snow")
+    autoload("clusterApplyLB","snow")
+    if(!is.null(seed)) clusterApply(cluster, seed, set.seed)
+    else clusterApply(cluster, runif(length(cluster), max=10000000), set.seed)
+    # Create vector of jobs to dispatch
+          if ((dispatch > 0) & (dispatch < 1)){
+            BtoNodes <- rep(B*dispatch, 1/dispatch)
+          } else {
+             BtoNodes <- rep(dispatch, B/dispatch)
+          }
+    FromCluster <- clusterApplyLB(cluster, BtoNodes, boot.resample,X=X,label=label,p=p,n=n,stat.closure=stat.closure,W=W, test=test)
+    muboot <- matrix(unlist(FromCluster), nrow=nrow(X))
+  }
+
+  Xnames<-dimnames(X)[[1]]
+  dimnames(muboot)<-list(Xnames,paste(1:B))
+
+  #fill in any nas by resampling some more 
+  nas<-(is.na(muboot)|muboot=="Inf"|muboot=="-Inf")
+  count<-0
+  while(sum(nas)){
+    count<-count+1
+    if(count>1000) stop("Bootstrap null distribution computation terminating. Cannot obtain distribution without missing values after 1000 attempts. This problem may be resolved if you try again with a different seed.")
+    nascols<-unique(col(muboot)[nas])
+    for(b in nascols){
+      samp<-sample(n,n,replace=TRUE)
+      Xb<-X[,samp]
+      Wb<-W[,samp]
+      if(p==1){
+        Xb<-t(as.matrix(Xb))
+	Wb<-t(as.matrix(Wb))
+      }
+      Tb<-get.Tn(Xb,stat.closure,Wb)
+      muboot[,b]<-Tb[3,]*Tb[1,]/Tb[2,]
+    }
+    nas<-is.na(muboot)
+  }
+
+  rawboot <- matrix(nrow=0,ncol=0)
+  if(keep.rawdist) rawboot <- muboot
+  if(nulldist=="boot") muboot <- center.scale(muboot, theta0, tau0, alternative)
+  if(nulldist=="boot.cs") muboot <- center.scale(muboot, theta0, tau0, alternative)
+  if(nulldist=="boot.ctr") muboot <- center.only(muboot, theta0, alternative)
+  if(nulldist=="boot.qt") muboot <- quant.trans(muboot, marg.null, marg.par, ncp, alternative, perm.mat)
+  out <- list(muboot=muboot, rawboot=rawboot)
+  out 
+
+}
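+
+# boot.null() is normally called from MTP(); a direct call looks roughly
+# like the sketch below (meanX() supplies the one-sample t closure; the
+# data and B are illustrative only):
+#   X  <- matrix(rnorm(10*20), nrow=10)
+#   nd <- boot.null(X, label=NULL, stat.closure=meanX(), B=100,
+#                   test="t.onesamp", nulldist="boot.cs",
+#                   keep.nulldist=TRUE, keep.rawdist=FALSE)$muboot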
+
+center.only <- function(muboot,theta0,alternative){
+	muboot<-(muboot-apply(muboot,1,mean))+theta0
+	if(alternative=="greater") muboot <- muboot
+	else if(alternative=="less") muboot <- -muboot
+	else muboot <- abs(muboot)
+}
+
+center.scale <- function(muboot, theta0, tau0, alternative){
+  muboot<-(muboot-apply(muboot,1,mean))*sqrt(pmin(1,tau0/apply(muboot,1,var)))+theta0
+  if(alternative=="greater") muboot <- muboot
+  else if(alternative=="less") muboot <- -muboot
+  else muboot <- abs(muboot)
+}
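+
+# Sketch of the null transformation: each row is recentered to theta0 and
+# its variance shrunk to at most tau0. E.g. a row with mean 2 and variance 4
+# maps, under theta0=0 and tau0=1, to (row-2)*sqrt(1/4).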
+
+quant.trans <- function(muboot, marg.null, marg.par, ncp, alternative, perm.mat){
+### NB: Sanity checks occur outside this function at the beginning of MTP.
+  m <- dim(muboot)[1]
+  B <- dim(muboot)[2] 
+  ranks <- t(apply(muboot,1,rank,ties.method="random"))
+  Z.quant <- switch(marg.null,
+                    normal = marg.samp(marg.null="normal",marg.par,m,B,ncp),
+                    t = marg.samp(marg.null="t",marg.par,m,B,ncp),
+                    f = marg.samp(marg.null="f",marg.par,m,B,ncp),
+                    perm = perm.mat)
+  Z.quant <- t(apply(Z.quant,1,sort))
+### Left code like this for transparency. Could just as easily use quantile()
+### for this first part, although it would be redundant.
+  if(marg.null!="perm"){
+    for(i in 1:m){
+        Z.quant[i,] <- Z.quant[i,][ranks[i,]]
+      }
+    }
+  else{
+    Z.quant <- t(apply(Z.quant,1,quantile,probs=seq(0,1,length.out=B),na.rm=TRUE))
+    for(i in 1:m){
+        Z.quant[i,] <- Z.quant[i,][ranks[i,]]
+      }
+  }
+
+  if(alternative=="greater") Z.quant <- Z.quant
+  else if(alternative=="less") Z.quant <- -Z.quant
+  else Z.quant <- abs(Z.quant)
+  Z.quant
+}
+
+boot.resample <- function (X, label, p, n, stat.closure, W, B, test){
+    muboot <- matrix(0, nrow = p, ncol = B)
+    samp <- sample(n, n * B, replace = TRUE)
+    if (any(test == c("t.twosamp.equalvar", "t.twosamp.unequalvar",
+        "f"))) {
+        label <- as.vector(label)
+        uniqlabs <- unique(label)
+        num.group <- length(uniqlabs)
+        groupIndex <- lapply(1:num.group, function(k) which(label ==
+            uniqlabs[k]))
+        if(sum(is.na(label))){
+          naindex<-c(1:num.group)[is.na(uniqlabs)]
+          groupIndex[[naindex]]<-which(is.na(label))
+        }
+        obs <- sapply(1:num.group, function(x) length(groupIndex[[x]]))
+        samp <- lapply(1:num.group, function(k) matrix(NA, nrow = B,
+            ncol = obs[k]))
+        for (j in 1:B) {
+            for (i in 1:num.group) {
+                uniq.obs <- 1
+                count <- 0
+                while (uniq.obs == 1) {
+                  count <- count + 1
+                  samp[[i]][j, ] <- sample(groupIndex[[i]], obs[i],
+                    replace = TRUE)
+                  uniq.obs <- length(unique(samp[[i]][j, ]))
+                  if (count > 1000)
+                    stop("Bootstrap null distribution computation terminating. Cannot obtain bootstrap sample with at least 2 unique observations after 1000 attempts. Sample size may be too small for bootstrap procedure but this problem may be resolved if you try again with a different seed.")
+                }
+            }
+        }
+        samp <- as.vector(t(matrix(unlist(samp), nrow = B, ncol = sum(obs))))
+    }
+    else if (test == c("f.twoway")) {
+        label <- as.vector(label)
+        utreat <- unique(label)
+        num.treat <- length(utreat)
+        num.block <- length(gregexpr("12", paste(label, collapse = ""))[[1]])
+        ublock <- 1:num.block
+        Breaks <- c(0, gregexpr(paste(c(num.treat, 1), collapse = ""),
+            paste(label, collapse = ""))[[1]], n)
+        BlockNum <- sapply(1:num.block, function(x) Breaks[x +
+            1] - Breaks[x])
+        block <- unlist(lapply(1:num.block, function(x) rep(x,
+            BlockNum[x])))
+        groupIndex <- lapply(1:num.block, function(j) sapply(1:num.treat,
+            function(i) which(label == utreat[i] & block == ublock[j])))
+         obs <- sapply(1:num.block, function(x) sapply(1:num.treat,
+            function(y) length(groupIndex[[x]][,y])))
+        samp <- lapply(1:(num.treat * num.block), function(k) matrix(NA,
+            nrow = B, ncol = obs[k]))
+        for (k in 1:B) {
+            for (i in 1:num.block) {
+                for (j in 1:num.treat) {
+                  uniq.obs <- 1
+                  count <- 0
+                  while (uniq.obs == 1) {
+                    count <- count + 1
+                    samp[[(i - 1) * num.treat + j]][k, ] <- sample(groupIndex[[i]][,j],
+                    obs[j, i], replace = TRUE)
+                    uniq.obs <- length(unique(samp[[(i - 1) *
+                      num.treat + j]][k, ]))
+                    if (count > 1000)
+                      stop("Bootstrap null distribution computation terminating. Cannot obtain bootstrap sample with at least 2 unique observations after 1000 attempts. Sample size may be too small for bootstrap procedure but this problem may be resolved if you try again with a different seed.")
+                  }
+                }
+            }
+        }
+        samp <- as.vector(t(matrix(unlist(samp), nrow = B, ncol = sum(obs))))
+      }
+    cat("iteration = ")
+    muboot <- .Call("bootloop", stat.closure, as.numeric(X),
+        as.numeric(W), as.integer(p), as.integer(n), as.integer(B),
+        as.integer(samp), NAOK = TRUE)
+    cat("\n")
+    muboot <- matrix(muboot, nrow = p, ncol = B)
+}
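+
+## Illustrative sketch (not part of upstream): calling boot.resample directly.
+## The unit-weight matrix W here is an assumption; within MTP the caller sets
+## up W. Requires the compiled bootloop routine in multtest.
+if(FALSE){
+  X <- matrix(rnorm(10*20), nrow=10)
+  y <- rep(0:1, each=10)
+  clos <- diffmeanX(y)
+  W <- matrix(1, nrow=10, ncol=20)
+  mu <- boot.resample(X, y, p=10, n=20, stat.closure=clos, W=W, B=50,
+                      test="t.twosamp.unequalvar")   # 10 x 50 matrix
+}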
+
+
+
diff --git a/R/statistics.R b/R/statistics.R
new file mode 100755
index 0000000..af92c11
--- /dev/null
+++ b/R/statistics.R
@@ -0,0 +1,635 @@
+#for the one-sample t-statistic and
+#the paired t-statistic (where the difference is the r.v.);
+#gives the Wilcoxon signed-rank statistic if robust=TRUE
+meanX<-function(psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE){
+	function(x,w=NULL, samp){
+		if(is.null(w))
+			w=rep(1,length(x))
+		if(length(w)!=length(x))
+			stop("x and w must have same length")
+		x[!is.finite(w)]<-NA
+		if(na.rm){
+			drop<-is.na(x)|is.na(w)
+			x<-x[!drop]
+			w<-w[!drop]
+		}
+		if(robust) x<-(x>0)*rank(abs(x))
+		n<-length(x)
+		sumw<-sum(w)
+		num<-sum(w*x)/sumw-psi0
+    if(is.na(num)) denom <- NA
+    else{
+     if(standardize) {denom <- sqrt(sum(w*(x-num)^2)/(sum(w*(1-(sum(w^2)/sumw^2)))))}
+    else
+       denom <- 1
+     }
+    if(alternative=="two.sided"){
+			snum<-sign(num)
+		 	num<-abs(num)
+		}
+		else {
+      if(alternative=="less"){
+			   snum<-(-1)
+	 		   num<-(-num)
+	 		   }
+    else snum<-1
+    }
+		c(num*sqrt(sumw),denom,snum)
+	}
+}
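+
+## Illustrative sketch (not part of upstream): building and applying the
+## one-sample closure. The returned triple is c(numerator, denominator, sign),
+## so snum*num/denom recovers the usual t statistic when psi0=0.
+if(FALSE){
+  one.samp <- meanX(psi0=0, alternative="two.sided")
+  x <- rnorm(20)
+  s <- one.samp(x, samp=seq_along(x))
+  s[3]*s[1]/s[2]   # should match t.test(x)$statistic
+}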
+
+diffmeanX<-function(label,psi0=0,var.equal=FALSE,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE){
+	if(is.null(label))
+		stop("A label variable is needed for this test")
+	Samp<-1:length(label)
+	function(x,w=NULL, samp=Samp){
+		dep<-label[samp]
+		if(is.null(w))
+			w=rep(1,length(x))
+		if(length(w)!=length(x))
+			stop("x and w must have same length")
+		x[!is.finite(w)]<-NA
+		if(na.rm){
+			drop<-is.na(x)|is.na(dep)|is.na(w)
+			x<-x[!drop]
+			xlabel<-as.vector(dep[!drop])
+			w<-w[!drop]
+		}
+
+		else
+			xlabel<-as.vector(dep)
+    # Convert to 0,1 coding
+    uniq <- sort.int(unique.default(xlabel))
+ 		if(length(uniq)>2)
+			warning("More than 2 classes! Working with first unique label vs. rest.")
+    lab1<-uniq[1]
+    New0 <- which(xlabel==lab1)
+    New1 <- which(xlabel!=lab1)
+    xlabel <- as.numeric(replace(replace(xlabel, New1, 1), New0, 0))
+    xlabel <- as.numeric(xlabel)
+
+    # Check for at least 2 unique values in each group
+    if(standardize & length(unique.default(x[xlabel==lab1]))==1) stop("Only one unique value in bootstrap sample for first group. Cannot calculate variance. This problem may be resolved if you try again with a different seed.")
+    if(standardize & length(unique.default(x[xlabel!=lab1]))==1) stop("Only one unique value in bootstrap sample for second group. Cannot calculate variance. This problem may be resolved if you try again with a different seed.")
+    n<-length(x)
+    if(robust) x<-rank(x)
+    if ((sum(w==1)==n)&(standardize)){
+        vecX <- as.vector(x)
+        extra <- max(xlabel) + 1
+        na =  -93074815   # Consistency with mt.teststat
+        nonpara<-"y"
+        if (var.equal) test <- "t.equalvar"
+        else
+           test = "t"
+        if (robust) test <- "wilcoxon"
+        if (is.null(nrow(x))) traits <- 1
+        else
+            traits <- nrow(x)
+        options <- c(test, "abs", "y")
+        TestStat <- .C("get_stat_num_denum", as.double(vecX), as.integer(traits),
+                 as.integer(n), as.integer(xlabel), as.double(na),
+                 t.num = double(traits), t.denum = double(traits), as.character(options),
+                 as.integer(extra), PACKAGE = "multtest")
+         if (robust) {
+             obs <- length(xlabel)
+             lab1<-sort.int(unique.default(xlabel))[1]
+             m=sum(xlabel==lab1)
+             num <- TestStat$t.num*obs/(m*(obs-m))   # Conversion to same numerator value as original non C code function
+             denom <- TestStat$t.denum*obs/(m*(obs-m)) # Conversion to same denominator value as original non C code function
+         }
+         else {
+         num <- TestStat$t.num - psi0
+         denom <- TestStat$t.denum
+         }
+     }
+     else {
+    sub1<-x[xlabel==lab1]
+		sub2<-x[xlabel!=lab1]
+  	w1<-w[xlabel==lab1]
+		w2<-w[xlabel!=lab1]
+		m<-length(sub1)
+		m1<-sum(w1*sub1)/sum(w1)
+		m2<-sum(w2*sub2)/sum(w2)
+		num<-m2-m1-psi0
+		if(is.na(num))
+			denom<-NA
+		else{
+			if(robust){
+        if(sum(w)==1) df <- sum(w)
+        else {
+          df<- sum(w)-1
+          }
+        mm<-sum(w*x)/sum(w)
+        if(standardize) denom <- sqrt((1/m+1/(n-m))*sum(w*(x-mm)^2)/df)
+        else
+          denom <- 1
+			}
+			else{
+				if(standardize){
+					df1<-sum(w1)-1
+					df2<-sum(w2)-1
+					df<-sum(w)-2
+					if(var.equal) denom <- sqrt((1/m+1/(n-m))*(sum(w1*(sub1-m1)^2)+sum(w2*(sub2-m2)^2))/df)
+					else
+            denom <- sqrt((1/m*sum(w1*(sub1-m1)^2)/df1)+(1/(n-m)*sum(w2*(sub2-m2)^2)/df2))
+				}
+				else
+					denom<-1
+				}
+			}
+		}
+  if(alternative=="two.sided"){
+			snum<-sign(num)
+		 	num<-abs(num)
+		}
+		else {
+      if(alternative=="less"){
+			   snum<-(-1)
+	 		   num<-(-num)
+	 		   }
+    else snum<-1
+    }
+		c(num,denom,snum)
+	}
+}
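+
+## Illustrative sketch (not part of upstream): a two-sample closure built from
+## a 0/1 label vector. With unit weights and standardize=TRUE this dispatches
+## to the compiled get_stat_num_denum routine, so the compiled package is
+## needed; snum*num/denom gives a Welch-type two-sample t statistic (sign
+## convention follows the compiled routine).
+if(FALSE){
+  y <- rep(0:1, each=10)
+  two.samp <- diffmeanX(y, var.equal=FALSE)
+  x <- rnorm(20)
+  s <- two.samp(x, samp=seq_along(y))
+  s[3]*s[1]/s[2]
+}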
+
+FX<-function(label,na.rm=TRUE,robust=FALSE){
+	if(is.null(label))
+		stop("A label variable is needed for this test")
+	Samp<-1:length(label)
+	function(x,w=NULL, samp=Samp){
+		dep<-label[samp]
+		if(is.null(w)){
+			if(na.rm){
+				drop<-is.na(x)|is.na(dep)
+				x<-x[!drop]
+				xlabel<-as.vector(dep[!drop])
+			}
+			else
+				xlabel<-as.vector(dep)
+    # Convert to 0,1,..k coding
+    labs <- sort.int(unique.default(xlabel))
+    num.levels <- length(labs)
+
+    for (i in 1:num.levels){
+        Index <- which(xlabel==labs[i])
+        xlabel <- replace(xlabel, Index, i-1)
+    }
+    xlabel <- as.numeric(xlabel)
+    # Check for at least 2 unique values in each group
+    for (i in 1:length(labs)){
+       if(length(unique.default(x[xlabel==labs[i]]))==1) stop("Only one unique value in bootstrap sample for one of the groups. Within group sum of squares is 0. This problem may be resolved if you try again with a different seed.")
+    }
+      if(robust) x<-rank(x)
+    	n<-length(x)
+      vecX <- as.vector(x)
+      extra <- max(xlabel) + 1
+      na =  -93074815 # Consistency with mt.teststat
+      if (is.null(nrow(x))) traits <- 1
+      else
+        traits <- nrow(x)
+      options <- c("f", "abs", "y")
+      TestStat <- .C("get_stat_num_denum", as.double(vecX), as.integer(traits),
+                 as.integer(n), as.integer(xlabel), as.double(na),
+                 t.num = double(traits), t.denum = double(traits), as.character(options),
+                 as.integer(extra), PACKAGE = "multtest")
+      num <- TestStat$t.num
+      denom <- TestStat$t.denum
+    }
+		else{
+			if(length(w)!=length(x))
+				stop("x and w must have same length")
+			x[!is.finite(w)]<-NA
+			if(na.rm){
+				drop<-is.na(x)|is.na(dep)|is.na(w)
+				x<-x[!drop]
+				xlabel<-as.vector(dep[!drop])
+				w<-w[!drop]
+			}
+			else
+				xlabel<-as.vector(dep)
+		# Convert to 0,1,..k coding
+          labs <- sort.int(unique.default(xlabel))
+          num.levels <- length(labs)
+
+          for (i in 1:num.levels){
+              Index <- which(xlabel==labs[i])
+              xlabel <- replace(xlabel, Index, i-1)
+          }
+          xlabel <- as.numeric(xlabel)
+          # Check for at least 2 unique values in each group
+          for (i in 1:length(labs)){
+             if(length(unique.default(x[xlabel==labs[i]]))==1) stop("Only one unique value in bootstrap sample for one of the groups. Within group sum of squares is 0. This problem may be resolved if you try again with a different seed.")
+          }
+			if(robust) x<-rank(x)
+			n<-length(x)
+#TODO: how to deal with weights in F?
+      vecX <- as.vector(x)
+      extra <- max(xlabel) + 1
+      na =  -93074815  # Consistency with mt.teststat
+      if (is.null(nrow(x))) traits <- 1
+      else
+        traits <- nrow(x)
+      options <- c("f", "abs", "y")
+      TestStat <- .C("get_stat_num_denum", as.double(vecX), as.integer(traits),
+                 as.integer(n), as.integer(xlabel), as.double(na),
+                 t.num = double(traits), t.denum = double(traits), as.character(options),
+                 as.integer(extra), PACKAGE = "multtest")
+      num <- TestStat$t.num
+      denom <- TestStat$t.denum
+		}
+		c(2*num,2*denom,1)
+	}
+}
+
+#F statistic for block design with k treatments and l blocks
+# One observation per block
+# The observations are ordered by block, and within
+#  each block, they are labeled using the integers 1 to k.
+#Friedman statistic if robust=TRUE
+blockFX<-function(label,na.rm=TRUE,robust=FALSE){
+	if(is.null(label))
+		stop("A label variable is needed for this test")
+	samp<-1:length(label)
+	function(x,w=NULL){
+		dep<-label[samp]
+		if(is.null(w)){
+			if(na.rm){
+				drop<-is.na(x)|is.na(dep)
+				x<-x[!drop]
+				xlabel<-dep[!drop]
+			}
+			else
+				xlabel<-dep
+			n<-length(x)
+			ulab<-sort(unique(xlabel))
+			k<-length(ulab)
+			l<-n/k
+			if(round(l)*k!=n) stop("The blocks are not of equal size.")
+			block<-sort(rep(1:l,k))[samp]
+			ublock<-1:l
+			if(robust){
+			 	for(j in 1:l)
+					x[block==j]<-rank(x[block==j])
+			}
+			m<-mean(x)
+			mlab<-mblock<-denom<-NULL
+			for(i in 1:k){
+				mlab[i]<-mean(x[xlabel==ulab[i]])
+				for(j in 1:l){
+					if(i==1)
+						mblock[j]<-mean(x[block==ublock[j]])
+					denom[(i-1)*l+j]<-x[xlabel==ulab[i] & block==ublock[j]]-mlab[i]-mblock[j]+m
+				}
+			}
+			num<-sum(l*(mlab-m)^2)
+			denom<-sum(denom^2)/(l-1)
+		}
+		else{
+			if(length(w)!=length(x))
+				stop("x and w must have same length")
+			x[!is.finite(w)]<-NA
+			if(na.rm){
+				drop<-is.na(x)|is.na(dep)|is.na(w)
+				x<-x[!drop]
+				xlabel<-dep[!drop]
+				w<-w[!drop]
+			}
+			else
+				xlabel<-dep
+			n<-length(x)
+			ulab<-sort(unique(xlabel))
+			k<-length(ulab)
+			l<-n/k
+			if(round(l)*k!=n) stop("The blocks are not of equal size.")
+			block<-sort(rep(1:l,k))[samp]
+			ublock<-1:l
+			if(robust){
+			 	for(j in 1:l)
+					x[block==j]<-rank(x[block==j])
+			}
+# TODO: how to deal with weights in block f?
+			m<-mean(x)
+			mlab<-mblock<-denom<-NULL
+			for(i in 1:k){
+				mlab[i]<-mean(x[xlabel==ulab[i]])
+				for(j in 1:l){
+					if(i==1)
+						mblock[j]<-mean(x[block==ublock[j]])
+					denom[(i-1)*l+j]<-x[xlabel==ulab[i] & block==ublock[j]]-mlab[i]-mblock[j]+m
+				}
+			}
+			num<-sum(l*(mlab-m)^2)
+			denom<-sum(denom^2)/(l-1)
+		}
+		c(num,denom,1)
+	}
+}
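+
+## Illustrative sketch (not part of upstream): k=3 treatments in each of l=4
+## blocks, one observation per cell, observations ordered by block and labeled
+## 1:k within each block; num/denom is the block-F statistic formed downstream.
+if(FALSE){
+  lab <- rep(1:3, times=4)
+  bf <- blockFX(lab)
+  s <- bf(rnorm(12))   # c(num, denom, 1)
+  s[1]/s[2]
+}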
+
+#could add standardize=FALSE and set denom=1 to return just the sum of squares.
+#
+#F statistic for block design with k treatments and l blocks
+# The observations are ordered by block, and within
+#  each block, they are labeled using the integers 1 to k.
+#Friedman statistic if robust=TRUE
+
+twowayFX <-function(label,na.rm=TRUE,robust=FALSE){
+	if(is.null(label))
+		stop("A label variable is needed for this test")
+	Samp<-1:length(label)
+	function(x,w=NULL, samp=Samp){
+  dep<-label[samp]
+		if(is.null(w)){
+			if(na.rm){
+				drop<-is.na(x)|is.na(dep)
+				x<-x[!drop]
+				xlabel<-dep[!drop]
+			}
+			else
+				xlabel<-dep
+			n<-length(x)
+			ulab<-sort(unique(xlabel))
+			k<-length(ulab)
+      l <- length(gregexpr('12', paste(xlabel, collapse=""))[[1]])
+			ublock<-1:l
+      Breaks <- c(0,gregexpr(paste(c(k,1),collapse=""), paste(xlabel, collapse=""))[[1]], n)
+      BlockNum <- sapply(1:l, function(x) Breaks[x+1]-Breaks[x])
+      block <- unlist(sapply(1:l, function(x) rep(x,BlockNum[x])))
+			if(robust){
+			 	for(j in 1:l)
+					x[block==j]<-rank(x[block==j])
+			}
+			m<-mean(x)
+			mlab<-mblock<-mcell<-denom<-NULL
+			for(i in 1:k){
+				mlab[i]<-mean(x[xlabel==ulab[i]])
+          for(j in 1:l){
+					if(i==1)
+						mblock[j]<-mean(x[block==ublock[j]])
+            denom[(i-1)*l+j]<-sum((mean(x[xlabel==ulab[i] & block==ublock[j]])-mlab[i]-mblock[j]+m)^2)
+				}
+			}
+      num<-sum(l*(mlab-m)^2)/(k-1)
+			denom<-sum(denom)/((l-1)*(k-1))
+		}
+		else{
+			if(length(w)!=length(x))
+				stop("x and w must have same length")
+			x[!is.finite(w)]<-NA
+			if(na.rm){
+        drop<-is.na(x)|is.na(dep)|is.na(w)
+				x<-x[!drop]
+				xlabel<-dep[!drop]
+				w<-w[!drop]
+			}
+			else
+				xlabel<-dep
+      n<-length(x)
+			ulab<-sort(unique(xlabel))
+			k<-length(ulab)
+      l <- length(gregexpr('12', paste(xlabel, collapse=""))[[1]])
+			ublock<-1:l
+      Breaks <- c(0,gregexpr(paste(c(k,1),collapse=""), paste(xlabel, collapse=""))[[1]], n)
+      BlockNum <- sapply(1:l, function(x) Breaks[x+1]-Breaks[x])
+      block <- unlist(sapply(1:l, function(x) rep(x,BlockNum[x])))
+			if(robust){
+			 	for(j in 1:l)
+					x[block==j]<-rank(x[block==j])
+			}
+#TODO: how to deal with weights in block f?
+			m<-mean(x)
+			mlab<-mblock<-denom<-NULL
+			for(i in 1:k){
+				mlab[i]<-mean(x[xlabel==ulab[i]])
+          for(j in 1:l){
+					if(i==1)
+						mblock[j]<-mean(x[block==ublock[j]])
+            denom[(i-1)*l+j]<-sum((mean(x[xlabel==ulab[i] & block==ublock[j]])-mlab[i]-mblock[j]+m)^2)
+				}
+			}
+			num<-sum(l*(mlab-m)^2)/(k-1)
+			denom<-sum(denom)/((l-1)*(k-1))
+		}
+		c(num,denom,1)
+	}
+}
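+
+## Illustrative sketch (not part of upstream): k=2 treatments with 2 replicates
+## per cell in l=3 blocks; the label vector pastes to "112211221122", matching
+## the "12" block-boundary encoding this function expects.
+if(FALSE){
+  lab <- rep(c(1,1,2,2), times=3)
+  tw <- twowayFX(lab)
+  s <- tw(rnorm(12), samp=seq_along(lab))   # c(num, denom, 1)
+  s[1]/s[2]
+}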
+
+
+## Z is a design *matrix*
+## with variable of interest in first column
+## and variables to adjust for in remaining columns
+## Z is fixed for all columns of X
+## gene expression is the outcome
+lmX<-function(Z=NULL,n,psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE){
+	if(is.null(Z))
+		Z<-matrix(1,n,1)
+	else
+		Z<-cbind(Z,rep(1,n))
+	Samp<-1:n
+	function(x,w=NULL, samp=Samp){
+		covar<-Z[samp,]
+		if(is.null(w)){
+			if(na.rm){
+				drop<-is.na(x)|rowSums(is.na(covar))
+				covar<-covar[!drop,]
+				x<-x[!drop]
+			}
+			covar<-as.matrix(covar)
+			if(robust){
+                          	autoload("rlm","MASS")
+				out<-rlm(covar,x)
+				out$df.residual<-length(x)-out$rank
+			}
+			else
+				out<-lm.fit(covar,x)
+			if(standardize) denom <- sqrt(sum(out$residuals^2)/out$df.residual)*sqrt(diag(chol2inv(out$qr$qr,size=out$rank))[1])
+         else denom <- 1
+      if(denom==0) stop("Denominator of test statistic is 0 for a bootstrap sample. This problem may result from too small a sample size but may be resolved if you try again with a different seed.")
+		}
+		else{
+			if(length(w)!=length(x))
+				stop("x and w must have same length")
+			x[!is.finite(w)]<-NA
+			if(na.rm){
+				drop<-is.na(x)|rowSums(is.na(covar))|is.na(w)
+				covar<-covar[!drop,]
+				x<-x[!drop]
+				w<-w[!drop]
+			}
+			covar<-as.matrix(covar)
+			if(robust){
+                              	autoload("rlm","MASS")
+				out<-rlm(covar,x,w)
+				out$df.residual<-length(x)-out$rank
+			}
+			else
+				out<-lm.wfit(covar,x,w)
+			if(standardize) denom <- sqrt(sum(w*out$residuals^2)/out$df.residual)*sqrt(diag(chol2inv(out$qr$qr,size=out$rank))[1])
+         else denom <- 1
+      if(denom==0) stop("Denominator of test statistic is 0 for a bootstrap sample. This problem may result from too small a sample size but may be resolved if you try again with a different seed.")
+		}
+		num<-out$coef[1]-psi0
+  if(alternative=="two.sided"){
+			snum<-sign(num)
+		 	num<-abs(num)
+		}
+		else {
+      if(alternative=="less"){
+			   snum<-(-1)
+	 		   num<-(-num)
+	 		   }
+    else snum<-1
+    }
+		c(num,denom,snum)
+	}
+}
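+
+## Illustrative sketch (not part of upstream): the statistic tests the
+## coefficient of the first column of Z for each row of X (here a single
+## pseudo-gene), with an intercept appended internally.
+if(FALSE){
+  n <- 30
+  Z <- cbind(age=rnorm(n))
+  f <- lmX(Z, n)
+  s <- f(rnorm(n), samp=1:n)   # c(|coef|, se, sign) for the Z[,1] coefficient
+}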
+
+
+## gene expression is the covariate
+## y is an outcome of interest
+## Z is any intercept or other covariates
+## Z changes for each row of X
+lmY<-function(Y,Z=NULL,n,psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE){
+	if(is.null(Y))
+		stop("An outcome variable is needed for this test")
+	if(length(Y)!=n)
+		stop(paste("Dimension of outcome Y=",length(Y),", not equal to the dimension of the data=",n,sep=""))
+	if(is.null(Z))
+		Z<-matrix(1,n,1)
+	else
+		Z<-cbind(Z,rep(1,n))
+	Samp<-1:n
+	function(x,w=NULL, samp=Samp){
+		dep<-Y[samp]
+		covar<-Z[samp,]
+		covar<-cbind(x,covar)
+		covar[!is.finite(w),]<-NA
+		if(is.null(w)){
+			if(na.rm){
+				drop<-is.na(dep)|rowSums(is.na(covar))
+				covar<-covar[!drop,]
+				xy<-dep[!drop]
+			}
+			else
+				xy<-dep
+			if(robust){
+                          	autoload("rlm","MASS")
+                                out<-rlm(covar,xy)
+				out$df.residual<-length(xy)-out$rank
+			}
+			else
+				out<-lm.fit(covar,xy)
+
+      if(standardize) denom <- sqrt(sum(out$residuals^2)/out$df.residual)*sqrt(diag(chol2inv(out$qr$qr,size=out$rank))[1])
+         else
+           denom <- 1
+      if(denom==0) stop("Denominator of test statistic is 0 for a bootstrap sample. This problem may result from too small a sample size but may be resolved if you try again with a different seed.")
+		}
+		else{
+			if(length(w)!=length(x))
+				stop("x and w must have same length")
+			if(na.rm){
+				drop<-is.na(dep)|rowSums(is.na(covar))|is.na(w)
+				covar<-covar[!drop,]
+				xy<-dep[!drop]
+				w<-w[!drop]
+			}
+			else
+				xy<-dep
+			if(robust){
+                            	autoload("rlm","MASS")
+				out<-rlm(covar,xy,w)
+				out$df.residual<-length(xy)-out$rank
+			}
+			else
+				out<-lm.wfit(covar,xy,w)
+			if(standardize) denom <- sqrt(sum(w*out$residuals^2)/out$df.residual)*sqrt(diag(chol2inv(out$qr$qr,size=out$rank))[1])
+         else
+            denom <- 1
+      if(denom==0) stop("Denominator of test statistic is 0 for a bootstrap sample. This problem may result from too small a sample size but may be resolved if you try again with a different seed.")
+		}
+		num<-out$coef[1]-psi0
+  if(alternative=="two.sided"){
+			snum<-sign(num)
+		 	num<-abs(num)
+		}
+		else {
+      if(alternative=="less"){
+			   snum<-(-1)
+	 		   num<-(-num)
+	 		   }
+    else snum<-1
+    }
+		c(num,denom,snum)
+	}
+}
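+
+## Illustrative sketch (not part of upstream): here each row of X supplies the
+## covariate and Y is the common outcome; the tested coefficient is the one on x.
+if(FALSE){
+  n <- 30
+  y <- rnorm(n)
+  f <- lmY(Y=y, Z=NULL, n=n)
+  s <- f(rnorm(n), samp=1:n)   # c(|coef|, se, sign)
+}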
+
+## returns NA's if coxph fails
+## strata is covariates to adjust for
+coxY<-function(surv.obj,strata=NULL,psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",init=NULL,method="efron"){
+	autoload("coxph","survival")
+    	if(!inherits(surv.obj,"Surv"))  #covers NULL case
+        	stop("Response must be a survival object")
+	if(!is.null(strata))
+		strat<-as.matrix(strata)
+	else
+		strat<-rep(1,nrow(surv.obj))
+	strat<-strata(strat)
+    	Samp<-1:nrow(surv.obj)
+    	function(x,w=NULL, samp=Samp){
+        	if(!is.null(w)&length(w)!=length(x))
+            		stop("x and w must have same length")
+		dep<-surv.obj[samp,]
+		covar<-strat[samp]
+		if(na.rm){
+			drop<-is.na(x)
+			if(!is.null(w))
+				drop<-drop+is.na(w)
+			if(!is.null(strat))
+				drop<-drop+is.na(covar)
+			x<-x[!drop]
+			w<-w[!drop]
+			covar<-covar[!drop]
+			dep<-dep[!drop,]
+		}
+		if(sum(is.na(covar))){
+			drop<-is.na(covar)
+			x<-x[!drop]
+			w<-w[!drop]
+			covar<-covar[!drop]
+			dep<-dep[!drop,]
+		}
+		design<-cbind(x,rep(1,length(x)))
+		design[!is.finite(w),]<-NA
+		control<-coxph.control()
+		srvd<-try(coxph.fit(design,dep,strata=covar,init=init,control=control,weights=w,method=method,rownames=rownames(design)))
+        	if(inherits(srvd,"try-error"))
+	            return(c(NA,NA,NA))
+	        if(standardize) denom <- sqrt(srvd$var[1,1])
+             else denom <- 1
+          if(denom==0) stop("Denominator of test statistic is 0 for a bootstrap sample. This problem may result from too small a sample size but may be resolved if you try again with a different seed.")
+       	 	num<-srvd$coef[1]-psi0
+
+  if(alternative=="two.sided"){
+			snum<-sign(num)
+		 	num<-abs(num)
+		}
+		else {
+      if(alternative=="less"){
+			   snum<-(-1)
+	 		   num<-(-num)
+	 		   }
+    else snum<-1
+    }
+		c(num,denom,snum)
+	}
+}
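+
+## Illustrative sketch (not part of upstream): coxY relies on the survival
+## package (coxph.fit was callable in contemporaneous versions of survival);
+## it returns NAs rather than stopping when the fit fails.
+if(FALSE){
+  library(survival)
+  n <- 30
+  srv <- Surv(rexp(n), rbinom(n, 1, 0.7))
+  f <- coxY(srv)
+  s <- f(rnorm(n), samp=1:n)   # c(|coef|, se, sign), or c(NA,NA,NA) on failure
+}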
+
+#function that applies stat.closure to (X,W)
+get.Tn<-function(X,stat.closure,W=NULL){
+	wapply(X,1,stat.closure,W)
+}
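+
+## Illustrative sketch (not part of upstream): applying a closure over the rows
+## of X; downstream code indexes the result as obs[1,], obs[2,], obs[3,]
+## (numerators, denominators, signs).
+if(FALSE){
+  X <- matrix(rnorm(50*20), nrow=50)
+  obs <- get.Tn(X, meanX())   # 3 x 50
+}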
+
+
+
diff --git a/R/test.R b/R/test.R
new file mode 100755
index 0000000..8019c03
--- /dev/null
+++ b/R/test.R
@@ -0,0 +1,712 @@
+#main user-level function for multiple hypothesis testing
+
+MTP<-function(X,W=NULL,Y=NULL,Z=NULL,Z.incl=NULL,Z.test=NULL,na.rm=TRUE,test="t.twosamp.unequalvar",robust=FALSE,standardize=TRUE,alternative="two.sided",psi0=0,typeone="fwer",k=0,q=0.1,fdr.method="conservative",alpha=0.05,smooth.null=FALSE,nulldist="boot.cs",B=1000,ic.quant.trans=FALSE,MVN.method="mvrnorm",penalty=1e-6,method="ss.maxT",get.cr=FALSE,get.cutoff=FALSE,get.adjp=TRUE,keep.nulldist=TRUE,keep.rawdist=FALSE,seed=NULL,cluster=1,type=NULL,dispatch=NULL,marg.null=NULL,marg.par=NUL [...]
+  ##sanity checks / formatting
+  #X
+  if(missing(X)) stop("Argument X is missing")
+  if(inherits(X,"eSet")){ 
+    if(is.character(Y)) Y<-pData(X)[,Y]
+    if(is.character(Z)){
+      if(Z%in%Y){
+        Z<-Z[!(Z%in%Y)]
+	warning(paste("Outcome Y=",Y,"should not be included in the covariates Z=",Z,". Removing Y from Z",sep=""))
+	}
+      Z<-pData(X)[,Z]
+    }
+    X<-exprs(X)
+  }
+  X<-as.matrix(X)
+  dx<-dim(X)
+  if(length(dx)==0) stop("dim(X) must have positive length")
+  p<-dx[1]
+  n<-dx[2]
+  #W
+  if(!is.null(W)){
+    W[W<=0]<-NA
+    if(is.vector(W) & length(W)==n) W <- matrix(rep(W,p),nrow=p,ncol=n,byrow=TRUE)
+    if(is.vector(W) & length(W)==p) W <- matrix(rep(W,n),nrow=p,ncol=n)
+    if(test%in%c("f","f.block","f.twoway","t.cor","z.cor")){
+      warning("Weights can not be used with F-tests or tests of correlation parameters, arg W is being ignored.")
+      W<-NULL
+    }
+  }
+  #Y
+  if(!is.null(Y)){
+    if(is.Surv(Y)){
+      if(test!="coxph.YvsXZ") stop(paste("Test ",test," does not work with a survival object Y",sep=""))
+    }
+    else{
+      Y<-as.matrix(Y)
+      if(ncol(Y)!=1) stop("Argument Y must be a vector")
+    }
+    if(nrow(Y)!=n) stop("Outcome Y has length ",nrow(Y),", not equal to n=",n)
+  }
+  if(test=="t.pair") n <- dx[2]/2
+  #Z
+  if(!is.null(Z)){
+    Z<-as.matrix(Z)
+    if(nrow(Z)!=n) stop("Covariates in Z have length ",nrow(Z),", not equal to n=",n,"\n")
+    #Z.incl tells which columns of Z to include in model
+    if(is.null(Z.incl)) Z.incl<-(1:ncol(Z))
+    if(length(Z.incl)>ncol(Z)) stop("Number of columns in Z.incl ",length(Z.incl)," exceeds ncol(Z)=",ncol(Z))
+    if(is.logical(Z.incl)) Z.incl<-(1:ncol(Z))[Z.incl]
+    if(is.character(Z.incl) & length(Z.incl)!=sum(Z.incl%in%colnames(Z))) stop(paste("Z.incl=",Z.incl," names columns not in Z",sep=""))
+    Za<-Z[,Z.incl]
+    #Z.test tells which column of Z to test for an association
+    if(test=="lm.XvsZ"){
+      if(is.null(Z.test)){
+        warning(paste("Z.test not specified, testing for association with variable in first column of Z:",colnames(Z)[1],sep=""))
+	Z.test<-1
+      }
+      if(is.logical(Z.test)) Z.test<-(1:ncol(Z))[Z.test]
+      if(is.character(Z.test) & !(Z.test%in%colnames(Z))) stop(paste("Z.test=",Z.test," names a column not in Z",sep=""))
+      if(is.numeric(Z.test) & !(Z.test%in%(1:ncol(Z)))) stop("Value of Z.test must be >0 and <",ncol(Z))
+      if(Z.test%in%Z.incl){
+        Z.incl<-Z.incl[!(Z.incl%in%Z.test)]
+	Za<-Z[,Z.incl]
+      }
+      Za<-cbind(Z[,Z.test],Za)
+    }
+    Z<-Za
+    rm(Za)
+  }
+  #test
+  TESTS<-c("t.onesamp","t.twosamp.equalvar","t.twosamp.unequalvar","t.pair","f","f.block","f.twoway","lm.XvsZ","lm.YvsXZ","coxph.YvsXZ","t.cor","z.cor")
+  test<-TESTS[pmatch(test,TESTS)]
+  if(is.na(test)) stop(paste("Invalid test, try one of ",TESTS,sep=""))
+  #robust + see below with choice of nulldist
+  if(test=="coxph.YvsXZ" & robust==TRUE)
+    warning("No robust version of coxph.YvsXZ, proceding with usual version")
+  #temp until fix
+  if((test=="t.onesamp" | test=="t.pair") & robust==TRUE)
+    stop("Robust test statistics currently not available for one-sample or two-sample paired test statistics.")
+  #alternative
+  ALTS<-c("two.sided","less","greater")
+  alternative<-ALTS[pmatch(alternative,ALTS)]
+  if(is.na(alternative)) stop(paste("Invalid alternative, try one of ",ALTS,sep=""))
+  #null values
+  if(length(psi0)>1) stop(paste("In current implementation, all hypotheses must have the same null value. Number of null values: ",length(psi0),">1",sep=""))
+  #Error rate
+  ERROR<-c("fwer","gfwer","tppfp","fdr")
+  typeone<-ERROR[pmatch(typeone,ERROR)]
+  if(is.na(typeone)) stop(paste("Invalid typeone, try one of ",ERROR,sep=""))
+  if(any(alpha<0) | any(alpha>1)) stop("Nominal level alpha must be between 0 and 1")
+  nalpha<-length(alpha)
+  reject<-
+    if(test=="z.cor" | test=="t.cor") matrix(nrow=0,ncol=0) # deprecated for correlations, rownames now represent p choose 2 edges - too weird and clunky in current state for output.
+    else if(nalpha) array(dim=c(p,nalpha),dimnames=list(rownames(X),paste("alpha=",alpha,sep="")))
+    else matrix(nrow=0,ncol=0)
+  if(typeone=="gfwer"){
+    if(get.cr==TRUE) warning("Confidence regions not currently implemented for gFWER")
+    if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for gFWER")
+    get.cr<-get.cutoff<-FALSE
+    if(k<0) stop("Number of false positives can not be negative")
+    if(k>=p) stop(paste("Number of false positives must be less than number of tests=",p,sep=""))
+    if(length(k)>1){
+      k<-k[1]
+      warning("can only compute gfwer(k) adjp for one value of k at a time (using first value), try fwer2gfwer() function for multiple k")
+    }
+  }
+  if(typeone=="tppfp"){
+    if(get.cr==TRUE) warning("Confidence regions not currently implemented for TPPFP")
+    if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for TPPFP")
+    get.cr<-get.cutoff<-FALSE
+    if(q<0) stop("Proportion of false positives, q, can not be negative")
+    if(q>1) stop("Proportion of false positives, q, must be less than 1")
+    if(length(q)>1){
+      q<-q[1]
+      warning("Can only compute tppfp adjp for one value of q at a time (using first value), try fwer2tppfp() function for multiple q")
+    }
+  }
+  if(typeone=="fdr"){
+    if(!nalpha) stop("Must specify a nominal level alpha for control of FDR")
+    if(get.cr==TRUE) warning("Confidence regions not currently implemented for FDR")
+    if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for FDR")
+    get.cr<-get.cutoff<-FALSE
+  }		
+  #null distribution
+  NULLS<-c("boot","boot.cs","boot.ctr","boot.qt","ic","perm")
+  nulldist<-NULLS[pmatch(nulldist,NULLS)]
+  if(is.na(nulldist)) stop(paste("Invalid nulldist, try one of ",NULLS,sep=""))
+  if(nulldist=="boot"){
+    nulldist <- "boot.cs"
+    warning("nulldist='boot' is deprecated and now corresponds to 'boot.cs'. Proceeding with default center and scaled null distribution.")
+  }
+  if(nulldist!="perm" & test=="f.block") stop("f.block test only available with permutation null distribution. Try test=f.twoway")
+  if((nulldist=="perm" | nulldist=="ic") & keep.rawdist==TRUE) stop("Test statistics distribution estimation using keep.rawdist=TRUE is only available with a bootstrap-based null distribution")
+  if(nulldist=="boot.qt" & robust==TRUE) stop("Quantile transform method requires parametric marginal nulldist.  Set robust=FALSE")
+  if(nulldist=="boot.qt" & standardize==FALSE) stop("Quantile transform method requires standardized test statistics.  Set standardize=TRUE")
+  if(nulldist=="ic" & robust==TRUE) stop("Influence curve null distributions available only for (parametric) t-statistics.  Set robust=FALSE")
+  if(nulldist=="ic" & standardize==FALSE) stop("Influence curve null distributions available only for (standardized) t-statistics.  Set standardize=TRUE")
+  if(nulldist=="ic" & (test=="f" | test=="f.twoway" | test=="f.block" | test=="coxph.YvsXZ")) stop("Influence curve null distributions available only for tests of mean, regression and correlation parameters. Cox PH also not yet implemented.")
+  if(nulldist!="ic" & (test=="t.cor" | test=="z.cor")) stop("Tests of correlation parameters currently only implemented for influence curve null distributions")
+  if((test!="t.cor" & test!="z.cor") & keep.index) warning("Matrix of indices only returned for tests of correlation parameters")
+  ### specifically for sampling null test statistics with IC nulldist
+  MVNS <- c("mvrnorm","Cholesky")
+  MVN.method <- MVNS[pmatch(MVN.method,MVNS)]
+  if(is.na(MVN.method)) stop("Invalid sampling method for IC-based MVN null test statistics.  Try either 'mvrnorm' or 'Cholesky'")
+  #methods
+  METHODS<-c("ss.maxT","ss.minP","sd.maxT","sd.minP")
+  method<-METHODS[pmatch(method,METHODS)]
+  if(is.na(method)) stop(paste("Invalid method, try one of ",METHODS,sep=""))
+  #estimate and conf.reg
+  ftest<-FALSE
+  if(test=="f" | test=="f.block"){
+    ftest<-TRUE
+    if(get.cr) stop("Confidence intervals not available for F tests, try get.cr=FALSE")
+    if(!is.null(W)) warning("Weighted F tests not yet implemented, proceeding with the unweighted version")
+  }
+
+  
+  #permutation null distribution - self contained in this if statement
+  if(nulldist=="perm"){
+    if(method=="ss.minP" | method=="ss.maxT") stop("Only step-down procedures are currently available with permutation nulldist")
+    if(smooth.null) warning("Kernel density p-values not available with permutation nulldist")
+    if(get.cr) warning("Confidence regions not available with permutation nulldist")
+    if(get.cutoff) warning("Cut-offs not available with permutation nulldist")
+    #if(keep.nulldist) warning("keep.nulldist not available with permutation nulldist")
+    ptest<-switch(test,
+                  t.onesamp=stop("One sample t-test not available with permutation nulldist"),
+                  t.twosamp.equalvar=ifelse(robust,"wilcoxon","t.equalvar"),
+                  t.twosamp.unequalvar="t",
+                  t.pair="pairt",
+                  f="f",
+                  f.block="blockf",
+                  f.twoway=stop("f.twoway not available with permutation nulldist"),
+                  lm.XvsZ=stop("lm.XvsZ not available with permutation nulldist"),
+                  lm.YvsXZ=stop("lm.YvsXZ not available with permutation nulldist"),
+                  coxph.YvsXZ=stop("coxph.YvsXZ not available with permutation nulldist"),
+                  t.cor=stop("t.cor not available with permutation nulldist"),
+                  z.cor=stop("z.cor not available with permutation nulldist")
+                  )
+    pside<-switch(alternative,two.sided="abs",less="lower",greater="upper")
+    pnonpara<-
+      if(robust)"y"
+      else "n"
+    if(any(is.na(Y))){
+      bad<-is.na(Y)
+      Y<-Y[!bad]
+      X<-X[,!bad]
+      warning("No NAs allowed in Y, these observations have been removed.")
+    }
+    presult<-switch(method,
+                    sd.maxT=mt.maxT(X,classlabel=Y,test=ptest,side=pside,B=B,nonpara=pnonpara),
+                    sd.minP=mt.minP(X,classlabel=Y,test=ptest,side=pside,B=B,nonpara=pnonpara)
+                    )
+    if(typeone=="fwer" & nalpha){
+      for(a in 1:nalpha) reject[,a]<-(presult$adjp<=alpha[a])
+    }
+    if(typeone=="gfwer"){
+      presult$adjp<-fwer2gfwer(presult$adjp,k)
+      if(nalpha){
+        for(a in 1:nalpha) reject[,a]<-(presult$adjp<=alpha[a])
+      }
+      if(!get.adjp)
+        presult$adjp<-vector("numeric",0)
+    }
+    if(typeone=="tppfp"){
+      presult$adjp<-fwer2tppfp(presult$adjp,q)
+      if(nalpha){
+        for(a in 1:nalpha) reject[,a]<-(presult$adjp<=alpha[a])
+      }
+      if(!get.adjp)
+        presult$adjp<-vector("numeric",0)
+    }
+    if(typeone=="fdr"){
+      temp<-fwer2fdr(presult$adjp,fdr.method,alpha)
+      reject<-temp$reject
+      if(!get.adjp) presult$adjp<-vector("numeric",0)
+      else presult$adjp<-temp$adjp
+      rm(temp)
+    }			
+    #output results
+    orig<-order(presult$index)
+    if(keep.label) label <- as.numeric(Y)
+    else label <- vector("numeric",0)
+    out<-new("MTP",statistic=presult$teststat[orig],estimate=vector("numeric",0),sampsize=n,rawp=presult$rawp[orig],adjp=presult$adjp[orig],conf.reg=array(dim=c(0,0,0)),cutoff=matrix(nrow=0,ncol=0),reject=as.matrix(reject[orig,]),rawdist=matrix(nrow=0,ncol=0),nulldist=matrix(nrow=0,ncol=0),nulldist.type="perm",marg.null=vector("character",0),marg.par=matrix(nrow=0,ncol=0),label=label,index=matrix(nrow=0,ncol=0),call=match.call(),seed=vector("integer",0))
+  }
+  
+  else{ # This should apply to all other MTP calls using the bootstrap and IC nulldists.
+    if(nulldist=="boot.qt"){ # get parameter vals for quantile transform.
+      # Get parameter values for the quantile transformed nulldist
+      if(!is.null(marg.par)){
+        if(is.matrix(marg.par)) marg.par <- marg.par
+        if(is.vector(marg.par)) marg.par <- matrix(rep(marg.par,p),nrow=p,ncol=length(marg.par),byrow=TRUE)
+        }
+      if(is.null(ncp)) ncp = 0
+      if(!is.null(perm.mat)){ 
+        if(dim(X)[1]!=dim(perm.mat)[1]) stop("perm.mat must have the same number of rows as X.")
+        }
+    
+      nstats <- c("t.twosamp.unequalvar","z.cor","lm.XvsZ","lm.YvsXZ","coxph.YvsXZ")
+      tstats <- c("t.onesamp","t.twosamp.equalvar","t.pair","t.cor")
+      fstats <- c("f","f.block","f.twoway")
+      
+      # If default , set values of marg.null to pass on.
+      if(is.null(marg.null)){
+	  if(any(nstats == test)) marg.null="normal"
+	  if(any(tstats == test)) marg.null="t"
+	  if(any(fstats == test)) marg.null="f"
+        }
+      else{ # Check to see that user-supplied entries make sense.  
+        MARGS <- c("normal","t","f","perm")
+        marg.null <- MARGS[pmatch(marg.null,MARGS)]
+        if(is.na(marg.null)) stop("Invalid marginal null distribution. Try one of: normal, t, f, or perm")
+        if(any(tstats==test) & marg.null == "f") stop("Choice of test stat and marginal nulldist do not match")
+        if(any(fstats==test) & (marg.null == "normal" | marg.null=="t")) stop("Choice of test stat and marginal nulldist do not match")
+        if(marg.null=="perm" & is.null(perm.mat)) stop("Must supply a matrix of permutation test statistics if marg.null='perm'")
+        if(marg.null=="f" & ncp < 0) stop("Cannot have negative noncentrality parameter with F distribution.")
+      }
+    
+      # If default (=NULL), set values of marg.par. Return as m by 1 or 2 matrix.
+      if(is.null(marg.par)){
+		marg.par <- switch(test,
+                          t.onesamp = n-1,
+                          t.twosamp.equalvar = n-2,
+                          t.twosamp.unequalvar = c(0,1),
+                          t.pair = floor(n/2-1),
+                          f = c(sum(is.finite(unique(Y)))-1,dim(X)[2]- sum(is.finite(unique(Y))) ),
+                          f.twoway = {
+                            c(sum(is.finite(unique(Y)))-1, dim(X)[2]-(sum(is.finite(unique(Y)))*length(gregexpr('12', paste(Y, collapse=""))[[1]]))-2)
+                            },
+                          lm.XvsZ = c(0,1),
+                          lm.YvsXZ = c(0,1),
+                          coxph.YvsXZ = c(0,1),
+                          t.cor = n-2,
+                          z.cor = c(0,1)
+                          )
+      marg.par <- matrix(rep(marg.par,dim(X)[1]),nrow=dim(X)[1],ncol=length(marg.par),byrow=TRUE)
+              }
+     else{ # Check that user-supplied values of marg.par make sense (marg.par != NULL)
+       if((marg.null=="t" | marg.null=="f") & any(marg.par[,1]==0)) stop("Cannot have zero df with t or F distributions. Check marg.par settings")
+       if(marg.null=="t" & dim(marg.par)[2]>1) stop("Too many parameters for t distribution.  marg.par should have length 1.")
+       if((marg.null=="f" | marg.null=="normal") & dim(marg.par)[2]!=2) stop("Incorrect number of parameters defining marginal null distribution.  marg.par should have length 2.")
+     }
+    }
+
+    ##making a closure for the particular test
+    theta0<-0
+    tau0<-1
+    stat.closure<-switch(test,
+                         t.onesamp=meanX(psi0,na.rm,standardize,alternative,robust),
+                         t.twosamp.equalvar=diffmeanX(Y,psi0,var.equal=TRUE,na.rm,standardize,alternative,robust),
+                         t.twosamp.unequalvar=diffmeanX(Y,psi0,var.equal=FALSE,na.rm,standardize,alternative,robust),
+                         t.pair={
+                           uY<-sort(unique(Y))
+                           if(length(uY)!=2) stop("Must have two class labels for this test")
+                           if(trunc(ncol(X)/2)!=ncol(X)/2) stop("Must have an even number of samples for this test")
+                           X<-X[,Y==uY[2]]-X[,Y==uY[1]]
+                           Y<-NULL
+                           n<-dim(X)[2]
+                           meanX(psi0,na.rm,standardize,alternative,robust)
+                         },
+                         f={
+                           theta0<-1
+                           tau0<-2/(length(unique(Y))-1)
+                           FX(Y,na.rm,robust)
+                         },
+                         f.twoway={
+                           theta0<-1
+                           tau0 <- 2/((length(unique(Y))*length(gregexpr('12', paste(Y, collapse=""))[[1]]))-1)
+                           twowayFX(Y,na.rm,robust)
+                         },
+                         lm.XvsZ=lmX(Z,n,psi0,na.rm,standardize,alternative,robust),
+                         lm.YvsXZ=lmY(Y,Z,n,psi0,na.rm,standardize,alternative,robust),
+                         coxph.YvsXZ=coxY(Y,Z,psi0,na.rm,standardize,alternative),
+                         t.cor=NULL,
+                         z.cor=NULL)
+    ##computing observed test statistics
+    if(test=="t.cor" | test=="z.cor") obs<-corr.Tn(X,test=test,alternative=alternative,use="pairwise")
+    else obs<-get.Tn(X,stat.closure,W)
+    ##or computing influence curves
+    if(nulldist=="ic"){
+      rawdistn <- matrix(nrow=0,ncol=0)
+      nulldistn<-switch(test,
+                        t.onesamp=corr.null(X,W,Y,Z,test="t.onesamp",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.pair=corr.null(X,W,Y,Z,test="t.pair",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.twosamp.equalvar=corr.null(X,W,Y,Z,test="t.twosamp.equalvar",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.twosamp.unequalvar=corr.null(X,W,Y,Z,test="t.twosamp.unequalvar",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        lm.XvsZ=corr.null(X,W,Y,Z,test="lm.XvsZ",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        lm.YvsXZ=corr.null(X,W,Y,Z,test="lm.YvsXZ",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        t.cor=corr.null(X,W,Y,Z,test="t.cor",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat),
+                        z.cor=corr.null(X,W,Y,Z,test="z.cor",alternative,use="pairwise",B,MVN.method,penalty,ic.quant.trans,marg.null,marg.par,perm.mat)
+                        )
+    }
+
+    ## Cluster Checking
+    if ((!is.numeric(cluster))&(!inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster"))))
+       stop("Cluster argument must be integer or cluster object")
+    ## Create cluster if cluster > 1 and load required packages on nodes
+    if(is.numeric(cluster)){
+      if(cluster>1){
+    ## Check installation of packages
+      have_snow <- qRequire("snow")
+      if(!have_snow) stop("The package snow is required to use a cluster. Either snow is not installed or it is not in the standard library location.")
+      if (is.null(type))
+         stop("Must specify type argument to use a cluster. Alternatively, provide a cluster object as the argument to cluster.")
+      if (type=="SOCK")
+         stop("Create desired cluster and specify cluster object as the argument to cluster directly.")
+      if ((type!="PVM")&(type!="MPI"))
+         stop("Type must be MPI or PVM")
+      else if (type=="MPI"){
+         have_rmpi <- qRequire("Rmpi")
+         if(!have_rmpi) stop("The package Rmpi is required for the specified type. Either Rmpi is not installed or it is not in the standard library location.")
+      }
+      else if (type=="PVM"){
+         have_rpvm <- qRequire("rpvm")
+         if(!have_rpvm) stop("The package rpvm is required for the specified type. Either rpvm is not installed or it is not in the standard library location.")
+      }
+      cluster <- makeCluster(cluster, type)
+      clusterEvalQ(cluster, {library(Biobase); library(multtest)})
+      if (is.null(dispatch)) dispatch=0.05
+      }
+    }
+    else if(inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster"))){
+      clusterEvalQ(cluster, {library(Biobase); library(multtest)})
+      if (is.null(dispatch)) dispatch=0.05
+    }
+
+    ##computing the nonparametric bootstrap (null) distribution
+    if(nulldist=="boot.cs" | nulldist=="boot.ctr" | nulldist=="boot.qt"){
+      nulldistn<-boot.null(X,Y,stat.closure,W,B,test,nulldist,theta0,tau0,marg.null,marg.par,ncp,perm.mat,alternative,seed,cluster,dispatch,keep.nulldist,keep.rawdist)
+     if(inherits(cluster,c("MPIcluster", "PVMcluster", "SOCKcluster")))  stopCluster(cluster)
+    rawdistn <- nulldistn$rawboot
+    nulldistn <- nulldistn$muboot
+    }
+
+    
+    ##performing multiple testing
+    #rawp values
+    rawp<-apply((obs[1,]/obs[2,])<=nulldistn,1,mean)
+    if(smooth.null & (min(rawp,na.rm=TRUE)==0)){
+      zeros<-(rawp==0)
+      if(sum(zeros)==1){
+        den<-density(nulldistn[zeros,],to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+	rawp[zeros]<-sum(den$y[den$x>=(obs[1,zeros]/obs[2,zeros])])/sum(den$y)
+      }
+      else{
+        den<-apply(nulldistn[zeros,],1,density,to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+	newp<-NULL
+	stats<-obs[1,zeros]/obs[2,zeros]
+	for(i in 1:length(den)){
+          newp[i]<-sum(den[[i]]$y[den[[i]]$x>=stats[i]])/sum(den[[i]]$y)
+	}
+        rawp[zeros]<-newp		
+      }
+      rawp[rawp<0]<-0
+    }
+    #c, cr, adjp values
+    pind<-ifelse(typeone!="fwer",TRUE,get.adjp)
+    if(method=="ss.maxT") out<-ss.maxT(nulldistn,obs,alternative,get.cutoff,get.cr,pind,alpha)
+    if(method=="ss.minP") out<-ss.minP(nulldistn,obs,rawp,alternative,get.cutoff,get.cr,pind,alpha)
+    if(method=="sd.maxT") out<-sd.maxT(nulldistn,obs,alternative,get.cutoff,get.cr,pind,alpha)
+    if(method=="sd.minP") out<-sd.minP(nulldistn,obs,rawp,alternative,get.cutoff,get.cr,pind,alpha)
+    if(typeone=="fwer" & nalpha & (test!="t.cor" & test !="z.cor")){
+      for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+    }
+    #augmentation procedures
+    if(typeone=="gfwer"){
+      out$adjp<-as.numeric(fwer2gfwer(out$adjp,k))
+      out$c<-matrix(nrow=0,ncol=0)
+      out$cr<-array(dim=c(0,0,0))
+      if(nalpha){
+        for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+      }
+      if(!get.adjp) out$adjp<-vector("numeric",0)
+    }
+    if(typeone=="tppfp"){
+      out$adjp<-as.numeric(fwer2tppfp(out$adjp,q))
+      out$c<-matrix(nrow=0,ncol=0)
+      out$cr<-array(dim=c(0,0,0))
+      if(nalpha){
+        for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+      }
+      if(!get.adjp) out$adjp<-vector("numeric",0)
+    }
+    if(typeone=="fdr"){
+      out$c<-matrix(nrow=0,ncol=0)
+      out$cr<-array(dim=c(0,0,0))
+      temp<-fwer2fdr(out$adjp,fdr.method,alpha)
+      reject<-temp$reject
+      if(!get.adjp) out$adjp<-vector("numeric",0)
+      else out$adjp<-temp$adjp
+      rm(temp)
+    }
+    #output results
+    if(!keep.nulldist) nulldistn<-matrix(nrow=0,ncol=0)
+    if(!keep.rawdist) rawdistn<-matrix(nrow=0,ncol=0)
+    if(nulldist!="boot.qt"){  
+      marg.null <- vector("character")
+      marg.par <- matrix(nrow=0,ncol=0)
+    }
+    if(!keep.label) label <- vector("numeric",0)
+    if(!keep.index) index <- matrix(nrow=0,ncol=0)
+    if(test!="z.cor" & test !="t.cor") index <- matrix(nrow=0,ncol=0)
+    if(keep.index & (test=="z.cor" | test=="t.cor")){
+      index <- t(combn(p,2))
+      colnames(index) <- c("Var1","Var2")
+    }
+    names(out$adjp)<-names(rawp)
+    estimates <- obs[3,]*obs[1,]
+    if(ftest) estimates <- vector("numeric",0)
+    if(test=="t.onesamp" | test=="t.pair") estimates <- obs[3,]*obs[1,]/sqrt(n)
+    out<-new("MTP",statistic=(obs[3,]*obs[1,]/obs[2,]),
+      estimate=estimates,
+      sampsize=n,rawp=rawp,adjp=out$adjp,conf.reg=out$cr,cutoff=out$c,reject=reject,
+      rawdist=rawdistn,nulldist=nulldistn,nulldist.type=nulldist,
+      marg.null=marg.null,marg.par=marg.par,label=label,index=index,
+      call=match.call(),seed=as.integer(seed))
+  }
+  return(out)
+}
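+
+## Illustrative sketch (not part of upstream): a small two-sample call on the
+## golub data shipped with the package (golub.cl holds the class labels), along
+## the lines of the vignettes.
+if(FALSE){
+  data(golub)
+  m <- MTP(X=golub[1:100,], Y=golub.cl, test="t.twosamp.unequalvar",
+           typeone="fwer", alpha=0.05, B=100, method="sd.minP")
+  m@adjp[1:5]
+}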
+
+#functions to compute cutoffs and adjusted p-values
+
+ss.maxT<-function(null,obs,alternative,get.cutoff,get.cr,get.adjp,alpha=0.05){
+  p<-dim(null)[1]
+  B<-dim(null)[2]
+  nalpha<-length(alpha)
+  mT<-apply(null,2,max)
+  getc<-matrix(nrow=0,ncol=0)
+  getcr<-array(dim=c(0,0,0))
+  getp<-vector(mode="numeric")
+  if(get.cutoff | get.cr){
+    getc<-array(dim=c(p,nalpha),dimnames=list(dimnames(null)[[1]],paste("alpha=",alpha,sep="")))
+    if(get.cr) getcr<-array(dim=c(p,2,nalpha),dimnames=list(dimnames(null)[[1]],c("LB","UB"),paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha){
+      getc[,a]<-rep(quantile(mT,pr=(1-alpha[a])),p)
+      if(get.cr) getcr[,,a]<-cbind(ifelse(rep(alternative=="less",p),rep(-Inf,p),obs[3,]*obs[1,]-getc[,a]*obs[2,]),ifelse(rep(alternative=="greater",p),rep(Inf,p),obs[3,]*obs[1,]+getc[,a]*obs[2,]))
+    }
+  }
+  if(get.adjp) getp<-apply((obs[1,]/obs[2,])<=matrix(mT,nrow=p,ncol=B,byrow=TRUE),1,mean)
+  if(!get.cutoff) getc<-matrix(nrow=0,ncol=0)
+  list(c=getc,cr=getcr,adjp=getp)
+}
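+
+## Illustrative sketch (not part of upstream): single-step maxT adjusted
+## p-values compare each observed statistic to the column maxima of the null
+## distribution matrix.
+if(FALSE){
+  null <- matrix(rnorm(10*1000), nrow=10)               # 10 hypotheses, B=1000
+  obs <- rbind(abs(rnorm(10)), rep(1,10), rep(1,10))    # num, denom, sign
+  ss.maxT(null, obs, alternative="two.sided",
+          get.cutoff=FALSE, get.cr=FALSE, get.adjp=TRUE)$adjp
+}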
+
+ss.minP<-function(null,obs,rawp,alternative,get.cutoff,get.cr,get.adjp,alpha=0.05){
+  p<-dim(null)[1]
+  B<-dim(null)[2]
+  nalpha<-length(alpha)
+  getc<-matrix(nrow=0,ncol=0)
+  getcr<-array(dim=c(0,0,0))
+  getp<-vector(mode="numeric")
+  R<-apply(null,1,rank)
+  if(get.cutoff | get.cr){
+    getc<-array(dim=c(p,nalpha),dimnames=list(dimnames(null)[[1]],paste("alpha=",alpha,sep="")))
+    if(get.cr) getcr<-array(dim=c(p,2,nalpha),dimnames=list(dimnames(null)[[1]],c("LB","UB"),paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha){
+      q<-quantile(apply(R,1,max),1-alpha[a])
+      for(j in 1:p){
+        getc[j,a]<-min(c(null[j,R[,j]>=q],max(null[j,])))
+      }
+      if(get.cr) getcr[,,a]<-cbind(ifelse(rep(alternative=="less",p),rep(-Inf,p),obs[3,]*obs[1,]-getc[,a]*obs[2,]),ifelse(rep(alternative=="greater",p),rep(Inf,p),obs[3,]*obs[1,]+getc[,a]*obs[2,]))
+    }
+  }
+  if(get.adjp){
+    R<-matrix(apply((B+1-R)/B,1,min),nrow=p,ncol=B,byrow=TRUE)
+    getp<-apply(rawp>=R,1,mean)
+  }
+  if(!get.cutoff) getc<-matrix(nrow=0,ncol=0)
+  list(c=getc,cr=getcr,adjp=getp)
+}
+
+sd.maxT<-function(null,obs,alternative,get.cutoff,get.cr,get.adjp,alpha=0.05){
+  p<-dim(null)[1]
+  B<-dim(null)[2]
+  nalpha<-length(alpha)
+  ord<-rev(order(obs[1,]/obs[2,]))
+  mT<-null[ord[p],]
+  getc<-matrix(nrow=0,ncol=0)
+  getcr<-array(dim=c(0,0,0))
+  getp<-vector(mode="numeric")
+  if(get.cutoff | get.cr){
+    getc<-array(dim=c(p,nalpha),dimnames=list(dimnames(null)[[1]],paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha) getc[ord[p],a]<-quantile(mT,pr=1-alpha[a])
+  }
+  if(get.adjp) getp[ord[p]]<-mean((obs[1,]/obs[2,])[ord[p]]<=mT)
+  for(j in (p-1):1){
+    mT<-pmax(mT,null[ord[j],])
+    if(get.adjp) getp[ord[j]]<-mean((obs[1,ord[j]]/obs[2,ord[j]])<=mT)
+    if(get.cutoff | get.cr){
+      for(a in 1:nalpha) getc[ord[j],a]<-quantile(mT,pr=(1-alpha[a]))
+    }
+  }
+  c.ind<-rep(TRUE,nalpha)
+  for(j in 2:p){
+    if(get.adjp) getp[ord[j]]<-max(getp[ord[j]],getp[ord[j-1]])
+    if(get.cutoff | get.cr){
+      for(a in 1:nalpha){
+        if(c.ind[a]){
+          if((obs[1,]/obs[2,])[ord[j-1]]<=getc[ord[j-1],a]){
+            getc[ord[j:p],a]<-Inf
+            c.ind[a]<-FALSE
+          }
+	}
+      }
+    }
+  }
+  if(get.cr){
+    getcr<-array(dim=c(p,2,nalpha),dimnames=list(dimnames(null)[[1]],c("LB","UB"),paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha){
+      getcr[,,a]<-cbind(ifelse(rep(alternative=="less",p),rep(-Inf,p),obs[3,]*obs[1,]-getc[,a]*obs[2,]),ifelse(rep(alternative=="greater",p),rep(Inf,p),obs[3,]*obs[1,]+getc[,a]*obs[2,]))
+    }
+  }
+  if(!get.cutoff) getc<-matrix(nrow=0,ncol=0)
+  list(c=getc,cr=getcr,adjp=getp)
+}
+
+sd.minP<-function(null,obs,rawp,alternative,get.cutoff,get.cr,get.adjp,alpha=0.05){
+  p<-dim(null)[1]
+  B<-dim(null)[2]
+  nalpha<-length(alpha)
+  ord<-order(rawp)
+  R<-apply(null,1,rank) #B x p
+  mR<-R[,ord[p]]
+  getc<-matrix(nrow=0,ncol=0)
+  getcr<-array(dim=c(0,0,0))
+  getp<-vector(mode="numeric")
+  if(get.cutoff | get.cr){
+    getc<-array(dim=c(p,nalpha),dimnames=list(dimnames(null)[[1]],paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha){
+      q<-quantile(mR,pr=1-alpha[a])
+      getc[ord[p],a]<-min(c(null[ord[p],R[,ord[p]]>=q],max(null[ord[p],])))
+    }
+  }
+  if(get.adjp){
+    mP<-(B+1-mR)/B
+    getp[ord[p]]<-mean(rawp[ord[p]]>=mP)
+  }
+  for(j in (p-1):1){
+    mR<-pmax(mR,R[,ord[j]])
+    if(get.adjp){
+      mP<-(B+1-mR)/B
+      getp[ord[j]]<-mean(rawp[ord[j]]>=mP)
+    }
+    if(get.cutoff | get.cr){
+      for(a in 1:nalpha){
+        q<-quantile(mR,pr=1-alpha[a])
+	getc[ord[j],a]<-min(c(null[ord[j],R[,ord[j]]>=q],max(null[ord[j],])))
+      }
+    }
+  }
+  c.ind<-rep(TRUE,nalpha)
+  for(j in 2:p){
+    if(get.adjp) getp[ord[j]]<-max(getp[ord[j]],getp[ord[j-1]])
+    if(get.cutoff | get.cr){
+      for(a in 1:nalpha){
+        if(c.ind[a]){
+          if((obs[1,]/obs[2,])[ord[j-1]]<=getc[ord[j-1],a]){
+            getc[ord[j:p],a]<-Inf
+            c.ind[a]<-FALSE
+          }
+	}
+      }
+    }
+  }
+  if(get.cr){
+    getcr<-array(dim=c(p,2,nalpha),dimnames=list(dimnames(null)[[1]],c("LB","UB"),paste("alpha=",alpha,sep="")))
+    for(a in 1:nalpha){
+      getcr[,,a]<-cbind(ifelse(rep(alternative=="less",p),rep(-Inf,p),obs[3,]*obs[1,]-getc[,a]*obs[2,]),ifelse(rep(alternative=="greater",p),rep(Inf,p),obs[3,]*obs[1,]+getc[,a]*obs[2,]))
+    }
+  }
+  if(!get.cutoff) getc<-matrix(nrow=0,ncol=0)
+  list(c=getc,cr=getcr,adjp=getp)
+}
+
+#functions to convert FWER adjp to AMTP (gFWER, TPPFP) adjp:
+fwer2gfwer<-function(adjp,k=0){
+  ord<-order(adjp)
+  m<-length(adjp)
+  if(any(k>=m)) stop(paste("number of rejections k=",k," must be less than number of hypotheses=",m,sep=""))
+  newp<-NULL
+  for(j in k) newp<-rbind(newp,c(rep(0,j),adjp[ord[1:(m-j)]]))
+  rownames(newp)<-k
+  colnames(newp)<-ord
+  newp<-matrix(newp[,order(ord)],ncol=m,byrow=FALSE)
+  return(t(newp))
+}
+
+fwer2tppfp<-function(adjp,q=0.05){
+  ord<-order(adjp)
+  m<-length(adjp)
+  newp<-NULL
+  if(any(q>1)|any(q<0)) stop(paste("proportion of false positives q=",q," must be in [0,1]",sep=""))
+  for(l in q) newp<-rbind(newp,adjp[ord][ceiling((1:m)*(1-l))])
+  rownames(newp)<-q
+  colnames(newp)<-names(ord)
+  newp<-matrix(newp[,order(ord)],ncol=m,byrow=FALSE)
+  return(t(newp))
+}
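+
+## Illustrative sketch (not part of upstream): augmenting FWER adjusted
+## p-values into gFWER(k) and TPPFP(q) adjusted p-values.
+if(FALSE){
+  adjp <- sort(runif(20))
+  fwer2gfwer(adjp, k=c(1,2))   # 20 x 2 matrix, one column per k
+  fwer2tppfp(adjp, q=0.1)      # 20 x 1 matrix
+}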
+
+#function to compute rejection indicator for FDR methods
+fwer2fdr<-function(adjp,method="both",alpha=0.05){
+  get.cons<-function(adjp,alpha,ord,M,nalpha){
+    newp<-NULL
+    for(m in 1:M){
+      #try ceiling/floor
+      k<-if(m%%2) 0:((m-1)/2) else 0:(m/2)
+      f<-2*adjp[ord][m-k]
+      u<-2*(k+1)/m
+      l<-2*k/m
+      if(sum(f<=u)){
+        ind<-min(which(f<=u))
+	newp[ord[m]]<-
+	if(f[ind]>=l[ind]) f[ind]
+	else l[ind]
+      }
+      else newp[ord[m]]<-1
+    }
+    newp[newp>1]<-1
+    a<-alpha/2
+    rejections<-matrix(nrow=M,ncol=nalpha)
+    for(i in 1:nalpha) rejections[,i]<-(fwer2tppfp(adjp,a[i])<=a[i])
+    return(list(reject=rejections,adjp=newp))
+  }
+  get.restr<-function(adjp,alpha,ord,M,nalpha){
+    newp<-NULL
+    ginv<-function(x) 1-(1-x)^2
+    for(m in 1:M){
+      k<-m:1
+      f<-adjp[ord][k]
+      u<-1-(k-1)/m
+      l<-1-k/m
+      if(sum(f<=u)){
+        ind<-min(which(f<=u))
+	newp[ord[m]]<-
+          if(f[ind]>=l[ind]) ginv(f[ind])
+          else ginv(l[ind])
+      }
+      else
+	newp[ord[m]]<-1
+    }
+    newp[newp>1]<-1
+    a<-1-sqrt(1-alpha)
+    rejections<-matrix(nrow=M,ncol=nalpha)
+    for(i in 1:nalpha) rejections[,i]<-(fwer2tppfp(adjp,a[i])<=a[i])
+    return(list(reject=rejections,adjp=newp))
+  }
+  ord<-order(adjp)
+  nalpha<-length(alpha)
+  M<-length(adjp)
+  if(method=="both"){
+    rejections<-array(dim=c(M,nalpha,2),dimnames=list(NULL,paste("alpha=",alpha,sep=""),c("conservative","restricted")))
+    newp<-matrix(nrow=M,ncol=2,dimnames=list(NULL,c("conservative","restricted")))
+    temp<-get.cons(adjp,alpha,ord,M,nalpha)
+    rejections[,,"conservative"]<-temp$reject
+    newp[,"conservative"]<-temp$adjp
+    temp<-get.restr(adjp,alpha,ord,M,nalpha)
+    rejections[,,"restricted"]<-temp$reject
+    newp[,"restricted"]<-temp$adjp
+    rm(temp)
+  }
+  else{
+    rejections<-matrix(nrow=M,ncol=nalpha,dimnames=list(NULL,paste("alpha=",alpha,sep="")))
+    newp<-NULL
+    if(method=="conservative") temp<-get.cons(adjp,alpha,ord,M,nalpha)
+    else temp<-get.restr(adjp,alpha,ord,M,nalpha)
+    rejections<-temp$reject
+    newp<-temp$adjp
+    rm(temp)
+  }
+  return(list(reject=rejections,adjp=newp))
+}
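+
+## Illustrative sketch (not part of upstream): rejection indicators and FDR
+## adjusted p-values for both augmentation variants at two nominal levels.
+if(FALSE){
+  adjp <- sort(runif(20))
+  out <- fwer2fdr(adjp, method="both", alpha=c(0.05, 0.1))
+  dim(out$reject)   # 20 x 2 x 2: hypotheses x alpha x method
+}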
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100755
index 0000000..2cb7072
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,692 @@
+setClass("MTP",representation(statistic="numeric",
+                              estimate="numeric",
+                              sampsize="numeric",
+                              rawp="numeric",
+                              adjp="numeric",
+                              conf.reg="array",
+                              cutoff="matrix",
+                              reject="matrix",
+                              rawdist="matrix",
+                              nulldist="matrix",
+                              nulldist.type="character",
+                              marg.null="character",
+                              marg.par="matrix",
+                              label="numeric",
+                              index="matrix",
+                              call="call",
+                              seed="integer"),
+         prototype=list(statistic=vector("numeric",0),
+         estimate=vector("numeric",0),
+         sampsize=vector("numeric",0),
+         rawp=vector("numeric",0),
+         adjp=vector("numeric",0),
+         conf.reg=array(),
+         cutoff=matrix(nrow=0,ncol=0),
+         reject=matrix(nrow=0,ncol=0),
+         rawdist=matrix(nrow=0,ncol=0),
+         nulldist=matrix(nrow=0,ncol=0),
+         nulldist.type=vector("character",0),
+         marg.null=vector("character",0),
+         marg.par=matrix(nrow=0,ncol=0),
+         label=vector("numeric",0),
+         index=matrix(nrow=0,ncol=0),
+         call=NULL,
+         seed=vector("integer",0)))
+
+
+if( !isGeneric("mtp2ebmtp") )
+    setGeneric("mtp2ebmtp", function(object, ...) standardGeneric("mtp2ebmtp"))
+
+setMethod("mtp2ebmtp","MTP",
+          function(object,...){
+            y<-new("EBMTP")
+            slot(y,"statistic") <- object@statistic
+            slot(y,"estimate") <- object@estimate
+            slot(y,"sampsize") <- object@sampsize
+            slot(y,"rawp") <- object@rawp
+            slot(y,"adjp") <- object@adjp
+            slot(y,"reject") <- object@reject
+            slot(y,"rawdist") <- object@rawdist
+            slot(y,"nulldist") <- object@nulldist
+            slot(y,"nulldist.type") <- object@nulldist.type
+            slot(y,"marg.null") <- object@marg.null
+            slot(y,"marg.par") <- object@marg.par
+            slot(y,"label") <- object@label
+            slot(y,"index") <- object@index
+            slot(y,"call") <- object@call
+            slot(y,"seed") <- object@seed
+            invisible(y)
+          }
+          )
+
+if( !isGeneric("plot") ) setGeneric("plot", function(x, y, ...) standardGeneric("plot"))
+
+setMethod("plot","MTP",
+	function(x,y="missing",which=1:4,caption=c("Rejections vs. Error Rate",
+                                           "Ordered Adjusted p-values","Adjusted p-values vs. Statistics",
+                                           "Unordered Adjusted p-values","Estimates & Confidence Regions",
+                                           "Test Statistics & Cut-offs"),sub.caption = deparse(x at call,width.cutoff=500),
+                   ask = prod(par("mfcol"))<length(which)&&dev.interactive(),
+                   logscale=FALSE,top=10,...){
+          call.list<-as.list(x at call)
+          if(!inherits(x,"MTP")) stop("Use only with 'MTP' objects")
+          if(is.null(which)) which<-1:6
+          if(length(caption)==1) caption<-rep(caption,6)
+          if(length(x at adjp)==0 & any(which)) stop("plot methods require adjusted p-values")
+          if(length(x at conf.reg)==0 & any(which==5)) stop("plot method 5 requires confidence regions")
+          if(length(x at cutoff)==0 & any(which==6)) stop("plot method 6 requires cut-offs")
+          if(!is.numeric(which) || any(which<1) || any(which>6)) stop("which must be in 1:6")
+          show<-rep(FALSE,6)
+          show[which]<-TRUE
+          m<-length(x at adjp)
+          if(top>m){
+            warning("number of top hypotheses to plot exceeds total number of hypotheses - plotting less than requested number")
+            top<-m
+          }
+	  ord<-order(x at adjp)
+          if(any(show[2:4]) & logscale){
+            pv<-(-log(x at adjp,10))
+            pvlab<-"-log (base 10) Adjusted p-values"
+          }
+          else{
+            pv<-x at adjp
+            pvlab<-"Adjusted p-values"
+          }
+          one.fig<-prod(par("mfcol"))==1
+          if(ask){
+            op<-par(ask=TRUE)
+            on.exit(par(op))
+          }
+          if(show[1]){
+            nominal<-seq(0,1,by=0.05)
+            r<-mt.reject(x at adjp,nominal)$r
+            matplot(nominal,r,xlab="Type I error rate",
+                    ylab="Number of rejected hypotheses",
+                    type="l",...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[1],3,0.25)
+          }
+          if(show[2]){
+            spval<-sort(pv)
+            matplot(1:m,spval,xlab="Number of rejected hypotheses",
+                    ylab=paste("Sorted",pvlab,sep=" "),type="l",...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[2],3,0.25)
+          }
+          if(show[3]){
+            symb<-ifelse(length(pv)<100,"o",".")
+            matplot(x at statistic,pv,xlab="Test statistics",
+                    ylab=pvlab,type="p",pch=symb,...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[3],3,0.25)
+          }
+          if(show[4]){
+            matplot(1:m,pv,xlab="Index",ylab=pvlab,type = "l", ...)
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[4],3,0.25)
+          }
+          if(show[5]){
+            if(is.null(call.list$test)) call.list$test<-"t.twosamp.unequalvar"
+            if(call.list$test=="f" | call.list$test=="f.block") stop("Plot 5 requires confidence intervals, which are not available with F tests")
+            topp<-ord[1:top]
+            plot(c(1,top),range(c(x at estimate[topp],x at conf.reg[topp,,]),finite=TRUE,na.rm=TRUE),type="n",xlab="Most Significant Hypotheses",ylab="Estimates")
+            points(1:top,x at estimate[topp],pch="o")
+            nominal<-eval(call.list$alpha)
+            if(is.null(nominal)) nominal<-0.05
+            for(a in 1:length(nominal)){
+              text(1:top,x at conf.reg[topp,1,a],nominal[a])
+              text(1:top,x at conf.reg[topp,2,a],nominal[a])
+            }
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[5],3,0.25)
+          }
+          if(show[6]){
+            topp<-ord[1:top]
+            alt<-call.list$alternative
+            if(is.null(alt)) alt<-"two.sided"
+            stats<-switch(alt,two.sided=abs(x at statistic),greater=x at statistic,less=(-x at statistic))
+            plot(c(1,top),range(c(x at cutoff[topp,],stats[topp]),finite=TRUE,na.rm=TRUE),type="n",xlab="Most Significant Hypotheses",ylab="Test Statistics")
+            points(1:top,stats[topp],pch="o")
+            nominal<-eval(call.list$alpha)
+            if(is.null(nominal)) nominal<-0.05
+            for(a in 1:length(nominal)) text(1:top,x at cutoff[topp,a],nominal[a])
+            if(one.fig) title(sub=sub.caption,cex.sub=0.5,...)
+            mtext(caption[6],3,0.25)
+          }
+          if(!one.fig && par("oma")[3]>=1) mtext(sub.caption,outer=TRUE,cex=0.8)
+          invisible()
+          })
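+
+## Illustrative usage (sketch; 'mtp' a hypothetical fitted MTP object):
+# par(mfrow = c(2, 2))
+# plot(mtp, which = 1:4, logscale = TRUE)   # first four diagnostic panels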
+
+
+if( !isGeneric("summary") )
+    setGeneric("summary", function(object, ...) standardGeneric("summary"))
+
+setMethod("summary","MTP",
+          function(object,...){
+            call.list<-as.list(object at call)
+            cat(paste("MTP: ",ifelse(is.null(call.list$method),"ss.maxT",call.list$method),"\n"))
+            err<-ifelse(is.null(call.list$typeone),"fwer",call.list$typeone)
+            if(err=="gfwer") err<-paste(err," (k=",ifelse(is.null(call.list$k),0,call.list$k),")",sep="")
+            if(err=="tppfp") err<-paste(err," (q=",ifelse(is.null(call.list$q),0.1,call.list$q),")",sep="")
+            if(err=="fdr") err<-paste(err," (",ifelse(is.null(call.list$fdr.method),"conservative",call.list$method),")",sep="")
+	    cat(paste("Type I error rate: ",err,"\n\n"))
+            nominal<-eval(call.list$alpha)
+            if(is.null(nominal)) nominal<-0.05
+            if(is.null(call.list$test)) test <- "t.twosamp.unequalvar"
+            else test <- call.list$test
+            if(test!="t.cor" & test!="z.cor") out1<-data.frame(Level=nominal,Rejections=apply(object at reject,2,sum),row.names=NULL)
+            else{
+              tmp <- rep(0,length(nominal))
+              for(i in 1:length(nominal)) tmp[i] <- sum(object at adjp < nominal[i])
+              out1 <- data.frame(Level=nominal,Rejections=tmp,row.names=NULL)
+            }
+            print(out1)
+            cat("\n")
+            out2<-get.index(object at adjp,object at rawp,abs(object at statistic))
+            out3<-rn<-NULL
+            if(!is.null(object at adjp)){
+              out3<-rbind(out3,c(summary(object at adjp[!is.na(object at adjp)]),sum(is.na(object at adjp))))
+              rn<-c(rn,"adjp")
+            }
+            if(!is.null(object at rawp)){
+              out3<-rbind(out3,c(summary(object at rawp[!is.na(object at rawp)]),sum(is.na(object at rawp))))
+              rn<-c(rn,"rawp")
+            }
+            if(!is.null(object at statistic)){
+              out3<-rbind(out3,c(summary(object at statistic[!is.na(object at statistic)]),sum(is.na(object at statistic))))
+            rn<-c(rn,"statistic")
+            }
+            if(!is.null(object at estimate)){
+              out3<-rbind(out3,c(summary(object at estimate[!is.na(object at estimate)]),sum(is.na(object at estimate))))
+              rn<-c(rn,"estimate")
+            }
+            rownames(out3)<-rn
+            colnames(out3)[ncol(out3)]<-"NA's"
+            print(out3)
+            invisible(list(rejections=out1,index=out2,summaries=out3))
+          })
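+
+## Illustrative usage (sketch; 'mtp' a hypothetical fitted MTP object):
+# s <- summary(mtp)   # prints rejection counts and p-value/statistic summaries
+# s$rejections        # components are also returned invisibly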
+
+setMethod("[","MTP",
+          function(x,i,j=NULL,...,drop=FALSE){
+            if(missing(i)) i<-TRUE
+            newx<-x
+            slot(newx,"statistic")<-x at statistic[i]
+            slot(newx,"estimate")<-x at estimate[i]
+            slot(newx,"rawp")<-x at rawp[i]
+            if(sum(length(x at adjp))) slot(newx,"adjp")<-x at adjp[i]
+            if(sum(length(x at label))) slot(newx,"label")<-x at label[i]
+	    d<-dim(x at conf.reg)
+            dn<-dimnames(x at conf.reg)
+            if(sum(d)) slot(newx,"conf.reg")<-array(x at conf.reg[i,,],dim=c(ifelse(i[1]==TRUE & !is.numeric(i),d[1],length(i)),d[-1]),dimnames=list(dn[[1]][i],dn[[2]],dn[[3]]))
+            d<-dim(x at cutoff)
+            dn<-dimnames(x at cutoff)
+            if(sum(d)) slot(newx,"cutoff")<-matrix(x at cutoff[i,],nrow=ifelse(i[1]==TRUE & !is.numeric(i),d[1],length(i)),ncol=d[-1],dimnames=list(dn[[1]][i],dn[[2]]))
+            d<-dim(x at reject)
+            dn<-dimnames(x at reject)
+            if(sum(d)) slot(newx,"reject")<-matrix(x at reject[i,],nrow=ifelse(i[1]==TRUE & !is.numeric(i),d[1],length(i)),ncol=d[-1],dimnames=list(dn[[1]][i],dn[[2]]))
+            if(sum(dim(x at nulldist))) slot(newx,"nulldist")<-x at nulldist[i,]
+            if(sum(dim(x at rawdist))) slot(newx,"rawdist")<-x at rawdist[i,]
+            if(sum(dim(x at marg.par))) slot(newx,"marg.par")<-x at marg.par[i,]
+            if(sum(dim(x at index))) slot(newx,"index")<-x at index[i,]
+	    invisible(newx)
+          })
+
+setMethod("as.list","MTP",
+          function(x,...){
+            snames<-slotNames(x)
+            n<-length(snames)
+            lobj<-list()
+            for(i in 1:n) lobj[[i]]<-slot(x,snames[i])
+            names(lobj)<-snames
+            invisible(lobj)
+          })
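+
+## Subsetting and coercion sketches ('mtp' a hypothetical fitted MTP object):
+# top10 <- mtp[1:10]    # restrict all per-hypothesis slots to the first 10 hypotheses
+# str(as.list(mtp))     # slots as a named list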
+
+if( !isGeneric("update") )
+    setGeneric("update", function(object, ...) standardGeneric("update"))
+
+setMethod("update","MTP",
+          function(object,formula.="missing",alternative="two.sided",typeone="fwer",
+          k=0,q=0.1,fdr.method="conservative",alpha=0.05,smooth.null=FALSE,
+          method="ss.maxT",get.cr=FALSE,get.cutoff=FALSE,get.adjp=TRUE,nulldist="boot.cs",
+          keep.rawdist=TRUE,keep.nulldist=TRUE,marg.null=object at marg.null,
+          marg.par=object at marg.par,perm.mat=NULL,ncp=NULL,...,evaluate=TRUE){
+            ## checking
+            #Error rate
+            ERROR<-c("fwer","gfwer","tppfp","fdr")
+            typeone<-ERROR[pmatch(typeone,ERROR)]
+            if(is.na(typeone)) stop(paste("Invalid typeone, try one of ",ERROR,sep=""))
+            if(any(alpha<0) | any(alpha>1)) stop("Nominal level alpha must be between 0 and 1")
+            nalpha<-length(alpha)
+            p<-length(object at rawp)
+            reject<-
+              if(nalpha) array(dim=c(p,nalpha),dimnames=list(rownames(object at reject),paste("alpha=",alpha,sep="")))
+	      else matrix(nrow=0,ncol=0)
+            if(typeone=="gfwer"){
+              if(get.cr==TRUE) warning("Confidence regions not currently implemented for gFWER")
+              if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for gFWER")
+              get.cr<-get.cutoff<-FALSE
+              if(k<0) stop("Number of false positives cannot be negative")
+              if(k>=p) stop(paste("Number of false positives must be less than number of tests=",p,sep=""))
+              if(length(k)>1){
+                k<-k[1]
+		warning("can only compute gfwer adjp for one value of k at a time (using first value), try fwer2gfwer() function for multiple k")
+		}
+            }
+            if(typeone=="tppfp"){
+              if(get.cr==TRUE) warning("Confidence regions not currently implemented for TPPFP")
+              if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for TPPFP")
+              get.cr<-get.cutoff<-FALSE
+              if(q<0) stop("Proportion of false positives, q, cannot be negative")
+              if(q>1) stop("Proportion of false positives, q, must be less than 1")
+              if(length(q)>1){
+                q<-q[1]
+                warning("Can only compute tppfp adjp for one value of q at a time (using first value), try fwer2tppfp() function for multiple q")
+              }
+            }
+            if(typeone=="fdr"){
+              if(!nalpha) stop("Must specify a nominal level alpha for control of FDR")
+              if(get.cr==TRUE) warning("Confidence regions not currently implemented for FDR")
+              if(get.cutoff==TRUE) warning("Cut-offs not currently implemented for FDR")
+              get.cr<-get.cutoff<-FALSE
+            }
+
+            METHODS<-c("ss.maxT","ss.minP","sd.maxT","sd.minP")
+            method<-METHODS[pmatch(method,METHODS)]
+            if(is.na(method)) stop(paste("Invalid method, try one of ",METHODS," ",sep=""))
+
+            #get args from previous call
+            call.list <- as.list(object at call)
+            #estimate and conf.reg
+            ftest<-FALSE
+            if(is.null(call.list$test)) test<-"t.twosamp.unequalvar" #default
+            else test<-call.list$test
+            if(test%in%c("f","f.block","f.twoway")){
+              ftest<-TRUE
+              if(get.cr) stop("Confidence intervals not available for F tests, try get.cr=FALSE")
+            }
+            
+            #alternative
+            #if(is.null(call.list$alternative)) alternative<-"two.sided"
+            #else alternative<-call.list$alternative
+
+            #typeone
+            #if(is.null(call.list$typeone)) typeone<-"fwer"
+            #else typeone<-call.list$typeone
+            
+            ### nulldistn
+            ### Preserve the old null dist, if kept (i.e., could have alternatively kept raw dist)
+            nulldistn <- object at nulldist
+            if(object at nulldist.type=="perm") stop("No way to update objects which originally used the permutation distribution. No available options for storing nulldist.  Rawdist can only be stored for bootstrap distribution.")
+            ### For boot.qt, make sure values of marg.null and marg.par, if set previously, are kept.
+            ### Otherwise, these become null, but the original values are set here before proceeding.
+            prev.marg.null <- object at marg.null
+            prev.marg.par <- object at marg.par
+
+            if(!ncol(object at nulldist) & !ncol(object at rawdist)) stop("Update method requires keep.rawdist=TRUE and/or keep.nulldist=TRUE in the original call to MTP")
+            nulldist<- # just setting character value of what nulldist should be
+               if(is.null(call.list$nulldist)) "boot.cs"
+               else call.list$nulldist
+
+            ## new call
+            newcall.list<-as.list(match.call())
+            changed<-names(call.list)[names(call.list)%in%names(newcall.list)]
+            changed<-changed[changed!=""]
+            added<-names(newcall.list)[!(names(newcall.list)%in%names(call.list))]
+            added<-added[added!="x"]
+            for(n in changed) call.list[[n]]<-newcall.list[[n]]
+            for(n in added) call.list[[n]]<-newcall.list[[n]]
+            newcall<-as.call(call.list)
+            ### NB can still use "call.list" to help with what has been changed.
+            df <- marg.par
+            call.list$marg.par <- df
+               
+            ## return call if evaluate is false
+            if(!evaluate) return(newcall)
+
+            ## else redo MTP
+            else{
+              num<-object at estimate
+              snum<-1
+              if(alternative=="two.sided"){
+                snum<-sign(num)
+                num<-abs(num)
+              }
+              if(alternative=="less"){
+                snum<-(-1)
+                num<-(-num)
+              }
+
+              if(object at nulldist.type!="boot.qt"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+              }
+                 
+         ### Move rawp down from before.
+         ### Redoing the new null distributions needs to go here.
+              if("method" %in% changed | "method" %in% added) method <- call.list$method
+              if("alternative" %in% changed | "alternative" %in% added) alternative <- call.list$alternative
+              
+         ### Preserve the old null dist, if kept (i.e., could have alternatively kept raw dist)
+              nulldistn <- object at nulldist
+
+              if("marg.null" %in% changed | "marg.null" %in% added) marg.null <- call.list$marg.null
+              if("marg.par" %in% changed | "marg.par" %in% added){
+                  marg.par <- call.list$marg.par
+                  if(is.numeric(marg.par) & !is.matrix(marg.par)) marg.par <- matrix(rep(marg.par,length(object at statistic)),nrow=length(object at statistic),ncol=length(marg.par),byrow=TRUE)
+                }
+              if("perm.mat" %in% changed | "perm.mat" %in% added) perm.mat <- call.list$perm.mat
+              if("ncp" %in% changed | "ncp" %in% added) ncp <- call.list$ncp
+              if("MVN.method" %in% changed | "MVN.method" %in% added | "penalty" %in% changed | "penalty" %in% added |"ic.quant.trans" %in% changed | "ic.quant.trans" %in% added) stop("Changing 'MVN.method', 'ic.quant.trans' or 'penalty' requires new calculation of null distribution using nulldist='ic'.  Please use a new call to MTP.")
+         ### Check value of nulldist in this case
+              if("nulldist" %in% changed | "nulldist" %in% added) {
+                nulldist <- call.list$nulldist
+         ### Otherwise, nulldist keeps the old/default value in the original call.list, not the updated one.
+                if(nulldist=="perm") stop("Calls to update() cannot include changes involving the permutation distribution. Please try a separate call to MTP() with nulldist='perm'")
+                if(object at nulldist.type=="ic") stop("You cannot update an influence curve null distribution to another choice of null distribution.  Valid only for changes in the bootstrap distribution when keep.rawdist=TRUE.  Please try a separate call to MTP() if nulldist='boot' or 'perm' desired. Changing 'MVN.method', 'ic.quant.trans' or 'penalty' also requires new calculation of null distribution using nulldist='ic'")
+                if(nulldist=="ic") stop("Calls to update() cannot include changes involving the influence curve null distribution. Please try a separate call to MTP() with nulldist='ic'")
+                if(!ncol(object at rawdist)) stop("Calls to update() involving changes in bootstrap-based null distributions require keep.rawdist=TRUE")
+              
+
+    ### Just recompute (bootstrap-based) nulldistn - way easier this way (with keep.raw=TRUE)
+    ### "Easy" ones first.  Need to get tau0 and theta0.
+              if(nulldist=="ic"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+              }
+              if(nulldist=="boot" | nulldist=="boot.cs" | nulldist=="boot.ctr"){
+                marg.null = vector("character",length=0)
+                marg.par = matrix(nrow=0,ncol=0)
+                tau0<-1
+                theta0<-0
+                if(test=="f"){
+                  theta0<-1
+                  tau0<-2/(length(unique(object at label))-1)
+                }
+                if(test=="f.twoway"){
+                  theta0<-1
+                  tau0 <- 2/((length(unique(object at label))*length(gregexpr('12', paste(object at label, collapse=""))[[1]]))-1)
+                }
+                if(nulldist=="boot") nulldistn <- center.scale(object at rawdist, theta0, tau0, alternative)
+                if(nulldist=="boot.cs") nulldistn <- center.scale(object at rawdist, theta0, tau0, alternative)
+                if(nulldist=="boot.ctr") nulldistn <- center.only(object at rawdist, theta0, alternative)
+              }
+
+              if(nulldist=="boot.qt"){
+                if("marg.null" %in% changed | "marg.null" %in% added) marg.null <- call.list$marg.null
+                else marg.null <- NULL
+                if("marg.par" %in% changed | "marg.par" %in% added){
+                  marg.par <- call.list$marg.par
+                  if(is.numeric(marg.par) & !is.matrix(marg.par)) marg.par <- matrix(rep(marg.par,length(object at statistic)),nrow=length(object at statistic),ncol=length(marg.par),byrow=TRUE)
+                }
+                else marg.par <- NULL
+      
+        ### If these additional args are changed or added, these will be the new defaults, but they will not be NULL
+                ### Cannot be NULL for object defn.
+                ncp <- if(is.null(call.list$ncp)) 0 else call.list$ncp
+                perm.mat <- if(is.null(call.list$perm.mat)) NULL else call.list$perm.mat
+                if(!is.null(perm.mat)){
+                  if(length(object at statistic)!=dim(perm.mat)[1]) stop("Permutation and bootstrap matrices must have same number of rows (hypotheses).")
+                }
+
+                nstats <- c("t.twosamp.unequalvar","z.cor","lm.XvsZ","lm.YvsXZ","coxph.lmYvsXZ")
+                tstats <- c("t.onesamp","t.twosamp.equalvar","t.pair","t.cor")
+                fstats <- c("f","f.block","f.twoway")
+         # If default (=NULL), set values of marg.null to pass on.
+                if(is.null(marg.null)){
+                  if(any(nstats == test)) marg.null="normal"
+                  if(any(tstats == test)) marg.null="t"
+                  if(any(fstats == test)) marg.null="f"
+                }
+                else{ # Check to see that user-supplied entries make sense.  
+                  MARGS <- c("normal","t","f","perm")
+                  marg.null <- MARGS[pmatch(marg.null,MARGS)]
+                  if(is.na(marg.null)) stop("Invalid marginal null distribution. Try one of: normal, t, f, or perm")
+                  if(any(tstats==test) & marg.null == "f") stop("Choice of test stat and marginal nulldist do not match")
+                  if(any(fstats==test) & (marg.null == "normal" | marg.null=="t")) stop("Choice of test stat and marginal nulldist do not match")
+                  if(marg.null=="perm" & is.null(perm.mat)) stop("Must supply a matrix of permutation test statistics if marg.null='perm'")
+                  if(marg.null=="f" & ncp < 0) stop("Cannot have negative noncentrality parameter with F distribution.")
+                }
+    
+        # If default (=NULL), set values of marg.par. Return as m by 1 or 2 matrix.
+                if(is.null(marg.par)){
+                  marg.par <- switch(test,
+                          t.onesamp = object at sampsize-1,
+                          t.twosamp.equalvar = object at sampsize-2,
+                          t.twosamp.unequalvar = c(0,1),
+                          t.pair = object at sampsize-2,
+                          f = c(length(is.finite(unique(object at label)))-1,object at sampsize-length(is.finite(unique(object at label)))),
+                          f.twoway = {
+                            c(length(is.finite(unique(object at label)))-1,object at sampsize-(length(is.finite(unique(object at label)))*length(gregexpr('12', paste(object at label, collapse=""))[[1]]))-2)
+                            },
+                          lm.XvsZ = c(0,1),
+                          lm.YvsXZ = c(0,1),
+                          coxph.YvsXZ = c(0,1),
+                          t.cor = object at sampsize-2,
+                          z.cor = c(0,1)
+                          )
+                  marg.par <- matrix(rep(marg.par,length(object at statistic)),nrow=length(object at statistic),ncol=length(marg.par),byrow=TRUE)
+        }
+                else{ # Check that user-supplied values of marg.par make sense (marg.par != NULL)
+                  if((marg.null=="t" | marg.null=="f") & any(marg.par[,1]==0)) stop("Cannot have zero df with t or F distributions. Check marg.par settings")
+                  if(marg.null=="t" & dim(marg.par)[2]>1) stop("Too many parameters for t distribution.  marg.par should have length 1.")
+                  if((marg.null=="f" | marg.null=="normal") & dim(marg.par)[2]!=2) stop("Incorrect number of parameters defining marginal null distribution.  marg.par should have length 2.")
+                }
+                nulldistn <- quant.trans(object at rawdist, marg.null, marg.par, ncp, alternative, perm.mat)
+              }
+              }
+
+     ### Cool. Now pick up where we left off.
+         obs<-rbind(num,object at estimate/object at statistic,sign(object at estimate))
+         rawp<-apply((obs[1,]/obs[2,])<=nulldistn,1,mean)
+		     if(smooth.null & min(rawp,na.rm=TRUE)==0){
+                  zeros<-rawp==0
+                  if(sum(zeros)==1){
+                    den<-density(nulldistn[zeros,],to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+                    rawp[zeros]<-sum(den$y[den$x>=(obs[1,zeros]/obs[2,zeros])])/sum(den$y)
+                  }
+                  else{
+                    den<-apply(nulldistn[zeros,],1,density,to=max(obs[1,zeros]/obs[2,zeros],nulldistn[zeros,],na.rm=TRUE),na.rm=TRUE)
+                    newp<-NULL
+                    stats<-obs[1,zeros]/obs[2,zeros]
+                    for(i in 1:length(den)) newp[i]<-sum(den[[i]]$y[den[[i]]$x>=stats[i]])/sum(den[[i]]$y)
+                    rawp[zeros]<-newp
+                  }
+                  rawp[rawp<0]<-0
+                }
+		pind<-ifelse(typeone!="fwer",TRUE,get.adjp)
+		if(method=="ss.maxT") out<-ss.maxT(nulldistn,obs,alternative,get.cutoff,get.cr,pind,alpha)
+		if(method=="ss.minP") out<-ss.minP(nulldistn,obs,rawp,alternative,get.cutoff,get.cr,pind,alpha)
+                if(method=="sd.maxT") out<-sd.maxT(nulldistn,obs,alternative,get.cutoff,get.cr,pind,alpha)
+                if(method=="sd.minP") out<-sd.minP(nulldistn,obs,rawp,alternative,get.cutoff,get.cr,pind,alpha)
+                if(typeone=="fwer" & nalpha){
+                  for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+		}
+		#augmentation procedures
+                #cat(typeone,"\n")
+                #cat(k,"\n")
+		if(typeone=="gfwer"){
+                  out$adjp<-as.numeric(fwer2gfwer(out$adjp,k))
+                  out$c<-matrix(nrow=0,ncol=0)
+                  out$cr<-array(dim=c(0,0,0))
+                  if(nalpha){
+                    for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+		  }
+                  if(!get.adjp) out$adjp<-vector("numeric",0)
+                }
+		if(typeone=="tppfp"){
+                  out$adjp<-as.numeric(fwer2tppfp(out$adjp,q))
+                  out$c<-matrix(nrow=0,ncol=0)
+                  out$cr<-array(dim=c(0,0,0))
+                  if(nalpha){
+                    for(a in 1:nalpha) reject[,a]<-(out$adjp<=alpha[a])
+                  }
+                  if(!get.adjp) out$adjp<-vector("numeric",0)
+		}
+		if(typeone=="fdr"){
+                  out$c<-matrix(nrow=0,ncol=0)
+                  out$cr<-array(dim=c(0,0,0))
+                  temp<-fwer2fdr(out$adjp,fdr.method,alpha)
+                  reject<-temp$reject
+                  if(!get.adjp) out$adjp<-vector("numeric",0)
+                  else out$adjp<-temp$adjp
+                  rm(temp)
+                }
+		#output results
+                if(!keep.nulldist) nulldistn <- matrix(nrow=0,ncol=0)
+                if(!keep.rawdist) object at rawdist <- matrix(nrow=0,ncol=0)
+                out<-new("MTP",statistic=object at statistic,estimate=object at estimate,
+                sampsize=object at sampsize,rawp=rawp,adjp=out$adjp,conf.reg=out$cr,
+                cutoff=out$c,reject=reject,rawdist=object at rawdist,nulldist=nulldistn,
+                nulldist.type=nulldist,marg.null=marg.null,marg.par=marg.par,label=object at label,
+                index=object at index,call=newcall,seed=object at seed)
+		return(out)
+               } #re else redo MTP
+             } # re function
+             ) # re set method
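+
+## Illustrative update (sketch; requires keep.rawdist=TRUE and/or keep.nulldist=TRUE
+## in the original call, and 'mtp' a hypothetical fitted MTP object):
+# mtp.fdr <- update(mtp, typeone = "fdr", alpha = 0.05)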
+             
+###  
+
+
+print.MTP<-function(x,...){
+  call.list<-as.list(x at call)
+  cat("\n")
+  writeLines(strwrap("Multiple Testing Procedure",prefix="\t"))
+  cat("\n")
+  cat(paste("Object of class: ",class(x)))
+  cat("\n")
+  cat(paste("sample size =",x at sampsize,"\n"))
+  cat(paste("number of hypotheses =",length(x at statistic),"\n"))
+  cat("\n")
+  cat(paste("test statistics =",ifelse(is.null(call.list$test),"t.twosamp.unequalvar",call.list$test),"\n"))
+  cat(paste("type I error rate =",ifelse(is.null(call.list$typeone),"fwer",call.list$typeone),"\n"))
+  nominal<-eval(call.list$alpha)
+  if(is.null(nominal)) nominal<-0.05
+  cat("nominal level alpha = ")
+  cat(nominal,"\n")
+  cat(paste("multiple testing procedure =",ifelse(is.null(call.list$method),"ss.maxT",call.list$method),"\n"))
+  cat("\n")
+  cat("Call: ")
+  print(x at call)
+  cat("\n")
+  cat("Slots: \n")
+  snames<-slotNames(x)
+  n<-length(snames)
+  out<-matrix(nrow=n,ncol=4)
+  dimnames(out)<-list(snames,c("Class","Mode","Length","Dimension"))
+  for(s in snames) out[s,]<-c(class(slot(x,s)),mode(slot(x,s)),length(slot(x,s)),paste(dim(slot(x,s)),collapse=","))
+  out<-data.frame(out)
+  print(out)
+  invisible(x)
+}
+
+.onLoad <- function(lib, pkg) require(methods)
+
+.onUnload <- function( libpath ) {
+  library.dynam.unload( "multtest", libpath )
+}
+
+#apply function with a weight matrix/vector
+#written copying apply, except that X must
+# be a matrix and MARGIN must be 1 or 2.
+# W is NULL, matrix or vector.
+
+wapply<-function(X,MARGIN,FUN,W=NULL,...){
+  if(is.null(W)) return(apply(X,MARGIN,FUN,...))
+  else{
+    if(length(MARGIN)!=1) stop("length(MARGIN) should be 1")
+    if(!(MARGIN==1 || MARGIN==2)) stop("MARGIN must be 1 or 2")
+    FUN<-match.fun(FUN)
+    X<-as.matrix(X)
+    dx<-dim(X)
+    if(length(dx)!=2) stop("X must be a matrix")
+    dn<-dimnames(X)
+    if(!(is.vector(W) | is.matrix(W))) stop("W must be a vector or matrix")
+    if(is.vector(W)){
+      if(MARGIN==1 & length(W)!=dx[2]) stop("length(W) not equal to ",dx[2])
+      if(MARGIN==2 & length(W)!=dx[1]) stop("length(W) not equal to ",dx[1])
+    }
+    if(is.matrix(W) & sum(dx!=dim(W))>0) stop("X and W must have the same dimension(s)")
+    d.call<-dx[-MARGIN]
+    d.ans<-dx[MARGIN]
+    dn.call<-dn[-MARGIN]
+    dn.ans<-dn[MARGIN]
+    if(is.na(d.ans) || !(d.ans>0)) stop("dim(X)[",MARGIN,"] is not a positive number")
+    if(MARGIN==1){
+      X<-t(X)
+      if(is.matrix(W)) W<-t(W)
+    }
+    ans<-vector("list",d.ans)
+    if(length(dn.call)) dimnames(X)<-c(dn.call,list(NULL))
+    for(i in 1:d.ans){
+      if(is.vector(W)) ans[[i]]<-FUN(X[,i]*W,...)
+      else ans[[i]]<-FUN(X[,i]*W[,i],...)
+    }
+    ans.list<-is.recursive(ans[[1]])
+    l.ans<-length(ans[[1]])
+    ans.names<-names(ans[[1]])
+    if(!ans.list) ans.list<-any(unlist(lapply(ans,length))!=l.ans)
+    if(!ans.list && length(ans.names)){
+      all.same<-sapply(ans,function(x) identical(names(x),ans.names))
+      if(!all(all.same)) ans.names<-NULL
+    }
+    len.a<-
+      if(ans.list) d.ans
+      else length(ans<-unlist(ans,recursive=FALSE))
+    if(len.a==d.ans){
+      names(ans)<-if(length(dn.ans[[1]])) dn.ans[[1]]
+      return(ans)
+    }
+    if(len.a>0 && len.a%%d.ans==0) return(array(ans,c(len.a%/%d.ans,d.ans),
+                           if(is.null(dn.ans)){
+                             if(!is.null(ans.names)) list(ans.names,NULL)
+                           }
+                           else c(list(ans.names),dn.ans)))
+    return(ans)
+  }
+}
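+
+## Worked example (runs as-is once wapply is defined):
+# X <- matrix(1:6, nrow = 3)
+# wapply(X, 2, sum, W = c(0.5, 0.25, 0.25))   # weighted column sums: 1.75 4.75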
+
+#function to make a vector for ordering the results by
+# adjp, then rawp, then abs(stat)
+get.index<-function(adjp,rawp,stat){
+  adj<-!is.null(adjp)
+  raw<-!is.null(rawp)
+  sta<-!is.null(stat)
+  if(adj) p<-length(adjp)
+  else{
+    if(raw) p<-length(rawp)
+    else stop("Must have at least one argument")
+  }
+  if(!sta) stat<-rep(1,p)
+  if(!raw) rawp<-rep(1,p)
+  if(!adj) adjp<-rep(1,p)
+  if((length(adjp)!=length(rawp)) | (length(adjp)!=length(stat))) stop("adjp, rawp, and stat must all be the same length")
+  index<-rank(adjp)
+  d1<-duplicated(index)
+  u1<-u2<-NULL
+  if(sum(d1)){
+    u1<-unique(index[d1])
+    for(u in u1){
+      sub<-index==u
+      i2<-rank(rawp[sub])
+      index[sub]<-index[sub]+i2-mean(i2)
+      d2<-duplicated(index[sub])
+      if(sum(d2)) u2<-unique(index[sub][d2])
+      for(uu in u2){
+        sub2<-index==uu
+	i3<-length(stat[sub2])-rank(abs(stat[sub2]))+1
+	index[sub2]<-index[sub2]+i3-mean(i3)
+      }
+    }
+  }
+  if(sum(duplicated(index))) warning("indices are not unique")
+  if(sum(index)!=sum(1:length(index))) warning("indices are not based on true ranks")
+  order(index)
+}
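+
+## Worked example (runs as-is once get.index is defined): ties in adjp are
+## broken by rawp, then by decreasing |stat|.
+# adjp <- c(0.01, 0.20, 0.01, 0.05)
+# rawp <- c(0.001, 0.02, 0.002, 0.01)
+# stat <- c(3.2, 1.1, 2.8, 2.0)
+# get.index(adjp, rawp, stat)   # 1 3 4 2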
+
+qRequire <- function(pkg){
+   suppressWarnings(require(pkg, character.only=TRUE, quietly=TRUE, warn.conflicts=FALSE))
+}
+
+
diff --git a/data/golub.RData b/data/golub.RData
new file mode 100755
index 0000000..aeeb10a
Binary files /dev/null and b/data/golub.RData differ
diff --git a/debian/README.source b/debian/README.source
deleted file mode 100644
index 1960c2e..0000000
--- a/debian/README.source
+++ /dev/null
@@ -1,8 +0,0 @@
-Explanation for binary files inside source package according to
-  http://lists.debian.org/debian-devel/2013/09/msg00332.html
-
-Files: data/golub.RData
-  Data dump when processing vignettes/golub.R
-
- -- Andreas Tille <tille at debian.org>  Sat, 09 May 2015 08:52:18 +0200
-
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index e68db44..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,27 +0,0 @@
-r-bioc-multtest (2.32.0-1) unstable; urgency=medium
-
-  * New upstream version
-  * debhelper 10
-  * Standards-Version: 4.0.1 (no changes needed)
-
- -- Andreas Tille <tille at debian.org>  Thu, 24 Aug 2017 00:01:05 +0200
-
-r-bioc-multtest (2.30.0-1) unstable; urgency=medium
-
-  * New upstream version
-  * Convert to dh-r
-  * Generic BioConductor homepage
-
- -- Andreas Tille <tille at debian.org>  Wed, 26 Oct 2016 13:52:16 +0200
-
-r-bioc-multtest (2.26.0-1) unstable; urgency=medium
-
-  * New upstream version
-
- -- Andreas Tille <tille at debian.org>  Wed, 04 Nov 2015 16:16:09 +0100
-
-r-bioc-multtest (2.24.0-1) unstable; urgency=medium
-
-  * Initial release (closes: #785210).
-
- -- Andreas Tille <tille at debian.org>  Wed, 13 May 2015 15:20:31 +0200
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 2fa95e0..0000000
--- a/debian/control
+++ /dev/null
@@ -1,41 +0,0 @@
-Source: r-bioc-multtest
-Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 10),
-               dh-r,
-               r-base-dev,
-               r-bioc-biobase,
-               r-cran-survival,
-               r-cran-mass
-Standards-Version: 4.0.1
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-med/trunk/packages/R/r-bioc-multtest/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/R/r-bioc-multtest/trunk/
-Homepage: https://bioconductor.org/packages//multtest/
-
-Package: r-bioc-multtest
-Architecture: any
-Depends: ${R:Depends},
-         ${misc:Depends},
-         ${shlibs:Depends},
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: Bioconductor resampling-based multiple hypothesis testing
- Non-parametric bootstrap and permutation resampling-based multiple
- testing procedures (including empirical Bayes methods) for controlling
- the family-wise error rate (FWER), generalized family-wise error rate
- (gFWER), tail probability of the proportion of false positives (TPPFP),
- and false discovery rate (FDR). Several choices of bootstrap-based null
- distribution are implemented (centered, centered and scaled,
- quantile-transformed). Single-step and step-wise methods are available.
- Tests based on a variety of t- and F-statistics (including t-statistics
- based on regression parameters from linear and survival models as well
- as those based on correlation parameters) are included. When probing
- hypotheses with t-statistics, users may also select a potentially faster
- null distribution which is multivariate normal with mean zero and
- variance covariance matrix derived from the vector influence function.
- Results are reported in terms of adjusted p-values, confidence regions
- and test statistic cutoffs. The procedures are directly applicable to
- identifying differentially expressed genes in DNA microarray
- experiments.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index d9c8e40..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,27 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: multtest
-Upstream-Contact: Katherine S. Pollard <kpollard at gladstone.ucsf.edu>
-Source: https://bioconductor.org/packages//multtest/
-
-Files: *
-Copyright: © 2006-2016 Katherine S. Pollard, Houston N. Gilbert, Yongchao Ge, Sandra Taylor, Sandrine Dudoit
-License: LGPL-3+
-
-
-Files: debian/*
-Copyright: 2015-2016 Andreas Tille <tille at debian.org>
-License: LGPL-3+
-
-License: LGPL-3+
- This program is free software: you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as published by
- the Free Software Foundation, either version 3 of the License, or (at
- your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU Lesser General Public License for more details.
- .
- On Debian systems, the complete text of the GNU Lesser General Public
- License version 3 can be found in ‘/usr/share/common-licenses/LGPL-3’.
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 927a515..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/make -f
-
-debRreposname   := $(shell dpkg-parsechangelog | awk '/^Source:/ {print $$2}' | sed 's/r-\([a-z]\+\)-.*/\1/')
-awkString       := "'/^(Package|Bundle):/ {print $$2 }'"
-cranNameOrig    := $(shell awk "$(awkString)" DESCRIPTION)
-cranName        := $(shell echo "$(cranNameOrig)" | tr A-Z a-z)
-package         := r-$(debRreposname)-$(cranName)
-debRdir         := usr/lib/R/site-library
-debRlib         := $(CURDIR)/debian/$(package)/$(debRdir)
-
-%:
-	dh $@ --buildsystem R
-
-override_dh_fixperms:
-	dh_fixperms
-	chmod -x $(debRlib)/$(cranNameOrig)/otherDocs/*.Rnw
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
deleted file mode 100644
index b550424..0000000
--- a/debian/upstream/metadata
+++ /dev/null
@@ -1,8 +0,0 @@
-Reference:
-  Author: Katherine S. Pollard and Sandrine Dudoit and Mark J. van der Laan
-  Title: "Multiple Testing Procedures: R multtest Package and Applications to Genomics, in Bioinformatics and Computational Biology Solutions Using R and Bioconductor"
-  Editor: R. Gentleman and V. Carey and W. Huber and R. Irizarry and S. Dudoit
-  ISSN: 1431-8776
-  Year: 2005
-  Pages: 251-272
-  URL: http://link.springer.com/chapter/10.1007%2F0-387-29362-0_15
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 126c05a..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,3 +0,0 @@
-version=4
-opts=downloadurlmangle=s?^(.*)\.\.?http:$1packages/release/bioc? \
- http://www.bioconductor.org/packages/release/bioc/html/multtest.html .*/multtest_([\d\.]+)\.tar\.gz
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..42c4873
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,14 @@
+citHeader("To cite package multtest in publications use:")
+
+citEntry(
+ entry="Book",
+ title="Multiple Testing Procedures: R multtest Package and Applications to Genomics, in Bioinformatics and Computational Biology Solutions Using R and Bioconductor",
+ year="2005",
+ author=personList(as.person("Katherine S. Pollard"),
+	as.person("Sandrine Dudoit"),
+	as.person("Mark J. van der Laan")),
+ publisher="Springer",
+ textVersion="K.S Pollard, S. Dudoit, M.J. van der Laan (2005). Multiple Testing Procedures: R multtest Package and Applications to Genomics, in Bioinformatics and Computational Biology Solutions Using R and Bioconductor, R. Gentleman, V. Carey, W. Huber, R. Irizarry, S. Dudoit (Editors). Springer (Statistics for Biology and Health Series), pp. 251-272.")
+
+
+
diff --git a/inst/otherDocs/MTP.Rnw b/inst/otherDocs/MTP.Rnw
new file mode 100755
index 0000000..b6aa36a
--- /dev/null
+++ b/inst/otherDocs/MTP.Rnw
@@ -0,0 +1,908 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% \VignetteIndexEntry{Multiple Testing Procedures}
+% \VignetteKeywords{Expression Analysis}
+% \VignettePackage{multtest}
+
+\documentclass[11pt]{article}
+
+\usepackage{graphicx}    % standard LaTeX graphics tool
+\usepackage{Sweave}
+\usepackage{amsfonts}
+
+% these should probably go into a dedicated style file
+\newcommand{\Rpackage}[1]{\textit{#1}}
+\newcommand{\Robject}[1]{\texttt{#1}}
+\newcommand{\Rclass}[1]{\textit{#1}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+% Our added packages and definitions
+ 
+\usepackage{hyperref}
+\usepackage{amsmath}
+\usepackage{color}
+\usepackage{comment}
+\usepackage[authoryear,round]{natbib}
+
+\parindent 0in
+
+\definecolor{red}{rgb}{1, 0, 0}
+\definecolor{green}{rgb}{0, 1, 0}
+\definecolor{blue}{rgb}{0, 0, 1}
+\definecolor{myblue}{rgb}{0.25, 0, 0.75}
+\definecolor{myred}{rgb}{0.75, 0, 0}
+\definecolor{gray}{rgb}{0.5, 0.5, 0.5}
+\definecolor{purple}{rgb}{0.65, 0, 0.75}
+\definecolor{orange}{rgb}{1, 0.65, 0}
+
+\def\RR{\mbox{\it I\hskip -0.177em R}}
+\def\ZZ{\mbox{\it I\hskip -0.177em Z}}
+\def\NN{\mbox{\it I\hskip -0.177em N}}
+
+\newtheorem{theorem}{Theorem}
+\newtheorem{procedure}{Procedure}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{document}
+
+\title{Multiple Testing Procedures} 
+\author{Katherine S. Pollard$^1$, Sandrine Dudoit$^2$, Mark J. van der Laan$^3$} 
+\maketitle
+
+\begin{center}
+1. Center for Biomolecular Science and Engineering, University of California, Santa Cruz, \url{ http://lowelab.ucsc.edu/katie/}\\
+2. Division of Biostatistics, University of California, Berkeley, \url{ http://www.stat.berkeley.edu/~sandrine/}\\
+3. Department of Statistics and Division of Biostatistics, University of California, Berkeley, \url{ http://www.stat.berkeley.edu/~laan/}\\
+\end{center}
+
+\tableofcontents
+
+\label{anal:mult:multtest}
+
+\section{Introduction}
+\label{anal:mult:s:intro}
+
+\subsection{Overview}
+The Bioconductor R package \Rpackage{multtest} implements widely applicable resampling-based single-step and stepwise multiple testing procedures (MTP) for controlling a broad class of Type I error rates, in testing problems involving general data generating distributions (with arbitrary dependence structures among variables), null hypotheses, and test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+The current version of \Rpackage{multtest} provides MTPs for null hypotheses concerning means, differences in means, and regression parameters in linear and Cox proportional hazards models.
+Both bootstrap and permutation estimators of the test statistics ($t$- or $F$-statistics) null distribution are available. 
+Procedures are provided to control Type I error rates defined as tail probabilities and expected values of arbitrary functions of the numbers of Type I errors, $V_n$, and rejected hypotheses, $R_n$. 
+These error rates include: 
+the generalized family-wise error rate, $gFWER(k) = Pr(V_n > k)$, or chance of at least $(k+1)$ false positives (the special case $k=0$ corresponds to the usual family-wise error rate, FWER); 
+tail probabilities $TPPFP(q) = Pr(V_n/R_n > q)$ for the proportion of false positives among the rejected hypotheses;
+the false discovery rate, $FDR=E[V_n/R_n]$.
+Single-step and step-down common-cut-off (maxT) and common-quantile (minP) procedures, that take into account the joint distribution of the test statistics, are implemented to control the FWER. 
+In addition, augmentation procedures are provided to control the gFWER, TPPFP, and FDR, based on {\em any} initial FWER-controlling procedure.
+The results of a multiple testing procedure are summarized using rejection regions for the test statistics, confidence regions for the parameters of interest, and adjusted $p$-values.
+The modular design of the \Rpackage{multtest} package allows interested users to readily extend the package's functionality, by inserting additional functions for test statistics and testing procedures. 
+The S4 class/method object-oriented programming approach was adopted to summarize the results of a MTP.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Motivation}
+
+Current statistical inference problems in areas such as genomics, astronomy, and marketing routinely involve the simultaneous test of thousands, or even millions, of null hypotheses. 
+Examples of testing problems in genomics include: 
+\begin{itemize}
+\item
+the identification of differentially expressed genes in microarray experiments, i.e., genes whose expression measures are associated with possibly censored responses or covariates of interest;
+\item
+tests of association between gene expression measures and Gene Ontology (GO) annotation (\url{www.geneontology.org});
+\item
+the identification of transcription factor binding sites in ChIP-Chip experiments, where chromatin immunoprecipitation (ChIP) of transcription factor bound DNA is followed by microarray hybridization (Chip) of the IP-enriched DNA \cite{KelesetalTechRep147}; 
+\item
+the genetic mapping of complex traits using single nucleotide polymorphisms (SNP). 
+\end{itemize}
+The above testing problems share the following general characteristics: 
+\begin{itemize}
+\item
+inference for  high-dimensional multivariate distributions, with complex and unknown dependence structures among variables;
+\item
+broad range of parameters of interest, such as regression coefficients in models relating patient survival to genome-wide transcript levels or DNA copy numbers, and pairwise gene correlations between transcript levels;
+\item
+many null hypotheses, in the thousands or even millions; 
+\item
+complex dependence structures among test statistics, e.g., Gene Ontology directed acyclic graph (DAG).
+\end{itemize}
+
+Motivated by these applications, we have developed resampling-based single-step and step-down multiple testing procedures (MTP) for controlling a broad class of Type I error rates, in testing problems involving general data generating distributions (with arbitrary dependence structures among variables), null hypotheses, and test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+In particular, Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} derive
+{\em single-step common-cut-off and common-quantile procedures} for controlling arbitrary parameters of the distribution of the number of Type I errors, such as the generalized family-wise error rate, $gFWER(k)$, or chance of at least $(k+1)$ false positives. 
+van der Laan et al. \cite{vdLaanetalMT2SAGMB04} focus on control of the family-wise error rate, $FWER = gFWER(0)$, and provide {\em step-down common-cut-off and common-quantile procedures}, based on maxima of test statistics (maxT) and minima of unadjusted $p$-values (minP), respectively. 
+Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and van der Laan et al. \cite{vdLaanetalMT3SAGMB04} propose a general class of {\em augmentation multiple testing procedures} (AMTP), obtained by adding suitably chosen null hypotheses to the set of null hypotheses already rejected by an initial MTP. In particular, given {\em any} FWER-controlling procedure, they show how one can trivially obtain 
+procedures controlling tail probabilities for the number (gFWER) and proportion (TPPFP) of false positives among the rejected hypotheses.
+ 
+A key feature of our proposed MTPs is the {\em test statistics null distribution} (rather than data generating null distribution) used to derive rejection regions (i.e., cut-offs) for the test statistics and resulting adjusted $p$-values \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+For general null hypotheses, defined in terms of submodels for the data generating distribution, this null distribution is the asymptotic distribution of the vector of null value shifted and scaled test statistics. 
+Resampling procedures (e.g., based on the non-parametric or model-based bootstrap) are proposed to conveniently obtain consistent estimators of the null distribution and the resulting test statistic cut-offs and adjusted $p$-values \cite{DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,Pollard&vdLaanJSPI04}.
+
+The Bioconductor R package \Rpackage{multtest} provides software implementations of the above multiple testing procedures. 
+
+\subsection{Outline}
+
+The present vignette provides a summary of our proposed multiple testing procedures \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04} (Section \ref{anal:mult:s:methods})
+and discusses their software implementation in the Bioconductor R package \Rpackage{multtest} (Section \ref{anal:mult:s:software}).
+The accompanying vignette (MTPALL) describes their application to the ALL dataset of Chiaretti et al. \cite{Chiarettietal04}.
+
+Specifically, given a multivariate dataset (stored as a \Rclass{matrix}, \Rclass{data.frame}, or microarray object of class \Rclass{ExpressionSet}) 
+and user-supplied choices for the test statistics, Type I error rate and its target level, resampling-based estimator of the test statistics null distribution, and procedure for error rate control, the main user-level function \Robject{MTP} returns unadjusted and adjusted $p$-values, cut-off vectors for the test statistics, and estimates and confidence regions for the parameters of interest. 
+Both bootstrap and permutation estimators of the test statistics null distribution are available and can optionally be output to the user. 
+The variety of models and hypotheses, test statistics, Type I error rates, and MTPs currently implemented are discussed in Section \ref{anal:mult:s:MTP}.
+The S4 class/method object-oriented programming approach was adopted to represent the results of a MTP. 
+Several methods are defined to produce numerical and graphical summaries of these results (Section \ref{anal:mult:s:summaries}).
+A modular programming approach, which utilizes function closures, allows interested users to readily extend the package's functionality, 
+by inserting functions for new test statistics and testing procedures (Section \ref{anal:mult:s:design}).
+Ongoing efforts are discussed in Section \ref{anal:mult:s:disc}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Methods}
+\label{anal:mult:s:methods}
+
+\subsection{Multiple hypothesis testing framework}
+\label{anal:mult:s:framework}
+
+{\em Hypothesis testing} is concerned with using observed data to test hypotheses, i.e.,  make decisions, regarding properties of the unknown data generating distribution. 
+Below, we discuss in turn the main ingredients of a multiple testing problem, namely: data, null and alternative hypotheses, test statistics, multiple testing procedure (MTP) to define rejection regions for the test statistics, Type I and Type II errors, and adjusted $p$-values. 
+The crucial choice of a test statistics null distribution is addressed in Section \ref{anal:mult:s:nullDistn}. 
+Specific proposals of MTPs are given in Sections \ref{anal:mult:s:SS} -- \ref{anal:mult:s:AMTP}.\\
+
+\noindent
+{\bf Data.} Let $X_1,\ldots,X_n$ be a {\em random sample} of $n$ independent and identically distributed (i.i.d.) random variables, $X \sim P\in {\cal M}$, where the {\em data generating distribution} $P$ is known to be an element of a particular {\em statistical model} ${\cal M}$ (i.e., a set of possibly non-parametric distributions).\\
+
+\noindent
+{\bf Null and alternative hypotheses.} 
+In order to cover a broad class of testing problems, define $M$
+null hypotheses in terms of a collection of {\em submodels}, ${\cal
+  M}(m)\subseteq {\cal M}$,  $m=1,\ldots,M$, for the data generating
+distribution $P$. The $M$ {\em null hypotheses} are defined as
+$H_0(m) \equiv \mathrm{I}(P\in {\cal M}(m))$ and the corresponding {\em
+  alternative hypotheses} as $H_1(m) \equiv \mathrm{I}(P \notin {\cal M}(m))$.
+
+In many testing problems, the submodels concern {\em parameters}, i.e., functions of the data generating distribution $P$, $\Psi(P) = \psi= (\psi(m):m=1,\ldots,M)$, such as means, differences in means, correlations, and parameters in linear models, generalized linear models, survival models, time-series models, dose-response models, etc. One distinguishes between two types of testing problems: {\em one-sided tests}, where $H_0(m) = \mathrm{I}(\psi(m) \leq \psi_0(m))$, and {\em two-sided tests}, where $H_0(m) = \mathrm{I}(\psi(m) = \psi_0(m))$.
+The hypothesized {\em null values}, $\psi_0(m)$, are frequently zero.
+
+ Let ${\cal H}_0={\cal H}_0(P)\equiv \{m:H_0(m)=1\} = \{m: P \in {\cal M}(m)\}$ be the set of $h_0 \equiv |{\cal H}_0|$ true null hypotheses, where we note that ${\cal H}_0$ depends on the data generating distribution $P$. Let ${\cal H}_1={\cal H}_1(P) \equiv {\cal H}_0^c(P) = \{m: H_1(m) = 1\} = \{m: P \notin {\cal M}(m)\}$
+be the set of  $h_1 \equiv |{\cal H}_1|  = M-h_0$ false null hypotheses, i.e., true positives.  
+The goal of a multiple testing
+  procedure is to accurately estimate the set ${\cal H}_0$, and thus its
+  complement ${\cal H}_1$, while controlling probabilistically the number
+  of false positives at a user-supplied level $\alpha$.\\
+
+\noindent
+{\bf Test statistics.} A testing procedure is a data-driven rule for deciding whether or not to {\em reject}  each of the $M$ null hypotheses $H_0(m)$, i.e., declare that $H_0(m)$ is false (zero) and hence $P \notin {\cal M}(m)$. 
+The decisions to reject or not the null hypotheses are based on an $M$--vector of
+{\em test statistics}, $T_n
+  =(T_n(m):m=1,\ldots,M)$, that are functions of the
+data, $X_1, \ldots, X_n$. Denote the typically unknown (finite sample) {\em joint distribution} of the test statistics $T_n$ by $Q_n=Q_n(P)$. 
+
+
+Single-parameter null hypotheses are commonly tested using {\em $t$-statistics}, i.e., standardized differences,
+\begin{equation}\label{anal:mult:e:tstat}
+T_n(m) \equiv \frac{\mbox{Estimator} - \mbox{Null value}}{\mbox{Standard error}} = \sqrt{n}\frac{\psi_n(m) - \psi_0(m)}{{\sigma_n(m)}}.
+\end{equation}
+In general, the $M$--vector $\psi_n = (\psi_n(m): m=1,\ldots, M)$ denotes an asymptotically linear {\em estimator} of the parameter $M$--vector $\psi = (\psi(m): m=1,\ldots,M)$ and $(\sigma_n(m)/\sqrt{n}:
+m=1,\ldots, M)$ denote consistent estimators of the {\em standard errors} of the components of $\psi_n$. 
+For tests of means, one recovers the usual one-sample and two-sample $t$-statistics, where the $\psi_n(m)$ and $\sigma_n(m)$ are based on sample means and variances, respectively.
+In some settings, it may be appropriate to use (unstandardized) {\em difference statistics}, $T_n(m) \equiv \sqrt{n}(\psi_n(m) - \psi_0(m))$ \cite{Pollard&vdLaanJSPI04}.
+Test statistics for other types of null hypotheses include $F$-statistics, $\chi^2$-statistics, and likelihood ratio statistics. \\
+
+
+\noindent
+{\bf Example: ALL microarray dataset.}
+Suppose that, as in the analysis of the ALL dataset of Chiaretti et al. \cite{Chiarettietal04} (see accompanying vignette MTPALL), one is interested in identifying genes that are differentially expressed in two populations of ALL cancer patients, those with normal cytogenetic test status and those with abnormal test status.
+The data consist of random $J$--vectors $X$, where the first $M$ entries of $X$ are microarray expression measures on $M$ genes of interest and the last entry, $X(J)$, is an indicator for cytogenetic test status (1 for normal, 0 for abnormal). 
+Then, the parameter of interest is an $M$--vector of differences in mean expression measures in the two populations, $\psi(m) = E[X(m) | X(J)=0] - E[X(m) | X(J)=1]$, $m=1,\ldots,M$. 
+To identify genes with higher mean expression measures in the abnormal compared to the normal cytogenetics subjects, one can test the one-sided null hypotheses $H_0(m) = \mathrm{I}(\psi(m) \leq 0)$ vs. the alternative hypotheses $H_1(m) = \mathrm{I}(\psi(m) > 0)$, using two-sample Welch $t$-statistics 
+\begin{equation}
+T_n(m) \equiv \frac{\bar{X}_{0,n_0}(m) - \bar{X}_{1,n_1}(m)}{\sqrt{\frac{\sigma_{0,n_0}^2(m)}{n_0} + \frac{\sigma_{1,n_1}^2(m)}{n_1}}},
+\end{equation}
+where $n_k$, $\bar{X}_{k,n_k}(m)$, and $\sigma_{k,n_k}^2(m)$ denote, respectively, the sample size, sample means, and sample variances, for patients with test status $k$, $k=0,\, 1$. The null hypotheses are rejected, i.e., the corresponding genes are declared differentially expressed, for large values of the test statistics $T_n(m)$.\\
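+
+As an illustration, such Welch $t$-statistics can be computed directly with the \Robject{mt.teststat} function of \Rpackage{multtest}. The sketch below (not evaluated here) assumes \Robject{X} is an $M \times n$ matrix of expression measures and \Robject{status} the $n$--vector of 0/1 cytogenetic test status labels.
+
+<<welchT, eval=FALSE>>=
+library(multtest)
+## two-sample Welch t-statistics (test="t" allows unequal variances)
+teststat <- mt.teststat(X, classlabel = status, test = "t")
+@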
+
+\noindent
+{\bf Multiple testing procedure.} A {\em multiple testing procedure} (MTP) provides {\em rejection regions}, ${\cal C}_n(m)$, i.e., sets of values for each test statistic $T_n(m)$ that lead to the decision to reject the null hypothesis $H_0(m)$. 
+In other words, a MTP produces a random (i.e., data-dependent) subset ${\cal R}_n$ of rejected hypotheses that estimates ${\cal H}_1$, the set of true positives,
+\begin{equation}
+{\cal R}_n={\cal R}(T_n, Q_{0n},\alpha) \equiv 
+\{m:\mbox{$H_0(m)$ is rejected}\} = \{m: T_n(m) \in {\cal C}_n(m)\},
+\end{equation}
+where ${\cal C}_n(m)={\cal C}(T_n,Q_{0n},\alpha)(m)$, $m=1,\ldots,M$, denote possibly random rejection regions. The long notation ${\cal R}(T_n, Q_{0n},\alpha)$ and ${\cal C}(T_n, Q_{0n},\alpha)(m)$ emphasizes that the MTP depends on:
+(i) the {\em data}, $X_1, \ldots, X_n$,
+ through the $M$--vector of {\em test statistics}, $T_n = (T_n(m): m=1,\ldots,
+ M)$;
+ (ii) a test statistics {\em null distribution}, $Q_{0n}$ (Section \ref{anal:mult:s:nullDistn}); and 
+(iii) the {\em nominal level} $\alpha$ of the MTP, i.e., the desired upper bound for a suitably defined false positive rate. 
+
+Unless specified otherwise, it is assumed that large values of the test statistic $T_n(m)$ provide evidence against the corresponding null hypothesis $H_0(m)$, that is, we consider rejection regions of the form ${\cal C}_n(m) = (c_n(m),\infty)$, where $c_n(m)$ are to-be-determined {\em cut-offs}, or {\em critical values}.\\ 
+
+\noindent
+{\bf Type I and Type II errors.} In any
+testing situation, two types of errors can be committed: a {\em false
+positive}, or {\em Type I error}, is committed by rejecting a true
+null hypothesis, and a {\em false negative}, or {\em Type
+II error}, is committed when the test procedure fails to reject a false null
+hypothesis. The situation can be summarized by Table \ref{anal:mult:t:TypeIandII}, below, where
+the number of Type I errors is $V_n \equiv \sum_{m \in {\cal H}_0} \mathrm{I}(T_n(m) \in {\cal C}_n(m)) = |{\cal R}_n \cap {\cal H}_0|$ and the number
+of Type II errors is $U_n \equiv \sum_{m \in {\cal H}_1} \mathrm{I}(T_n(m) \notin {\cal C}_n(m)) = |{\cal R}_n^c \cap {\cal H}_1|$. Note that both $U_n$
+and $V_n$ depend on the unknown data generating distribution $P$ through
+the unknown set of true null hypotheses ${\cal H}_0 = {\cal H}_0(P)$. The numbers $h_0=|{\cal H}_0|$ and $h_1 = |{\cal H}_1| = M-h_0$ of true and false null hypotheses are
+{\em unknown parameters}, the number of rejected hypotheses $R_n \equiv \sum_{m=1}^M  \mathrm{I}(T_n(m) \in {\cal C}_n(m)) = |{\cal R}_n|$ is an {\em observable random variable}, and the entries in the body of the table, $U_n$, $h_1 -
+U_n$, $V_n$, and $h_0-V_n$, are
+{\em unobservable random variables} (depending on $P$, through ${\cal H}_0(P)$). 
+\begin{table}[hhh]
+\caption{Type I and Type II errors in multiple hypothesis testing.}
+\label{anal:mult:t:TypeIandII}
+\begin{tabular}{ll|cc|l}
+\multicolumn{5}{c}{} \\
+\multicolumn{2}{c}{} & \multicolumn{2}{c}{Null hypotheses} & \multicolumn{1}{c}{}\\
+\multicolumn{2}{c}{} & \multicolumn{1}{c}{not rejected} & \multicolumn{1}{c}{rejected} & \multicolumn{1}{c}{} \\
+%%% \multicolumn{5}{c}{}\\
+\cline{3-4}
+&&&&\\
+& true & $| {\cal R}_n^c \cap {\cal H}_0 |$ &
+$V_n = | {\cal R}_n \cap {\cal H}_0 |$ &
+$h_0=| {\cal H}_0|$\\
+&&&(Type I errors)&\\
+Null hypotheses&&&&\\
+& false & $U_n = | {\cal R}_n^c \cap {\cal H}_1 |$ & $| {\cal R}_n \cap {\cal H}_1 |$ & $h_1=| {\cal H}_1
+|$\\
+&&(Type II errors)&&\\
+&&&&\\
+\cline{3-4}
+%%% \multicolumn{5}{c}{}\\
+\multicolumn{2}{c}{}& \multicolumn{1}{c}{$M-R_n$} &
+\multicolumn{1}{c}{ $R_n = | {\cal R}_n|$}
+&\multicolumn{1}{l}{$M$}\\
+\end{tabular}
+\end{table}
+
+Ideally, one would like to simultaneously minimize both the chances of committing a Type I error and a Type II error. Unfortunately, this is not feasible and one seeks a {\em trade-off} between the two types of errors. A standard approach is to specify an acceptable level $\alpha$ for the Type I error rate and derive testing procedures, i.e., rejection regions, that aim to minimize the Type II error rate, i.e., maximize {\em power}, within the class of tests with Type I error rate at most $\alpha$.\\
+
+
+\noindent
+{\bf Type I error rates.}
+When testing multiple hypotheses, there are many possible definitions for the Type I error rate (and power). Accordingly, we adopt a general definition of Type I error rates, as parameters, $\theta_n = \theta(F_{V_n,R_n})$, of the joint distribution $F_{V_n,R_n}$ of the numbers of Type I errors $V_n$ and rejected hypotheses $R_n$. 
+Such a general representation covers the following commonly-used Type I error rates.
+\begin{enumerate}
+\item 
+{\em Generalized family-wise error rate} (gFWER), or 
+ probability of at least $(k+1)$ Type I errors, $k=0,\ldots, (h_0-1)$,
+\begin{equation}\label{anal:mult:e:gFWER}
+gFWER(k) \equiv Pr(V_n > k) = 1 - F_{V_n}(k).
+\end{equation}
+When $k=0$, the gFWER is the usual {\em family-wise error rate}, FWER, controlled by the classical Bonferroni procedure.
+\item
+{\em Per-comparison error rate} (PCER), or expected 
+proportion of Type I errors among the $M$ tests,
+\begin{equation}\label{anal:mult:e:PCER}
+PCER \equiv \frac{1}{M} E[V_n] = \frac{1}{M} \int v dF_{V_n}(v).
+\end{equation}
+\item
+{\em Tail probabilities for the proportion of false positives} (TPPFP) among the rejected hypotheses,
+\begin{equation}\label{anal:mult:e:TPPFP}
+TPPFP(q) \equiv Pr(V_n/R_n > q) = 1 - F_{V_n/R_n}(q), \qquad q \in (0,1),
+\end{equation}
+with the convention that $V_n/R_n \equiv 0$, if $R_n=0$.
+\item
+{\em False discovery rate} (FDR), or  expected value of the proportion of false positives among the rejected hypotheses, 
+\begin{equation}\label{anal:mult:e:FDR}
+FDR \equiv E[V_n/R_n] = \int q dF_{V_n/R_n}(q),
+\end{equation}
+again with the convention that $V_n/R_n \equiv 0$, if $R_n=0$ \cite{Benjamini&Hochberg95}. 
+\end{enumerate}
+Note that while the gFWER is a parameter of only the {\em marginal} distribution $F_{V_n}$ for the number of Type I errors $V_n$ (tail probability, or survivor function, for $V_n$), the TPPFP is a parameter of the {\em joint} distribution of $(V_n,R_n)$ (tail probability, or survivor function, for $V_n/R_n$). 
+ Error rates based on the {\em proportion} of false positives (e.g., TPPFP and FDR) are especially appealing for the large-scale testing problems encountered in genomics, compared to error rates based on the {\em number} of false positives (e.g., gFWER), as they do not increase exponentially with the number of hypotheses. 
+The above four error rates are part of the broad class of Type I error rates considered in Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and defined as tail probabilities $Pr(g(V_n,R_n) > q)$ and expected values $E[g(V_n,R_n)]$ for an arbitrary function $g(V_n,R_n)$ of the numbers of false positives $V_n$ and rejected hypotheses $R_n$. The gFWER and TPPFP correspond to the special cases $g(V_n,R_n) = V_n$ and $g(V_n,R_n) = V_n/R_n$, respectively.\\
+
+
+\noindent
+{\bf Adjusted $p$-values.} The notion of $p$-value extends directly to multiple testing problems, as follows. 
+Given a MTP, ${\cal R}_n = {\cal R}(T_n,Q_{0n}, \alpha)$, the {\em adjusted $p$-value}, $\widetilde{P}_{0n}(m) = \widetilde{P}(T_n,Q_{0n})(m)$, for null hypothesis $H_0(m)$, is defined as the smallest Type I error level $\alpha$ at which one would reject $H_0(m)$, that is,
+\begin{eqnarray}
+\widetilde{P}_{0n}(m) &\equiv& \inf \left \{ \alpha \in [0,1]: \mbox{Reject $H_0(m)$ at MTP level $\alpha$}\right \}\\
+&=& \inf\left \{\alpha \in [0,1]: m \in {\cal R}_n \right \}\nonumber \\
+&=& \inf\left \{\alpha \in [0,1]: T_n(m) \in {\cal C}_n(m) \right \}, \qquad m=1,\ldots, M.\nonumber
+\end{eqnarray}
+As in single hypothesis tests, the smaller the adjusted $p$-value, the stronger the evidence against the corresponding null hypothesis. The main difference between unadjusted (i.e., for the test of a single hypothesis) and adjusted $p$-values is that the latter are defined in terms of the Type I error rate for the {\em entire} testing procedure, i.e., take into account the multiplicity of tests.
+For example, the adjusted $p$-values for the classical Bonferroni procedure for FWER control are given by $\widetilde{P}_{0n}(m) = \min(M P_{0n}(m), 1)$, 
+where $P_{0n}(m)$ is the unadjusted $p$-value for the test of single hypothesis $H_0(m)$.
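+
+As a one-line illustration, assuming a hypothetical vector \Robject{rawp} of unadjusted $p$-values, the Bonferroni adjusted $p$-values can be computed directly (the \Robject{mt.rawp2adjp} function, discussed in Section \ref{anal:mult:s:MTP}, implements this and other marginal procedures).
+
+<<bonfSketch, eval=FALSE, echo=TRUE>>=
+## Bonferroni adjusted p-values from a hypothetical vector rawp of
+## unadjusted p-values; mt.rawp2adjp(rawp, proc = "Bonferroni") is the
+## package's implementation.
+adjp <- pmin(length(rawp) * rawp, 1)
+@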
+
+We now have two representations for a MTP, in terms of rejection regions for the test statistics  and in terms of adjusted $p$-values 
+\begin{equation}
+{\cal R}_n = \{m: T_n(m) \in {\cal C}_n(m) \} = \{m: \widetilde{P}_{0n}(m) \leq \alpha\}.
+\end{equation}
+Again, as in the single hypothesis case, an
+advantage of reporting adjusted $p$-values, as opposed to only
+rejection or not of the hypotheses, is that the level $\alpha$ of the test does
+not need to be determined in advance, that is, results of the multiple
+testing procedure are provided for all $\alpha$. 
+ Adjusted $p$-values are convenient and flexible summaries of the strength of the evidence against each null hypothesis, in terms of the Type I error rate for the entire MTP (gFWER, TPPFP, FDR, or any other suitably defined error rate). \\
+
+\noindent
+{\bf Stepwise multiple testing procedures.} 
+One usually distinguishes between two main classes of multiple testing
+procedures, single-step and stepwise procedures.  
+ In {\em single-step procedures}, each null hypothesis is
+ evaluated using a rejection region that is  independent of the results of the tests of other hypotheses.
+Improvement in power, while preserving Type I error rate
+control, may be achieved by {\em stepwise procedures}, in which 
+rejection of a particular null hypothesis depends on the outcome of
+the tests of other hypotheses. 
+That is, the (single-step) test procedure is applied to a sequence of successively smaller nested random (i.e., data-dependent) subsets of null hypotheses, defined by the ordering of the test statistics (common cut-offs) or unadjusted $p$-values (common-quantile cut-offs).
+In {\em step-down procedures}, the hypotheses
+corresponding to the {\em most significant} test statistics (i.e., largest absolute test
+statistics or smallest unadjusted $p$-values) are considered successively, with further tests depending
+on the outcome of earlier ones.
+As soon as one fails to reject a null hypothesis, no further
+hypotheses are rejected. 
+In contrast, for {\em step-up procedures},
+the hypotheses corresponding to the {\em least significant} test
+statistics are considered successively, again with further tests
+depending on the outcome of earlier ones. As soon as one hypothesis
+is rejected, all remaining more significant hypotheses are rejected.\\
+
+
+
+\noindent
+{\bf Confidence regions.} 
+For the test of single-parameter null hypotheses and for any Type I error rate of the form $\theta(F_{V_n})$, Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} provide results on the correspondence between single-step MTPs and $\theta$--specific {\em confidence regions}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Test statistics null distribution}
+\label{anal:mult:s:nullDistn}
+
+\noindent
+{\bf Test statistics null distribution.}
+One of the main tasks in specifying a MTP is to derive rejection regions for the test statistics such that the Type I error rate is controlled at a desired level $\alpha$, i.e., such that $\theta(F_{V_n,R_n}) \leq \alpha$, for finite sample control, or $\limsup_n \theta(F_{V_n,R_n}) \leq \alpha$, for asymptotic control.
+However, one is immediately faced with the problem that the {\em true distribution} $Q_n=Q_n(P)$ of the test statistics $T_n$ is usually {\em unknown}, and hence, so are the distributions of the numbers of Type I errors, $V_n = \sum_{m \in {\cal H}_0} \mathrm{I}(T_n(m) \in {\cal C}_n(m))$, and rejected hypotheses, $R_n = \sum_{m=1}^M  \mathrm{I}(T_n(m) \in {\cal C}_n(m))$. 
+In practice, the test statistics {\em true distribution} $Q_n(P)$ is replaced by a {\em null distribution} $Q_0$ (or estimate thereof, $Q_{0n}$), in order to derive rejection regions, ${\cal C}(T_n,Q_0,\alpha)(m)$, and resulting adjusted $p$-values, $\widetilde{P}(T_n,Q_0)(m)$. 
+
+The choice of null distribution $Q_0$ is crucial, in order
+to ensure that (finite sample or asymptotic) control of the Type I
+error rate under the {\em assumed} null distribution $Q_0$ does indeed provide the required control under the {\em true} distribution $Q_n(P)$.
+For proper control, the null distribution $Q_0$ must be such that the Type I error rate under this assumed null distribution {\em dominates} the Type I error rate under the true distribution $Q_n(P)$. That is, one must have $\theta(F_{V_n,R_n}) \leq \theta(F_{V_0,R_0})$, for finite sample control, and $\limsup_n \theta(F_{V_n,R_n}) \leq  \theta(F_{V_0,R_0})$, for asymptotic control, where $V_0$ and $R_0$ denote, respectively, the numbers of Type I errors and rejected hypotheses under the assumed null distribution $Q_0$.
+
+
+For error rates $\theta(F_{V_n})$, defined as arbitrary parameters of the distribution of the number of Type I errors $V_n$, we propose as null distribution the asymptotic distribution $Q_0$ of the vector of null value shifted and scaled test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}:
+\begin{equation}
+Z_n(m) \equiv 
+ \sqrt{\min \left(1,
+  \frac{\tau_0(m)}{Var[T_n(m)]}\right)} \Bigl( T_n(m) + \lambda_0(m) - E[T_n(m)] \Bigr).
+\end{equation}
+For the test of single-parameter null hypotheses using $t$-statistics, the null values are $\lambda_0(m)=0$ and $\tau_0(m)=1$. For testing the equality of $K$ population means using $F$-statistics, the null values are  $\lambda_0(m)= 1$ and $\tau_0(m) = 2/(K-1)$, under the assumption of equal variances in the different populations.
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and van der Laan et al. \cite{vdLaanetalMT2SAGMB04} prove that this null distribution does indeed provide the desired asymptotic control of the Type I error rate $\theta(F_{V_n})$, for
+ general data generating distributions (with arbitrary dependence structures among variables), null hypotheses (defined in terms of submodels for the data generating distribution), and test statistics (e.g., $t$-statistics, $F$-statistics).
+
+For a broad class of testing problems, such as the test of single-parameter null hypotheses using $t$-statistics (as in Equation (\ref{anal:mult:e:tstat})), the null distribution $Q_0$ is an $M$--variate Gaussian distribution with mean vector zero and covariance matrix $\Sigma^*(P)$: $Q_0 = Q_0(P) \equiv N(0,\Sigma^*(P))$. 
+For tests of means, where the parameter of interest is the $M$--dimensional mean vector $\Psi(P) = \psi = E[X]$, the estimator $\psi_n$ is simply the $M$--vector of sample averages and $\Sigma^*(P)$ is the correlation matrix of $X \sim P$, $Cor[X]$. More generally, for an asymptotically linear estimator $\psi_n$, $\Sigma^*(P)$ is the correlation matrix of the vector influence curve (IC).
+
+Note that the following important points distinguish our approach from existing approaches to Type I error rate control. 
+Firstly, we are only concerned with Type I error control under the {\em true data generating distribution} $P$. The notions of weak and strong control (and associated subset pivotality, Westfall \& Young \cite{Westfall&Young93},
+p. 42--43) are therefore irrelevant to our approach. 
+Secondly, we propose a {\em null distribution for the test statistics} ($T_n \sim Q_0$), and not a data generating null distribution ($X \sim P_0\in \cap_{m=1}^M {\cal M}(m)$). 
+The latter practice does not necessarily provide proper Type I error control, as the test statistics' {\em assumed} null distribution $Q_n(P_0)$ and their {\em true} distribution $Q_n(P)$ may have different dependence structures (in the limit) for the true null hypotheses ${\cal H}_0$.\\
+
+
+\noindent
+{\bf Bootstrap estimation of the test statistics null distribution.}
+In practice, since the data generating distribution $P$ is unknown, so is the proposed null distribution $Q_0=Q_0(P)$. Resampling procedures, such as bootstrap Procedure \ref{anal:mult:proc:boot}, below, may be used to conveniently obtain consistent estimators $Q_{0n}$ of the null distribution $Q_0$ and of the resulting test statistic cut-offs and adjusted $p$-values. 
+
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and van der Laan et al. \cite{vdLaanetalMT2SAGMB04} show that single-step and step-down procedures based on consistent estimators of the null distribution $Q_0$ also provide asymptotic control of the Type I error rate. The reader is referred to these two articles and to Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} for details on the choice of null distribution and various approaches for estimating this null distribution.
+
+Having selected a suitable test statistics null distribution, there remains the main task of specifying rejection regions for each null hypothesis, i.e., cut-offs for each test statistic. 
+Among the different approaches for defining rejection regions, we distinguish between single-step vs. stepwise procedures, and common cut-offs (i.e., the same cut-off $c_0$ is used for each test statistic) vs. common-quantile cut-offs (i.e., the cut-offs are the $\delta_0$--quantiles of the marginal null distributions of the test statistics). 
+The next three subsections discuss three main approaches for deriving rejection regions and corresponding adjusted $p$-values: single-step common-cut-off and common-quantile procedures for control of general Type I error rates $\theta(F_{V_n})$ (Section \ref{anal:mult:s:SS}); step-down common-cut-off (maxT) and common-quantile (minP) procedures for control of the FWER (Section \ref{anal:mult:s:SD}); augmentation procedures for control of the gFWER and TPPFP, based on an initial FWER-controlling procedure (Section \ref{anal:mult:s:AMTP}).
+
+\begin{center}
+\fbox{\parbox{4.5in}{%
+\begin{procedure}
+\label{anal:mult:proc:boot}
+{\bf [Bootstrap estimation of the null distribution $Q_0$]}
+\begin{enumerate} 
+\item
+ Let $P_n^{\star}$ denote an estimator of the data generating distribution
+$P$. For the {\em non-parametric bootstrap},  $P_n^{\star}$ is simply the
+empirical distribution $P_n$, that is, samples of size $n$ are drawn
+at random, with replacement from the observed data $X_1, \ldots, X_n$. For
+the {\em model-based bootstrap}, $P_n^{\star}$ is based on a model ${\cal
+  M}$ for the data generating distribution $P$, such
+as the family of $M$--variate Gaussian distributions.
+\item
+Generate $B$ bootstrap samples, each consisting of $n$ i.i.d. realizations of a random variable $X^{\#} \sim P_n^{\star}$. 
+\item
+For the $b$th bootstrap sample, $b=1,\ldots, B$, compute an $M$--vector of test statistics, $T_n^{\#}(\cdot,b) = (T_n^{\#}(m,b): m=1,\ldots,M)$.  Arrange these bootstrap statistics in an $M \times B$ matrix, $\mathbf{T}_n^{\#} = \bigl(T_n^{\#}(m,b)\bigr)$, with rows corresponding to the $M$ null hypotheses and columns to the $B$ bootstrap samples.
+\item
+Compute row means, $E[T_n{^\#}(m,\cdot)]$, and row variances, $Var[T_n{^\#}(m,\cdot)]$, of the matrix $\mathbf{T}_n^{\#}$, to yield estimates of the true means $E[T_n(m)]$ and variances $Var[T_n(m)]$ of the test statistics, respectively.
+\item
+Obtain an $M \times B$ matrix, $\mathbf{Z}_n^{\#} = \bigl(Z_n^{\#}(m,b)\bigr)$, of
+null value shifted and scaled bootstrap statistics $Z_n^{\#}(m,b)$, by row-shifting and scaling the matrix
+$\mathbf{T}_n^{\#}$ using the bootstrap estimates of $E[T_n(m)]$ and
+$Var[T_n(m)]$ and the user-supplied null values $\lambda_0(m)$ and
+$\tau_0(m)$. That is, compute 
+\begin{eqnarray}
+Z_n^{\#}(m,b) &\equiv&  \sqrt{\min \left(1,
+  \frac{\tau_0(m)}{Var[T_n{^\#}(m,\cdot)]}\right)}\\
+&& \qquad \times \ \Bigl( T_n^{\#}(m,b) + \lambda_0(m) - E[T_n{^\#}(m,\cdot)] \Bigr)  \nonumber .
+\end{eqnarray}
+\item
+The bootstrap
+estimate $Q_{0n}$ of the null distribution $Q_0$ is the empirical distribution of the $B$ columns $Z_n^{\#}(\cdot,b)$ of matrix $\mathbf{Z}_n^{\#}$.
+\end{enumerate}
+\end{procedure}
+}}
+\end{center}
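+
+To make Procedure \ref{anal:mult:proc:boot} concrete, the following minimal R sketch implements its non-parametric version, under the assumption of a hypothetical function \Robject{stat} mapping a resampled dataset to an $M$--vector of test statistics (for tests involving an outcome, the outcome entries would be resampled jointly with the columns of \Robject{X}). The internal function \Robject{boot.resample} (Section \ref{anal:mult:s:software}) is the package's actual implementation.
+
+<<bootNullSketch, eval=FALSE, echo=TRUE>>=
+## Sketch of the non-parametric bootstrap procedure: X is an M x n data
+## matrix and stat a hypothetical function returning an M-vector of test
+## statistics for a resampled dataset.
+boot.null <- function(X, stat, B = 1000, lambda0 = 0, tau0 = 1) {
+  n <- ncol(X)
+  Tboot <- replicate(B, stat(X[, sample(n, n, replace = TRUE), drop = FALSE]))
+  mu <- rowMeans(Tboot)            # bootstrap estimates of E[T_n(m)]
+  v  <- apply(Tboot, 1, var)       # bootstrap estimates of Var[T_n(m)]
+  sqrt(pmin(1, tau0 / v)) * (Tboot + lambda0 - mu)   # M x B matrix Z_n
+}
+@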
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Single-step procedures for control of general Type I error rates $\theta(F_{V_n})$}
+\label{anal:mult:s:SS}
+
+
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} propose single-step common-cut-off and common-quantile procedures for controlling arbitrary parameters $\theta(F_{V_n})$ of the distribution of the number of Type I errors. 
+The main idea is to substitute control of the parameter $\theta(F_{V_n})$, for the {\em  unknown, true distribution} $F_{V_n}$ of the number of Type I errors, by control of the corresponding parameter $\theta(F_{R_0})$, for the {\em known, null distribution} $F_{R_0}$ of the number of rejected hypotheses. 
+That is, consider single-step procedures of the form ${\cal R}_n \equiv \{m: T_n(m)> c_n(m) \}$, 
+where the cut-offs $c_n(m)$ are chosen so that $\theta(F_{R_0}) \leq
+\alpha$, for $R_0 \equiv \sum_{m=1}^M \mathrm{I}(Z(m) >  c_n(m))$
+and $Z \sim Q_0$.
+Among the class of MTPs that satisfy $\theta(F_{R_0}) \leq \alpha$, 
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} propose two procedures, based on common cut-offs and common-quantile cut-offs, respectively. 
+The procedures are summarized below and the reader is referred to the articles for proofs and details on the derivation of cut-offs and adjusted $p$-values.\\
+
+\noindent
+{\bf Single-step common-cut-off procedure.} The set of rejected hypotheses for the {\em $\theta$--controlling single-step common-cut-off procedure} is of the form
+${\cal R}_n \equiv \{m: T_n(m)> c_0 \}$, where the common cut-off $c_0$ is the {\em smallest}  (i.e., least conservative) value for which $\theta(F_{R_0}) \leq \alpha$.
+
+For $gFWER(k)$ control (special case $\theta(F_{V_n}) = 1 - F_{V_n}(k)$), the procedure is based on the {\em $(k+1)$st ordered test statistic}.  
+Specifically, the adjusted $p$-values are given by
+\begin{equation}\label{anal:mult:e:SScut}
+\widetilde{p}_{0n}(m) = Pr_{Q_0} \left(Z^{\circ}(k+1) \geq t_n(m) \right),  \qquad m=1,\ldots, M,
+\end{equation}
+where $Z^{\circ}(m)$ denotes the $m$th ordered component of $Z = (Z(m): m=1,\ldots,M) \sim Q_0$, so that $Z^{\circ}(1) \geq \ldots \geq Z^{\circ}(M)$. 
+For FWER control ($k=0$), the procedure reduces to the  {\em single-step maxT procedure}, based on the {\em maximum test statistic}, $Z^{\circ}(1)$.\\
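+
+Empirically, given the observed test statistics and an estimated null distribution (e.g., the $M \times B$ matrix returned by the bootstrap sketch above), these adjusted $p$-values can be approximated as follows, with \Robject{Tn}, \Robject{Z}, and \Robject{k} assumed objects.
+
+<<ssCutoffSketch, eval=FALSE, echo=TRUE>>=
+## Sketch: empirical common-cut-off adjusted p-values; Tn is the observed
+## M-vector of statistics, Z an M x B null distribution matrix, and k the
+## allowed number of false positives; k = 0 yields single-step maxT
+## (internal function ss.maxT).
+Zk <- apply(Z, 2, function(z) sort(z, decreasing = TRUE)[k + 1])
+adjp <- sapply(Tn, function(t) mean(Zk >= t))
+@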
+
+\noindent
+{\bf Single-step common-quantile procedure.} The set of rejected hypotheses for the {\em $\theta$--controlling single-step common-quantile procedure} is of the form
+${\cal R}_n \equiv \{m: T_n(m)> c_0(m) \}$, where $c_0(m) = Q_{0,m}^{-1}(\delta_0)$ is the $\delta_0$--quantile of the marginal null distribution $Q_{0,m}$ of the $m$th test statistic, i.e., the smallest value $c$ such that $Q_{0,m}(c) = Pr_{Q_0}(Z(m) \leq c) \geq \delta_0$ for $Z \sim Q_0$. Here, $\delta_0$ is chosen as the {\em smallest} (i.e., least conservative) value for which $\theta(F_{R_0}) \leq \alpha$.
+
+For $gFWER(k)$ control, the procedure is based on the {\em $(k+1)$st ordered unadjusted $p$-value}. 
+Specifically, let $\bar{Q}_{0,m} \equiv 1 - Q_{0,m}$ denote the survivor functions for the marginal null distributions $Q_{0,m}$ and define unadjusted $p$-values $P_0(m) \equiv  \bar{Q}_{0,m}(Z(m))$ and $P_{0n}(m) \equiv  \bar{Q}_{0,m}(T_n(m))$, for $Z \sim Q_0$ and  $T_n \sim Q_n$, respectively. Then, the adjusted $p$-values for the common-quantile procedure are given by
+\begin{equation}\label{anal:mult:e:SSquant}
+\widetilde{p}_{0n}(m) = Pr_{Q_0} \left(P_0^{\circ}(k+1) \leq p_{0n}(m) \right),  \qquad m=1,\ldots, M,
+\end{equation}
+where $P_0^{\circ}(m)$ denotes the $m$th ordered component of the $M$--vector of unadjusted $p$-values $(P_0(m): m=1,\ldots,M)$, so that $P_0^{\circ}(1) \leq \ldots \leq P_0^{\circ}(M)$.  
+For FWER control ($k=0$), one recovers the {\em single-step minP procedure}, based on the {\em minimum unadjusted $p$-value}, $P_0^{\circ}(1)$.
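+
+A corresponding empirical sketch, with \Robject{Tn}, \Robject{Z}, and \Robject{k} as above; here the marginal survivor functions are estimated by row-wise empirical ranks.
+
+<<ssQuantileSketch, eval=FALSE, echo=TRUE>>=
+## Sketch: empirical common-quantile adjusted p-values; k = 0 yields
+## single-step minP (internal function ss.minP).
+P0n <- sapply(seq_along(Tn), function(m) mean(Z[m, ] >= Tn[m]))  # unadj. p
+P0  <- t(apply(Z, 1, function(z) rank(-z, ties.method = "max") / length(z)))
+Pk  <- apply(P0, 2, function(p) sort(p)[k + 1])  # (k+1)st smallest per column
+adjp <- sapply(P0n, function(p) mean(Pk <= p))
+@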
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Step-down procedures for control of the family-wise error rate}
+\label{anal:mult:s:SD}
+
+van der Laan et al. \cite{vdLaanetalMT2SAGMB04} propose step-down common-cut-off (maxT) and common-quantile (minP) procedures for controlling the family-wise error rate, FWER. 
+These procedures are similar in spirit to their single-step counterparts in Section \ref{anal:mult:s:SS} (special case $\theta(F_{V_n}) = 1 - F_{V_n}(0)$), with the important step-down distinction that hypotheses are considered successively, from most significant to least significant, with further tests depending on the outcome of earlier ones. 
+That is, the test procedure is applied to a sequence of successively smaller nested random (i.e., data-dependent) subsets of null hypotheses, defined by the ordering of the test statistics (common cut-offs) or unadjusted $p$-values (common-quantile cut-offs). \\
+
+\noindent
+{\bf Step-down common-cut-off (maxT) procedure.}
+Rather than being based solely on the distribution of the maximum test statistic over all $M$ hypotheses, the step-down common cut-offs and corresponding adjusted $p$-values are based on the distributions of maxima of test statistics over successively smaller nested random subsets of null hypotheses. 
+Specifically, let $O_n(m)$ denote the indices for the ordered test statistics $T_n(m)$, so that $T_n(O_n(1)) \geq \ldots \geq T_n(O_n(M))$. 
+The step-down common-cut-off procedure is then based on the distributions of maxima of test statistics over the nested subsets of ordered hypotheses $\overline{\cal O}_n(h) \equiv \{O_n(h),\ldots,O_n(M)\}$. 
+The adjusted $p$-values for the {\em step-down maxT procedure} are given by 
+\begin{equation}\label{anal:mult:e:SDmaxT}
+\widetilde{p}_{0n}(o_n(m)) =  \max_{h=1,\ldots, m}\ \left\{ Pr_{Q_0}\left(
+  \max_{l \in \overline{\cal O}_n(h)} Z(l) \geq t_n(o_n(h))\right)
+  \right \},
+\end{equation}
+where $Z=(Z(m): m=1,\ldots, M)  \sim Q_0$. 
+Taking maxima of the probabilities over $h \in \{1, \ldots, m\}$ enforces monotonicity of the adjusted $p$-values and ensures that the procedure is indeed step-down, that is, one can only reject a particular hypothesis provided all hypotheses with
+more significant (i.e., larger) test statistics were rejected beforehand.\\
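+
+A minimal empirical sketch of these step-down maxT adjusted $p$-values, with \Robject{Tn} and \Robject{Z} as in the single-step sketches above (the internal function \Robject{sd.maxT} is the actual implementation):
+
+<<sdMaxTsketch, eval=FALSE, echo=TRUE>>=
+## Sketch: step-down maxT adjusted p-values from Tn and Z (as above).
+M <- length(Tn)
+o <- order(Tn, decreasing = TRUE)   # ordering indices O_n(m)
+adjp <- sapply(1:M, function(h)
+  mean(apply(Z[o[h:M], , drop = FALSE], 2, max) >= Tn[o[h]]))
+out <- numeric(M)
+out[o] <- cummax(adjp)              # enforce monotonicity of adjusted p-values
+@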
+
+\noindent
+{\bf Step-down common-quantile (minP) procedure.}
+Likewise, the step-down common-quantile cut-offs and corresponding adjusted $p$-values are based on the distributions of minima of unadjusted $p$-values over successively smaller nested random subsets of null hypotheses.
+Specifically, let $O_n(m)$ denote the indices for the ordered unadjusted $p$-values $P_{0n}(m)$, so that $P_{0n}(O_n(1)) \leq \ldots \leq P_{0n}(O_n(M))$. 
+The step-down common-quantile procedure is then based on the distributions of minima of unadjusted $p$-values over the nested subsets of ordered hypotheses $\overline{\cal O}_n(h) \equiv \{O_n(h),\ldots,O_n(M)\}$. 
+The adjusted $p$-values for the {\em step-down minP procedure} are given by
+\begin{equation}\label{anal:mult:e:SDminP}
+\widetilde{p}_{0n}(o_n(m)) = \max_{h=1,\ldots, m}\ \left\{ Pr_{Q_0}\left(
+  \min_{l \in \overline{\cal O}_n(h)} P_0(l) \leq p_{0n}(o_n(h))\right)
+  \right \},
+\end{equation}
+where $P_0(m) = \bar{Q}_{0,m}(Z(m))$ and $Z=(Z(m): m=1,\ldots, M)  \sim Q_0$. 
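+
+Analogously, a sketch with \Robject{P0} and \Robject{P0n} as in the single-step common-quantile sketch above (the internal function \Robject{sd.minP} is the actual implementation):
+
+<<sdMinPsketch, eval=FALSE, echo=TRUE>>=
+## Sketch: step-down minP adjusted p-values from P0 and P0n (as above).
+M <- length(P0n)
+o <- order(P0n)                     # ordering indices O_n(m)
+adjp <- sapply(1:M, function(h)
+  mean(apply(P0[o[h:M], , drop = FALSE], 2, min) <= P0n[o[h]]))
+out <- numeric(M)
+out[o] <- cummax(adjp)              # enforce monotonicity of adjusted p-values
+@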
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Augmentation multiple testing procedures}
+\label{anal:mult:s:AMTP}
+
+Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and van der Laan et al. \cite{vdLaanetalMT3SAGMB04} discuss {\em augmentation multiple testing procedures} (AMTP), obtained by adding suitably chosen null hypotheses to the set of null hypotheses already rejected by an initial MTP. 
+Specifically, given {\em any} initial procedure controlling the generalized family-wise error rate, augmentation procedures are derived for controlling Type I error rates defined as tail probabilities and expected values for arbitrary functions $g(V_n,R_n)$ of the numbers of Type I errors and rejected hypotheses (e.g., proportion $g(V_n,R_n)=V_n/R_n$ of false positives among the rejected hypotheses). 
+Adjusted $p$-values for the AMTP are shown to be simply shifted versions of the adjusted $p$-values of the original MTP. 
+The important practical implication of these results is that {\em any} FWER-controlling MTP and its
+corresponding adjusted $p$-values, provide, without additional work, multiple testing procedures controlling a broad class of Type I error rates and their adjusted $p$-values.
+One can therefore build on the large pool of available FWER-controlling procedures, such as the single-step and step-down maxT and minP procedures discussed in Sections \ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}, above. 
+
+Augmentation procedures for controlling tail probabilities of the number (gFWER) and proportion (TPPFP) of false positives, based on an initial FWER-controlling procedure, are treated in detail in van der Laan et al. \cite{vdLaanetalMT3SAGMB04} and are summarized below. The gFWER and TPPFP correspond to the special cases $g(V_n,R_n) = V_n$ and  $g(V_n,R_n) = V_n/R_n$, respectively. 
+Denote the adjusted $p$-values for the initial FWER-controlling procedure by $\widetilde{P}_{0n}(m)$. Order the $M$ null hypotheses according to these $p$-values, from smallest to largest, that is, define indices $O_n(m)$, so that $\widetilde{P}_{0n}(O_n(1))\leq \ldots \leq \widetilde{P}_{0n}(O_n(M))$. Then, for a nominal level $\alpha$ test, the initial FWER-controlling procedure rejects the $R_n$ null hypotheses 
+\begin{equation}
+{\cal R}_n \equiv \{m: \widetilde{P}_{0n}(m) \leq \alpha\}.
+\end{equation}
+
+\noindent
+{\bf Augmentation procedure for controlling the gFWER.} For control of $gFWER(k)$ at level $\alpha$, given an initial FWER-controlling procedure, reject the $R_n$ hypotheses specified by this MTP, as well as the next $A_n = \min\{k, M-R_n\}$ most significant null hypotheses. 
+The adjusted $p$-values $\widetilde{P}_{0n}^{+}(O_n(m))$ for the new gFWER-controlling AMTP are simply $k$--shifted versions of the adjusted $p$-values of the initial FWER-controlling MTP:
+\begin{equation}\label{anal:mult:e:adjpgFWER}
+\widetilde{P}_{0n}^{+}(O_n(m)) =
+\begin{cases}
+0, & \text{if $m=1,\ldots,k$},\\
+\widetilde{P}_{0n}(O_n(m-k)), & \text{if $m=k+1, \ldots, M$}.
+\end{cases}
+\end{equation}
+That is, the first $k$ adjusted $p$-values are set to zero and the remaining $p$-values are the adjusted $p$-values of the FWER-controlling MTP shifted by $k$. The AMTP thus guarantees at least $k$ rejected hypotheses.\\
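+
+A minimal sketch of this $k$--shift, assuming a hypothetical vector \Robject{adjp} of FWER adjusted $p$-values and an allowed number \Robject{k} of false positives (\Robject{fwer2gfwer} is the package's actual implementation):
+
+<<gfwerAugSketch, eval=FALSE, echo=TRUE>>=
+## Sketch: gFWER(k)-controlling augmentation adjusted p-values from FWER
+## adjusted p-values adjp (hypothetical object).
+M <- length(adjp)
+o <- order(adjp)                    # indices O_n(m)
+gfwer.adjp <- numeric(M)
+gfwer.adjp[o] <- c(rep(0, k), adjp[o][seq_len(M - k)])
+@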
+
+
+\noindent
+{\bf Augmentation procedure for controlling the TPPFP.} For control of $TPPFP(q)$ at level $\alpha$, given an initial FWER-controlling procedure, reject the $R_n$ hypotheses specified by this MTP, as well as the next $A_n$ most significant null hypotheses, 
+\begin{eqnarray}
+\label{anal:mult:e:augTPPFP}
+A_n &=& \max\left\{m \in \{0,\ldots, M - R_n\}:\frac{m}{m+ R_n}\leq q\right\} \nonumber\\
+&=& \min \left\{ \left \lfloor \frac{q R_n}{1-q} \right \rfloor, M-R_n \right\},
+\end{eqnarray}
+where the {\em floor} $\lfloor x \rfloor$ denotes the greatest integer less than or equal to $x$, i.e., $\lfloor x \rfloor \leq x < \lfloor x \rfloor + 1$. That is, keep rejecting null hypotheses until the ratio of additional rejections to the total number of rejections reaches the allowed proportion $q$ of false positives. 
+The adjusted $p$-values $\widetilde{P}_{0n}^{+}(O_n(m))$ for the new TPPFP-controlling AMTP are simply shifted versions of the adjusted $p$-values of the initial FWER-controlling MTP, that is,
+\begin{equation}\label{anal:mult:e:adjpTPPFP}
+\widetilde{P}_{0n}^{+}(O_n(m)) = \widetilde{P}_{0n}(O_n(\lceil(1-q)m\rceil)), \qquad m=1,\ldots,M,
+\end{equation}
+where the {\em ceiling} $\lceil x \rceil$ denotes the least integer greater than or equal to $x$, i.e., $\lceil x \rceil -1 < x \leq \lceil x \rceil$. \\
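+
+A minimal sketch of this shift, with \Robject{adjp}, \Robject{o}, and \Robject{M} as in the gFWER sketch above and \Robject{q} the allowed proportion of false positives (\Robject{fwer2tppfp} is the package's actual implementation):
+
+<<tppfpAugSketch, eval=FALSE, echo=TRUE>>=
+## Sketch: TPPFP(q)-controlling augmentation adjusted p-values.
+tppfp.adjp <- numeric(M)
+tppfp.adjp[o] <- adjp[o][ceiling((1 - q) * seq_len(M))]
+@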
+
+
+\noindent
+{\bf FDR-controlling procedures.}
+Given any TPPFP-controlling procedure, van der Laan et al. \cite{vdLaanetalMT3SAGMB04} derive two simple (conservative) FDR-controlling procedures. 
+The more general and conservative procedure controls the FDR at nominal level $\alpha$, by controlling $TPPFP(\alpha/2)$ at level $\alpha/2$. 
+The less conservative procedure controls the FDR at nominal level $\alpha$, by controlling $TPPFP(1 - \sqrt{1-\alpha})$ at level $1 - \sqrt{1-\alpha}$.
+In what follows, we refer to these two MTPs as ``conservative'' and ``restricted'', respectively.
+The reader is referred to the original article for details and proofs of FDR control (Section 2.4, Theorem 3).
+ 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Software implementation: \Rpackage{multtest} package}
+\label{anal:mult:s:software}
+
+\subsection{Overview}
+
+The MTPs proposed in Sections \ref{anal:mult:s:SS} -- \ref{anal:mult:s:AMTP} are implemented in the latest version of the Bioconductor R package \Rpackage{multtest} (version 1.5.0, Bioconductor release 1.5). 
+New features include: 
+expanded class of tests (e.g., for regression parameters in linear models and in Cox proportional hazards models);
+control of a wider selection of Type I error rates (e.g., gFWER, TPPFP, FDR); 
+bootstrap estimation of the test statistics null distribution; 
+augmentation multiple testing procedures;  
+confidence regions for the parameter vector of interest.
+Because of their general applicability and novelty, we focus in this section on MTPs that utilize a bootstrap estimated test statistics null distribution and that are available through the package's main user-level function: \Robject{MTP}.
+Note that for many testing problems, MTPs based on permutation (rather than bootstrap) estimated null distributions are also available in the present and earlier versions of \Rpackage{multtest}.
+In particular, permutation-based step-down maxT and minP FWER-controlling MTPs are implemented in the functions \Robject{mt.maxT} and \Robject{mt.minP}, respectively, and can also be applied directly through a call to the \Robject{MTP} function.
+
+We stress that {\em all} the bootstrap-based MTPs implemented in \Rpackage{multtest} can be performed using the main user-level function \Robject{MTP}. 
+Most users will therefore only need to be familiar with this function. 
+Other functions are provided primarily for the benefit of more advanced users, interested in extending the package's functionality (Section \ref{anal:mult:s:design}).
+For greater detail on \Rpackage{multtest} functions, the reader is referred to the package documentation, in the form of help files, e.g., \Robject{? MTP}, and vignettes, e.g., \Robject{openVignette("multtest")}. 
+
+One needs to specify the following main ingredients when applying a MTP: 
+the {\em data}, $X_1, \ldots, X_n$; 
+suitably defined {\em test statistics}, $T_n$, for each of the null hypotheses under consideration (e.g., one-sample $t$-statistics, robust rank-based $F$-statistics, $t$-statistics for regression coefficients in Cox proportional hazards model); 
+a choice of {\em Type I error rate}, $\theta(F_{V_n,R_n})$, providing an appropriate measure of false positives for the particular testing problem (e.g., $TPPFP(0.10)$);
+a proper {\em joint null distribution}, $Q_0$ (or estimate thereof, $Q_{0n})$, for the test statistics (e.g., bootstrap null distribution as in Procedure \ref{anal:mult:proc:boot}); 
+given the previously defined components, a {\em multiple testing procedure}, ${\cal R}_n={\cal R}(T_n, Q_{0n},\alpha)$, for controlling the error rate $\theta(F_{V_n,R_n})$ at a target level $\alpha$.
+Accordingly, the \Rpackage{multtest} package has adopted a modular and extensible approach to the implementation of MTPs, with the following four main types of functions.
+\begin{itemize}
+
+\item 
+Functions for computing the {\em test statistics}, $T_n$. These are internal functions (e.g., \Robject{meanX}, \Robject{coxY}), i.e., functions that are generally not called directly by the user. 
+As shown in Section \ref{anal:mult:s:MTP}, below, the type of test statistic is specified by the \Robject{test} argument of the main user-level function \Robject{MTP}.  
+Advanced users, interested in extending the class of tests available in \Rpackage{multtest}, can simply add their own test statistic functions to the existing library of such internal functions (see Section \ref{anal:mult:s:design}, below, for a brief discussion of the closure approach for specifying test statistics).
+
+\item
+Functions for obtaining the {\em test statistics null distribution}, $Q_0$, or an estimate thereof, $Q_{0n}$.  The main function currently available is the internal function \Robject{boot.resample}, implementing the non-parametric version of bootstrap Procedure \ref{anal:mult:proc:boot} (Section \ref{anal:mult:s:nullDistn}). 
+
+\item
+Functions for implementing the {\em multiple testing procedure}, ${\cal R}(T_n, Q_{0n},\alpha)$, i.e., for deriving rejection regions, confidence regions, and adjusted $p$-values. 
+The main function is the  user-level wrapper function \Robject{MTP}, which implements the single-step and step-down maxT and minP procedures for FWER control (Sections \ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}). 
+The functions \Robject{fwer2gfwer}, \Robject{fwer2tppfp}, and \Robject{fwer2fdr} implement, respectively, gFWER-, TPPFP-, and FDR-controlling augmentation multiple testing procedures, based on adjusted $p$-values from {\em any} FWER-controlling procedure, and can be called via the \Robject{typeone} argument to \Robject{MTP} (Section \ref{anal:mult:s:AMTP}). 
+
+\item
+Functions for {\em numerical and graphical summaries} of a MTP. As described in Section \ref{anal:mult:s:summaries}, below, a number of summary methods are available to operate on objects of class \Rclass{MTP}, output from the main \Robject{MTP} function.
+\end{itemize}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Resampling-based multiple testing procedures: \Robject{MTP} function}
+\label{anal:mult:s:MTP}
+
+The main user-level function for resampling-based multiple testing is \Robject{MTP}. Its input/output and usage are described next. 
+
+<<loadPacks, eval=TRUE, echo=TRUE>>=
+library(Biobase)
+library(multtest)
+@
+
+<<argsMTP, eval=TRUE, echo=TRUE>>=
+args(MTP)
+@
+
+\noindent
+{\bf  INPUT.}
+\begin{description}
+
+\item{\em Data.} 
+The data, \Robject{X}, consist of a $J$--dimensional random vector, observed on each of $n$ sampling units (patients, cell lines, mice, etc.). 
+These data can be stored in a $J \times n$ \Rclass{matrix}, \Rclass{data.frame}, or \Rclass{exprs} slot of an object of class \Rclass{ExpressionSet}.
+In some settings,  a $J$--vector of weights may be associated with each observation, and stored in a $J \times n$ weight matrix, \Robject{W} (or an $n$--vector \Robject{W}, if the weights are the same for each of the $J$ variables). 
+One may also observe a possibly censored continuous or polychotomous outcome, \Robject{Y}, for each sampling unit, as obtained, for example, from the \Rclass{phenoData} slot of an object of class \Rclass{ExpressionSet}. 
+In some studies, $L$ additional covariates may be measured on each sampling unit and stored in \Robject{Z}, an $n \times L$ \Rclass{matrix} or \Rclass{data.frame}. 
+When the tests concern parameters in regression models with covariates from \Robject{Z} (e.g., values \Robject{lm.XvsZ}, \Robject{lm.YvsXZ}, and \Robject{coxph.YvsXZ}, for the argument \Robject{test}, described below), the arguments \Robject{Z.incl} and \Robject{Z.test} specify, respectively, which covariates (i.e., which columns of \Robject{Z}, including \Robject{Z.test}) should be included in the model and which regression parameter is to be tested (only when \texttt{test="lm.XvsZ"}). 
+The covariates can be specified either by a numeric column index or character string.
+If \Robject{X} is an instance of the class \Rclass{ExpressionSet}, \Robject{Y} can be a column index or character string referring to the variable in the \Rclass{data.frame} \Robject{pData(X)} to use as outcome. 
+Likewise, \Robject{Z.incl} and \Robject{Z.test} can be column indices or character strings referring to the variables in \Robject{pData(X)} to use as covariates.
+The data components (\Robject{X}, \Robject{W}, \Robject{Y}, \Robject{Z}, \Robject{Z.incl}, and \Robject{Z.test}) are the first six arguments to the \Robject{MTP} function. 
+Only \Robject{X} is a required argument; the others are by default \Robject{NULL}.
+The argument \Robject{na.rm} allows one to control the treatment of ``Not Available'' or \Robject{NA} values. It is set to \Robject{TRUE} by default, so that an
+observation with a missing value in any of the data objects' $j$th component ($j=1,\ldots,J$) is excluded from the computation of any of the relevant test statistics.
+
+
+\item{\em Test statistics.} 
+
+The test statistics should be chosen based on the parameter of interest (e.g., location, scale, or regression parameters) and the hypotheses one wishes to test. In the current implementation of \Rpackage{multtest}, the following test statistics are available through the argument \Robject{test}, with default value \Robject{t.twosamp.unequalvar}, for the two-sample Welch $t$-statistic. 
+\begin{itemize}
+\item 
+\Robject{t.onesamp}: One-sample $t$-statistic for tests of means.
+\item 
+\Robject{t.twosamp.equalvar}: Equal variance two-sample $t$-statistic for tests of differences in means.
+\item 
+\Robject{t.twosamp.unequalvar}: Unequal variance two-sample $t$-statistic for tests of differences in means (also known as two-sample Welch $t$-statistic). 
+\item 
+\Robject{t.pair}: Two-sample paired $t$-statistic for tests of differences in means.
+\item 
+\Robject{f}: Multi-sample $F$-statistic for tests of equality of population means.
+\item 
+\Robject{f.block}: Multi-sample $F$-statistic for tests of equality of population means in a block design.
+\item 
+\Robject{lm.XvsZ}: 
+$t$-statistic for tests of regression coefficients for variable \Robject{Z.test} in linear models each with outcome \Robject{X[j,]} ($j=1,\ldots,J$), and possibly additional covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z} (in the case of no covariates, one recovers the one-sample $t$-statistic, \Robject{t.onesamp}).
+\item 
+\Robject{lm.YvsXZ}: 
+$t$-statistic for tests of regression coefficients in linear models with outcome \Robject{Y} and each \Robject{X[j,]} ($j=1,\ldots,J$) as covariate of interest, with possibly other covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z}.
+\item 
+\Robject{coxph.YvsXZ}: $t$-statistic for tests of regression coefficients in Cox proportional hazards survival models with outcome \Robject{Y} and each \Robject{X[j,]} ($j=1,\ldots,J$) as covariate of interest, with possibly other covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z}.
+\end{itemize}
+
+
+{\em Robust}, {\em rank-based} versions of the above test statistics can be specified by setting the argument \Robject{robust} to \Robject{TRUE} (the default value is \Robject{FALSE}). 
+Consideration should be given to whether {\em standardized} (Equation (\ref{anal:mult:e:tstat})) or {\em unstandardized} difference statistics are most appropriate (see Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} for a comparison). Both options are available through the argument \Robject{standardize}, by default \Robject{TRUE}. 
+The type of alternative hypotheses is specified via the \Robject{alternative} argument: default value of \Robject{two.sided}, for two-sided test, and values of \Robject{less} or \Robject{greater}, for one-sided tests. 
+The (common) null value for the parameters of interest is specified through the \Robject{psi0} argument, by default zero.  
+
+
+\item{\em Type I error rate.} 
+The \Robject{MTP} function controls by default the family-wise error rate (FWER), or chance of at least one false positive (argument \Robject{typeone="fwer"}). 
+Augmentation procedures (Section \ref{anal:mult:s:AMTP}), controlling other Type I error rates such as the gFWER, TPPFP, and FDR, can be specified through the argument \Robject{typeone}.
+Related arguments include \Robject{k} and \Robject{q}, for the allowed number and proportion of false positives for control of $gFWER(k)$ and $TPPFP(q)$, respectively, and \Robject{fdr.method}, for the type of TPPFP-based FDR-controlling procedure (i.e., \Robject{"conservative"} or \Robject{"restricted"} methods).
+The nominal level of the test is determined by the argument \Robject{alpha}, by default 0.05. 
+Testing can be performed for a range of nominal Type I error rates by specifying a vector of levels \Robject{alpha}. 
+
+
+\item{\em Test statistics null distribution.} 
+In the current implementation of \Robject{MTP}, the test statistics null distribution is estimated by default using the non-parametric version of bootstrap Procedure~\ref{anal:mult:proc:boot} (argument \Robject{nulldist="boot"}). 
+The bootstrap procedure is implemented in the internal function \Robject{boot.resample}, which calls C to compute test statistics for each bootstrap sample.
+The values of the shift ($\lambda_0$) and scale ($\tau_0$) parameters are determined by the type of test statistics (e.g., $\lambda_0=0$ and $\tau_0=1$ for $t$-statistics). When \Robject{csnull=TRUE} (default), these values will be used to center and scale the estimated test statistics distribution, producing a null distribution. One may specify \Robject{csnull=FALSE} to compute a non-null test statistics distribution.
+Permutation null distributions are also available via \Robject{nulldist="perm"}.
+The number of resampling steps is specified by the argument \Robject{B}, by default 1,000. 
+Since the upper tail of the bootstrap distribution may be difficult to estimate, particularly for small values of \Robject{B}, a kernel density estimator may be used for the tail of the distribution by setting \Robject{smooth.null=TRUE} (default is \Robject{FALSE}). 
+
+\item{\em Multiple testing procedures.} 
+Several methods for controlling the chosen Type I error rate are available in \Rpackage{multtest}. 
+\begin{itemize}
+\item
+{\em FWER-controlling procedures.}
+For FWER control, the \Robject{MTP} function implements the single-step and step-down (common-cut-off) maxT and (common-quantile) minP MTPs, described in Sections~\ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}, and specified through the argument \Robject{method} (internal functions \Robject{ss.maxT}, \Robject{ss.minP}, \Robject{sd.maxT}, and \Robject{sd.minP}).
+The default MTP is the single-step maxT procedure (\Robject{method="ss.maxT"}), since it requires the least computation.
+\item 
+{\em gFWER-, TPPFP-, and FDR-controlling augmentation procedures.} 
+As discussed in Section \ref{anal:mult:s:AMTP}, any FWER-controlling MTP can be trivially augmented to control additional Type I error rates, such as the gFWER and TPPFP.
+Two FDR-controlling procedures can then be derived from the TPPFP-controlling AMTP.
+The AMTPs are implemented in the functions \Robject{fwer2gfwer}, \Robject{fwer2tppfp}, and \Robject{fwer2fdr}, that take FWER adjusted $p$-values as input and return augmentation adjusted $p$-values for control of the gFWER, TPPFP, and FDR, respectively. 
+Note that the aforementioned AMTPs can be applied directly via the \Robject{typeone} argument of the main function \Robject{MTP}.
+\end{itemize}
+
+\item{\em Parallel processing.}
+\Robject{MTP} can be run on a computer cluster with multiple nodes. This functionality requires the package \Rpackage{snow}. In addition, the packages \Rpackage{multtest} and \Rpackage{Biobase} must be
+installed on each node. \Robject{MTP} will load these packages as long as they are in the library
+search path; otherwise, the user must load the packages on each node. When \Robject{cluster=1}, computations are performed on a single CPU. To implement bootstrapping in parallel, the user either sets \Robject{cluster} equal to a cluster object created using the function \Robject{makeCluster}
+in \Rpackage{snow} or specifies the integer number of nodes to use in a cluster. For the latter
+approach, \Robject{MTP} creates a cluster object with the specified number of nodes for the user.
+In this case, the type of interface system to use must be specified in the \Robject{type} argument:
+MPI and PVM interfaces require the packages \Rpackage{Rmpi} and \Rpackage{rpvm}, respectively. The number or percentage of bootstrap iterations to dispatch at one time to each node is specified
+with the \Robject{dispatch} argument (default is 5\%).
+
+The following example illustrates how to load the \Rpackage{snow} package, make a cluster consisting 
+of two nodes, and load \Rpackage{Biobase} and \Rpackage{multtest} onto each node of the 
+cluster using \Robject{clusterEvalQ}. The object \Robject{cl} can be passed to \Robject{MTP} via
+the \Robject{cluster} argument. 
+
+<<snow, eval=FALSE, echo=TRUE>>=
+library(snow)
+cl <- makeCluster(2, "MPI")
+clusterEvalQ(cl, {library(Biobase); library(multtest)})
+@
+
+\item{\em Output control.} 
+Various arguments are available to control output, i.e., specify which combination of the following quantities should be returned: 
+confidence regions (argument \Robject{get.cr}); 
+cut-offs for the test statistics (argument \Robject{get.cutoff}); 
+adjusted $p$-values (argument \Robject{get.adjp}); 
+test statistics null distribution  (argument \Robject{keep.nulldist}). 
+Note that parameter estimates and confidence regions only apply to the test of single-parameter null hypotheses (i.e., not the $F$-tests). 
+In addition, in the current implementation of \Robject{MTP}, parameter confidence regions and test statistic cut-offs are only provided when \texttt{typeone="fwer"}, so that \Robject{get.cr} and \Robject{get.cutoff} should be set to \Robject{FALSE} when using the error rates gFWER, TPPFP, or FDR.
+
+
+\end{description}
+
+Note that the \Rpackage{multtest} package also provides several simple, marginal FWER-controlling MTPs, such as the Bonferroni, Holm \cite{Holm79}, Hochberg \cite{Hochberg88}, and \v{S}id\'{a}k \cite{Sidak67} procedures, and FDR-controlling MTPs, such as the Benjamini \& Hochberg \cite{Benjamini&Hochberg95} and Benjamini \& Yekutieli \cite{Benjamini&Yekutieli01} procedures. 
+These procedures are available through the \Robject{mt.rawp2adjp} function, which takes a vector of unadjusted $p$-values as input and returns the corresponding adjusted $p$-values.\\
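+
+By way of illustration, the following hypothetical call combines the main arguments described above: two-sample Welch $t$-statistics, control of $TPPFP(0.1)$ at nominal level 0.05, and a step-down maxT procedure based on a bootstrap null distribution, for assumed data objects \Robject{X} and \Robject{Y}.
+
+<<mtpCallSketch, eval=FALSE, echo=TRUE>>=
+## Hypothetical call: X is a J x n data matrix and Y a two-class outcome
+## vector (assumed objects).
+res <- MTP(X = X, Y = Y, test = "t.twosamp.unequalvar", typeone = "tppfp",
+           q = 0.1, alpha = 0.05, nulldist = "boot", B = 1000,
+           method = "sd.maxT")
+@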
+
+
+\noindent
+{\bf  OUTPUT.}\\
+
+
+The S4 class/method object-oriented programming approach was adopted to summarize the results of a MTP (Section \ref{anal:mult:s:design}). 
+Specifically, the output of the \Robject{MTP} function is an instance of the {\em class} \Rclass{MTP}. 
+A brief description of the class and associated methods is given next. Please consult the documentation for details, e.g., using \texttt{class ? MTP} and \texttt{methods ? MTP}. 
+
+<<classMTP, eval=TRUE, echo=TRUE>>=
+slotNames("MTP")
+@
+
+
+\begin{description}
+
+\item{\Robject{statistic}:} The numeric $M$--vector of test statistics, specified by the values of the \Robject{MTP} arguments \Robject{test}, \Robject{robust}, \Robject{standardize}, and \Robject{psi0}. In many testing problems, $M = J = $ \Robject{nrow(X)}.
+
+\item{\Robject{estimate}:} For the test of single-parameter null hypotheses using $t$-statistics (i.e., not the $F$-tests), the numeric $M$--vector of estimated parameters.
+
+\item{\Robject{sampsize}:} The sample size, i.e., $n=$ \Robject{ncol(X)}.
+
+\item{\Robject{rawp}:} The numeric $M$--vector of unadjusted $p$-values.
+
+\item{\Robject{adjp}:} The numeric $M$--vector of adjusted $p$-values (computed only if the \Robject{get.adjp} argument is \Robject{TRUE}).
+
+\item{\Robject{conf.reg}:}  For the test of single-parameter null hypotheses using $t$-statistics (i.e., not the $F$-tests), the numeric $M \times 2 \times$ \Robject{length(alpha)} \Rclass{array} of lower and upper simultaneous confidence limits for the parameter vector, for each value of the nominal Type I error rate \Robject{alpha} (computed only if the \Robject{get.cr} argument is \Robject{TRUE}). 
+
+\item{\Robject{cutoff}:} The numeric $M \times$ \Robject{length(alpha)} \Rclass{matrix} of cut-offs for the test statistics, for each value of the nominal Type I error rate \Robject{alpha} (computed only if the \Robject{get.cutoff} argument is \Robject{TRUE}).
+
+\item{\Robject{reject}:} 
+The $M \times$ \Robject{length(alpha)} \Rclass{matrix} of rejection indicators (\Robject{TRUE} for a rejected null hypothesis), for each value of the nominal Type I error rate \Robject{alpha}.
+
+\item{\Robject{nulldist}:} The numeric $M \times B$ \Rclass{matrix} for the estimated test statistics null distribution (returned only if \texttt{keep.nulldist=TRUE}; option not currently available for permutation null distribution, i.e.,  \texttt{nulldist="perm"}).
+By default (i.e., for \Robject{nulldist="boot"}), the entries of \Robject{nulldist} are the null value shifted and scaled bootstrap test statistics, as defined by Procedure~\ref{anal:mult:proc:boot}.
+
+\item{\Robject{call}:} The call to the function \Robject{MTP}.
+
+\item{\Robject{seed}:} 
+An integer for specifying the state of the random number generator used to create the resampled datasets. 
+The seed can be reused for reproducibility in a repeat call to \Robject{MTP}. 
+This argument is currently used only for the bootstrap null distribution (i.e., for \texttt{nulldist="boot"}).
+See \texttt{? set.seed} for details.
+
+
+\end{description}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Numerical and graphical summaries}
+\label{anal:mult:s:summaries}
+
+The following {\em methods} are defined to operate on \Rclass{MTP} instances and summarize the results of a MTP.
+
+\begin{description}
+
+\item{\Robject{print}:} 
+The \Robject{print} method returns a description of an object of class \Rclass{MTP}, including 
+the sample size $n$,
+the number $M$ of tested hypotheses,
+the type of test performed (value of argument \Robject{test}), 
+the Type I error rate (value of argument \Robject{typeone}),
+the nominal level of the test  (value of argument \Robject{alpha}), 
+the name of the MTP (value of argument \Robject{method}), and
+the call to the function \Robject{MTP}.
+In addition, this method produces a table with the class, mode, length, and dimension of each slot of the \Rclass{MTP} instance. 
+
+\item{\Robject{summary}:} 
+The \Robject{summary} method provides numerical summaries of the results of a MTP and returns a list with the following three components.
+\begin{itemize}
+\item
+\Robject{rejections}: 
+A \Rclass{data.frame} with the number(s) of rejected hypotheses for the nominal Type I error rate(s) specified by the \Robject{alpha} argument of the function \Robject{MTP} 
+(\Robject{NULL} values are returned if all three arguments \Robject{get.cr}, \Robject{get.cutoff}, and \Robject{get.adjp} are \Robject{FALSE}).
+\item
+\Robject{index}:
+A numeric $M$--vector of indices for ordering the hypotheses according to first \Robject{adjp}, then \Robject{rawp}, and finally the absolute value of \Robject{statistic} (not printed in the summary). 
+\item
+\Robject{summaries}:
+When applicable (i.e., when the corresponding quantities are returned by \Robject{MTP}), a table with six number summaries of the distributions of the adjusted $p$-values, unadjusted $p$-values, test statistics, and parameter estimates.
+\end{itemize}
+
+\item{\Robject{plot}:}   
+The \Robject{plot} method produces the following graphical summaries of the results of a MTP. The type of display may be specified via the \Robject{which} argument.
+\begin{enumerate}
+\item
+Scatterplot of number of rejected hypotheses vs. nominal Type I error rate.
+\item
+Plot of ordered adjusted $p$-values; can be viewed as a plot of Type I error rate vs. number of rejected hypotheses.
+\item
+Scatterplot of adjusted $p$-values vs. test statistics (also known as ``volcano plot'').
+\item
+Plot of unordered adjusted $p$-values.
+\item
+Plot of confidence regions for user-specified parameters, by default the 10 parameters corresponding to the smallest adjusted $p$-values  (argument \Robject{top}).
+\item
+Plot of test statistics and corresponding cut-offs (for each value of \Robject{alpha}) for user-specified hypotheses, by default the 10 hypotheses corresponding to the smallest adjusted $p$-values (argument \Robject{top}).
+\end{enumerate}
+The argument \Robject{logscale} (by default equal to \Robject{FALSE}) allows one to use the negative decimal logarithms of the adjusted $p$-values in the second, third, and fourth graphical displays.
+Note that some of these plots are implemented in the older function \Robject{mt.plot}.
+
+\item{\Robject{[}:} 
+Subsetting method, which operates selectively on each slot of an \Rclass{MTP} instance to retain only the data related to the specified hypotheses.
+
+\item{\Robject{as.list}:} 
+Converts an object of class \Rclass{MTP} to an object of class \Rclass{list}, with an entry for each slot. 
+
+\end{description}
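+
+For instance, the methods above might be combined as follows (a sketch, with \Robject{mtp.obj} a hypothetical instance of class \Rclass{MTP}):
+
+\begin{verbatim}
+print(mtp.obj)                        # description of the MTP results
+summ <- summary(mtp.obj)              # numerical summaries
+summ$rejections                       # rejection counts per alpha
+plot(mtp.obj, which=2, logscale=TRUE) # ordered adjusted p-values
+top10 <- mtp.obj[summ$index[1:10]]    # ten most significant hypotheses
+as.list(top10)                        # list with one entry per slot
+\end{verbatim}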
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Software design}
+\label{anal:mult:s:design}
+
+The following features of the programming approach employed in \Rpackage{multtest} may be of interest to users, especially those interested in extending the functionality of the package. \\
+
+\noindent
+{\bf Function closures.}  The use of {\em function closures}, in the style of the \Rpackage{genefilter} package, allows uniform data input for all MTPs and facilitates the extension of the package's functionality by adding, for example, new types of test statistics. 
+Specifically, for each value of the \Robject{MTP} argument \Robject{test}, a closure is defined which consists of a function for computing the test statistic (with only two arguments, a data vector \Robject{x} and a corresponding weight vector \Robject{w}, with default value of \Robject{NULL}) and its enclosing environment, with bindings for relevant additional arguments, such as null values \Robject{psi0}, outcomes \Robject{Y}, and covariates \Robject{Z}. 
+Thus, new test statistics can be added to \Rpackage{multtest} by simply defining a new closure and adding a corresponding value for the \Robject{test} argument to \Robject{MTP} (existing internal test statistic functions are located in the file \texttt{R/statistics.R}).\\
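+
+For illustration, a closure in this style might be written as follows (a minimal sketch for a one-sample $t$-statistic, not the package's internal code; \Robject{meanStat} is a hypothetical name):
+
+\begin{verbatim}
+meanStat <- function(psi0 = 0) {
+    ## the returned function finds psi0 in its enclosing environment
+    function(x, w = NULL) {
+        if (is.null(w)) w <- rep(1, length(x))
+        (sum(w * x)/sum(w) - psi0)/sqrt(var(x)/length(x))
+    }
+}
+tstat <- meanStat(psi0 = 0)   # bind the null value
+tstat(rnorm(10))              # test statistic for one data vector
+\end{verbatim}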
+
+\noindent
+{\bf Class/method object-oriented programming.}  Like many other Bioconductor packages, \Rpackage{multtest}  has adopted the {\em S4 class/method object-oriented programming approach} of Chambers \cite{Chambers98}.
+In particular, a new class, \Rclass{MTP}, is defined to represent the results of multiple testing procedures, as implemented in the main \Robject{MTP} function. As discussed above, in Section \ref{anal:mult:s:summaries}, several methods are provided to operate on instances of this class.\\
+
+\noindent
+{\bf Calls to C.} Because resampling procedures, such as the non-parametric bootstrap implemented in \Rpackage{multtest}, are computationally intensive, care must be taken to ensure that the resampling steps are not prohibitively slow. The use of closures for the test statistics, however, prevents writing the entire program in C. In the current implementation, we have chosen to define the closure and compute the observed test statistics in R, and then call C (using the R random number generator) for the computationally intensive resampling steps.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Discussion}
+\label{anal:mult:s:disc}
+
+The \Rpackage{multtest} package implements a broad range of resampling-based multiple testing procedures. Ongoing efforts are as follows.
+\begin{enumerate}
+\item
+Extending the class of available tests, by adding test statistic closures for tests of correlations, quantiles, and parameters in generalized linear models (e.g., logistic regression).
+\item
+Extending the class of resampling-based estimators for the test statistics null distribution (e.g., parametric bootstrap, Bayesian bootstrap). A closure approach may be considered for this purpose.
+\item
+Providing parameter confidence regions and test statistic cut-offs for Type I error rates other than the FWER.
+\item
+Implementing the new augmentation multiple testing procedures proposed in Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} for controlling tail probabilities $Pr(g(V_n,R_n) > q)$ for an arbitrary function $g(V_n,R_n)$ of the numbers of false positives $V_n$ and rejected hypotheses $R_n$.
+\item
+Providing a formula interface for a symbolic description of the tests to be performed (cf. model specification in \Robject{lm}).
+%\item
+%Providing an \Robject{update} method for objects of class \Rclass{MTP}. This would allow reusing available estimates of the null distribution to implement different MTPs for a given Type I error rate and to control different Type I error rates. 
+\item
+Extending the \Rclass{MTP} class to keep track of results for several MTPs.
+\item
+Increasing the computational efficiency of the bootstrap estimation of the test statistics null distribution.
+\end{enumerate}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\bibliographystyle{plainnat}
+
+\bibliography{multtest}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\end{document}
diff --git a/inst/otherDocs/MTPALL.Rnw b/inst/otherDocs/MTPALL.Rnw
new file mode 100755
index 0000000..377a1d4
--- /dev/null
+++ b/inst/otherDocs/MTPALL.Rnw
@@ -0,0 +1,493 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% \VignetteIndexEntry{Multiple Testing Procedures}
+% \VignetteKeywords{Expression Analysis}
+% \VignettePackage{multtest}
+
+\documentclass[11pt]{article}
+
+\usepackage{graphicx}    % standard LaTeX graphics tool
+\usepackage{Sweave}
+\usepackage{amsfonts}
+
+% these should probably go into a dedicated style file
+\newcommand{\Rpackage}[1]{\textit{#1}}
+\newcommand{\Robject}[1]{\texttt{#1}}
+\newcommand{\Rclass}[1]{\textit{#1}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+% Our added packages and definitions
+ 
+\usepackage{hyperref}
+\usepackage{amsmath}
+\usepackage{color}
+\usepackage{comment}
+\usepackage[authoryear,round]{natbib}
+
+\parindent 0in
+
+\definecolor{red}{rgb}{1, 0, 0}
+\definecolor{green}{rgb}{0, 1, 0}
+\definecolor{blue}{rgb}{0, 0, 1}
+\definecolor{myblue}{rgb}{0.25, 0, 0.75}
+\definecolor{myred}{rgb}{0.75, 0, 0}
+\definecolor{gray}{rgb}{0.5, 0.5, 0.5}
+\definecolor{purple}{rgb}{0.65, 0, 0.75}
+\definecolor{orange}{rgb}{1, 0.65, 0}
+
+\def\RR{\mbox{\it I\hskip -0.177em R}}
+\def\ZZ{\mbox{\it I\hskip -0.177em Z}}
+\def\NN{\mbox{\it I\hskip -0.177em N}}
+
+\newtheorem{theorem}{Theorem}
+\newtheorem{procedure}{Procedure}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{document}
+
+\title{Applications of Multiple Testing Procedures: ALL Data} 
+\author{Katherine S. Pollard$^1$, Sandrine Dudoit$^2$, Mark J. van der Laan$^3$} 
+\maketitle
+
+\begin{center}
+1. Center for Biomolecular Science and Engineering, University of California, Santa Cruz, \url{http://lowelab.ucsc.edu/katie/}\\
+2. Division of Biostatistics, University of California, Berkeley, \url{http://www.stat.berkeley.edu/~sandrine/}\\
+3. Department of Statistics and Division of Biostatistics, University of California, Berkeley, \url{http://www.stat.berkeley.edu/~laan/}\\
+\end{center}
+
+\tableofcontents
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Overview}
+
+The Bioconductor R package \Rpackage{multtest} implements widely applicable resampling-based single-step and stepwise multiple testing procedures (MTP) for controlling a broad class of Type I error rates, in testing problems involving general data generating distributions (with arbitrary dependence structures among variables), null hypotheses, and test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. A key feature of these MTPs is the test statistics null distribution used to derive rejection regions and adjusted $p$-values.
+For general null hypotheses, defined in terms of submodels for the data generating distribution, this null distribution is the asymptotic distribution of the vector of null value shifted and scaled test statistics. 
+The current version of \Rpackage{multtest} provides MTPs for null hypotheses concerning means, differences in means, and regression parameters in linear and Cox proportional hazards models.
+Both  non-parametric bootstrap and permutation estimators of the test statistics ($t$- or $F$-statistics) null distribution are available. 
+Procedures are provided to control Type I error rates defined as tail probabilities and expected values of arbitrary functions of the numbers of Type I errors, $V_n$, and rejected hypotheses, $R_n$. 
+These error rates include: 
+the generalized family-wise error rate, $gFWER(k) = Pr(V_n > k)$, or chance of at least $(k+1)$ false positives (the special case $k=0$ corresponds to the usual family-wise error rate, FWER); 
+tail probabilities $TPPFP(q) = Pr(V_n/R_n > q)$ for the proportion of false positives among the rejected hypotheses;
+the false discovery rate, $FDR=E[V_n/R_n]$.
+Single-step and step-down common-cut-off (maxT) and common-quantile (minP) procedures, which take into account the joint distribution of the test statistics, are implemented to control the FWER. 
+In addition, augmentation procedures are provided to control the gFWER and TPPFP, based on {\em any} initial FWER-controlling procedure.
+The results of a multiple testing procedure are summarized using rejection regions for the test statistics, confidence regions for the parameters of interest, and adjusted $p$-values.
+
+
+The modular design of the \Rpackage{multtest} package allows interested users to readily extend the package functionality by inserting additional functions for test statistics and testing procedures. 
+A class/method object-oriented programming approach was adopted to summarize the results of a MTP.
+
+
+The multiple testing procedures are applied to the Acute Lymphoblastic Leukemia (ALL) dataset of Chiaretti et al. \cite{Chiarettietal04}, available in the R package \Rpackage{ALL}, to identify genes whose expression measures are associated with (possibly censored) biological and clinical outcomes such as:  cytogenetic test status (normal vs. abnormal), tumor molecular subtype (BCR/ABL, NEG, ALL1/AF4, E2A/PBX1, p15/p16,  NUP-98), and patient survival.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Getting started}
+
+{\bf Installing the package.} To install the \Rpackage{multtest} package, first download the appropriate file for your platform from the Bioconductor website \url{http://www.bioconductor.org/}. For Windows, start R and select the \texttt{Packages} menu, then \texttt{Install package from local zip file...}.  Find and highlight the location of the zip file and click on {\tt open}. For Linux/Unix, use the usual command \texttt{R CMD INSTALL} or set the option \texttt{CRAN} to your nearest mirror site and use the function \texttt{install.packages}. \\
+
+{\bf Loading the package.} To load the \Rpackage{multtest} package in your R session, type \texttt{library(multtest)}. \\
+
+{\bf Help files.}  Detailed information on \Rpackage{multtest} package functions can be obtained in the help files. For example, to view the help file for the function \texttt{MTP} in a browser, use \texttt{help.start} followed by \texttt{? MTP}.\\
+
+{\bf Case study.} We illustrate some of the functionality of the \Rpackage{multtest} package using the Acute Lymphoblastic Leukemia (ALL) microarray dataset of Chiaretti et al. \cite{Chiarettietal04}. 
+Available in the data package \Rpackage{ALL}, this dataset includes 21 phenotypes and 12,625 Affymetrix gene expression measures (chip series hgu95av2), for each of 128 ALL patients. The expression measures have been jointly normalized using RMA. To view a description of the experiments and data, type \texttt{? ALL}.\\
+
+{\bf Sweave.} This document was generated using the \Robject{Sweave} function from the R \Rpackage{tools} package. The source (.Rnw) file is in the \texttt{/inst/doc} directory of the \Rpackage{multtest} package.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Software Application: ALL microarray dataset}
+
+\subsection{Preliminaries: loading the required packages}
+
+The main user-level function for resampling-based multiple testing is \Robject{MTP}. Its input/output and usage are described in the accompanying vignette (MTP). Here, we illustrate some of the functionality of the \Rpackage{multtest} package using the Acute Lymphoblastic Leukemia (ALL) microarray dataset of Chiaretti et al. \cite{Chiarettietal04}, available in the data package \Rpackage{ALL}. We begin by loading the necessary packages.
+
+<<loadPacks, eval=TRUE, echo=TRUE, results=hide>>=
+library(Biobase)
+library(multtest)
+<<setWidth, eval=TRUE, echo=FALSE, results=hide>>=
+options(width=60)
+@
+
+We use the \Robject{install.packages} command to get the necessary analysis and data packages from the R and Bioconductor repositories, after first checking whether they are already installed.
+
+<<getDataPacksNew, eval=TRUE, echo=TRUE>>=
+reposList<-c("http://www.bioconductor.org/packages/bioc/devel",
+          "http://www.bioconductor.org/packages/data/devel",
+          "http://www.bioconductor.org/packages/omegahat/devel",
+          "http://cran.fhcrc.org")
+installed<-installed.packages()[,"Package"]
+if(!("genefilter"%in%installed))
+ try(install.packages("genefilter",repos=reposList,dependencies=c("Depends", "Imports")))
+library(genefilter)
+if(!("ALL"%in%installed))
+try(install.packages("ALL",repos=reposList,dependencies=c("Depends", "Imports")))
+library(ALL)
+if(!("hgu95av2"%in%installed))
+try(install.packages("hgu95av2",repos=reposList,dependencies=c("Depends", "Imports")))
+library(hgu95av2)
+@
+
+%<<getDataPacks, eval=TRUE, echo=TRUE>>=
+%z<-try(getReposEntry("http://www.bioconductor.org/data/experimental/repos"))
+%try(install.packages2("ALL",repEntry=z))
+%library(ALL)
+%try(install.packages2("hgu95av2"))
+%library(hgu95av2)
+%@
+
+\subsection{\Rpackage{ALL} data package and initial gene filtering}
+
+The Acute Lymphoblastic Leukemia (ALL) microarray dataset of Chiaretti et al. \cite{Chiarettietal04} consists of 21 {\em phenotypes} (i.e., patient level responses and covariates) and 12,625 Affymetrix {\em gene expression measures} (chip series HGU95Av2), for each of 128 ALL patients. 
+For greater detail, please consult the \Rpackage{ALL} package documentation.
+The main object in this package is \Robject{ALL}, an instance of the class \Rclass{ExpressionSet}, which contains the expression measures, phenotypes, and gene annotation information.  
+The genes-by-subjects matrix of expression measures is provided in the \Robject{exprs} slot of \Robject{ALL} and the phenotype data are stored in the \Robject{phenoData} slot. 
+Note that the expression measures have been obtained using the three-step robust multichip average (RMA) pre-processing method, implemented in the package \Rpackage{affy}. In particular, the expression measures have been subject to a base 2 logarithmic transformation.
+
+<<ALL, eval=TRUE, echo=TRUE>>=
+data(ALL)
+class(ALL)
+slotNames(ALL)
+show(ALL)
+names(varLabels(ALL))
+X <- exprs(ALL)
+pheno <- pData(ALL)
+@
+
+Our goal is to identify genes whose expression measures are associated with (possibly censored) biological and clinical outcomes such as: cytogenetic test status (normal vs. abnormal), tumor molecular subtype (BCR/ABL, NEG, ALL1/AF4, E2A/PBX1, p15/p16,  NUP-98), and time to relapse. 
+Before applying the multiple testing procedures, we perform initial gene filtering as in  Chiaretti et al. \cite{Chiarettietal04} and retain only those genes for which
+(i) at least 20\% of the subjects have a measured intensity of at least 100 and
+(ii)  the coefficient of variation (i.e., the ratio of the standard deviation to the mean) of the intensities across samples is between 0.7 and 10.
+These two filtering criteria can be readily applied using functions from the \Rpackage{genefilter} package.
+
+<<genefilter, eval=TRUE, echo=TRUE>>=
+ffun <- filterfun(pOverA(p=0.2, A=100), cv(a=0.7, b=10))
+filt <- genefilter(2^X, ffun)
+filtX <- X[filt,]
+dim(filtX)
+filtALL <- ALL[filt,]
+@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Association of expression measures and cytogenetic test status: two-sample $t$-statistics}
+
+\paragraph{Step-down minP FWER-controlling MTP with two-sample Welch $t$-statistics and bootstrap null distribution}
+
+The phenotype data include an indicator variable, \Robject{cyto.normal}, for cytogenetic test status (1 for normal vs. 0 for abnormal). To identify genes with higher mean expression measures in the abnormal compared to the normal cytogenetics subjects, one-sided two-sample $t$-tests can be performed. We choose to use the Welch $t$-statistic and to control the FWER using the bootstrap-based step-down minP procedure with $B=100$ bootstrap iterations (though many more are recommended in practice).
+
+<<cytoBoot, eval=TRUE, echo=TRUE>>=
+seed <- 99
+cyto.boot <- MTP(X=filtALL, Y="cyto.normal", alternative="less", B=100, method="sd.minP", seed=seed)
+@
+
+Let us examine the results of the MTP stored in the object \Robject{cyto.boot}.
+
+<<cytoOut, eval=TRUE, echo=TRUE>>=
+class(cyto.boot)
+slotNames(cyto.boot)
+print(cyto.boot)
+summary(cyto.boot) 
+@
+
+The following commands may be used to obtain a list of genes that are differentially expressed in normal vs. abnormal cytogenetics patients at nominal FWER level $\alpha=0.05$, i.e., genes with adjusted $p$-values less than or equal to 0.05. 
+Functions from the \Rpackage{annotate} and \Rpackage{annaffy}  packages may then be used to obtain annotation information on these genes (e.g., gene names, PubMed abstracts, GO terms) and to generate HTML tables of the results. 
+
+<<cytoGenes, eval=TRUE, echo=TRUE>>=
+cyto.diff <- cyto.boot@adjp<=0.05
+sum(cyto.diff)
+cyto.AffyID <- geneNames(filtALL)[cyto.diff]
+mget(cyto.AffyID, env=hgu95av2GENENAME)
+@
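+
+For instance, an HTML report for the selected genes might be generated along the following lines (a sketch, not run here; the column selection is illustrative and the interface is that of the \Rpackage{annaffy} package, whose documentation should be consulted for details).
+
+<<cytoHTML, eval=FALSE, echo=TRUE>>=
+library(annaffy)
+anntable <- aafTableAnn(cyto.AffyID, "hgu95av2", aaf.handler()[1:3])
+saveHTML(anntable, "cyto.html", title="Cytogenetic test status")
+@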
+
+Various graphical summaries of the results may be obtained using the \Robject{plot} method, by selecting appropriate values of the argument \Robject{which} (Figure \ref{f:cytoPlot}).
+
+<<cytoPlot, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+par(mfrow=c(2,2))
+plot(cyto.boot)
+@
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{cytoPlot}
+\end{center}
+\caption{
+{\em Cytogenetic test status --- Step-down minP FWER-controlling MTP.} By default, four graphical summaries are produced by the \Robject{plot} method for instances of the class \Rclass{MTP}.}
+\protect\label{f:cytoPlot}
+\end{figure}
+
+
+
+\paragraph{Marginal FWER-controlling MTPs with two-sample Welch $t$-statistics and bootstrap null distribution}
+
+Given a vector of unadjusted $p$-values, the \Robject{mt.rawp2adjp} function computes adjusted $p$-values for the marginal FWER-controlling MTPs of Bonferroni, Holm \cite{Holm79}, Hochberg \cite{Hochberg88}, and  $\check{\rm S}$id\'{a}k \cite{Sidak67}, discussed in detail in Dudoit et al. \cite{DudoitetalStatSci03}. 
+The \Robject{mt.plot} function may then be used to compare the different procedures in terms of their adjusted $p$-values.
+
+<<cytoMarg, eval=TRUE, echo=TRUE>>=
+marg <- c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD")
+cyto.marg <- mt.rawp2adjp(rawp=cyto.boot@rawp, proc=marg)
+comp.marg <- cbind(cyto.boot@adjp, cyto.marg$adjp[order(cyto.marg$index),-1])
+@
+
+<<cytoMargPlot, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+par(mfrow=c(1,1))
+mt.plot(adjp=comp.marg, teststat=cyto.boot@statistic, proc=c("SD minP", marg), leg=c(0.1,400), col=1:6, lty=1:6, lwd=3)
+title("Comparison of marginal and step-down minP FWER-controlling MTPs")
+@
+
+In this dataset, most of the FWER-controlling MTPs perform similarly, making very few rejections at nominal Type I error rates near zero. 
+As expected, the bootstrap-based step-down minP procedure, which takes into account the joint distribution of the test statistics, leads to slightly more rejections than the marginal methods (Figure \ref{f:cytoMargPlot}).
+The results also illustrate that stepwise MTPs are less conservative than their single-step analogues (e.g., Holm and Hochberg vs. Bonferroni; step-down  \v{S}id\'{a}k vs. single-step \v{S}id\'{a}k).
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{cytoMargPlot}
+\end{center}
+\caption{
+{\em Cytogenetic test status --- Marginal vs. joint FWER-controlling MTPs.} Plot of number of rejected hypotheses vs. nominal Type I error rate for comparing  bootstrap-based marginal and step-down minP FWER-controlling MTPs.}
+\protect\label{f:cytoMargPlot}
+\end{figure}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\paragraph{Step-down minP FWER-controlling MTP with two-sample Welch $t$-statistics and permutation null distribution}
+
+Because the sample sizes are not equal for the two cytogenetic groups and the expression measures may have different covariance structures in the two populations, we expect the bootstrap and permutation null distributions to yield different sets of rejected hypotheses (Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04}). 
+To compare the two approaches, we apply the permutation-based step-down minP procedure, first using the old \Robject{mt.minP} function and then using the new \Robject{MTP} function (which calls \Robject{mt.minP}). 
+Please note that while the \Robject{MTP} and \Robject{mt.minP} functions produce the same results, these are presented in a different manner. In particular, for the new function \Robject{MTP}, the results (e.g., test statistics, parameter estimates, unadjusted $p$-values, adjusted $p$-values, cut-offs) are given in the original order of the null hypotheses, while in the \Robject{mt.minP} function, the hypotheses are sorted first according to their adjusted $p$-values, next according to their unadjusted $p$-values, and finally according to their test statistics.
+In addition, the new function \Robject{MTP} implements a broader range of MTPs and has adopted the S4 class/method design for representing and summarizing the results of a MTP.
+
+<<cytoPermOld, eval=TRUE, echo=TRUE>>=
+set.seed(99)
+NAs <- is.na(pheno$cyto.normal)
+cyto.perm.old <- mt.minP(X=filtX[,!NAs], classlabel=pheno$cyto.normal[!NAs], side="lower", B=100)
+names(cyto.perm.old)
+sum(cyto.perm.old$adjp<=0.05)
+@
+
+<<cytoPermNew, eval=TRUE, echo=TRUE>>=
+set.seed(99)
+cyto.perm.new <- MTP(X=filtX, Y=pheno$cyto.normal, alternative="less", nulldist="perm", B=100, method="sd.minP")
+@
+
+<<cytoPermNewOut, eval=TRUE, echo=TRUE>>=
+summary(cyto.perm.new)
+sum(cyto.perm.new@adjp<=0.05)
+sum(cyto.perm.new@adjp<=0.05 & cyto.boot@adjp<=0.05)
+@
+
+At nominal FWER level $\alpha=0.05$, the permutation step-down minP procedure identifies \Sexpr{sum(cyto.perm.new@adjp<=0.05)} genes as differentially expressed between patients with normal and abnormal cytogenetic test status. 
+In contrast, the bootstrap version of the step-down minP procedure identifies \Sexpr{sum(cyto.boot@adjp<=0.05)} differentially expressed genes.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\paragraph{Step-down minP FWER-controlling MTP with robust two-sample $t$-statistics and bootstrap null distribution}
+
+The Wilcoxon rank sum statistic (also known as the Mann-Whitney statistic) is a robust alternative to the usual two-sample $t$-statistic. 
+
+<<cytoWilcox, eval=TRUE, echo=TRUE>>=
+cyto.wilcox <- MTP(X=filtALL, Y="cyto.normal", robust=TRUE, alternative="less", B=100, method="sd.minP", seed=seed)
+@
+
+<<cytoWilcoxOut, eval=TRUE, echo=TRUE>>=
+sum(cyto.wilcox@adjp<=0.05)
+sum(cyto.wilcox@adjp<=0.05 & cyto.boot@adjp<=0.05)
+@
+
+At nominal FWER level $\alpha=0.05$, the bootstrap step-down minP MTP based on the robust Wilcoxon test statistic identifies \Sexpr{sum(cyto.wilcox@adjp<=0.05)} genes as differentially expressed, compared to \Sexpr{sum(cyto.boot@adjp<=0.05)} genes for the same MTP based on the Welch $t$-statistic. 
+\Sexpr{sum(cyto.wilcox@adjp<=0.05 & cyto.boot@adjp<=0.05)} genes are identified by both procedures.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Augmentation procedures for gFWER, TPPFP, and FDR control}
+
+In the context of microarray gene expression data analysis or other high-dimensional inference problems, one is often willing to accept some false positives, provided their number is small in comparison to the number of rejected hypotheses.
+In this case, the FWER is not a suitable choice of Type I error rate and one should consider other rates that lead to larger sets of rejected hypotheses.
+The augmentation procedures implemented in the function \Robject{MTP} allow one to reject additional hypotheses while controlling an error rate such as the generalized family-wise error rate (gFWER), the tail probability of the proportion of false positives (TPPFP), or the false discovery rate (FDR). 
+We illustrate the use of the \Robject{fwer2gfwer}, \Robject{fwer2tppfp}, and \Robject{fwer2fdr} functions, but note that the gFWER, TPPFP, and FDR can also be controlled directly using the \Robject{MTP} function with appropriate choices of arguments \Robject{typeone}, \Robject{k}, \Robject{q}, and \Robject{fdr.method}.
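+
+For instance, direct control of these error rates might be requested as follows (a sketch, not evaluated here; the values of \Robject{k} and \Robject{q} are illustrative):
+
+<<directControl, eval=FALSE, echo=TRUE>>=
+gfwer.boot <- MTP(X=filtALL, Y="cyto.normal", alternative="less", B=100,
+                  typeone="gfwer", k=5, seed=seed)
+tppfp.boot <- MTP(X=filtALL, Y="cyto.normal", alternative="less", B=100,
+                  typeone="tppfp", q=0.1, seed=seed)
+fdr.boot <- MTP(X=filtALL, Y="cyto.normal", alternative="less", B=100,
+                typeone="fdr", seed=seed)
+@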
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\paragraph{gFWER control}
+
+<<cytogfwer, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+k <- c(5, 10, 50, 100)
+cyto.gfwer <- fwer2gfwer(adjp=cyto.boot@adjp, k=k)
+comp.gfwer <- cbind(cyto.boot@adjp, cyto.gfwer)
+mtps <- paste("gFWER(",c(0,k),")", sep="")
+mt.plot(adjp=comp.gfwer, teststat=cyto.boot@statistic, proc=mtps, leg=c(0.1,400),col=1:5, lty=1:5, lwd=3)
+title("Comparison of gFWER(k)-controlling AMTPs based on SD minP MTP")
+@
+
+For gFWER-controlling AMTPs, Figure \ref{f:cytogfwer} illustrates that the number of rejected hypotheses increases linearly with the number $k$ of allowed false positives, for nominal levels $\alpha$ such that the initial FWER-controlling MTP does not reject more than $M-k$ hypotheses. 
+That is, the curve for the $gFWER(k)$--controlling AMTP is obtained from that of the initial FWER-controlling procedure by a simple vertical shift of $k$.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\paragraph{TPPFP control}
+
+<<cytotppfp, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+q <- c(0.05,0.1,0.5)
+cyto.tppfp <- fwer2tppfp(adjp=cyto.boot@adjp, q=q)
+comp.tppfp <- cbind(cyto.boot@adjp, cyto.tppfp)
+mtps <- c("FWER",paste("TPPFP(",q,")", sep=""))
+mt.plot(adjp=comp.tppfp, teststat=cyto.boot@statistic, proc=mtps, leg=c(0.1,400), col=1:4, lty=1:4, lwd=3)
+title("Comparison of TPPFP(q)-controlling AMTPs based on SD minP MTP")
+@
+
+For TPPFP control, Figure \ref{f:cytotppfp} shows that, as expected, the number of rejections while controlling $TPPFP(q)$ at a given level $\alpha$ increases with the allowed proportion $q$ of false positives, though not linearly.
+Furthermore, for the ALL dataset, the increases in the number of rejections are not very large. 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\paragraph{FDR control}
+
+Given any TPPFP-controlling MTP, van der Laan et al. \cite{vdLaanetalMT3SAGMB04} derive two simple (conservative) FDR-controlling MTPs. 
+Here, we compare these two FDR-controlling approaches, based on a TPPFP-controlling augmentation of the step-down minP procedure, to the marginal Benjamini \& Hochberg \cite{Benjamini&Hochberg95} and Benjamini \& Yekutieli \cite{Benjamini&Yekutieli01} procedures, implemented in the function \Robject{mt.rawp2adjp}.
+
+<<cytofdr, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+cyto.fdr <- fwer2fdr(adjp=cyto.boot@adjp, method="both")$adjp
+cyto.marg.fdr <- mt.rawp2adjp(rawp=cyto.boot@rawp, proc=c("BY","BH"))
+comp.fdr <- cbind(cyto.fdr, cyto.marg.fdr$adjp[order(cyto.marg.fdr$index),-1])
+mtps <- c("AMTP Cons", "AMTP Rest", "BY", "BH")
+mt.plot(adjp=comp.fdr, teststat=cyto.boot@statistic, proc=mtps, leg=c(0.1,400), col=c(2,2,3,3), lty=rep(1:2,2), lwd=3)
+title("Comparison of FDR-controlling MTPs")
+@
+
+Figure \ref{f:cytofdr} shows that for most values of the nominal FDR level $\alpha$, the usual Benjamini \& Hochberg ("BH") MTP leads by far to the largest number of rejected hypotheses.
+The Benjamini \& Yekutieli ("BY") MTP, a conservative version of the Benjamini \& Hochberg MTP (with $\sim \log M$ penalty on the $p$-values), leads to much fewer rejections.
+The AMTPs based on conservative bounds for the FDR ("AMTP Cons" and "AMTP Rest") are much more conservative than the Benjamini \& Hochberg MTP and only lead to an increased number of rejections for very high nominal FDR levels.
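+
+(More precisely, up to enforcement of monotonicity and capping at one, the BY adjusted $p$-values equal the BH adjusted $p$-values multiplied by the constant $\sum_{m=1}^{M} 1/m \approx \log M$, which accounts for the gap between the two curves in Figure \ref{f:cytofdr}.)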
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{cytogfwer}
+\end{center}
+\caption{
+{\em Cytogenetic test status --- gFWER-controlling AMTPs.} Plot of number of rejected hypotheses vs. nominal Type I error rate for comparing gFWER-controlling AMTPs, based on the bootstrap step-down minP FWER-controlling procedure, with different allowed numbers $k$ of false positives.}
+\protect\label{f:cytogfwer}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{cytotppfp}
+\end{center}
+\caption{
+{\em Cytogenetic test status --- TPPFP-controlling AMTPs.} Plot of number of rejected hypotheses vs. nominal Type I error rate for comparing TPPFP-controlling AMTPs, based on the bootstrap step-down minP FWER-controlling procedure, with different allowed proportions $q$ of false positives.}
+\protect\label{f:cytotppfp}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{cytofdr}
+\end{center}
+\caption{
+{\em Cytogenetic test status --- FDR-controlling MTPs.} Plot of number of rejected hypotheses vs. nominal Type I error rate for comparing four FDR-controlling MTPs.}
+\protect\label{f:cytofdr}
+\end{figure}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Association of expression measures and tumor molecular subtype: multi-sample $F$-statistics}
+
+To identify genes with differences in mean expression measures between different tumor molecular subtypes (BCR/ABL, NEG, ALL1/AF4, E2A/PBX1, p15/p16,  NUP-98), one can perform a family of $F$-tests. 
+Tumor subtypes with fewer than 10 subjects are merged into one group. 
+Adjusted $p$-values and test statistic cut-offs (for nominal levels $\alpha$ of 0.01 and 0.1) are computed as follows for the bootstrap-based single-step maxT FWER-controlling procedure. 
+
+<<mbBoot, eval=TRUE, echo=TRUE>>=
+mb <- as.character(pheno$mol.biol)
+table(mb)
+other <- c("E2A/PBX1", "NUP-98", "p15/p16")
+mb[mb%in%other] <- "other"
+table(mb)
+mb.boot <- MTP(X=filtX, Y=mb, test="f", alpha=c(0.01,0.1), B=100, get.cutoff=TRUE, seed=seed)
+@
+
+Let us examine the results of the MTP.
+
+<<mbOut, eval=TRUE, echo=TRUE>>=
+summary(mb.boot)
+mb.diff <- mb.boot@adjp<=0.01
+sum(mb.diff)
+sum(mb.boot@statistic>=mb.boot@cutoff[,"alpha=0.01"] & mb.diff)
+@
+
+For control of the FWER at nominal level $\alpha=0.01$, the bootstrap-based single-step maxT procedure with $F$-statistics identifies 
+\Sexpr{sum(mb.diff)} genes (out of the \Sexpr{sum(filt)} filtered genes) 
+as having significant differences in mean expression measures between tumor molecular subtypes.  
+This set can be identified through either adjusted $p$-values or cut-offs for the test statistics. 
+The plot of test statistics and corresponding cut-offs in Figure \ref{f:mbPlot} illustrates that the $F$-statistics for the 10 genes with the smallest adjusted $p$-values are much larger than expected by chance under the null distribution.
+
+<<mbPlot, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+plot(mb.boot,which=6)
+@
+
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{mbPlot}
+\end{center}
+\caption{
+{\em Tumor molecular subtype --- Single-step maxT FWER-controlling MTP.} Plot of $F$-statistics and corresponding cut-offs for the 10 genes with the smallest adjusted $p$-values, based on the bootstrap single-step maxT FWER-controlling procedure (\Robject{plot} method, \texttt{which=6}).}
+\protect\label{f:mbPlot}
+\end{figure}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Association of expression measures and time to relapse: Cox $t$-statistics}
+
+The bootstrap-based MTPs implemented in the main \Robject{MTP} function (\Robject{nulldist="boot"}) allow the test of hypotheses concerning regression parameters in models for which the subset pivotality condition may not hold (e.g., logistic and Cox proportional hazards models). 
+The phenotype information in the \Rpackage{ALL} package includes the original remission status of the ALL patients (\Robject{remission} variable in the \Rclass{data.frame} \Robject{pData(ALL)}). 
+There are 88 subjects who experienced original complete remission (\texttt{remission="CR"}) and who were followed up for remission status at a later date. 
+We apply the single-step maxT procedure to test for a significant association between expression measures and time to relapse amongst these 88 subjects, adjusting for sex. 
+Note that most of the code below is concerned with extracting the (censored) time to relapse outcome and covariates from slots of the \Rclass{ExpressionSet} instance \Robject{ALL}.
+
+<<coxphPrep, eval=TRUE, echo=TRUE>>=
+library(survival)
+# Patients with original complete remission and who were followed up
+cr.ind <- pheno$remission=="CR"
+cr.pheno <- pheno[cr.ind,]
+times <- strptime(cr.pheno$"date last seen", "%m/%d/%Y")-strptime(cr.pheno$date.cr, "%m/%d/%Y")
+time.ind <- !is.na(times)
+times <- times[time.ind]
+# Patients who haven't relapsed are treated as censored
+cens <- ((1:length(times))%in%grep("CR", cr.pheno[time.ind,"f.u"]))
+# Time to relapse
+rel.times <- Surv(times, !cens)
+patients <- (1:ncol(filtX))[cr.ind][time.ind]
+# Prepare data for MTP
+relX <- filtX[, patients]
+relZ <- pheno[patients,]
+@
+
+<<coxphBoot, eval=TRUE, echo=TRUE>>=
+cox.boot <- MTP(X=relX, Y=rel.times, Z=relZ, Z.incl="sex", Z.test=NULL, test="coxph.YvsXZ", B=100, get.cr=TRUE, seed=seed)
+@
+
+<<coxphOut, eval=TRUE, echo=TRUE>>=
+summary(cox.boot)
+cox.diff <- cox.boot@adjp<=0.05
+sum(cox.diff)
+cox.AffyID <- geneNames(filtALL)[cox.diff]
+mget(cox.AffyID, env=hgu95av2GENENAME)
+@
+
+<<coxphPlot, echo=TRUE, fig=TRUE, prefix=FALSE, include=FALSE>>=
+plot(cox.boot, which=5)
+abline(h=0, col=2, lwd=2)
+@
+
+For control of the FWER at nominal level $\alpha=0.05$, the bootstrap-based single-step maxT procedure identifies \Sexpr{sum(cox.diff)} genes whose expression measures are significantly associated with time to relapse.
+Equivalently, Figure \ref{f:coxphPlot} illustrates that the level $\alpha=0.05$ confidence regions corresponding to these \Sexpr{sum(cox.diff)} genes do not include the null value $\psi_0=0$ for the Cox regression parameters (indicated by the red horizontal line). 
+%The confidence intervals for the next four genes barely cover $\psi_0=0$. 
+
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=3in,height=3in,angle=0]{coxphPlot}
+\end{center}
+\caption{
+{\em Time to relapse --- Single-step maxT FWER-controlling MTP.} Plot of Cox regression coefficient estimates and corresponding confidence intervals for the 10 genes with the smallest adjusted $p$-values, based on the bootstrap single-step maxT FWER-controlling procedure (\Robject{plot} method, \texttt{which=5}).}
+\protect\label{f:coxphPlot}
+\end{figure}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\bibliographystyle{plainnat}
+
+\bibliography{multtest}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\end{document}
diff --git a/inst/otherDocs/multtest.Rnw b/inst/otherDocs/multtest.Rnw
new file mode 100755
index 0000000..ce506c7
--- /dev/null
+++ b/inst/otherDocs/multtest.Rnw
@@ -0,0 +1,394 @@
+% \VignetteIndexEntry{multtest Tutorial}
+% \VignetteKeywords{Expression Analysis}
+% \VignettePackage{multtest}
+\documentclass[11pt]{article}
+
+\usepackage{amsmath,epsfig,fullpage}
+\usepackage{graphicx}
+\usepackage[authoryear,round]{natbib}
+\usepackage{hyperref}
+
+\parindent 0in
+
+\bibliographystyle{abbrvnat}
+
+\begin{document}
+
+\title{\bf Bioconductor's multtest package}
+\author{Sandrine Dudoit$^1$ and Yongchao Ge$^2$}
+
+\maketitle
+
+\begin{center}
+1. Division of Biostatistics, University of California, Berkeley,
+   \url{http://www.stat.berkeley.edu/~sandrine}\\
+2. Department of Biomathematical Sciences, Mount Sinai School of Medicine, New York,
+   {\tt yongchao.ge@mssm.edu}\\
+\end{center}
+
+\tableofcontents
+
+% library(tools)
+% Rnwfile<- file.path("/home/sandrine/CVS_stuff/madman/Rpacks/multtest/inst/doc",
+%                     "multtest.Rnw") 
+% Sweave(Rnwfile,pdf=TRUE,eps=TRUE,stylepath=TRUE,driver=RweaveLatex())
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Overview}
+
+The {\tt multtest} package contains a collection of functions for
+multiple hypothesis testing. These functions can be used to identify
+differentially expressed genes in microarray experiments, i.e., genes
+whose expression levels are associated with a response or covariate of
+interest. \\  
+
+{\bf Introduction to multiple testing.} This document provides a
+tutorial for using the {\tt multtest} package. For a detailed
+introduction to multiple testing consult the document {\tt
+  multtest.intro} in the {\tt inst/doc} directory of the package. See
+also \cite{Shaffer95} and  \cite{Dudoit&Shaffer02} for a review of
+multiple testing procedures and complete references.\\ 
+
+{\bf Multiple testing procedures implemented in {\tt multtest}.}
+The {\tt multtest} package implements multiple testing procedures for
+controlling different Type I error rates. It includes procedures for
+controlling the family--wise Type I error rate (FWER): the Bonferroni,
+\cite{Hochberg88}, \cite{Holm79}, and Sidak procedures, as well as the
+\cite{Westfall&Young93} minP and maxT procedures. It also includes
+procedures for controlling the false discovery rate (FDR): the
+\cite{Benjamini&Hochberg95} and \cite{Benjamini&Yekutieli01} step--up
+procedures. These procedures are implemented for tests based on
+$t$--statistics, $F$--statistics, paired $t$--statistics, block
+$F$--statistics, and Wilcoxon statistics. The results of the procedures
+are summarized using
+adjusted $p$--values, which reflect for each gene the overall
+experiment Type I error rate when genes with a smaller $p$--value are
+declared differentially expressed. Adjusted $p$--values may be
+obtained either from the nominal distribution of the test statistics
+or by permutation. The permutation algorithm for the maxT and minP
+procedures is described in \cite{Ge&Dudoit}.\\ 
+
+{\bf Help files.}  As with any R package, detailed information on
+functions, their arguments and value, can be obtained in the help
+files. For instance, to view the help file for the function {\tt
+  mt.maxT} in a browser, use {\tt help.start()} followed by {\tt ?
+  mt.maxT}. 
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Case study: the ALL/AML leukemia dataset of Golub et al. (1999)}
+
+ We demonstrate the functionality of this package using gene
+ expression data from the leukemia ALL/AML study of
+ \cite{Golubetal}. To load the leukemia dataset, use {\tt
+   data(golub)}, and to view a description of the experiments and
+ data, type {\tt ? golub}.  
+
+%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+library(multtest, verbose=FALSE)
+data(golub)
+@
+
+\cite{Golubetal} were interested in identifying genes that are
+differentially expressed in patients with two types of leukemia, acute
+lymphoblastic leukemia (ALL, class 0) and acute myeloid leukemia (AML,
+class 1). Gene expression levels were measured using Affymetrix
+high--density oligonucleotide chips containing $p=6,817$ human
+genes. The learning set comprises $n=38$ samples, 27 ALL cases and 11
+AML cases (data available at {\tt
+  http://www.genome.wi.mit.edu/MPR}). Following Golub et al. (personal
+communication, Pablo Tamayo), three preprocessing steps were applied
+to the normalized matrix of intensity values available on the website:
+(i) thresholding: floor 
+of 100 and ceiling of 16,000; (ii) filtering: exclusion of genes with
+$\max/\min \leq 5$ or $(\max-\min) \leq 500$, where $\max$ and $\min$ refer
+respectively to the maximum and minimum intensities for a
+particular gene across mRNA samples; (iii) base 10 logarithmic
+transformation. Boxplots of the expression levels for each of the 38
+samples revealed the need to standardize the expression levels within
+arrays before combining data across samples. The data were then
+summarized by a $3,051 \times 38 $ matrix $X=(x_{ji})$, where $x_{ji}$
+denotes the expression level for gene $j$ in tumor mRNA sample $i$. \\
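+
+In code, the three pre--processing steps might look as follows (a
+sketch, not run here; {\tt M} denotes the hypothetical raw intensity
+matrix, which is not shipped with the package):
+
+<<eval=FALSE>>=
+M <- pmin(pmax(M, 100), 16000)        # (i) floor 100, ceiling 16,000
+mx <- apply(M, 1, max)
+mn <- apply(M, 1, min)
+M <- M[mx/mn > 5 & (mx - mn) > 500, ] # (ii) filter low-variation genes
+X <- log10(M)                         # (iii) base 10 log transform
+@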
+
+
+The dataset {\tt golub} contains the gene expression data for the 38
+training set tumor mRNA samples and 3,051 genes retained after
+pre--processing. The dataset includes 
+
+\begin{itemize}
+\item
+{{\tt golub}:} a $3,051 \times 38 $ matrix of expression levels;
+\item
+{{\tt golub.gnames}:} a $3,051 \times 3 $ matrix of gene identifiers;
+\item
+{{\tt golub.cl}:} a vector of tumor class labels (0 for ALL, 1 for AML). 
+\end{itemize}
+
+%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+dim(golub)
+golub[1:4,1:4]
+dim(golub.gnames)
+golub.gnames[1:4,]
+golub.cl
+@
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{The {\tt mt.teststat} and {\tt mt.teststat.num.denum} functions}
+
+
+The {\tt mt.teststat} and {\tt mt.teststat.num.denum} functions
+provide a convenient way to compute test statistics for each row of a
+data frame, e.g., two--sample Welch $t$--statistics, Wilcoxon
+statistics, $F$--statistics, paired $t$--statistics, block
+$F$--statistics. To compute two--sample $t$--statistics comparing, for
+each gene, expression in the ALL cases to expression in the AML cases 
+
+%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+teststat<-mt.teststat(golub,golub.cl)
+@
+
+The following produces a normal Quantile--Quantile (Q--Q) plot of the
+test statistics (Figure \ref{fig:mtQQ}). In our application, we are
+not so much interested in testing whether the test statistics follow a
+particular distribution, but in using the Q--Q plot as a visual aid
+for identifying genes with ``unusual'' test statistics. Q--Q plots
+informally correct for the large number of comparisons and the points
+which deviate markedly from an otherwise linear relationship are
+likely to correspond to those genes whose expression levels differ
+between the control and treatment groups.
+
+%%<<mtQQ,fig=TRUE,prefix=FALSE,echo=TRUE,include=FALSE>>=
+%%\begin{verbatim}
+<<>>=
+postscript("mtQQ.eps")
+qqnorm(teststat)
+qqline(teststat)
+dev.off()
+pdf("mtQQ.pdf")
+qqnorm(teststat)
+qqline(teststat)
+dev.off()
+@
+%%\end{verbatim}
+%%@
+
+We may also wish to look at plots of the numerators and denominators
+of the test statistics (Figure \ref{fig:mtNumDen}).
+
+%%<<mtNumDen,fig=TRUE,prefix=FALSE,echo=TRUE,include=FALSE>>=
+%%\begin{verbatim}
+<<>>=
+tmp<-mt.teststat.num.denum(golub,golub.cl,test="t")
+num<-tmp$teststat.num
+denum<-tmp$teststat.denum
+postscript("mtNumDen.eps")
+plot(sqrt(denum),num)
+dev.off()
+pdf("mtNumDen.pdf")
+plot(sqrt(denum),num)
+dev.off()
+@
+%%\end{verbatim}
+%%@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{The {\tt mt.rawp2adjp} function}
+
+This function computes adjusted $p$--values for simple multiple
+testing procedures from a vector of raw (unadjusted) $p$--values. The
+procedures include the 
+Bonferroni, \cite{Holm79}, \cite{Hochberg88}, and Sidak procedures for
+strong control of the family--wise Type I error rate (FWER), and the
+\cite{Benjamini&Hochberg95} and \cite{Benjamini&Yekutieli01}
+procedures for (strong) control of the false discovery rate (FDR). \\ 
+
+As a first approximation, compute raw nominal two--sided $p$--values
+for the $3,051$ test statistics using the standard Gaussian
+distribution 
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+rawp0<-2*(1-pnorm(abs(teststat)))
+@
+
+Adjusted $p$--values for these seven multiple testing procedures can
+be computed as follows and stored in the original gene order in {\tt
+  adjp} using {\tt order(res\$index)} 
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+procs<-c("Bonferroni","Holm","Hochberg","SidakSS","SidakSD","BH","BY")
+res<-mt.rawp2adjp(rawp0,procs)
+adjp<-res$adjp[order(res$index),]
+round(adjp[1:10,],2)
+@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{The {\tt mt.maxT} and {\tt mt.minP} functions}
+
+The {\tt mt.maxT} and {\tt mt.minP} functions compute permutation
+adjusted $p$--values for the maxT and minP step--down multiple testing
+procedures described in \cite{Westfall&Young93}. These procedures
+provide strong control of the FWER and also incorporate the joint
+dependence structure between the test statistics. They are thus in
+general less conservative than the standard Bonferroni procedure. The
+permutation algorithm for the maxT and minP procedures is described in
+\cite{Ge&Dudoit}.\\
+
+Permutation unadjusted $p$--values and adjusted $p$--values for the
+maxT procedure with Welch $t$--statistics are computed as
+follows. {\tt mt.maxT} returns $p$--values sorted in decreasing order
+of the absolute $t$--statistics and {\tt order(resT\$index)} is used
+to obtain $p$--values and test statistics in the original gene
+order. In practice, the number of permutations $B$ should be several
+thousand; we set $B=1,000$ here for illustration purposes.
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+resT<-mt.maxT(golub,golub.cl,B=1000)
+ord<-order(resT$index)
+rawp<-resT$rawp[ord]
+maxT<-resT$adjp[ord]
+teststat<-resT$teststat[ord]
+@
+
+
+Three functions related to the {\tt mt.maxT} and {\tt mt.minP}
+functions are {\tt mt.sample.teststat}, {\tt mt.sample.rawp}, and {\tt
+mt.sample.label}. These functions provide tools to investigate the
+permutation distribution of test statistics, raw (unadjusted)
+$p$--values, and class labels, respectively.
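+
+For example (a sketch, not run here; a small $B$ is used for brevity),
+the permuted class labels and the permutation distribution of the test
+statistic for the first gene may be examined via
+
+<<eval=FALSE>>=
+mt.sample.label(golub.cl, B=10)
+mt.sample.teststat(golub[1,], golub.cl, B=10)
+@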
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{The {\tt mt.reject} function}
+
+The function {\tt mt.reject} returns the identity and number of rejected
+     hypotheses for several multiple testing procedures and different 
+     nominal Type I error rates. The number of hypotheses rejected
+     using unadjusted $p$--values and maxT $p$--values for different
+     Type I error rates ($\alpha=0, 0.1, 0.2, \ldots, 1$) can be
+     obtained by 
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+mt.reject(cbind(rawp,maxT),seq(0,1,0.1))$r
+@
+
+The genes with maxT $p$--values less than or equal to 0.01 are 
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+which<-mt.reject(cbind(rawp,maxT),0.01)$which[,2]
+golub.gnames[which,2]
+@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{The {\tt mt.plot} function}
+
+The {\tt mt.plot} function produces a number of graphical summaries
+for the results of multiple testing procedures and their corresponding
+adjusted $p$--values. To produce plots of sorted permutation
+unadjusted $p$--values and adjusted $p$--values for the Bonferroni,
+maxT, \cite{Benjamini&Hochberg95}, and \cite{Benjamini&Yekutieli01}
+procedures use 
+
+%%<<eval=TRUE, echo=TRUE>>=
+<<>>=
+res<-mt.rawp2adjp(rawp,c("Bonferroni","BH","BY"))
+adjp<-res$adjp[order(res$index),]
+allp<-cbind(adjp,maxT)
+dimnames(allp)[[2]]<-c(dimnames(adjp)[[2]],"maxT")
+procs<-dimnames(allp)[[2]]
+procs<-procs[c(1,2,5,3,4)]
+cols<-c(1,2,3,5,6)
+ltypes<-c(1,2,2,3,3)
+@
+
+For plotting sorted adjusted $p$--values set the argument {\tt plottype="pvsr"}
+
+%%<<mtpvsr,fig=TRUE,prefix=FALSE,echo=TRUE,include=FALSE>>=
+%%\begin{verbatim}
+<<>>=
+postscript("mtpvsr.eps")
+mt.plot(allp[,procs],teststat,plottype="pvsr",
+        proc=procs,leg=c(2000,0.4),lty=ltypes,col=cols,lwd=2) 
+dev.off()
+pdf("mtpvsr.pdf")
+mt.plot(allp[,procs],teststat,plottype="pvsr",
+        proc=procs,leg=c(2000,0.4),lty=ltypes,col=cols,lwd=2) 
+dev.off()
+@
+%%\end{verbatim}
+%%@
+
+and for plotting adjusted $p$--values vs. the test statistics use {\tt
+  plottype="pvst"} 
+
+%%<<mtpvst,fig=TRUE,prefix=FALSE,echo=TRUE,include=FALSE>>=
+%%\begin{verbatim}
+<<>>=
+postscript("mtpvst.eps")
+mt.plot(allp[,procs],teststat,plottype="pvst",
+        logscale=TRUE,proc=procs,leg=c(-0.5,2),pch=ltypes,col=cols)
+dev.off()
+pdf("mtpvst.pdf")
+mt.plot(allp[,procs],teststat,plottype="pvst",
+        logscale=TRUE,proc=procs,leg=c(-0.5,2),pch=ltypes,col=cols)
+dev.off()
+@
+%%\end{verbatim}
+%%@
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\bibliography{multtest} 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure}[ht]
+  %%\centerline{\epsfig{figure=mtQQ.eps,width=4in,height=4in,angle=0}}
+  \begin{center}
+    \includegraphics[width=4in,height=4in,angle=0]{mtQQ}
+  \end{center}
+  \caption{Normal Q--Q plot of $t$--statistics for leukemia data.}
+  \protect\label{fig:mtQQ}
+\end{figure}
+
+\begin{figure}[ht]
+  %%\centerline{\epsfig{figure=mtNumDen.eps,width=4in,height=4in,angle=0}}
+  \begin{center}
+    \includegraphics[width=4in,height=4in,angle=0]{mtNumDen}
+  \end{center}
+  \caption{Numerator vs. square root of denominator of the
+    $t$--statistics for the leukemia data.} 
+  \protect\label{fig:mtNumDen}
+\end{figure}
+
+\begin{figure}[ht]
+  %%\centerline{\epsfig{figure=mtpvsr.eps,width=4in,height=4in,angle=0}}
+  \begin{center}
+    \includegraphics[width=4in,height=4in,angle=0]{mtpvsr}
+  \end{center}
+  \caption{Sorted adjusted $p$--values for the leukemia data.}
+  \protect\label{fig:mtpvsr}
+\end{figure}
+
+\begin{figure}[ht]
+  %%\centerline{\epsfig{figure=mtpvst.eps,width=4in,height=4in,angle=0}}
+  \begin{center}
+    \includegraphics[width=4in,height=4in,angle=0]{mtpvst}
+  \end{center}
+  \caption{Adjusted $p$--values (log scale) vs. $t$--statistics for the
+    leukemia data.}  
+  \protect\label{fig:mtpvst}
+\end{figure}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\end{document}
diff --git a/man/EBMTP-class.Rd b/man/EBMTP-class.Rd
new file mode 100644
index 0000000..088eefd
--- /dev/null
+++ b/man/EBMTP-class.Rd
@@ -0,0 +1,150 @@
+\name{EBMTP-class}
+\alias{EBMTP-class}
+\alias{EBMTP-method}
+
+\title{Class "EBMTP", classes and methods for empirical Bayes multiple testing procedure output}
+
+\description{An object of class EBMTP is the output of a particular multiple testing procedure, as generated by the function \code{EBMTP}. The object has slots for the various data used to make multiple testing decisions, in particular adjusted p-values.}
+
+\section{Objects from the Class}{
+Objects can be created by calls of the form \cr
+new('EBMTP', \cr
+	statistic = ...., object of class numeric\cr
+	estimate  = ...., object of class numeric\cr
+	sampsize  = ...., object of class numeric\cr
+	rawp      = ...., object of class numeric\cr
+	adjp      = ...., object of class numeric\cr
+ 	reject	  = ...., object of class matrix\cr
+	rawdist	  = ...., object of class matrix\cr
+        nulldist  = ...., object of class matrix\cr
+	nulldist.type = ...., object of class character\cr
+	marg.null = ...., object of class character\cr
+	marg.par  = ...., object of class matrix\cr
+	label	  = ...., object of class numeric\cr
+	falsepos  = ...., object of class matrix\cr
+        truepos   = ...., object of class matrix\cr
+        errormat  = ...., object of class matrix\cr 
+        EB.h0M    = ...., object of class numeric\cr
+        prior     = ...., object of class numeric\cr
+        prior.type= ...., object of class character\cr
+        lqv       = ...., object of class numeric\cr
+	Hsets	  = ...., object of class matrix\cr
+	index	  = ...., object of class matrix\cr
+        call      = ...., object of class call\cr
+        seed      = ...., object of class integer\cr
+       )
+}
+
+\section{Slots}{
+\describe{
+  \item{\code{statistic}}{Object of class \code{numeric}, observed test statistics for each hypothesis, specified by the values of the \code{MTP} arguments \code{test}, \code{robust}, \code{standardize}, and \code{psi0}.}
+    \item{\code{estimate}}{For the test of single-parameter null hypotheses using t-statistics (i.e., not the F-tests), the numeric vector of estimated parameters corresponding to each hypothesis, e.g., means, differences in means, and regression parameters.}
+    \item{\code{sampsize}}{Object of class \code{numeric}, number of columns (i.e. observations) in the input data set.}
+    \item{\code{rawp}}{Object of class \code{numeric}, unadjusted, marginal p-values for each hypothesis.}
+    \item{\code{adjp}}{Object of class \code{numeric}, adjusted (for multiple testing) p-values for each hypothesis (computed only if the \code{get.adjp} argument is TRUE).}
+   \item{\code{reject}}{Object of class \code{'matrix'}, rejection indicators (TRUE for a rejected null hypothesis), for each value of the nominal Type I error rate \code{alpha}.}
+    \item{\code{rawdist}}{The numeric matrix for the estimated nonparametric non-null test statistics distribution (returned only if \code{keep.rawdist=TRUE} and if \code{nulldist} is one of 'boot.ctr', 'boot.cs', or 'boot.qt'). This slot must not be empty if one wishes to call \code{update} to change choice of bootstrap-based null distribution.}
+    \item{\code{nulldist}}{The numeric matrix for the estimated test statistics null distribution (returned only if \code{keep.nulldist=TRUE}). By default (i.e., for \code{nulldist='boot.cs'}), the entries of \code{nulldist} are the null value shifted and scaled bootstrap test statistics, with one null test statistic value for each hypothesis (rows) and bootstrap iteration (columns).}
+    \item{\code{nulldist.type}}{Character value describing which choice of null distribution was used to generate the MTP results.  Takes on one of the values of the original \code{nulldist} argument in the call to MTP, i.e., 'boot.cs', 'boot.ctr', 'boot.qt', or 'ic'.}
+   \item{\code{marg.null}}{If \code{nulldist='boot.qt'}, a character value returning which choice of marginal null distribution was used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{marg.par}}{If \code{nulldist='boot.qt'}, a numeric matrix returning the parameters of the marginal null distribution(s) used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{falsepos}}{A matrix with rows equal to the number of hypotheses and columns the number of samples of null test statistics (\code{B}) indicating the number of guessed false positives when using the corresponding value of the observed test statistic as a cut-off.  Not returned unless \code{keep.falsepos=TRUE}.}
+   \item{\code{truepos}}{A matrix with rows equal to the number of hypotheses and columns the number of samples of null test statistics (\code{B}) indicating the number of guessed true positives when using the corresponding value of the observed test statistic as a cut-off.  Not returned unless \code{keep.truepos=TRUE}.}
+   \item{\code{errormat}}{The matrix obtained after applying the Type I error rate function closure to the matrices in \code{falsepos} and, if applicable, \code{truepos}.  Not returned unless \code{keep.errormat=TRUE}.}
+   \item{\code{EB.h0M}}{The sum of the local q-values obtained after density estimation.  This number serves as an estimate of the proportion of true null hypotheses.  Values close to one indicate situations in which type I error control may not be guaranteed by the EBMTP.  When \code{prior='EBLQV'}, this value is used as the prior 'pi' during evaluation of the local q-value function.} 
+   \item{\code{prior}}{The numeric value of the prior 'pi' used when evaluating the local q-value function.}
+   \item{\code{prior.type}}{Character string returning the value of \code{prior} in the original call to \code{EBMTP}.  One of 'conservative', 'ABH', or 'EBLQV'.}
+   \item{\code{lqv}}{A numeric vector of length the number of hypotheses with the estimated local q-values used for generating guessed sets of true null hypotheses.}
+   \item{\code{Hsets}}{A numeric matrix with the same dimension as \code{nulldist}, containing the Bernoulli realizations of the estimated local q-values stored in \code{lqv} which were used to partition the hypotheses into guessed sets of true and false null hypotheses at each round of (re)sampling.  Not returned unless \code{keep.Hsets=TRUE}.}
+   \item{\code{label}}{If \code{keep.label=TRUE}, a vector storing the values used in the argument \code{Y}.  Storing this object is particularly important when one wishes to update EBMTP objects with F-statistics using default \code{marg.null} and \code{marg.par} settings when \code{nulldist='boot.qt'}. }
+   \item{\code{index}}{For tests of correlation parameters a matrix corresponding to \code{t(combn(p,2))}, where \code{p} is the number of variables in \code{X}.  This matrix gives the indices of the variables considered in each pairwise correlation.  For all other tests, this slot is empty, as the indices are in the same order as the rows of \code{X}.}
+   \item{\code{call}}{Object of class \code{call}, the call to the MTP function.}
+    \item{\code{seed}}{An integer or vector for specifying the state of the random number generator used to create the resampled datasets. The seed can be reused for reproducibility in a repeat call to \code{MTP}. This argument is currently used only for the bootstrap null distribution (i.e., for \code{nulldist="boot.xx"}). See \code{?set.seed} for details.}
+	}
+}
+
+\section{Methods}{
+ \code{signature(x = "EBMTP")} \cr
+ \describe{
+    \item{[}{: Subsetting method for \code{EBMTP} class, which operates selectively on each slot of an \code{EBMTP} instance to retain only the data related to the specified hypotheses.}
+
+    \item{as.list}{: Converts an object of class \code{EBMTP} to an object of class \code{list}, with an entry for each slot.}
+ 
+   \item{plot}{: plot method for \code{EBMTP} class, produces the following graphical summaries of the results of an EBMTP. The type of display may be specified via the \code{which} argument. \cr
+
+1. Scatterplot of number of rejected hypotheses vs. nominal Type I error rate. \cr
+
+2. Plot of ordered adjusted p-values; can be viewed as a plot of Type I error rate vs. number of rejected hypotheses. \cr
+
+3. Scatterplot of adjusted p-values vs. test statistics (also known as "volcano plot"). \cr
+
+4. Plot of unordered adjusted p-values. \cr
+
+The plot method for objects of class \code{EBMTP} does not return the plots associated with \code{which=5} (using confidence regions) or with \code{which=6} (pertaining to cut-offs) as it does for objects of class \code{MTP}.  This is because the function \code{EBMTP} currently only returns adjusted p-values.  The argument \code{logscale} (by default equal to FALSE) allows one to use the negative decimal logarithms of the adjusted p-values in the second, third, and fourth graphical displ [...]
+
+    \item{print}{: print method for \code{EBMTP} class, returns a description of an object of class \code{EBMTP}, including sample size, number of tested hypotheses, type of test performed (value of argument \code{test}), Type I error rate (value of argument \code{typeone}), nominal level of the test  (value of argument \code{alpha}), name of the EBMTP (value of argument \code{method}), call to the function \code{EBMTP}.
+
+In addition, this method produces a table with the class, mode, length, and dimension of each slot of the \code{EBMTP} instance. 
+}
+
+    \item{summary}{: summary method for \code{EBMTP} class, provides numerical summaries of the results of an EBMTP and returns a list with the following three components. \cr
+
+1. rejections: A data.frame with the number(s) of rejected hypotheses for the nominal Type I error rate(s) specified by the \code{alpha} argument of the function \code{EBMTP}. \cr
+
+2. index: A numeric vector of indices for ordering the hypotheses according to first \code{adjp}, then \code{rawp}, and finally the absolute value of \code{statistic} (not printed in the summary). \cr 
+
+3. summaries: When applicable (i.e., when the corresponding quantities are returned by \code{EBMTP}), a table with six number summaries of the distributions of the adjusted p-values, unadjusted p-values, test statistics, and parameter estimates.}
+
+    \item{EBupdate}{: update method for \code{EBMTP} class, provides a mechanism to re-run the MTP with different choices of the following arguments - nulldist, alternative, typeone, k, q, alpha, smooth.null, bw, kernel, prior, keep.nulldist, keep.rawdist, keep.falsepos, keep.truepos, keep.errormat, keep.margpar. When evaluate is 'TRUE', a new object of class EBMTP is returned. Else, the updated call is returned. The \code{EBMTP} object passed to the update method must have either a  non [...]
+
+Additionally, when calling \code{EBupdate} for any Type I error rate other than FWER, the \code{typeone} argument must be specified (even if the original object did not control FWER). For example, 
+\code{typeone="fdr"} would always have to be specified, even if the original object also controlled the FDR. In other words, for all function arguments, it is safest to always assume that you 
+are updating from the \code{EBMTP} default function settings, regardless of the original call to the \code{EBMTP} function. Currently, the main advantage of the \code{EBupdate} method is that it eliminates the need for repeated estimation of the test statistics null distribution. \cr 
+
+To save on memory, if one knows ahead of time that one will want to compare different choices of bootstrap-based null distribution, then it is both necessary and sufficient to specify 'keep.rawdist=TRUE', as there is no means of moving between null distributions other than through the non-transformed non-parametric bootstrap distribution.  In this case, 'keep.nulldist=FALSE' may be used.  Specifically, if an object of class \code{EBMTP} contains a non-empty \code{rawdist} slot and  [...]
+
+N.B.: Note that \code{keep.rawdist=TRUE} is only available for the bootstrap-based resampling methods.  The non-null distribution does not exist for the permutation or influence curve multivariate normal null distributions. }
+
+    \item{ebmtp2mtp}{: coercion method for converting objects of class \code{EBMTP} to objects of class \code{MTP}.  Slots common to both objects are taken from the object of class \code{EBMTP} and used to create a new object of class \code{MTP}.  Once an object of class \code{MTP} is created, one may use the method \code{update} to perform resampling-based multiple testing (as would have been done with calls to \code{MTP}) without the need for repeated resampling.}
+}
+}
+
+
+\references{
+H.N. Gilbert, K.S. Pollard, M.J. van der Laan, and S. Dudoit (2009). Resampling-based multiple 
+hypothesis testing with applications to genomics: New developments in R/Bioconductor 
+package multtest. \emph{Journal of Statistical Software} (submitted). Temporary URL: \url{http://www.stat.berkeley.edu/~houston/JSSNullDistEBMTP.pdf}.\cr
+
+Y. Benjamini and Y. Hochberg (2000). On the adaptive control of the false 
+discovery rate in multiple testing with independent statistics. \emph{J. Educ. Behav. Statist}. Vol. 25: 60-83.\cr
+
+Y. Benjamini, A. M. Krieger and D. Yekutieli (2006). Adaptive linear step-up
+procedures that control the false discovery rate. \emph{Biometrika}. 
+Vol. 93: 491-507.\cr
+
+M.J. van der Laan, M.D. Birkner, and A.E. Hubbard (2005).  Empirical Bayes and Resampling Based Multiple Testing Procedure Controlling the Tail Probability of the Proportion of False Positives. Statistical Applications in Genetics and Molecular Biology, 4(1).
+\url{http://www.bepress.com/sagmb/vol4/iss1/art29/} \cr
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008. \cr
+
+S. Dudoit, H. N. Gilbert, and M. J. van der Laan (2008). 
+Resampling-based empirical Bayes multiple testing procedures for controlling 
+generalized tail probability and expected value error rates: Focus on the false discovery rate and simulation study. \emph{Biometrical Journal}, 50(5):716-44. \url{http://www.stat.berkeley.edu/~houston/BJMCPSupp/BJMCPSupp.html}. \cr
+
+H.N. Gilbert, M.J. van der Laan, and S. Dudoit. Joint multiple testing procedures for 
+graphical model selection with applications to biological networks. Technical report, 
+U.C. Berkeley Division of Biostatistics Working Paper Series, April 2009. URL \url{http://www.bepress.com/ucbbiostat/paper245}. \cr
+}
+
+\author{Houston N. Gilbert, based on the original \code{MTP} class and method definitions written by Katherine S. Pollard}
+
+\seealso{ \code{\link{EBMTP}}, \code{\link{EBMTP-methods}},
+\code{\link{MTP}}, \code{\link{MTP-methods}},
+\code{\link{[-methods}}, \code{\link{as.list-methods}}, \code{\link{print-methods}}, \code{\link{plot-methods}}, \code{\link{summary-methods}}, \code{\link{mtp2ebmtp}}, 
+\code{\link{ebmtp2mtp}}}
+
+\examples{
+## See EBMTP function: ? EBMTP
+}
+
+\keyword{classes}
diff --git a/man/EBMTP.Rd b/man/EBMTP.Rd
new file mode 100644
index 0000000..dbec4f5
--- /dev/null
+++ b/man/EBMTP.Rd
@@ -0,0 +1,113 @@
+\name{EBMTP}
+\alias{EBMTP}
+
+\title{A function to perform empirical Bayes resampling-based multiple hypothesis testing}
+
+\description{
+A user-level function to perform empirical Bayes multiple testing procedures (EBMTP). A variety of t- and F-tests, including robust versions of most tests, are implemented.  A common-cutoff method is used to control the chosen type I error rate (FWER, gFWER, TPPFP, or FDR).  Bootstrap-based null distributions are available.  Additionally, for t-statistics, one may wish to sample from an appropriate multivariate normal distribution with mean zero and correlation matrix derived from the ve [...]
+}
+
+\usage{
+EBMTP(X, W = NULL, Y = NULL, Z = NULL, Z.incl = NULL, Z.test = NULL, 
+    na.rm = TRUE, test = "t.twosamp.unequalvar", robust = FALSE, 
+    standardize = TRUE, alternative = "two.sided", typeone = "fwer", 
+    method = "common.cutoff", k = 0, q = 0.1, alpha = 0.05, smooth.null = FALSE, 
+    nulldist = "boot.cs", B = 1000, psi0 = 0, marg.null = NULL, 
+    marg.par = NULL, ncp = NULL, perm.mat = NULL, ic.quant.trans = FALSE, 
+    MVN.method = "mvrnorm", penalty = 1e-06, prior = "conservative", 
+    bw = "nrd", kernel = "gaussian", seed = NULL, cluster = 1, 
+    type = NULL, dispatch = NULL, keep.nulldist = TRUE, keep.rawdist = FALSE, 
+    keep.falsepos = FALSE, keep.truepos = FALSE, keep.errormat = FALSE,
+    keep.Hsets=FALSE, keep.margpar = TRUE, keep.index = FALSE, keep.label = FALSE) 
+}
+
+\arguments{
+For brevity, the presentation of arguments below will highlight those which differ significantly from arguments in the other main-level user function \code{MTP}.  See \code{\link{MTP}} for further details.
+
+  \item{typeone}{Character string indicating which type I error rate to control, by default family-wise error rate ('fwer'). Other options include generalized family-wise error rate ('gfwer'), with parameter \code{k} giving the allowed number of false positives, and tail probability of the proportion of false positives ('tppfp'), with parameter \code{q} giving the allowed proportion of false positives. The false discovery rate ('fdr') can also be controlled.  In particular, for 'gfwer',  [...]
+  \item{method}{Character string indicating the EBMTP method.  Currently only 'common.cutoff' is implemented.  This method is most similar to 'ss.maxT' in \code{MTP}.}
+  \item{nulldist}{Character string indicating which resampling method to use for estimating the joint test statistics null distribution, by default the non-parametric bootstrap with centering and scaling ('boot.cs').  The old default 'boot' is still accepted and corresponds to 'boot.cs'.  Other null distribution options include 'boot.ctr', 'boot.qt', and 'ic', corresponding to the centered-only bootstrap distribution, quantile-transformed bootstrap distribution, and influence curve  [...]
+  \item{prior}{Character string indicating which choice of prior probability to use for estimating local q-values (i.e., the posterior probabilities of a null hypothesis being true given the value of its corresponding test statistic).  Default is 'conservative', in which case the prior is set to its most conservative value of 1, meaning that all hypotheses are assumed to belong to the set of true null hypotheses.  Other options include 'ABH' for the adaptive Benjamini-Hochberg estimator  [...]
+  \item{bw}{A character string argument to \code{density} indicating the smoothing bandwidth to be used during kernel density estimation.  Default is 'nrd'.}
+  \item{kernel}{A character string argument to \code{density} specifying the smoothing kernel to be used.  Default is 'gaussian'.} 
+  \item{keep.falsepos}{A logical indicating whether or not to store the matrix of guessed false positives at each round of (re)sampling.  The matrix has rows equal to the number of cut-offs (observed test statistics) and columns equal to the \code{B} number of bootstrap samples or samples from the multivariate normal distribution (if \code{nulldist='ic'}).  Default is 'FALSE'.}
+  \item{keep.truepos}{A logical indicating whether or not to store the matrix of guessed true positives at each round of (re)sampling.  The matrix has rows equal to the number of cut-offs (observed test statistics) and columns equal to the \code{B} number of bootstrap samples or samples from the multivariate normal distribution (if \code{nulldist='ic'}).  Default is 'FALSE'.}
+  \item{keep.errormat}{A logical indicating whether or not to store the matrix of type I error rate values at each round of (re)sampling.  The matrix has rows equal to the number of cut-offs (observed test statistics) and columns equal to the \code{B} number of bootstrap samples or samples from the multivariate normal distribution (if \code{nulldist='ic'}).  Default is 'FALSE'.  In the case of FDR-control, for example, this matrix is \code{falsepos}/(\code{falsepos} + \code{truepos}).  T [...]
+   \item{keep.Hsets}{A logical indicating whether or not to return the matrix of indicators which partition the hypotheses into guessed sets of true and false null hypotheses at each round of (re)sampling.  Default is 'FALSE'.}
+   \item{X, W, Y, Z, Z.incl, Z.test, na.rm, test, robust, standardize, alternative, k, q, alpha, smooth.null, B, psi0, marg.null, marg.par, ncp, perm.mat, ic.quant.trans, MVN.method, penalty, seed, cluster, type, dispatch, keep.nulldist, keep.rawdist, keep.margpar, keep.index, keep.label}{These arguments are all similarly used by the \code{MTP} function, and their use has been defined elsewhere.  Please consult the \code{\link{MTP}} help file or the references for further details.  Note t [...]
+}
+
+\details{
+The EBMTP begins with a marginal nonparametric mixture model for estimating local q-values.  By definition, q-values are 'the opposite' of traditional p-values.  That is, q-values represent the probability of a null hypothesis being true given the value of its corresponding test statistic.  If the test statistics Tn have marginal distribution f = pi*f_0 + (1-pi)*f_1, where pi is the prior probability of a true null hypothesis and f_0 and f_1 represent the marginal null and alternative densi [...]
+
+One can estimate both the null density f_0 and full density f by applying kernel density estimation over the matrix of null test statistics and the vector of observed test statistics, respectively.  Practically, this step in \code{EBMTP} also ensures that sidedness is correctly accounted for among the test statistics and their estimated null distribution.  The prior probability pi can be set to its most conservative value of 1 or estimated by some other means, e.g., using the adaptive Be [...]
+
+EBMTPs use function closures to represent type I error rates in terms of their defining features.  Restricting the choice of type I error rate to 'fwer', 'gfwer', 'tppfp', and 'fdr' means that these features include whether to control the number of false positives or the proportion of false positives among the number of rejections made (i.e., the false discovery proportion), whether we are controlling a tail probability or expected value error rate, and, in the case of tail probability e [...]
+
+As detailed in the references, relaxing the prior may result in a more powerful multiple testing procedure, albeit sometimes at the cost of type I error control.  Additionally, when the proportion of true null hypotheses is close to one, type I error control may also become an issue, even when using the most conservative prior probability of one.  This feature is known to occur with some other procedures which rely on the marginal nonparametric mixture model for estimating (local) q-valu [...]
+
+Situations of moderately high to high levels of correlation may also affect the results of multiple testing methods which use the same mixture model for generating q-values.  Microarray analysis represents a scenario in which dependence structures are typically weak enough to mitigate this concern.  On the other hand, the analysis of densely sampled SNPs, for example, may present problems.  \cr
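+
+Purely as an illustration of the mixture-model computation just described (a minimal sketch, not the internal implementation), the local q-values can be estimated along the following lines in R.  Here \code{Tn} denotes the vector of observed test statistics and \code{nullmat} the matrix of null test statistics, as in \code{\link{Hsets}}; all other object names are hypothetical.
+
+\preformatted{
+## kernel density estimates of the null density f_0 and the full density f
+f0.dens <- density(as.vector(nullmat), bw = "nrd", kernel = "gaussian")
+f.dens  <- density(Tn, bw = "nrd", kernel = "gaussian")
+
+## evaluate both density estimates at the observed test statistics
+f0.Tn <- approx(f0.dens$x, f0.dens$y, xout = Tn, rule = 2)$y
+f.Tn  <- approx(f.dens$x, f.dens$y, xout = Tn, rule = 2)$y
+
+## local q-values pi*f_0/f under the conservative prior pi = 1,
+## truncated at 1 so that they remain probabilities
+pi0 <- 1
+lqv <- pmin(pi0 * f0.Tn / f.Tn, 1)
+}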
+ 
+}
+
+\value{
+An object of class \code{EBMTP}.  Again, for brevity, the values below represent slots which distinguish objects of class \code{EBMTP} from those of class \code{MTP}. \cr
+
+\item{\code{falsepos}}{A matrix with rows equal to the number of hypotheses and columns the number of samples of null test statistics (\code{B}) indicating the number of guessed false positives when using the corresponding value of the observed test statistic as a cut-off.  Not returned unless \code{keep.falsepos=TRUE}.}
+\item{\code{truepos}}{A matrix with rows equal to the number of hypotheses and columns the number of samples of null test statistics (\code{B}) indicating the number of guessed true positives when using the corresponding value of the observed test statistic as a cut-off.  Not returned unless \code{keep.truepos=TRUE}.}
+\item{\code{errormat}}{The matrix obtained after applying the type I error rate function closure to the matrices in \code{falsepos}, and, if applicable, \code{truepos}.  Not returned unless \code{keep.errormat=TRUE}.}
+\item{\code{EB.h0M}}{The sum of the local q-values obtained after density estimation.  This number serves as an estimate of the proportion of true null hypotheses.  Values close to one indicate situations in which type I error control may not be guaranteed by the EBMTP.  When \code{prior='EBLQV'}, this value is used as the prior 'pi' during evaluation of the local q-value function.} 
+\item{\code{prior}}{The numeric value of the prior 'pi' used when evaluating the local q-value function.}
+\item{\code{prior.type}}{Character string returning the value of \code{prior} in the original call to \code{EBMTP}.  One of 'conservative', 'ABH', or 'EBLQV'.}
+\item{\code{lqv}}{A numeric vector of length the number of hypotheses with the estimated local q-values used for generating guessed sets of true null hypotheses.}
+\item{\code{Hsets}}{A numeric matrix with the same dimension as \code{nulldist}, containing the Bernoulli realizations of the estimated local q-values stored in \code{lqv} which were used to partition the hypotheses into guessed sets of true and false null hypotheses at each round of (re)sampling. Not returned unless \code{keep.Hsets=TRUE}.}
+}
+
+\references{
+
+H.N. Gilbert, K.S. Pollard, M.J. van der Laan, and S. Dudoit (2009). Resampling-based multiple 
+hypothesis testing with applications to genomics: New developments in R/Bioconductor 
+package multtest. \emph{Journal of Statistical Software} (submitted). Temporary URL: \url{http://www.stat.berkeley.edu/~houston/JSSNullDistEBMTP.pdf}.\cr
+
+Y. Benjamini and Y. Hochberg (2000). On the adaptive control of the false 
+discovery rate in multiple testing with independent statistics. \emph{J. Educ.
+Behav. Statist}. Vol. 25: 60-83.\cr
+
+Y. Benjamini, A. M. Krieger and D. Yekutieli (2006). Adaptive linear step-up
+procedures that control the false discovery rate. \emph{Biometrika}. 
+Vol. 93: 491-507.\cr
+
+M.J. van der Laan, M.D. Birkner, and A.E. Hubbard (2005).  Empirical Bayes and Resampling Based Multiple Testing Procedure Controlling the Tail Probability of the Proportion of False Positives. Statistical Applications in Genetics and Molecular Biology, 4(1).
+\url{http://www.bepress.com/sagmb/vol4/iss1/art29/} \cr
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008. \cr
+
+S. Dudoit, H.N. Gilbert, and M.J. van der Laan (2008). 
+Resampling-based empirical Bayes multiple testing procedures for controlling 
+generalized tail probability and expected value error rates: Focus on the false
+discovery rate and simulation study. \emph{Biometrical Journal}, 50(5):716-44. \url{http://www.stat.berkeley.edu/~houston/BJMCPSupp/BJMCPSupp.html}. \cr
+
+H.N. Gilbert, M.J. van der Laan, and S. Dudoit. Joint multiple testing procedures for 
+graphical model selection with applications to biological networks. Technical report, 
+U.C. Berkeley Division of Biostatistics Working Paper Series, April 2009. URL \url{http://www.bepress.com/ucbbiostat/paper245}. \cr
+
+
+}
+
+\author{Houston N. Gilbert, based on the original \code{MTP} code written by Katherine S. Pollard}
+
+\seealso{\code{\link{MTP}}, \code{\link{EBMTP-class}}, \code{\link{EBMTP-methods}}, \code{\link{Hsets}}}
+
+\examples{
+set.seed(99)
+data<-matrix(rnorm(90),nrow=9)
+group<-c(rep(1,5),rep(0,5))
+
+#EB fwer control with centered and scaled bootstrap null distribution 
+#(B=100 for speed)
+eb.m1<-EBMTP(X=data,Y=group,alternative="less",B=100,method="common.cutoff")
+print(eb.m1)
+summary(eb.m1)
+par(mfrow=c(2,2))
+plot(eb.m1,top=9)
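+
+## A hedged sketch (not run): update the fitted EBMTP object to control a
+## different type I error rate without re-resampling. Note that the typeone
+## argument must always be given explicitly in calls to EBupdate.
+\dontrun{
+eb.m2 <- EBupdate(eb.m1, typeone = "gfwer", k = 1)
+}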
+}
diff --git a/man/Hsets.Rd b/man/Hsets.Rd
new file mode 100644
index 0000000..87876ad
--- /dev/null
+++ b/man/Hsets.Rd
@@ -0,0 +1,103 @@
+\name{Hsets}
+\alias{Hsets}
+\alias{G.VS}
+\alias{ABH.h0}
+\alias{VScount}
+\alias{dens.est}
+
+\title{Functions for generating guessed sets of true null hypotheses in empirical Bayes resampling-based multiple hypothesis testing}
+
+\description{These functions are called internally by the main user-level function \code{EBMTP}.  They are used for estimating local q-values, generating guessed sets of true null hypotheses, and applying these results to function closures defining the choice of type I error rate (FWER, gFWER, TPPFP, and FDR).}
+
+\usage{
+Hsets(Tn, nullmat, bw, kernel, prior, B, rawp) 
+
+ABH.h0(rawp) 
+
+G.VS(V, S = NULL, tp = TRUE, bound)
+}
+
+\arguments{
+  \item{Tn}{The vector of observed test statistics.}
+  \item{nullmat}{The matrix of null test statistics obtained either through null transformation of the bootstrap distribution or by sampling from an appropriate multivariate normal distribution (when \code{nulldist='ic'}).}
+  \item{bw}{A character string argument to \code{density} indicating the smoothing bandwidth to be used during kernel density estimation. Default is 'nrd'.} 
+  \item{kernel}{A character string argument to \code{density} specifying the smoothing kernel to be used.  Default is 'gaussian'.} 
+  \item{prior}{Character string indicating which choice of prior probability to use for estimating local q-values (i.e., the posterior probabilities of a null hypothesis being true given the value of its corresponding test statistic).  Default is 'conservative', in which case the prior is set to its most conservative value of 1, meaning that all hypotheses are assumed to belong to the set of true null hypotheses.  Other options include 'ABH' for the adaptive Benjamini-Hochberg estimator  [...]
+  \item{B}{The number of bootstrap iterations (i.e. how many resampled data sets) or the number of samples from the multivariate normal distribution (if \code{nulldist='ic'}). Can be reduced to increase the speed of computation, at a cost to precision. Default is 1000.}
+  \item{rawp}{A vector of raw (unadjusted) p-values obtained from the bootstrap-based or influence curve null distribution.}
+  \item{V}{A matrix of the numbers of guessed false positives for each cut-off, i.e., observed value of a test statistic, within each sample in \code{B}.}
+  \item{S}{A matrix of the numbers of guessed true positives for each cut-off, i.e., observed value of a test statistic, within each sample in \code{B}.}
+  \item{tp}{Logical indicator which is TRUE if the type I error rate is a tail probability error rate and FALSE if it is an expected value error rate.}
+  \item{bound}{If a tail probability error rate, the bound to be placed on the function of guessed false positives and guessed true positives.  For 'fwer', equal to 0; for 'gfwer', equal to 'k'; and for 'tppfp', equal to 'q'.} 
+}
+
+\details{
+The most important object to be returned from the function \code{Hsets} is a matrix of indicators, i.e., Bernoulli realizations of the estimated local q-values, taking the value of 1 if the hypothesis is guessed as belonging to the set of true null hypotheses and 0 otherwise (guessed true alternative).  Realizations of these probabilities are generated with a call to \code{rbinom}, meaning that this function will set the RNG seed forward another \code{B}*(the number of hypotheses) places [...]
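+
+As a minimal sketch of this step (assuming \code{lqv} holds the estimated local q-values returned in \code{pn.out}, with \code{B} resampling rounds), note that because the matrix fills column-wise, \code{rbinom} recycles one probability per hypothesis within each column:
+
+\preformatted{
+## one Bernoulli draw per hypothesis (rows) and resampling round (columns):
+## 1 = guessed true null, 0 = guessed true alternative
+m <- length(lqv)
+Hsets.mat <- matrix(rbinom(m * B, size = 1, prob = lqv), nrow = m, ncol = B)
+}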
+}
+
+\value{
+For the function \code{Hsets}, a list with the following elements:
+  \item{Hsets.mat}{A matrix of numeric indicators with rows equal to the number of tests (hypotheses, typically \code{nrow(X)}) and columns the number of samples of null test statistics, \code{B}.  Values of one indicate hypotheses guessed as belonging to the set of true null hypotheses based on the value of their corresponding test statistic.  Values of zero correspond to hypotheses guessed as belonging to the set of true alternative hypotheses.}
+  \item{EB.h0M}{The estimated proportion of true null hypotheses as determined by nonparametric density estimation.  This value is the sum of the estimated local q-values divided by the total number of tests (hypotheses).} 
+  \item{prior}{The value of the prior applied to the local q-value function.  If 'conservative', the prior is set to one.  Otherwise, the prior is the value obtained from the estimator of the adaptive Benjamini-Hochberg procedure (if \code{prior} is 'ABH') or from density estimation (if \code{prior} is 'EBLQV').}
+  \item{pn.out}{The vector of estimated local q-values.  This vector is returned in the \code{lqv} slot of objects of class \code{EBMTP}.}
+
+For the function \code{ABH.h0}, the estimated number of true null hypotheses using the estimator from the linear step-up adaptive Benjamini-Hochberg procedure. \cr
+
+For the function \code{G.VS}, a closure which accepts as arguments the matrices of guessed false positives and true positives (if applicable) and applies the appropriate function defining the desired type I error rate.
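+
+Purely for illustration (these are not the exact internal definitions), closures of the kind returned by \code{G.VS} might take the following form, where \code{V} and \code{S} are the matrices described above:
+
+\preformatted{
+## expected-value error rate (FDR): false discovery proportion V/(V+S),
+## with the convention 0/0 = 0
+g.fdr <- function(V, S) V / pmax(V + S, 1)
+
+## tail-probability error rate (e.g., TPPFP): indicator that the false
+## discovery proportion exceeds the bound
+g.tp <- function(V, S, bound) (V / pmax(V + S, 1)) > bound
+}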
+}
+
+\references{
+H.N. Gilbert, K.S. Pollard, M.J. van der Laan, and S. Dudoit (2009). Resampling-based multiple 
+hypothesis testing with applications to genomics: New developments in R/Bioconductor 
+package multtest. \emph{Journal of Statistical Software} (submitted). Temporary URL: \url{http://www.stat.berkeley.edu/~houston/JSSNullDistEBMTP.pdf}.\cr
+
+Y. Benjamini and Y. Hochberg (2000). On the adaptive control of the false 
+discovery rate in multiple testing with independent statistics. \emph{J. Educ.
+Behav. Statist}. Vol. 25: 60-83.\cr
+
+Y. Benjamini, A.M. Krieger and D. Yekutieli (2006). Adaptive linear step-up
+procedures that control the false discovery rate. \emph{Biometrika}. 
+Vol. 93: 491-507.\cr
+
+M.J. van der Laan, M.D. Birkner, and A.E. Hubbard (2005).  Empirical Bayes and Resampling Based Multiple Testing Procedure Controlling the Tail Probability of the Proportion of False Positives. Statistical Applications in Genetics and Molecular Biology, 4(1).
+\url{http://www.bepress.com/sagmb/vol4/iss1/art29/} \cr
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008. \cr
+
+S. Dudoit, H.N. Gilbert, and M.J. van der Laan (2008). 
+Resampling-based empirical Bayes multiple testing procedures for controlling 
+generalized tail probability and expected value error rates: Focus on the false
+discovery rate and simulation study. \emph{Biometrical Journal}, 50(5):716-44. \url{http://www.stat.berkeley.edu/~houston/BJMCPSupp/BJMCPSupp.html}. \cr
+
+H.N. Gilbert, M.J. van der Laan, and S. Dudoit. Joint multiple testing procedures for 
+graphical model selection with applications to biological networks. Technical report, 
+U.C. Berkeley Division of Biostatistics Working Paper Series, April 2009. URL \url{http://www.bepress.com/ucbbiostat/paper245}. \cr
+}
+
+\author{Houston N. Gilbert}
+
+\seealso{\code{\link{EBMTP}}, \code{\link{EBMTP-class}}, \code{\link{EBMTP-methods}}}
+
+\examples{
+set.seed(99)
+data<-matrix(rnorm(90),nrow=9)
+group<-c(rep(1,5),rep(0,5))
+
+#EB fwer control with centered and scaled bootstrap null distribution 
+#(B=100 for speed)
+eb.m1<-EBMTP(X=data,Y=group,alternative="less",B=100,method="common.cutoff")
+print(eb.m1)
+summary(eb.m1)
+par(mfrow=c(2,2))
+plot(eb.m1,top=9)
+
+abh <- ABH.h0(eb.m1@rawp)
+abh
+
+eb.m2 <- EBupdate(eb.m1,prior="ABH")
+eb.m2@prior
+
+}
+
+
diff --git a/man/MTP-class.Rd b/man/MTP-class.Rd
new file mode 100755
index 0000000..b10ef5b
--- /dev/null
+++ b/man/MTP-class.Rd
@@ -0,0 +1,133 @@
+\name{MTP-class}
+\docType{class}
+\alias{MTP-class}
+
+\title{Class "MTP", classes and methods for multiple testing procedure output}
+
+\description{An object of class MTP is the output of a particular multiple testing procedure, for example, one generated by the MTP function. It has slots for the various data used to make multiple testing decisions, such as adjusted p-values and confidence regions.}
+
+\section{Objects from the Class}{
+Objects can be created by calls of the form \cr
+new('MTP', \cr
+	statistic = ...., object of class numeric\cr
+	estimate  = ...., object of class numeric\cr
+	sampsize  = ...., object of class numeric\cr
+	rawp      = ...., object of class numeric\cr
+	adjp      = ...., object of class numeric\cr
+	conf.reg  = ...., object of class array\cr
+        cutoff    = ...., object of class matrix\cr
+ 	reject	  = ...., object of class matrix\cr
+	rawdist	  = ...., object of class matrix\cr
+        nulldist  = ...., object of class matrix\cr
+	nulldist.type = ...., object of class character\cr
+	marg.null = ...., object of class character\cr
+	marg.par  = ...., object of class matrix\cr
+	label	  = ...., object of class numeric\cr
+	index	  = ...., object of class matrix\cr
+        call      = ...., object of class call\cr
+        seed      = ...., object of class integer\cr
+       )
+}
+
+\section{Slots}{
+\describe{
+  \item{\code{statistic}}{Object of class \code{numeric}, observed test statistics for each hypothesis, specified by the values of the \code{MTP} arguments \code{test}, \code{robust}, \code{standardize}, and \code{psi0}.}
+    \item{\code{estimate}}{For the test of single-parameter null hypotheses using t-statistics (i.e., not the F-tests), the numeric vector of estimated parameters corresponding to each hypothesis, e.g. means, differences in means, regression parameters.}
+    \item{\code{sampsize}}{Object of class \code{numeric}, number of columns (i.e. observations) in the input data set.}
+    \item{\code{rawp}}{Object of class \code{numeric}, unadjusted, marginal p-values for each hypothesis.}
+    \item{\code{adjp}}{Object of class \code{numeric}, adjusted (for multiple testing) p-values for each hypothesis (computed only if the \code{get.adjp} argument is TRUE).}
+    \item{\code{conf.reg}}{For the test of single-parameter null hypotheses using t-statistics (i.e., not the F-tests), the numeric array of lower and upper simultaneous confidence limits for the parameter vector, for each value of the nominal Type I error rate \code{alpha} (computed only if the \code{get.cr} argument is TRUE).}
+    \item{\code{cutoff}}{The numeric matrix of cut-offs for the vector of test statistics for each value of the nominal Type I error rate \code{alpha} (computed only if the \code{get.cutoff} argument is TRUE).}
+   \item{\code{reject}}{Object of class \code{'matrix'}, rejection indicators (TRUE for a rejected null hypothesis), for each value of the nominal Type I error rate \code{alpha}.}
+    \item{\code{rawdist}}{The numeric matrix for the estimated nonparametric non-null test statistics distribution (returned only if \code{keep.rawdist=TRUE} and if \code{nulldist} is one of 'boot.ctr', 'boot.cs', or 'boot.qt'). This slot must not be empty if one wishes to call \code{update} to change choice of bootstrap-based null distribution.}
+    \item{\code{nulldist}}{The numeric matrix for the estimated test statistics null distribution (returned only if \code{keep.nulldist=TRUE}); option not currently available for permutation null distribution, i.e.,  \code{nulldist='perm'}). By default (i.e., for \code{nulldist='boot.cs'}), the entries of \code{nulldist} are the null value shifted and scaled bootstrap test statistics, with one null test statistic value for each hypothesis (rows) and bootstrap iteration (columns).}
+    \item{\code{nulldist.type}}{Character value describing which choice of null distribution was used to generate the MTP results.  Takes on one of the values of the original \code{nulldist} argument in the call to MTP, i.e., 'boot.cs', 'boot.ctr', 'boot.qt', 'ic', or 'perm'.}
+   \item{\code{marg.null}}{If \code{nulldist='boot.qt'}, a character value returning which choice of marginal null distribution was used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{marg.par}}{If \code{nulldist='boot.qt'}, a numeric matrix returning the parameters of the marginal null distribution(s) used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{label}}{If \code{keep.label=TRUE}, a vector storing the values used in the argument \code{Y}.  Storing this object is particularly important when one wishes to update MTP objects with F-statistics using default \code{marg.null} and \code{marg.par} settings when \code{nulldist='boot.qt'}. }
+   \item{\code{index}}{For tests of correlation parameters, a matrix corresponding to \code{t(combn(p,2))}, where \code{p} is the number of variables in \code{X}.  This matrix gives the indices of the variables considered in each pairwise correlation.  For all other tests, this slot is empty, as the indices are in the same order as the rows of \code{X}.}
+   \item{\code{call}}{Object of class \code{call}, the call to the MTP function.}
+    \item{\code{seed}}{An integer or vector for specifying the state of the random number generator used to create the resampled datasets. The seed can be reused for reproducibility in a repeat call to \code{MTP}. This argument is currently used only for the bootstrap null distribution (i.e., for \code{nulldist="boot.xx"}). See \code{?set.seed} for details.}
+	}
+}
+
+\section{Methods}{
+ \code{signature(x = "MTP")} \cr
+ \describe{
+    \item{[}{: Subsetting method for \code{MTP} class, which operates selectively on each slot of an \code{MTP} instance to retain only the data related to the specified hypotheses.}
+
+    \item{as.list}{: Converts an object of class \code{MTP} to an object of class \code{list}, with an entry for each slot.}
+ 
+   \item{plot}{: plot method for \code{MTP} class, produces the following graphical summaries of the results of an MTP. The type of display may be specified via the \code{which} argument. \cr
+
+1. Scatterplot of number of rejected hypotheses vs. nominal Type I error rate. \cr
+
+2. Plot of ordered adjusted p-values; can be viewed as a plot of Type I error rate vs. number of rejected hypotheses. \cr
+
+3. Scatterplot of adjusted p-values vs. test statistics (also known as "volcano plot"). \cr
+
+4. Plot of unordered adjusted p-values. \cr
+
+5. Plot of confidence regions for user-specified parameters, by default the 10 parameters corresponding to the smallest adjusted p-values  (argument \code{top}). \cr
+
+6. Plot of test statistics and corresponding cut-offs (for each value of \code{alpha}) for user-specified hypotheses, by default the 10 hypotheses corresponding to the smallest adjusted p-values (argument \code{top}). \cr
+
+The argument \code{logscale} (by default equal to FALSE) allows one to use the negative decimal logarithms of the adjusted p-values in the second, third, and fourth graphical displays. The arguments \code{caption} and \code{sub.caption} allow one to change the titles and subtitles for each of the plots (default subtitle is the MTP function call). Note that some of these plots are implemented in the older function \code{mt.plot}.}
+
+    \item{print}{: print method for \code{MTP} class, returns a description of an object of class \code{MTP}, including sample size, number of tested hypotheses, type of test performed (value of argument \code{test}), Type I error rate (value of argument \code{typeone}), nominal level of the test  (value of argument \code{alpha}), name of the MTP  (value of argument \code{method}), call to the function \code{MTP}.
+
+In addition, this method produces a table with the class, mode, length, and dimension of each slot of the \code{MTP} instance. 
+}
+
+    \item{summary}{: summary method for \code{MTP} class, provides numerical summaries of the results of an MTP and returns a list with the following three components. \cr
+
+1. rejections: A data.frame with the number(s) of rejected hypotheses for the nominal Type I error rate(s) specified by the \code{alpha} argument of the function \code{MTP}. (NULL values are returned if all three arguments \code{get.cr}, \code{get.cutoff}, and \code{get.adjp} are FALSE). \cr
+
+2. index: A numeric vector of indices for ordering the hypotheses according to first \code{adjp}, then \code{rawp}, and finally the absolute value of \code{statistic} (not printed in the summary). \cr 
+
+3. summaries: When applicable (i.e., when the corresponding quantities are returned by \code{MTP}), a table with six number summaries of the distributions of the adjusted p-values, unadjusted p-values, test statistics, and parameter estimates.}
+
+    \item{update}{: update method for \code{MTP} class, provides a mechanism to re-run the MTP with different choices of the following arguments - nulldist, alternative, typeone, k, q, fdr.method, alpha, smooth.null, method, get.cr, get.cutoff, get.adjp, keep.nulldist, keep.rawdist, keep.margpar. When evaluate is 'TRUE', a new object of class MTP is returned. Else, the updated call is returned. The \code{MTP} object passed to the update method must have either a  non-empty \code{rawdist} [...]
+
+To save on memory, if one knows ahead of time that one will want to compare different choices of bootstrap-based null distribution, then it is both necessary and sufficient to specify 'keep.rawdist=TRUE', as there is no means of moving between null distributions other than through the non-transformed non-parametric bootstrap distribution.  In this case, 'keep.nulldist=FALSE' may be used.  Specifically, if an object of class \code{MTP} contains a non-empty \code{rawdist} slot and an [...]
+
+N.B.: Note that \code{keep.rawdist=TRUE} is only available for the bootstrap-based resampling methods.  The non-null distribution does not exist for the permutation or influence curve multivariate normal null distributions. }
+
+    \item{mtp2ebmtp}{: coercion method for converting objects of class \code{MTP} to objects of class \code{EBMTP}.  Slots common to both objects are taken from the object of class \code{MTP} and used to create a new object of class \code{EBMTP}.  Once an object of class \code{EBMTP} is created, one may use the method \code{EBupdate} to perform resampling-based empirical Bayes multiple testing without the need for repeated resampling.}
+}
+}
+
+
+\references{
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Augmentation Procedures for Control of the Generalized Family-Wise Error Rate and Tail Probabilities for the Proportion of False Positives, Statistical Applications in Genetics and Molecular Biology, 3(1). 
+\url{http://www.bepress.com/sagmb/vol3/iss1/art15/}
+
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Multiple Testing. Part II. Step-Down Procedures for Control of the Family-Wise Error Rate, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art14/}
+
+S. Dudoit, M.J. van der Laan, K.S. Pollard (2004), Multiple Testing. Part I. Single-Step Procedures for Control of General Type I Error Rates, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art13/}
+
+Katherine S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+
+M.J. van der Laan and A.E. Hubbard (2006), Quantile-function Based Null Distributions in Resampling Based Multiple Testing, Statistical Applications in Genetics and Molecular Biology, 5(1).
+\url{http://www.bepress.com/sagmb/vol5/iss1/art14/}
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008.
+}
+
+\author{Katherine S. Pollard and Houston N. Gilbert with design contributions from Sandrine Dudoit and Mark J. van der Laan.}
+
+\seealso{ \code{\link{MTP}}, \code{\link{MTP-methods}},
+\code{\link{EBMTP}}, \code{\link{EBMTP-methods}},
+\code{\link{[-methods}}, \code{\link{as.list-methods}}, \code{\link{print-methods}}, \code{\link{plot-methods}}, \code{\link{summary-methods}}, \code{\link{mtp2ebmtp}}, 
+\code{\link{ebmtp2mtp}}}
+
+\examples{
+## See MTP function: ? MTP
+}
+
+\keyword{classes}
+
+
diff --git a/man/MTP-methods.Rd b/man/MTP-methods.Rd
new file mode 100755
index 0000000..7f78b0f
--- /dev/null
+++ b/man/MTP-methods.Rd
@@ -0,0 +1,114 @@
+\name{MTP-methods}
+\docType{methods}
+\alias{MTP-methods}
+\alias{EBMTP-methods}
+
+\alias{[-methods}
+\alias{[,MTP-method}
+\alias{[,EBMTP-method}
+
+\alias{as.list-methods}
+\alias{as.list,MTP-method}
+\alias{as.list,EBMTP-method}
+\alias{as.list}
+
+\alias{plot-methods}
+\alias{plot,MTP,ANY-method}
+\alias{plot,EBMTP,ANY-method}
+\alias{plot}
+
+\alias{print-methods}
+\alias{print,MTP-method}
+\alias{print,EBMTP-method}
+\alias{print.MTP}
+
+\alias{summary-methods}
+\alias{summary,MTP-method} 
+\alias{summary,EBMTP-method} 
+\alias{summary}
+
+\alias{update-methods}
+\alias{update,MTP-method}
+\alias{update}
+
+\alias{EBupdate-methods}
+\alias{EBupdate,EBMTP-method}
+\alias{EBupdate}
+
+\alias{mtp2ebmtp-methods}
+\alias{mtp2ebmtp,MTP-method}
+\alias{mtp2ebmtp}
+
+\alias{ebmtp2mtp-methods}
+\alias{ebmtp2mtp,EBMTP-method}
+\alias{ebmtp2mtp}
+
+
+\title{Methods for MTP and EBMTP objects in Package `multtest'}
+
+\description{Summary, printing, plotting, subsetting, updating, as.list and class conversion methods were defined for the \code{MTP} and \code{EBMTP} classes. These methods provide visual and numeric summaries of the results of a multiple testing procedure (MTP) and allow one to perform some basic manipulations of objects of class \code{MTP} or \code{EBMTP}.  \cr
+
+Several of the methods share the same name and work on objects of their respective class.  One exception to this rule is the difference between \code{update} and \code{EBupdate} (described below).  Because of the differences in the testing procedures, separately named methods were chosen to clearly delineate which method was being applied to which type of object.}
+
+\section{Methods}{
+\describe{
+
+   \item{[}{: Subsetting method for \code{MTP} and \code{EBMTP} classes, which operates selectively on each slot of an \code{MTP} or \code{EBMTP} instance to retain only the data related to the specified hypotheses.}
+
+    \item{as.list}{: Converts an object of class \code{MTP} or \code{EBMTP} to an object of class \code{list}, with an entry for each slot.}
+ 
+   \item{plot}{: plot methods for \code{MTP} and \code{EBMTP} classes, produce the following graphical summaries of the results of an MTP. The type of display may be specified via the \code{which} argument. \cr
+
+1. Scatterplot of number of rejected hypotheses vs. nominal Type I error rate. \cr
+
+2. Plot of ordered adjusted p-values; can be viewed as a plot of Type I error rate vs. number of rejected hypotheses. \cr
+
+3. Scatterplot of adjusted p-values vs. test statistics (also known as volcano plot). \cr
+
+4. Plot of unordered adjusted p-values. \cr
+
+Only for objects of class \code{MTP}: \cr
+
+5. Plot of confidence regions for user-specified parameters, by default the 10 parameters corresponding to the smallest adjusted p-values  (argument \code{top}). \cr
+
+6. Plot of test statistics and corresponding cut-offs (for each value of \code{alpha}) for user-specified hypotheses, by default the 10 hypotheses corresponding to the smallest adjusted p-values (argument \code{top}). \cr
+
+Plots (5) and (6) are not available for objects of class \code{EBMTP} because the function \code{EBMTP} returns only adjusted p-values and not confidence regions or cut-offs.  The argument \code{logscale} (by default equal to FALSE) allows one to use the negative decimal logarithms of the adjusted p-values in the second, third, and fourth graphical displays. The arguments \code{caption} and \code{sub.caption} allow one to change the titles and subtitles for each of the plots (default sub [...]
+
+    \item{print}{: print method for \code{MTP} and \code{EBMTP} classes, returns a description of an object of either class, including sample size, number of tested hypotheses, type of test performed (value of argument \code{test}), Type I error rate (value of argument \code{typeone}), nominal level of the test (value of argument \code{alpha}), name of the MTP (value of argument \code{method}), call to the function \code{MTP} or \code{EBMTP}. 
+
+In addition, this method produces a table with the class, mode, length, and dimension of each slot of the \code{MTP} or \code{EBMTP} instance. 
+}
+
+    \item{summary}{: summary method for \code{MTP} and \code{EBMTP} classes, provides numerical summaries of the results of an MTP and returns a list with the following three components. \cr
+
+1. rejections: A data.frame with the number(s) of rejected hypotheses for the nominal Type I error rate(s) specified by the \code{alpha} argument of the function \code{MTP} or \code{EBMTP}. (For objects of class \code{MTP}, NULL values are returned if all three arguments \code{get.cr}, \code{get.cutoff}, and \code{get.adjp} are FALSE). \cr
+
+2. index: A numeric vector of indices for ordering the hypotheses according to first \code{adjp}, then \code{rawp}, and finally the absolute value of \code{statistic} (not printed in the summary). \cr 
+
+3. summaries: When applicable (i.e., when the corresponding quantities are returned by \code{MTP} or \code{EBMTP}), a table with six number summaries of the distributions of the adjusted p-values, unadjusted p-values, test statistics, and parameter estimates.}
+    \item{update}{: update method for the \code{MTP} class, provides a mechanism to re-run the MTP with different choices of the following arguments - nulldist, alternative, typeone, k, q, fdr.method, alpha, smooth.null, method, get.cr, get.cutoff, get.adjp, keep.nulldist, keep.rawdist, keep.margpar. When evaluate is 'TRUE', a new object of class MTP is returned. Else, the updated call is returned. The \code{MTP} object passed to the update method must have either a non-empty [...]
+
+    \item{EBupdate}{: update method for \code{EBMTP} class, provides a mechanism to re-run the MTP with different choices of the following arguments - nulldist, alternative, typeone, k, q, alpha, smooth.null, bw, kernel, prior, keep.nulldist, keep.rawdist, keep.falsepos, keep.truepos, keep.errormat, keep.margpar. When evaluate is 'TRUE', a new object of class EBMTP is returned. Else, the updated call is returned. The \code{EBMTP} object passed to the update method must have either a  non [...]
+
+Additionally, when calling \code{EBupdate} for any Type I error rate other than FWER, the \code{typeone} argument must be specified (even if the original object did not control FWER). For example, 
+\code{typeone="fdr"} would always have to be specified, even if the original object also controlled the FDR. In other words, for all function arguments, it is safest to always assume that you 
+are updating from the \code{EBMTP} default function settings, regardless of the original call to the \code{EBMTP} function. Currently, the main advantage of the \code{EBupdate} method is that it eliminates the need for repeated estimation of the test statistics null distribution. \cr 
+
+To save on memory, if one knows ahead of time that one will want to compare different choices of bootstrap-based null distribution, then it is both necessary and sufficient to specify 'keep.rawdist=TRUE', as there is no means of moving between null distributions other than through the non-transformed non-parametric bootstrap distribution.  In this case, 'keep.nulldist=FALSE' may be used.  Specifically, if an object of class \code{MTP} or \code{EBMTP} contains a non-empty \code{rawd [...]
+
+N.B.: Note that \code{keep.rawdist=TRUE} is only available for the bootstrap-based resampling methods.  The non-null distribution does not exist for the permutation or influence curve multivariate normal null distributions.
+}
+    \item{mtp2ebmtp}{: coercion method for converting objects of class \code{MTP} to objects of class \code{EBMTP}.  Slots common to both objects are taken from the object of class \code{MTP} and used to create a new object of class \code{EBMTP}.  Once an object of class \code{EBMTP} is created, one may use the method \code{EBupdate} to perform resampling-based empirical Bayes multiple testing without the need for repeated resampling.}
+    \item{ebmtp2mtp}{: coercion method for converting objects of class \code{EBMTP} to objects of class \code{MTP}.  Slots common to both objects are taken from the object of class \code{EBMTP} and used to create a new object of class \code{MTP}.  Once an object of class \code{MTP} is created, one may use the method \code{update} to perform resampling-based multiple testing (as would have been done with calls to \code{MTP}) without the need for repeated resampling.}
+}
+}
+
+
+\author{Katherine S. Pollard and Houston N. Gilbert with design contributions from Sandrine Dudoit and Mark J. van der Laan.}
+
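+\examples{
+## A hedged sketch (not run) of the update/coercion workflow described
+## above. It assumes 'm1' is an existing object of class MTP created with
+## keep.rawdist=TRUE, so that the choice of bootstrap null distribution
+## can be changed; all object names here are hypothetical.
+\dontrun{
+m2  <- update(m1, typeone = "fdr", nulldist = "boot.ctr")
+em1 <- mtp2ebmtp(m1)
+em2 <- EBupdate(em1, typeone = "fdr")
+}
+}
+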
+\keyword{methods}
+
+
+
+
diff --git a/man/MTP.Rd b/man/MTP.Rd
new file mode 100755
index 0000000..3ecce93
--- /dev/null
+++ b/man/MTP.Rd
@@ -0,0 +1,219 @@
+\name{MTP}
+\alias{MTP}
+
+\title{A function to perform resampling-based multiple hypothesis testing}
+
+\description{
+A user-level function to perform multiple testing procedures (MTP). A variety of t- and F-tests, including robust versions of most tests, are implemented. Single-step and step-down minP and maxT methods are used to control the chosen type I error rate (FWER, gFWER, TPPFP, or FDR). Bootstrap and permutation null distributions are available.  Additionally, for t-statistics, one may wish to sample from an appropriate multivariate normal distribution with mean zero and correlation matrix der [...]
+}
+
+\usage{
+MTP(X, W = NULL, Y = NULL, Z = NULL, Z.incl = NULL, Z.test = NULL, 
+    na.rm = TRUE, test = "t.twosamp.unequalvar", robust = FALSE, 
+    standardize = TRUE, alternative = "two.sided", psi0 = 0, 
+    typeone = "fwer", k = 0, q = 0.1, fdr.method = "conservative", 
+    alpha = 0.05, smooth.null = FALSE, nulldist = "boot.cs", 
+    B = 1000, ic.quant.trans = FALSE, MVN.method = "mvrnorm", 
+    penalty = 1e-06, method = "ss.maxT", get.cr = FALSE, get.cutoff = FALSE, 
+    get.adjp = TRUE, keep.nulldist = TRUE, keep.rawdist = FALSE, 
+    seed = NULL, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL, 
+    marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL, 
+    keep.index = FALSE, keep.label = FALSE) 
+}
+
+\arguments{
+  \item{X}{A matrix, data.frame or ExpressionSet containing the raw data. In the case of an ExpressionSet, \code{exprs(X)} is the data of interest and \code{pData(X)} may contain outcomes and covariates of interest. For most currently implemented tests (exception: tests involving correlation parameters), one hypothesis is tested for each row of the data.}
+  \item{W}{A vector or matrix containing non-negative weights to be used in computing the test statistics. If a matrix, \code{W} must be the same dimension as \code{X} with one weight for each value in \code{X}. If a vector, \code{W} may contain one weight for each observation (i.e. column) of \code{X} or one weight for each variable (i.e. row) of \code{X}. In either case, the weights are duplicated appropriately. Weighted F-tests are not available. Default is 'NULL'.}
+  \item{Y}{A vector, factor, or \code{Surv} object containing the outcome of interest. This may be class labels (F-tests and two sample t-tests) or a continuous or polychotomous dependent variable (linear regression based t-tests), or survival data (Cox proportional hazards based t-tests). For \code{block.f} and \code{f.twoway} tests, class labels must be ordered by block and within each block ordered by group. If \code{X} is an ExpressionSet, \code{Y} can be a character string referring  [...]
+  \item{Z}{A vector, factor, or matrix containing covariate data to be used in the regression (linear and Cox) models. Each variable should be in one column, so that \code{nrow(Z)=ncol(X)}. If \code{X} is an ExpressionSet, \code{Z} can be a character string referring to the column of \code{pData(X)} to use as covariates. The variables \code{Z.incl} and \code{Z.test} allow one to specify which covariates to use in a particular test without modifying the input \code{Z}. Default is 'NULL'.}
+  \item{Z.incl}{The indices of the columns of \code{Z} (i.e. which variables) to include in the model. These can be numbers or column names (if the columns are named). Default is 'NULL'.}
+  \item{Z.test}{The index or name of the column of \code{Z} (i.e. which variable) to use to test for association with each row of \code{X} in a linear model. Only used for \code{test="lm.XvsZ"}, where it is necessary to specify which covariate's regression parameter is of interest. Default is 'NULL'.}
+  \item{na.rm}{Logical indicating whether to remove observations with an NA. Default is 'TRUE'.}
+  \item{test}{Character string specifying the test statistics to use, by default 't.twosamp.unequalvar'. See details (below) for a list of tests.}
+  \item{robust}{Logical indicating whether to use the robust version of the chosen test, e.g. Wilcoxon signed rank test for the robust one-sample t-test or \code{rlm} instead of \code{lm} in linear models. Default is 'FALSE'.}
+  \item{standardize}{Logical indicating whether to use the standardized version of the test statistics (usual t-statistics are standardized). Default is 'TRUE'.}
+  \item{alternative}{Character string indicating the alternative hypotheses, by default 'two.sided'. For one-sided tests, use 'less' or 'greater' for null hypotheses of 'greater than or equal' (i.e. alternative is 'less') and 'less than or equal', respectively.}
+  \item{psi0}{The hypothesized null value, typically zero (default). Currently, this should be a single value, which is used for all hypotheses.}
+  \item{typeone}{Character string indicating which type I error rate to control, by default family-wise error rate ('fwer'). Other options include generalized family-wise error rate ('gfwer'), with parameter \code{k} giving the allowed number of false positives, and tail probability of the proportion of false positives ('tppfp'), with parameter \code{q} giving the allowed proportion of false positives. The false discovery rate ('fdr') can also be controlled.}
+  \item{k}{The allowed number of false positives for gFWER control. Default is 0 (FWER).}
+  \item{q}{The allowed proportion of false positives for TPPFP control. Default is 0.1.}
+  \item{fdr.method}{Character string indicating which FDR controlling method should be used when \code{typeone="fdr"}. The options are "conservative" (default) for the more conservative, general FDR controlling procedure and "restricted" for the method which requires more assumptions.}
+  \item{alpha}{The target nominal type I error rate, which may be a vector of error rates. Default is 0.05.}
+  \item{smooth.null}{Indicator of whether to use a kernel density estimate for the tail of the null distribution for computing raw p-values close to zero. Only used if 'rawp' would be zero without smoothing. Default is 'FALSE'.}
+  \item{nulldist}{Character string indicating which resampling method to use for estimating the joint test statistics null distribution, by default the non-parametric bootstrap with centering and scaling ('boot.cs').  The old default 'boot' is still accepted and corresponds to 'boot.cs'.  Other null distribution options include 'perm', 'boot.ctr', 'boot.qt', and 'ic', corresponding to the permutation distribution, centered-only bootstrap distribution, quantile-transformed bootstrap  [...]
+  \item{B}{The number of bootstrap iterations (i.e. how many resampled data sets), the number of permutations (if \code{nulldist} is 'perm'), or the number of samples from the multivariate normal distribution (if \code{nulldist} is 'ic'). Can be reduced to increase the speed of computation, at a cost to precision. Default is 1000.}
+  \item{ic.quant.trans}{If \code{nulldist='ic'}, a logical indicating whether or not a marginal quantile transformation using a t-distribution or user-supplied marginal distribution (stored in \code{perm.mat}) should be applied to the multivariate normal null distribution.  Defaults for \code{marg.null} and \code{marg.par} exist, but can also be specified by the user (see below). Default is 'FALSE'.}
+  \item{MVN.method}{If \code{nulldist='ic'}, one of 'mvrnorm' or 'Cholesky' designating how correlated normal test statistics are to be generated.  Selecting 'mvrnorm' uses the function of the same name found in the \code{MASS} library, whereas 'Cholesky' relies on a Cholesky decomposition. Default is 'mvrnorm'.}
+  \item{penalty}{If \code{nulldist='ic'} and \code{MVN.method='Cholesky'}, the value in \code{penalty} is added to all diagonal elements of the estimated test statistics correlation matrix to ensure that the matrix is positive definite and that internal calls to \code{'chol'} do not return an error.  Default is 1e-6.}
+  \item{method}{The multiple testing procedure to use. Options are single-step maxT ('ss.maxT', default), single-step minP ('ss.minP'), step-down maxT ('sd.maxT'), and step-down minP ('sd.minP').}
+  \item{get.cr}{Logical indicating whether to compute confidence intervals for the estimates. Not available for F-tests. Default is 'FALSE'.}
+  \item{get.cutoff}{Logical indicating whether to compute thresholds for the test statistics. Default is 'FALSE'.}
+  \item{get.adjp}{Logical indicating whether to compute adjusted p-values. Default is 'TRUE'.}
+  \item{keep.nulldist}{Logical indicating whether to return the computed bootstrap or influence curve null distribution, by default 'TRUE'.  Not available for \code{nulldist}='perm'. Note that this matrix can be quite large.}
+  \item{keep.rawdist}{Logical indicating whether to return the computed non-null (raw) bootstrap distribution, by default 'FALSE'.  Not available when using \code{nulldist}='perm' or 'ic'.  Note that this matrix can become quite large.  If one wishes to use subsequent calls to \code{update} or \code{EBupdate} in which one updates choice of bootstrap null distribution, \code{keep.rawdist} must be TRUE.  To save on memory, \code{update} only requires that one of \code{keep.nulldist} or \co [...]
+  \item{seed}{Integer or vector of integers to be used as argument to \code{set.seed} to set the seed for the random number generator for bootstrap resampling. This argument can be used to repeat exactly a test performed with a given seed. If the seed is specified via this argument, the same seed will be returned in the seed slot of the MTP object created. Else a random seed(s) will be generated, used and returned. Vector of integers used to specify seeds for each node in a cluster used  [...]
+  \item{cluster}{Integer for the number of nodes to create or a cluster object created through the package \code{snow}. With the default value of 1, the bootstrap is implemented on a single node (a single CPU). Supplying a cluster object, or an integer greater than 1, results in the bootstrap being implemented in parallel on the provided or created nodes. This option is only available for the bootstrap procedure.}
+  \item{type}{Interface system to use for computer cluster. See \code{snow} package for details.}
+  \item{dispatch}{The number or percentage of bootstrap iterations to dispatch at a time to each node of the cluster if a computer cluster is used. If dispatch is a percentage, \code{B*dispatch} must be an integer. If dispatch is an integer, then \code{B/dispatch} must be an integer. Default is 5 percent.}
+  \item{marg.null}{If \code{nulldist='boot.qt'}, the marginal null distribution to use for quantile transformation.  Can be one of 'normal', 't', 'f' or 'perm'.  Default is 'NULL', in which case the marginal null distribution is selected based on choice of test statistics.  Defaults explained below. If 'perm', the user must supply a vector or matrix of test statistics corresponding to another marginal null distribution, perhaps one created externally by the user, and possibly referring t [...]
+  \item{marg.par}{If \code{nulldist='boot.qt'}, the parameters defining the marginal null distribution in \code{marg.null} to be used for quantile transformation.  Default is 'NULL', in which case the values are selected based on choice of test statistics and other available parameters (e.g., sample size, number of groups, etc.).  Defaults explained below.  User can override defaults, in which case a matrix of marginal null distribution parameters can be accepted.  Providing numeric (vec [...]
+  \item{keep.margpar}{If \code{nulldist='boot.qt'}, a logical indicating whether the (internally created) matrix of marginal null distribution parameters should be returned.  Default is 'TRUE'.}
+  \item{ncp}{If \code{nulldist='boot.qt'}, a value for a possible noncentrality parameter to be used during marginal quantile transformation. Default is 'NULL'.}
+  \item{perm.mat}{If \code{nulldist='boot.qt'} and \code{marg.null='perm'}, a matrix of user-supplied test statistics from a particular distribution to be used during marginal quantile transformation.  The statistics may represent empirically derived marginal permutation values, may be theoretical values, or may represent a sample from some other suitable choice of marginal null distribution.}
+  \item{keep.index}{If \code{nulldist='ic'} and \code{test='t.cor'} or \code{test='z.cor'}, the index returned is a matrix with the indices of the first and second variables considered for pairwise correlations.  If there are p hypotheses, this argument returns \code{t(combn(p,2))}.  For all other choices of test statistic, the index is not returned, as they correspond to the original order of the hypotheses in \code{X}.}
+  \item{keep.label}{Default is 'FALSE'.  A logical indicating whether or not the label in \code{Y} should be returned as a slot in the resulting MTP object.  Typically not necessary, although useful if one is using \code{update} and wants to use marginal null distribution defaults with \code{nulldist='boot.qt'} (e.g., with F-tests).}
+}
+
+\details{
+A multiple testing procedure (MTP) is defined by choices of test statistics, type I error rate, null distribution and method for error rate control. Each component is described here. For two-sample t-tests, the group with the smaller-valued label is subtracted from the group with the larger-valued label.  That is, differences in means are calculated as "mean of group 2 - mean of group 1" or "mean of group B - mean of group A". For paired t-tests, the arrangement of group indices does no [...]
+
+Test statistics are determined by the values of \code{test}: 
+\describe{
+\item{t.onesamp:}{one-sample t-statistic for tests of means;}
+\item{t.twosamp.equalvar:}{equal variance two-sample t-statistic for tests of differences in means (two-sample t-statistic);}
+\item{t.twosamp.unequalvar:}{unequal variance two-sample t-statistic for tests of differences in means (two-sample Welch t-statistic);}
+\item{t.pair:}{two-sample paired t-statistic for tests of differences in means;}
+\item{f:}{multi-sample F-statistic for tests of equality of population means (assumes constant variance across groups, but not normality); }
+\item{f.block:}{multi-sample F-statistic for tests of equality of population means in a block design (assumes constant variance across groups, but not normality). This test is not available with the bootstrap null distribution;}
+\item{f.twoway:}{multi-sample F-statistic for tests of equality of population means in a block design (assumes constant variance across groups, but not normality). Differs from \code{f.block} in requiring multiple observations per group*block combination. This test uses the means of each group*block combination as response variable and tests for group main effects assuming a randomized block design;}
+\item{lm.XvsZ:}{t-statistic for tests of regression coefficients for variable \code{Z.test} in linear models, each with a row of X as outcome, possibly adjusted by covariates \code{Z.incl} from the matrix \code{Z} (in the case of no covariates, one recovers the one-sample t-statistic, \code{t.onesamp});}
+\item{lm.YvsXZ:}{t-statistic for tests of regression coefficients in linear models, with outcome Y and each row of X as covariate of interest, with possibly other covariates \code{Z.incl} from the matrix \code{Z};}
+\item{coxph.YvsXZ:}{t-statistic for tests of regression coefficients in Cox proportional hazards survival models, with outcome Y and each row of X as covariate of interest, with possibly other covariates \code{Z.incl} from the matrix \code{Z}.}
+\item{t.cor:}{t-statistics for tests of pairwise correlation parameters for all variables in X.  Note that the number of hypotheses can become quite large very fast.  This test is only available with the influence curve null distribution.}
+\item{z.cor:}{Fisher's z-statistics for tests of pairwise correlation parameters for all variables in X.  Note that the number of hypotheses can become quite large very fast.  This test is only available with the influence curve null distribution.}
+}
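+
+For instance, a minimal sketch selecting the multi-sample F-test (the data dimensions and group sizes here are illustrative only, with \code{B} kept small for speed):
+\preformatted{
+Xf <- matrix(rnorm(120), nrow = 10)   # 10 hypotheses, 12 samples
+grp <- rep(0:2, each = 4)             # three groups of four samples
+mf <- MTP(X = Xf, Y = grp, test = "f", B = 100)
+}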
+
+When \code{robust=TRUE}, non-parametric versions of each test are performed. For the linear models, this means \code{rlm} is used instead of \code{lm}. There is currently no robust version of \code{test=coxph.YvsXZ}. For the t- and F-tests, data values are simply replaced by their ranks. This is equivalent to performing the following familiar named rank-based tests. The conversion after each test is the formula to convert from the MTP test to the statistic reported by the listed R fun [...]
+\describe{
+\item{t.onesamp or t.pair:}{Wilcoxon signed rank, \code{wilcox.test} with \code{y=NULL} or \code{paired=TRUE}, \cr
+conversion: num/n}
+\item{t.twosamp.equalvar:}{Wilcoxon rank sum or Mann-Whitney, \code{wilcox.test}, \cr
+conversion: n2*(num+mean(r1)) - n2*(n2+1)/2}
+\item{f:}{Kruskal-Wallis rank sum, \code{kruskal.test}, \cr
+conversion: num*12/(n*(n-1))}
+\item{f.block:}{Friedman rank sum, \code{friedman.test}, \cr
+conversion: num*12/(K*(K+1))}
+\item{f.twoway:}{Friedman rank sum, \code{friedman.test}, \cr
+conversion: num*12/(K*(K+1))}
+}
+
+The implemented MTPs are based on control of the family-wise error rate, defined as the probability of any false positives. Let Vn denote the (unobserved) number of false positives. Then, control of FWER at level alpha means that Pr(Vn>0)<=alpha. The set of rejected hypotheses under a FWER controlling procedure can be augmented to increase the number of rejections, while controlling other error rates. The generalized family-wise error rate is defined as Pr(Vn>k)<=alpha, and it is clear t [...]
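+
+As a hedged illustration of these options (reusing the small simulated \code{data} and \code{group} objects from the examples below, with \code{B} kept small for speed):
+\preformatted{
+m.gfwer <- MTP(X = data, Y = group, typeone = "gfwer", k = 1, B = 100)
+m.tppfp <- MTP(X = data, Y = group, typeone = "tppfp", q = 0.1, B = 100)
+m.fdr   <- MTP(X = data, Y = group, typeone = "fdr", alpha = 0.05, B = 100)
+}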
+
+In practice, one must choose a method for estimating the test statistics null distribution. We have implemented several versions of an ordinary non-parametric bootstrap estimator and a permutation estimator (which makes sense in certain settings, see references). The non-parametric bootstrap estimator (default) provides asymptotic control of the type I error rate for any data generating distribution, whereas the permutation estimator requires the subset pivotality assumption. One draw ba [...]
+
+For the nonparametric bootstrap distribution with marginal null quantile transformation, the following defaults for \code{marg.null} and \code{marg.par} are available based on choice of test statistics, sample size 'n', and various other parameters:
+\describe{
+\item{t.onesamp:}{t-distribution with df=n-1;}
+\item{t.twosamp.equalvar:}{t-distribution with df=n-2;}
+\item{t.twosamp.unequalvar:}{N(0,1);}
+\item{t.pair:}{t-distribution with df=n-1, where n is the number of unique samples, i.e., the number of observed differences between paired samples;}
+\item{f:}{F-distribution with df1=k-1, df2=n-k, for k groups;}
+\item{f.block:}{NA. Only available with permutation distribution;}
+\item{f.twoway:}{F-distribution with df1=k-1,df2=n-k*l, for k groups and l blocks;}
+\item{lm.XvsZ:}{N(0,1);}
+\item{lm.YvsXZ:}{N(0,1);}
+\item{coxph.YvsXZ:}{N(0,1);}
+\item{t.cor:}{t-distribution with df=n-2;}
+\item{z.cor:}{N(0,1).}
+}
+
+The above defaults, however, can be overridden by manually setting values of \code{marg.null} and \code{marg.par}.  In the case of \code{nulldist='ic'}, and \code{ic.quant.trans=TRUE}, the defaults are the same as above except that 'lm.XvsZ' and 'lm.YvsXZ' are replaced with t-distributions with df=n-p.
+  
+Given observed test statistics, a type I error rate (with nominal level), and a test statistics null distribution, MTPs provide adjusted p-values, cutoffs for test statistics, and possibly confidence regions for estimates. Four methods are implemented, based on minima of p-values and maxima of test statistics. Only the step-down methods are currently available with the permutation null distribution.
+
+Computation times using a bootstrap null distribution are slower when weights are used for one- and two-sample tests. Computation times when using a bootstrap null distribution are also slower for the tests \code{lm.XvsZ}, \code{lm.YvsXZ}, and \code{coxph.YvsXZ}.
+
+To execute the bootstrap on a computer cluster, a cluster object generated with \code{makeCluster} in the package \code{snow} may be used as the argument for cluster. Alternatively, the number of nodes to use in the computer cluster can be used as the argument to cluster. In this case, \code{type} must be specified and a cluster will be created. In both cases, \code{Biobase} and \code{multtest} will be loaded onto each cluster node if these libraries are located in a directory in the sta [...]
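+
+A minimal sketch of the second route, assuming the \code{snow} package is installed (the node count and interface type here are illustrative choices):
+\preformatted{
+library(snow)
+cl <- makeCluster(2, type = "SOCK")
+m.cl <- MTP(X = data, Y = group, B = 100, cluster = cl)
+stopCluster(cl)
+}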
+
+Finally, note that the old argument \code{csnull} is now DEPRECATED as of \code{multtest} v. 2.0.0 given the expanded null distribution options described above.  Previously, this argument was an indicator of whether the bootstrap estimated test statistics distribution should be centered and scaled (to produce a null distribution) or not. If \code{csnull=FALSE}, the (raw) non-null bootstrap estimated test statistics distribution was returned.  If the non-null bootstrap distribution should [...]
+}
+
+\value{
+An object of class \code{MTP}, with the following slots:
+
+    \item{\code{statistic}}{Object of class \code{numeric}, observed test statistics for each hypothesis, specified by the values of the \code{MTP} arguments \code{test}, \code{robust}, \code{standardize}, and \code{psi0}.}
+    \item{\code{estimate}}{For the test of single-parameter null hypotheses using t-statistics (i.e., not the F-tests), the numeric vector of estimated parameters corresponding to each hypothesis, e.g. means, differences in means, regression parameters.}
+    \item{\code{sampsize}}{Object of class \code{numeric}, number of columns (i.e. observations) in the input data set.}
+    \item{\code{rawp}}{Object of class \code{numeric}, unadjusted, marginal p-values for each hypothesis.}
+    \item{\code{adjp}}{Object of class \code{numeric}, adjusted (for multiple testing) p-values for each hypothesis (computed only if the \code{get.adjp} argument is TRUE).}
+    \item{\code{conf.reg}}{For the test of single-parameter null hypotheses using t-statistics (i.e., not the F-tests), the numeric array of lower and upper simultaneous confidence limits for the parameter vector, for each value of the nominal Type I error rate \code{alpha} (computed only if the \code{get.cr} argument is TRUE).}
+    \item{\code{cutoff}}{The numeric matrix of cut-offs for the vector of test statistics for each value of the nominal Type I error rate \code{alpha} (computed only if the \code{get.cutoff} argument is TRUE).}
+   \item{\code{reject}}{Object of class \code{'matrix'}, rejection indicators (TRUE for a rejected null hypothesis), for each value of the nominal Type I error rate \code{alpha}.}
+    \item{\code{rawdist}}{The numeric matrix for the estimated nonparametric non-null test statistics distribution (returned only if \code{keep.rawdist=TRUE} and if \code{nulldist} is one of 'boot.ctr', 'boot.cs', or 'boot.qt'). This slot must not be empty if one wishes to call \code{update} to change choice of bootstrap-based null distribution.}
+    \item{\code{nulldist}}{The numeric matrix for the estimated test statistics null distribution (returned only if \code{keep.nulldist=TRUE}; option not currently available for the permutation null distribution, i.e., \code{nulldist='perm'}). By default (i.e., for \code{nulldist='boot.cs'}), the entries of \code{nulldist} are the null value shifted and scaled bootstrap test statistics, with one null test statistic value for each hypothesis (rows) and bootstrap iteration (columns).}
+    \item{\code{nulldist.type}}{Character value describing which choice of null distribution was used to generate the MTP results.  Takes on one of the values of the original \code{nulldist} argument in the call to MTP, i.e., 'boot.cs', 'boot.ctr', 'boot.qt', 'ic', or 'perm'.}
+   \item{\code{marg.null}}{If \code{nulldist='boot.qt'}, a character value returning which choice of marginal null distribution was used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{marg.par}}{If \code{nulldist='boot.qt'}, a numeric matrix returning the parameters of the marginal null distribution(s) used by the MTP.  Can be used to check default values or to ensure manual settings were correctly applied.}
+   \item{\code{call}}{Object of class \code{call}, the call to the MTP function.}
+    \item{\code{seed}}{An integer or vector for specifying the state of the random number generator used to create the resampled datasets. The seed can be reused for reproducibility in a repeat call to \code{MTP}. This argument is currently used only for the bootstrap null distribution (i.e., for \code{nulldist="boot.xx"}). See \code{?set.seed} for details.}
+}
+
+\references{
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Augmentation Procedures for Control of the Generalized Family-Wise Error Rate and Tail Probabilities for the Proportion of False Positives, Statistical Applications in Genetics and Molecular Biology, 3(1). 
+\url{http://www.bepress.com/sagmb/vol3/iss1/art15/}
+
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Multiple Testing. Part II. Step-Down Procedures for Control of the Family-Wise Error Rate, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art14/}
+
+S. Dudoit, M.J. van der Laan, K.S. Pollard (2004), Multiple Testing. Part I. Single-Step Procedures for Control of General Type I Error Rates, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art13/}
+
+K.S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+
+M.J. van der Laan and A.E. Hubbard (2006), Quantile-function Based Null Distributions in Resampling Based Multiple Testing, Statistical Applications in Genetics and Molecular Biology, 5(1).
+\url{http://www.bepress.com/sagmb/vol5/iss1/art14/}
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008.
+
+}
+
+\author{Katherine S. Pollard and Houston N. Gilbert with design contributions from Sandra Taylor, Sandrine Dudoit and Mark J. van der Laan.}
+
+\note{Thank you to Peter Dimitrov for suggestions about the code.}
+
+\seealso{\code{\link{EBMTP}}, \code{\link{MTP-class}}, \code{\link{MTP-methods}}, \code{\link{mt.minP}}, \code{\link{mt.maxT}}, \code{\link{ss.maxT}}, \code{\link{fwer2gfwer}}}
+
+\examples{
+ 
+#data 
+set.seed(99)
+data<-matrix(rnorm(90),nr=9)
+group<-c(rep(1,5),rep(0,5))
+
+#fwer control with centered and scaled bootstrap null distribution 
+#(B=100 for speed)
+m1<-MTP(X=data,Y=group,alternative="less",B=100,method="sd.minP")
+print(m1)
+summary(m1)
+par(mfrow=c(2,2))
+plot(m1,top=9)
+
+#fwer control with quantile transformed bootstrap null distribution
+#default settings = N(0,1) marginal null distribution
+m2<-MTP(X=data,Y=group,alternative="less",B=100,method="sd.minP",
+	nulldist="boot.qt",keep.rawdist=TRUE)
+
+#fwer control with quantile transformed bootstrap null distribution
+#marginal null distribution and df parameters manually set, 
+#first all equal, then varying with hypothesis
+m3<-update(m2,marg.null="t",marg.par=10)
+mps<-matrix(c(rep(9,5),rep(10,4)),nr=9,nc=1)
+m4<-update(m2,marg.null="t",marg.par=mps)
+
+m1@nulldist.type
+m2@nulldist.type
+m2@marg.null
+m2@marg.par
+m3@nulldist.type
+m3@marg.null
+m3@marg.par
+m4@nulldist.type
+m4@marg.null
+m4@marg.par
+
+}
+
+\keyword{htest}
+
+
diff --git a/man/boot.null.Rd b/man/boot.null.Rd
new file mode 100644
index 0000000..c8c6a97
--- /dev/null
+++ b/man/boot.null.Rd
@@ -0,0 +1,144 @@
+\name{boot.null}
+\alias{boot.null}
+\alias{boot.resample}
+\alias{center.scale}
+\alias{center.only}
+\alias{quant.trans}
+
+
+\title{Non-parametric bootstrap resampling function in package `multtest'}
+
+\description{Given a data set and a closure, which consists of a function for computing the test statistic and its enclosing environment, this function produces a non-parametric bootstrap estimated test statistics null distribution. The observations in the data are resampled using the ordinary non-parametric bootstrap to produce an estimated test statistics distribution. This distribution is then transformed to produce the null distribution. Options for transforming the nonparame [...]
+}
+
+\usage{
+boot.null(X, label, stat.closure, W = NULL, B = 1000, test, nulldist, theta0 = 0, tau0 = 1, marg.null = NULL, marg.par = NULL, 
+    ncp = 0, perm.mat, alternative = "two.sided", seed = NULL, 
+    cluster = 1, dispatch = 0.05, keep.nulldist, keep.rawdist)
+
+boot.resample(X, label, p, n, stat.closure, W, B, test)
+
+center.only(muboot, theta0, alternative)
+
+center.scale(muboot, theta0, tau0, alternative)
+
+quant.trans(muboot, marg.null, marg.par, ncp, alternative, perm.mat) 
+
+}
+
+\arguments{
+  \item{X}{A matrix, data.frame or ExpressionSet containing the raw data. In the case of an ExpressionSet, \code{exprs(X)} is the data of interest and \code{pData(X)} may contain outcomes and covariates of interest. For \code{boot.resample} \code{X} must be a matrix. For currently implemented tests, one hypothesis is tested for each row of the data.}
+  \item{label}{A vector containing the class labels for t- and F-tests.}
+  \item{stat.closure}{A closure for test statistic computation, like those produced internally by the \code{MTP} function. The closure consists of a function for computing the test statistic and its enclosing environment, with bindings for relevant additional arguments (such as null values, outcomes, and covariates).}
+  \item{W}{A vector or matrix containing non-negative weights to be used in computing the test statistics. If a matrix, \code{W} must be the same dimension as \code{X} with one weight for each value in \code{X}. If a vector, \code{W} may contain one weight for each observation (i.e. column) of \code{X} or one weight for each variable (i.e. row) of \code{X}. In either case, the weights are duplicated appropriately. Weighted F-tests are not available. Default is 'NULL'.}
+  \item{B}{The number of bootstrap iterations (i.e. how many resampled data sets) or the number of permutations (if \code{nulldist} is 'perm'). Can be reduced to increase the speed of computation, at a cost to precision. Default is 1000.}
+  \item{test}{Character string specifying the test statistics to use. See \code{MTP} for a list of tests.}
+  \item{theta0}{The value used to center the test statistics. For tests based on a form of t-statistics, this should be zero (default). For F-tests, this should be 1.}
+  \item{tau0}{The value used to scale the test statistics. For tests based on a form of t-statistics, this should be 1 (default). For F-tests, this should be 2/(K-1), where K is the number of groups. This argument is not used when \code{center.only} is chosen for transforming the raw bootstrap test statistics.}
+  \item{marg.null}{If \code{nulldist='boot.qt'}, the marginal null distribution to use for quantile transformation.  Can be one of 'normal', 't', 'f' or 'perm'.  Default is 'NULL', in which case the marginal null distribution is selected based on choice of test statistics.  Defaults explained below. If 'perm', the user must supply a vector or matrix of test statistics corresponding to another marginal null distribution, perhaps one created externally by the user, and possibly referring t [...]
+  \item{marg.par}{If \code{nulldist='boot.qt'}, the parameters defining the marginal null distribution in \code{marg.null} to be used for quantile transformation.  Default is 'NULL', in which case the values are selected based on choice of test statistics and other available parameters (e.g., sample size, number of groups, etc.).  Defaults explained below.  User can override defaults, in which case a matrix of marginal null distribution parameters can be accepted.  Providing a matrix of  [...]
+  \item{ncp}{If \code{nulldist='boot.qt'}, a value for a possible noncentrality parameter to be used during marginal quantile transformation. Default is 'NULL'.}
+  \item{perm.mat}{If \code{nulldist='boot.qt'} and \code{marg.null='perm'}, a  matrix of user-supplied test statistics from a particular distribution to be used during marginal quantile transformation.  The statistics may represent empirically derived marginal permutation values, may be theoretical values, or may represent a sample from some other suitable choice of marginal null distribution.}
+  \item{alternative}{Character string indicating the alternative hypotheses, by default 'two.sided'. For one-sided tests, use 'less' or 'greater' for null hypotheses of 'greater than or equal' (i.e. alternative is 'less') and 'less than or equal', respectively.}
+  \item{seed}{Integer or vector of integers to be used as argument to \code{set.seed} to set the seed for the random number generator for bootstrap resampling. This argument can be used to repeat exactly a test performed with a given seed. If the seed is specified via this argument, the same seed will be returned in the seed slot of the MTP object created. Else a random seed(s) will be generated, used and returned. Vector of integers used to specify seeds for each node in a cluster used  [...]
+  \item{cluster}{Either the integer 1 or a cluster object created through the package \code{snow}. With \code{cluster=1}, the bootstrap is implemented on a single node. Supplying a cluster object results in the bootstrap being implemented in parallel on the provided nodes. This option is only available for the bootstrap procedure.}
+  \item{csnull}{DEPRECATED as of \code{multtest} v. 2.0.0 given expanded null distribution options. Previously, this argument was an indicator of whether the bootstrap estimated test statistics distribution should be centered and scaled (to produce a null distribution) or not. If \code{csnull=FALSE}, the (raw) non-null bootstrap estimated test statistics distribution was returned.  If the non-null bootstrap distribution should be returned, this object is now stored in the 'rawdist' slot  [...]
+  \item{dispatch}{The number or percentage of bootstrap iterations to dispatch at a time to each node of the cluster if a computer cluster is used. If dispatch is a percentage, \code{B*dispatch} must be an integer. If dispatch is an integer, then \code{B/dispatch} must be an integer. Default is 5 percent.}
+  \item{p}{An integer giving the number of variables of interest to be tested.}
+  \item{n}{An integer giving the total number of samples.}
+  \item{muboot}{A matrix of bootstrapped test statistics.}
+  \item{keep.nulldist}{Logical indicating whether to return the computed bootstrap null distribution, by default 'TRUE'.  Not available for \code{nulldist}='perm'. Note that this matrix can be quite large.}
+  \item{keep.rawdist}{Logical indicating whether to return the computed non-null (raw) bootstrap distribution, by default 'FALSE'.  Not available when using \code{nulldist}='perm' or 'ic'.  Note that this matrix can become quite large.  If one wishes to use subsequent calls to \code{update} in which one updates choice of bootstrap null distribution, \code{keep.rawdist} must be TRUE.  To save on memory, \code{update} only requires that one of \code{keep.nulldist} or \code{keep.rawdist [...]
+}
+
+\value{
+A list with the following elements:
+  \item{rawboot}{If \code{keep.rawdist=TRUE}, the matrix of non-null, non-transformed bootstrap test statistics.  If 'FALSE', an empty matrix with dimension 0-by-0.}
+  \item{muboot}{If \code{keep.nulldist=TRUE} (default), the matrix of appropriately transformed null test statistics as given by one of \code{center.scale}, \code{center.only}, or \code{quant.trans}. This is the estimated joint test statistics null distribution. \cr
+
+Both list elements \code{rawboot} and \code{muboot} contain matrices of dimension the number of hypotheses (typically \code{nrow(X)}) by the number of bootstrap iterations (\code{B}). Each row of \code{muboot} is the bootstrap estimated marginal null distribution for a single hypothesis.  For \code{boot.null} and \code{center.scale}, each column of \code{muboot} is a centered and scaled resampled vector of test statistics.  For \code{boot.null} and \code{center.only}, each column of \cod [...]
+
+For \code{boot.null} and \code{quant.trans}, each column of \code{muboot} is a marginal null quantile-transformed resampled vector of test statistics.  For each choice of marginal null distribution (defined by \code{marg.null} and \code{marg.par}), a random sample of size B is drawn and then rearranged based on the ranks of the marginal test statistics bootstrap distribution corresponding to each hypothesis (typically within rows of \code{X}).  This means that using \code{quant.trans} wi [...]
+
+\describe{
+\item{t.onesamp:}{t-distribution with df=n-1;}
+\item{t.twosamp.equalvar:}{t-distribution with df=n-2;}
+\item{t.twosamp.unequalvar:}{N(0,1);}
+\item{t.pair:}{t-distribution with df=n-1, where n is the number of unique samples, i.e., the number of observed differences between paired samples;}
+\item{f:}{F-distribution with df1=k-1, df2=n-k, for k groups;}
+\item{f.block:}{NA. Only available with permutation distribution;}
+\item{f.twoway:}{F-distribution with df1=k-1,df2=n-k*l, for k groups and l blocks;}
+\item{lm.XvsZ:}{N(0,1);}
+\item{lm.YvsXZ:}{N(0,1);}
+\item{coxph.YvsXZ:}{N(0,1);}
+\item{t.cor:}{t-distribution with df=n-2;}
+\item{z.cor:}{N(0,1).}
+}
+The above defaults, however, can be overridden by manually setting values of \code{marg.null} and \code{marg.par}. \cr 
+
+The \code{rawboot} and \code{muboot} objects are returned in the slots \code{rawdist} and \code{nulldist} of an object of class \code{MTP} or \code{EBMTP} when the arguments \code{keep.rawdist} or \code{keep.nulldist} to the \code{MTP} function are TRUE. For \code{boot.resample} a matrix of bootstrap samples prior to null transformation is returned.
+}
+}
+
+\references{
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Augmentation Procedures for Control of the Generalized Family-Wise Error Rate and Tail Probabilities for the Proportion of False Positives, Statistical Applications in Genetics and Molecular Biology, 3(1). 
+\url{http://www.bepress.com/sagmb/vol3/iss1/art15/}
+
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Multiple Testing. Part II. Step-Down Procedures for Control of the Family-Wise Error Rate, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art14/}
+
+S. Dudoit, M.J. van der Laan, K.S. Pollard (2004), Multiple Testing. Part I. Single-Step Procedures for Control of General Type I Error Rates, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art13/}
+
+Katherine S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+
+M.J. van der Laan and A.E. Hubbard (2006), Quantile-function Based Null Distributions in Resampling Based Multiple Testing, Statistical Applications in Genetics and Molecular Biology, 5(1).
+\url{http://www.bepress.com/sagmb/vol5/iss1/art14/}
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008.
+
+}
+
+\author{Katherine S. Pollard, Houston N. Gilbert, and Sandra Taylor, with design contributions from Sandrine Dudoit and Mark J. van der Laan.}
+
+\note{Thank you to Duncan Temple Lang and Peter Dimitrov for suggestions about the code.}
+
+\seealso{\code{\link{corr.null}}, \code{\link{MTP}}, \code{\link{MTP-class}}, \code{\link{EBMTP}}, \code{\link{EBMTP-class}}, \code{\link{get.Tn}}, \code{\link{ss.maxT}}, \code{\link{mt.sample.teststat}}, \code{\link{wapply}}, \code{\link{boot.resample}}}
+
+\examples{
+
+set.seed(99)
+data<-matrix(rnorm(90),nr=9)
+
+#closure
+ttest<-meanX(psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE)
+
+#test statistics
+obs<-get.Tn(X=data,stat.closure=ttest,W=NULL)
+
+#bootstrap null distribution (B=100 for speed, default nulldist, "boot.cs")
+nulldistn<-boot.null(X=data,W=NULL,stat.closure=ttest,B=100,test="t.onesamp",
+	nulldist="boot.cs",theta0=0,tau0=1,alternative="two.sided",
+	keep.nulldist=TRUE,keep.rawdist=FALSE)$muboot
+
+#bootstrap null distribution with marginal quantile transformation showing
+#default values that are passed to marg.null and marg.par arguments
+nulldistn.qt<-boot.null(X=data,W=NULL,stat.closure=ttest,B=100,test="t.onesamp",
+	nulldist="boot.qt",theta0=0,tau0=1,alternative="two.sided",
+	keep.nulldist=TRUE,keep.rawdist=FALSE,marg.null="t",
+	marg.par=matrix(9,nr=9,nc=1))$muboot
+
+#unadjusted p-values
+rawp<-apply((obs[1,]/obs[2,])<=nulldistn,1,mean)
+sum(rawp<=0.01)
+
+rawp.qt<-apply((obs[1,]/obs[2,])<=nulldistn.qt,1,mean)
+sum(rawp.qt<=0.01)
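+
+#hedged sketch of the rank-rearrangement idea behind quant.trans:
+#rank.rearrange is a hypothetical helper (not a multtest function)
+#that orders a marginal null sample by the ranks of one row of
+#bootstrap statistics
+rank.rearrange<-function(boot.row,marg.sample){
+	sort(marg.sample)[rank(boot.row,ties.method="first")]
+}
+row.null<-rank.rearrange(nulldistn[1,],rt(100,df=9))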
+}
+
+\keyword{manip}
+\keyword{internal}
+
+
+
diff --git a/man/corr.null.Rd b/man/corr.null.Rd
new file mode 100644
index 0000000..30274d7
--- /dev/null
+++ b/man/corr.null.Rd
@@ -0,0 +1,90 @@
+\name{corr.null}
+\alias{corr.null}
+\alias{tQuantTrans}
+
+\title{Function to estimate a test statistics joint null distribution for t-statistics via the vector influence curve}
+
+\description{For a broad class of testing problems, such as the test of single-parameter null hypotheses using t-statistics, a proper, asymptotically valid test statistics joint null distribution is the multivariate Gaussian distribution with mean vector zero and covariance matrix equal to the correlation matrix of the vector influence curve for the estimator of the parameter of interest.  The function \code{corr.null} estimates the correlation matrix of the vector influence curve for su [...]
+
+
+\usage{
+corr.null(X, W = NULL, Y = NULL, Z = NULL, test = "t.twosamp.unequalvar", 
+    alternative = "two.sided", use = "pairwise", B = 1000, MVN.method = "mvrnorm", 
+    penalty = 1e-06, ic.quant.trans = FALSE, marg.null = NULL, 
+    marg.par = NULL, perm.mat = NULL) 
+}
+
+\arguments{
+  \item{X}{A matrix, data.frame or ExpressionSet containing the raw data. In the case of an ExpressionSet, \code{exprs(X)} is the data of interest and \code{pData(X)} may contain outcomes and covariates of interest. For most currently implemented tests (exception: tests involving correlation parameters), one hypothesis is tested for each row of the data.}
+  \item{W}{A matrix containing non-negative weights to be used in computing the test statistics.  Must be same dimension as \code{X}.}
+  \item{Y}{A vector, factor, or \code{Surv} object containing the outcome of interest.}
+  \item{Z}{A vector, factor, or matrix containing covariate data to be used in linear regression models. Each variable should be in one column, so that \code{nrow(Z)=ncol(X)}. By the time the function is called, this argument contains a 'design matrix' with the variable to be tested in the first column, additional covariates in the remaining columns, and no intercept column.}
+  \item{test}{Character string specifying the test statistics to use, by default 't.twosamp.unequalvar'. See details (below) for a list of tests.}
+  \item{alternative}{Character string indicating the alternative hypotheses, by default 'two.sided'. For one-sided tests, use 'less' or 'greater' for null hypotheses of 'greater than or equal' (i.e. alternative is 'less') and 'less than or equal', respectively.}
+  \item{use}{Similar to the options in \code{cor}, a character string giving a method for computing covariances in the presence of missing values.  Default is 'pairwise', which allows for the covariance/correlation matrix to be calculated using the most information possible when \code{NA}s are present.} 
+  \item{B}{The number of samples to be drawn from the normal distribution. Default is 1000.}
+  \item{MVN.method}{Character string, either 'mvrnorm' or 'Cholesky', designating how correlated normal test statistics are to be generated.  Selecting 'mvrnorm' uses the function of the same name found in the \code{MASS} library, whereas 'Cholesky' relies on a Cholesky decomposition. Default is 'mvrnorm'.}
+  \item{penalty}{If \code{MVN.method='Cholesky'}, the value in \code{penalty} is added to all diagonal elements of the estimated test statistics correlation matrix to ensure that the matrix is positive definite and that internal calls to \code{'chol'} do not return an error.  Default is 1e-6.}
+  \item{ic.quant.trans}{A logical indicating whether or not a marginal quantile transformation using a t-distribution or user-supplied marginal distribution (stored in \code{perm.mat}) should be applied to the multivariate normal null distribution.  Defaults for \code{marg.null} and \code{marg.par} exist, but can also be specified by the user (see below). Default is 'FALSE'.}
+  \item{marg.null}{If \code{ic.quant.trans=TRUE}, a character string naming the marginal null distribution to use for quantile transformation.  Can be one of 't' or 'perm'.  Default is 'NULL', in which case the marginal null distribution is selected based on choice of test statistics.  Defaults explained below. If 'perm', the user must supply a vector or matrix of test statistics corresponding to another marginal null distribution, perhaps one created externally by the user, and possibl [...]
+  \item{marg.par}{If \code{ic.quant.trans=TRUE}, the parameters defining the marginal null distribution in \code{marg.null} to be used for quantile transformation.  Default is 'NULL', in which case the values are selected based on choice of test statistics and other available parameters (e.g., sample size, number of groups, etc.).  Defaults explained below.  User can override defaults, in which case a matrix of marginal null distribution parameters must be provided.  Providing a matrix a [...]
+  \item{perm.mat}{If \code{ic.quant.trans=TRUE},  a matrix of user-supplied test statistics from a particular distribution to be used during marginal quantile transformation.  Supplying a vector of test statistics will apply the same vector to each hypothesis.  The statistics may represent empirically derived marginal permutation values, may be theoretical values, or may represent a sample from some other suitable choice of marginal null distribution.}
+}
+
+\details{
+This function is called internally when the argument \code{nulldist='ic'} is evaluated in the main user-level functions \code{MTP} or \code{EBMTP}.  Formatting of the data objects \code{X}, \code{W}, \code{Y}, and especially \code{Z} occurs at the beginning of execution of the main user-level functions.\cr
+
+Based on the value of \code{test}, the appropriate correlation matrix of the vector influence curve is calculated. Once the correlation matrix is obtained, one may sample vectors of null test statistics directly from a multivariate normal distribution rather than relying on permutation-based or bootstrap-based resampling.  Because the Gaussian distribution is continuous, we expect this choice of null distribution to suffer less from discreteness than either the permutation or the bootstr [...]
+
+Because the influence curve null distributions have been implemented for parametric, standardized t-statistics, the options \code{robust} and \code{standardize} are not allowed. Influence curve null distributions are available for the following values of \code{test}: 't.onesamp', 't.pair', 't.twosamp.equalvar', 't.twosamp.unequalvar', 'lm.XvsZ', 'lm.YvsXZ', 't.cor', and 'z.cor'.\cr
+
+In the simpler cases involving one-sample and two-sample tests of means, the correlation matrices are obtained via calls to \code{cor}.  For two-sample tests, the correlation matrix corresponds to the following transformation of the group-specific covariance matrices: cov(X(group1))/n1 + cov(X(group2))/n2, where n1 and n2 are sample sizes of each group. When weights are present, the internal function \code{IC.CorXW.NA} is called to calculate weighted estimates of the (group) covariance m [...]
+
+For linear regression models, \code{corr.null} calculates the vector influence curve associated
+with each subject/sample.  The vector has length equal to the number of hypotheses.  The internal function \code{IC.Cor.NA} is used to calculate IC_n * (IC_n)^t in a manner which allows for NA-handling when the influence curve may contain missing elements.  For linear regression models of the form E[Y|X], IC_n takes the form E[(X^t)X]^(-1) (X^t)_i (Y_i - Y_i-hat).  Influence curves for correlation parameters are more complicated, and the user is referred to the references below.\cr
+
+Once the correlation matrix sigma' corresponding to the variance-covariance matrix of the vector influence curve sigma = IC_n * (IC_n)^t is obtained, one may sample from N(0,sigma') to obtain null test statistics.\cr
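+
+As a hedged sketch of this final sampling step for the one-sample case (a simplification of what \code{corr.null} computes internally; \code{MASS::mvrnorm} is the same routine selected by \code{MVN.method='mvrnorm'}):
+\preformatted{
+library(MASS)
+X <- matrix(rnorm(10 * 50), nrow = 10)
+sigma.prime <- cor(t(X), use = "pairwise")  # correlation across hypotheses
+nulls <- t(mvrnorm(n = 1000, mu = rep(0, 10), Sigma = sigma.prime))
+dim(nulls)                                  # hypotheses by samples
+}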
+
+If \code{ic.quant.trans=TRUE}, the matrix of null test statistics can be quantile transformed to produce a matrix which accounts for the joint dependencies between test statistics (down columns), but which has marginal t-distributions (across rows).  If \code{marg.null} and \code{marg.par} are not specified (=NULL), the following default t-distributions are applied:\cr
+
+\describe{
+\item{t.onesamp}{df=n-1;}
+\item{t.pair}{df=n-1, where n is the number of unique samples, i.e., the number of observed differences between paired samples;}
+\item{t.twosamp.equalvar}{df=n-2;}
+\item{t.twosamp.unequalvar}{df=n-1; N.B., this is not recommended, since the effective degrees of freedom are unknown.  With sufficiently large n, a normal approximation should yield similar results.}
+\item{lm.XvsZ}{df=n-p, where p is the number of variables in the regression equation;}
+\item{lm.YvsXZ}{df=n-p, where p is the number of variables in the regression equation;}
+\item{t.cor}{df=n-2;}
+\item{z.cor}{N.B., also not recommended.  Fisher's z-statistics are already normally distributed.  Marginal transformation to a t-distribution makes little sense.}
+}
+}
+
+\value{
+A matrix of null test statistics with dimension the number of hypotheses (typically \code{nrow(X)}) by the number of desired samples (\code{B}).
+}
+
+\references{
+K.S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+
+S. Dudoit and M.J. van der Laan.  Multiple Testing Procedures and Applications to Genomics.  Springer Series in Statistics. Springer, New York, 2008.
+
+H.N. Gilbert, M.J. van der Laan, and S. Dudoit, "Joint Multiple Testing Procedures for Inferring Genetic Networks from Lower-Order Conditional Independence Graphs" (2009). \emph{In preparation.}
+}
+
+\author{Houston N. Gilbert}
+
+\seealso{\code{\link{boot.null}}, \code{\link{MTP}}, \code{\link{MTP-class}}, \code{\link{EBMTP}}, \code{\link{EBMTP-class}}, \code{\link{get.Tn}}, \code{\link{ss.maxT}}, \code{\link{mt.sample.teststat}}, \code{\link{wapply}}, \code{\link{boot.resample}}}
+
+\examples{
+set.seed(99)
+data <- matrix(rnorm(10*50),nr=10,nc=50)
+nulldistn.mvrnorm <- corr.null(data,test="t.onesamp",alternative="greater",B=5000)
+nulldistn.chol <- corr.null(data,test="t.onesamp",MVN.method="Cholesky",penalty=1e-9)
+nulldistn.t <- corr.null(data,test="t.onesamp",ic.quant.trans=TRUE)
+dim(nulldistn.mvrnorm)
+
+}
+
+\keyword{htest}
+\keyword{internal}
diff --git a/man/fwer2gfwer.Rd b/man/fwer2gfwer.Rd
new file mode 100755
index 0000000..35dab26
--- /dev/null
+++ b/man/fwer2gfwer.Rd
@@ -0,0 +1,77 @@
+\name{fwer2gfwer}
+
+\alias{fwer2gfwer}
+\alias{fwer2tppfp}
+\alias{fwer2fdr}
+
+\title{Function to compute augmentation MTP adjusted p-values}
+
+\description{Augmentation multiple testing procedures (AMTPs) to control the generalized family-wise error rate (gFWER), the tail probability of the proportion of false positives (TPPFP), and false discovery rate (FDR) based on any initial procedure controlling the family-wise error rate (FWER). AMTPs are obtained by adding suitably chosen null hypotheses to the set of null hypotheses already rejected by an initial FWER-controlling MTP. A function for control of FDR given any TPPFP cont [...]
+}
+
+\usage{
+fwer2gfwer(adjp, k = 0)
+
+fwer2tppfp(adjp, q = 0.05)
+
+fwer2fdr(adjp, method = "both", alpha = 0.05)
+
+}
+
+
+\arguments{
+  \item{adjp}{Numeric vector of adjusted p-values from any FWER-controlling procedure.}
+  \item{k}{Maximum number of false positives.}
+  \item{q}{Maximum proportion of false positives.}
+  \item{method}{Character string indicating which FDR controlling method should be used. The options are "conservative" for a conservative, general method, "restricted" for a less conservative, but restricted method, or "both" (default) for both.}
+  \item{alpha}{Nominal level for an FDR controlling procedure (can be a vector of levels).}
+}
+
+\details{
+The gFWER and TPPFP functions control Type I error rates defined as tail probabilities for functions g(Vn,Rn) of the numbers of Type I errors (Vn) and rejected hypotheses (Rn). The gFWER and TPPFP correspond to the special cases g(Vn,Rn)=Vn (number of false positives) and g(Vn,Rn)=Vn/Rn (proportion of false positives among the rejected hypotheses), respectively. 
+
+Adjusted p-values for an AMTP are simply shifted versions of the adjusted p-values of the original FWER-controlling MTP. For control of gFWER (Pr(Vn>k)), for example, the first \code{k} adjusted p-values are set to zero and the remaining p-values are the adjusted p-values of the FWER-controlling MTP shifted by k. One can therefore build on the large pool of available FWER-controlling procedures, such as the single-step and step-down maxT and minP procedures.
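+
+For intuition, a minimal sketch of this shift (a hypothetical helper, not the exported \code{fwer2gfwer}):
+\preformatted{
+gfwer.shift <- function(adjp, k = 0) {
+  ord <- order(adjp)
+  out <- numeric(length(adjp))
+  out[ord] <- c(rep(0, k), adjp[ord][seq_len(length(adjp) - k)])
+  out  # k smallest adjusted p-values set to 0, the rest shifted by k
+}
+}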
+
+Given a FWER-controlling MTP, the FDR can be conservatively controlled at level \code{alpha} by considering the corresponding TPPFP AMTP with \code{q=alpha/2} at level \code{alpha/2}, so that Pr(Vn/Rn>alpha/2)<=alpha/2. A less conservative procedure (\code{method="restricted"}) is obtained by using an AMTP controlling the TPPFP with \code{q=1-sqrt(1-alpha)} at level \code{1-sqrt(1-alpha)}, so that Pr(Vn/Rn>1-sqrt(1-alpha))<=1-sqrt(1-alpha). The first, more general method can be used with any p [...]
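+
+Numerically, at the default \code{alpha=0.05} the two TPPFP parameters referenced above work out as:
+\preformatted{
+alpha <- 0.05
+alpha/2              # conservative method: q = 0.025
+1 - sqrt(1 - alpha)  # restricted method: q is about 0.0253
+}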
+}
+
+\value{
+For \code{fwer2gfwer} and \code{fwer2tppfp}, a numeric vector of AMTP adjusted p-values. For \code{fwer2fdr}, a list with two components: (i) a numeric vector (or a \code{length(adjp)} by 2 matrix if \code{method="both"}) of adjusted p-values for each hypothesis, (ii) a \code{length(adjp)} by \code{length(alpha)} matrix (or \code{length(adjp)} by \code{length(alpha)} by 2 array if \code{method="both"}) of indicators of whether each hypothesis is rejected at each value of the argument \co [...]
+}
+
+\references{
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Augmentation Procedures for Control of the Generalized Family-Wise Error Rate and Tail Probabilities for the Proportion of False Positives, Statistical Applications in Genetics and Molecular Biology, 3(1). 
+\url{http://www.bepress.com/sagmb/vol3/iss1/art15/}
+
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Multiple Testing. Part II. Step-Down Procedures for Control of the Family-Wise Error Rate, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art14/}
+
+S. Dudoit, M.J. van der Laan, K.S. Pollard (2004), Multiple Testing. Part I. Single-Step Procedures for Control of General Type I Error Rates, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art13/}
+
+Katherine S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+}
+
+\author{Katherine S. Pollard with design contributions from Sandrine Dudoit and Mark J. van der Laan.}
+
+\seealso{\code{\link{MTP}}, \code{\link{MTP-class}}, \code{\link{MTP-methods}}, \code{\link{mt.minP}}, \code{\link{mt.maxT}}}
+
+\examples{
+
+data<-matrix(rnorm(200),nr=20)
+group<-c(rep(0,5),rep(1,5))
+fwer.mtp<-MTP(X=data,Y=group)
+fwer.adjp<-fwer.mtp@adjp
+gfwer.adjp<-fwer2gfwer(adjp=fwer.adjp,k=c(1,5,10))
+compare.gfwer<-cbind(fwer.adjp,gfwer.adjp)
+mt.plot(adjp=compare.gfwer,teststat=fwer.mtp@statistic,proc=c("gFWER(0)","gFWER(1)","gFWER(5)","gFWER(10)"),col=1:4,lty=1:4)
+title("Comparison of Single-step MaxT gFWER Controlling Methods")
+
+}
+
+\keyword{htest}
+\keyword{internal}
+
+
+
diff --git a/man/get.index.Rd b/man/get.index.Rd
new file mode 100755
index 0000000..7348216
--- /dev/null
+++ b/man/get.index.Rd
@@ -0,0 +1,45 @@
+\name{get.index}
+
+\alias{get.index}
+
+\title{Function to compute indices for ordering hypotheses in Package 'multtest'}
+
+\description{
+The hypotheses tested in a multiple testing procedure (MTP) can be ordered based on the output of that procedure. This function orders hypotheses based on adjusted p-values, then unadjusted p-values (to break ties in adjusted p-values), and finally test statistics (to break remaining ties).
+}
+
+\usage{
+get.index(adjp, rawp, stat)
+}
+
+\arguments{
+  \item{adjp}{Numeric vector of adjusted p-values.}
+  \item{rawp}{Numeric vector of unadjusted ("raw") marginal p-values.}
+  \item{stat}{Numeric vector of test statistics.}
+}
+
+\value{
+Numeric vector of indices so that the hypotheses can be ordered according to significance (smallest p-values and largest test statistics first). This function is used in the plot method for objects of class \code{MTP} to order adjusted p-values for graphical summaries. The summary method for objects of class \code{MTP} will return these indices as its second component.
+}
+
+\author{Katherine S. Pollard}
+
+\seealso{\code{\link{MTP}}, \code{\link{plot,MTP,ANY-method}}, \code{\link{summary,MTP-method}}}
+
+\examples{
+data<-matrix(rnorm(200),nr=20)
+mtp<-MTP(X=data,test="t.onesamp")
+index<-get.index(adjp=mtp@adjp,rawp=mtp@rawp,stat=mtp@statistic)
+mtp@statistic[index]
+mtp@estimate[index]
+apply(data[index,],1,mean)
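+
+#the same ordering as a one-line base-R sketch (an assumption for
+#illustration: remaining ties broken by larger absolute statistics)
+index2<-order(mtp@adjp,mtp@rawp,-abs(mtp@statistic))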
+}
+
+\keyword{htest}
+\keyword{internal}
+
+
+
+
+
+
diff --git a/man/golub.Rd b/man/golub.Rd
new file mode 100755
index 0000000..0b19350
--- /dev/null
+++ b/man/golub.Rd
@@ -0,0 +1,32 @@
+\name{golub}
+\alias{golub}
+\alias{golub.cl}
+\alias{golub.gnames}
+
+\title{Gene expression dataset from Golub et al. (1999)}
+
+\usage{
+data(golub)
+}
+
+\description{
+  Gene expression data (3051 genes and 38 tumor mRNA samples) from the
+  leukemia microarray study of Golub et al. (1999). Pre-processing
+  was done as described in Dudoit et al. (2002). The R code for pre-processing is available
+in the file \url{../doc/golub.R}.}
+
+\value{
+  \item{golub}{matrix of gene expression levels for the 38 tumor mRNA samples, rows correspond to genes (3051 genes) and columns to mRNA samples.}
+  \item{golub.cl}{numeric vector indicating the tumor class, 27 acute lymphoblastic leukemia (ALL) cases (code 0) and 11 acute myeloid leukemia (AML) cases (code 1). }
+  \item{golub.gnames}{a matrix containing the names of the 3051 genes for the expression matrix \code{golub}. The three columns correspond to the gene \code{index}, \code{ID}, and \code{Name}, respectively.
+  }
+}
+
+\source{Golub et al. (1999). Molecular classification of cancer: class
+discovery and class prediction by gene expression
+monitoring, \emph{Science}, Vol. 286:531-537.\cr
+\url{http://www-genome.wi.mit.edu/MPR/}.}
+
+\references{S. Dudoit, J. Fridlyand, and T. P. Speed (2002). Comparison of discrimination methods for the  classification of tumors using gene expression data. \emph{Journal of the American Statistical Association}, Vol. 97, No. 457, p. 77--87. }
+\keyword{datasets}  
diff --git a/man/meanX.Rd b/man/meanX.Rd
new file mode 100755
index 0000000..fa059ec
--- /dev/null
+++ b/man/meanX.Rd
@@ -0,0 +1,114 @@
+\name{meanX}
+\alias{meanX}
+\alias{diffmeanX}
+\alias{FX}
+\alias{blockFX}
+\alias{twowayFX}
+\alias{lmX}
+\alias{lmY}
+\alias{coxY}
+\alias{get.Tn}
+
+\title{Functions to create test statistic closures and apply them to data}
+
+\description{
+The package \code{multtest} uses closures in the function \code{MTP} to compute test statistics. The closure used depends on the value of the argument \code{test}. These functions create the closures for different tests, given any additional variables, such as outcomes or covariates. The function \code{get.Tn} calls \code{wapply} to apply one of these closures to observed data (and possibly weights).  \cr
+
+One exception to how test statistics are calculated in \code{multtest} involves tests of correlation parameters, where the change of dimensionality between the p variables in \code{X} and the p-choose-2 hypotheses corresponding to the number of pairwise correlations presents a challenge.  In this case, the test statistics are calculated directly in \code{corr.Tn} and returned in a manner similar to the test statistic function closures.  No resampling is done either, since the null distri [...]
+}
+
+\usage{
+meanX(psi0 = 0, na.rm = TRUE, standardize = TRUE, 
+alternative = "two.sided", robust = FALSE)
+
+diffmeanX(label, psi0 = 0, var.equal = FALSE, na.rm = TRUE, 
+standardize = TRUE, alternative = "two.sided", robust = FALSE)
+
+FX(label, na.rm = TRUE, robust = FALSE)
+
+blockFX(label, na.rm = TRUE, robust = FALSE)
+
+twowayFX(label, na.rm = TRUE, robust = FALSE)
+
+lmX(Z = NULL, n, psi0 = 0, na.rm = TRUE, standardize = TRUE, 
+alternative = "two.sided", robust = FALSE)
+
+lmY(Y, Z = NULL, n, psi0 = 0, na.rm = TRUE, standardize = TRUE, 
+alternative = "two.sided", robust = FALSE)
+
+coxY(surv.object, strata = NULL, psi0 = 0, na.rm = TRUE, standardize = TRUE, 
+alternative = "two.sided", init = NULL, method = "efron")
+
+get.Tn(X, stat.closure, W = NULL)
+
+corr.Tn(X, test, alternative, use = "pairwise")
+}
+
+\arguments{
+  \item{X}{A matrix, data.frame or ExpressionSet containing the raw data. In the case of an ExpressionSet, \code{exprs(X)} is the data of interest and \code{pData(X)} may contain outcomes and covariates of interest. For currently implemented tests, one hypothesis is tested for each row of the data.}
+  \item{W}{A vector or matrix containing non-negative weights to be used in computing the test statistics. If a matrix, \code{W} must be the same dimension as \code{X} with one weight for each value in \code{X}. If a vector, \code{W} may contain one weight for each observation (i.e. column) of \code{X} or one weight for each variable (i.e. row) of \code{X}. In either case, the weights are duplicated appropriately. Weighted F-tests are not available. Default is 'NULL'.}
+  \item{label}{A vector containing the class labels for t- and F-tests. For the \code{blockFX} function, observations are divided into \code{l} blocks of \code{n/l} observations. Within each block there may be \code{k} groups with \code{k>2}. For this test, there is only one observation per block*group combination. The labels (and corresponding rows of \code{Z} and columns of \code{X} and \code{W}) must be ordered by block and within each block ordered by group. Groups must be labeled wi [...]
+  \item{Y}{A vector or factor containing the outcome of interest for linear models. This may be a continuous or polychotomous dependent variable.}
+  \item{surv.object}{A survival object as returned by the \code{Surv} function, to be used as response in \code{coxY}.} 
+  \item{Z}{A vector, factor, or matrix containing covariate data to be used in the linear regression models. Each variable should be in one column.}
+  \item{strata}{A vector, factor, or matrix containing covariate data to be used in the Cox regression models. Covariate data will be converted to a factor variable (via the \code{strata} function) for use in the \code{coxph} function. Each variable should be in one column.} 
+  \item{n}{The sample size, e.g. \code{length(Y)} or \code{nrow(Z)}.}
+  \item{psi0}{Hypothesized null value for the parameter of interest (e.g. mean or difference in means), typically zero (default).}
+  \item{var.equal}{Indicator of whether to use t-statistics that assume equal variance in the two groups when computing the denominator of the test statistics.}
+  \item{na.rm}{Logical indicating whether to remove observations with an NA. Default is 'TRUE'.}
+  \item{standardize}{Logical indicating whether to use the standardized version of the test statistics (usual t-statistics are standardized). Default is 'TRUE'.}
+  \item{alternative}{Character string indicating the alternative hypotheses, by default 'two.sided'. For one-sided tests, use 'less' or 'greater' for null hypotheses of 'greater than or equal' (i.e. alternative is 'less') and 'less than or equal', respectively.}
+  \item{robust}{Logical indicating whether to use robust versions of the test statistics.}
+  \item{init}{Vector of initial values of the iteration in \code{coxY} function, as used in \code{coxph} in the \code{survival} package. Default initial value is zero for all variables (\code{init=NULL}).}
+  \item{method}{A character string specifying the method for tie handling in \code{coxY} function, as used in \code{coxph} in the \code{survival} package. Default is "efron".}
+  \item{test}{For \code{corr.Tn}, a character string of either 't.cor' or 'z.cor' indicating whether t-statistics or Fisher's z-statistics are to be calculated when probing hypotheses involving correlation parameters.}
+  \item{use}{Similar to the options in \code{cor}, a character string giving a method for computing covariances in the presence of missing values.  Default is 'pairwise', which allows for the covariance/correlation matrix to be calculated using the most information possible when \code{NA}s are present.} 
+}
+
+\details{
+The use of closures, in the style of the \code{genefilter} package, allows uniform data input for all MTPs and facilitates the extension of the package's functionality by adding, for example, new types of test statistics. 
+Specifically, for each value of the \code{MTP} argument \code{test}, a closure is defined which consists of a function for computing the test statistic (with only two arguments, a data vector \code{x} and a corresponding weight vector \code{w}, with default value of \code{NULL}) and its enclosing environment, with bindings for relevant additional arguments. These arguments may include null values \code{psi0}, outcomes (\code{Y}, \code{label}, \code{surv.object}), and covariates \code{Z}. [...]
+
+In the \code{MTP} function, the closure is first used to compute the vector of observed test statistics, and then, in each bootstrap iteration, to produce the estimated joint null distribution of the test statistics. In both cases, the function \code{get.Tn} is used to apply the closure to rows of the matrices of data (\code{X}) and weights (\code{W}). Thus, new test statistics can be added to the \code{multtest} package by simply defining a new closure and adding a corresponding value for t [...]
+
+As mentioned above, one exception to the closure rule in \code{multtest} was made for tests involving correlation parameters (i.e., when \code{test='t.cor'} or \code{test='z.cor'}).  In particular, the change of dimension between the number of variables in \code{X} and the number of hypotheses corresponding to all pairwise correlation parameters presented a challenge.  In this setting, a 'closure-like' function was written which returns \code{choose(dim(X)[2],2)} test st [...]
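+
+As an illustration of the closure style (a minimal sketch only; the name
+\code{myTest} is hypothetical and not part of \code{multtest}), a new test
+statistic function could be written as:
+\preformatted{
+myTest <- function(psi0 = 0){
+  function(x, w = NULL){
+    if(is.null(w)) w <- rep(1, length(x))
+    num <- mean(w * x) - psi0
+    denom <- sqrt(var(w * x) / length(x))
+    # numerator, denominator, and sign, as described in the value section
+    c(num, denom, 1)
+  }
+}
+}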
+}
+
+\value{
+For \code{meanX}, \code{diffmeanX}, \code{FX}, \code{blockFX}, \code{twowayFX}, \code{lmX}, \code{lmY}, and \code{coxY}, a closure consisting of a function for computing test statistics and its enclosing environment. For \code{get.Tn} and \code{corr.Tn}, the observed test statistics, stored in a matrix \code{obs} with numerator (possibly absolute value or negative, depending on the value of \code{alternative}) in the first row, denominator in the second row, and a 1 or -1 in the third row [...]
+}
+
+\author{Katherine S. Pollard, Houston N. Gilbert, and Sandra Taylor, with design contributions from Duncan Temple Lang, Sandrine Dudoit and Mark J. van der Laan}
+
+\seealso{\code{\link{MTP}}, \code{\link{get.Tn}}, \code{\link{wapply}}, \code{\link{boot.resample}}}
+
+\examples{
+data<-matrix(rnorm(200),nr=20)
+#one-sample t-statistics
+ttest<-meanX(psi0=0,na.rm=TRUE,standardize=TRUE,alternative="two.sided",robust=FALSE)
+obs<-wapply(data,1,ttest,W=NULL)
+statistics<-obs[1,]*obs[3,]/obs[2,]
+statistics
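+
+#get.Tn applies the same closure to the rows of the data (it calls wapply)
+obs.Tn<-get.Tn(data,ttest,W=NULL)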
+
+#for tests of correlation parameters,
+#note change of dimension compared to dim(data),
+#the function calculates statistics directly in the same form as above
+obs <- corr.Tn(data,test="t.cor",alternative="greater")
+dim(obs)
+statistics<-obs[1,]*obs[3,]/obs[2,]
+length(statistics)
+
+#two-way F-statistics
+FData <- matrix(rnorm(5*60),nr=5)
+label<-rep(c(rep(1,10), rep(2,10), rep(3,10)),2)
+twowayf<-twowayFX(label)
+obs<-wapply(FData,1,twowayf,W=NULL)
+statistics<-obs[1,]*obs[3,]/obs[2,]
+statistics
+}
+
+\keyword{htest}
+\keyword{internal}
+
+
+
diff --git a/man/mt.internal.Rd b/man/mt.internal.Rd
new file mode 100755
index 0000000..62a90e9
--- /dev/null
+++ b/man/mt.internal.Rd
@@ -0,0 +1,60 @@
+\name{multtest-internal}
+\alias{.mt.BLIM}
+\alias{.mt.RandSeed}
+\alias{.mt.naNUM}
+\alias{mt.number2na}
+\alias{mt.na2number}
+\alias{mt.getmaxB}
+\alias{mt.transformL}
+\alias{mt.transformX}
+\alias{mt.transformV}
+\alias{mt.checkothers}
+\alias{mt.checkX}
+\alias{mt.checkV}
+\alias{mt.checkclasslabel}
+\alias{mt.niceres}
+\alias{mt.legend}
+\alias{corr.Tn}
+\alias{diffs.1.N}
+\alias{IC.Cor.NA}
+\alias{IC.CorXW.NA}
+\alias{insert.NA}
+\alias{marg.samp}
+
+\title{Internal multtest functions and variables}
+\description{
+  Internal multtest functions and variables
+}
+\usage{
+.mt.BLIM
+.mt.RandSeed
+.mt.naNUM
+mt.number2na(x,na)
+mt.na2number(x,na)
+mt.getmaxB(classlabel,test,B, verbose)
+mt.transformL(classlabel,test)
+mt.transformV(V,classlabel,test,na,nonpara)
+mt.transformX(X,classlabel,test,na,nonpara)
+mt.checkothers(side="abs",fixed.seed.sampling="y", B=10000,
+na=.mt.naNUM, nonpara="n")
+mt.checkX(X,classlabel,test)
+mt.checkV(V,classlabel,test)
+mt.checkclasslabel(classlabel,test)
+mt.niceres(res,X,index)
+mt.legend(x, y = NULL, legend, fill = NULL, col = "black", lty, 
+    lwd, pch, angle = 45, density = NULL, bty = "o", bg = par("bg"), 
+    pt.bg = NA, cex = 1, pt.cex = cex, pt.lwd = lwd, xjust = 0, 
+    yjust = 1, x.intersp = 1, y.intersp = 1, adj = c(0, 0.5), 
+    text.width = NULL, text.col = par("col"), merge = do.lines && 
+        has.pch, trace = FALSE, plot = TRUE, ncol = 1, horiz = FALSE,...)
+corr.Tn(X, test, alternative, use = "pairwise") 
+diffs.1.N(vec1, vec2, e1, e2, e21, e22, e12) 
+IC.Cor.NA(IC, W, N, M, output) 
+IC.CorXW.NA(X, W, N, M, output)
+insert.NA(orig.NA, res.vec)
+marg.samp(marg.null, marg.par, m, B, ncp)   
+}
+\details{
+  These are not to be called directly by the user.
+}
+\keyword{internal}
diff --git a/man/mt.maxT.Rd b/man/mt.maxT.Rd
new file mode 100755
index 0000000..c565056
--- /dev/null
+++ b/man/mt.maxT.Rd
@@ -0,0 +1,142 @@
+\name{mt.maxT}
+\alias{mt.maxT}
+\alias{mt.minP}
+\title{
+  Step-down maxT and minP multiple testing procedures
+}
+\description{These functions compute permutation adjusted \eqn{p}-values for step-down multiple testing procedures described in Westfall & Young (1993).
+}
+\usage{
+mt.maxT(X,classlabel,test="t",side="abs",fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+mt.minP(X,classlabel,test="t",side="abs",fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+}
+
+\arguments{
+  \item{X}{A data frame or matrix, with \eqn{m} rows corresponding to variables
+    (hypotheses) and
+    \eqn{n} columns to observations. In the case of gene expression data, rows
+    correspond to genes and columns to mRNA samples. The data can
+    be read using \code{\link{read.table}}.
+  }
+  \item{classlabel}{
+    A vector of integers corresponding to observation (column)
+    class labels. For \eqn{k} classes, the labels must be integers
+    between 0 and \eqn{k-1}. For the \code{blockf} test option,
+    observations may be divided into
+    \eqn{n/k} blocks of \eqn{k} observations each. The observations are
+    ordered by block, and within each block, they are labeled using the
+    integers 0 to \eqn{k-1}.
+  }	
+  \item{test}{A character string specifying the statistic to be
+    used to test the null hypothesis of no association between the
+    variables and the class labels.\cr
+    If \code{test="t"}, the tests are based on two-sample Welch t-statistics
+    (unequal variances).  \cr
+    If \code{test="t.equalvar"}, the tests are based on two-sample
+    t-statistics with equal variance for the two samples. The
+    square of the t-statistic is equal to an F-statistic for \eqn{k=2}. \cr
+    If \code{test="wilcoxon"}, the tests are based on standardized rank sum Wilcoxon statistics.\cr
+    If \code{test="f"}, the tests are based on F-statistics.\cr
+    If \code{test="pairt"}, the tests are based on paired t-statistics. The
+    square of the paired t-statistic is equal to a block F-statistic for \eqn{k=2}. \cr
+    If \code{test="blockf"}, the tests are based on F-statistics which
+    adjust for block differences
+    (cf. two-way analysis of variance).
+  }
+  \item{side}{A character string specifying the type of rejection region.\cr
+    If \code{side="abs"}, two-tailed tests, the null hypothesis is rejected for large absolute values of the test statistic.\cr
+    If \code{side="upper"}, one-tailed tests, the null hypothesis is rejected for large values of the test statistic.\cr
+    If \code{side="lower"}, one-tailed tests,  the null hypothesis is rejected for small values of the test statistic.
+  }
+  \item{fixed.seed.sampling}{If \code{fixed.seed.sampling="y"}, a
+    fixed seed sampling procedure is used, which may double the
+    computing time, but will not use extra memory to store the
+    permutations. If \code{fixed.seed.sampling="n"}, permutations will
+    be stored in memory.  For the \code{blockf} test, the option \code{n} was not implemented as it requires too much memory.
+  }
+  \item{B}{The number of permutations. For a complete
+    enumeration, \code{B} should be 0 (zero) or any number not less than
+    the total number of permutations.
+  }
+  \item{na}{Code for missing values (the default is \code{.mt.naNUM=-93074815.62}).
+    Entries with missing values will be ignored in the computation, 
+    i.e., test statistics will be based on a smaller sample size. This
+    feature has not yet been fully implemented.
+  }
+  \item{nonpara}{If \code{nonpara}="y", nonparametric test statistics are computed based on ranked data. \cr
+    If  \code{nonpara}="n", the original data are used.
+  }
+}
+
+\details{These functions compute permutation adjusted \eqn{p}-values for the step-down maxT and minP multiple testing procedures, which provide strong control of the family-wise Type I error rate (FWER). The adjusted \eqn{p}-values for the minP procedure are defined in equation (2.10) on p. 66 of Westfall & Young (1993), and the maxT procedure is discussed on p. 50 and 114. The permutation algorithms for estimating the adjusted \eqn{p}-values are given in Ge et al. (In preparation). The proced [...]
+}
+
+
+\value{
+  A data frame with components
+  \item{index}{Vector of row indices, between 1 and \code{nrow(X)}, where rows are sorted first according to
+    their adjusted \eqn{p}-values, next their unadjusted \eqn{p}-values, and finally their test statistics. }
+  \item{teststat}{Vector of test statistics, ordered according to \code{index}. To get the test statistics in the original data order, use \code{teststat[order(index)]}.}
+  \item{rawp}{Vector of raw (unadjusted) \eqn{p}-values, ordered according to \code{index}.}
+  \item{adjp}{Vector of adjusted \eqn{p}-values, ordered according to \code{index}.}
+  \item{plower}{For \code{\link{mt.minP}} function only, vector of "adjusted \eqn{p}-values", where ties in the permutation distribution of the successive minima of raw \eqn{p}-values with the observed \eqn{p}-values are counted only once. Note that procedures based on \code{plower} do not control the FWER. Comparison of \code{plower} and \code{adjp} gives an idea of the discreteness of the permutation distribution. Values in \code{plower} are ordered according to \code{index}.}
+}
+
+\references{
+S. Dudoit, J. P. Shaffer, and J. C. Boldrick (Submitted). Multiple hypothesis testing in microarray experiments.\cr
+
+Y. Ge, S. Dudoit, and T. P. Speed. Resampling-based multiple testing for microarray data analysis, Technical Report \#633 of UCB Stat. \url{http://www.stat.berkeley.edu/~gyc} \cr
+
+P. H. Westfall and S. S. Young (1993). \emph{Resampling-based
+multiple testing: Examples and methods for \eqn{p}-value adjustment}. John Wiley \& Sons.
+}	
+  
+\author{Yongchao Ge, \email{yongchao.ge at mssm.edu}, \cr
+Sandrine Dudoit, \url{http://www.stat.berkeley.edu/~sandrine}.}
+
+\seealso{\code{\link{mt.plot}}, \code{\link{mt.rawp2adjp}}, \code{\link{mt.reject}}, \code{\link{mt.sample.teststat}}, \code{\link{mt.teststat}}, \code{\link{golub}}.}
+
+
+\examples{
+# Gene expression data from Golub et al. (1999)
+# To reduce computation time and for illustrative purposes, we consider only
+# the first 100 genes and use the default of B=10,000 permutations.
+# In general, one would need a much larger number of permutations
+# for microarray data.
+
+data(golub)
+smallgd<-golub[1:100,] 
+classlabel<-golub.cl
+
+# Permutation unadjusted p-values and adjusted p-values 
+# for maxT and minP procedures with Welch t-statistics
+resT<-mt.maxT(smallgd,classlabel)
+resP<-mt.minP(smallgd,classlabel)
+rawp<-resT$rawp[order(resT$index)]
+teststat<-resT$teststat[order(resT$index)]
+
+# Plot results and compare to Bonferroni procedure
+bonf<-mt.rawp2adjp(rawp, proc=c("Bonferroni"))
+allp<-cbind(rawp, bonf$adjp[order(bonf$index),2], resT$adjp[order(resT$index)],resP$adjp[order(resP$index)])
+
+mt.plot(allp, teststat, plottype="rvsa", proc=c("rawp","Bonferroni","maxT","minP"),leg=c(0.7,50),lty=1,col=1:4,lwd=2)
+mt.plot(allp, teststat, plottype="pvsr", proc=c("rawp","Bonferroni","maxT","minP"),leg=c(60,0.2),lty=1,col=1:4,lwd=2)
+mt.plot(allp, teststat, plottype="pvst", proc=c("rawp","Bonferroni","maxT","minP"),leg=c(-6,0.6),pch=16,col=1:4)
+
+# Permutation adjusted p-values for minP procedure with F-statistics (like equal variance t-statistics)
+mt.minP(smallgd,classlabel,test="f",fixed.seed.sampling="n")
+
+# Note that the test statistics used in the examples below are not appropriate 
+# for the Golub et al. data. The sole purpose of these examples is to 
+# demonstrate the use of the mt.maxT and mt.minP functions.
+
+# Permutation adjusted p-values for maxT procedure with paired t-statistics
+classlabel<-rep(c(0,1),19)
+mt.maxT(smallgd,classlabel,test="pairt")
+
+# Permutation adjusted p-values for maxT procedure with block F-statistics
+classlabel<-rep(0:18,2)
+mt.maxT(smallgd,classlabel,test="blockf",side="upper")
+
+}
+\keyword{htest}
diff --git a/man/mt.plot.Rd b/man/mt.plot.Rd
new file mode 100755
index 0000000..98be13a
--- /dev/null
+++ b/man/mt.plot.Rd
@@ -0,0 +1,89 @@
+\name{mt.plot}
+\alias{mt.plot}
+\title{Plotting results from multiple testing procedures}
+\description{This function produces a number of graphical summaries
+  for the results of multiple testing procedures and their corresponding
+  adjusted \eqn{p}-values.}
+\usage{
+mt.plot(adjp, teststat, plottype="rvsa", logscale=FALSE, alpha=seq(0, 1, length = 100), proc, leg=c(0, 0), \dots)
+}
+\arguments{
+  \item{adjp}{A matrix of adjusted \emph{p}-values, with rows
+    corresponding to hypotheses (genes) and columns to multiple testing
+    procedures. This matrix could be obtained from the functions
+    \code{\link{mt.maxT}}, \code{\link{mt.minP}}, or \code{\link{mt.rawp2adjp}}.}
+  \item{teststat}{A vector of test statistics for each of the hypotheses. This vector could be obtained from the functions \code{\link{mt.teststat}}, \code{\link{mt.maxT}}, or \code{\link{mt.minP}}.}
+  \item{plottype}{A character string specifying the type of graphical
+    summary for the results of the multiple testing procedures. \cr
+    If \code{plottype="rvsa"}, the number of rejected hypotheses is plotted against the nominal Type I error rate for each of the procedures given in \code{proc}.\cr
+    If \code{plottype="pvsr"}, the ordered adjusted \emph{p}-values are plotted for each of the procedures given in \code{proc}. This can be viewed as a plot of the Type I error rate against the number of rejected hypotheses. \cr
+    If \code{plottype="pvst"}, the adjusted \emph{p}-values are plotted against the test statistics for each of the procedures given in \code{proc}.
+    \cr
+    If \code{plottype="pvsi"}, the adjusted \emph{p}-values are plotted for each of the procedures given in \code{proc} using the original data order.  }
+  \item{logscale}{A logical variable for the \code{pvst} and \code{pvsi} plots. If \code{logscale} is \code{TRUE}, the negative decimal logarithms of the adjusted \emph{p}-values are plotted against the test statistics or gene indices. If \code{logscale} is \code{FALSE}, the adjusted \emph{p}-values are plotted against the test statistics or gene indices.}
+  \item{alpha}{A vector of nominal Type I error rates for the \code{rvsa} plot.}
+  \item{proc}{A vector of character strings containing the names of the
+    multiple testing procedures, to be used in the legend.}
+  \item{\dots}{Graphical parameters such as \code{col}, \code{lty},
+    \code{pch}, and \code{lwd}
+    may also be supplied as arguments to the function (see \code{\link{par}}).}
+  \item{leg}{A vector of coordinates for the legend.}
+}
+
+\references{
+  
+  S. Dudoit, J. P. Shaffer, and J. C. Boldrick (Submitted). Multiple hypothesis testing in microarray experiments.\cr
+  
+  Y. Ge, S. Dudoit, and T. P. Speed. Resampling-based multiple testing for microarray data analysis, Technical Report \#633 of UCB Stat. \url{http://www.stat.berkeley.edu/~gyc}  \cr
+
+}
+\author{
+  Sandrine Dudoit,  \url{http://www.stat.berkeley.edu/~sandrine}, \cr
+  Yongchao Ge, \email{yongchao.ge at mssm.edu}.
+}
+\seealso{\code{\link{mt.maxT}}, \code{\link{mt.minP}}, \code{\link{mt.rawp2adjp}},  \code{\link{mt.reject}}, \code{\link{mt.teststat}}, \code{\link{golub}}.}
+
+\examples{
+# Gene expression data from Golub et al. (1999)
+# To reduce computation time and for illustrative purposes, we consider only
+# the first 100 genes and use the default of B=10,000 permutations.
+# In general, one would need a much larger number of permutations
+# for microarray data.
+
+data(golub)
+smallgd<-golub[1:100,] 
+classlabel<-golub.cl
+
+# Permutation unadjusted p-values and adjusted p-values for maxT procedure
+res1<-mt.maxT(smallgd,classlabel)
+rawp<-res1$rawp[order(res1$index)]
+teststat<-res1$teststat[order(res1$index)]
+
+# Permutation adjusted p-values for simple multiple testing procedures
+procs<-c("Bonferroni","Holm","Hochberg","SidakSS","SidakSD","BH","BY")
+res2<-mt.rawp2adjp(rawp,procs)
+
+# Plot results from all multiple testing procedures
+allp<-cbind(res2$adjp[order(res2$index),],res1$adjp[order(res1$index)])
+dimnames(allp)[[2]][9]<-"maxT"
+procs<-dimnames(allp)[[2]]
+procs[7:9]<-c("maxT","BH","BY")
+allp<-allp[,procs]
+
+cols<-c(1:4,"orange","brown","purple",5:6)
+ltypes<-c(3,rep(1,6),rep(2,2))
+
+# Ordered adjusted p-values
+mt.plot(allp,teststat,plottype="pvsr",proc=procs,leg=c(80,0.4),lty=ltypes,col=cols,lwd=2)
+
+# Adjusted p-values in original data order
+mt.plot(allp,teststat,plottype="pvsi",proc=procs,leg=c(80,0.4),lty=ltypes,col=cols,lwd=2)
+
+# Number of rejected hypotheses vs. level of the test
+mt.plot(allp,teststat,plottype="rvsa",proc=procs,leg=c(0.05,100),lty=ltypes,col=cols,lwd=2)
+
+# Adjusted p-values vs. test statistics
+mt.plot(allp,teststat,plottype="pvst",logscale=TRUE,proc=procs,leg=c(0,4),pch=ltypes,col=cols)
+
+}
+\keyword{hplot}
diff --git a/man/mt.rawp2adjp.Rd b/man/mt.rawp2adjp.Rd
new file mode 100755
index 0000000..9ae4f87
--- /dev/null
+++ b/man/mt.rawp2adjp.Rd
@@ -0,0 +1,155 @@
+\name{mt.rawp2adjp}
+\alias{mt.rawp2adjp}
+\title{Adjusted p-values for simple multiple testing procedures}
+\description{
+  This function computes adjusted \eqn{p}-values for simple
+  multiple testing procedures from a vector of raw (unadjusted)
+  \eqn{p}-values. The procedures include the Bonferroni, Holm (1979),
+  Hochberg (1988), and Sidak procedures for strong control of the
+  family-wise Type I error rate (FWER), and the Benjamini & Hochberg
+  (1995) and Benjamini & Yekutieli (2001) procedures for (strong)
+  control of the false discovery rate (FDR).  The less conservative
+  adaptive Benjamini & Hochberg (2000) and two-stage Benjamini & Hochberg
+  (2006) FDR-controlling procedures are also included.
+}
+\usage{
+mt.rawp2adjp(rawp, proc=c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD",
+"BH", "BY","ABH","TSBH"), alpha = 0.05, na.rm = FALSE)
+}
+\arguments{
+  \item{rawp}{A vector of raw (unadjusted) \eqn{p}-values for each
+    hypothesis under consideration. These could be nominal
+    \eqn{p}-values, for example, from \eqn{t}-tables, or permutation
+    \eqn{p}-values as given in \code{mt.maxT} and \code{mt.minP}. If the
+    \code{mt.maxT} or \code{mt.minP} functions are used, raw
+    \eqn{p}-values should be given in the original data order,
+    \code{rawp[order(index)]}.}
+  \item{proc}{A vector of character strings containing the names of the
+    multiple testing procedures for which adjusted \eqn{p}-values are to
+    be computed. This vector should include any of the following:
+    \code{"Bonferroni"}, \code{"Holm"}, \code{"Hochberg"},
+    \code{"SidakSS"}, \code{"SidakSD"}, \code{"BH"}, \code{"BY"},
+    \code{"ABH"}, \code{"TSBH"}.\cr
+
+Adjusted \eqn{p}-values are computed for simple FWER- and FDR-
+controlling procedures based on a vector of raw (unadjusted)
+\eqn{p}-values by one or more of the following methods:
+
+\describe{
+\item{Bonferroni}{Bonferroni single-step adjusted \eqn{p}-values
+for strong control of the FWER.}
+\item{Holm}{Holm (1979) step-down adjusted \eqn{p}-values for
+strong control of the FWER.}
+\item{Hochberg}{ Hochberg (1988) step-up adjusted \eqn{p}-values
+for
+strong control of the FWER (for raw (unadjusted) \eqn{p}-values
+satisfying the Simes inequality).}
+\item{SidakSS}{Sidak single-step adjusted \eqn{p}-values for
+strong control of the FWER (for positive orthant dependent test
+statistics).}
+\item{SidakSD}{Sidak step-down adjusted \eqn{p}-values for
+strong control of the FWER (for positive orthant dependent test
+statistics).}
+\item{BH}{Adjusted \eqn{p}-values for the Benjamini & Hochberg
+(1995) step-up FDR-controlling procedure (independent and positive
+regression dependent test statistics).}
+\item{BY}{Adjusted \eqn{p}-values for the Benjamini & Yekutieli
+(2001) step-up FDR-controlling procedure (general dependency
+structures).}
+\item{ABH}{Adjusted \eqn{p}-values for the adaptive Benjamini & Hochberg
+(2000) step-up FDR-controlling procedure.  This method amends the original step-up procedure using an estimate of the number of true null hypotheses obtained from \eqn{p}-values.}
+\item{TSBH}{Adjusted \eqn{p}-values for the two-stage Benjamini & Hochberg
+(2006) step-up FDR-controlling procedure.  This method amends the original step-up procedure using an estimate of the number of true null hypotheses obtained from a first-pass application of \code{"BH"}.  The adjusted \eqn{p}-values are \eqn{\alpha}-dependent; therefore, \code{alpha} must be set in the function arguments when using this procedure.}
+}
+}
+
+\item{alpha}{A nominal type I error rate, or a vector of error
+    rates, used for estimating the number of true null hypotheses in the
+    two-stage Benjamini & Hochberg procedure (\code{"TSBH"}).  Default is 0.05.}
+
+\item{na.rm}{An option for handling \code{NA} values in a list of raw \eqn{p}-values.  If
+\code{FALSE}, the number of hypotheses considered is the length of the vector
+of raw \eqn{p}-values.  Otherwise, if \code{TRUE}, the number of hypotheses is
+the number of raw \eqn{p}-values which were not \code{NA}s.}
+}
+
+
+\value{
+A list with components:
+  \item{adjp}{A matrix of adjusted \eqn{p}-values, with rows
+    corresponding to hypotheses and columns to multiple testing
+    procedures. Hypotheses are sorted in increasing order of their raw
+    (unadjusted) \eqn{p}-values.}
+  \item{index}{A vector of row indices, between 1 and
+    \code{length(rawp)}, where rows are sorted according to
+    their raw (unadjusted) \eqn{p}-values. To obtain the adjusted
+    \eqn{p}-values in the original data order, use
+    \code{adjp[order(index),]}.}
+  \item{h0.ABH}{The estimate of the number of true null hypotheses as proposed
+    by Benjamini & Hochberg (2000), used when computing adjusted \eqn{p}-values
+    for the \code{"ABH"} procedure (see Dudoit et al., 2008).}
+  \item{h0.TSBH}{The estimate (or vector of estimates) of the number of true
+    null hypotheses as proposed by Benjamini et al. (2006), used when computing adjusted
+    \eqn{p}-values for the \code{"TSBH"} procedure (see Dudoit et al., 2008).}
+}
+
+\references{
+Y. Benjamini and Y. Hochberg (1995). Controlling the false discovery
+rate: a practical and powerful approach to multiple
+testing. \emph{J. R. Statist. Soc. B}. Vol. 57: 289-300.\cr
+
+Y. Benjamini and Y. Hochberg (2000). On the adaptive control of the false discovery rate in multiple testing with independent statistics. \emph{J. Behav. Educ. Statist}. Vol 25: 60-83.\cr
+
+Y. Benjamini and D. Yekutieli (2001). The control of the false discovery rate in multiple hypothesis testing under dependency. \emph{Annals of Statistics}. Vol. 29: 1165-88.\cr
+
+Y. Benjamini, A. M. Krieger and D. Yekutieli (2006). Adaptive linear step-up procedures that control the false discovery rate. \emph{Biometrika}. Vol. 93: 491-507.\cr
+
+S. Dudoit, J. P. Shaffer, and J. C. Boldrick (2003). Multiple
+hypothesis testing in microarray experiments. \emph{Statistical Science}. Vol. 18: 71-103. \cr
+
+S. Dudoit, H. N. Gilbert, and M. J. van der Laan (2008).
+Resampling-based empirical Bayes multiple testing procedures for controlling generalized tail probability and expected value error rates: Focus on the false discovery rate and simulation study. \emph{Biometrical Journal}, 50(5):716-44. \url{http://www.stat.berkeley.edu/~houston/BJMCPSupp/BJMCPSupp.html}. \cr
+
+Y. Ge, S. Dudoit, and T. P. Speed (2003). Resampling-based multiple testing for microarray data analysis. \emph{TEST}. Vol. 12: 1-44 (plus discussion p. 44-77).\cr
+
+Y. Hochberg (1988). A sharper Bonferroni procedure for multiple tests of significance, \emph{Biometrika}. Vol. 75: 800-802.\cr
+
+S. Holm (1979). A simple sequentially rejective multiple test
+procedure. \emph{Scand. J. Statist.}. Vol. 6: 65-70.
+}
+
+\author{
+  Sandrine Dudoit, \url{http://www.stat.berkeley.edu/~sandrine},\cr
+  Yongchao Ge, \email{yongchao.ge at mssm.edu},\cr
+  Houston Gilbert, \url{http://www.stat.berkeley.edu/~houston}.
+}
+
+\seealso{\code{\link{mt.maxT}}, \code{\link{mt.minP}},
+  \code{\link{mt.plot}}, \code{\link{mt.reject}}, \code{\link{golub}}.}
+
+\examples{
+# Gene expression data from Golub et al. (1999)
+# To reduce computation time and for illustrative purposes, we consider only
+# the first 100 genes and use the default of B=10,000 permutations.
+# In general, one would need a much larger number of permutations
+# for microarray data.
+
+data(golub)
+smallgd<-golub[1:100,]
+classlabel<-golub.cl
+
+# Permutation unadjusted p-values and adjusted p-values for maxT procedure
+res1<-mt.maxT(smallgd,classlabel)
+rawp<-res1$rawp[order(res1$index)]
+
+# Permutation adjusted p-values for simple multiple testing procedures
+procs<-c("Bonferroni","Holm","Hochberg","SidakSS","SidakSD","BH","BY","ABH","TSBH")
+res2<-mt.rawp2adjp(rawp,procs)
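+
+# Adjusted p-values in the original data order
+adjp.orig<-res2$adjp[order(res2$index),]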
+}
+
+\keyword{htest}
+
+
+
+
+
diff --git a/man/mt.reject.Rd b/man/mt.reject.Rd
new file mode 100755
index 0000000..6a0dd20
--- /dev/null
+++ b/man/mt.reject.Rd
@@ -0,0 +1,48 @@
+\name{mt.reject}
+\alias{mt.reject}
+\title{Identity and number of rejected hypotheses }
+\description{This function returns the identity and number of rejected hypotheses for several multiple testing procedures and different nominal Type I error rates.
+}
+\usage{
+mt.reject(adjp, alpha)
+}
+\arguments{
+  \item{adjp}{A matrix of adjusted \emph{p}-values, with rows
+    corresponding to hypotheses and columns to multiple testing
+    procedures. This matrix could be obtained from the function
+    \code{\link{mt.rawp2adjp}}.}
+  \item{alpha}{A vector of nominal Type I error rates.}
+}
+\value{
+  A list with components
+  \item{r}{A matrix containing the number of rejected hypotheses for several multiple testing procedures and different nominal Type I error rates. Rows correspond to Type I error rates and columns to multiple testing procedures.}
+  \item{which}{A matrix of indicators for the rejection of individual hypotheses by different multiple testing procedures for a nominal Type I error rate \code{alpha[1]}. Rows correspond to hypotheses and columns to multiple testing procedures.}
+}
+
+\author{
+  Sandrine Dudoit,  \url{http://www.stat.berkeley.edu/~sandrine}, \cr
+  Yongchao Ge, \email{yongchao.ge at mssm.edu}.
+}
+
+
+\seealso{\code{\link{mt.maxT}}, \code{\link{mt.minP}}, \code{\link{mt.rawp2adjp}}, \code{\link{golub}}.}
+
+\examples{
+# Gene expression data from Golub et al. (1999)
+# To reduce computation time and for illustrative purposes, we consider only
+# the first 100 genes and use the default of B=10,000 permutations.
+# In general, one would need a much larger number of permutations
+# for microarray data.
+
+data(golub)
+smallgd<-golub[1:100,] 
+classlabel<-golub.cl
+
+# Permutation unadjusted p-values and adjusted p-values for maxT procedure
+res<-mt.maxT(smallgd,classlabel)
+mt.reject(cbind(res$rawp,res$adjp),seq(0,1,0.1))$r
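+
+# Indicators of rejection of individual hypotheses at nominal level 0.05,
+# and the number of rejections per procedure
+rej<-mt.reject(cbind(res$rawp,res$adjp),0.05)$which
+apply(rej,2,sum)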
+
+}
+
+\keyword{htest}
diff --git a/man/mt.sample.teststat.Rd b/man/mt.sample.teststat.Rd
new file mode 100755
index 0000000..391eb95
--- /dev/null
+++ b/man/mt.sample.teststat.Rd
@@ -0,0 +1,100 @@
+\name{mt.sample.teststat}
+\title{Permutation distribution of test statistics and raw (unadjusted) p-values}
+\alias{mt.sample.teststat}
+\alias{mt.sample.rawp}
+\alias{mt.sample.label}
+\usage{
+mt.sample.teststat(V,classlabel,test="t",fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+mt.sample.rawp(V,classlabel,test="t",side="abs",fixed.seed.sampling="y",B=10000,na=.mt.naNUM,nonpara="n")
+mt.sample.label(classlabel,test="t",fixed.seed.sampling="y",B=10000)
+}
+\description{
+  These functions provide tools to investigate the permutation distribution
+  of test statistics, raw (unadjusted) \eqn{p}-values, and class labels.
+}
+\arguments{
+  \item{V}{A numeric vector containing the data for one of the variables (genes).}
+
+ \item{classlabel}{
+A vector of integers corresponding to observation (column)
+    class labels. For \eqn{k} classes, the labels must be integers
+    between 0 and \eqn{k-1}. For the \code{blockf} test option,
+    observations may be divided into
+    \eqn{n/k} blocks of \eqn{k} observations each. The observations are
+    ordered by block, and within each block, they are labeled using the
+    integers 0 to \eqn{k-1}.
+  }	
+  \item{test}{A character string specifying the statistic to be
+    used to test the null hypothesis of no association between the
+    variables and the class labels.\cr
+    If \code{test="t"}, the tests are based on two-sample Welch t-statistics
+    (unequal variances).  \cr
+    If \code{test="t.equalvar"}, the tests are based on two-sample
+    t-statistics with equal variance for the two samples. The
+    square of the t-statistic is equal to an F-statistic for \eqn{k=2}. \cr
+    If \code{test="wilcoxon"}, the tests are based on standardized rank sum Wilcoxon statistics.\cr
+    If \code{test="f"}, the tests are based on F-statistics.\cr
+    If \code{test="pairt"}, the tests are based on paired t-statistics. The
+    square of the paired t-statistic is equal to a block F-statistic for \eqn{k=2}. \cr
+    If \code{test="blockf"}, the tests are based on F-statistics which
+    adjust for block differences
+    (cf. two-way analysis of variance).
+  }
+  \item{side}{A character string specifying the type of rejection region.\cr
+    If \code{side="abs"}, two-tailed tests, the null hypothesis is rejected for large absolute values of the test statistic.\cr
+    If \code{side="upper"}, one-tailed tests, the null hypothesis is rejected for large values of the test statistic.\cr
+    If \code{side="lower"}, one-tailed tests,  the null hypothesis is rejected for small values of the test statistic.
+  }
+  \item{fixed.seed.sampling}{If \code{fixed.seed.sampling="y"}, a
+    fixed seed sampling procedure is used, which may double the
+    computing time, but will not use extra memory to store the
+    permutations. If \code{fixed.seed.sampling="n"}, permutations will
+    be stored in memory.  For the \code{blockf} test, the option \code{n} was not implemented as it requires too much memory.
+  }
+  \item{B}{The number of permutations. For a complete
+    enumeration, \code{B} should be 0 (zero) or any number not less than
+    the total number of permutations.
+  }
+  \item{na}{Code for missing values (the default is \code{.mt.naNUM=-93074815.62}).
+    Entries with missing values will be ignored in the computation,
+    i.e., test statistics will be based on a smaller sample size. This
+    feature has not yet been fully implemented.
+  }
+  \item{nonpara}{If \code{nonpara}="y", nonparametric test statistics are computed based on ranked data. \cr
+    If  \code{nonpara}="n", the original data are used.
+  }
+
+}
+\value{
+  For \code{\link{mt.sample.teststat}},  a vector containing \code{B} permutation test statistics. \cr \cr
+  For \code{\link{mt.sample.rawp}},  a vector containing \code{B} permutation unadjusted \eqn{p}-values. \cr\cr 
+  For \code{\link{mt.sample.label}}, a matrix containing \code{B}
+  sets of permuted class labels. Each row corresponds to one permutation.
+}
+
+\examples{
+
+# Gene expression data from Golub et al. (1999)
+data(golub)
+
+mt.sample.label(golub.cl,B=10)
+
+permt<-mt.sample.teststat(golub[1,],golub.cl,B=1000)
+qqnorm(permt)
+qqline(permt)
+
+permt<-mt.sample.teststat(golub[50,],golub.cl,B=1000)
+qqnorm(permt)
+qqline(permt)
+
+permp<-mt.sample.rawp(golub[1,],golub.cl,B=1000)
+hist(permp)
+}
+
+\author{Yongchao Ge, \email{yongchao.ge at mssm.edu}, \cr
+Sandrine Dudoit, \url{http://www.stat.berkeley.edu/~sandrine}.}
+
+\seealso{\code{\link{mt.maxT}}, \code{\link{mt.minP}}, \code{\link{golub}}.}
+
+\keyword{manip}
+
diff --git a/man/mt.teststat.Rd b/man/mt.teststat.Rd
new file mode 100755
index 0000000..0180ea0
--- /dev/null
+++ b/man/mt.teststat.Rd
@@ -0,0 +1,93 @@
+\name{mt.teststat}
+\alias{mt.teststat}
+\alias{mt.teststat.num.denum}
+
+\title{Computing test statistics for each row of a data frame}
+\usage{
+mt.teststat(X,classlabel,test="t",na=.mt.naNUM,nonpara="n")
+mt.teststat.num.denum(X,classlabel,test="t",na=.mt.naNUM,nonpara="n")
+}
+\description{
+  These functions provide a convenient way to compute test statistics,
+  e.g., two-sample Welch t-statistics, Wilcoxon statistics,
+  F-statistics, paired t-statistics,
+  block F-statistics, for each row of a data frame. 
+}
+\arguments{
+ \item{X}{A data frame or matrix, with \eqn{m} rows corresponding to variables
+    (hypotheses) and \eqn{n} columns to observations. In the case of gene 
+    expression data, rows
+    correspond to genes and columns to mRNA samples. The data can
+    be read using \code{\link{read.table}}.
+  }
+  \item{classlabel}{
+     A vector of integers corresponding to observation (column)
+    class labels. For \eqn{k} classes, the labels must be integers
+    between 0 and \eqn{k-1}. For the \code{blockf} test option,
+    observations may be divided into
+    \eqn{n/k} blocks of \eqn{k} observations each. The observations are
+    ordered by block, and within each block, they are labeled using the
+    integers 0 to \eqn{k-1}.
+  }	
+  \item{test}{A character string specifying the statistic to be
+    used to test the null hypothesis of no association between the
+    variables and the class labels.\cr
+    If \code{test="t"}, the tests are based on two-sample Welch t-statistics
+    (unequal variances).  \cr
+    If \code{test="t.equalvar"}, the tests are based on two-sample
+    t-statistics with equal variance for the two samples. The
+    square of the t-statistic is equal to an F-statistic for \eqn{k=2}. \cr
+    If \code{test="wilcoxon"}, the tests are based on standardized rank sum Wilcoxon statistics.\cr
+    If \code{test="f"}, the tests are based on F-statistics.\cr
+    If \code{test="pairt"}, the tests are based on paired t-statistics. The
+    square of the paired t-statistic is equal to a block F-statistic for \eqn{k=2}. \cr
+    If \code{test="blockf"}, the tests are based on F-statistics which
+    adjust for block differences
+    (cf. two-way analysis of variance).
+  }
+  \item{na}{Code for missing values (the default is \code{.mt.naNUM=-93074815.62}).
+    Entries with missing values will be ignored in the computation,
+    i.e., test statistics will be based on a smaller sample size. This
+    feature has not yet been fully implemented.
+  }
+  \item{nonpara}{If \code{nonpara}="y", nonparametric test statistics are computed based on ranked data. \cr
+    If  \code{nonpara}="n", the original data are used.}
+}
+
+\value{
+  For \code{\link{mt.teststat}}, a vector of test statistics for each row (gene). \cr \cr
+  For \code{\link{mt.teststat.num.denum}}, a data frame with \cr
+  \item{teststat.num}{the numerator of the test statistics for each row, depending on the
+    specific \code{test} option.}
+  \item{teststat.denum}{the denominator of the test statistics for each row, depending on the
+    specific \code{test} option.}
+  }
+
+
+\author{Yongchao Ge, \email{yongchao.ge at mssm.edu}, \cr
+Sandrine Dudoit, \url{http://www.stat.berkeley.edu/~sandrine}.}
+
+\seealso{\code{\link{mt.maxT}}, \code{\link{mt.minP}}, \code{\link{golub}}.}
+
+\examples{
+# Gene expression data from Golub et al. (1999)
+data(golub)
+
+teststat<-mt.teststat(golub,golub.cl)
+qqnorm(teststat)
+qqline(teststat)
+
+tmp<-mt.teststat.num.denum(golub,golub.cl,test="t")
+num<-tmp$teststat.num
+denum<-tmp$teststat.denum
+plot(sqrt(denum),num)
+
+tmp<-mt.teststat.num.denum(golub,golub.cl,test="f")
+
+}
+		
+\keyword{univar}
+
+	
+
+	
diff --git a/man/ss.maxT.Rd b/man/ss.maxT.Rd
new file mode 100755
index 0000000..eaf1627
--- /dev/null
+++ b/man/ss.maxT.Rd
@@ -0,0 +1,85 @@
+\name{ss.maxT}
+\alias{ss.maxT}
+\alias{ss.minP}
+\alias{sd.maxT}
+\alias{sd.minP}
+
+\title{Procedures to perform multiple testing}
+
+\description{
+Given observed test statistics, a test statistics null distribution, and alternative hypotheses, these multiple testing procedures provide family-wise error rate (FWER) adjusted p-values, cutoffs for test statistics, and possibly confidence regions for estimates. Four methods are implemented, based on minima of p-values and maxima of test statistics. 
+}
+
+\usage{
+ss.maxT(null, obs, alternative, get.cutoff, get.cr, 
+get.adjp, alpha = 0.05)
+
+ss.minP(null, obs, rawp, alternative, get.cutoff, get.cr, 
+get.adjp, alpha=0.05)
+
+sd.maxT(null, obs, alternative, get.cutoff, get.cr, 
+get.adjp, alpha = 0.05)
+
+sd.minP(null, obs, rawp, alternative, get.cutoff, get.cr, 
+get.adjp, alpha=0.05)
+}
+
+\arguments{
+  \item{null}{A matrix containing the test statistics null distribution, e.g. the output of \code{boot.resample}.}
+  \item{obs}{A vector of observed test statistics, e.g. the output of a test statistics closure such as \code{meanX}. These are stored as a matrix with numerator (possibly absolute value or negative, depending on the value of alternative) in the first row, denominator in the second row, and a 1 or -1 in the third row (depending on the value of alternative). The observed test statistics are obs[1,]*obs[3,]/obs[2,].}
+  \item{rawp}{Numeric vector of unadjusted ("raw") marginal p-values.}
+  \item{alternative}{Character string indicating the alternative hypotheses, by default 'two.sided'. For one-sided tests, use 'less' or 'greater' for null hypotheses of 'greater than or equal' (i.e. alternative is 'less') and 'less than or equal', respectively.}
+  \item{get.cutoff}{Logical indicating whether to compute thresholds for the test statistics. Default is 'FALSE'.}
+  \item{get.cr}{Logical indicating whether to compute confidence intervals for the estimates. Not available for f-tests. Default is 'FALSE'.}
+  \item{get.adjp}{Logical indicating whether to compute adjusted p-values. Default is 'TRUE'.}
+  \item{alpha}{The target nominal type I error rate, which may be a vector of error rates. Default is 0.05.}
+}
+
+\details{
+Having selected a suitable test statistics null distribution, there remains the main task of specifying rejection regions for each null hypothesis, i.e., cut-offs for each test statistic. One usually distinguishes between two main classes of multiple testing procedures, single-step and stepwise procedures. In single-step procedures, each null hypothesis is evaluated using a rejection region that is independent of the results of the tests of other hypotheses. Improvement in power, while  [...]
+
+In step-down procedures, the hypotheses corresponding to the most significant test statistics (i.e., largest absolute test statistics or smallest unadjusted p-values) are considered successively, with further tests depending on the outcome of earlier ones. As soon as one fails to reject a null hypothesis, no further
+hypotheses are rejected. In contrast, for step-up procedures, the hypotheses corresponding to the least significant test statistics are considered successively, again with further tests depending on the outcome of earlier ones. As soon as one hypothesis is rejected, all remaining more significant hypotheses are rejected.
+
+These functions perform the following procedures: \cr
+ss.maxT: single-step, common cut-off (maxima of test statistics) \cr
+ss.minP: single-step, common quantile (minima of p-values) \cr
+sd.maxT: step-down, common cut-off (maxima of test statistics) \cr
+sd.minP: step-down, common quantile (minima of p-values) \cr
+
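+As an illustration of the single-step common-cut-off idea (a sketch only, not
+the internal implementation; here \code{null} denotes a null distribution
+matrix with hypotheses in rows and samples in columns, and \code{tstat} a
+vector of observed statistics), single-step maxT adjusted p-values could be
+computed as:
+\preformatted{
+maxnull <- apply(null, 2, max)  # maximum statistic in each null sample
+adjp <- sapply(tstat, function(t) mean(maxnull >= t))
+}
+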
+}
+
+\value{A list with the following components:
+  \item{c}{Object of class \code{"matrix"}, for each nominal (i.e. target) level for the test, a vector of threshold values for the vector of test statistics.}
+  \item{cr}{Object of class \code{"array"}, for each nominal (i.e. target) level for the test, a matrix of lower and upper confidence bounds for the parameter of interest for each hypothesis. Not available for f-tests.}
+  \item{adjp}{Object of class \code{"numeric"}, adjusted p-values for each hypothesis.}
+}
+
+\references{
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Augmentation Procedures for Control of the Generalized Family-Wise Error Rate and Tail Probabilities for the Proportion of False Positives, Statistical Applications in Genetics and Molecular Biology, 3(1). 
+\url{http://www.bepress.com/sagmb/vol3/iss1/art15/}
+
+M.J. van der Laan, S. Dudoit, K.S. Pollard (2004), Multiple Testing. Part II. Step-Down Procedures for Control of the Family-Wise Error Rate, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art14/}
+
+S. Dudoit, M.J. van der Laan, K.S. Pollard (2004), Multiple Testing. Part I. Single-Step Procedures for Control of General Type I Error Rates, Statistical Applications in Genetics and Molecular Biology, 3(1).
+\url{http://www.bepress.com/sagmb/vol3/iss1/art13/}
+
+Katherine S. Pollard and Mark J. van der Laan, "Resampling-based Multiple Testing: Asymptotic Control of Type I Error and Applications to Gene Expression Data" (June 24, 2003). U.C. Berkeley Division of Biostatistics Working Paper Series. Working Paper 121.
+\url{http://www.bepress.com/ucbbiostat/paper121}
+}
+
+\author{Katherine S. Pollard with design contributions from Sandrine Dudoit and Mark J. van der Laan.}
+
+\seealso{\code{\link{MTP}}}
+
+\examples{
+## These functions are used internally by the MTP function
+## See MTP function: ? MTP
+}
+
+\keyword{htest}
+\keyword{internal}
+
+
+
diff --git a/man/wapply.Rd b/man/wapply.Rd
new file mode 100755
index 0000000..75b3d5d
--- /dev/null
+++ b/man/wapply.Rd
@@ -0,0 +1,50 @@
+\name{wapply}
+
+\alias{wapply}
+
+\title{Weighted version of the apply function}
+
+\description{
+A function to perform 'apply' on a matrix of data and corresponding matrix of weights. 
+}
+
+\usage{
+wapply(X, MARGIN, FUN, W = NULL, ...)
+}
+
+\arguments{
+  \item{X}{A matrix of data.}
+  \item{MARGIN}{A vector giving the subscripts which the function will be
+          applied over. 1 indicates rows, 2 indicates columns.}
+  \item{FUN}{The function to be applied. In the case of functions like
+          \code{+} the function name must be quoted.}
+  \item{W}{An optional matrix of weights. When \code{W=NULL}, the usual \code{apply} function is called.}
+  \item{\dots}{optional arguments to \code{FUN}.}
+}
+
+\details{
+When weights are provided, these are passed to \code{FUN} along with the data \code{X}. For example, if \code{FUN=meanX}, each data value is multiplied by the corresponding weight before the mean is applied.
+}
+
+\value{
+If each call to \code{FUN} returns a vector of length \code{n}, then \code{wapply} returns an array of dimension \code{c(n, dim(X)[MARGIN])} if \code{n > 1}. If \code{n = 1}, \code{wapply} returns a vector if \code{MARGIN} has length 1 and an array of dimension \code{dim(X)[MARGIN]} otherwise. If \code{n = 0}, the result has length 0 but not necessarily the "correct" dimension.
+
+If the calls to \code{FUN} return vectors of different lengths, \code{wapply} returns a list of length \code{dim(X)[MARGIN]}.
+
+This function is used in the package \code{multtest} to compute weighted versions of test statistics. It is called by the function \code{get.Tn} inside the user-level function \code{MTP}.
+}
+
+\author{Katherine S. Pollard}
+
+\seealso{\code{\link{get.Tn}}, \code{\link{MTP}}}
+
+\examples{
+data<-matrix(rnorm(200),nr=20)
+weights<-matrix(rexp(200,rate=0.1),nr=20)
+wapply(X=data,MARGIN=1,FUN=mean,W=weights)
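+
+# With W=NULL, wapply reduces to the usual apply
+all.equal(wapply(X=data,MARGIN=1,FUN=mean,W=NULL),apply(data,1,mean))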
+}
+
+\keyword{internal}
+
+
+
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000..9cc013f
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,2 @@
+PKG_CFLAGS=-DUSEDOUBLE
+PKG_LIBS=-lm
diff --git a/src/Makevars.win b/src/Makevars.win
new file mode 100755
index 0000000..b9f379d
--- /dev/null
+++ b/src/Makevars.win
@@ -0,0 +1,2 @@
+PKG_CFLAGS=-DUSEDOUBLE -DWINDOWS
+PKG_LIBS=-lm
diff --git a/src/Rpack.c b/src/Rpack.c
new file mode 100755
index 0000000..7cc8478
--- /dev/null
+++ b/src/Rpack.c
@@ -0,0 +1,316 @@
+/*****************************************************************/
+/*           Header files                                        */
+/*****************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <time.h>
+#include <string.h>
+#include <ctype.h>
+#include "mt.h"
+#define mtT 1
+#define mtF 2
+#define mtPairT 3
+#define mtBlockF 4
+#define mtWilcoxon 5
+#define mtTequalVar 6
+#define mtFixedSampling 7
+typedef float (*FUNC_NUM_DENUM)(const float *, const int* ,const int,
+			const float , float *, float*,const void *);
+typedef void (*FUNC_CREATE)(int, int*,int);
+typedef void (*FUNC_DELETE)();
+typedef struct tagSAMPLING_DATA{
+  FUNC_STAT fn_maxT; 
+  /*the statistic computed for maxT; mostly needs to be standardized*/
+  FUNC_STAT fn_minP;  
+  /*used to speed up the computation; mostly will be set to fn_stat*/
+
+  FUNC_NUM_DENUM fn_num_denum;
+  /*the numerator and denominator of maxT*/
+  FUNC_STAT fn_stat;/*the centered statistic of the original definition,
+		      with no further modification;
+		     mostly it will be fn_minP or fn_maxT, e.g.
+		     in the Wilcoxon test, ranksum - mean, which is also fn_minP;
+		     in the two-sample t-test, it will be the t, also fn_maxT
+		    */
+  FUNC_CMP  fn_cmp;
+  FUNC_SAMPLE fn_first;
+  FUNC_SAMPLE fn_next;
+  FUNC_CREATE fn_create;
+  FUNC_DELETE fn_delete;
+  int test;
+  int is_fixed_seed;
+} SAMPLING_DATA;
+int type2sample(char** options,SAMPLING_DATA* sd);
+int type2test(char* ptest,SAMPLING_DATA* sd);
+void create_gene_data(double*d,  int*pnrow, int*pncol, int*L, double*pna,GENE_DATA* pdata,int PrintIDX)
+{
+  int i,j;
+  pdata->nrow=*pnrow;
+  pdata->ncol=*pncol;
+  pdata->na=*pna;
+  malloc_gene_data(pdata);
+  for (j=0; j<pdata->ncol; j++) 
+    pdata->L[j]=L[j];
+  
+  for (i=0; i<pdata->nrow; i++) {
+    if(PrintIDX) sprintf(pdata->id[i],"%d",i+1); /*used for the indexes*/
+    else sprintf(pdata->id[i],"0");
+    for (j=0; j<pdata->ncol; j++) {
+      pdata->d[i][j]=d[j*pdata->nrow+i];
+      /*using the R convention, which stores the data column by column*/
+    }
+  }
+}
+
+void data2vec(double** data,double*d,  int nrow, int ncol)
+{
+  int i,j;
+  for (i=0; i<nrow; i++) {
+    for (j=0; j<ncol; j++) {
+      d[j*nrow+i]=data[i][j];
+      /*using the R convention, which stores the data column by column*/
+    }
+  }
+}
+void get_gene_indexes(GENE_DATA* pdata, int * indexes)
+{
+  int i;
+  for(i=0;i<pdata->nrow;i++){
+    indexes[i]=atoi(pdata->id[i]);
+  }
+}
+/*computes fn_stat*/
+void get_stat(double*d, int*pnrow, int* pncol, int*L,double *pna, float *T,char** options,int* extra)
+{
+  GENE_DATA data;
+  SAMPLING_DATA sd;
+  if(type2test(options[0],&sd)==0)
+    return;
+  create_gene_data(d,pnrow,pncol,L,pna,&data,0);
+  compute_test_stat(&data,data.L,T,sd.fn_stat,extra);
+  free_gene_data(&data);
+}
+void get_stat_num_denum(double*d, int*pnrow, int* pncol, int*L,double *pna, float *Tnum,float*Tdenum,char**options, int*extra)
+{
+  GENE_DATA data;
+  SAMPLING_DATA sd;
+  int i;
+  if(type2test(options[0],&sd)==0)
+    return;
+  create_gene_data(d,pnrow,pncol,L,pna,&data,0);
+  for(i=0;i<data.nrow;i++)
+    (*sd.fn_num_denum)(data.d[i],data.L,data.ncol,data.na,Tnum+i,Tdenum+i,extra);
+  free_gene_data(&data);
+}
+  
+void get_maxT(double*d, int*pnrow, int* pncol, int*L,double *pna, float* T, float* P,float *adjP, int*pB, int *index,char**options, int*extra )
+{
+  GENE_DATA data;
+  SAMPLING_DATA sd;
+  if(type2sample(options,&sd)==0)
+    return;
+  create_gene_data(d,pnrow,pncol,L,pna,&data,1);
+  (sd.fn_create)(data.ncol,data.L,*pB);
+    adj_by_T(&data,T,P,adjP,sd.fn_maxT,sd.fn_first,sd.fn_next,sd.fn_cmp,extra);
+  get_gene_indexes(&data,index);
+  free_gene_data(&data);
+  sd.fn_delete();
+}
+void get_minP(double*d, int*pnrow, int* pncol, int*L,double *pna, float* T, float* P,float *adjP, float* adj_lower,int*pB, int *index,char**options, int*extra) 
+{
+  GENE_DATA data;
+  SAMPLING_DATA sd;
+  if(type2sample(options,&sd)==0)
+    return;
+  create_gene_data(d,pnrow,pncol,L,pna,&data,1);
+  Rprintf("B=%d\n",*pB);
+  sd.fn_create(data.ncol,data.L,*pB);
+  adj_pvalue_quick(&data,T,P,adjP,adj_lower,sd.fn_minP,sd.fn_maxT,sd.fn_first,sd.fn_next,sd.fn_cmp,extra);
+  get_gene_indexes(&data,index);
+  free_gene_data(&data);
+  sd.fn_delete();
+}
+void get_samples_T(float*V, int* pn,int* L,float* T,float *pna,int* pB,char**options, int*extra) 
+{
+  int n=*pn;
+  int B=*pB;
+  SAMPLING_DATA sd;
+  if(type2sample(options,&sd)==0)
+    return;
+  sd.fn_create(n,L,B);
+  get_all_samples_T(V,n,T,*pna,sd.fn_maxT,sd.fn_first,sd.fn_next,(void*)extra);
+  sd.fn_delete();
+}
+void get_samples_P(float*V, int* pn,int* L,float* P,float *pna,int* pB,char**options, int*extra)
+{
+  int n=*pn;
+  int B=*pB;
+  SAMPLING_DATA sd;
+  if(type2sample(options,&sd)==0)
+    return;
+ sd.fn_create(n,L,B);
+  get_all_samples_P(V,n,P,*pna,sd.fn_minP,sd.fn_first,sd.fn_next,sd.fn_cmp,(void*)extra);
+  sd.fn_delete();
+}
+
+void get_sample_labels(int*pn,int*L,int*pB,int* S,char**options, int*extra)
+{
+  int n=*pn;
+  int B=*pB;
+  int is_next=1;
+  int nb=0;
+  int i;
+  SAMPLING_DATA sd;
+  if(type2sample(options,&sd)==0)
+    return;
+  sd.fn_create(n,L,B);
+  sd.fn_first(L);
+  while(is_next){
+    for(i=0;i<n;i++)
+      S[nb+i]=L[i];
+    nb+=n;
+    is_next=sd.fn_next(L);
+    
+  }
+  sd.fn_delete();
+}
+int type2test(char* ptest,SAMPLING_DATA* sd)
+{
+  int test=0;
+  if(strcmp(ptest,"t")==0){
+    test=mtT;
+    sd->fn_stat=two_sample_tstat;
+    sd->fn_maxT=sd->fn_stat;
+    sd->fn_minP=sd->fn_stat;
+    sd->fn_num_denum=two_sample_tstat_num_denum;
+  }else  if(strcmp(ptest,"f")==0){
+    test=mtF;
+    sd->fn_stat=Fstat;
+    sd->fn_num_denum=Fstat_num_denum;
+    sd->fn_maxT=sd->fn_stat;
+    sd->fn_minP=sd->fn_stat;
+  }else  if(strcmp(ptest,"pairt")==0){
+    test=mtPairT;
+    sd->fn_stat=sign_tstat;
+    sd->fn_num_denum=sign_tstat_num_denum;
+    sd->fn_maxT=sd->fn_stat;
+    sd->fn_minP=sign_sum;/*changed to monotone*/
+  }else  if(strcmp(ptest,"blockf")==0){
+    test=mtBlockF;
+    sd->fn_stat=Block_Fstat;
+    sd->fn_num_denum=Block_Fstat_num_denum;
+    sd->fn_maxT=sd->fn_stat;
+    sd->fn_minP=sd->fn_stat;
+  }
+  else if(strcmp(ptest,"wilcoxon")==0){
+    test=mtWilcoxon;
+    sd->fn_stat=Wilcoxon_T;
+    sd->fn_num_denum=Wilcoxon_num_denum;
+    sd->fn_maxT=sd->fn_stat;/*changed to normalize*/
+    sd->fn_minP=Wilcoxon_stat;
+  }else if(strcmp(ptest,"t.equalvar")==0){
+    test=mtTequalVar;
+    sd->fn_stat=two_sample_t1stat;
+    sd->fn_num_denum=two_sample_t1stat_num_denum;
+    sd->fn_maxT=sd->fn_stat;
+    sd->fn_minP=ave_diff;/*changed to monotone*/
+  }else
+    return 0;
+  sd->test=test;
+  return 1;
+}
+int type2sample(char** options,SAMPLING_DATA* sd)
+{
+  char *ptest,*pfixed_seed,*pside;
+  int test=0;
+  int is_fixed_sampling=0;
+  int side=-2;
+  /************************/  
+  ptest=options[0];
+  pside=options[1];
+  pfixed_seed=options[2];
+
+  /***********************/
+  type2test(ptest,sd);
+  test=sd->test;
+
+  /***********************/
+  if(strcmp(pside,"upper")==0)
+    side=1;
+  if(strcmp(pside,"lower")==0)
+    side=-1;
+  if(strcmp(pside,"abs")==0)
+    side=0;
+  sd->fn_cmp=side2cmp(side);
+  /**************/
+  if(strcmp(pfixed_seed,"y")==0)
+    is_fixed_sampling=mtFixedSampling;
+  else
+    is_fixed_sampling=0;
+  sd->is_fixed_seed=is_fixed_sampling;
+  /***************/
+  switch(test){
+  case mtT: case mtF: case mtWilcoxon: case mtTequalVar:
+    if(is_fixed_sampling){
+      sd->fn_first=first_sample_fixed;
+      sd->fn_next=next_sample_fixed;
+      sd->fn_create=create_sampling_fixed;
+      sd->fn_delete=delete_sampling_fixed;
+    }else{
+      sd->fn_first=first_sample;
+      sd->fn_next=next_sample;
+      sd->fn_create=create_sampling;
+      sd->fn_delete=delete_sampling;
+    }
+    break;
+  case mtPairT: 
+    if(is_fixed_sampling){
+      sd->fn_create=create_sampling_pairt_fixed;
+      sd->fn_delete=delete_sampling_pairt_fixed;
+      sd->fn_first=first_sample_pairt_fixed;
+      sd->fn_next=next_sample_pairt_fixed;
+    }else{
+	 sd->fn_create=create_sampling_pairt;
+	 sd->fn_delete=delete_sampling_pairt;
+	 sd->fn_first=first_sample_pairt;
+	 sd->fn_next=next_sample_pairt;
+    }
+    break;
+  case mtBlockF:/*have not implemented the solution for storing the permutations yet, as it is very memory intensive*/
+    sd->fn_create=create_sampling_block;
+    sd->fn_delete=delete_sampling_block;
+    sd->fn_first=first_sample_block;
+    sd->fn_next=next_sample_block;
+    break;
+  default:
+    fprintf(stderr,"Can not recogize the parameter\n");
+    return 0;
+  }
+  return 1;
+}
+      
+    
+
+    
+/*test*/
+/*main()
+{
+  #define N 6
+  #define NUMB 6
+  int n=N;
+  int L[N]={0,0,1,1,2,2};
+  int B=NUMB;
+  int S[NUMB*N];
+  int i;
+  get_sample_labels(&n,L,&B,S);
+  for(i=0;i<B;i++)
+    print_narray(stderr,S+i*n,n);
+}*/
+
+
+  
+
+
+
diff --git a/src/VScount.c b/src/VScount.c
new file mode 100644
index 0000000..af46495
--- /dev/null
+++ b/src/VScount.c
@@ -0,0 +1,40 @@
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include <math.h>
+
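+/* For each of the B null-distribution samples stored in TH (B consecutive
+   blocks of m test statistics), count how many statistics exceed each of
+   the c cutoffs; counts are returned as an integer vector of length c*B,
+   indexed by (iteration, cutoff). */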
+SEXP VScount(SEXP TH, SEXP cutoffs, SEXP m, SEXP B, SEXP c){
+
+  int B_len = INTEGER(B)[0], m_len = INTEGER(m)[0], c_len = INTEGER(c)[0];
+  int b, i, j;
+  SEXP guessFT, THb, VS;
+
+  PROTECT(guessFT=allocVector(INTSXP,1));
+  PROTECT(THb=allocVector(REALSXP,m_len));
+  PROTECT(VS=allocVector(INTSXP,c_len*B_len));
+
+  for(b=0;b<B_len;b++){
+
+    if((b%250==0) && (b>0))
+      Rprintf("%d ",b);
+
+    for(j=0;j<c_len;j++){
+      INTEGER(guessFT)[0]=0;
+
+      for(i=0;i<m_len;i++){
+	REAL(THb)[i]=REAL(TH)[b*m_len+i];
+	if(REAL(THb)[i]>REAL(cutoffs)[j]){
+	  ++INTEGER(guessFT)[0];
+	}
+      }
+
+      INTEGER(VS)[b*c_len+j]=INTEGER(guessFT)[0];
+    }
+  }
+
+  Rprintf("%d\n",B_len);
+
+  UNPROTECT(3);
+
+  return(VS);
+}
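+/* Output layout, restating the loops above: VS[b*c_len+j] holds the number of
+   the m statistics in bootstrap iteration b that exceed cutoffs[j].  As an
+   illustrative example (numbers assumed): with m=3, TH column b = (1.2, 0.4,
+   2.7) and cutoffs = (1.0, 2.0), row b of VS is (2, 1). */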
diff --git a/src/block_sampling_fixed.c b/src/block_sampling_fixed.c
new file mode 100755
index 0000000..c04b224
--- /dev/null
+++ b/src/block_sampling_fixed.c
@@ -0,0 +1,98 @@
+/*This file is used to do the sampling for block sampling*/
+#include "stdio.h"
+#include "stdlib.h"
+#include "math.h"
+#include "string.h"
+#include "mt.h"
+
+static int l_n=0;
+static int l_B=0;/*the number of total simulations*/
+static int l_b=0;/*the number of permutations done so far*/
+static int l_is_random=1;/*whether the permutation is random or not*/
+static int* l_L=NULL;
+static int l_m=0;/*the number of treatments*/
+static int* l_order_block=NULL;
+void create_sampling_block(int n,int*L,int B)
+{
+  int i,maxB,Nblock,m,imax,fac;/*m is the number of treatments*/
+  double logfac;
+  m=0;
+  for(i=0;i<n;i++)
+    if(L[i]>m){
+      m++;
+    }
+  m++;
+  Nblock=n/m;
+  logfac=logfactorial(m,m)*Nblock;
+  imax=(unsigned int)(~0)>>1;/*divide by 2 to avoid the negative number*/
+  if(fabs(logfac)<log(imax)){
+    fac=1;
+    for(i=1;i<m+1;i++)
+      fac*=i;
+    maxB=fac;
+    for(i=1;i<Nblock;i++)
+      maxB*=fac;
+  }else{
+    maxB=imax;
+  }
+  if((B<=0) || (B>=maxB)){
+    /* checking if complete permutation doable*/
+    if (fabs(logfac)>log(imax)){
+      fprintf(stderr,"as B(log(B)=%5.2f) is too big,we can not do the complete permutations\n",logfac);
+      return; /*exit(0)*/
+    }
+    l_B=maxB;
+    fprintf(stderr,"\nWe're doing %d complete permutations\n",l_B);
+    l_is_random=0;
+  }else{
+    /*doing random permutation*/
+    l_B=B;
+    l_is_random=1;
+    set_seed(g_random_seed);
+  }
+  l_n=n;
+  l_b=0;
+  l_m=m;
+  l_L=(int*)Calloc(n,int);
+  memcpy(l_L,L,sizeof(int)*n);
+  l_order_block=(int*)Calloc(n,int);
+  init_label_block(l_order_block,n,m);
+}
+      
+  
+void delete_sampling_block()
+{
+  Free(l_L);
+  l_L=NULL;
+  Free(l_order_block);
+}
+int next_sample_block(int* L)
+{
+  if(l_b>=l_B) return 0;
+
+  if(l_is_random){
+    memcpy(L,l_order_block,sizeof(int)*l_n);
+    sample_block(L,l_n,l_m);
+  } else{
+    next_label_block(L,l_n,l_m);
+  }
+  l_b++;
+  return 1;
+}
+int first_sample_block(int *L)
+{
+  if(L==NULL)
+    return l_B;
+  if(l_is_random){
+    memcpy(L,l_L,sizeof(int)*l_n);
+  }else{ 
+    init_label_block(L,l_n,l_m);
+  }
+  l_b=1;
+  return 1;
+}
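+/* A minimal, hypothetical driver (not compiled into the package) for the
+   block-sampling interface above; the 3 blocks x 2 treatments labelling and
+   B=100 are illustrative assumptions.
+
+   int L[6]={0,1,0,1,0,1};
+   int B;
+   create_sampling_block(6,L,100);
+   B=first_sample_block(NULL);    returns the actual number of samples
+   first_sample_block(L);
+   do{
+     ...compute the test statistics with the permuted labelling L...
+   }while(next_sample_block(L));
+   delete_sampling_block();
+*/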
+
+
+
+
+
diff --git a/src/bootloop.c b/src/bootloop.c
new file mode 100755
index 0000000..aee9c12
--- /dev/null
+++ b/src/bootloop.c
@@ -0,0 +1,55 @@
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include <math.h>
+
+SEXP
+bootloop(SEXP fbody, SEXP X, SEXP W, SEXP p, SEXP n, SEXP B, SEXP samp)
+{
+  int B_len= INTEGER(B)[0], p_len=INTEGER(p)[0], num_samples, b, i, j;
+  SEXP Xb, Wb, Sb, Tb, muboot;
+
+  num_samples = INTEGER(n)[0];
+
+  PROTECT(Xb=allocVector(REALSXP,num_samples));
+  PROTECT(Wb=allocVector(REALSXP,num_samples));
+  PROTECT(Sb=allocVector(INTSXP,num_samples));
+  PROTECT(Tb=allocVector(REALSXP,3));
+  PROTECT(muboot = allocVector(REALSXP, B_len * p_len));
+
+  SEXP e, ptr;
+  PROTECT(e=allocVector(LANGSXP,4)); /* this includes the samp argument */
+  SETCAR(e, fbody);
+
+  for(b=0;b<B_len;b++){
+
+    if((b%100==0) && (b>0)) /* report progress every 100 iterations */
+      Rprintf("%d ",b);
+
+    for(j=0;j<p_len;j++){
+
+      for(i=0;i<num_samples;i++){
+	INTEGER(Sb)[i]=INTEGER(samp)[num_samples*b+i];
+	REAL(Xb)[i]=REAL(X)[(INTEGER(samp)[num_samples*b+i]-1)*p_len+j];
+	REAL(Wb)[i]=REAL(W)[(INTEGER(samp)[num_samples*b+i]-1)*p_len+j];
+      }
+
+      ptr = CDR(e);
+      SETCAR(ptr, Xb);
+      ptr = CDR(ptr);
+      SETCAR(ptr, Wb);
+      ptr = CDR(ptr);
+      SETCAR(ptr, Sb);
+
+      Tb = eval(e, R_GlobalEnv);
+      REAL(muboot)[p_len*b+j] = REAL(Tb)[2]*REAL(Tb)[0]/REAL(Tb)[1];
+    }
+  }
+
+  Rprintf("%d\n",B_len);
+
+  UNPROTECT(6);
+
+  return(muboot);
+}
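+/* A note on the call constructed above: for each bootstrap sample b and
+   variable j, the R function fbody is evaluated as fbody(Xb, Wb, Sb), where
+   Xb and Wb hold the resampled data and weights for variable j and Sb the
+   1-based sample indices; fbody is assumed to return a numeric vector of at
+   least length 3, and the stored statistic is Tb[2]*Tb[0]/Tb[1] (0-based,
+   as in the code). */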
+
diff --git a/src/mt.c b/src/mt.c
new file mode 100755
index 0000000..725cf33
--- /dev/null
+++ b/src/mt.c
@@ -0,0 +1,580 @@
+/*****************************************************************/
+/*           Header files                                        */
+/*****************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <time.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include "mt.h"
+/************************************************************************************/
+/*                      malloc_gene_data                                            */
+/************************************************************************************/
+/*Allocate the necessary space for the big data;
+  see the comments about the structure GENE_DATA
+*/
+int myDEBUG=0;
+long int g_random_seed=3455660;
+void print_b(int b,int B,char* prompt){
+  static int p=0;
+  if(b==0) p=0;
+  if(!PROMPT_LEN){
+    if((B<=100) ||(b%(B/100)==0))
+      {
+	   /*fprintf(stderr,"%s%d\t",prompt,b);*/
+	Rprintf("%s%d\t",prompt,b);
+	p++;
+	if(PRINT_VAR_NUM && (p%PRINT_VAR_NUM==0))
+	     /*fprintf(stderr,"\n");*/
+	     Rprintf("\n");
+
+      }
+  }else if((b%(PROMPT_LEN+1))==0){ /*use PROMPT_LEN+1 to avoid the compiling warnings*/
+    p++;
+    /*fprintf(stderr,"%s%d",prompt,b);*/
+    Rprintf("%s%d",prompt,b);
+    if(PRINT_VAR_NUM && (p%PRINT_VAR_NUM==0))
+	 /*fprintf(stderr,"\n");*/
+	 Rprintf("\n");
+
+  }
+}
+#ifdef WINDOWS
+void win_print(FILE* fp, char* format,...)
+{
+  va_list ap;
+  va_start(ap, format);
+  REvprintf(format, ap);
+  va_end(ap);
+}
+#endif
+void malloc_gene_data(GENE_DATA* pdata)
+{
+  int i;
+  int nrow=pdata->nrow;
+  int ncol=pdata->ncol;
+
+  pdata->id=(char**)Calloc(nrow,char*);
+  pdata->d=(float**)Calloc(nrow,float*);
+  pdata->L=(int*)Calloc(ncol,int);
+
+  /*initialization: zero all the labels (the memset suffices)*/
+  memset(pdata->L,0,sizeof(int)*ncol);
+
+  for (i=0; i<nrow; i++) {
+    pdata->id[i] = (char *) Calloc(MAX_ID,char);
+    pdata->d[i]=(float *) Calloc(ncol,float);
+ }
+}
+
+/********************************************************************************/
+/*                           free_gene_data                                      */
+/********************************************************************************/
+/*free the space allocated for pdata*/
+void free_gene_data(GENE_DATA* pdata)
+{
+  int i;
+  for (i=0; i<pdata->nrow; i++) {
+    Free(pdata->d[i]);
+    Free(pdata->id[i]);
+  }
+  Free(pdata->L);
+  Free(pdata->d);
+  Free(pdata->id);
+}
+
+/********************************************************************************/
+/*                   compute_test_stat                                          */
+/********************************************************************************/
+void compute_test_stat(GENE_DATA* pdata, int* L,float* T,
+		       FUNC_STAT func_stat,const void* extra)
+     /*L is an array which contains 0,1,2,... specifying the class labels*/
+     /*T is the vector of test statistics to return*/
+     /*func_stat is a function pointer with the following protocol:
+       float func_stat(float *Y, int* L,int n, float na,const void* extra)*/
+{
+  int i;
+  for(i=0;i<pdata->nrow;i++){
+    /*    fprintf(stderr,"i=%d,T[%d]=%d\n",i,i,T[i]);*/
+    T[i]=(*func_stat)(pdata->d[i],L,pdata->ncol,pdata->na,extra);
+  }
+}
+ 
+/********************************************************************************/
+/*         get1pvalue                                                           */
+/********************************************************************************/
+/*  Computes the unadjusted permutation p-values.  The resampling is driven by
+    next_sample: it determines when the resampling is finished by returning
+    false, and inside next_sample you decide whether to do complete or random
+    resampling, or whatever you like.
+
+  L: the labelling of each experiment
+  T: the test statistics
+  P: the unadjusted p-values
+
+  The functions first_sample and next_sample are written in a separate file,
+  which also provides create_sampling for initialization (e.g. allocating
+  space before the sampling is used) and delete_sampling for cleanup after
+  the sampling is done in the main().
+
+  int first_sample(int *L)
+       gets the first sample of the labelling.
+  int next_sample(int* L)
+       gets the next sample; once all the sampling is done it returns 0,
+       otherwise it returns 1.
+
+  input: pdata, L, B, next_sample, func_stat
+  output: T, P
+  (a commented usage sketch follows the function body below)
+ */
+
+void  get1pvalue(GENE_DATA* pdata,int* L,float* T,float* P,
+		 FUNC_STAT func_stat,FUNC_SAMPLE func_first_sample, 
+		 FUNC_SAMPLE func_next_sample,FUNC_CMP func_cmp,const void* extra)
+{
+  int b=0,*bL,i,is_next,*total;
+  float *bT, *count;
+  int ncol=pdata->ncol;
+  int nrow=pdata->nrow;
+  int B=(*func_first_sample)(NULL);
+  /*allocate the space and initialize*/
+  bT=(float*)Calloc(nrow,float);
+  bL=(int*)Calloc(ncol,int);
+  count=(float*)Calloc(nrow,float);
+  memset(count,0,sizeof(float)*nrow);
+  total=(int*)Calloc(nrow,int);
+  memset(total,0,sizeof(int)*nrow);
+
+  /*compute the original statistics first*/
+  compute_test_stat(pdata,L,T,func_stat,extra);
+
+  /*iterate over the permutations*/
+  (*func_first_sample)(bL);
+  is_next=1;
+  b=0;
+  while(is_next){
+    compute_test_stat(pdata,bL,bT,func_stat,extra);
+    for(i=0;i<nrow;i++){
+      if(bT[i]==NA_FLOAT) continue;
+      if(T[i]==NA_FLOAT) continue;
+      /*only the 3 common cases are implemented right now*/
+      if((func_cmp==cmp_high) &&(bT[i]>=T[i]-EPSILON)){
+	count[i]+=1;
+      }else if((func_cmp==cmp_low) &&(bT[i]<=T[i]+EPSILON)){
+	count[i]+=1;
+      }else if ((func_cmp==cmp_abs) &&(fabs(bT[i])>=fabs(T[i])-EPSILON)){
+	count[i]+=1;
+      }	      
+      total[i]++;
+     }
+    b++;
+    print_b(b,B,"b=");
+    is_next=(*func_next_sample)(bL);
+  }
+
+  /*summarize the results*/
+  for(i=0;i<nrow;i++){ 
+    if(total[i]==0) 
+      P[i]=NA_FLOAT;
+    else P[i]=count[i]*1.0/total[i];
+  }
+
+  /*free the spaces*/
+  Free(bT);
+  Free(count);
+  Free(total);
+  Free(bL);
+}
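+/* A minimal, hypothetical usage sketch (not compiled into the package) of
+   the sampling protocol described above; the two-class labelling, B=1000
+   and the two-sided comparison cmp_abs are illustrative assumptions, and
+   pdata is assumed to have ncol==4.
+
+   void example_get1pvalue(GENE_DATA* pdata)
+   {
+     int L[4]={0,0,1,1};
+     float *T=(float*)Calloc(pdata->nrow,float);
+     float *P=(float*)Calloc(pdata->nrow,float);
+     create_sampling(4,L,1000);
+     get1pvalue(pdata,L,T,P,two_sample_tstat,
+                first_sample,next_sample,cmp_abs,NULL);
+     delete_sampling();
+     Free(T); Free(P);
+   }
+*/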
+
+/********************************************************************************/
+/*                    sort_gene_data                                            */
+/********************************************************************************/
+/* Description:
+   sort the rows of gene_data such that row R[i] of the original data becomes row i, i=0,...,m-1,
+   where R[0],...,R[m-1] is a permutation of (0,...,m-1)
+*/
+
+void sort_gene_data(GENE_DATA* pdata,int*R)
+{
+  int i,nrow=pdata->nrow;
+  char** old_id; /*the old addresses of the gene ids*/
+  float** old_d;  /*the old addresses of the gene data*/
+  old_d=(float**)Calloc(nrow,float*);
+  old_id=(char**)Calloc(nrow,char*);
+  /*store the original pointers from pdata*/
+  for(i=0;i<nrow;i++)
+    {
+      old_d[i]=pdata->d[i];
+      old_id[i]=pdata->id[i];
+    }
+  /*rearrange the data so that it's ordered according to R*/
+  for(i=0;i<nrow;i++)
+    {
+      pdata->d[i]=old_d[R[i]];
+      pdata->id[i]=old_id[R[i]];
+    }
+  Free(old_id);
+  Free(old_d);
+}
+/********************************************************************************/
+/*                    sort_vector                                               */
+/********************************************************************************/
+/* Description:
+      sort the vector V of n elements according to the order R,
+      where R[0],...,R[n-1] is a permutation of 0,...,n-1
+*/
+void sort_vector(float* V,int*R,int n)
+{
+  float* old_V;
+  int i;
+  old_V=(float*)Calloc(n,float);
+  for(i=0;i<n;i++)
+    old_V[i]=V[i];
+  for(i=0;i<n;i++)
+    V[i]=old_V[R[i]];
+  Free(old_V);
+}
+/********************************************************************************/
+/*                    get_all_samples_P                                         */
+/********************************************************************************/
+/*  Description: computes all the unadjusted p-values for a gene with
+    expression values in V across n experiments. 
+
+    int first_sample(int *L)
+       get the first sample of the labelling.
+       if L==NULL, then it returns all the possible simulations, which depends on
+       the initial function create_sampling.
+    int next_sample(int* L)
+       get the next sample, if it's done all the sampling, then it returns 0, 
+       otherwise it returns 1.
+   output is P*/
+void get_all_samples_P(float* V, int n,float* P,float na, 
+		       FUNC_STAT func_stat,FUNC_SAMPLE func_first_sample, 
+		       FUNC_SAMPLE func_next_sample,FUNC_CMP func_cmp,const void* extra)
+{
+  int  *L,*R,i,oldb,is_next,b=0,B_new,B;
+  float* T=P,oldf;/*the array first stores T, then is switched to P*/
+ 
+  B=(*func_first_sample)(NULL);
+  /*allocate the spaces*/
+  L=(int*)Calloc(n,int);
+  R=(int*)Calloc(B,int);
+
+  /*compute all the test_stat*/
+  (*func_first_sample)(L);
+  is_next=1;
+  B_new=0;
+  while(is_next){
+    T[b]=func_stat(V,L,n,na,extra);
+    if(T[b]!=NA_FLOAT)
+      B_new++;
+    b++;
+    is_next=(*func_next_sample)(L);
+  }
+  if(B!=b){
+    fprintf(stderr,"Error we have b(%d)!=B(%d)\n",b,B);
+    return;/*exit(1)*/;
+  }
+  if(myDEBUG)
+      print_farray(stderr,T,B);
+  /*order the test_stat*/
+  order_data(T,R,B,func_cmp);
+
+  /*note: the last B-B_new elements have NA T-values*/
+  /*assign the probabilities*/
+  oldb=0;
+  oldf=T[R[0]];
+  for(b=1;b<B_new;b++){
+    if((func_cmp==cmp_high)&&(T[R[b]]>=oldf-EPSILON)) continue;
+    else if ((func_cmp==cmp_low ) &&(T[R[b]]<=oldf+EPSILON)) continue;
+    else if((func_cmp==cmp_abs )&& fabs(T[R[b]])>=fabs(oldf)-EPSILON) continue;
+
+    for(i=oldb;i<b;i++)
+      P[R[i]]=(b+0.0)/B_new;
+    oldb=b;
+    if(b<B_new-1) oldf=T[R[b]];
+  }
+  for(i=oldb;i<b;i++)
+    P[R[i]]=1.0;
+  
+  /*for NA test statistics, assign NA probabilities*/
+  for(b=B_new;b<B;b++)
+    P[R[b]]=NA_FLOAT;
+
+  /*free the space*/
+  Free(L);
+  Free(R);
+} 
+
+/*get all the samples of T, which are also ordered. It is used only for diagnosis*/
+void get_all_samples_T(float* V, int n,float* T,float na, 
+		       FUNC_STAT func_stat,FUNC_SAMPLE func_first_sample, 
+		       FUNC_SAMPLE func_next_sample,const void* extra)
+{
+  int  *L,*R,is_next,b=0,B;
+ 
+  B=(*func_first_sample)(NULL);
+  /*allocate the spaces*/
+  L=(int*)Calloc(n,int);
+  R=(int*)Calloc(B,int);
+
+  /*compute all the test_stat*/
+  (*func_first_sample)(L);
+  is_next=1;
+  while(is_next){
+    T[b]=func_stat(V,L,n,na,extra);
+    b++;
+    is_next=(*func_next_sample)(L);
+  }
+  if(B!=b){
+    fprintf(stderr,"Error we have b(%d)!=B(%d)\n",b,B);
+    return;/*exit(1)*/;
+  }
+  if(myDEBUG)
+    print_farray(stderr,T,B);
+  Free(L);
+  Free(R);
+} 
+
+void adj_pvalue_quick(GENE_DATA* pdata,float*T, float* P, 
+		      float* Adj_P,float* Adj_Lower,
+		      FUNC_STAT func_stat,FUNC_STAT func_stat_T,
+		      FUNC_SAMPLE func_first_sample, 
+		      FUNC_SAMPLE func_next_sample,FUNC_CMP func_cmp,const void* extra)
+{
+
+  int *L,b,B,B_new,i,*R,neq; /*b for simulation*, neq is for the number of equal signs*/
+  float* all_P,*all_Q,count;
+  int ncol=pdata->ncol,nrow=pdata->nrow;
+   
+  /*allocate the space*/
+  B=(*func_first_sample)(NULL);
+  L=(int*)Calloc(ncol,int);
+  R=(int*)Calloc(nrow,int);
+  all_P=(float*)Calloc(B,float);
+  all_Q=(float*)Calloc(B,float);
+
+  /*get the original unadjusted p-values first
+   we'll use the normalized t-statistics*/
+  get1pvalue(pdata,pdata->L,T,P,func_stat_T,func_first_sample,func_next_sample,func_cmp,extra);
+  if(myDEBUG)
+    {
+      print_farray(stderr,T,pdata->nrow);
+      print_farray(stderr,P,pdata->nrow);
+    }
+  /*sort the test_stat*/
+  order_mult_data(R,nrow,2,P,cmp_low,T,func_cmp);
+  /*order_data(P,R,nrow,func_cmp);*/
+
+  /*rearrange the data according the unadjusted p-values*/
+  sort_gene_data(pdata,R);
+  sort_vector(T,R,nrow);
+  sort_vector(P,R,nrow);
+  
+  /*initialze all_Q[]=NA_FLOAT*/
+  for(b=0;b<B;b++)
+    all_Q[b]=NA_FLOAT;
+
+  /*loop for each gene*/
+  for(i=nrow-1;i>=0;i--){
+    get_all_samples_P(pdata->d[i],ncol,all_P,pdata->na,
+		      func_stat,func_first_sample,func_next_sample,func_cmp,extra);
+    if(myDEBUG)
+      print_farray(stderr,all_P,B);
+
+    /*update all_Q*/
+    count=0;
+    B_new=0;
+    neq=0;
+
+    for(b=0;b<B;b++){
+      if (all_P[b]==NA_FLOAT) break;/*we need not care about NA p-values*/
+      if(all_Q[b]>all_P[b])
+	all_Q[b]=all_P[b];/*update q* by the value p*/
+      if(all_Q[b]==NA_FLOAT) continue;/*skip NA q*/
+      if(all_Q[b]<P[i]){
+	count+=1;
+      }else if (all_Q[b]<=P[i]+EPSILON)/*all_Q[b]>=P[i] here, so count ties within tolerance*/
+	 neq++;
+      B_new++;
+    }
+
+    if(myDEBUG)
+      {
+	print_farray(stderr,all_Q,B);
+	fprintf(stderr,"P[%d]=%5.3f,count=%5.2f,neq=%d\n",i,P[i],count,neq);
+      }
+
+    /*assign the Adj_P and Adj_Lower for gene i */
+    if(B_new!=0) {
+      Adj_P[i]=(count+neq)/B_new;
+
+      if(neq==0) 
+	Adj_Lower[i]=count/B_new;
+      else Adj_Lower[i]=(count+1)/B_new;
+    }
+    else {
+      Adj_P[i]=NA_FLOAT;
+      Adj_Lower[i]=NA_FLOAT; 
+    }
+    /*************************** */
+    print_b((nrow-i),nrow,"r="); 
+  }
+
+  /* enforce monotonicity of Adj_P and Adj_Lower*/
+  for(i=1;i<nrow;i++)
+    if(Adj_P[i]<Adj_P[i-1])
+      Adj_P[i]=Adj_P[i-1];
+
+  for(i=1;i<nrow;i++)
+    if(Adj_Lower[i]<Adj_Lower[i-1])
+      Adj_Lower[i]=Adj_Lower[i-1];
+
+  /*free the spaces*/
+  Free(L);
+  Free(R);
+  Free(all_P);
+  Free(all_Q);
+}
+	
+/********************************************************************************/
+/*         adj_by_T                                                          */
+/********************************************************************************/
+/*  Computes maxT-adjusted permutation p-values.  The resampling is driven by
+    next_sample: it determines when the resampling is finished by returning
+    false, and inside next_sample you decide whether to do complete or random
+    resampling, or whatever you like.
+
+  L: the labelling of each experiment
+  T: the test statistics
+  P: the unadjusted p-values
+  Adj_P: the adjusted p-values computed using the successive max|T|
+  The functions first_sample and next_sample are written in a separate file,
+  which also provides create_sampling for initialization (e.g. allocating
+  space before the sampling is used) and delete_sampling for cleanup after
+  the sampling is done in the main().
+
+  int first_sample(int *L)
+       gets the first sample of the labelling.
+  int next_sample(int* L)
+       gets the next sample; once all the sampling is done it returns 0,
+       otherwise it returns 1.
+
+  input: pdata, L, B, next_sample, func_stat
+  output: T, P, Adj_P
+ */
+
+void  adj_by_T(GENE_DATA* pdata,float* T,float* P,float*Adj_P,
+		 FUNC_STAT func_stat,FUNC_SAMPLE func_first_sample, 
+		 FUNC_SAMPLE func_next_sample,FUNC_CMP func_cmp,const void* extra)
+{
+  int b=0,*bL,i,is_next,*total1,*R,*total2;
+  float *bT, *count1,*count2,qT;/*qT is the successive maximum*/
+  int ncol=pdata->ncol;
+  int nrow=pdata->nrow;
+  int B=(*func_first_sample)(NULL);
+  /*allocate the space and initialize*/
+  bT=(float*)Calloc(nrow,float);
+  bL=(int*)Calloc(ncol,int);
+  count1=(float*)Calloc(nrow,float);
+  memset(count1,0,sizeof(float)*nrow);
+  total1=(int*)Calloc(nrow,int);
+  memset(total1,0,sizeof(int)*nrow);
+  count2=(float*)Calloc(nrow,float);
+  memset(count2,0,sizeof(float)*nrow);
+  total2=(int*)Calloc(nrow,int);
+  memset(total2,0,sizeof(int)*nrow);
+
+  R=(int*)Calloc(nrow,int);
+   /*compute the original t-statistics first*/
+
+  compute_test_stat(pdata,pdata->L,T,func_stat,extra);
+
+  /*sort the T*/  
+  order_data(T,R,nrow,func_cmp);
+  sort_gene_data(pdata,R);
+  sort_vector(T,R,nrow);
+  
+  /*iterate over the permutations*/
+  (*func_first_sample)(bL);
+
+  /*changed to the original stat, which is monotone in t and centered*/
+  is_next=1;
+  b=0;
+  while(is_next){
+    compute_test_stat(pdata,bL,bT,func_stat,extra);
+    /*deal with the unadjusted values first*/
+    for(i=0;i<nrow;i++){
+      if(T[i]==NA_FLOAT) continue;
+      if(bT[i]!=NA_FLOAT){
+	if((func_cmp==cmp_high)&&(bT[i]+EPSILON>=T[i])) count2[i]++;
+	if((func_cmp==cmp_low)&&(bT[i]<=T[i]+EPSILON)) count2[i]++;
+	if((func_cmp==cmp_abs)&&(fabs(bT[i])>=fabs(T[i])-EPSILON)) count2[i]++;
+	total2[i]++;
+      }
+    }
+
+    /*deal with adjusted values*/
+    qT=NA_FLOAT;/*initialize qT*/
+    for(i=nrow-1;i>=0;i--){ /*loop over the rows in reverse*/
+      if(T[i]==NA_FLOAT) continue;
+        /*only the 3 common cases are implemented right now*/
+      if(func_cmp==cmp_high){
+	if((bT[i]!=NA_FLOAT)&&(qT!=NA_FLOAT)&&(bT[i]>qT))
+	  qT=bT[i];
+	if((bT[i]!=NA_FLOAT)&&(qT==NA_FLOAT))
+	  qT=bT[i];
+	if((qT!=NA_FLOAT)&&(qT>=T[i]-EPSILON)) count1[i]+=1;
+      }else if(func_cmp==cmp_low){
+	if((bT[i]!=NA_FLOAT)&&(qT!=NA_FLOAT)&&(bT[i]<qT))
+	  qT=bT[i];
+	if((bT[i]!=NA_FLOAT)&&(qT==NA_FLOAT))
+	  qT=bT[i];
+	if((qT!=NA_FLOAT)&&(qT<=T[i]+EPSILON)) count1[i]+=1;
+      }else if (func_cmp==cmp_abs) {
+	if((bT[i]!=NA_FLOAT)&&(qT!=NA_FLOAT)&&(fabs(bT[i])>qT))
+	  qT=fabs(bT[i]);
+	if((bT[i]!=NA_FLOAT)&&(qT==NA_FLOAT))
+	  qT=fabs(bT[i]);
+	if((qT!=NA_FLOAT)&&(qT>=fabs(T[i])-EPSILON)) count1[i]+=1;
+      }	      
+      if(qT!=NA_FLOAT) total1[i]++;
+    }
+    b++;
+    print_b(b,B,"b=");
+    is_next=(*func_next_sample)(bL);
+  }
+
+  /*summarize the results*/
+  /*unadjusted one*/
+  for(i=0;i<nrow;i++){ 
+    if(total2[i]==0) 
+      P[i]=NA_FLOAT;
+    else P[i]=count2[i]*1.0/total2[i];
+  }
+  /*adjusted one*/
+  for(i=0;i<nrow;i++){ 
+    if(total1[i]==0) 
+      Adj_P[i]=NA_FLOAT;
+    else Adj_P[i]=count1[i]*1.0/total1[i];
+  }
+  /*enforce the monotonicity*/
+  for(i=1;i<nrow;i++)
+    if(Adj_P[i]<Adj_P[i-1])
+      Adj_P[i]=Adj_P[i-1];
+  /*free the spaces*/
+  Free(bT);
+  Free(count1);
+  Free(total1);
+  Free(count2);
+  Free(total2);
+  Free(bL);
+  Free(R);
+}      
+void set_seed_sampling(long int seed){
+  g_random_seed=seed;
+}
diff --git a/src/mt.h b/src/mt.h
new file mode 100755
index 0000000..833d0c4
--- /dev/null
+++ b/src/mt.h
@@ -0,0 +1,201 @@
+/*****************************************************************/
+/*          some options                                         */
+/*****************************************************************/
+/*#define USEDOUBLE*/
+/*when porting to R, the floating type is fixed to be a double*/
+/*for R on windows*/
+/*#define WINDOWS is used to build the R package for the windows system*/
+#include "R.h"
+#ifdef WINDOWS
+/*#include "R_ext\print.h"*/
+#define fprintf win_print
+void win_print(FILE*, char*,...);
+#endif
+/* these options could be incorporated into the command arguments if you like*/  
+extern int myDEBUG;  /*this variable needs to be declared in the main file*/
+extern long int g_random_seed;
+#define PRINT_VAR_NUM 10  /*the number of items printed per line in a debug
+			  session; when myDEBUG is 1, use small data and few
+			  simulations, otherwise there is far too much printing*/
+#define PROMPT_LEN 0        
+/*  if PROMPT_LEN>1, progress is reported every PROMPT_LEN+1 permutations;
+    otherwise it is reported after each 1% of the permutations is finished.
+    This only applies to the permutations used to get the unadjusted
+    p-values.  When calculating adjusted p-values, progress is always
+    reported after each gene is finished, as the total number of genes is
+    typically small, around 6000*/ 
+
+#include <float.h>
+
+#define MAX_ID 40                  /*the max number of characters allowed for ID*/
+#define MAX_WARN 256               /*the max of chars allowed in the warning message*/
+#define NA_FLOAT FLT_MAX          /*the default NA representation for float number*/
+
+#define NA_DATA   1e30             /*the default NA representation for a gene value*/
+
+
+/*using the double data*/
+#ifdef USEDOUBLE
+#define EPSILON (120*DBL_EPSILON)
+#define float double
+#else
+#define EPSILON (12*FLT_EPSILON)
+#endif
+
+
+typedef struct tagGENE_DATA{
+  char** id; /*the gene index id*/
+  float** d; /*the gene values matrix, mxn*/
+  float na;  
+  int nrow; /*nrow is the number of the genes*/
+  int ncol; /*ncol is the number of the experiments*/
+  int* L;   /*the status labelling of each experiment*/
+  char name[MAX_ID];/*the name of the status*/
+}GENE_DATA;
+
+typedef int (*FUNC_SAMPLE)(int *);
+typedef int (*FUNC_CMP)(const void*,const void*);
+typedef float (*FUNC_STAT)(const float*,const int*,const int,const float,const void*);
+/********************************************************************************/
+/*               multiple testing                                              */
+/********************************************************************************/
+void get_all_samples_P(float* V, int n,float* P,float na, 
+		       FUNC_STAT func_stat,FUNC_SAMPLE first_sample, 
+		       FUNC_SAMPLE next_sample,FUNC_CMP func_cmp,const void* extra);
+void get_all_samples_T(float* V, int n,float* T,float na, 
+		       FUNC_STAT func_stat,FUNC_SAMPLE first_sample, 
+		       FUNC_SAMPLE next_sample,const void* extra);
+
+void adj_pvalue_quick(GENE_DATA* pdata,float*T, float* P, 
+		      float* Adj_P,float* Adj_Lower,
+		      FUNC_STAT func_stat,FUNC_STAT func_stat_maxT,FUNC_SAMPLE first_sample, 
+		      FUNC_SAMPLE next_sample,FUNC_CMP func_cmp,const void* extra);
+void  get1pvalue(GENE_DATA* pdata,int* L,float* T,float* P,
+		 FUNC_STAT func_stat,FUNC_SAMPLE first_sample, 
+		 FUNC_SAMPLE next_sample,FUNC_CMP func_cmp,const void* extra);
+void  adj_by_T(GENE_DATA* pdata,float* T,float* P,float*Adj_P,
+		 FUNC_STAT func_stat,FUNC_SAMPLE func_first_sample, 
+	       FUNC_SAMPLE func_next_sample,FUNC_CMP func_cmp,const void* extra);	       
+		 
+/********************************************************************************/
+/*              processing with the gene_data                                   */
+/********************************************************************************/
+void read_infile(char *filename,GENE_DATA *pdata);
+void write_outfile(FILE* fp,GENE_DATA* pdata,float*T, float*P,float*Adj_P,float* Adj_Lower);
+void malloc_gene_data(GENE_DATA* pdata);
+void free_gene_data(GENE_DATA* pdata);
+void print_gene_data(GENE_DATA* pdata);
+void sort_gene_data(GENE_DATA* pdata,int*R);
+void sort_vector(float* V,int*R,int n);
+
+/********************************************************************************/
+/*               sampling good for two sample t and F-stat                      */
+/********************************************************************************/
+void create_sampling(int n,int*L,int B);
+void delete_sampling();
+int first_sample(int *L);
+int next_sample(int* L);
+
+void create_sampling_fixed(int n,int*L,int B);
+void delete_sampling_fixed();
+int first_sample_fixed(int *L);
+int next_sample_fixed(int* L);
+void set_seed_sampling(long int seed);
+
+void create_sampling_block(int n,int*L,int B);
+void delete_sampling_block();
+int first_sample_block(int *L);
+int next_sample_block(int* L);
+
+void create_sampling_pairt(int n,int*L,int B);
+void delete_sampling_pairt();
+int first_sample_pairt(int *L);
+int next_sample_pairt(int* L);
+
+void create_sampling_pairt_fixed(int n,int*L,int B);
+void delete_sampling_pairt_fixed();
+int first_sample_pairt_fixed(int *L);
+int next_sample_pairt_fixed(int* L);
+/********************************************************************************/
+/*            data_sorting                                                      */
+/********************************************************************************/
+void order_mult_data(int* R,int n,int k,...);
+void order_data(float* V,int*R,int n,FUNC_CMP func_cmp);
+
+int cmp_high(const void *v1, const void *v2);
+int cmp_low(const void *v1, const void *v2);
+int cmp_abs(const void *v1, const void *v2);
+
+
+/*miscellaneous functions*/
+void print_farray(FILE* fh,float* p_arr,int n);
+void print_narray(FILE*fh,int* p_arr,int n);
+
+/********************************************************************************/
+/*            commonly used statistics                                          */
+/********************************************************************************/
+void compute_test_stat(GENE_DATA* pdata, int* L,float* T,
+		       FUNC_STAT func_stat,const void* extra);
+float two_sample_tstat(const float *Y, const int* L,const int n,
+		       const float na,const void *extra); 
+float two_sample_tstat_num_denum(const float *Y, const int* L,const int n, 
+			       const float na,float* num, float* denum,const void* extra);
+
+/*t1stat is dealing with two sample t-statistics with equal variance*/
+float ave_diff(const float *Y, const int* L,const int n, const float na,const void* extra); /*used to speed up the minP as ave_diff is monotone of the t1stat*/
+float two_sample_t1stat(const float *Y, const int* L,const int n,
+		       const float na,const void *extra); 
+float two_sample_t1stat_num_denum(const float *Y, const int* L,const int n, 
+			       const float na,float* num, float* denum,const void* extra);
+
+/* Wilcoxon test*/
+float Wilcoxon_stat(const float *Y, const int* L,const int n, const float na,const void* extra); /*T-ET, where ET=n_1(n_0+n_1+1)/2 and T is the rank sum of group 1*/
+float Wilcoxon_T(const float *Y, const int* L,const int n,
+		       const float na,const void *extra); 
+/* computes (T-ET)/sd(T), where var(T)=n_0*n_1*(n_0+n_1+1)/12 */
+float Wilcoxon_num_denum(const float *Y, const int* L,const int n, 
+			       const float na,float* num, float* denum,const void* extra);
+
+float sign_sum(const float *Y, const int* L,const int n, const float na,const void* extra);
+float sign_tstat_num_denum(const float *Y, const int* L,const int n,
+			 const float na, float *num, float*denum,const void *extra);
+float sign_tstat(const float *Y, const int* L,const int n, 
+		 const float na, const void* extra);
+float Fstat_num_denum(const float *Y, const int* L,const int n, const float na,
+		      float *num, float*denum,const void* extra);
+float Fstat(const float *Y, const int* L,const int n,
+	    const float na,const void* extra);
+float Block_Fstat(const float *Y, const int* L,const int n, 
+		  const float na,const void* extra);
+float Block_Fstat_num_denum(const float *Y, const int* L,const int n,
+			    const float na, float *num, float*denum,const void* extra);
+/********************************************************************************/
+/*           some useful tools                                                  */
+/********************************************************************************/
+
+int bin2int(int*V,int n);/*pack the bits into an integer*/  
+void int2bin(int x,int*V,int n);/*decompose an integer into bits stored in V*/
+int bincoeff(int n, int k);
+double logbincoeff(int n, int k);
+double logfactorial(int n, int k);
+void init_label(int n, int k, int*nk, int*L);
+void init_label_block(int *L, int n,int m);
+int next_label_block(int* L, int n, int m);
+void sample_block(int *L, int n,int m);
+void sample2label(int n, int k, int* nk,int *permun, int*L);
+void label2sample(int n, int k, int* nk,int*L,int *permun);
+int next_label(int n, int k, int* nk, int*L);
+int next_lex(int* A, int n, int k);
+void A2L(int* A,int* L,int n,int k);
+FUNC_CMP side2cmp(int side);
+void sample(int *V, int n, int m);
+float get_rand();
+void set_seed(long int seed);
+int next_mult_permu(int* V, int n, int k, int* nk);
+int next_two_permu(int* V, int n, int k);
+int next_permu(int*V,int n);
+void data2vec(double** data,double*d,  int nrow, int ncol);
+void set_seed_sampling(long int seed);
+
+void get_maxT(double*, int*, int*, int*, double*, float*, float*,
+	      float*, int*, int*, char**, int*);
diff --git a/src/pairt_sampling.c b/src/pairt_sampling.c
new file mode 100755
index 0000000..dcecf32
--- /dev/null
+++ b/src/pairt_sampling.c
@@ -0,0 +1,171 @@
+/*the l is for local global variable in this file*/
+#include "stdio.h"
+#include "stdlib.h"
+#include "math.h"
+#include "string.h"
+#include "mt.h"
+
+static int l_n=0;/*the number of samples for permutations*/
+static int l_B=0;/*the number of total simulations*/
+static int l_b=0;/*the number of permutations done so far*/
+static int l_is_random=1;/*whether the permutation is random or not*/
+static unsigned int* l_all_samples=NULL;
+/*store all the samples in random case*/
+static int l_sz=0; /*the number of bytes for per permutation*/
+static int l_len=0;
+static int get_binpermu(int h,int n,int sz,int len,int *L,int hMax,unsigned int *V)/*sz=ceiling(n/sizeof(int)*8)*/;
+static int set_binpermu(int *L,int h,int n,int sz,int len,int hMax,unsigned int *V);
+void create_sampling_pairt(int n,int*L,int B)
+{
+  int i,maxB;
+  unsigned int imax;
+  l_n=n;
+  l_b=0;
+  imax=(unsigned int)(~0);
+  l_len=floor(log(imax+1.0)/log(2));
+  l_sz=ceil(n/(l_len*1.0));
+  
+  /*setting the maximum B*/
+  if(fabs(n*log(2))<log(imax>>1)){ /*to be safe, moved two bits*/
+    maxB=1<<n;    
+  }else{/*we can set the only maximum of B*/
+    maxB=imax>>1;
+  }
+  
+  if((B==0) ||(B>=maxB)){
+    if(n>=(l_len-1)){
+      fprintf(stderr,"as n=%d is very large, we can not do complete permutation\n, Please try random permutation\n",n);
+      return;
+    }
+    l_is_random=0;
+    l_B=maxB;
+    /*when exceeding the maximum number, we'll use the complete permutations*/
+    /*fprintf(stderr,"\nWe're doing %d complete permutations\n",l_B);*/
+    Rprintf("\nWe're doing %d complete permutations\n",l_B);
+  }
+  else{
+    int* myL;
+    myL=(int*)Calloc(n,int);
+    l_B=B;
+    l_is_random=1;
+    /*fprintf(stderr,"\nWe're doing %d random permutations\n",l_B);*/
+    Rprintf("\nWe're doing %d random permutations\n",l_B);
+    set_seed(g_random_seed);
+
+    l_all_samples=(unsigned int*)Calloc(l_B*l_sz,int);
+    /*setting the first sample as the original data*/
+    set_binpermu(L,0,n,l_sz,l_len,l_B,l_all_samples);
+    /*the extra as a buffer*/
+    for(i=1;i<l_B;i++){
+      int j;
+      float tmp;
+      for(j=0;j<n;j++){
+	tmp=get_rand();
+	if(tmp>0.5)
+	  myL[j]=1;
+	else
+	  myL[j]=0; 
+      }
+      set_binpermu(myL,i,n,l_sz,l_len,l_B,l_all_samples);
+    }
+    Free(myL);
+    if(myDEBUG)
+    {
+      fprintf(stderr,"the samples are\n");
+      for(i=0;i<l_B;i++)
+	fprintf(stderr,"%d ",l_all_samples[i]);
+    }
+  }
+}
+
+void delete_sampling_pairt()
+{
+  if(l_is_random){
+    if(l_B!=0){
+      Free(l_all_samples);
+      l_all_samples=NULL;
+    }
+  }
+}
+int first_sample_pairt(int *L)
+{
+  /*return the number of real samples*/ 
+  if(L==NULL)
+    return l_B;
+  
+  /*call different sampling function*/  
+  if(l_is_random){
+    get_binpermu(0,l_n,l_sz,l_len,L,l_B,l_all_samples);
+  }
+  else
+    int2bin(0,L,l_n);
+
+  l_b=1;/*reset the number of permutations done*/
+
+  return 1;
+}
+int next_sample_pairt(int* L)
+{ 
+  if(l_b >=l_B)
+    return 0; /*no next sample*/
+
+  /*call different sampling function*/  
+  if(l_is_random)
+    get_binpermu(l_b,l_n,l_sz,l_len,L,l_B,l_all_samples);
+  else
+    int2bin(l_b,L,l_n); /* note: for complete resampling, we cannot
+			   do more than 2^32 iterations*/
+
+  l_b++; 
+
+  return 1; 
+}
+static int get_binpermu(int h,int n,int sz,int len,int *L,int hMax,unsigned int *V)/*sz=ceiling(n/sizeof(int)*8)*/
+{
+  int i,j;
+  unsigned val;
+  memset(L,0,sizeof(unsigned int)*n);
+  if((h+1)> hMax) return 0;
+  for(j=0;j<sz;j++){
+    i=j*len; /*starting from the last bit*/
+    val=V[h*sz+j];
+    while(val>0){
+      /*this code could be made faster if necessary*/
+      L[i]=val&1;
+      i++;
+      val>>=1;/*to move another bit*/
+    }
+  }
+  return 1;
+}
+      
+      
+static int set_binpermu(int *L,int h,int n,int sz,int len,int hMax,unsigned int *V)
+{
+
+  int i,j,nextbound;
+  unsigned val,pow;
+  if((h+1)> hMax) return 0;
+  i=0; /*starting from the last bit*/
+  for(j=0;j<sz;j++){
+    nextbound=(j+1)*len;
+    if(nextbound> n)
+      nextbound=n;
+    pow=1;
+    val=0;
+    while(i<nextbound){
+      val+=(unsigned int)(L[i])*pow;
+      pow<<=1;
+      i++;
+    }
+    V[h*sz+j]=val;
+  }
+  return 1;
+}
+  
+  
+
+
+
+
+
diff --git a/src/pairt_sampling_fixed.c b/src/pairt_sampling_fixed.c
new file mode 100755
index 0000000..87d6d04
--- /dev/null
+++ b/src/pairt_sampling_fixed.c
@@ -0,0 +1,63 @@
+/*the l is for local global variable in this file*/
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+#include "mt.h"
+
+static int l_n=0;/*the number of samples for permutations*/
+static int l_B=0;/*the number of total simulations*/
+static int l_b=0;/*the number of permutations done so far*/
+static int* l_L=NULL;
+void create_sampling_pairt_fixed(int n,int*L,int B)
+{
+  l_n=n;
+  l_B=B;
+  l_b=0;
+  if(B<=0){
+    fprintf(stderr,"B needs to be positive\n");
+    return;/*exit(0)*/;
+  }
+  l_L=(int*)Calloc(n, int);
+  memcpy(l_L,L,sizeof(int)*n);
+}
+
+
+void delete_sampling_pairt_fixed()
+{
+  Free(l_L);
+  l_L=NULL;
+}
+int first_sample_pairt_fixed(int *L)
+{
+  if(L==NULL)
+    return l_B;
+  else{
+    memcpy(L,l_L,sizeof(int)*l_n);
+  }
+  l_b=1;
+  set_seed(g_random_seed);
+  return 1;  
+}
+int next_sample_pairt_fixed(int* L)
+{ 
+  int n=l_n,i;
+  float tmp;
+  if(l_b>=l_B) return 0;
+  for(i=0;i<n;i++){
+    tmp=get_rand();
+    if(tmp>0.5)
+      L[i]=1;
+    else
+      L[i]=0;       
+  }
+  l_b++;
+  return 1;
+}
+
+  
+  
+
+
+
+
+
diff --git a/src/random.c b/src/random.c
new file mode 100755
index 0000000..b83e444
--- /dev/null
+++ b/src/random.c
@@ -0,0 +1,144 @@
+/* mt/random.c
+   Copyright (C)  Yongchao Ge, Berkeley, USA
+   This file is part of the free multtest software, a stand-alone C
+   package; the multtest software has also been wrapped into an R
+   package for ease of use.
+ */
+  
+
+#include "stdio.h"
+#include "stdlib.h"
+#include "math.h"
+#include "mt.h"
+/*-------------------------------ran2()---------------------------*/
+/* This function implements a random number generating algorithm 
+   based on the ran2 function in Numerical Recipes in C, 2nd edition, p. 282 (1992).
+
+   It generates a uniform random number in (0,1) (not including 0 and 1) using 
+   the random number generator of L'Ecuyer with the Bays-Durham shuffle and added safeguards.
+*/
+#define A1 40014
+#define Q1 53668
+#define R1 12211
+#define M1 2147483563
+#define A2 40692
+#define Q2 52774
+#define R2 3791
+#define M2 2147483399 
+
+#define ONE (1.0- 1.2e-7)
+#define N_SHUFF 32
+#define N_WARMUP 8
+
+/*prestoring the results to increase the computing speed*/
+static long int  N_DIV=(1+(M1-1)/N_SHUFF); 
+static float  M1inv=(1.0/M1);
+
+/*---------------------------begin of RNG------------------------------------*/
+typedef struct tagRNG{
+  long int z1; /*generator 1*/
+  long int z2; /*generator 2*/
+  long int y;
+  long int V[N_SHUFF];
+}RNG;
+RNG l_rng;
+void set_seed(long int seed){
+  long int z1,z2,*V;
+  int i;
+  z1=abs(seed); /*setting the seed to z1 and initialize it*/
+  if(z1==0) z1=1;/*be sure to prevent seed=0*/
+  z2=z1;/*initializing z2*/
+  /*warm up*/
+  for(i=0;i<N_WARMUP;i++){
+     long int t=z1/Q1;
+     z1=A1*(z1-t*Q1)-R1*t;
+     if(z1<0) z1+=M1;
+  }
+  /*initializing the array V*/
+  V=l_rng.V;
+  for(i=N_SHUFF-1;i>=0;i--){
+    long int t=z1/Q1;
+     z1=A1*(z1-t*Q1)-R1*t;
+     if(z1<0) z1+=M1;
+     V[i]=z1;
+  }
+  l_rng.z1=z1;
+  l_rng.z2=z2;
+  l_rng.y=z1;
+}
+float get_rand(){
+  int i;
+  long t;
+  long int z1=l_rng.z1,z2=l_rng.z2,y=l_rng.y,*V=l_rng.V; 
+  float res;
+  /*generator 1*/
+  t=z1/Q1;
+  z1=A1*(z1-t*Q1)-R1*t;
+  if(z1<0) z1+=M1;
+  /*generator 2*/
+  t=z2/Q2;
+  z2=A2*(z2-t*Q2)-R2*t;
+  if(z2<0) z2+=M2;
+  /*shuffling*/
+  i=y/N_DIV; /*N_DIV=(1+(M1-1)/N_SHUFF); makes sure i is in 0..N_SHUFF-1*/
+  y=V[i]-z2; /*V[i] is a random number similar to z1; y will be in {-(m2-1)}..(m1-1)*/
+  if(y<1) y+=(M1-1); /*makes sure y is in 1..(m1-1); note m1 is almost equal to m2, m1>m2*/
+  V[i]=z1; /*fill the newly generated random number z1 into array V*/ 
+  /*normalizing from 0 to 1*/
+  res=y*M1inv; /*=y/M1*/
+  l_rng.z1=z1;
+  l_rng.z2=z2;
+  l_rng.y=y;
+  if(res>ONE) return ONE;
+  else return res;
+}
+/*-----------------------end of RNG----------------------------*/
+/*get m samples from the n-dim vector V; the results are stored 
+in the first m members of vector V*/
+void sample(int *V, int n, int m)
+{
+  int i,j,temp;
+  float f;
+  for(i=0;i<m;i++){
+    /* no need to worry yet    
+       if(i==(n-1)) continue; */ /*no need to swap with the last elements*/
+    j=n;
+    while (j==n){/*skip the border, we only want random
+		    numbers from i,i+1,i+2,...,n-1*/
+      f=get_rand()*(n-i);
+      j=i+floor(f);
+    }
+    /*swap V[i] and V[j], even when i==j*/
+    temp=V[j];
+    V[j]=V[i];
+    V[i]=temp;
+  }   
+}
+  
+
+/*void main(int argc, char* argv[])
+{
+  int n;
+  long seed=100;
+  int i;
+  float temp;
+ #define DIM 20
+  int V[DIM];
+  n=atoi(argv[1]);
+  seed=atol(argv[2]);
+  set_seed(seed);
+  for(i=0;i<n;i++){
+    temp=get_rand();
+    fprintf(stderr,"%f ",temp);
+  }
+  fprintf(stderr,"\n");
+  fprintf(stderr,"The sampling of %d\n",DIM);
+  for(i=0;i<DIM;i++)
+    V[i]=i+1;
+  sample(V,DIM,DIM);
+  for(i=0;i<DIM;i++)
+  fprintf(stderr,"%d ",V[i]);
+}*/
+
+  
+  
diff --git a/src/sampling.c b/src/sampling.c
new file mode 100755
index 0000000..c669686
--- /dev/null
+++ b/src/sampling.c
@@ -0,0 +1,225 @@
+/*the l is for local global variable in this file*/
+#include "stdio.h"
+#include "stdlib.h"
+#include "math.h"
+#include "string.h"
+#include "mt.h"
+typedef struct tagPERMU_ARRAY{
+  int n; /*the number of original observations (samples) to permute*/
+  int k; /* the number of classes, labelled from 0..(k-1),
+	  which functions as the base of the integer representation*/
+  int* nk; /* the number of observations in class 0..(k-1)*/
+  int B;/* the number of permutation samples*/
+  int len; /*len=floor(log(imax,k)), where imax is the maximum unsigned integer*/
+  int sz;/*the number of integers needed for each permutation, sz=ceil(n/len)*/
+  unsigned int * v;/* the array, which has size of B*sz
+ 		      unsigned integers*/
+}PERMU_ARRAY;
+static int init_permu_array(PERMU_ARRAY* pa,int *L,int n, int B);
+static int get_permu(PERMU_ARRAY* pa, int h,int *L);
+/* get the h-th permutation of the permu_array. L needs to be array
+   of length pa->n*/
+static int set_permu(PERMU_ARRAY* pa, int h,int *L);
+static void delete_permu_array(PERMU_ARRAY* pa);
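+/* A worked illustration of the base-k packing (numbers assumed for the
+   example): with k=3 classes and labelling L=(2,0,1,2), set_permu stores
+   val = 2*1 + 0*3 + 1*9 + 2*27 = 65 in one unsigned int, and get_permu
+   recovers L by repeated val%k and val/k. */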
+
+
+static int l_b=0; /* the number of permutations done so far*/
+static int l_B=0; /*the number of all permutations */
+
+static PERMU_ARRAY l_pa;
+
+/*store all the samples in random case, the first one needs to be from the original data*/
+void create_sampling(int n,int*L,int B)
+{  
+  int i,rest,maxB=0;
+  int imax;
+  double f;
+  /*initialize the preliminary computation*/
+  init_permu_array(&l_pa,L,n,0);
+  
+  /*setting the value of f=log(maxB)*/
+  f=0;
+  rest=n;
+  for(i=0;i<l_pa.k;i++){
+    f+=logbincoeff(rest,l_pa.nk[i]);
+    rest-=l_pa.nk[i];
+  }
+
+  /*setting the maximum B*/
+  imax=(unsigned int)(~0)>>1;/*divide by 2 to avoid the negative number*/
+  if(fabs(f)<log(imax)){
+    maxB=1;
+    rest=n;
+    for(i=0;i<l_pa.k;i++){
+      maxB*=bincoeff(rest,l_pa.nk[i]);
+      rest-=l_pa.nk[i];
+    }
+  }else{/*we can set the only maximum of B*/
+    maxB=imax;
+  }
+
+  /*to check random or complete*/
+  if((B<=0) || (B>=maxB)){
+    /* checking if complete permutation doable*/
+    if (fabs(f)>log(imax)){
+      fprintf(stderr,"as B(log(B)=%5.2lf) is too big,we can not do the complete permutations\n",f);
+      return;/*exit(0);*/
+    }
+    /*when exceeding the maximum number, we'll use the complete permutations*/
+    l_B=maxB;
+/*    fprintf(stderr,"\nWe're doing %d complete permutations\n",l_B);*/
+    Rprintf("\nWe're doing %d complete permutations\n",l_B);
+  }else{
+    /*doing random permutation*/
+    int * ordern,* permun,*myL;
+    l_B=B;
+    /*fprintf(stderr,"\nWe're doing %d random permutations\n",l_B);*/
+    Rprintf("\nWe're doing %d random permutations\n",l_B);
+    /*reinitialize the permu_array*/
+    delete_permu_array(&l_pa);
+    init_permu_array(&l_pa,L,n,B);
+    permun=(int*)Calloc(l_pa.n,int);
+    ordern=(int*)Calloc(l_pa.n,int);
+    myL=(int*)Calloc(l_pa.n,int);
+    for(i=0;i<n;i++){
+      ordern[i]=i;
+    }
+    /*allocate and assign the values for l_first_sample*/
+    set_permu(&l_pa,0,L);
+    set_seed(g_random_seed);
+    for(i=1;i<B;i++){
+      memcpy(permun,ordern,sizeof(int)*n);
+      sample(permun,n,n);
+      /*change to labbeling*/
+      sample2label(n,l_pa.k,l_pa.nk,permun,myL);
+      set_permu(&l_pa,i,myL);
+    }
+    Free(myL);
+    Free(ordern);
+    Free(permun);
+  }
+}
+void delete_sampling()
+{
+  delete_permu_array(&l_pa);
+}
+
+int first_sample(int *L)
+{
+  if(L==NULL)
+    return l_B;
+
+  /*if random, we choose the original L as the first 
+    sample*/
+  if(l_pa.B > 0){
+    get_permu(&l_pa,0,L);
+  }else{ 
+    init_label(l_pa.n,l_pa.k,l_pa.nk,L);
+  }
+  l_b=1;/*reset the number of permutations done*/
+  /*print_narray(L,16);*/
+
+  return 1;
+}
+
+int next_sample(int* L)
+{
+  if(l_b>=l_B) return 0;
+
+  if(l_pa.B > 0){
+    get_permu(&l_pa,l_b,L);
+  }    
+  else{
+    next_label(l_pa.n,l_pa.k,l_pa.nk,L);
+  }
+  l_b++;
+  return 1;
+}
+
+static int init_permu_array(PERMU_ARRAY* pa, int *L,int n, int B)
+{
+  int i;
+  unsigned imax;
+  pa->n=n;
+  pa->B=B;
+  pa->nk=NULL;
+  pa->v=NULL;
+
+  /* compute the k*/
+  pa->k=0;
+  for(i=0;i<n;i++)
+    if(L[i]>pa->k)
+      pa->k=L[i];
+  (pa->k)++;
+  
+  /*compute nk*/
+  pa->nk=(int*)Calloc(pa->k,int);
+  memset(pa->nk,0,sizeof(int)*pa->k);
+  for(i=0;i<n;i++)
+    pa->nk[L[i]]++;
+  
+  /*compute imax and len*/
+  imax=~0; /*set all bits of the unsigned integer to 1*/
+  pa->len=floor(log(imax+1.0)/log(pa->k)); 
+  pa->sz=ceil(n/(pa->len*1.0));
+  /*allocate the space for v*/
+  pa->v=(unsigned int*)Calloc(B*pa->sz,int);
+  return 1;
+}
+
+
+
+static int get_permu(PERMU_ARRAY* pa, int h, int *L)
+{
+  int i,j;
+  unsigned val;
+  memset(L,0,sizeof(unsigned int)*pa->n);
+  if((h+1)> pa->B) return 0;
+  for(j=0;j<pa->sz;j++){
+    i=j*pa->len; /*starting from the last bit*/
+    val=pa->v[h*pa->sz+j];
+    while(val>0){
+      /*this code could be made faster if necessary*/
+      L[i]=val%(unsigned int)(pa->k);
+      i++;
+      val/=(unsigned int)(pa->k);/*to move another bit*/
+    }
+  }
+  return 1;
+}
+      
+      
+static int set_permu(PERMU_ARRAY* pa, int h,int *L)
+{
+  int i,j,nextbound;
+  unsigned val,pow;
+  if((h+1)> pa->B) return 0;
+  i=0; /*starting from the last bit*/
+  for(j=0;j<pa->sz;j++){
+    nextbound=(j+1)*pa->len;
+    if(nextbound> (pa->n))
+      nextbound=pa->n;
+    pow=1;
+    val=0;
+    while(i<nextbound){
+      val+=(unsigned int)(L[i])*pow;
+      pow*=(unsigned int)pa->k;
+      i++;
+    }
+    pa->v[h*pa->sz+j]=val;
+  }
+  return 1;
+}
+static void delete_permu_array(PERMU_ARRAY* pa)
+{
+  Free(pa->nk);
+  pa->nk=NULL;
+  if(pa->B!=0){
+    Free(pa->v);
+    pa->v=NULL;
+  }
+}
+
+
+  
+  
diff --git a/src/sampling_fixed.c b/src/sampling_fixed.c
new file mode 100755
index 0000000..488b679
--- /dev/null
+++ b/src/sampling_fixed.c
@@ -0,0 +1,83 @@
+/*the l is for local global variable in this file*/
+#include "stdio.h"
+#include "stdlib.h"
+#include "math.h"
+#include "string.h"
+#include "mt.h"
+static int l_n=0; /*the length of L*/
+static int l_k=0; /*the number of groups*/
+static int* l_nk=NULL;/* the number of objects in each group*/
+static int* l_L=NULL;/*the storage of the first labelling*/
+static int l_b=0; /* the number of permutations done so far*/
+static int l_B=0; /*the number of all permutations */
+static int* l_permun=NULL;
+static int* l_ordern=NULL;
+
+void create_sampling_fixed(int n,int*L,int B)
+{  
+  int i,k;
+  l_n=n;
+  l_B=B;
+  l_b=0;
+  if(B<=0){
+    fprintf(stderr,"B needs to be positive\n");
+    return;/*exit(0)*/;
+  }
+  l_L=(int*)Calloc(n,int);
+  memcpy(l_L,L,sizeof(int)*n);
+  
+  k=0;
+  for(i=0;i<n;i++)
+    if(L[i]>k)
+      k=L[i];
+  k++;
+  l_k=k;
+  l_nk=(int*)Calloc(k,int);
+  memset(l_nk,0,sizeof(int)*k);
+  for(i=0;i<n;i++)
+    l_nk[L[i]]++;
+
+  l_permun=(int*)Calloc(n,int);
+  l_ordern=(int*)Calloc(n,int);
+  for(i=0;i<n;i++){
+      l_ordern[i]=i;
+    }
+}
+void delete_sampling_fixed()
+{
+  Free(l_L);
+  l_L=NULL;
+  Free(l_nk);
+  l_nk=NULL;
+  Free(l_permun);
+  l_permun=NULL;
+  Free(l_ordern);
+  l_ordern=NULL;
+}
+
+int first_sample_fixed(int *L)
+{
+  if(L==NULL)
+    return l_B;
+  else{
+    memcpy(L,l_L,sizeof(int)*l_n);
+  }
+  l_b=1;
+  set_seed(g_random_seed);
+  return 1;
+}
+
+int next_sample_fixed(int* L)
+{
+  int n=l_n;
+  if(l_b>=l_B) return 0;
+  memcpy(l_permun,l_ordern,sizeof(int)*n);
+  sample(l_permun,n,n);
+  /*convert to labelling*/
+  sample2label(n,l_k,l_nk,l_permun,L);
+  l_b++;
+  return 1;
+}
+
+  
+  
diff --git a/src/stat_func.c b/src/stat_func.c
new file mode 100755
index 0000000..f168987
--- /dev/null
+++ b/src/stat_func.c
@@ -0,0 +1,857 @@
+#include "stdio.h"
+#include "stdlib.h"
+#include "stdarg.h"
+#include "math.h"
+#include "string.h"
+#include "mt.h"
+
+/*This file is used to collect some useful statistics functions*/
+/********************************************************************************/
+/*                        two_sample_tstat                                      */
+/********************************************************************************/
+    /* Computes the value of the two sample t-statistic, allowing for missing values. 
+       Missing values are represented by na.  (At least two values per group should 
+       be present.) 
+       If the return value is NA_FLOAT, the t-statistic could not be
+       calculated, e.g. because the variance is 0 or the count of one
+       class is less than 2.
+      Y: the vector of one gene across experiments
+      n: the number of experiments
+      L: the class labelling of each experiment
+      na: the NA representation of gene values
+      extra: additional information, not used here
+    */
+float two_sample_tstat(const float *Y, const int* L,const int n, const float na,const void *extra) 
+{
+  float num,denum,res;
+  res=two_sample_tstat_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  return num/denum;
+}
+
+float two_sample_tstat_num_denum(const float *Y, const int* L,const int n, const float na,float* num, float* denum,const void* extra) 
+{
+  float mean_na[2]={0,0},ss_na[2]={0,0},devi;
+  float c0,c1;
+  int i,count[2]={0,0},class;
+  /*compute the mean and count first*/
+  /* count is the number of objects in each class*/
+  for (i=0; i<n; i++) {
+    if (Y[i]==na) 
+      continue;
+    class=L[i];
+    mean_na[class] += Y[i];
+    count[class]++; 
+  }
+ 
+  mean_na[0]/=(count[0]*1.0);
+  mean_na[1]/=(count[1]*1.0);
+
+  /*compute the variance in each group*/
+  for (i=0; i<n; i++) {
+    if (Y[i]==na) 
+       continue;
+    class=L[i];
+    devi=(Y[i]-mean_na[class]);
+   ss_na[class] += devi*devi;
+  }
+  /* if(ss_na[0]==0) return NA_FLOAT;
+  if(ss_na[1]==0) return NA_FLOAT;
+  */
+  if(ss_na[0]+ss_na[1]<EPSILON) return NA_FLOAT;
+  c0=(count[0]*(count[0]-1));
+  c1=(count[1]*(count[1]-1));
+  *num=mean_na[1]-mean_na[0];
+  *denum=sqrt(ss_na[0]/c0+ss_na[1]/c1);
+  return 1;
+}
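+/* Restating the code above as a formula: the returned statistic is
+   t = (mean_1 - mean_0) / sqrt( ss_0/(n_0*(n_0-1)) + ss_1/(n_1*(n_1-1)) ),
+   the two-sample t-statistic with unequal variances (Welch), where ss_g is
+   the sum of squared deviations and n_g the non-missing count in group g. */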
+float ave_diff(const float *Y, const int* L,const int n, const float na,const void* extra)
+{
+  float mean_na[2]={0,0};
+  int i,count[2]={0,0},class;
+  /*compute the mean and count first*/
+  /* count is the number of objects in each class*/
+  for (i=0; i<n; i++) {
+    if (Y[i]==na) 
+      continue;
+    class=L[i];
+    mean_na[class] += Y[i];
+    count[class]++; 
+  }
+
+  mean_na[0]/=(count[0]*1.0);
+  mean_na[1]/=(count[1]*1.0);
+  if((count[0]==0)||(count[1]==0)) return NA_FLOAT;
+  return mean_na[1]-mean_na[0];
+}
+  
+float two_sample_t1stat(const float *Y, const int* L,const int n, const float na,const void *extra) 
+{
+  float num,denum,res;
+  res=two_sample_t1stat_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  return num/denum;
+}
+float two_sample_t1stat_num_denum(const float *Y, const int* L,const int n, const float na,float* num, float* denum,const void* extra) 
+{
+  float mean_na[2]={0,0},ss_na[2]={0,0},devi;
+  float c0,c1;
+  int i,count[2]={0,0},class;
+  /*compute the mean and count first*/
+  /* count is the number of objects in each class*/
+  for (i=0; i<n; i++) {
+    if (Y[i]==na) 
+      continue;
+    class=L[i];
+    mean_na[class] += Y[i];
+    count[class]++; 
+  }
+
+  mean_na[0]/=(count[0]*1.0);
+  mean_na[1]/=(count[1]*1.0);
+
+  /*compute the variance in each group*/
+  for (i=0; i<n; i++) {
+    if (Y[i]==na) 
+       continue;
+    class=L[i];
+    devi=(Y[i]-mean_na[class]);
+   ss_na[class] += devi*devi;
+  }
+  /*  if(ss_na[0]==0) return NA_FLOAT;
+  if(ss_na[1]==0) return NA_FLOAT;
+  */
+  if(ss_na[0]+ss_na[1]<EPSILON) return NA_FLOAT;
+  c0=1/(count[0]*1.0)+1/(count[1]*1.0);
+  c1=count[0]+count[1]-2.0;
+  *num=mean_na[1]-mean_na[0];
+  *denum=sqrt((ss_na[0]+ss_na[1])*c0/c1);
+  return 1;
+}
+
+float Wilcoxon_stat(const float *Y, const int* L,const int n, const float na,const void* extra)
+{
+  int i,count=0,n1=0;
+  float s=0;
+  for(i=0;i<n;i++){
+    if(Y[i]==na) continue;
+    if(L[i]){
+      s+=Y[i];
+      n1++;
+    }
+    count++;
+  }
+  s-=(1+count)*n1/2.0;
+  return s;
+}
+      
+float Wilcoxon_T(const float *Y, const int* L,const int n,
+		       const float na,const void *extra)
+{
+  float num,denum,res;
+  res=Wilcoxon_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  return num/denum;
+} 
+float Wilcoxon_num_denum(const float *Y, const int* L,const int n, 
+			       const float na,float* num, float* denum,const void* extra)
+{
+  int i,count=0,n1=0;
+  float s=0;
+  for(i=0;i<n;i++){
+    if(Y[i]==na) continue;
+    if(L[i]){
+      s+=Y[i];
+      n1++;
+    }
+    count++;
+  }
+  *num=s-(1+count)*n1/2.0;
+  *denum=sqrt(n1*(count-n1)*(count+1)/12.0);
+  if((*denum)<EPSILON) return NA_FLOAT;
+  return 1; 
+}
+/* The signed sum of Y[i]: Y[i] is added when L[i] is 1 (treatment) and 
+subtracted when L[i] is 0 (control).
+It is used in the paired t-stat: this statistic is monotone in the paired t-stat for the 
+2xB block design, where we have B blocks, each block has two experiments, and Y[i]
+is already the difference within one block; NAs are not handled, to keep the
+function simple (a small worked example follows the function below)*/
+float sign_sum(const float *Y, const int* L,const int n, const float na,const void* extra)
+     /* extra not used*/
+{
+  float ret=0;
+  int i;
+  int count=0;
+  for(i=0;i<n;i++){
+    if(Y[i]==0) continue;
+    if(L[i]){
+      ret+=Y[i];
+    }else{
+      ret-=Y[i];
+    };
+    count++;
+  }
+  return ret;
+}
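+/* The small worked example referred to above (values assumed): with
+   Y=(1.2,-0.3,0.5) and L=(1,0,1), sign_sum returns 1.2-(-0.3)+0.5 = 2.0. */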
+/*computes the one-sample t-statistic,
+used to compute the paired t-stat for the 2xB block design, where Y[i]
+is already the difference within one block*/ 
+float sign_tstat_num_denum(const float *Y, const int* L,const int n, const float na,float *num, float*denum,const void *extra)
+{
+  float ss=0;
+  float mean=0;
+  float devi;
+  int i,count;
+  count=0;
+  for(i=0;i<n;i++){
+    if(Y[i]==na) continue;
+    if(L[i]){
+      mean+=Y[i];
+    }else{
+      mean-=Y[i];
+    };
+    count++;
+  }
+  mean/=(count*1.0);
+  for(i=0;i<n;i++){
+    if(L[i]){
+      devi=Y[i]-mean;
+    }else{
+      devi=-Y[i]-mean;
+    };
+    ss+=(devi * devi);
+  }
+  *num=mean;
+  *denum=sqrt(ss/(count*(count-1.0)));
+  if((*denum)<EPSILON) return NA_FLOAT;
+  return 1;
+}
+float sign_tstat(const float *Y, const int* L,const int n, const float na, const void* extra)
+     /*extra not used*/
+{
+  float num,denum,res;
+  res=sign_tstat_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  return num/denum;
+}  
+
+/*compute the F-stat for k samples, where L[i] is the labelling of object i;
+note for the F-test, *(int*)extra holds the number of groups k*/
+float Fstat(const float *Y, const int* L,const int n, const float na,const void* extra)
+{
+  float num,denum,res;
+  res=Fstat_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  if(denum<EPSILON) return NA_FLOAT;
+  return num/denum;
+}
+float Fstat_num_denum(const float *Y, const int* L,const int n, const float na,float *num, float*denum,const void* extra)
+{
+  float wss=0,bss=0;/*within sum of squares and between sum of squares*/
+  float mean=0,*meani,*ssi;/*the overall mean, the group means, and the group sums of squares*/
+  float dev;
+  int i,class,k,*ni,N=0;/* ni[i] is the number of objects in group i,
+		   k is the number of groups,
+		   N is the total number of valid objects*/
+  k=*(int*)extra;
+  meani=(float *)Calloc(k,float);
+  ssi=(float *)Calloc(k,float);
+  ni=(int *)Calloc(k,int);
+  for(i=0;i<k;i++){
+    meani[i]=0;/*initialize to zero*/
+    ssi[i]=0;
+    ni[i]=0;
+  }
+  for(i=0;i<n;i++){
+    if(Y[i]==na) 
+      continue;
+    class=L[i];
+    meani[class]+=Y[i];
+    ni[class]++;
+    N++;
+    mean+=Y[i];
+  }
+  /*summarize the data*/
+  mean/=(N*1.0);
+  for(i=0;i<k;i++){
+    meani[i]/=(ni[i]*1.0);/*get the mean for each group*/
+  }
+  /*compute bss and wss*/
+  for(i=0;i<n;i++){
+    if(Y[i]==na) 
+      continue;
+    class=L[i];
+    dev=Y[i]-meani[class];
+    ssi[class]+=dev*dev;
+  }
+  for(i=0;i<k;i++){
+    /*    if(ssi[i]==0) {
+	     ret=NA_FLOAT;
+	     break;
+	     }; */ /*each group needs to have non zero variance*/
+    wss+=ssi[i];
+    dev=meani[i]-mean;
+    bss+=dev*dev*ni[i];
+  }
+  /*summarize the num and denum*/
+  *num=bss/(k-1.0);
+  *denum=wss/(N-k-0.0);
+  Free(meani);
+  Free(ni);
+  Free(ssi);
+  return 1;
+} 
+/*compute the block F-stat for k samples, where L[i] is the labelling of object i;
+note for the block F-test, *(int*)extra holds m, the number of treatments per block
+(so the number of blocks is B=n/m)*/
+/* currently NA values are not handled, as a rectangular design is required*/
+float Block_Fstat(const float *Y, const int* L,const int n, const float na,const void* extra)
+{
+  float num,denum,res;
+  res=Block_Fstat_num_denum(Y,L,n,na,&num,&denum,extra);
+  if(res==NA_FLOAT) return NA_FLOAT;
+  if(denum<EPSILON) return NA_FLOAT;
+  return num/denum;
+}
+float Block_Fstat_num_denum(const float *Y, const int* L,const int n, const float na,float *num, float*denum,const void* extra)
+{
+  float wss=0,bss=0;/*within sum of squares and between sum of squares*/
+  float mean=0,*meani,*meanj;
+  /*the overall mean, the mean of block i, and the mean of treatment j*/
+  float dev;
+  int treat,block,i,j,B,m,h;/* B is the number of blocks,
+		   m is the number of treatments per block*/
+
+  m=*(int*)extra;
+  B=n/m;
+  if(B*m!=n){
+    fprintf(stderr,"The design is not balanced as B(%d)xm(%d)!=n(%d)\n",B,m,n);
+    return NA_FLOAT;
+  }
+  meani=(float *)Calloc(B,float);
+  meanj=(float *)Calloc(m,float);
+  for(i=0;i<B;i++){
+    meani[i]=0;/*initialize to zero*/
+    for(j=0;j<m;j++){
+      meani[i]+=Y[j+m*i];
+    }
+  }
+  for(j=0;j<m;j++){
+    meanj[j]=0;/*initialize to zero*/
+  }
+  for(h=0;h<n;h++){
+    treat=L[h];
+    meanj[treat]+=Y[h];
+    mean+=Y[h];
+  }
+   
+  /*summarize the data*/
+  mean/=(n*1.0);
+  for(i=0;i<B;i++){
+    meani[i]/=(m*1.0);/*get the mean for each block*/
+  }
+  for(j=0;j<m;j++)
+    {
+    meanj[j]/=(B*1.0);/*get the mean for each treatment*/
+  }
+  /*compute bss and wss*/
+  for(i=0;i<n;i++){
+    block=i/m;    
+    dev=Y[i]-meani[block];
+    treat=L[i];
+    dev-=meanj[treat]-mean;
+    wss+=dev*dev;
+  }
+  for(j=0;j<m;j++){
+    dev=meanj[j]-mean;
+    bss+=dev*dev*B;
+  }
+  /*summarize the num and denum*/
+  *num=bss/(m-1.0);
+  *denum=wss/((m-1.0)*(B-1.0));
+  Free(meani);
+  Free(meanj);
+  return 1;
+} 
+void int2bin(int r,int*V,int n)
+{ int i;
+  for(i=n-1;i>=0;i--){
+    V[i]=r&1;
+    r>>=1; /*divide by 2 so that we can look at the next digit*/
+  }
+}
+int bin2int(int*V,int n)
+{ int i,ret=0;
+  for(i=0;i<n-1;i++){
+    ret+=V[i];
+    ret<<=1; /*multiply by 2 so that we can look at the next digit*/
+  }
+  ret+=V[n-1];
+  return ret;
+}
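+/* A minimal illustration (not part of the package API): int2bin and bin2int
+   map an integer to its n-digit binary expansion (V[0] is the most
+   significant digit) and back, which is convenient e.g. for enumerating all
+   2^n sign assignments in a paired design.  The round trip recovers r:
+
+   int V[4], r;
+   for (r=0; r<16; r++) {
+     int2bin(r, V, 4);
+     if (bin2int(V, 4) != r)
+       fprintf(stderr, "round trip failed at %d\n", r);
+   }
+*/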
+/********************************************************************************/
+/*                      bincoeff                                                */
+/********************************************************************************/
+/*Description: return the binomial coefficient of n choose k*/
+int bincoeff(int n, int k)
+{
+  float f=n;
+  int i;
+  for(i=1;i<k;i++)
+    f*=(n-i)/(i+1.0);
+  return (int)(f+0.5);
+}
+/*compute the log of the binomial coefficient*/
+double logbincoeff(int n, int k)
+{
+  double f=log(n);
+  int i;
+  for(i=1;i<k;i++)
+    f+=log((n-i)/(i+1.0));
+  return f;
+}
+/*compute the log of the falling factorial n*(n-1)*...*(n-k+1)*/
+double logfactorial(int n, int k)
+{
+  double f=log(n);
+  int i;
+  for(i=1;i<k;i++)
+    f+=log((n-i)*1.0);
+  return f;
+}
+/********************************************************************************/
+/*                 A2L                                                      */
+/********************************************************************************/
+/*Description:
+    Assume we have n objects, of which k are labelled 0 and the rest are
+    labelled 1.  A is the subset (of size k) of objects that have label 0, and
+    L is the labelling of each object.  This function transforms the subset A
+    into the labelling L.
+*/
+void A2L(int* A,int* L,int n,int k)
+{
+  int i;
+  /*label everything 1, then mark the k members of A with 0*/
+  for(i=0;i<n;i++)
+    L[i]=1;
+  for(i=0;i<k;i++)
+    L[A[i]]=0;
+}
+
+/********************************************************************************/
+/*                     next_lex                                                 */
+/********************************************************************************/
+   /* Given a list a of k numbers a_0<a_1<...<a_(k-1) with a_i in {0, 1,...,
+      n-1}, returns the list b: b_0<b_1<...<b_(k-1) which immediately follows a in
+      lexicographic order. It can be used to determine all subsets of size k of
+      {0, 1,..., n-1}.
+
+      Note: the array is passed in A and is modified in place; the successor
+      list b is stored back in A.
+   */
+int next_lex(int* A, int n, int k)
+{
+  int l=k-1, s=n-1,i,old;/*l is for the location of A to increase*/
+
+  /*look for a location to increase*/  
+  while (l>=0 && A[l]==s) { /*check l first to avoid reading A[-1]*/
+    l--;
+    s--;
+  }
+  if(l<0) {
+    if (myDEBUG)
+      {
+	fprintf(stderr,"%s%s","We've achieved the maximum permutation already\n",
+		"We can not find the next one in next_lex\n");
+      }
+
+    return 0;/*note we can not generate the next permutations*/
+  }
+  /*increase A[l] by 1 and make the following entries consecutive*/
+  old=A[l];
+  for(i=l;i<k;i++)
+    A[i]=old+i-l+1;
+  return 1;
+}
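+/* A minimal illustration (not part of the package API): starting from the
+   first k-subset {0,1,...,k-1}, next_lex enumerates all C(n,k) subsets of
+   {0,...,n-1} in lexicographic order, e.g. for n=4, k=2:
+
+   int A[2]={0,1};
+   do {
+     print_narray(stderr, A, 2);
+   } while (next_lex(A, 4, 2));    prints {0,1},{0,2},{0,3},{1,2},{1,3},{2,3}
+*/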
+
+/*initialize the labels so that the first nk[0] objects are labelled 0, the next nk[1] are labelled 1, etc.*/
+void init_label(int n, int k, int*nk, int*L)
+{
+  int l,s,j;
+  s=0;/*s is for starting*/
+  for(l=0;l<k;l++){
+    for(j=0;j<nk[l];j++){
+      L[s]=l;
+      s++;
+    }
+   }
+}
+void sample2label(int n, int k, int* nk,int *permun, int*L)
+{
+  int l,s,j;
+  s=0;/*s is for starting*/
+  for(l=0;l<k;l++){
+    for(j=0;j<nk[l];j++){
+      L[permun[s]]=l;
+      s++;
+    }
+  }
+}
+void label2sample(int n, int k, int* nk,int*L,int *permun)
+{ 
+  int l,j;
+  int *s;/*s is for starting*/
+
+  /*initialize the beginning*/
+  s=(int*)Calloc(k,int);
+  s[0]=0;
+  for(l=1;l<k;l++){
+    s[l]=s[l-1]+nk[l-1];
+  }
+  for(j=0;j<n;j++){
+    l=L[j];
+    permun[s[l]]=j;
+    s[l]++;
+  }
+  Free(s);
+}
+  
+int next_label(int n, int k, int* nk, int*L)
+{
+  int *permun,ret;
+  permun=(int*)Calloc(n,int);
+  label2sample(n,k,nk,L,permun);
+  ret=next_mult_permu(permun,n,k,nk);
+  sample2label(n,k,nk,permun,L);
+  Free(permun);
+  return ret;
+}
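+/* A minimal illustration (not part of the package API): init_label and
+   next_label together enumerate every labelling of n objects into k groups
+   with sizes nk[0..k-1], as needed for a complete permutation test; e.g. for
+   4 objects in two groups of 2 there are C(4,2)=6 labellings:
+
+   int L[4], nk[2]={2,2};
+   init_label(4, 2, nk, L);
+   do {
+     print_narray(stderr, L, 4);
+   } while (next_label(4, 2, nk, L));
+*/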
+
+/*V has n elements: the first k (referred to as array A) are in increasing
+  order, and the remaining n-k (referred to as array B) are in increasing
+  order as well; the task is to list all C(n,k) such two-class permutations.
+  If the current permutation is the last one, the function returns false and
+  leaves the whole array V ordered (i.e., arrays A and B swapped); otherwise V
+  becomes the next permutation, with the same within-class ordering.  The
+  algorithm runs in O(n) time.*/
+
+int next_two_permu(int* V, int n, int k)
+{
+  int i,j;
+  int old,maxb=V[n-1];
+  int* A=V;
+  int* B=V+k;
+  int* tempV,*cpyV;
+  tempV=(int*)Calloc(n,int);
+
+  i=k-1;
+  while(i>=0&& A[i]>maxb){
+    i--;
+  }
+  /*there is no next permutation, as all the elements of array A
+    are greater than those of array B*/
+  if(i<0){
+    /*rearrange the output so that V is ordered over the whole array*/
+    memcpy(tempV,B,sizeof(int)*(n-k));
+    memcpy(tempV+(n-k),A,sizeof(int)*k);
+    /*use tempV to swap arrays A and B*/
+    memcpy(V,tempV,sizeof(int)*n);
+    /*copy back to V*/
+    Free(tempV);
+    return 0;
+  }
+  /*else to find the next permutation*/
+  /*first find the largest j with B[j] < A[i]*/
+  j=n-k-2;
+  old=A[i];
+  while(j>=0&&(B[j]>old)){
+    j--;
+  }
+  /*keep the original A[0..(i-1)] elements to tempV*/
+  memcpy(tempV,A,sizeof(int)*i);
+  /*keep the original B[0..j] elements to tempV+k*/
+  if(j+1>0)
+    memcpy(tempV+k,B,sizeof(int)*(j+1));
+  /*construct the array B[j+1],...,B[n-k-1],A[i+1],...,A[k-1] (all the
+    elements greater than A[i]) in cpyV, then copy its first k-i elements
+    into tempV starting at position i*/
+  cpyV=(int*)Calloc(n,int);
+  memcpy(cpyV,B+j+1,sizeof(int)*((n-k)-(j+1)));
+  if(k>(i+1))
+    memcpy(cpyV+(n-k)-(j+1),A+i+1,sizeof(int)*(k-(i+1)));
+  memcpy(tempV+i,cpyV,sizeof(int)*(k-i));
+  tempV[k+j+1]=A[i];
+  if((n-k)>(j+2))
+    memcpy(tempV+k+j+2,cpyV+(k-i),sizeof(int)*((n-k)-(j+2)));
+  /*copy back to V*/
+  memcpy(V,tempV,sizeof(int)*n);
+  Free(cpyV);
+  Free(tempV);
+  return 1;
+}
+
+/*the next_permutation for multiple classes*/
+int next_mult_permu(int* V, int n, int k, int* nk)
+{
+  int olds,s,l;/*s is for starting location*/
+  int next=0;
+  /*initialize the beginning*/
+  s=nk[0];
+  for(l=1;l<k;l++){
+    olds=s;
+    s+=nk[l];
+    next=next_two_permu(V,s,olds);
+    if(next) return 1;
+  }
+  return 0;/*we couldn't find the next permutation*/
+}
+FUNC_CMP side2cmp(int side)
+{
+  FUNC_CMP func_cmp;
+  if(side==0){
+    func_cmp=cmp_abs;
+  }else if(side==-1){
+    func_cmp=cmp_low;
+  }else{
+    func_cmp=cmp_high;
+  }
+  return func_cmp;
+}
+/*V[0],V[1],...,V[n-1] is a permutation of h_1<h_2<...<h_n, where h_i will be i
+in most cases, but is kept general here to be more widely usable*/
+/*it returns the next permutation in V*/ 
+int next_permu(int*V,int n) /* n has to be at least 2*/
+{
+  int i,j,old,*cpyV,l;
+  i=n-2;
+  while(i>=0){
+    if(V[i]<V[i+1]) break;
+    i--;
+  }
+  if(i<0){
+    if (myDEBUG){
+   	fprintf(stderr,"%s%s","We've achieved the maximum permutation already\n",
+		"We can not find the next one-in next_permu\n");
+    }
+    return 0;/*note we can not generate the next permutations*/
+  }
+  /*find the location of j, V[i]<V[i+1]>...>V[n-1]*/
+  /*i.e. V[n-1]<V[n-2]<...V[j+1]<V[i]=old<V[j]<...V[i+1]*/
+  old=V[i];
+  j=n-1;
+  while(j>i){
+    if(V[j]>old) break;
+    j--;
+  }
+  cpyV=(int*)Calloc(n,int);
+  memcpy(cpyV,V,sizeof(int)*n);
+  V[i]=cpyV[j];
+  cpyV[j]=old;
+  for(l=i+1;l<n;l++){
+    V[l]=cpyV[n+i-l];
+  }
+  Free(cpyV);
+  return 1;
+}
+
+/********************************************************************************/
+/*              print_narray / print_farray                                     */
+/********************************************************************************/
+/*Description:
+      print an array with n elements, PRINT_VAR_NUM values per line.*/
+
+void print_narray(FILE* fh,int* p_arr,int n)
+{
+  int i;
+  for(i=0;i<n;i++){
+    fprintf(fh," %7d ",p_arr[i]);
+    if((PRINT_VAR_NUM)&&((i+1)%PRINT_VAR_NUM==0))
+      fprintf(fh,"\n");
+  }
+  fprintf(fh,"\n");
+}
+void print_farray(FILE* fh,float* p_arr,int n)
+{
+  int i;
+  for(i=0;i<n;i++){
+    fprintf(fh," %9g ",p_arr[i]);
+    if((PRINT_VAR_NUM)&&((i+1)%PRINT_VAR_NUM==0))
+      fprintf(fh,"\n");
+  }
+  fprintf(fh,"\n");
+}
+
+
+  
+/********************************************************************************/
+/*                       read_infile                                            */
+/********************************************************************************/
+/*read the file into the struct GENE_DATA.
+  1) The first row of the file should contain the name and the class labelling.
+  2) The space for pdata must already be allocated by malloc_gene_data();
+     otherwise memory problems may occur.
+*/
+void read_infile(char *filename,GENE_DATA *pdata) {
+  FILE *fh;
+  int i, j;
+  double ftemp;
+  fh=fopen(filename,"r");
+  if (NULL == fh)
+      Rf_error("failed to open file '%s'", filename);
+  /*read the labelling first*/
+  fscanf(fh, "%s", pdata->name);
+  for (j=0; j<pdata->ncol; j++)
+    fscanf(fh, "%d", pdata->L+j);
+
+/*read the m x n matrix of gene expression values*/
+  for (i=0; i<pdata->nrow; i++) {
+    /*read gene id and the first gene values*/
+    fscanf(fh, "%s", pdata->id[i]);
+    /*read the rest of it*/
+    for (j=0; j<pdata->ncol; j++) {
+      /*read as double, then store as float*/
+      fscanf(fh, "%lg",&ftemp);
+      pdata->d[i][j]=ftemp;
+    }
+  }
+  fclose(fh);
+}
+
+/********************************************************************************/
+/*             print_gene_data                                                  */
+/********************************************************************************/
+/*print the gene_data to stderr; useful for debugging*/
+void print_gene_data(GENE_DATA* pdata)
+{
+  int i, j;
+  for (i=0; i<pdata->nrow; i++){
+    fprintf(stderr,"%20s", pdata->id[i]);
+    for (j=0; j<pdata->ncol; j++) 
+      fprintf(stderr," %5.3f", pdata->d[i][j]);
+    fprintf(stderr,"\n");
+  }
+}
+/********************************************************************************/
+/*                     write_outfile                                            */
+/********************************************************************************/
+/*Description:
+       write the test statistics, unadjusted p-values, adjusted p-values, and
+       adjusted p-value lower bounds to the file.
+  input parameters:
+      fh:       the file handle to write to
+      pdata:    the pointer to the whole data
+      T,P,Adj_P,Adj_Lower: the arrays storing the test statistics, unadjusted
+      p-values, adjusted p-values, and adjusted p-value lower bounds, respectively;
+      if Adj_Lower==NULL, this column is not printed
+*/
+void write_outfile(FILE* fh,GENE_DATA* pdata,float*T, float*P,float*Adj_P,float* Adj_Lower)
+{
+  int i,nrow;
+  /*float num,denum;*/
+  nrow=pdata->nrow;  /*the length of the array T,P,etc.*/
+
+  if(myDEBUG)
+  {
+    fprintf(stderr,"\nThe results of T,P Adj_P and Adj_Lower");
+    print_farray(stderr,T,nrow);      
+    print_farray(stderr,P,nrow);
+    print_farray(stderr,Adj_P,nrow);
+    if(Adj_Lower) print_farray(stderr,Adj_Lower,nrow);
+  };
+  fprintf(stderr,"\nWe're writing the output\n");
+  fprintf(fh,"%20s %10s %10s %10s","gene_id","test-stat",
+	  "unadj-p","adjusted-p");
+  if(Adj_Lower)
+    fprintf(fh,"%10s","p-lower");
+  fprintf(fh,"\n");
+
+  for (i=0; i<nrow; i++) {
+    /*  t_stat_num_den(pdata->d[i],pdata->L, pdata->ncol,pdata->na,&num,&denum);*/
+    fprintf(fh, "%20s %10.6f    %7g    %7g", 
+	    pdata->id[i],T[i],P[i],Adj_P[i]);
+    if(Adj_Lower){
+      fprintf(fh,"    %7g",Adj_Lower[i]);
+    }
+    fprintf(fh,"\n");
+  }
+}
+
+/*testing */
+/*int main()
+{
+  #define N 5
+  int V[N];
+  int i,is_next=1;
+  for(i=0;i<N;i++)
+    V[i]=i;
+  while(is_next){
+    print_narray(stderr,V,N);
+    is_next=next_permu(V,N);
+  }
+}
+*/
+int next_label_block(int* L, int n, int m)
+{
+  int s,l,i,j,block;/*s is for starting location*/
+  int next=0;
+  /*initialize the beginning*/
+  block=n/m;
+  s=0;
+  for(l=0;l<block;l++){
+    next=next_permu(L+s,m);/*find the permutation within one block*/
+    if(next) {
+      /*reset all previous blocks to their original order*/
+      for(i=0;i<l;i++){
+	for(j=0;j<m;j++){
+	  L[i*m+j]=j;
+	}
+      }
+      return 1;
+    }
+    s+=m;
+  }
+  return 0;/*we couldn't find the next permutation*/
+}
+void init_label_block(int *L, int n,int m)
+{
+  int i,b,block;
+  block=n/m;
+  for(b=0;b<block;b++){
+    for(i=0;i<m;i++){
+      L[b*m+i]=i;
+    }
+  }
+}
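+/* A minimal illustration (not part of the package API): for a design with
+   n/m blocks of size m, init_label_block and next_label_block enumerate all
+   (m!)^(n/m) within-block permutations used by the block F-statistic; e.g.
+   two blocks of size 3 give (3!)^2=36 labellings:
+
+   int L[6];
+   init_label_block(L, 6, 3);
+   do {
+     print_narray(stderr, L, 6);
+   } while (next_label_block(L, 6, 3));
+*/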
+void sample_block(int *L, int n,int m)
+{
+  int block,b,s;
+  s=0;
+  block=n/m;
+  for(b=0;b<block;b++){
+    sample(L+s,m,m);
+    s+=m;
+  }
+}
+  
+/*int main()
+{ 
+  GENE_DATA data;
+  #define ROW 100
+  float T[ROW];
+  int L[38];
+  int i,k=2;
+
+  data.na=NA_FLOAT;
+  data.nrow=ROW;  
+  data.ncol=38;
+  malloc_gene_data(&data);
+  read_infile("data",&data); */
+  /*print_gene_data(&data);
+    compute_test_stat(&data,data.L,T,two_sample_tstat,NULL); checked
+    compute_test_stat(&data,data.L,T,Fstat,(const void *)&k);
+    compute_test_stat(&data,L,T,Block_Fstat,(const void *)&k);
+  print_farray(stderr,T,ROW);
+  free_gene_data(&data);
+  }*/
+
diff --git a/src/stat_order.c b/src/stat_order.c
new file mode 100755
index 0000000..b555005
--- /dev/null
+++ b/src/stat_order.c
@@ -0,0 +1,142 @@
+#include "stdio.h"
+#include "stdlib.h"
+#include "stdarg.h"
+#include "math.h"
+#include "mt.h"
+/* Functions used for ordering the data */
+
+/*****************************************************************/
+/*          global variables local to this file                 */
+/*****************************************************************/
+
+static float* gp_arr;   
+/*only used for different comparing functions*/
+
+typedef struct tagCMP_DATA{
+  float* V;
+  FUNC_CMP func_cmp;
+}CMP_DATA;
+
+/*the following are used for order_mult_data*/
+CMP_DATA *gp_cmp_data;
+int g_ncmp;
+static int cmp_mult(const void* v1, const void* v2);
+
+/*****************************************************************
+         Function definitions
+******************************************************************/
+								  
+static int cmp_mult(const void* v1, const void* v2)
+{
+  int i;
+  int ret=-2;
+  for(i=0;i<g_ncmp;i++){
+    gp_arr=gp_cmp_data[i].V; /*used in the function func_cmp provided in this file*/
+    ret=(gp_cmp_data[i].func_cmp)(v1,v2);
+    if(ret!=0) return ret;/*return the result*/		      
+  }
+  return ret;
+}
+/********************************************************************************/
+/*                     order_data                                               */
+/********************************************************************************/
+/*    n: the dimension of the data
+      k: the number of (array, comparison function) pairs
+      V1, func_cmp1, ..., Vk, func_cmpk: the arrays to order and the comparison
+      functions used to order them; Vi is compared with func_cmpi, and ties are
+      broken by the next pair.  Every Vi must be a float array.
+      The resulting ordering (a permutation of 0..n-1) is stored in the int array R.
+ */
+void order_mult_data(int* R,int n,int k,...)
+{
+  CMP_DATA *cmp_data;
+  va_list ap;
+  int i;
+  cmp_data=(CMP_DATA*)Calloc(k,CMP_DATA);
+  va_start(ap,k);
+  for(i=0;i<k;i++) {
+    cmp_data[i].V=va_arg(ap,float*);
+    cmp_data[i].func_cmp=va_arg(ap,FUNC_CMP);
+  }
+  va_end(ap);
+  gp_cmp_data=cmp_data;
+  g_ncmp=k; /*both used in the function cmp_mult*/
+  for(i=0;i<n;i++)
+    R[i]=i;
+  qsort(R,n,sizeof(R[0]),cmp_mult);
+  Free(cmp_data);
+}  
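+/* A minimal illustration (not part of the package API): order five statistics
+   by absolute value (largest first, as in a two-sided test), breaking ties by
+   a second array compared with cmp_low; R receives the index permutation:
+
+   float T[5]={1.5, -2.0, 0.5, 2.0, -0.5};
+   float P[5]={0.10, 0.02, 0.80, 0.01, 0.70};
+   int R[5];
+   order_mult_data(R, 5, 2, T, cmp_abs, P, cmp_low);
+   print_narray(stderr, R, 5);    prints 3 1 0 4 2
+*/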
+
+void order_data(float* V,int*R,int n,FUNC_CMP func_cmp)
+{
+  int i;
+  for(i=0;i<n;i++)
+    R[i]=i;
+  gp_arr=V;
+  qsort(R,n,sizeof(R[0]),func_cmp);
+}
+  
+
+/********************************************************************************/
+/*               comparing functions                                            */
+/********************************************************************************/
+/*  1 cmp_abs: compares absolute values
+    2 cmp_low: compares the lower tail
+    3 cmp_high: compares the upper tail
+  *Note: gp_arr is the global pointer that must be set before calling qsort.
+    After sorting with these functions, the data are ordered such that
+    1 cmp_abs: larger absolute values get lower indices
+                  (as in a two-sided test)
+    2 cmp_low: smaller values get lower indices
+                  (as in a lower-tail test)
+    3 cmp_high: larger values get lower indices
+                  (as in an upper-tail test)
+*/
+/*always put NA values at the end of the array*/
+int cmp_abs(const void *v1, const void *v2) {
+  float f1=fabs(*(gp_arr+*(int *)v1));
+  float f2=fabs(*(gp_arr+*(int *)v2));
+  if(f1==NA_FLOAT)
+    return 1;
+  if(f2==NA_FLOAT)
+    return -1;
+  if (f1<f2)
+    return 1;
+  if (f1>f2)
+    return -1;
+  else
+    return 0; 
+}
+int cmp_low(const void *v1, const void *v2) {
+  if((*(gp_arr+*(int *)v1))==NA_FLOAT)
+    return 1;
+  if((*(gp_arr+*(int *)v2))==NA_FLOAT)
+    return -1;
+  if ((*(gp_arr+*(int *)v1))<(*(gp_arr+*(int *)v2)))
+    return -1;
+  if ((*(gp_arr+*(int *)v1))>(*(gp_arr+*(int *)v2)))
+    return 1;
+  else
+    return 0; 
+} 
+int cmp_high(const void *v1, const void *v2) {
+  if((*(gp_arr+*(int *)v1))==NA_FLOAT)
+    return -1;
+  if((*(gp_arr+*(int *)v2))==NA_FLOAT)
+    return 1;
+  if ((*(gp_arr+*(int *)v1))<(*(gp_arr+*(int *)v2)))
+    return 1;
+  if ((*(gp_arr+*(int *)v1))>(*(gp_arr+*(int *)v2)))
+    return -1;
+  else
+    return 0; 
+}
+/********************************************************************************/
+/*           ending the sorting functions                                       */
+/********************************************************************************/
+
+
+
+
+
+
+
diff --git a/vignettes/MTP.pdf b/vignettes/MTP.pdf
new file mode 100755
index 0000000..be12750
Binary files /dev/null and b/vignettes/MTP.pdf differ
diff --git a/vignettes/MTP.tex b/vignettes/MTP.tex
new file mode 100755
index 0000000..a02ba3e
--- /dev/null
+++ b/vignettes/MTP.tex
@@ -0,0 +1,933 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% \VignetteIndexEntry{Multiple Testing Procedures}
+% \VignetteKeywords{Expression Analysis}
+% \VignettePackage{multtest}
+
+\documentclass[11pt]{article}
+
+\usepackage{graphicx}    % standard LaTeX graphics tool
+\usepackage{Sweave}
+\usepackage{amsfonts}
+
+% these should probably go into a dedicated style file
+\newcommand{\Rpackage}[1]{\textit{#1}}
+\newcommand{\Robject}[1]{\texttt{#1}}
+\newcommand{\Rclass}[1]{\textit{#1}}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+% Our added packages and definitions
+ 
+\usepackage{hyperref}
+\usepackage{amsmath}
+\usepackage{color}
+\usepackage{comment}
+\usepackage[authoryear,round]{natbib}
+
+\parindent 0in
+
+\definecolor{red}{rgb}{1, 0, 0}
+\definecolor{green}{rgb}{0, 1, 0}
+\definecolor{blue}{rgb}{0, 0, 1}
+\definecolor{myblue}{rgb}{0.25, 0, 0.75}
+\definecolor{myred}{rgb}{0.75, 0, 0}
+\definecolor{gray}{rgb}{0.5, 0.5, 0.5}
+\definecolor{purple}{rgb}{0.65, 0, 0.75}
+\definecolor{orange}{rgb}{1, 0.65, 0}
+
+\def\RR{\mbox{\it I\hskip -0.177em R}}
+\def\ZZ{\mbox{\it I\hskip -0.177em Z}}
+\def\NN{\mbox{\it I\hskip -0.177em N}}
+
+\newtheorem{theorem}{Theorem}
+\newtheorem{procedure}{Procedure}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\begin{document}
+
+\title{Multiple Testing Procedures} 
+\author{Katherine S. Pollard$^1$, Sandrine Dudoit$^2$, Mark J. van der Laan$^3$} 
+\maketitle
+
+\begin{center}
+1. Center for Biomolecular Science and Engineering, University of California, Santa Cruz, \url{http://lowelab.ucsc.edu/katie/}\\
+2. Division of Biostatistics, University of California, Berkeley, \url{http://www.stat.berkeley.edu/~sandrine/}\\
+3. Department of Statistics and Division of Biostatistics, University of California, Berkeley, \url{http://www.stat.berkeley.edu/~laan/}\\
+\end{center}
+
+\tableofcontents
+
+\label{anal:mult:multtest}
+
+\section{Introduction}
+\label{anal:mult:s:intro}
+
+\subsection{Overview}
+The Bioconductor R package \Rpackage{multtest} implements widely applicable resampling-based single-step and stepwise multiple testing procedures (MTP) for controlling a broad class of Type I error rates, in testing problems involving general data generating distributions (with arbitrary dependence structures among variables), null hypotheses, and test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+The current version of \Rpackage{multtest} provides MTPs for null hypotheses concerning means, differences in means, and regression parameters in linear and Cox proportional hazards models.
+Both bootstrap and permutation estimators of the test statistics ($t$- or $F$-statistics) null distribution are available. 
+Procedures are provided to control Type I error rates defined as tail probabilities and expected values of arbitrary functions of the numbers of Type I errors, $V_n$, and rejected hypotheses, $R_n$. 
+These error rates include: 
+the generalized family-wise error rate, $gFWER(k) = Pr(V_n > k)$, or chance of at least $(k+1)$ false positives (the special case $k=0$ corresponds to the usual family-wise error rate, FWER); 
+tail probabilities $TPPFP(q) = Pr(V_n/R_n > q)$ for the proportion of false positives among the rejected hypotheses;
+the false discovery rate, $FDR=E[V_n/R_n]$.
+Single-step and step-down common-cut-off (maxT) and common-quantile (minP) procedures, which take into account the joint distribution of the test statistics, are implemented to control the FWER. 
+In addition, augmentation procedures are provided to control the gFWER, TPPFP, and FDR, based on {\em any} initial FWER-controlling procedure.
+The results of a multiple testing procedure are summarized using rejection regions for the test statistics, confidence regions for the parameters of interest, and adjusted $p$-values.
+The modular design of the \Rpackage{multtest} package allows interested users to readily extend the package's functionality, by inserting additional functions for test statistics and testing procedures. 
+The S4 class/method object-oriented programming approach was adopted to summarize the results of a MTP.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Motivation}
+
+Current statistical inference problems in areas such as genomics, astronomy, and marketing routinely involve the simultaneous test of thousands, or even millions, of null hypotheses. 
+Examples of testing problems in genomics include: 
+\begin{itemize}
+\item
+the identification of differentially expressed genes in microarray experiments, i.e., genes whose expression measures are associated with possibly censored responses or covariates of interest; 
+\item
+tests of association between gene expression measures and Gene Ontology (GO) annotation (\url{www.geneontology.org});
+\item
+the identification of transcription factor binding sites in ChIP-Chip experiments, where chromatin immunoprecipitation (ChIP) of transcription factor bound DNA is followed by microarray hybridization (Chip) of the IP-enriched DNA \cite{KelesetalTechRep147}; 
+\item
+the genetic mapping of complex traits using single nucleotide polymorphisms (SNP). 
+\end{itemize}
+The above testing problems share the following general characteristics: 
+\begin{itemize}
+\item
+inference for  high-dimensional multivariate distributions, with complex and unknown dependence structures among variables;
+\item
+broad range of parameters of interest, such as regression coefficients in models relating patient survival to genome-wide transcript levels or DNA copy numbers, and pairwise correlations between transcript levels; 
+\item
+many null hypotheses, in the thousands or even millions; 
+\item
+complex dependence structures among test statistics, e.g., Gene Ontology directed acyclic graph (DAG).
+\end{itemize}
+
+Motivated by these applications, we have developed resampling-based single-step and step-down multiple testing procedures (MTP) for controlling a broad class of Type I error rates, in testing problems involving general data generating distributions (with arbitrary dependence structures among variables), null hypotheses, and test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+In particular, Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} derive
+{\em single-step common-cut-off and common-quantile procedures} for controlling arbitrary parameters of the distribution of the number of Type I errors, such as the generalized family-wise error rate, $gFWER(k)$, or chance of at least $(k+1)$ false positives. 
+van der Laan et al. \cite{vdLaanetalMT2SAGMB04} focus on control of the family-wise error rate, $FWER = gFWER(0)$, and provide {\em step-down common-cut-off and common-quantile procedures}, based on maxima of test statistics (maxT) and minima of unadjusted $p$-values (minP), respectively. 
+Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and van der Laan et al. \cite{vdLaanetalMT3SAGMB04} propose a general class of {\em augmentation multiple testing procedures} (AMTP), obtained by adding suitably chosen null hypotheses to the set of null hypotheses already rejected by an initial MTP. In particular, given {\em any} FWER-controlling procedure, they show how one can trivially obtain 
+procedures controlling tail probabilities for the number (gFWER) and proportion (TPPFP) of false positives among the rejected hypotheses.
+ 
+A key feature of our proposed MTPs is the {\em test statistics null distribution} (rather than data generating null distribution) used to derive rejection regions (i.e., cut-offs) for the test statistics and resulting adjusted $p$-values \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}. 
+For general null hypotheses, defined in terms of submodels for the data generating distribution, this null distribution is the asymptotic distribution of the vector of null value shifted and scaled test statistics. 
+Resampling procedures (e.g., based on the non-parametric or model-based bootstrap) are proposed to conveniently obtain consistent estimators of the null distribution and the resulting test statistic cut-offs and adjusted $p$-values \cite{DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,Pollard&vdLaanJSPI04}.
+
+The Bioconductor R package \Rpackage{multtest} provides software implementations of the above multiple testing procedures. 
+
+\subsection{Outline}
+
+The present vignette provides a summary of our proposed multiple testing procedures (\cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}; Section \ref{anal:mult:s:methods}) 
+and discusses their software implementation in the Bioconductor R package \Rpackage{multtest} (Section \ref{anal:mult:s:software}). 
+The accompanying vignette (MTPALL) describes their application to the ALL dataset of Chiaretti et al. \cite{Chiarettietal04}.
+
+Specifically, given a multivariate dataset (stored as a \Rclass{matrix}, \Rclass{data.frame}, or microarray object of class \Rclass{ExpressionSet}) 
+and user-supplied choices for the test statistics, Type I error rate and its target level, resampling-based estimator of the test statistics null distribution, and procedure for error rate control, the main user-level function \Robject{MTP} returns unadjusted and adjusted $p$-values, cut-off vectors for the test statistics, and estimates and confidence regions for the parameters of interest. 
+Both bootstrap and permutation estimators of the test statistics null distribution are available and can optionally be output to the user. 
+The variety of models and hypotheses, test statistics, Type I error rates, and MTPs currently implemented are discussed in Section \ref{anal:mult:s:MTP}.
+The S4 class/method object-oriented programming approach was adopted to represent the results of a MTP. 
+Several methods are defined to produce numerical and graphical summaries of these results (Section \ref{anal:mult:s:summaries}).
+A modular programming approach, which utilizes function closures, allows interested users to readily extend the package's functionality, 
+by inserting functions for new test statistics and testing procedures (Section \ref{anal:mult:s:design}).
+Ongoing efforts are discussed in Section \ref{anal:mult:s:disc}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Methods}
+\label{anal:mult:s:methods}
+
+\subsection{Multiple hypothesis testing framework}
+\label{anal:mult:s:framework}
+
+{\em Hypothesis testing} is concerned with using observed data to test hypotheses, i.e.,  make decisions, regarding properties of the unknown data generating distribution. 
+Below, we discuss in turn the main ingredients of a multiple testing problem, namely: data, null and alternative hypotheses, test statistics, multiple testing procedure (MTP) to define rejection regions for the test statistics, Type I and Type II errors, and adjusted $p$-values. 
+The crucial choice of a test statistics null distribution is addressed in Section \ref{anal:mult:s:nullDistn}. 
+Specific proposals of MTPs are given in Sections \ref{anal:mult:s:SS} -- \ref{anal:mult:s:AMTP}.\\
+
+\noindent
+{\bf Data.} Let $X_1,\ldots,X_n$ be a {\em random sample} of $n$ independent and identically distributed (i.i.d.) random variables, $X \sim P\in {\cal M}$, where the {\em data generating distribution} $P$ is known to be an element of a particular {\em statistical model} ${\cal M}$ (i.e., a set of possibly non-parametric distributions).\\
+
+\noindent
+{\bf Null and alternative hypotheses.} 
+In order to cover a broad class of testing problems, define $M$
+null hypotheses in terms of a collection of {\em submodels}, ${\cal
+  M}(m)\subseteq {\cal M}$,  $m=1,\ldots,M$, for the data generating
+distribution $P$. The $M$ {\em null hypotheses} are defined as
+$H_0(m) \equiv \mathrm{I}(P\in {\cal M}(m))$ and the corresponding {\em
+  alternative hypotheses} as $H_1(m) \equiv \mathrm{I}(P \notin {\cal M}(m))$.
+
+In many testing problems, the submodels concern {\em parameters}, i.e., functions of the data generating distribution $P$, $\Psi(P) = \psi= (\psi(m):m=1,\ldots,M)$, such as means, differences in means, correlations, and parameters in linear models, generalized linear models, survival models, time-series models, dose-response models, etc. One distinguishes between two types of testing problems: {\em one-sided tests}, where $H_0(m) = \mathrm{I}(\psi(m) \leq \psi_0(m))$, and {\em two-sided tests}, where $H_0(m) = \mathrm{I}(\psi(m) = \psi_0(m))$.
+The hypothesized {\em null values}, $\psi_0(m)$, are frequently zero.
+
+ Let ${\cal H}_0={\cal H}_0(P)\equiv \{m:H_0(m)=1\} = \{m: P \in {\cal M}(m)\}$ be the set of $h_0 \equiv |{\cal H}_0|$ true null hypotheses, where we note that ${\cal H}_0$ depends on the data generating distribution $P$. Let ${\cal H}_1={\cal H}_1(P) \equiv {\cal H}_0^c(P) = \{m: H_1(m) = 1\} = \{m: P \notin {\cal M}(m)\}$
+be the set of  $h_1 \equiv |{\cal H}_1|  = M-h_0$ false null hypotheses, i.e., true positives.  
+The goal of a multiple testing
+  procedure is to accurately estimate the set ${\cal H}_0$, and thus its
+  complement ${\cal H}_1$, while controlling probabilistically the number
+  of false positives at a user-supplied level $\alpha$.\\
+
+\noindent
+{\bf Test statistics.} A testing procedure is a data-driven rule for deciding whether or not to {\em reject}  each of the $M$ null hypotheses $H_0(m)$, i.e., declare that $H_0(m)$ is false (zero) and hence $P \notin {\cal M}(m)$. 
+The decisions to reject or not the null hypotheses are based on an $M$--vector of
+{\em test statistics}, $T_n
+  =(T_n(m):m=1,\ldots,M)$, that are functions of the
+data, $X_1, \ldots, X_n$. Denote the typically unknown (finite sample) {\em joint distribution} of the test statistics $T_n$ by $Q_n=Q_n(P)$. 
+
+
+Single-parameter null hypotheses are commonly tested using {\em $t$-statistics}, i.e., standardized differences,
+\begin{equation}\label{anal:mult:e:tstat}
+T_n(m) \equiv \frac{\mbox{Estimator} - \mbox{Null value}}{\mbox{Standard error}} = \sqrt{n}\frac{\psi_n(m) - \psi_0(m)}{{\sigma_n(m)}}.
+\end{equation}
+In general, the $M$--vector $\psi_n = (\psi_n(m): m=1,\ldots, M)$ denotes an asymptotically linear {\em estimator} of the parameter $M$--vector $\psi = (\psi(m): m=1,\ldots,M)$ and $(\sigma_n(m)/\sqrt{n}:
+m=1,\ldots, M)$ denote consistent estimators of the {\em standard errors} of the components of $\psi_n$. 
+For tests of means, one recovers the usual one-sample and two-sample $t$-statistics, where the $\psi_n(m)$ and $\sigma_n(m)$ are based on sample means and variances, respectively.
+In some settings, it may be appropriate to use (unstandardized) {\em difference statistics}, $T_n(m) \equiv \sqrt{n}(\psi_n(m) - \psi_0(m))$ \cite{Pollard&vdLaanJSPI04}.
+Test statistics for other types of null hypotheses include $F$-statistics, $\chi^2$-statistics, and likelihood ratio statistics. \\
+
+
+\noindent
+{\bf Example: ALL microarray dataset.}
+Suppose that, as in the analysis of the ALL dataset of  Chiaretti et al. \cite{Chiarettietal04} (See accompanying vignette MTPALL), one is interested in identifying genes that are differentially expressed in two populations of  ALL cancer patients, those with normal cytogenetic test status and those with abnormal test. 
+The data consist of random $J$--vectors $X$, where the first $M$ entries of $X$ are microarray expression measures on $M$ genes of interest and the last entry, $X(J)$, is an indicator for cytogenetic test status (1 for normal, 0 for abnormal). 
+Then, the parameter of interest is an $M$--vector of differences in mean expression measures in the two populations, $\psi(m) = E[X(m) | X(J)=0] - E[X(m) | X(J)=1]$, $m=1,\ldots,M$. 
+To identify genes with higher mean expression measures in the abnormal compared to the normal cytogenetics subjects, one can test the one-sided null hypotheses $H_0(m) = \mathrm{I}(\psi(m) \leq 0)$ vs. the alternative hypotheses $H_1(m) = \mathrm{I}(\psi(m) > 0)$, using two-sample Welch $t$-statistics 
+\begin{equation}
+T_n(m) \equiv \frac{\bar{X}_{0,n_0}(m) - \bar{X}_{1,n_1}(m)}{\sqrt{\frac{\sigma_{0,n_0}^2(m)}{n_0} + \frac{\sigma_{1,n_1}^2(m)}{n_1}}},
+\end{equation}
+where $n_k$, $\bar{X}_{k,n_k}(m)$, and $\sigma_{k,n_k}^2(m)$ denote, respectively, the sample size, sample means, and sample variances, for patients with test status $k$, $k=0,\, 1$. The null hypotheses are rejected, i.e., the corresponding genes are declared differentially expressed, for large values of the test statistics $T_n(m)$.\\
+
+\noindent
+{\bf Multiple testing procedure.} A {\em multiple testing procedure} (MTP) provides {\em rejection regions}, ${\cal C}_n(m)$, i.e., sets of values for each test statistic $T_n(m)$ that lead to the decision to reject the null hypothesis $H_0(m)$. 
+In other words, a MTP produces a random (i.e., data-dependent) subset ${\cal R}_n$ of rejected hypotheses that estimates ${\cal H}_1$, the set of true positives,
+\begin{equation}
+{\cal R}_n={\cal R}(T_n, Q_{0n},\alpha) \equiv 
+\{m:\mbox{$H_0(m)$ is rejected}\} = \{m: T_n(m) \in {\cal C}_n(m)\},
+\end{equation}
+where ${\cal C}_n(m)={\cal C}(T_n,Q_{0n},\alpha)(m)$, $m=1,\ldots,M$, denote possibly random rejection regions. The long notation ${\cal R}(T_n, Q_{0n},\alpha)$ and ${\cal C}(T_n, Q_{0n},\alpha)(m)$ emphasizes that the MTP depends on:
+(i) the {\em data}, $X_1, \ldots, X_n$,
+ through the $M$--vector of {\em test statistics}, $T_n = (T_n(m): m=1,\ldots,
+ M)$;
+ (ii) a test statistics {\em null distribution}, $Q_{0n}$ (Section \ref{anal:mult:s:nullDistn}); and 
+(iii) the {\em nominal level} $\alpha$ of the MTP, i.e., the desired upper bound for a suitably defined false positive rate. 
+
+Unless specified otherwise, it is assumed that large values of the test statistic $T_n(m)$ provide evidence against the corresponding null hypothesis $H_0(m)$, that is, we consider rejection regions of the form ${\cal C}_n(m) = (c_n(m),\infty)$, where $c_n(m)$ are to-be-determined {\em cut-offs}, or {\em critical values}.\\ 
+
+\noindent
+{\bf Type I and Type II errors.} In any
+testing situation, two types of errors can be committed: a {\em false
+positive}, or {\em Type I error}, is committed by rejecting a true
+null hypothesis, and a {\em false negative}, or {\em Type
+II error}, is committed when the test procedure fails to reject a false null
+hypothesis. The situation can be summarized by Table \ref{anal:mult:t:TypeIandII}, below, where
+the number of Type I errors is $V_n \equiv \sum_{m \in {\cal H}_0} \mathrm{I}(T_n(m) \in {\cal C}_n(m)) = |{\cal R}_n \cap {\cal H}_0|$ and the number
+of Type II errors is $U_n \equiv \sum_{m \in {\cal H}_1} \mathrm{I}(T_n(m) \notin {\cal C}_n(m)) = |{\cal R}_n^c \cap {\cal H}_1|$. Note that both $U_n$
+and $V_n$ depend on the unknown data generating distribution $P$ through
+the unknown set of true null hypotheses ${\cal H}_0 = {\cal H}_0(P)$. The numbers $h_0=|{\cal H}_0|$ and $h_1 = |{\cal H}_1| = M-h_0$ of true and false null hypotheses are
+{\em unknown parameters}, the number of rejected hypotheses $R_n \equiv \sum_{m=1}^M  \mathrm{I}(T_n(m) \in {\cal C}_n(m)) = |{\cal R}_n|$ is an {\em observable random variable}, and the entries in the body of the table, $U_n$, $h_1 -
+U_n$, $V_n$, and $h_0-V_n$, are
+{\em unobservable random variables} (depending on $P$, through ${\cal H}_0(P)$). 
+\begin{table}[hhh]
+\caption{Type I and Type II errors in multiple hypothesis testing.}
+\label{anal:mult:t:TypeIandII}
+\begin{tabular}{ll|cc|l}
+\multicolumn{5}{c}{} \\
+\multicolumn{2}{c}{} & \multicolumn{2}{c}{Null hypotheses} & \multicolumn{1}{c}{}\\
+\multicolumn{2}{c}{} & \multicolumn{1}{c}{not rejected} & \multicolumn{1}{c}{rejected} & \multicolumn{1}{c}{} \\
+%%% \multicolumn{5}{c}{}\\
+\cline{3-4}
+&&&&\\
+& true & $| {\cal R}_n^c \cap {\cal H}_0 |$ &
+$V_n = | {\cal R}_n \cap {\cal H}_0 |$ &
+$h_0=| {\cal H}_0|$\\
+&&&(Type I errors)&\\
+Null hypotheses&&&&\\
+& false & $U_n = | {\cal R}_n^c \cap {\cal H}_1 |$ & $| {\cal R}_n \cap {\cal H}_1 |$ & $h_1=| {\cal H}_1
+|$\\
+&&(Type II errors)&&\\
+&&&&\\
+\cline{3-4}
+%%% \multicolumn{5}{c}{}\\
+\multicolumn{2}{c}{}& \multicolumn{1}{c}{$M-R_n$} &
+\multicolumn{1}{c}{ $R_n = | {\cal R}_n|$}
+&\multicolumn{1}{l}{$M$}\\
+\end{tabular}
+\end{table}
+
+Ideally, one would like to simultaneously minimize both the chances of committing a Type I error and a Type II error. Unfortunately, this is not feasible and one seeks a {\em trade-off} between the two types of errors. A standard approach is to specify an acceptable level $\alpha$ for the Type I error rate and derive testing procedures, i.e., rejection regions, that aim to minimize the Type II error rate, i.e., maximize {\em power}, within the class of tests with Type I error rate at most $\alpha$.
+
+
+\noindent
+{\bf Type I error rates.}
+When testing multiple hypotheses, there are many possible definitions for the Type I error rate (and power). Accordingly, we adopt a general definition of Type I error rates, as parameters, $\theta_n = \theta(F_{V_n,R_n})$, of the joint distribution $F_{V_n,R_n}$ of the numbers of Type I errors $V_n$ and rejected hypotheses $R_n$. 
+Such a general representation covers the following commonly-used Type I error rates.
+\begin{enumerate}
+\item 
+{\em Generalized family-wise error rate} (gFWER), or 
+ probability of at least $(k+1)$ Type I errors, $k=0,\ldots, (h_0-1)$,
+\begin{equation}\label{anal:mult:e:gFWER}
+gFWER(k) \equiv Pr(V_n > k) = 1 - F_{V_n}(k).
+\end{equation}
+When $k=0$, the gFWER is the usual {\em family-wise error rate}, FWER, controlled by the classical Bonferroni procedure.
+\item
+{\em Per-comparison error rate} (PCER), or expected 
+proportion of Type I errors among the $M$ tests,
+\begin{equation}\label{anal:mult:e:PCER}
+PCER \equiv \frac{1}{M} E[V_n] = \frac{1}{M} \int v dF_{V_n}(v).
+\end{equation}
+\item
+{\em Tail probabilities for the proportion of false positives} (TPPFP) among the rejected hypotheses,
+\begin{equation}\label{anal:mult:e:TPPFP}
+TPPFP(q) \equiv Pr(V_n/R_n > q) = 1 - F_{V_n/R_n}(q), \qquad q \in (0,1),
+\end{equation}
+with the convention that $V_n/R_n \equiv 0$, if $R_n=0$.
+\item
+{\em False discovery rate} (FDR), or  expected value of the proportion of false positives among the rejected hypotheses, 
+\begin{equation}\label{anal:mult:e:FDR}
+FDR \equiv E[V_n/R_n] = \int q dF_{V_n/R_n}(q),
+\end{equation}
+again with the convention that $V_n/R_n \equiv 0$, if $R_n=0$ \cite{Benjamini&Hochberg95}. 
+\end{enumerate}
+Note that while the gFWER is a parameter of only the {\em marginal} distribution $F_{V_n}$ for the number of Type I errors $V_n$ (tail probability, or survivor function, for $V_n$), the TPPFP is a parameter of the {\em joint} distribution of $(V_n,R_n)$ (tail probability, or survivor function, for $V_n/R_n$). 
+ Error rates based on the {\em proportion} of false positives (e.g., TPPFP and FDR) are especially appealing for the large-scale testing problems encountered in genomics, compared to error rates based on the {\em number} of false positives (e.g., gFWER), as they do not increase exponentially with the number of hypotheses. 
+The above four error rates are part of the broad class of Type I error rates considered in Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and defined as tail probabilities $Pr(g(V_n,R_n) > q)$ and expected values $E[g(V_n,R_n)]$ for an arbitrary function $g(V_n,R_n)$ of the numbers of false positives $V_n$ and rejected hypotheses $R_n$. The gFWER and TPPFP correspond to the special cases $g(V_n,R_n) = V_n$ and $g(V_n,R_n) = V_n/R_n$, respectively.\\
+
+
+\noindent
+{\bf Adjusted $p$-values.} The notion of $p$-value extends directly to multiple testing problems, as follows. 
+Given a MTP, ${\cal R}_n = {\cal R}(T_n,Q_{0n}, \alpha)$, the {\em adjusted $p$-value}, $\widetilde{P}_{0n}(m) = \widetilde{P}(T_n,Q_{0n})(m)$, for null hypothesis $H_0(m)$, is defined as the smallest Type I error level $\alpha$ at which one would reject $H_0(m)$, that is,
+\begin{eqnarray}
+\widetilde{P}_{0n}(m) &\equiv& \inf \left \{ \alpha \in [0,1]: \mbox{Reject $H_0(m)$ at MTP level $\alpha$}\right \}\\
+&=& \inf\left \{\alpha \in [0,1]: m \in {\cal R}_n \right \}\nonumber \\
+&=& \inf\left \{\alpha \in [0,1]: T_n(m) \in {\cal C}_n(m) \right \}, \qquad m=1,\ldots, M.\nonumber
+\end{eqnarray}
+As in single hypothesis tests, the smaller the adjusted $p$-value, the stronger the evidence against the corresponding null hypothesis. The main difference between unadjusted (i.e., for the test of a single hypothesis) and adjusted $p$-values is that the latter are defined in terms of the Type I error rate for the {\em entire} testing procedure, i.e., take into account the multiplicity of tests.
+For example, the adjusted $p$-values for the classical Bonferroni procedure for FWER control are given by $\widetilde{P}_{0n}(m) = \min(M P_{0n}(m), 1)$, 
+where $P_{0n}(m)$ is the unadjusted $p$-value for the test of single hypothesis $H_0(m)$.
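+For instance, with $M=1000$ tests, a null hypothesis with unadjusted $p$-value $P_{0n}(m) = 10^{-5}$ has Bonferroni adjusted $p$-value $\widetilde{P}_{0n}(m) = \min(1000 \times 10^{-5}, 1) = 0.01$, and would therefore be rejected at FWER level $\alpha = 0.05$.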
+
+We now have two representations for a MTP, in terms of rejection regions for the test statistics  and in terms of adjusted $p$-values 
+\begin{equation}
+{\cal R}_n = \{m: T_n(m) \in {\cal C}_n(m) \} = \{m: \widetilde{P}_{0n}(m) \leq \alpha\}.
+\end{equation}
+Again, as in the single hypothesis case, an
+advantage of reporting adjusted $p$-values, as opposed to only
+rejection or not of the hypotheses, is that the level $\alpha$ of the test does
+not need to be determined in advance, that is, results of the multiple
+testing procedure are provided for all $\alpha$. 
+ Adjusted $p$-values are convenient and flexible summaries of the strength of the evidence against each null hypothesis, in terms of the Type I error rate for the entire MTP (gFWER, TPPFP, FDR, or any other suitably defined error rate). \\
+
+\noindent
+{\bf Stepwise multiple testing procedures.} 
+One usually distinguishes between two main classes of multiple testing
+procedures, single-step and stepwise procedures.  
+ In {\em single-step procedures}, each null hypothesis is
+ evaluated using a rejection region that is  independent of the results of the tests of other hypotheses.
+Improvement in power, while preserving Type I error rate
+control, may be achieved by {\em stepwise procedures}, in which 
+rejection of a particular null hypothesis depends on the outcome of
+the tests of other hypotheses. 
+That is, the (single-step) test procedure is applied to a sequence of successively smaller nested random (i.e., data-dependent) subsets of null hypotheses, defined by the ordering of the test statistics (common cut-offs) or unadjusted $p$-values (common-quantile cut-offs).
+In {\em step-down procedures}, the hypotheses
+corresponding to the {\em most significant} test statistics (i.e., largest absolute test
+statistics or smallest unadjusted $p$-values) are considered successively, with further tests depending
+on the outcome of earlier ones.
+As soon as one fails to reject a null hypothesis, no further
+hypotheses are rejected. 
+In contrast, for {\em step-up procedures},
+the hypotheses corresponding to the {\em least significant} test
+statistics are considered successively, again with further tests
+depending on the outcome of earlier ones. As soon as one hypothesis
+is rejected, all remaining more significant hypotheses are rejected.\\
+
+
+
+\noindent
+{\bf Confidence regions.} 
+For the test of single-parameter null hypotheses and for any Type I error rate of the form $\theta(F_{V_n})$, Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} provide results on the correspondence between single-step MTPs and $\theta$--specific {\em confidence regions}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Test statistics null distribution}
+\label{anal:mult:s:nullDistn}
+
+\noindent
+{\bf Test statistics null distribution.}
+One of the main tasks in specifying a MTP is to derive rejection regions for the test statistics such that the Type I error rate is controlled at a desired level $\alpha$, i.e., such that $\theta(F_{V_n,R_n}) \leq \alpha$, for finite sample control, or $\limsup_n \theta(F_{V_n,R_n}) \leq \alpha$, for asymptotic control.
+However, one is immediately faced with the problem that the {\em true distribution} $Q_n=Q_n(P)$ of the test statistics $T_n$ is usually {\em unknown}, and hence, so are the distributions of the numbers of Type I errors, $V_n = \sum_{m \in {\cal H}_0} \mathrm{I}(T_n(m) \in {\cal C}_n(m))$, and rejected hypotheses, $R_n = \sum_{m=1}^M  \mathrm{I}(T_n(m) \in {\cal C}_n(m))$. 
+In practice, the test statistics {\em true distribution} $Q_n(P)$ is replaced by a {\em null distribution} $Q_0$ (or estimate thereof, $Q_{0n}$), in order to derive rejection regions, ${\cal C}(T_n,Q_0,\alpha)(m)$, and resulting adjusted $p$-values, $\widetilde{P}(T_n,Q_0)(m)$. 
+
+The choice of null distribution $Q_0$ is crucial, in order
+to ensure that (finite sample or asymptotic) control of the Type I
+error rate under the {\em assumed} null distribution $Q_0$ does indeed provide the required control under the {\em true} distribution $Q_n(P)$.
+For proper control, the null distribution $Q_0$ must be such that the Type I error rate under this assumed null distribution {\em dominates} the Type I error rate under the true distribution $Q_n(P)$. That is, one must have $\theta(F_{V_n,R_n}) \leq \theta(F_{V_0,R_0})$, for finite sample control, and $\limsup_n \theta(F_{V_n,R_n}) \leq  \theta(F_{V_0,R_0})$, for asymptotic control, where $V_0$ and $R_0$ denote, respectively, the numbers of Type I errors and rejected hypotheses under the assumed null distribution $Q_0$.
+
+
+For error rates $\theta(F_{V_n})$, defined as arbitrary parameters of the distribution of the number of Type I errors $V_n$, we propose as null distribution the asymptotic distribution $Q_0$ of the vector of null value shifted and scaled test statistics \cite{Dudoit&vdLaanMTBook,DudoitetalMT1SAGMB04,vdLaanetalMT2SAGMB04,vdLaanetalMT3SAGMB04,Pollard&vdLaanJSPI04}:
+\begin{equation}
+Z_n(m) \equiv 
+ \sqrt{\min \left(1,
+  \frac{\tau_0(m)}{Var[T_n(m)]}\right)} \Bigl( T_n(m) + \lambda_0(m) - E[T_n(m)] \Bigr).
+\end{equation}
+For the test of single-parameter null hypotheses using $t$-statistics, the null values are $\lambda_0(m)=0$ and $\tau_0(m)=1$. For testing the equality of $K$ population means using $F$-statistics, the null values are  $\lambda_0(m)= 1$ and $\tau_0(m) = 2/(K-1)$, under the assumption of equal variances in the different populations.
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and van der Laan et al. \cite{vdLaanetalMT2SAGMB04} prove that this null distribution does indeed provide the desired asymptotic control of the Type I error rate $\theta(F_{V_n})$, for
+ general data generating distributions (with arbitrary dependence structures among variables), null hypotheses (defined in terms of submodels for the data generating distribution), and test statistics (e.g., $t$-statistics, $F$-statistics).
+
+For a broad class of testing problems, such as the test of single-parameter null hypotheses using $t$-statistics (as in Equation (\ref{anal:mult:e:tstat})), the null distribution $Q_0$ is an $M$--variate Gaussian distribution with mean vector zero and covariance matrix $\Sigma^*(P)$: $Q_0 = Q_0(P) \equiv N(0,\Sigma^*(P))$. 
+For tests of means, where the parameter of interest is the $M$--dimensional mean vector $\Psi(P) = \psi = E[X]$, the estimator $\psi_n$ is simply the $M$--vector of sample averages and $\Sigma^*(P)$ is the correlation matrix of $X \sim P$, $Cor[X]$. More generally, for an asymptotically linear estimator $\psi_n$, $\Sigma^*(P)$ is the correlation matrix of the vector influence curve (IC).
+
+Note that the following important points distinguish our approach from existing approaches to Type I error rate control. 
+Firstly, we are only concerned with Type I error control under the {\em true data generating distribution} $P$. The notions of weak and strong control (and associated subset pivotality, Westfall \& Young \cite{Westfall&Young93},
+p. 42--43) are therefore irrelevant to our approach. 
+Secondly, we propose a {\em null distribution for the test statistics} ($T_n \sim Q_0$), and not a data generating null distribution ($X \sim P_0\in \cap_{m=1}^M {\cal M}(m)$). 
+The latter practice does not necessarily provide proper Type I error control, as the test statistics' {\em assumed} null distribution $Q_n(P_0)$ and their {\em true} distribution $Q_n(P)$ may have different dependence structures (in the limit) for the true null hypotheses ${\cal H}_0$.\\
+
+
+\noindent
+{\bf Bootstrap estimation of the test statistics null distribution.}
+In practice, since the data generating distribution $P$ is unknown, so is the proposed null distribution $Q_0=Q_0(P)$.  Resampling procedures, such as bootstrap Procedure \ref{anal:mult:proc:boot}, below, may be used to conveniently obtain consistent estimators $Q_{0n}$ of the null distribution $Q_0$ and of the resulting test statistic cut-offs and adjusted $p$-values. 
+
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and van der Laan et al. \cite{vdLaanetalMT2SAGMB04} show that single-step and step-down procedures based on consistent estimators of the null distribution $Q_0$ also provide asymptotic control of the Type I error rate. The reader is referred to these two articles and to Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} for details on the choice of null distribution and various approaches for estimating this null distribution.
+
+Having selected a suitable test statistics null distribution, there remains the main task of specifying rejection regions for each null hypothesis, i.e., cut-offs for each test statistic. 
+Among the different approaches for defining rejection regions, we distinguish between single-step vs. stepwise procedures, and common cut-offs (i.e., the same cut-off $c_0$ is used for each test statistic) vs. common-quantile cut-offs (i.e., the cut-offs are the $\delta_0$--quantiles of the marginal null distributions of the test statistics). 
+The next three subsections discuss three main approaches for deriving rejection regions and corresponding adjusted $p$-values: single-step common-cut-off and common-quantile procedures for control of general Type I error rates $\theta(F_{V_n})$ (Section \ref{anal:mult:s:SS});  step-down  common-cut-off (maxT) and common-quantile (minP) procedures for control of the FWER (Section \ref{anal:mult:s:SD}); augmentation procedures for control of the gFWER and TPPFP, based on an initial FWER-controlling procedure (Section \ref{anal:mult:s:AMTP}).
+
+\begin{center}
+\fbox{\parbox{4.5in}{%
+\begin{procedure}
+\label{anal:mult:proc:boot}
+{\bf [Bootstrap estimation of the null distribution $Q_0$]}
+\begin{enumerate} 
+\item
+ Let $P_n^{\star}$ denote an estimator of the data generating distribution
+$P$. For the {\em non-parametric bootstrap},  $P_n^{\star}$ is simply the
+empirical distribution $P_n$, that is, samples of size $n$ are drawn
+at random, with replacement from the observed data $X_1, \ldots, X_n$. For
+the {\em model-based bootstrap}, $P_n^{\star}$ is based on a model ${\cal
+  M}$ for the data generating distribution $P$, such
+as the family of $M$--variate Gaussian distributions.
+\item
+Generate $B$ bootstrap samples, each consisting of $n$ i.i.d. realizations of a random variable $X^{\#} \sim P_n^{\star}$. 
+\item
+For the $b$th bootstrap sample, $b=1,\ldots, B$, compute an $M$--vector of test statistics, $T_n^{\#}(\cdot,b) = (T_n^{\#}(m,b): m=1,\ldots,M)$.  Arrange these bootstrap statistics in an $M \times B$ matrix, $\mathbf{T}_n^{\#} = \bigl(T_n^{\#}(m,b)\bigr)$, with rows corresponding to the $M$ null hypotheses and columns to the $B$ bootstrap samples.
+\item
+Compute row means, $E[T_n{^\#}(m,\cdot)]$, and row variances, $Var[T_n{^\#}(m,\cdot)]$, of the matrix $\mathbf{T}_n^{\#}$, to yield estimates of the true means $E[T_n(m)]$ and variances $Var[T_n(m)]$ of the test statistics, respectively.
+\item
+Obtain an $M \times B$ matrix, $\mathbf{Z}_n^{\#} = \bigl(Z_n^{\#}(m,b)\bigr)$, of
+null value shifted and scaled bootstrap statistics $Z_n^{\#}(m,b)$, by row-shifting and scaling the matrix
+$\mathbf{T}_n^{\#}$ using the bootstrap estimates of $E[T_n(m)]$ and
+$Var[T_n(m)]$ and the user-supplied null values $\lambda_0(m)$ and
+$\tau_0(m)$. That is, compute 
+\begin{eqnarray}
+Z_n^{\#}(m,b) &\equiv&  \sqrt{\min \left(1,
+  \frac{\tau_0(m)}{Var[T_n{^\#}(m,\cdot)]}\right)}\\
+&& \qquad \times \ \Bigl( T_n^{\#}(m,b) + \lambda_0(m) - E[T_n{^\#}(m,\cdot)] \Bigr)  \nonumber .
+\end{eqnarray}
+\item
+The bootstrap
+estimate $Q_{0n}$ of the null distribution $Q_0$ is the empirical distribution of the $B$ columns $Z_n^{\#}(\cdot,b)$ of matrix $\mathbf{Z}_n^{\#}$.
+\end{enumerate}
+\end{procedure}
+}}
+\end{center}
+
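+The null value shifting and scaling step of Procedure \ref{anal:mult:proc:boot} is easily expressed in R. The following sketch is for illustration only (the package performs this computation internally, in C), and assumes a hypothetical $M \times B$ matrix \Robject{Tb} of bootstrap statistics and null values \Robject{lambda0} and \Robject{tau0} (for $t$-statistics, $\lambda_0(m)=0$ and $\tau_0(m)=1$).
+
+\begin{Schunk}
+\begin{Sinput}
+> # Sketch only: row-shift and scale an M x B matrix Tb of bootstrap
+> # statistics, given null values lambda0 and tau0
+> center.scale <- function(Tb, lambda0 = 0, tau0 = 1) {
++     means <- rowMeans(Tb)
++     vars <- apply(Tb, 1, var)
++     sqrt(pmin(1, tau0/vars)) * (Tb + lambda0 - means)
++ }
+\end{Sinput}
+\end{Schunk}
+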
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Single-step procedures for control of general Type I error rates $\theta(F_{V_n})$}
+\label{anal:mult:s:SS}
+
+
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} propose single-step common-cut-off and common-quantile procedures for controlling arbitrary parameters $\theta(F_{V_n})$ of the distribution of the number of Type I errors. 
+The main idea is to substitute control of the parameter $\theta(F_{V_n})$, for the {\em  unknown, true distribution} $F_{V_n}$ of the number of Type I errors, by control of the corresponding parameter $\theta(F_{R_0})$, for the {\em known, null distribution} $F_{R_0}$ of the number of rejected hypotheses. 
+That is, consider single-step procedures of the form ${\cal R}_n \equiv \{m: T_n(m)> c_n(m) \}$, 
+where the cut-offs $c_n(m)$ are chosen so that $\theta(F_{R_0}) \leq
+\alpha$, for $R_0 \equiv \sum_{m=1}^M \mathrm{I}(Z(m) >  c_n(m))$
+and $Z \sim Q_0$.
+Among the class of MTPs that satisfy $\theta(F_{R_0}) \leq \alpha$, 
+Dudoit et al. \cite{DudoitetalMT1SAGMB04} and Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} propose two procedures, based on common cut-offs and common-quantile cut-offs, respectively. 
+The procedures are summarized below and the reader is referred to the articles for proofs and details on the derivation of cut-offs and adjusted $p$-values.\\
+
+\noindent
+{\bf Single-step common-cut-off procedure.} The set of rejected hypotheses for the {\em $\theta$--controlling single-step common-cut-off procedure} is of the form
+${\cal R}_n \equiv \{m: T_n(m)> c_0 \}$, where the common cut-off $c_0$ is the {\em smallest}  (i.e., least conservative) value for which $\theta(F_{R_0}) \leq \alpha$.
+
+For $gFWER(k)$ control (special case $\theta(F_{V_n}) = 1 - F_{V_n}(k)$), the procedure is based on the {\em $(k+1)$st ordered test statistic}.  
+Specifically, the adjusted $p$-values are given by
+\begin{equation}\label{anal:mult:e:SScut}
+\widetilde{p}_{0n}(m) = Pr_{Q_0} \left(Z^{\circ}(k+1) \geq t_n(m) \right),  \qquad m=1,\ldots, M,
+\end{equation}
+where $Z^{\circ}(m)$ denotes the $m$th ordered component of $Z = (Z(m): m=1,\ldots,M) \sim Q_0$, so that $Z^{\circ}(1) \geq \ldots \geq Z^{\circ}(M)$. 
+For FWER control ($k=0$), the procedure reduces to the  {\em single-step maxT procedure}, based on the {\em maximum test statistic}, $Z^{\circ}(1)$.\\
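+
+A hedged sketch of this computation, given an $M$--vector \Robject{Tn} of observed statistics and a hypothetical $M \times B$ matrix \Robject{Zb} of null statistics (e.g., the output of Procedure \ref{anal:mult:proc:boot}):
+
+\begin{Schunk}
+\begin{Sinput}
+> # Sketch only: adjusted p-values for the gFWER(k) single-step
+> # common-cut-off procedure; k = 0 gives the single-step maxT procedure
+> ss.cc.adjp <- function(Tn, Zb, k = 0) {
++     # (k+1)st largest null statistic in each bootstrap column
++     maxk <- apply(Zb, 2, function(z) sort(z, decreasing = TRUE)[k + 1])
++     sapply(Tn, function(t) mean(maxk >= t))
++ }
+\end{Sinput}
+\end{Schunk}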
+
+\noindent
+{\bf Single-step common-quantile procedure.} The set of rejected hypotheses for the {\em $\theta$--controlling single-step common-quantile procedure} is of the form
+${\cal R}_n \equiv \{m: T_n(m)> c_0(m) \}$, where $c_0(m) = Q_{0,m}^{-1}(\delta_0)$ is the $\delta_0$--quantile of the marginal null distribution $Q_{0,m}$ of the $m$th test statistic, i.e., the smallest value $c$ such that $Q_{0,m}(c) = Pr_{Q_0}(Z(m) \leq c) \geq \delta_0$ for $Z \sim Q_0$. Here, $\delta_0$ is chosen as the {\em smallest} (i.e., least conservative) value for which $\theta(F_{R_0}) \leq \alpha$.
+
+For $gFWER(k)$ control, the procedure is based on the {\em $(k+1)$st ordered unadjusted $p$-value}. 
+Specifically, let $\bar{Q}_{0,m} \equiv 1 - Q_{0,m}$ denote the survivor functions for the marginal null distributions $Q_{0,m}$ and define unadjusted $p$-values $P_0(m) \equiv  \bar{Q}_{0,m}(Z(m))$ and $P_{0n}(m) \equiv  \bar{Q}_{0,m}(T_n(m))$, for $Z \sim Q_0$ and  $T_n \sim Q_n$, respectively. Then, the adjusted $p$-values for the common-quantile procedure are given by
+\begin{equation}\label{anal:mult:e:SSquant}
+\widetilde{p}_{0n}(m) = Pr_{Q_0} \left(P_0^{\circ}(k+1) \leq p_{0n}(m) \right),  \qquad m=1,\ldots, M,
+\end{equation}
+where $P_0^{\circ}(m)$ denotes the $m$th ordered component of the $M$--vector of unadjusted $p$-values $(P_0(m): m=1,\ldots,M)$, so that $P_0^{\circ}(1) \leq \ldots \leq P_0^{\circ}(M)$.  
+For FWER control ($k=0$), one recovers the {\em single-step minP procedure}, based on the {\em minimum unadjusted $p$-value}, $P_0^{\circ}(1)$.
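+
+As a companion sketch, the common-quantile adjusted $p$-values can be approximated by estimating each marginal survivor function $\bar{Q}_{0,m}$ from the corresponding row of the same hypothetical null matrix \Robject{Zb} (ties are handled only crudely here):
+
+\begin{Schunk}
+\begin{Sinput}
+> # Sketch only: adjusted p-values for the gFWER(k) single-step
+> # common-quantile procedure; k = 0 gives the single-step minP procedure
+> ss.cq.adjp <- function(Tn, Zb, k = 0) {
++     M <- nrow(Zb)
++     rawp <- sapply(1:M, function(m) mean(Zb[m, ] >= Tn[m]))
++     P0 <- t(apply(Zb, 1, function(z) 1 - (rank(z) - 1)/length(z)))
++     mink <- apply(P0, 2, function(p) sort(p)[k + 1])
++     sapply(rawp, function(p) mean(mink <= p))
++ }
+\end{Sinput}
+\end{Schunk}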
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Step-down procedures for control of the family-wise error rate}
+\label{anal:mult:s:SD}
+
+van der Laan et al. \cite{vdLaanetalMT2SAGMB04} propose step-down common-cut-off (maxT) and common-quantile (minP) procedures for controlling the family-wise error rate, FWER. 
+These procedures are similar in spirit to their single-step counterparts in Section \ref{anal:mult:s:SS} (special case $\theta(F_{V_n}) = 1 - F_{V_n}(0)$), with the important step-down distinction that hypotheses are considered successively, from most significant to least significant, with further tests depending on the outcome of earlier ones. 
+That is, the test procedure is applied to a sequence of successively smaller nested random (i.e., data-dependent) subsets of null hypotheses, defined by the ordering of the test statistics (common cut-offs) or unadjusted $p$-values (common-quantile cut-offs). \\
+
+\noindent
+{\bf Step-down common-cut-off (maxT) procedure.}
+Rather than being based solely on the distribution of the maximum test statistic over all $M$ hypotheses, the step-down common cut-offs and corresponding adjusted $p$-values are based on the distributions of maxima of test statistics over successively smaller nested random subsets of null hypotheses. 
+Specifically, let $O_n(m)$ denote the indices for the ordered test statistics $T_n(m)$, so that $T_n(O_n(1)) \geq \ldots \geq T_n(O_n(M))$. 
+The step-down common-cut-off procedure is then based on the distributions of maxima of test statistics over the nested subsets of ordered hypotheses $\overline{\cal O}_n(h) \equiv \{O_n(h),\ldots,O_n(M)\}$. 
+The adjusted $p$-values for the {\em step-down maxT procedure} are given by 
+\begin{equation}\label{anal:mult:e:SDmaxT}
+\widetilde{p}_{0n}(o_n(m)) =  \max_{h=1,\ldots, m}\ \left\{ Pr_{Q_0}\left(
+  \max_{l \in \overline{\cal O}_n(h)} Z(l) \geq t_n(o_n(h))\right)
+  \right \},
+\end{equation}
+where $Z=(Z(m): m=1,\ldots, M)  \sim Q_0$. 
+Taking maxima of the probabilities over $h \in \{1, \ldots, m\}$ enforces monotonicity of the adjusted $p$-values and ensures that the procedure is indeed step-down, that is, one can only reject a particular hypothesis provided all hypotheses with
+more significant (i.e., larger) test statistics were rejected beforehand.\\
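+
+In the same illustrative style, the step-down maxT adjusted $p$-values of Equation (\ref{anal:mult:e:SDmaxT}) might be computed from \Robject{Tn} and \Robject{Zb} as follows (a sketch only; the actual implementation is the internal function \Robject{sd.maxT}):
+
+\begin{Schunk}
+\begin{Sinput}
+> sd.maxT.adjp <- function(Tn, Zb) {
++     M <- length(Tn)
++     o <- order(Tn, decreasing = TRUE)    # indices O_n(1), ..., O_n(M)
++     q <- numeric(M)
++     for (h in 1:M) {
++         # maxima over the nested subset {O_n(h), ..., O_n(M)}, per column
++         maxh <- apply(Zb[o[h:M], , drop = FALSE], 2, max)
++         q[h] <- mean(maxh >= Tn[o[h]])
++     }
++     adjp <- numeric(M)
++     adjp[o] <- cummax(q)    # maxima over h enforce monotonicity
++     adjp
++ }
+\end{Sinput}
+\end{Schunk}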
+
+\noindent
+{\bf Step-down common-quantile (minP) procedure.}
+Likewise, the step-down common-quantile cut-offs and corresponding adjusted $p$-values are based on the distributions of minima of unadjusted $p$-values over successively smaller nested random subsets of null hypotheses.
+Specifically, let $O_n(m)$ denote the indices for the ordered unadjusted $p$-values $P_{0n}(m)$, so that $P_{0n}(O_n(1)) \leq \ldots \leq P_{0n}(O_n(M))$. 
+The step-down common-quantile procedure is then based on the distributions of minima of unadjusted $p$-values over the nested subsets of ordered hypotheses $\overline{\cal O}_n(h) \equiv \{O_n(h),\ldots,O_n(M)\}$. 
+The adjusted $p$-values for the {\em step-down minP procedure} are given by
+\begin{equation}\label{anal:mult:e:SDminP}
+\widetilde{p}_{0n}(o_n(m)) = \max_{h=1,\ldots, m}\ \left\{ Pr_{Q_0}\left(
+  \min_{l \in \overline{\cal O}_n(h)} P_0(l) \leq p_{0n}(o_n(h))\right)
+  \right \},
+\end{equation}
+where $P_0(m) = \bar{Q}_{0,m}(Z(m))$ and $Z=(Z(m): m=1,\ldots, M)  \sim Q_0$. 
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Augmentation multiple testing procedures}
+\label{anal:mult:s:AMTP}
+
+Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} and van der Laan et al. \cite{vdLaanetalMT3SAGMB04} discuss {\em augmentation multiple testing procedures} (AMTP), obtained by adding suitably chosen null hypotheses to the set of null hypotheses already rejected by an initial MTP. 
+Specifically, given {\em any} initial procedure controlling the generalized family-wise error rate, augmentation procedures are derived for controlling Type I error rates defined as tail probabilities and expected values for arbitrary functions $g(V_n,R_n)$ of the numbers of Type I errors and rejected hypotheses (e.g., proportion $g(V_n,R_n)=V_n/R_n$ of false positives among the rejected hypotheses). 
+Adjusted $p$-values for the AMTP are shown to be simply shifted versions of the adjusted $p$-values of the original MTP. 
+The important practical implication of these results is that {\em any} FWER-controlling MTP and its
+corresponding adjusted $p$-values, provide, without additional work, multiple testing procedures controlling a broad class of Type I error rates and their adjusted $p$-values.
+One can therefore build on the large pool of available FWER-controlling procedures, such as the single-step and step-down maxT and minP procedures discussed in Sections \ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}, above. 
+
+Augmentation procedures for controlling tail probabilities of the number (gFWER) and proportion (TPPFP) of false positives, based on an initial FWER-controlling procedure, are treated in detail in van der Laan et al. \cite{vdLaanetalMT3SAGMB04} and are summarized below. The gFWER and TPPFP correspond to the special cases $g(V_n,R_n) = V_n$ and  $g(V_n,R_n) = V_n/R_n$, respectively. 
+Denote the adjusted $p$-values for the initial FWER-controlling procedure by $\widetilde{P}_{0n}(m)$. Order the $M$ null hypotheses according to these $p$-values, from smallest to largest, that is, define indices $O_n(m)$, so that $\widetilde{P}_{0n}(O_n(1))\leq \ldots \leq \widetilde{P}_{0n}(O_n(M))$. Then, for a nominal level $\alpha$ test, the initial FWER-controlling procedure rejects the $R_n$ null hypotheses 
+\begin{equation}
+{\cal R}_n \equiv \{m: \widetilde{P}_{0n}(m) \leq \alpha\}.
+\end{equation}
+
+\noindent
+{\bf Augmentation procedure for controlling the gFWER.} For control of $gFWER(k)$ at level $\alpha$, given an initial FWER-controlling procedure, reject the $R_n$ hypotheses specified by this MTP, as well as the next $A_n = \min\{k, M-R_n\}$ most significant null hypotheses. 
+The adjusted $p$-values $\widetilde{P}_{0n}^{+}(O_n(m))$ for the new gFWER-controlling AMTP are simply $k$--shifted versions of the adjusted $p$-values of the initial FWER-controlling MTP:
+\begin{equation}\label{anal:mult:e:adjpgFWER}
+\widetilde{P}_{0n}^{+}(O_n(m)) =
+\begin{cases}
+0, & \text{if $m=1,\ldots,k$},\\
+\widetilde{P}_{0n}(O_n(m-k)), & \text{if $m=k+1, \ldots, M$}.
+\end{cases}
+\end{equation}
+That is, the first $k$ adjusted $p$-values are set to zero and the remaining $p$-values are the adjusted $p$-values of the FWER-controlling MTP shifted by $k$. The AMTP thus guarantees at least $k$ rejected hypotheses.\\
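+
+This shift is straightforward to express in R. The sketch below takes a vector \Robject{adjp} of adjusted $p$-values from any initial FWER-controlling MTP (the function \Robject{fwer2gfwer} provides the actual implementation):
+
+\begin{Schunk}
+\begin{Sinput}
+> # Sketch only: k-shifted adjusted p-values for the gFWER(k) AMTP
+> gfwer.adjp <- function(adjp, k = 0) {
++     M <- length(adjp)
++     out <- numeric(M)
++     out[order(adjp)] <- c(rep(0, k), sort(adjp)[seq_len(M - k)])
++     out
++ }
+\end{Sinput}
+\end{Schunk}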
+
+
+\noindent
+{\bf Augmentation procedure for controlling the TPPFP.} For control of $TPPFP(q)$ at level $\alpha$, given an initial FWER-controlling procedure, reject the $R_n$ hypotheses specified by this MTP, as well as the next $A_n$ most significant null hypotheses, 
+\begin{eqnarray}
+\label{anal:mult:e:augTPPFP}
+A_n &=& \max\left\{m \in \{0,\ldots, M - R_n\}:\frac{m}{m+ R_n}\leq q\right\} \nonumber\\
+&=& \min \left\{ \left \lfloor \frac{q R_n}{1-q} \right \rfloor, M-R_n \right\},
+\end{eqnarray}
+where the {\em floor} $\lfloor x \rfloor$ denotes the greatest integer less than or equal to $x$, i.e., $\lfloor x \rfloor \leq x < \lfloor x \rfloor + 1$. That is, keep rejecting null hypotheses until the ratio of additional rejections to the total number of rejections reaches the allowed proportion $q$ of false positives. 
+The adjusted $p$-values $\widetilde{P}_{0n}^{+}(O_n(m))$ for the new TPPFP-controlling AMTP are simply shifted versions of the adjusted $p$-values of the initial FWER-controlling MTP, that is,
+\begin{equation}\label{anal:mult:e:adjpTPPFP}
+\widetilde{P}_{0n}^{+}(O_n(m)) = \widetilde{P}_{0n}(O_n(\lceil(1-q)m\rceil)), \qquad m=1,\ldots,M,
+\end{equation}
+where the {\em ceiling} $\lceil x \rceil$ denotes the least integer greater than or equal to $x$, i.e., $\lceil x \rceil -1 < x \leq \lceil x \rceil$. \\
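+
+A corresponding sketch for Equation (\ref{anal:mult:e:adjpTPPFP}) (see \Robject{fwer2tppfp} for the actual implementation):
+
+\begin{Schunk}
+\begin{Sinput}
+> # Sketch only: TPPFP(q) adjusted p-values from FWER adjusted p-values
+> tppfp.adjp <- function(adjp, q = 0.1) {
++     M <- length(adjp)
++     out <- numeric(M)
++     out[order(adjp)] <- sort(adjp)[ceiling((1 - q) * (1:M))]
++     out
++ }
+\end{Sinput}
+\end{Schunk}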
+
+
+\noindent
+{\bf FDR-controlling procedures.}
+Given any TPPFP-controlling procedure, van der Laan et al. \cite{vdLaanetalMT3SAGMB04} derive two simple (conservative) FDR-controlling procedures. 
+The more general and conservative procedure controls the FDR at nominal level $\alpha$, by controlling $TPPFP(\alpha/2)$ at level $\alpha/2$. 
+The less conservative procedure controls the FDR at nominal level $\alpha$, by controlling $TPPFP(1 - \sqrt{1-\alpha})$ at level $1 - \sqrt{1-\alpha}$.
+In what follows, we refer to these two MTPs as ``conservative'' and ``restricted'', respectively.
+The reader is referred to the original article for details and proofs of FDR control (Section 2.4, Theorem 3).
+ 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Software implementation: \Rpackage{multtest} package}
+\label{anal:mult:s:software}
+
+\subsection{Overview}
+
+The MTPs proposed in Sections \ref{anal:mult:s:SS} -- \ref{anal:mult:s:AMTP} are implemented in the latest version of the Bioconductor R package \Rpackage{multtest} (version 1.5.0, Bioconductor release 1.5). 
+New features include: 
+expanded class of tests (e.g., for regression parameters in linear models and in Cox proportional hazards models);
+control of a wider selection of Type I error rates (e.g., gFWER, TPPFP, FDR); 
+bootstrap estimation of the test statistics null distribution; 
+augmentation multiple testing procedures;  
+confidence regions for the parameter vector of interest.
+Because of their general applicability and novelty, we focus in this section on MTPs that utilize a bootstrap estimated test statistics null distribution and that are available through the package's main user-level function: \Robject{MTP}.
+Note that for many testing problems, MTPs based on permutation (rather than bootstrap) estimated null distributions are also available in the present and earlier versions of \Rpackage{multtest}.
+In particular, permutation-based step-down maxT and minP FWER-controlling MTPs are implemented in the functions \Robject{mt.maxT} and \Robject{mt.minP}, respectively, and can also be applied directly through a call to the \Robject{MTP} function.
+
+We stress that {\em all} the bootstrap-based MTPs implemented in \Rpackage{multtest} can be performed using the main user-level function \Robject{MTP}. 
+Most users will therefore only need to be familiar with this function. 
+Other functions are provided primarily for the benefit of more advanced users, interested in extending the package's functionality (Section \ref{anal:mult:s:design}).
+For greater detail on \Rpackage{multtest} functions, the reader is referred to the package documentation, in the form of help files, e.g., \Robject{? MTP}, and vignettes, e.g., \Robject{openVignette("multtest")}. 
+
+One needs to specify the following main ingredients when applying a MTP: 
+the {\em data}, $X_1, \ldots, X_n$; 
+suitably defined {\em test statistics}, $T_n$, for each of the null hypotheses under consideration (e.g., one-sample $t$-statistics, robust rank-based $F$-statistics, $t$-statistics for regression coefficients in Cox proportional hazards model); 
+a choice of {\em Type I error rate}, $\theta(F_{V_n,R_n})$, providing an appropriate measure of false positives for the particular testing problem (e.g., $TPPFP(0.10)$);
+a proper {\em joint null distribution}, $Q_0$ (or estimate thereof, $Q_{0n})$, for the test statistics (e.g., bootstrap null distribution as in Procedure \ref{anal:mult:proc:boot}); 
+given the previously defined components, a {\em multiple testing procedure}, ${\cal R}_n={\cal R}(T_n, Q_{0n},\alpha)$, for controlling the error rate $\theta(F_{V_n,R_n})$ at a target level $\alpha$.
+Accordingly, the \Rpackage{multtest} package has adopted a modular and extensible approach to the implementation of MTPs, with the following four main types of functions.
+\begin{itemize}
+
+\item 
+Functions for computing the {\em test statistics}, $T_n$. These are internal functions (e.g., \Robject{meanX}, \Robject{coxY}), i.e., functions that are generally not called directly by the user. 
+As shown in Section \ref{anal:mult:s:MTP}, below, the type of test statistic is specified by the \Robject{test} argument of the main user-level function \Robject{MTP}.  
+Advanced users, interested in extending the class of tests available in \Rpackage{multtest}, can simply add their own test statistic functions to the existing library of such internal functions (see Section \ref{anal:mult:s:design}, below, for a brief discussion of the closure approach for specifying test statistics).
+
+\item
+Functions for obtaining the {\em test statistics null distribution}, $Q_0$, or an estimate thereof, $Q_{0n}$.  The main function currently available is the internal function \Robject{boot.resample}, implementing the non-parametric version of bootstrap Procedure \ref{anal:mult:proc:boot} (Section \ref{anal:mult:s:nullDistn}). 
+
+\item
+Functions for implementing the {\em multiple testing procedure}, ${\cal R}(T_n, Q_{0n},\alpha)$, i.e., for deriving rejection regions, confidence regions, and adjusted $p$-values. 
+The main function is the  user-level wrapper function \Robject{MTP}, which implements the single-step and step-down maxT and minP procedures for FWER control (Sections \ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}). 
+The functions \Robject{fwer2gfwer}, \Robject{fwer2tppfp}, and \Robject{fwer2fdr} implement, respectively, gFWER-, TPPFP-, and FDR-controlling augmentation multiple testing procedures, based on adjusted $p$-values from {\em any} FWER-controlling procedure, and can be called via the \Robject{typeone} argument to \Robject{MTP} (Section \ref{anal:mult:s:AMTP}). 
+
+\item
+Functions for {\em numerical and graphical summaries} of a MTP. As described in Section \ref{anal:mult:s:summaries}, below, a number of summary methods are available to operate on objects of class \Rclass{MTP}, output from the main \Robject{MTP} function.
+\end{itemize}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Resampling-based multiple testing procedures: \Robject{MTP} function}
+\label{anal:mult:s:MTP}
+
+The main user-level function for resampling-based multiple testing is \Robject{MTP}. Its input/output and usage are described next. 
+
+\begin{Schunk}
+\begin{Sinput}
+> library(Biobase)
+> library(multtest)
+\end{Sinput}
+\end{Schunk}
+
+\begin{Schunk}
+\begin{Sinput}
+> args(MTP)
+\end{Sinput}
+\begin{Soutput}
+function (X, W = NULL, Y = NULL, Z = NULL, Z.incl = NULL, Z.test = NULL, 
+    na.rm = TRUE, test = "t.twosamp.unequalvar", robust = FALSE, 
+    standardize = TRUE, alternative = "two.sided", psi0 = 0, 
+    typeone = "fwer", k = 0, q = 0.1, fdr.method = "conservative", 
+    alpha = 0.05, smooth.null = FALSE, nulldist = "boot", B = 1000, 
+    method = "ss.maxT", get.cr = FALSE, get.cutoff = FALSE, get.adjp = TRUE, 
+    keep.nulldist = TRUE, seed = NULL) 
+NULL
+\end{Soutput}
+\end{Schunk}
+
+\noindent
+{\bf  INPUT.}
+\begin{description}
+
+\item{\em Data.} 
+The data, \Robject{X}, consist of a $J$--dimensional random vector, observed on each of $n$ sampling units (patients, cell lines, mice, etc). 
+These data can be stored in a $J \times n$ \Rclass{matrix}, \Rclass{data.frame}, or \Rclass{exprs} slot of an object of class \Rclass{ExpressionSet}.
+In some settings,  a $J$--vector of weights may be associated with each observation, and stored in a $J \times n$ weight matrix, \Robject{W} (or an $n$--vector \Robject{W}, if the weights are the same for each of the $J$ variables). 
+One may also observe a possibly censored continuous or polychotomous outcome, \Robject{Y}, for each sampling unit, as obtained, for example, from the \Rclass{phenoData} slot of an object of class \Rclass{ExpressionSet}. 
+In some studies, $L$ additional covariates may be measured on each sampling unit and stored in \Robject{Z}, an $n \times L$ \Rclass{matrix} or \Rclass{data.frame}. 
+When the tests concern parameters in regression models with covariates from \Robject{Z} (e.g., values \Robject{lm.XvsZ}, \Robject{lm.YvsXZ}, and \Robject{coxph.YvsXZ}, for the argument \Robject{test}, described below), the arguments \Robject{Z.incl} and \Robject{Z.test} specify, respectively, which covariates (i.e., which columns of \Robject{Z}, including \Robject{Z.test}) should be included in the model and which regression parameter is to be tested (only when \texttt{test="lm.XvsZ"}). 
+The covariates can be specified either by a numeric column index or character string.
+If \Robject{X} is an instance of the class \Rclass{ExpressionSet}, \Robject{Y} can be a column index or character string referring to the variable in the \Rclass{data.frame} \Robject{pData(X)} to use as outcome. 
+Likewise, \Robject{Z.incl} and \Robject{Z.test} can be column indices or character strings referring to the variables in \Robject{pData(X)} to use as covariates.
+The data components (\Robject{X}, \Robject{W}, \Robject{Y}, \Robject{Z}, \Robject{Z.incl}, and \Robject{Z.test}) are the first six arguments to the \Robject{MTP} function. 
+Only \Robject{X} is a required argument; the others are by default \Robject{NULL}.
+The argument \Robject{na.rm} allows one to control the treatment of missing (\Robject{NA}) values. It is set to \Robject{TRUE}, by default, so that an
+observation with a missing value in any of the data objects' $j$th component ($j=1,\ldots,J$) is excluded from the computation of any of the relevant test statistics.
+
+
+\item{\em Test statistics.} 
+
+The test statistics should be chosen based on the parameter of interest (e.g., location, scale, or regression parameters) and the hypotheses one wishes to test. In the current implementation of \Rpackage{multtest}, the following test statistics are available through the argument \Robject{test}, with default value \Robject{t.twosamp.unequalvar}, for the two-sample Welch $t$-statistic. 
+\begin{itemize}
+\item 
+\Robject{t.onesamp}: One-sample $t$-statistic for tests of means.
+\item 
+\Robject{t.twosamp.equalvar}: Equal variance two-sample $t$-statistic for tests of differences in means.
+\item 
+\Robject{t.twosamp.unequalvar}: Unequal variance two-sample $t$-statistic for tests of differences in means (also known as two-sample Welch $t$-statistic). 
+\item 
+\Robject{t.pair}: Two-sample paired $t$-statistic for tests of differences in means.
+\item 
+\Robject{f}: Multi-sample $F$-statistic for tests of equality of population means.
+\item 
+\Robject{f.block}: Multi-sample $F$-statistic for tests of equality of population means in a block design.
+\item 
+
+\Robject{lm.XvsZ}: 
+$t$-statistic for tests of regression coefficients for variable \Robject{Z.test} in linear models each with outcome \Robject{X[j,]} ($j=1,\ldots,J$), and possibly additional covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z} (in the case of no covariates, one recovers the one-sample $t$-statistic, \Robject{t.onesamp}).
+\item 
+\Robject{lm.YvsXZ}: 
+$t$-statistic for tests of regression coefficients in linear models with outcome \Robject{Y} and each \Robject{X[j,]} ($j=1,\ldots,J$) as covariate of interest, with possibly other covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z}.
+\item 
+\Robject{coxph.YvsXZ}: $t$-statistic for tests of regression coefficients in Cox proportional hazards survival models with outcome \Robject{Y} and each \Robject{X[j,]} ($j=1,\ldots,J$) as covariate of interest, with possibly other covariates \Robject{Z.incl} from the \Rclass{matrix} \Robject{Z}.
+\end{itemize}
+
+
+{\em Robust}, {\em rank-based} versions of the above test statistics can be specified by setting the argument \Robject{robust} to \Robject{TRUE} (the default value is \Robject{FALSE}). 
+Consideration should be given to whether {\em standardized} (Equation (\ref{anal:mult:e:tstat})) or {\em unstandardized} difference statistics are most appropriate (see Pollard \& van der Laan \cite{Pollard&vdLaanJSPI04} for a comparison). Both options are available through the argument \Robject{standardize}, by default \Robject{TRUE}. 
+The type of alternative hypothesis is specified via the \Robject{alternative} argument: the default value \Robject{two.sided} gives a two-sided test, and the values \Robject{less} and \Robject{greater} give one-sided tests. 
+The (common) null value for the parameters of interest is specified through the \Robject{psi0} argument, by default zero.  
+
+
+\item{\em Type I error rate.} 
+The \Robject{MTP} function controls by default the family-wise error rate (FWER), or chance of at least one false positive (argument \Robject{typeone="fwer"}). 
+Augmentation procedures (Section \ref{anal:mult:s:AMTP}), controlling other Type I error rates such as the gFWER, TPPFP, and FDR, can be specified through the argument \Robject{typeone}.
+Related arguments include \Robject{k} and \Robject{q}, for the allowed number and proportion of false positives for control of $gFWER(k)$ and $TPPFP(q)$, respectively, and \Robject{fdr.method}, for the type of TPPFP-based FDR-controlling procedure (i.e., \Robject{"conservative"} or \Robject{"restricted"} methods).
+The nominal level of the test is determined by the argument \Robject{alpha}, by default 0.05. 
+Testing can be performed for a range of nominal Type I error rates by specifying a vector of levels \Robject{alpha}. 
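+For instance, a call controlling $TPPFP(0.1)$ on the Golub et al. \cite{golubetal} dataset included in the package might look as follows (a sketch; the small value of \Robject{B} is for illustration only, and larger values are advisable in practice).
+
+\begin{Schunk}
+\begin{Sinput}
+> data(golub)
+> mtp.tppfp <- MTP(X = golub, Y = golub.cl, test = "t.twosamp.unequalvar",
++     typeone = "tppfp", q = 0.1, alpha = 0.05, B = 100, seed = 99)
+\end{Sinput}
+\end{Schunk}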
+
+
+\item{\em Test statistics null distribution.} 
+In the current implementation of \Robject{MTP}, the test statistics null distribution is estimated by default using the non-parametric version of bootstrap Procedure~\ref{anal:mult:proc:boot} (argument \Robject{nulldist="boot"}). 
+The bootstrap procedure is implemented in the internal function \Robject{boot.resample}, which calls C to compute test statistics for each bootstrap sample.
+The values of the shift ($\lambda_0$) and scale ($\tau_0$) parameters are determined by the type of test statistics (e.g., $\lambda_0=0$ and $\tau_0=1$ for $t$-statistics). When \Robject{csnull=TRUE} (default), these values will be used to center and scale the estimated test statistics distribution, producing a null distribution. One may specify \Robject{csnull=FALSE} to compute a non-null test statistics distribution.
+Permutation null distributions are also available via \Robject{nulldist="perm"}.
+The number of resampling steps is specified by the argument \Robject{B}, by default 1,000. 
+Since the upper tail of the bootstrap distribution may be difficult to estimate, particularly for small values of \Robject{B}, a kernel density estimator may be used for the tail of the distribution by setting \Robject{smooth.null=TRUE} (the default is \Robject{FALSE}). 
+
+\item{\em Multiple testing procedures.} 
+Several methods for controlling the chosen Type I error rate are available in \Rpackage{multtest}. 
+\begin{itemize}
+\item
+{\em FWER-controlling procedures.}
+For FWER control, the \Robject{MTP} function implements the single-step and step-down (common-cut-off) maxT and (common-quantile) minP MTPs, described in Sections~\ref{anal:mult:s:SS} and \ref{anal:mult:s:SD}, and specified through the argument \Robject{method} (internal functions \Robject{ss.maxT}, \Robject{ss.minP}, \Robject{sd.maxT}, and \Robject{sd.minP}).
+The default MTP is the single-step maxT procedure (\Robject{method="ss.maxT"}), since it requires the least computation.
+\item 
+{\em gFWER-, TPPFP-, and FDR-controlling augmentation procedures.} 
+As discussed in Section \ref{anal:mult:s:AMTP}, any FWER-controlling MTP can be trivially augmented to control additional Type I error rates, such as the gFWER and TPPFP.
+Two FDR-controlling procedures can then be derived from the TPPFP-controlling AMTP.
+The AMTPs are implemented in the functions \Robject{fwer2gfwer}, \Robject{fwer2tppfp}, and \Robject{fwer2fdr}, that take FWER adjusted $p$-values as input and return augmentation adjusted $p$-values for control of the gFWER, TPPFP, and FDR, respectively. 
+Note that the aforementioned AMTPs can be applied directly via the \Robject{typeone} argument of the main function \Robject{MTP}.
+\end{itemize}
+
+\item{\em Parallel processing.}
+\Robject{MTP} can be run on a computer cluster with multiple nodes. This functionality requires the package \Rpackage{snow}. In addition, the packages \Rpackage{multtest} and \Rpackage{Biobase} must be
+installed on each node. \Robject{MTP} will load these packages as long as they are in the library
+search path; otherwise, the user must load the packages on each node. When \Robject{cluster=1}, computations are performed on a single CPU. To implement bootstrapping in parallel, the user either sets \Robject{cluster} equal to a cluster object created using the function \Robject{makeCluster} 
+in \Rpackage{snow} or specifies the integer number of nodes to use in a cluster. For the latter 
+approach, \Robject{MTP} creates a cluster object with the specified number of nodes for the user. 
+In this case, the type of interface system to use must be specified in the \Robject{type} argument. 
+MPI and PVM interfaces require the packages \Rpackage{Rmpi} and \Rpackage{rpvm}, respectively. The number or percentage of bootstrap iterations to dispatch at one time to each node is specified 
+with the \Robject{dispatch} argument (default is 5\%).
+
+The following example illustrates how to load the \Rpackage{snow} package, make a cluster consisting 
+of two nodes, and load \Rpackage{Biobase} and \Rpackage{multtest} onto each node of the 
+cluster using \Robject{clusterEvalQ}. The object \Robject{cl} can be passed to \Robject{MTP} via
+the \Robject{cluster} argument. 
+
+\begin{Schunk}
+\begin{Sinput}
+> library(snow)
+> cl <- makeCluster(2, "MPI")
+> clusterEvalQ(cl, {
++     library(Biobase)
++     library(multtest)
++ })
+\end{Sinput}
+\end{Schunk}
+
+\item{\em Output control.} 
+Various arguments are available to control output, i.e., specify which combination of the following quantities should be returned: 
+confidence regions (argument \Robject{get.cr}); 
+cut-offs for the test statistics (argument \Robject{get.cutoff}); 
+adjusted $p$-values (argument \Robject{get.adjp}); 
+test statistics null distribution  (argument \Robject{keep.nulldist}). 
+Note that parameter estimates and confidence regions only apply to the test of single-parameter null hypotheses (i.e., not the $F$-tests). 
+In addition, in the current implementation of \Robject{MTP}, parameter confidence regions and test statistic cut-offs are only provided when \texttt{typeone="fwer"}, so that \Robject{get.cr} and \Robject{get.cutoff} should be set to \Robject{FALSE} when using the error rates gFWER, TPPFP, or FDR.
+
+
+\end{description}
+
+Note that the \Rpackage{multtest} package also provides several simple, marginal FWER-controlling MTPs, such as the Bonferroni, Holm \cite{Holm79}, Hochberg \cite{Hochberg88}, and \v{S}id\'{a}k \cite{Sidak67} procedures, and FDR-controlling MTPs, such as the Benjamini \& Hochberg \cite{Benjamini&Hochberg95} and Benjamini \& Yekutieli \cite{Benjamini&Yekutieli01} procedures. 
+These procedures are available through the \Robject{mt.rawp2adjp} function, which takes a vector of unadjusted $p$-values as input and returns the corresponding adjusted $p$-values.\\
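+
+For example, given the \Robject{rawp} slot of the hypothetical \Rclass{MTP} instance \Robject{mtp.tppfp} created above (a sketch):
+
+\begin{Schunk}
+\begin{Sinput}
+> rawp <- mtp.tppfp@rawp
+> res <- mt.rawp2adjp(rawp, proc = c("Bonferroni", "Holm", "BH"))
+> adjp <- res$adjp[order(res$index), ]   # restore the original ordering
+\end{Sinput}
+\end{Schunk}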
+
+
+\noindent
+{\bf  OUTPUT.}\\
+
+
+The S4 class/method object-oriented programming approach was adopted to summarize the results of a MTP (Section \ref{anal:mult:s:design}). 
+Specifically, the output of the \Robject{MTP} function is an instance of the {\em class} \Rclass{MTP}. 
+A brief description of the class and associated methods is given next. Please consult the documentation for details, e.g., using \texttt{class ? MTP} and \texttt{methods ? MTP}. 
+
+\begin{Schunk}
+\begin{Sinput}
+> slotNames("MTP")
+\end{Sinput}
+\begin{Soutput}
+ [1] "statistic" "estimate"  "sampsize"  "rawp"      "adjp"      "conf.reg" 
+ [7] "cutoff"    "reject"    "nulldist"  "call"      "seed"     
+\end{Soutput}
+\end{Schunk}
+
+
+\begin{description}
+
+\item{\Robject{statistic}:} The numeric $M$--vector of test statistics, specified by the values of the \Robject{MTP} arguments \Robject{test}, \Robject{robust}, \Robject{standardize}, and \Robject{psi0}. In many testing problems, $M = J = $ \Robject{nrow(X)}.
+
+\item{\Robject{estimate}:} For the test of single-parameter null hypotheses using $t$-statistics (i.e., not the $F$-tests), the numeric $M$--vector of estimated parameters.
+
+\item{\Robject{sampsize}:} The sample size, i.e., $n=$ \Robject{ncol(X)}.
+
+\item{\Robject{rawp}:} The numeric $M$--vector of unadjusted $p$-values.
+
+\item{\Robject{adjp}:} The numeric $M$--vector of adjusted $p$-values (computed only if the \Robject{get.adjp} argument is \Robject{TRUE}).
+
+\item{\Robject{conf.reg}:}  For the test of single-parameter null hypotheses using $t$-statistics (i.e., not the $F$-tests), the numeric $M \times 2 \times$ \Robject{length(alpha)} \Rclass{array} of lower and upper simultaneous confidence limits for the parameter vector, for each value of the nominal Type I error rate \Robject{alpha} (computed only if the \Robject{get.cr} argument is \Robject{TRUE}). 
+
+\item{\Robject{cutoff}:} The numeric $M \times$ \Robject{length(alpha)} \Rclass{matrix} of cut-offs for the test statistics, for each value of the nominal Type I error rate \Robject{alpha} (computed only if the \Robject{get.cutoff} argument is \Robject{TRUE}).
+
+\item{\Robject{reject}:} 
+The $M \times$ \Robject{length(alpha)} \Rclass{matrix} of rejection indicators (\Robject{TRUE} for a rejected null hypothesis), for each value of the nominal Type I error rate \Robject{alpha}.
+
+\item{\Robject{nulldist}:} The numeric $M \times B$ \Rclass{matrix} for the estimated test statistics null distribution (returned only if \texttt{keep.nulldist=TRUE}; option not currently available for permutation null distribution, i.e.,  \texttt{nulldist="perm"}).
+By default (i.e., for \Robject{nulldist="boot"}), the entries of \Robject{nulldist} are the null value shifted and scaled bootstrap test statistics, as defined by Procedure~\ref{anal:mult:proc:boot}.
+
+\item{\Robject{call}:} The call to the function \Robject{MTP}.
+
+\item{\Robject{seed}:} 
+An integer for specifying the state of the random number generator used to create the resampled datasets. 
+The seed can be reused for reproducibility in a repeat call to \Robject{MTP}. 
+This argument is currently used only for the bootstrap null distribution (i.e., for \texttt{nulldist="boot"}).
+See \texttt{? set.seed} for details.
+
+
+\end{description}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Numerical and graphical summaries}
+\label{anal:mult:s:summaries}
+
+The following {\em methods} are defined to operate on \Rclass{MTP} instances and summarize the results of a MTP.
+
+\begin{description}
+
+\item{\Robject{print}:} 
+The \Robject{print} method returns a description of an object of class \Rclass{MTP}, including 
+the sample size $n$,
+the number $M$ of tested hypotheses,
+the type of test performed (value of argument \Robject{test}), 
+the Type I error rate (value of argument \Robject{typeone}),
+the nominal level of the test  (value of argument \Robject{alpha}), 
+the name of the MTP (value of argument \Robject{method}), 
+and the call to the function \Robject{MTP}.
+In addition, this method produces a table with the class, mode, length, and dimension of each slot of the \Rclass{MTP} instance. 
+
+\item{\Robject{summary}:} 
+The \Robject{summary} method provides numerical summaries of the results of a MTP and returns a list with the following three components.
+\begin{itemize}
+\item
+\Robject{rejections}: 
+A \Rclass{data.frame} with the number(s) of rejected hypotheses for the nominal Type I error rate(s) specified by the \Robject{alpha} argument of the function \Robject{MTP} 
+(\Robject{NULL} values are returned if all three arguments \Robject{get.cr}, \Robject{get.cutoff}, and \Robject{get.adjp} are \Robject{FALSE}).
+\item
+\Robject{index}:
+A numeric $M$--vector of indices for ordering the hypotheses according to first \Robject{adjp}, then \Robject{rawp}, and finally the absolute value of \Robject{statistic} (not printed in the summary). 
+\item
+\Robject{summaries}:
+When applicable (i.e., when the corresponding quantities are returned by \Robject{MTP}), a table with six-number summaries of the distributions of the adjusted $p$-values, unadjusted $p$-values, test statistics, and parameter estimates.
+\end{itemize}
+
+\item{\Robject{plot}:}   
+The \Robject{plot} method produces the following graphical summaries of the results of a MTP. The type of display may be specified via the \Robject{which} argument.
+\begin{enumerate}
+\item
+Scatterplot of number of rejected hypotheses vs. nominal Type I error rate.
+\item
+Plot of ordered adjusted $p$-values; can be viewed as a plot of Type I error rate vs. number of rejected hypotheses.
+\item
+Scatterplot of adjusted $p$-values vs. test statistics (also known as ``volcano plot'').
+\item
+Plot of unordered adjusted $p$-values.
+\item
+Plot of confidence regions for user-specified parameters, by default the 10 parameters corresponding to the smallest adjusted $p$-values  (argument \Robject{top}).
+\item
+Plot of test statistics and corresponding cut-offs (for each value of \Robject{alpha}) for user-specified hypotheses, by default the 10 hypotheses corresponding to the smallest adjusted $p$-values (argument \Robject{top}).
+\end{enumerate}
+The argument \Robject{logscale} (by default equal to \Robject{FALSE}) allows one to use the negative decimal logarithms of the adjusted $p$-values in the second, third, and fourth graphical displays.
+Note that some of these plots are implemented in the older function \Robject{mt.plot}.
+
+\item{\Robject{[}:} 
+Subsetting method, which operates selectively on each slot of an \Rclass{MTP} instance to retain only the data related to the specified hypotheses.
+
+\item{\Robject{as.list}:} 
+Converts an object of class \Rclass{MTP} to an object of class \Rclass{list}, with an entry for each slot. 
+
+\end{description}
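+
+The following sketch illustrates these methods on the hypothetical \Rclass{MTP} instance \Robject{mtp.tppfp} from Section \ref{anal:mult:s:MTP}:
+
+\begin{Schunk}
+\begin{Sinput}
+> print(mtp.tppfp)
+> summary(mtp.tppfp)
+> plot(mtp.tppfp, which = 2)    # ordered adjusted p-values
+> mtp.sub <- mtp.tppfp[1:10]    # results for the first ten hypotheses
+\end{Sinput}
+\end{Schunk}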
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Software design}
+\label{anal:mult:s:design}
+
+The following features of the programming approach employed in \Rpackage{multtest} may be of interest to users, especially those interested in extending the functionality of the package. \\
+
+\noindent
+{\bf Function closures.}  The use of {\em function closures}, in the style of the \Rpackage{genefilter} package, allows uniform data input for all MTPs and facilitates the extension of the package's functionality by adding, for example, new types of test statistics. 
+Specifically, for each value of the \Robject{MTP} argument \Robject{test}, a closure is defined which consists of a function for computing the test statistic (with only two arguments, a data vector \Robject{x} and a corresponding weight vector \Robject{w}, with default value of \Robject{NULL}) and its enclosing environment, with bindings for relevant additional arguments, such as null values \Robject{psi0}, outcomes \Robject{Y}, and covariates \Robject{Z}. 
+Thus, new test statistics can be added to \Rpackage{multtest} by simply defining a new closure and adding a corresponding value for the \Robject{test} argument to \Robject{MTP} (existing internal test statistic functions are located in the file \texttt{R/statistics.R}).\\
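+
+As an illustration of the closure idiom (a sketch only, not the package's internal code), a one-sample $t$-statistic closure with null value \Robject{psi0} might be written as:
+
+\begin{Schunk}
+\begin{Sinput}
+> make.onesamp.t <- function(psi0 = 0) {
++     # the returned function sees psi0 through its enclosing environment;
++     # the weight vector w is ignored in this simple sketch
++     function(x, w = NULL) {
++         x <- x[!is.na(x)]
++         (mean(x) - psi0)/(sd(x)/sqrt(length(x)))
++     }
++ }
+\end{Sinput}
+\end{Schunk}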
+
+\noindent
+{\bf Class/method object-oriented programming.}  Like many other Bioconductor packages, \Rpackage{multtest}  has adopted the {\em S4 class/method object-oriented programming approach} of Chambers \cite{Chambers98}.
+In particular, a new class, \Rclass{MTP}, is defined to represent the results of multiple testing procedures, as implemented in the main \Robject{MTP} function. As discussed above, in Section \ref{anal:mult:s:summaries}, several methods are provided to operate on instances of this class.\\
+
+\noindent
+{\bf Calls to C.} Because resampling procedures, such as the non-parametric bootstrap implemented in \Rpackage{multtest}, are computationally intensive, care must be taken to ensure that the resampling steps are not prohibitively slow. The use of closures for the test statistics, however, prevents writing the entire program in C. In the current implementation, we have chosen to define the closure and compute the observed test statistics in R, and then call C (using the R random number generator) to compute the test statistics for each bootstrap sample.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Discussion}
+\label{anal:mult:s:disc}
+
+The \Rpackage{multtest} package implements a broad range of resampling-based multiple testing procedures. Ongoing efforts are as follows.
+\begin{enumerate}
+\item
+Extending the class of available tests, by adding test statistic closures for tests of correlations, quantiles, and parameters in generalized linear models (e.g., logistic regression).
+\item
+Extending the class of resampling-based estimators for the test statistics null distribution (e.g., parametric bootstrap, Bayesian bootstrap). A closure approach may be considered for this purpose.
+\item
+Providing parameter confidence regions and test statistic cut-offs for other Type I error rates than the FWER.
+\item
+Implementing the new augmentation multiple testing procedures proposed in Dudoit \& van der Laan \cite{Dudoit&vdLaanMTBook} for controlling tail probabilities $Pr(g(V_n,R_n) > q)$ for an arbitrary function $g(V_n,R_n)$ of the numbers of false positives $V_n$ and rejected hypotheses $R_n$.
+\item
+Providing a formula interface for a symbolic description of the tests to be performed (cf. model specification in \Robject{lm}).
+%\item
+%Providing an \Robject{update} method for objects of class \Rclass{MTP}. This would allow reusing available estimates of the null distribution to implement different MTPs for a given Type I error rate and to control different Type I error rates. 
+\item
+Extending the \Rclass{MTP} class to keep track of results for several MTPs.
+\item
+Increasing the computational efficiency of the bootstrap estimation of the test statistics null distribution.
+\end{enumerate}
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\bibliographystyle{plainnat}
+
+\bibliography{multtest}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\end{document}
diff --git a/vignettes/MTPALL.pdf b/vignettes/MTPALL.pdf
new file mode 100755
index 0000000..379eee8
Binary files /dev/null and b/vignettes/MTPALL.pdf differ
diff --git a/vignettes/golub.R b/vignettes/golub.R
new file mode 100755
index 0000000..53e4cd8
--- /dev/null
+++ b/vignettes/golub.R
@@ -0,0 +1,48 @@
+###########################################################################
+#
+# Script for pre-processing the Golub et al. (1999) ALL AML training dataset
+#
+# Data available at: 
+# 	http://www-genome.wi.mit.edu/mpr
+#
+###########################################################################
+
+# Get data from Whitehead Institute website
+URL<-"http://www-genome.wi.mit.edu/mpr/publications/projects/Leukemia/data_set_ALL_AML_train.txt"	 
+golub.all<-read.table(URL,sep="\t",quote="",header=TRUE,row.names=NULL,comment.char="")
+
+# Gene names and tumor class labels
+golub.gnames<-cbind(dimnames(golub.all)[[1]],as.character(golub.all[,1]),as.character(golub.all[,2]))
+golub.cl<-c(rep(1,27),rep(2,11))
+
+# Re-order columns
+golub<-golub.all
+golub<-golub[,1+2*(1:38)]
+golub<-golub[,c(1:27,33:38,28:32)]
+golub<-as.matrix(golub)
+
+# Floor & ceiling
+golub[golub<100]<-100
+golub[golub>16000]<-16000
+
+# Preliminary selection of genes
+tmp1<-apply(golub,1,max)
+tmp2<-apply(golub,1,min)
+which1<-(1:7129)[(tmp1/tmp2)>5]
+which2<-(1:7129)[(tmp1-tmp2)>500]
+golub.sub<-intersect(which1,which2)
+golub<-golub[golub.sub,]
+
+# Log_10 transformation
+golub<-log(golub,10)
+	
+# Normalization	
+golub.expr<-scale(golub,center=TRUE,scale=TRUE)
+dimnames(golub.expr)<-list(NULL,NULL)
+
+#export to multtest
+golub<-golub.expr
+golub.cl<-c(rep(0,27),rep(1,11))
+golub.gnames<-golub.gnames[golub.sub,]
+###########################################################################
+
diff --git a/vignettes/multtest.bib b/vignettes/multtest.bib
new file mode 100755
index 0000000..656360a
--- /dev/null
+++ b/vignettes/multtest.bib
@@ -0,0 +1,293 @@
+@STRING{ANNSTAT = {The Annals of Statistics} }
+@string{JASA = {Journal of the American Statistical Association}}
+@string{JC = {Journal of Classification}}
+@string{JSPI = {Journal of Statistical Planning and Inference}}
+@string{JRSSB = {Journal of the Royal Statistical Society, Series B}}
+@string{PNAS = {Proc. Natl. Acad. Sci.}}
+@string{SAGMB = {Statistical Applications in Genetics and Molecular Biology}}
+
+@Article{benjamini&hochberg95,
+  author	= {Y. Benjamini and Y. Hochberg},
+  title		= {Controlling the false discovery rate: a practical and
+		  powerful approach to multiple testing},
+  journal	= {JRSSB},
+  year		= {1995},
+  optkey	= {},
+  volume	= {57},
+  optnumber	= {},
+  optmonth	= {},
+  pages		= {289--300},
+  optnote	= {},
+  optannote	= {}
+}
+
+@Article{benjamini&yekutieli01,
+  author	= {Y. Benjamini and D. Yekutieli},
+  title		= {The control of the false discovery rate in multiple
+		  hypothesis testing under dependency},
+  journal	= ANNSTAT,
+  year		= {2001},
+  optkey	= {},
+  volume	= {29},
+  number	= {4},
+  pages		= {1165--1188}
+}
+
+@Book{Chambers98,
+   author = {J. M. Chambers},
+   title = {Programming with Data: A Guide to the S Language},
+   publisher = {Springer-Verlag, New York},
+   year = {1998}
+}
+
+@Unpublished{dudoit&shaffer02,
+  author	= {S. Dudoit and J. P. Shaffer and J. C. Boldrick},
+  title		= {Multiple hypothesis testing in microarray experiments},
+  note		= {{\it Statistical Science}, to appear, preprint 
+available at UC
+		  Berkeley, Division Biostatistics working paper series:
+		  2002-110, {\tt http://www.bepress.com/ucbbiostat/paper110}},
+  optkey	= {},
+  optmonth	= {},
+  year		= {2002},
+  optannote	= {}
+}
+
+@Article{DudoitetalStatSci03,
+  author	= {S. Dudoit and J. P. Shaffer and J. C. Boldrick},
+  title		= {Multiple hypothesis testing in microarray experiments},
+  journal		= {Statistical Science},
+  optkey	= {},
+  optmonth	= {},
+  year		= {2003},
+  volume = 	 {18},
+  number = 	 {1},
+  pages = 	 {71--103},
+  optannote	= {}
+}
+
+@Unpublished{ge&dudoit,
+  author	= {Y. Ge and S. Dudoit and T. P. Speed},
+  title		= {Resampling-based multiple testing for microarray data
+		  analysis},
+  note		= {{\it Test}, to appear, preprint available at the
+		  Technical Report \#633, Jan. 2003, the Department of
+		  Statistics, UC Berkeley, {\tt
+		  http://www.stat.berkeley.edu/tech-reports/index.html}},
+  optkey	= {},
+  optmonth	= {},
+  year		= {2003},
+  optannote	= {}
+}
+
+@Article{golubetal,
+  author	= {T. R. Golub and D. K. Slonim and P. Tamayo and C. Huard
+		  and M. Gaasenbeek and J. P. Mesirov and H. Coller and M.L.
+		  Loh and J. R. Downing and M. A. Caligiuri and C. D.
+		  Bloomfield and E. S. Lander},
+  title		= {Molecular classification of cancer: class discovery and
+		  class prediction by gene expression monitoring},
+  journal	= {Science},
+  year		= {1999},
+  optkey	= {},
+  volume	= {286},
+  optnumber	= {},
+  optmonth	= {},
+  pages		= {531--537},
+  optnote	= {},
+  optannote	= {}
+}
+
+@Article{hochberg88,
+  author	= {Y. Hochberg},
+  title		= {A sharper Bonferroni procedure for multiple tests of
+		  significance},
+  journal	= {Biometrika},
+  year		= {1988},
+  optkey	= {},
+  volume	= {75},
+  optnumber	= {},
+  pages		= {800--802},
+  optmonth	= {},
+  optnote	= {},
+  optannote	= {}
+}
+
+@Article{holm79,
+  author	= {S. Holm},
+  title		= {A simple sequentially rejective multiple test procedure},
+  journal	= {Scand. J. Statist.},
+  year		= {1979},
+  optkey	= {},
+  volume	= {6},
+  optnumber	= {},
+  pages		= {65--70},
+  optmonth	= {},
+  optnote	= {},
+  optannote	= {}
+}
+
+@Article{Sidak67,
+  author = 	 {Z.  \v{S}id\'{a}k},
+  title = 	 {Rectangular confidence regions for the means of multivariate normal distributions},
+  journal = 	 JASA,
+  year = 	 {1967},
+  OPTkey = 	 {},
+  volume = 	 {62},
+  OPTnumber = 	 {},
+  pages = 	 {626-633},
+  OPTmonth = 	 {},
+  OPTnote = 	 {},
+  OPTannote = 	 {}
+}
+
+@Article{shaffer95,
+  author	= {J. P. Shaffer},
+  title		= {Multiple hypothesis testing},
+  journal	= {Annu. Rev. Psychol.},
+  year		= {1995},
+  optkey	= {},
+  volume	= {46},
+  optnumber	= {},
+  pages		= {561--584},
+  optmonth	= {},
+  optnote	= {},
+  optannote	= {}
+}
+
+@TechReport{KelesetalTechRep147,
+  author = 	 {S. Kele\c{s} and M. J. van der Laan and S. Dudoit and S. E. Cawley},
+  title = 	 {Multiple Testing Methods for {ChIP}-{Chip} High Density Oligonucleotide Array Data},
+  institution =  {Division of Biostatistics, University of California, Berkeley},
+  year = 	 {2004},
+  OPTkey = 	 {},
+  OPTtype = 	 {},
+  number = 	 {147},
+  OPTaddress = 	 {},
+  OPTmonth = 	 {},
+  OPTnote = 	 {},
+  OPTannote = 	 {},
+  url={www.bepress.com/ucbbiostat/paper147}
+}
+
+@Book{westfall&young93,
+  author	= {P. H. Westfall and S. S. Young},
+  opteditor	= {},
+  title		= {Resampling-based multiple testing: {E}xamples and methods
+		  for $p$-value adjustment},
+  publisher	= {John Wiley \& Sons},
+  year		= {1993},
+  optkey	= {},
+  optvolume	= {},
+  optnumber	= {},
+  optseries	= {},
+  optaddress	= {},
+  optedition	= {},
+  optmonth	= {},
+  optnote	= {},
+  optannote	= {}
+}
+
+@Book{Dudoit&vdLaanMTBook,
+  author =       {S. Dudoit and M. J. van der Laan},
+  ALTeditor =    {},
+  title =        {Multiple Testing Procedures and Applications to
+Genomics},
+  publisher =    {Springer},
+  year =         {2004},
+  OPTkey =       {},
+  OPTvolume =    {},
+  OPTnumber =    {},
+  OPTseries =    {},
+  OPTaddress =   {},
+  OPTedition =   {},
+  OPTmonth =     {},
+  note =         {(In preparation)},
+  OPTannote =    {}
+}
+
+@Article{DudoitetalMT1SAGMB04,
+  author =       {S. Dudoit and M. J. van der Laan and K. S. Pollard},
+  title =        {Multiple testing. {P}art {I}. {S}ingle-step procedures
+for control of general {T}ype {I} error rates},
+  journal =      SAGMB,
+  year =         {2004},
+  OPTkey =       {},
+  volume =       {3},
+  number =       {1},
+  pages =        {Article 13},
+  OPTmonth =     {},
+  OPTnote =      {(To appear)},
+  OPTannote =    {Technical Report 138, Division of Biostatistics,
+University of California, Berkeley},
+  url={www.bepress.com/sagmb/vol3/iss1/art13}
+}
+
+@Article{vdLaanetalMT2SAGMB04,
+  author =       {M. J. van der Laan and S. Dudoit and K. S. Pollard},
+  title =        {Multiple testing. {P}art {II}. {S}tep-down procedures
+for control of the family-wise error rate},
+  journal =      SAGMB,
+  year =         {2004},
+  OPTkey =       {},
+  volume =       {3},
+  number =       {1},
+  pages =        {Article 14},
+  OPTmonth =     {},
+  OPTnote =      {(To appear)},
+  OPTannote =    {Technical Report 139, Division of Biostatistics,
+University of California, Berkeley},
+  url={www.bepress.com/sagmb/vol3/iss1/art14}
+}
+
+@Article{vdLaanetalMT3SAGMB04,
+  author =       {M. J. van der Laan and S. Dudoit and K. S. Pollard},
+  title =        {Augmentation Procedures for Control of the
+Generalized Family-Wise Error Rate and Tail Probabilities for the
+Proportion of
+False Positives},
+  journal =      SAGMB,
+  year =         {2004},
+  OPTkey =       {},
+  volume =       {3},
+  number =       {1},
+  pages =        {Article 15},
+  OPTmonth =     {},
+  OPTnote =      {},
+  OPTannote =    {Technical Report 141, Division of Biostatistics,
+University of California, Berkeley},
+  url={www.bepress.com/sagmb/vol3/iss1/art15}
+}
+
+@Article{Pollard&vdLaanJSPI04,
+  author =       {K. S. Pollard  and M. J. van der Laan},
+  title =        {Choice of a null distribution in resampling-based
+multiple testing},
+  journal =      JSPI,
+  year =         {2004},
+  OPTkey =       {},
+  volume =       {125},
+  number =       {1--2},
+  pages =        {85--100},
+  OPTmonth =     {},
+  OPTnote =      {},
+  annote =       {The Third International Conference on Multiple
+Comparisons}
+}
+
+@Article{Chiarettietal04,
+  author =       {S. Chiaretti and X. Li and R. Gentleman and A. Vitale
+and M. Vignetti and F. Mandelli and J. Ritz and R. Foa},
+  title =        {Gene expression profile of adult T-cell acute
+lymphocytic leukemia identifies distinct subsets of patients with
+different response to therapy and survival},
+  journal =      {Blood},
+  year =         {2004},
+  OPTkey =       {},
+  volume =       {103},
+  number =       {7},
+  pages =        {2771--2778},
+  OPTmonth =     {},
+  OPTnote =      {},
+  OPTannote =    {}
+}
\ No newline at end of file
diff --git a/vignettes/multtest.pdf b/vignettes/multtest.pdf
new file mode 100755
index 0000000..76598da
Binary files /dev/null and b/vignettes/multtest.pdf differ

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-bioc-multtest.git


