[med-svn] [r-cran-vegan] 01/07: Imported Upstream version 2.2-0

Charles Plessy plessy at moszumanska.debian.org
Mon Jun 1 05:26:38 UTC 2015


This is an automated email from the git hooks/post-receive script.

plessy pushed a commit to branch master
in repository r-cran-vegan.

commit 5034510e74e63eaf5427bf60dad09defafb9fd6f
Author: Charles Plessy <plessy at debian.org>
Date:   Mon Jun 1 14:11:39 2015 +0900

    Imported Upstream version 2.2-0
---
 DESCRIPTION                    |   16 +-
 MD5                            |  458 +++++-----
 NAMESPACE                      |  151 ++-
 R/CCorA.R                      |   39 +-
 R/MDSrotate.R                  |   56 +-
 R/RsquareAdj.R                 |   13 +
 R/SSarrhenius.R                |    2 +-
 R/add1.cca.R                   |   15 +-
 R/ade2vegancca.R               |    7 +-
 R/adipart.default.R            |    2 +-
 R/adonis.R                     |   70 +-
 R/anosim.R                     |   53 +-
 R/anova.cca.R                  |  109 ++-
 R/anova.ccabyaxis.R            |   89 --
 R/anova.ccabymargin.R          |   52 --
 R/anova.ccabyterm.R            |  233 +++--
 R/anova.ccalist.R              |   90 ++
 R/as.hclust.spantree.R         |  158 ++++
 R/as.ts.oecosimu.R             |    6 +-
 R/as.ts.permat.R               |    4 +-
 R/betadisper.R                 |    4 +-
 R/bioenv.default.R             |   97 +-
 R/bioenv.formula.R             |    7 +-
 R/biplot.CCorA.R               |    8 +-
 R/biplot.rda.R                 |    4 +-
 R/bstick.cca.R                 |    2 +-
 R/cIndexKM.R                   |    6 +-
 R/capscale.R                   |   12 +-
 R/cca.default.R                |   20 +-
 R/clamtest.R                   |  254 +++---
 R/commsim.R                    |   24 +
 R/commsimulator.R              |  100 --
 R/confint.MOStest.R            |    1 -
 R/contribdiv.R                 |  104 +--
 R/decorana.R                   |    4 +-
 R/density.anosim.R             |  135 ---
 R/density.oecosimu.R           |   14 -
 R/densityplot.oecosimu.R       |   14 -
 R/dispweight.R                 |   59 ++
 R/drop1.cca.R                  |    8 +-
 R/envfit.default.R             |   26 +-
 R/envfit.formula.R             |    2 +-
 R/estimateR.default.R          |   26 +-
 R/eventstar.R                  |   36 +-
 R/factorfit.R                  |   34 +-
 R/fitspecaccum.R               |   22 +-
 R/fitted.radfit.R              |    8 +-
 R/gdispweight.R                |   52 ++
 R/getPermuteMatrix.R           |   33 +
 R/goodness.metaMDS.R           |    1 -
 R/hiersimu.default.R           |    2 +-
 R/howHead.R                    |   51 ++
 R/indpower.R                   |   50 +-
 R/make.commsim.R               |  436 +++++++++
 R/mantel.R                     |   54 +-
 R/mantel.partial.R             |   40 +-
 R/metaMDSiter.R                |  121 ++-
 R/mrpp.R                       |   64 +-
 R/mso.R                        |   31 +-
 R/multipart.default.R          |    2 +-
 R/nestedchecker.R              |    1 +
 R/nesteddisc.R                 |    6 +-
 R/nestedn0.R                   |    1 +
 R/nestednodf.R                 |   18 +-
 R/nestedtemp.R                 |    1 +
 R/nullmodel.R                  |   46 +
 R/oecosimu.R                   |  226 +++--
 R/ordiR2step.R                 |    6 +-
 R/ordiareatest.R               |   69 ++
 R/ordiellipse.R                |    4 +-
 R/ordihull.R                   |    2 +-
 R/ordilabel.R                  |   10 +-
 R/ordiplot3d.R                 |   92 --
 R/ordipointlabel.R             |    2 +-
 R/ordirgl.R                    |   57 --
 R/ordispider.R                 |   24 +-
 R/ordistep.R                   |   20 +-
 R/ordisurf.R                   |   15 +-
 R/orglpoints.R                 |    8 -
 R/orglsegments.R               |   17 -
 R/orglspider.R                 |   34 -
 R/orgltext.R                   |   11 -
 R/permatfull.R                 |   95 +-
 R/permatswap.R                 |  168 +---
 R/permustats.R                 |  272 ++++++
 R/permuted.index.R             |    1 +
 R/permutest.betadisper.R       |  156 +++-
 R/permutest.cca.R              |  138 ++-
 R/persp.tsallisaccum.R         |   10 +-
 R/plot.cca.R                   |   58 +-
 R/plot.clamtest.R              |   78 +-
 R/plot.contribdiv.R            |   44 +-
 R/plot.envfit.R                |   14 +-
 R/plot.spantree.R              |    2 -
 R/points.cca.R                 |    2 +-
 R/poolaccum.R                  |    6 +-
 R/print.CCorA.R                |    6 +-
 R/print.anosim.R               |    8 +-
 R/print.bioenv.R               |    7 +-
 R/print.cca.R                  |    6 +-
 R/print.commsim.R              |   11 +
 R/print.factorfit.R            |    7 +-
 R/print.mantel.R               |    6 +-
 R/print.mantel.correlog.R      |   16 +-
 R/print.mrpp.R                 |   11 +-
 R/print.mso.R                  |    4 +-
 R/print.nullmodel.R            |   14 +
 R/print.oecosimu.R             |   16 +-
 R/print.permat.R               |   10 +-
 R/print.permutest.betadisper.R |    4 +-
 R/print.permutest.cca.R        |    9 +-
 R/print.protest.R              |   10 +-
 R/print.simmat.R               |   18 +
 R/print.specaccum.R            |    9 +-
 R/print.summary.clamtest.R     |   26 +-
 R/print.summary.permat.R       |    8 +-
 R/print.vectorfit.R            |    5 +-
 R/protest.R                    |   46 +-
 R/radfit.data.frame.R          |    6 +
 R/rankindex.R                  |   12 +-
 R/rarecurve.R                  |   14 +-
 R/raupcrick.R                  |    5 +-
 R/rda.default.R                |   28 +-
 R/read.cep.R                   |    6 +-
 R/rgl.isomap.R                 |   10 -
 R/rgl.renyiaccum.R             |   31 -
 R/scores.default.R             |   16 +-
 R/scores.lda.R                 |    1 -
 R/simper.R                     |   95 +-
 R/simulate.nullmodel.R         |   66 ++
 R/simulate.rda.R               |  155 +++-
 R/specpool.R                   |   19 +-
 R/str.nullmodel.R              |    2 +
 R/stressplot.R                 |   12 +-
 R/stressplot.wcmdscale.R       |  187 ++++
 R/summary.clamtest.R           |   14 +-
 R/summary.dispweight.R         |   22 +
 R/summary.permat.R             |    6 +-
 R/tabasco.R                    |   62 +-
 R/text.cca.R                   |    9 +-
 R/tolerance.cca.R              |    2 +-
 R/treedist.R                   |    2 +-
 R/treedive.R                   |    8 +-
 R/tsallis.R                    |   88 +-
 R/tsallisaccum.R               |   96 +-
 R/update.nullmodel.R           |   40 +
 R/vectorfit.R                  |   32 +-
 R/vegan-defunct.R              |   10 +-
 R/vegan-deprecated.R           |  206 ++++-
 R/veganCovEllipse.R            |    5 +-
 R/veganMahatrans.R             |   20 +
 R/vegdist.R                    |    5 +-
 R/vegemite.R                   |    1 -
 README.md                      |    7 +
 build/vignette.rds             |  Bin 289 -> 290 bytes
 data/BCI.rda                   |  Bin 8093 -> 8503 bytes
 data/dune.env.rda              |  Bin 464 -> 441 bytes
 data/dune.phylodis.rda         |  Bin 0 -> 890 bytes
 data/dune.rda                  |  Bin 729 -> 805 bytes
 data/dune.taxon.rda            |  Bin 801 -> 779 bytes
 data/varespec.rda              |  Bin 2102 -> 2187 bytes
 inst/ChangeLog                 | 1978 +++++++++++++++++++++++++++++++++-------
 inst/NEWS.Rd                   |  284 ++++++
 inst/doc/FAQ-vegan.pdf         |  Bin 149064 -> 147419 bytes
 inst/doc/NEWS.html             |  391 +++++++-
 inst/doc/decision-vegan.R      |   56 +-
 inst/doc/decision-vegan.Rnw    |  197 +++-
 inst/doc/decision-vegan.pdf    |  Bin 324746 -> 342552 bytes
 inst/doc/diversity-vegan.R     |   12 +-
 inst/doc/diversity-vegan.Rnw   |   55 +-
 inst/doc/diversity-vegan.pdf   |  Bin 357896 -> 360242 bytes
 inst/doc/intro-vegan.R         |   25 +-
 inst/doc/intro-vegan.Rnw       |   34 +-
 inst/doc/intro-vegan.pdf       |  Bin 260455 -> 234792 bytes
 man/BCI.Rd                     |   31 +-
 man/CCorA.Rd                   |   14 +-
 man/MDSrotate.Rd               |   30 +-
 man/add1.cca.Rd                |   13 +-
 man/adipart.Rd                 |   14 +-
 man/adonis.Rd                  |   23 +-
 man/anosim.Rd                  |   19 +-
 man/anova.cca.Rd               |  218 +++--
 man/betadisper.Rd              |   13 +-
 man/bioenv.Rd                  |   67 +-
 man/cca.object.Rd              |   29 +-
 man/clamtest.Rd                |  301 +++---
 man/commsim.Rd                 |  413 +++++++++
 man/decostand.Rd               |    5 +-
 man/density.adonis.Rd          |  114 ---
 man/dispweight.Rd              |  120 +++
 man/diversity.Rd               |    8 +-
 man/dune.Rd                    |   15 +-
 man/dune.taxon.Rd              |   48 +-
 man/envfit.Rd                  |   17 +-
 man/eventstar.Rd               |  248 ++---
 man/goodness.cca.Rd            |    6 +-
 man/goodness.metaMDS.Rd        |    8 +-
 man/humpfit.Rd                 |    2 +
 man/isomap.Rd                  |   17 +-
 man/mantel.Rd                  |   20 +-
 man/mantel.correlog.Rd         |  344 +++----
 man/metaMDS.Rd                 |   11 +-
 man/monoMDS.Rd                 |    4 +-
 man/mrpp.Rd                    |   21 +-
 man/mso.Rd                     |   13 +-
 man/multipart.Rd               |    8 +-
 man/nestedtemp.Rd              |   23 +-
 man/nullmodel.Rd               |  168 ++++
 man/oecosimu.Rd                |  360 ++++----
 man/ordihull.Rd                |   33 +-
 man/ordiplot3d.Rd              |  228 -----
 man/ordistep.Rd                |   16 +-
 man/ordisurf.Rd                |   30 +-
 man/orditkplot.Rd              |    2 +-
 man/pcnm.Rd                    |  228 ++---
 man/permatfull.Rd              |  574 ++++++------
 man/permustats.Rd              |  144 +++
 man/permutations.Rd            |  255 ++++--
 man/permutest.betadisper.Rd    |   20 +-
 man/prc.Rd                     |   13 +-
 man/procrustes.Rd              |   23 +-
 man/rankindex.Rd               |   35 +-
 man/raupcrick.Rd               |    4 +-
 man/renyi.Rd                   |   19 +-
 man/reorder.hclust.Rd          |  115 +++
 man/simper.Rd                  |   24 +-
 man/simulate.rda.Rd            |   86 +-
 man/spantree.Rd                |   13 +
 man/specaccum.Rd               |   42 +-
 man/specpool.Rd                |   88 +-
 man/stressplot.wcmdscale.Rd    |   94 ++
 man/treedive.Rd                |   64 +-
 man/tsallis.Rd                 |  268 +++---
 man/vegan-defunct.Rd           |   36 +-
 man/vegan-deprecated.Rd        |  189 +++-
 man/vegan-internal.Rd          |   32 +-
 man/vegandocs.Rd               |   10 +-
 man/vegdist.Rd                 |   20 +-
 man/vegemite.Rd                |   12 +-
 man/wcmdscale.Rd               |    4 +-
 src/nestedness.c               |  214 +++--
 src/vegdist.c                  |    6 +-
 vignettes/FAQ-vegan.pdf        |  Bin 149064 -> 147419 bytes
 vignettes/FAQ-vegan.texi       |  252 +++--
 vignettes/NEWS.html            |  391 +++++++-
 vignettes/decision-vegan.Rnw   |  197 +++-
 vignettes/decision-vegan.tex   |  249 ++++-
 vignettes/diversity-vegan.Rnw  |   55 +-
 vignettes/diversity-vegan.tex  |  137 +--
 vignettes/intro-vegan.Rnw      |   34 +-
 vignettes/intro-vegan.tex      |  174 ++--
 vignettes/vegan.bib            |   10 +
 252 files changed, 11713 insertions(+), 5386 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index a885386..a999149 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,18 +1,20 @@
 Package: vegan
 Title: Community Ecology Package
-Version: 2.0-10
-Date: December 12, 2013
+Version: 2.2-0
+Date: 2014-11-12
 Author: Jari Oksanen, F. Guillaume Blanchet, Roeland Kindt, Pierre Legendre, 
    Peter R. Minchin, R. B. O'Hara, Gavin L. Simpson, Peter Solymos, 
    M. Henry H. Stevens, Helene Wagner  
 Maintainer: Jari Oksanen <jari.oksanen at oulu.fi>
-Depends: permute (>= 0.8-0), lattice, R (>= 2.15.0)
-Suggests: MASS, mgcv, cluster, scatterplot3d, rgl, tcltk
+Depends: permute (>= 0.7-8), lattice, R (>= 2.15.0)
+Suggests: parallel, tcltk
+Imports: MASS, cluster, mgcv
 Description: Ordination methods, diversity analysis and other
   functions for community and vegetation ecologists.
 License: GPL-2
-URL: http://cran.r-project.org, http://vegan.r-forge.r-project.org/
-Packaged: 2013-12-12 10:13:58 UTC; jarioksa
+BugReports: https://github.com/vegandevs/vegan/issues
+URL: http://cran.r-project.org, https://github.com/vegandevs/vegan
+Packaged: 2014-11-17 08:25:36 UTC; jarioksa
 NeedsCompilation: yes
 Repository: CRAN
-Date/Publication: 2013-12-12 11:32:11
+Date/Publication: 2014-11-17 11:35:34
diff --git a/MD5 b/MD5
index a96a821..c2d57cc 100644
--- a/MD5
+++ b/MD5
@@ -1,31 +1,31 @@
-bf96136dde2851828d6628a29e5df1ab *DESCRIPTION
-917112eb22f9617b3ee0eb2c3432f659 *NAMESPACE
+f278a514c9adc73ccbd6d250ac171282 *DESCRIPTION
+f4bdaf8740b3d2e8c19c45fe667d1605 *NAMESPACE
 4b8531b446af54510e5fb31f841aed2f *R/AIC.radfit.R
-5c5fdbcdc2a38e2cbafdb8f2c5eb2e08 *R/CCorA.R
-6592cf7dc692f87b4a147eb625e18624 *R/MDSrotate.R
+4065e45e7c774b85d8cdf2150affdf18 *R/CCorA.R
+6924fe815d768f3b11c25d5f03250524 *R/MDSrotate.R
 1a95c5873b1546683487e17aae4fe511 *R/MOStest.R
-99bc778c8ff99648bdc120dd80e5d998 *R/RsquareAdj.R
-219c239766ce490b895cad668309bbcb *R/SSarrhenius.R
+8b2a65acb851c032affb73e3897e926c *R/RsquareAdj.R
+4ff8f584b8619b4262b0453c178b4e4c *R/SSarrhenius.R
 b16c440e6425bb8275af82740c88b06c *R/SSgitay.R
 5c50828bd1466847e54ffd44ddf62df0 *R/SSgleason.R
 e4754677764163b028ce3237d9b4a11b *R/SSlomolino.R
 d80688d78aba3cd9367ffaaaec6ec252 *R/TukeyHSD.betadisper.R
-a7e4a96ac08502dc4cd7023d0510e94f *R/add1.cca.R
-bc63dc46c564df2dea69d90f25e52ba3 *R/ade2vegancca.R
+52870b88f88d3ddab1056e03e5036727 *R/add1.cca.R
+0f953ea124a0d579b79d32974961ec87 *R/ade2vegancca.R
 3fea698281bc0b4c3a5ad26f4d44d0e2 *R/adipart.R
-a8898cc23b1b831ea783c6a257c2d2a0 *R/adipart.default.R
+4ec343c3350530e56f917aaffad7f8bc *R/adipart.default.R
 05387ee9e552fcec123b4b922e837eaa *R/adipart.formula.R
-39805e701d7a10abe111056a5b8afcfe *R/adonis.R
+581487cf45ae5cb305e54471caa69302 *R/adonis.R
 38726b7dad817378b3fc41094c11595a *R/alias.cca.R
-fa03b3668c0435eebc778b9def908be9 *R/anosim.R
+bc6deb19c3d02f60350485ac221964c3 *R/anosim.R
 a4f23289c4a5eab2a3587292b306d497 *R/anova.betadisper.R
-a3767eb78bd5d62341ff198fbf65d1b1 *R/anova.cca.R
-39899a9935779881f1d34f727f58ca3b *R/anova.ccabyaxis.R
-350cc1a6e930a195c80b5c7aef0d9b7e *R/anova.ccabymargin.R
-145b004abb6a5dca32fc8f60338af590 *R/anova.ccabyterm.R
+68e6eb427d128f411b19e19bb15f4c6c *R/anova.cca.R
+208e400508ab4e1e26de5f78e993de3a *R/anova.ccabyterm.R
+109e34d3c1a5cde84a6b427f91d0d649 *R/anova.ccalist.R
 97cbe54d0f4f7adee7a20b6c982d1ecf *R/anova.ccanull.R
 7fab08bcc596df60a22c4b04c8507121 *R/anova.prc.R
 6fb2bf929aed44ef41bfd4dfc6e010cc *R/as.fisher.R
+3dfd3650a9e487189fea183bc28d8138 *R/as.hclust.spantree.R
 eee49f0fce625a3b8266b82f05c86aac *R/as.mcmc.oecosimu.R
 cfaa7dc7d3f45c29e59e83326f1371d4 *R/as.mcmc.permat.R
 ee820c6d748d3aaa26d7412ea11128aa *R/as.mlm.R
@@ -33,92 +33,93 @@ ee820c6d748d3aaa26d7412ea11128aa *R/as.mlm.R
 ec4c60b2bae11116b56946b136a78ed0 *R/as.mlm.rda.R
 a7f01bd69394d5554cf10279a2690080 *R/as.preston.R
 50aa2e700cc26301885d9ef7ca7f12c4 *R/as.rad.R
-124345e75a121a5d750d1b306156614a *R/as.ts.oecosimu.R
-251aa91b87e2f964f24fe61791eedac9 *R/as.ts.permat.R
+02981974a248d55081272a40e038c8c8 *R/as.ts.oecosimu.R
+704239604b0ed8420cb22a31b28a01d3 *R/as.ts.permat.R
 fbec6d133dea10372ce082c7035a8ab2 *R/beals.R
-2c609113eb4b410f4dd7da61c74564c4 *R/betadisper.R
+72c63138e0fdd6c5a6e37bb0e24fa95e *R/betadisper.R
 0228981546980ab73c8346a0292fa98d *R/betadiver.R
 46ae3f75a0b483fecab589637d72a307 *R/bgdispersal.R
 4603ea944d470a9e284cb6cab6d75529 *R/bioenv.R
-4018f8ddbc314125dbd17d44f710147e *R/bioenv.default.R
-82a17642aae8182c9345a03a846a853c *R/bioenv.formula.R
-875d40515bf55ee65dc7fcdefb9f52d1 *R/biplot.CCorA.R
-e83522ded9481ebde69e61419d0033b7 *R/biplot.rda.R
+b845b8eee0b89eb699958270eb09109f *R/bioenv.default.R
+abe03a297a6200d9b48b38c6d92333aa *R/bioenv.formula.R
+4dbe9f135fadbba3f6939d64a5bb0e29 *R/biplot.CCorA.R
+bbad1330ec2d53c3a28b0881af3fd3e1 *R/biplot.rda.R
 0999bb90f22b72fade2ca6adbd01758f *R/boxplot.betadisper.R
 dd03c1ef27bc56d056dc761fd7ecd153 *R/boxplot.specaccum.R
 cbf54233db3c2839101f98e02eb538dd *R/bstick.R
-14ba8e7ffce8b0b0cc9e8a8f3160acf3 *R/bstick.cca.R
+79775431ff9ad60359d3d4b99e85b44b *R/bstick.cca.R
 229bb1ed02b171c8ce0a2bdfb3b37ef6 *R/bstick.decorana.R
 b5a4874f7763f1793b3068eec4e859d5 *R/bstick.default.R
 4bf61eddbdd1cec0bd03150cd478f3d9 *R/bstick.prcomp.R
 2ad6f2071ad822c631e04705df2b245c *R/bstick.princomp.R
-b3ec7384552e94c46102d26e38c40538 *R/cIndexKM.R
+b98443c6f47168edc9fd96e8a30c82e1 *R/cIndexKM.R
 a6df607186ceb18d204494b6a33816d4 *R/calibrate.R
 f093f401495379d30354e6424606190a *R/calibrate.cca.R
 f56b52d53b17c7dc8d8c9accd5a3401a *R/calibrate.ordisurf.R
-907e5f013ace197de05c884e71c79563 *R/capscale.R
+73bec4078e99f0afb8cfeab0ed35394c *R/capscale.R
 bbff0e97ac052b1742a0e4ba082bac68 *R/cascadeKM.R
 2e09a82ec52e211afc2ac6e8d4b40898 *R/cca.R
-22fd69d812ee7dbaf9b23d6774ab0883 *R/cca.default.R
+525437539285ce7484c1a880f6b44995 *R/cca.default.R
 e01e3acecdb9ac8d9195937e9879d126 *R/cca.formula.R
 6102056b628e02085c3bfe779a67c633 *R/centroids.cca.R
 c66d8fbe69ccca94f2ee8f777ff16ae2 *R/checkSelect.R
-c6c6a44746c586dd8b75204efa17b531 *R/clamtest.R
+6faf5d12f3e1abb40c0f8d2cfeabc4b4 *R/clamtest.R
 365fa22c822e835218740e858d256526 *R/coef.cca.R
 8ed92f141523fe0cc4c3bb51807b8b07 *R/coef.radfit.R
 ea10763445cb76b219d18bb274109df5 *R/coef.rda.R
-ab87ce0f23c88b6b40207a7597fa9b64 *R/commsimulator.R
-722959743928d23213409c778c6acbc2 *R/confint.MOStest.R
-490b90477d595160757812bc06d6a70b *R/contribdiv.R
+855938510011daed294a819e40c7dfb8 *R/commsim.R
+397c7044d52639a5a0ce2557638de486 *R/confint.MOStest.R
+d8351b54b16d7327adb545d86fcdac5e *R/contribdiv.R
 d0f10f160ac99ba936824a49c608868a *R/cophenetic.spantree.R
 edee3aaced61290b219985d0ce69155c *R/coverscale.R
-1b1a6343072d69c5ccbf9a72ba068cbd *R/decorana.R
+61f5010b6412918cc9e25bc1a8fdd9d6 *R/decorana.R
 c22bdcfe87e2bf710db3b301d880a54a *R/decostand.R
-476dabb4b68409249d28557129ff3d6c *R/density.anosim.R
-4a13947927b175862e2266ff9589f2a0 *R/density.oecosimu.R
-f15615231f0bcad3a68ab7d718968251 *R/densityplot.oecosimu.R
 e5b54fa580331ab24d28dc59110c45fe *R/designdist.R
 8fb0105cb21a5e425f72e0085fa669a2 *R/deviance.cca.R
 52f41185396ddd9acdcee9df7298d65a *R/deviance.rda.R
 1898b0d7b1ee3f860ab52aeb525839b8 *R/dispindmorisita.R
+50948e10cb607ffc89adc46d9335d640 *R/dispweight.R
 cafeabc2133997b3381c9edf6a971abf *R/distconnected.R
 51c2b04636ee7131d379d5aa2b3c7877 *R/diversity.R
 00136d6f9d6dd5ca16db7633eceb4e92 *R/downweight.R
-064822fa2240075d55e6f19fa10fc07b *R/drop1.cca.R
+9da15140e3eaffaa63ca738b008f8f83 *R/drop1.cca.R
 79c66a10794aacaf08f1d28192228bea *R/eigengrad.R
 be739eb24b369efbdaefa03537a5418c *R/eigenvals.R
 17a62527ee103c09bfba0c851ab12560 *R/envfit.R
-0a315b3c3c55494d08ae86fafd3939f2 *R/envfit.default.R
-fe12ea2872df48afc72f59efd3c50c4f *R/envfit.formula.R
+abdc99957cd34d0c5f79ca1d9dd68c68 *R/envfit.default.R
+1ef64854841e194d35484feffe7914e5 *R/envfit.formula.R
 f443552fe39ec3d6a259f953f4c3af1b *R/estaccumR.R
 81098475867f802dea0565fe426c9fc5 *R/estimateR.R
 cf0a0bf7116ef7a21e090d0c1a76f8d0 *R/estimateR.data.frame.R
-fde991da12a66144a0cd1aa30150e258 *R/estimateR.default.R
+7fb3e05576a36e717f03a84513858e07 *R/estimateR.default.R
 1df3194c88598964282c114cb8db5513 *R/estimateR.matrix.R
-8fadb18ee25d5c3689f437a4d3db0360 *R/eventstar.R
+8a07a85be771af60a831d8b4ed3c8973 *R/eventstar.R
 5ad3db71edac392b0513ccb96700af0d *R/extractAIC.cca.R
-cbf14ecd859d43cf37b1754539e9fefe *R/factorfit.R
+977df97b8394882fb925172a3c677149 *R/factorfit.R
 7e304b1c384c4d8588e5dbedd9459c73 *R/fieller.MOStest.R
 ee8330855e6a7bc2350047d76b2209a4 *R/fisher.alpha.R
 2776f68ef40e177303c3b73163036969 *R/fisherfit.R
-6baa91137f90af022902e047bde687ce *R/fitspecaccum.R
+672a4769b413f4f035de3f2c7d03f0ac *R/fitspecaccum.R
 1db8e420fdd54103774d745d356333b8 *R/fitted.capscale.R
 8fc0cd4954e2976b71fe4995291d2fab *R/fitted.cca.R
 0080b65cfd48bac5e53961b8e12682e5 *R/fitted.procrustes.R
-bdb5429c7c23d1f730c5c7c938fb5e09 *R/fitted.radfit.R
+892cc9bf94b232f6a5c2936ea4f63592 *R/fitted.radfit.R
 30ff7806ee2f3e93b436fa3d1c00fedf *R/fitted.rda.R
+c716cff48e68d6b1d74eaefde86f4cd8 *R/gdispweight.R
+76b1ffb784bab6671ebaa51c3b4bdb0b *R/getPermuteMatrix.R
 57c9a7ccff6a9c066b2aba3475f2330b *R/goodness.R
 681e6b2273eac6f1724d13ce51ad3cf2 *R/goodness.cca.R
-7af5f06020065621d8466decb16e0aa4 *R/goodness.metaMDS.R
+5364f16346047d3b8719ddef653a70bb *R/goodness.metaMDS.R
 4188d434bdb95c56734b5e76319bf55e *R/goodness.rda.R
 8a767726c40223a58d4055759bf41efe *R/head.summary.cca.R
 d17f4f6be45b52e01cd605b09b56a80a *R/hierParseFormula.R
 3d19236ee5dd2f1c678061773895e86f *R/hiersimu.R
-ab36d3e113a35b8e53a475d1e8190072 *R/hiersimu.default.R
+3ba5159beba75e010720f18116fbd919 *R/hiersimu.default.R
 edf53c3358944421756412b991432bd7 *R/hiersimu.formula.R
+a3049ce03c99ec8ecd11b96a9da214c1 *R/howHead.R
 d02fc9c672a9b2c4a31065702a3381be *R/humpfit.R
 1637bd10b39801c14b65656f29dafcf1 *R/identify.ordiplot.R
-34d3e11d35b2fbff759d2bb78c249814 *R/indpower.R
+9e731fa2cfb821bbe7ed62336d5fa3b3 *R/indpower.R
 6d30d57bbf47d448d0c267964ad7233a *R/inertcomp.R
 bf423cb7cf07abc3a4c64229bcc8fc14 *R/initMDS.R
 8de3472b9c7eb28d00cf34b9a7d95587 *R/intersetcor.R
@@ -135,32 +136,34 @@ a5d9b7fa31477efc0a1ff76a62988a8e *R/lines.procrustes.R
 f58c42977c7b03c9d2252295068b8846 *R/lines.spantree.R
 e0f782c5a0cb558189b5b15a5bea072f *R/linestack.R
 1dcc7e0504b5468a3bb2253924901e50 *R/make.cepnames.R
-106633a42f577ab3e266b146ba4447e0 *R/mantel.R
+7e0f2ff685a61a5fdb261024cea5cd8b *R/make.commsim.R
+0bfeef9d6d58fe17761a3773d64421a6 *R/mantel.R
 fdb2f4786b31866197c80d827584edaf *R/mantel.correlog.R
-6ba992d75c8ec43717adf957c8ff2427 *R/mantel.partial.R
+e13a7bd7ffb0d1ca7efd9ff0bb535a78 *R/mantel.partial.R
 e054f13ad65a7f2616561c73557b412b *R/meandist.R
 57cb748570098b7e5a5aedbddb39fb84 *R/metaMDS.R
 26b26e400ead4cf3de31d7eab29c6984 *R/metaMDSdist.R
-b4f488ea5456f4f58079691705ac729b *R/metaMDSiter.R
+6137e51d39b4efadaf2057a0e01faf60 *R/metaMDSiter.R
 f63315501ad2f3a96dee9ee27a867131 *R/metaMDSredist.R
 928df675822d321e4533ba2b7cf0c79f *R/model.frame.cca.R
 9406148bd2cfa3e74b83adfe24858c46 *R/model.matrix.cca.R
 f746e66be6ac3ccc3be9cb4b4b375b4d *R/monoMDS.R
-beae6832197823e4ade3569c021b1693 *R/mrpp.R
-0c8ef224eeeb965c8b60bb63d5adf10e *R/mso.R
+9f185582be24868f96f963fbe82b517e *R/mrpp.R
+e145ba6b52ae375dc42b40e98e8b6594 *R/mso.R
 7e428f1adfdae287a1d64a79c6f2c3bc *R/msoplot.R
 7c219818ce5841e957db47f16986080b *R/multipart.R
-d8f736c5fce5c8950406ae6d434c9578 *R/multipart.default.R
+d97409d999625ad156c0ef177eb79c93 *R/multipart.default.R
 4f3e2c82d5783c04f9a50761c82e2f02 *R/multipart.formula.R
 f5e79cb1c2dc1fcabb6e6b5cb4dc0828 *R/nestedbetasor.R
-85d4744650c1e2a0edf16809b77f7ab4 *R/nestedchecker.R
-c15884dd28790c7521ecb33175c86e5c *R/nesteddisc.R
-e65023174f4ce8874a2f88f332af5a37 *R/nestedn0.R
-2f8881df74a6d2ffc9c2f324ec9ce90e *R/nestednodf.R
-cf4c7acbbf20366f629dee40c9203764 *R/nestedtemp.R
+6100179a10b511a93105c40ac194b088 *R/nestedchecker.R
+788352193f13dda1ab16d031939b3baa *R/nesteddisc.R
+20cd3b812b25a8012ea80be97b12520a *R/nestedn0.R
+36f7ec0897cd6517784bc7dcb12ce827 *R/nestednodf.R
+830451884d0cd81f2b3f8226b593bb64 *R/nestedtemp.R
 74b2723851155de631716fa479f8ea38 *R/no.shared.R
 47973ff187f68836a19d20ea37c60868 *R/nobs.R
-2c24d7eeb78c8149275ce5b6b3c3bd88 *R/oecosimu.R
+9c89764ae10460148c1dcf9d25e05649 *R/nullmodel.R
+f57e69420b80ad432c9975ebf4f22b4d *R/oecosimu.R
 7b3988a207ecfe1ea574c5857ffcd2a3 *R/orderingKM.R
 e3d108eed97633040fa22c2b384e19e4 *R/ordiArgAbsorber.R
 871e2f900809d12e1b0522b303eb7057 *R/ordiArrowMul.R
@@ -168,55 +171,51 @@ e3d108eed97633040fa22c2b384e19e4 *R/ordiArgAbsorber.R
 da71b576fb9908051a545375e14a80e0 *R/ordiGetData.R
 99c1ec285e2afe4fb8beccbd507a123e *R/ordiNAexclude.R
 b78b985ccd7179b59e031bce191354da *R/ordiParseFormula.R
-119f2a2cb24ca7a63c8ee591b50d8ecf *R/ordiR2step.R
+cccb6afc19bd7feaa3cf2e98fdbb79d4 *R/ordiR2step.R
 7757339f5b8899cb54f13da274abda66 *R/ordiTerminfo.R
+9f40021fd5598605d692e9f5a46e1321 *R/ordiareatest.R
 e06d56a6e7d47767b9e71a73cbd3a80b *R/ordiarrows.R
 85f3047b80ab9a2ea57dd7935d07b583 *R/ordicloud.R
 793f91b9bf7c35f335949121d6f317c9 *R/ordicluster.R
-7160bf241bdbe59c1145160435b274ae *R/ordiellipse.R
+4547cf33b1c9f6c010dec7f613d6ca91 *R/ordiellipse.R
 c253906529c77aead16b293275f1afc3 *R/ordigrid.R
-9ca9cd415622e47453a06897e2fde751 *R/ordihull.R
-b64fd4746b6f8c8c792d84be677c6b4b *R/ordilabel.R
+b71c80707ad2e1160f2480b5c1787c12 *R/ordihull.R
+208b0fe7875e5c79cd400970406a44ce *R/ordilabel.R
 20a6f500c31e6640de535f0730b69a55 *R/ordilattice.getEnvfit.R
 c805e6801ef30758b9f7718269cabdfc *R/ordimedian.R
 55d2f18681d065ea6dd29e0f1e64772e *R/ordiplot.R
-58a7e6f3453b1d95e1984c392577d4a8 *R/ordiplot3d.R
-158797cbb1ac53db1869003dfe400aef *R/ordipointlabel.R
+4f4fd722823b3825edd1b5c1fdb6888c *R/ordipointlabel.R
 e57a2b904e572829a5fd97f5b6576644 *R/ordiresids.R
-601554e97800197af1bc50993406d9ef *R/ordirgl.R
 41949d4c859f08bc5a978791d387d1a4 *R/ordisegments.R
-6766b4753ea23da33975045e3943262d *R/ordispider.R
+03fd63d78c762930fd87ec787d9d4bac *R/ordispider.R
 1de439b5ffaf18640e08fadcaf7193ee *R/ordisplom.R
-2d233485d1aa0b604f1949c2a3691f61 *R/ordistep.R
-d83970050d2430b1aca11d9bd4a6cb5d *R/ordisurf.R
+189eb9be55c6527f693925348b63c87d *R/ordistep.R
+a6108f550b749db7139b143cc9e36c9c *R/ordisurf.R
 023ff6234cccb988d4bccd2e79b15f64 *R/orditkplot.R
 bc3671e5b7a30e2849d3b59f65783c97 *R/orditorp.R
 5a99010a09cd4a018b02d397be8cc4ec *R/ordixyplot.R
-c53e9402a842833d80a8df39c0adee6f *R/orglpoints.R
-56c4036863990ae0ec88a8d459540fd0 *R/orglsegments.R
-7ea9d2eb332869c020abe77e3ac6e416 *R/orglspider.R
-647cd6f0ffa48070eeaa095bb9057884 *R/orgltext.R
 9fe401c201c03826754ec5613f6ecd71 *R/panel.ordi.R
 94ff61e031b751b9594400a51decc13b *R/panel.ordi3d.R
 3bab50920d7e58e2bf0b96a6b874cd9d *R/pasteCall.R
 8a81288380543076a5a9fefe5ffc7502 *R/pcnm.R
-e08110689dfeb1098cb4d9194f084c66 *R/permatfull.R
-26a9634c5ad6bc16e2e24c283e33b761 *R/permatswap.R
-909306255cee4f36d2ba7ba13d376e90 *R/permuted.index.R
-42af50af0a792b2fac83d68bfb3b13a3 *R/permutest.betadisper.R
-4764a3d49455270e5217b72aa4d68787 *R/permutest.cca.R
+b5b164724f3872370bff36ef767f8efb *R/permatfull.R
+eeeaf4245033bd2a4ce822c919e42c6e *R/permatswap.R
+b60a2bc00daa5f58ca92af05c6f663f5 *R/permustats.R
+3d6a5ecd5feab93db30c063fd144d422 *R/permuted.index.R
+7a9689a09ce451f9cd5b9e785b5135c3 *R/permutest.betadisper.R
+3b0cdb459acd47452c382848581450e4 *R/permutest.cca.R
 b4e77b98f86c4b567d687b64e3aa8812 *R/persp.renyiaccum.R
-011a26868189ef4f3516a1b1931a2ea1 *R/persp.tsallisaccum.R
+b499c6eea710aa0c65a580dba30f2914 *R/persp.tsallisaccum.R
 f7c8d52c791489d956a7fd833913f242 *R/plot.MOStest.R
 5334397cbe3c0188928c4bd3146e118d *R/plot.anosim.R
 58d63201c7f61f455b0394b7a0e1235d *R/plot.betadisper.R
 de416206dba5566d70080bc14e86382e *R/plot.betadiver.R
 9023fd332556779fe9405e0714ec5dae *R/plot.cascadeKM.R
-a16f29fa9be2c7592458130b6b93482b *R/plot.cca.R
-38ccde16c9eb9028219d27f14d343b3e *R/plot.clamtest.R
-8c043a9b7262c33ec2054045cdaa1811 *R/plot.contribdiv.R
+094453d66164cae09c245185be81dcdd *R/plot.cca.R
+61df47a15577f5ad882d2694bdd73d38 *R/plot.clamtest.R
+dc793c47c821ba2961bf284132997ba3 *R/plot.contribdiv.R
 0ab3b62ac155ede193867f43640dbd34 *R/plot.decorana.R
-e9ad6e3db90cdebb2e5852dc016afc2b *R/plot.envfit.R
+6295a9cb63408e65a01a0faf2635e90a *R/plot.envfit.R
 10bf121300b684a8173f680de54f452a *R/plot.fisherfit.R
 9a4f1746e6e5b80b48994f404e72eb74 *R/plot.humpfit.R
 ed258eefbe3facce3533a16395217fab *R/plot.isomap.R
@@ -239,12 +238,12 @@ af0dac1922ddd4eac1090ba1dd5b1089 *R/plot.radfit.frame.R
 360dec911e8d4e772f888d89b8e0f6f7 *R/plot.radline.R
 08f6b41506125e27b37a08b3bb730ffb *R/plot.renyi.R
 20893b15e8b9db8b2282fef8c63299fa *R/plot.renyiaccum.R
-e71b966111f99c7048ebbe26c1aa6a12 *R/plot.spantree.R
+bb3dd7a884bc2ab1fad9acf47e5f33c2 *R/plot.spantree.R
 3951c0261856fbcdaff54d2e82bd8e11 *R/plot.specaccum.R
 abc96c8853871035d494dfa9086d4d6e *R/plot.taxondive.R
 6104fadf391072e78a8f2825ac41ceb2 *R/plot.varpart.R
 00d109fe7fc29440698b9f1a4bbc876f *R/plot.varpart234.R
-4c9de900b73bb0c12950b9f2b96a9901 *R/points.cca.R
+aa2199fd5eb25caaec83e7134e0b6bd4 *R/points.cca.R
 3fbe4782d2c376d98f23db8890c42b3b *R/points.decorana.R
 b5661457c540b56e77eba97b8b290a91 *R/points.humpfit.R
 a0e1e2d579fa8c1992a26a2e8d435750 *R/points.metaMDS.R
@@ -252,7 +251,7 @@ a54bcddf1b7a44ee1f86ae4eaccb7179 *R/points.ordiplot.R
 e352171f478eb27cf4a875cc3a1693fc *R/points.orditkplot.R
 7409704e2e94cd051524e8c5af3bdcb4 *R/points.procrustes.R
 80d9cee7ff1fa7ee8cb18850711a14b2 *R/points.radline.R
-b4fbbb0786258e1e83c4262e0db2aa43 *R/poolaccum.R
+41958351c903818c06bc056e71b0e828 *R/poolaccum.R
 91aa7fd2fbd99f8e325932d59886dac7 *R/postMDS.R
 f9dcd972e5c81ce936c9ec5b296d484c *R/prc.R
 32a52d09ade017e52d96eb56c05904c3 *R/predict.cca.R
@@ -266,95 +265,96 @@ eb223fbfded71ae4f0b374c1e92c3f2e *R/predict.specaccum.R
 81bb150e264f7da07989c909f4531a45 *R/prepanel.ordi3d.R
 0bb3fa9e3847051a28ceb186023bb952 *R/prestondistr.R
 7db2fd99abc08bf5e1341e5b74fb4617 *R/prestonfit.R
-953d32321b6e12a30209a8cda78244c9 *R/print.CCorA.R
+9eaa1459bcd91341d4ab3200858563b0 *R/print.CCorA.R
 3a1584c7d991683a61271fb2fc002b73 *R/print.MOStest.R
 1e07dd6a9eefb1d0af31a4db98c94305 *R/print.adonis.R
-15b31674cb74df69467902853a9254d1 *R/print.anosim.R
+dbce7c674b2e53109295fc280e96356c *R/print.anosim.R
 a530724906dc69888c27a538fc388cbf *R/print.betadisper.R
-2945b0c68fb04cb2c7dc460a419c5478 *R/print.bioenv.R
+2f1f189154aec84360211e3ae195693d *R/print.bioenv.R
 528c225f34769670a4a3049a0e29ae59 *R/print.capscale.R
-f405d7d68fc336afc819d599089b2e6c *R/print.cca.R
+1cf9b8849f2c410a543ce35bcc4650af *R/print.cca.R
+a88f54aacef2ff5cdfa99215de346349 *R/print.commsim.R
 6d0cd7929afcbe0d192c980dc5196555 *R/print.decorana.R
 65e888e34fa8a8e1d5b577fbadb3161a *R/print.envfit.R
-ff355b68b19f8d8c29917ca33d4e8b8d *R/print.factorfit.R
+ddf461832db2bd6f6f01348faf334c6d *R/print.factorfit.R
 b5358d1ce788e2c98813f59ef70f75c2 *R/print.fisherfit.R
 6da316510cb840a6a9dd8d31d0e205af *R/print.humpfit.R
 b31dbaa6493fdda1f865f95b3e889aab *R/print.isomap.R
-6263b03c7eb5ae61f917888597abc4fd *R/print.mantel.R
-f92fd82d10ce91e2cba2239e756e1332 *R/print.mantel.correlog.R
+d143e7c29760fed50528e5d791d36c8c *R/print.mantel.R
+4ff15a5ea43e997154944c7612bf3d56 *R/print.mantel.correlog.R
 9d6b6102e251f155c0b9af98d37a5f49 *R/print.metaMDS.R
 f221ea2ab4e8903ca1ae735038bfba04 *R/print.monoMDS.R
-8bd5bbb931a97ddada79e4552bd614b8 *R/print.mrpp.R
-946b3b708190211e9eb1acc94ffa102d *R/print.mso.R
+9e3feb49d153002226a102c025f6a72c *R/print.mrpp.R
+ae04b552a7569e705dc7e8e07490f310 *R/print.mso.R
 7c074bf7870cb4c306c6181769b28a19 *R/print.nestedchecker.R
 eed481e994c01ec4d7b443fb8cafad46 *R/print.nesteddisc.R
 91c6a9b43c8b045d11a4b8834d1c9d47 *R/print.nestedn0.R
 0f8e3935f95b67b96e066435743bbee0 *R/print.nestednodf.R
 2f1732fffc2fb487420a910a1d3f5971 *R/print.nestedtemp.R
-99523abcb00ceeb9cad6e85f764febd1 *R/print.oecosimu.R
-faf2620b1fbaec410af7b6e3159510ce *R/print.permat.R
-575da3562c07c6324e84288ac603b011 *R/print.permutest.betadisper.R
-f0c12622e4f250aacca5b7fabe54cbd1 *R/print.permutest.cca.R
+6126af3f4fb029f14fc7b503ee2f0627 *R/print.nullmodel.R
+f7a679fb3864e1e2e005d80ec28f4432 *R/print.oecosimu.R
+39c3d65d6a6b2a68f974e936cc6232ae *R/print.permat.R
+fd6136405300e42e73d4850a338505e3 *R/print.permutest.betadisper.R
+d661319e28299f31403b738235f6e7aa *R/print.permutest.cca.R
 a01e4eeb80a020c9e516f47f559baa98 *R/print.poolaccum.R
 c0f0559d9f53c8635bba01f1f90b7cc3 *R/print.prestonfit.R
 4abe25d64d3e55ef83ece3691f77f900 *R/print.procrustes.R
-d971f6b79980b918b6b39772c53bedbd *R/print.protest.R
+e6b0897025e574a25a533aaabe8f6e5f *R/print.protest.R
 480adb7c75b99e469b5600c62aa8d12d *R/print.radfit.R
 8b1a0e667c16cbd376a88962cb3baf19 *R/print.radfit.frame.R
 a589e795a078e79a758c1c848b728be3 *R/print.radline.R
-4e452412505b056333b97c8f5fcdc439 *R/print.specaccum.R
+d64b127c8d733171d69f09f54c756e7b *R/print.simmat.R
+cdb70efbe870e6e804f34a5ba4b4c9eb *R/print.specaccum.R
 aacebed613c7a8b259424efc39b4e061 *R/print.summary.bioenv.R
 e5b625997dd416b43f2635501e486712 *R/print.summary.cca.R
-ce329fb389fbd69d5e212ae7f504bafe *R/print.summary.clamtest.R
+9d57753b97cf0f2e3ea8fb96d99c1fb4 *R/print.summary.clamtest.R
 aea75769140cba757b5363d87a1ad2ab *R/print.summary.decorana.R
 7ce80bbbf0f63ed74b3d9cf7d84944f3 *R/print.summary.humpfit.R
 59e34aed90403b18e80cc89f75edd9d5 *R/print.summary.isomap.R
 8b2f5e8d922b8372e2515bdf0d083ab2 *R/print.summary.meandist.R
-b3eb24e4b56838e9db1811c07d37a571 *R/print.summary.permat.R
+db1dc929d679ce1641c6a7d319091e2c *R/print.summary.permat.R
 55399e1d3a3bc601466c9706b9caf1dc *R/print.summary.prc.R
 0e4bd2b0b5395b17365888876460fe85 *R/print.summary.procrustes.R
 148aa3651ac7d6a0fcc2b5f3dfb98d9f *R/print.summary.taxondive.R
 0511b5e09f1af985431cac7c1b74a8cf *R/print.taxondive.R
 46680885c99c48e4bc5d84203c40d4b0 *R/print.varpart.R
 c80f3931f05ab3066dfe93b98e737856 *R/print.varpart234.R
-326bb34df103cb26cadae2ba06b580b3 *R/print.vectorfit.R
+0001f633e4db1a1498820b0b0b45deac *R/print.vectorfit.R
 8917f5ef5398c984e0e2675c83e74c5c *R/print.wcmdscale.R
 083d526f54611d40ce749ffe95f169ae *R/procrustes.R
 819af0297e5d0a907f7fa91319c67e96 *R/profile.MOStest.R
 2f6b69115ea549102dad9b1b22c88034 *R/profile.humpfit.R
-4e28e2b84d11d8f8b0ad6755bcbe2ef1 *R/protest.R
+a46a4071a273af8bea01b5ef29b96f54 *R/protest.R
 9169bd797963b5b121684de528651170 *R/rad.lognormal.R
 b129148e6efbbe1c45482c93d66f959b *R/rad.null.R
 949aca6b5bb7954a91819b17e515e396 *R/rad.preempt.R
 5a7e143e1292681c3d9b1e7b1b792aa6 *R/rad.zipf.R
 6780818aadc7b8c92c8f9a26a84b7dc0 *R/rad.zipfbrot.R
 23fd677c0c8d6f4a8e6c6444d2cc8601 *R/radfit.R
-235a5213c266f1b2101c767ad1528888 *R/radfit.data.frame.R
+86012c39c6dcae195cd987d13e1f8a2a *R/radfit.data.frame.R
 2f6d8082f39540bbe7cf0e0cf3c666c9 *R/radfit.default.R
 36cfb246e391a7434c714fbb2269cdb6 *R/radlattice.R
-33e294ffd0c75b25d36d18d4a3ad9884 *R/rankindex.R
-0a7c015a3700b34f29cb6bb0b94f68d2 *R/rarecurve.R
+a93eaad75c58209ac9e7faaeb0642457 *R/rankindex.R
+eac0f40f1fe7d0b412010316aae8318c *R/rarecurve.R
 6be7a6edec618f464dd7c783eca371f0 *R/rarefy.R
-9bea5153ae57e1c729623f59b9395415 *R/raupcrick.R
+d9a219ae6f3e6155ae76bc59d3e14d30 *R/raupcrick.R
 8116cefb40383fb571abb9fb82091087 *R/rda.R
-f9008aa5cf3109a3607aca9ac6bfe8d7 *R/rda.default.R
+1b73dfa420d8447e403e8e214afd2207 *R/rda.default.R
 90b562e8a94febce8430a344392a2943 *R/rda.formula.R
-eefe337541bf9dce01852dceeac12e1c *R/read.cep.R
+f87845cb9a96298b61aadc899f995b3f *R/read.cep.R
 ef65ea5fb624aef7e34284d932503876 *R/renyi.R
 9bebccc25480b88b522d723c1d644bbb *R/renyiaccum.R
 90a897e14094cc1eba66c5f59a5bb79c *R/residuals.cca.R
 38df11064481bc21f8555152cfd3d115 *R/residuals.procrustes.R
-4ee8534c438c824f1cf4ea62337e259d *R/rgl.isomap.R
-9134d94e86e79778b460a4e522fd7c04 *R/rgl.renyiaccum.R
 4ffd3879dcf18d0bdef8ffc8bf5b8ad3 *R/rrarefy.R
 ed66f1e11f53f7fbdbd8663de2b7f5dd *R/scores.R
 d46cc2163dbc117a978f64d54df7bbd4 *R/scores.betadisper.R
 341ee43f8524dccb5e369513a16923b1 *R/scores.betadiver.R
 66a78103e2797044c3a7d3ba15d43811 *R/scores.cca.R
 447810692c53fab8cd6907ec920e0852 *R/scores.decorana.R
-2673ef1fe5912531497baaa07bfd9920 *R/scores.default.R
+d7485aeb982d12832c96f54a47dff2f4 *R/scores.default.R
 6415eb8600a7e516ae6c053832c45357 *R/scores.envfit.R
-7609ff8ca8b5d96e69f1933e4f90c483 *R/scores.lda.R
+1124e9978354cabe44bd1a7e258a4392 *R/scores.lda.R
 f60983f80088428cc3ce955a975fa6b3 *R/scores.metaMDS.R
 d07740764d55fbfe7560ad101ea495a1 *R/scores.ordihull.R
 f146575a3f60358567dfed56e8cbb2cd *R/scores.ordiplot.R
@@ -366,29 +366,33 @@ f146575a3f60358567dfed56e8cbb2cd *R/scores.ordiplot.R
 3fe910b739d447ba5026f077cb0c670d *R/screeplot.prcomp.R
 66d8c6dfecb51ca1afdf309926c00d08 *R/screeplot.princomp.R
 95f15a952493d1965e59006be7f0b8d1 *R/showvarparts.R
-d634237d94bc2d40dd8e5f2170c49353 *R/simper.R
+dffa3bb2eddc1944323febbd8a31ed89 *R/simper.R
 b35ee7d9cdc86eecefb5dcf478fc8abf *R/simpleRDA2.R
-73367e17a66ffeca6410771f0ca8d1ef *R/simulate.rda.R
+6670475eff913b3586560d4b2ec65149 *R/simulate.nullmodel.R
+a5e793142ae74276a02e761cfe255f22 *R/simulate.rda.R
 9f235c650efc4217a3cc88996b627e1d *R/spandepth.R
 3bb1adac8b593f81ebf4c2146ee112b9 *R/spantree.R
 3653508b2e2ae2575c2e86af246df42a *R/specaccum.R
 3c94a17c2602903234a65cb244053130 *R/specnumber.R
-6e382a42402a7bc206b6eb6b6c278d77 *R/specpool.R
+41c068dce4a7a6146686614e647c8a96 *R/specpool.R
 77cc19684e9ceb27500ca7f802923328 *R/specpool2vect.R
 2cf0545588fb2bb86185f71c21bda1c5 *R/spenvcor.R
 33d884aae53dcc5fa80d9e9ffae4515e *R/stepacross.R
-8a9ad6d02dc566d954f3426f600ebf19 *R/stressplot.R
+bd2d1d998f18e7a9c65d5072932cbef2 *R/str.nullmodel.R
+301ba29a09201611845f7adb2b2d7d81 *R/stressplot.R
+f687d03b090a0962026ca60272ab90d5 *R/stressplot.wcmdscale.R
 a61ce658c68a8b17e094f7b0040e4efe *R/summary.anosim.R
 7145185243348b4a1be616329b87b9ff *R/summary.bioenv.R
 772628c3a5de67f983d6ba15d8274b40 *R/summary.cca.R
-7bbf5652f229c2e7920c43192bb2e7b1 *R/summary.clamtest.R
+c1f7a5045e57ac95c9ae7f13f2a11743 *R/summary.clamtest.R
 bf8be2e9b02c1a3cd5f3ad0005e8354c *R/summary.decorana.R
+088b8c747d67fa5ad158304a21f32628 *R/summary.dispweight.R
 4c9fc84fd70c4555d5b5bfc1768dc5a8 *R/summary.humpfit.R
 51d3b042e199d201235f10f3d4a57f70 *R/summary.isomap.R
 76171bbaa9984ffbb31cbdd9e1771f4c *R/summary.meandist.R
 76587e48a9cc631cf4e9f2e369099fce *R/summary.ordiellipse.R
 27c7f052d2d9674d898f0aa3d741a8c4 *R/summary.ordihull.R
-7d5283ea3d2741f317cb59df17bdc4df *R/summary.permat.R
+11578277712acd07ebb5f7c66c0a47b8 *R/summary.permat.R
 3ef798c28399894f4bf0ba649360e69e *R/summary.poolaccum.R
 71609e72f80900e7c461440336c98c97 *R/summary.prc.R
 a8c1bf3d3f371f3ee5a3de3c39a4113a *R/summary.procrustes.R
@@ -396,32 +400,34 @@ a8c1bf3d3f371f3ee5a3de3c39a4113a *R/summary.procrustes.R
 25e8a947666bed660358e359730160d9 *R/summary.specaccum.R
 8f34602692f6a5476bb4a6d7b2a269fe *R/summary.taxondive.R
 7c55757d8187363116d3561d53cbc5ba *R/swan.R
-f1530a301997e88e27e2eb072e295515 *R/tabasco.R
+c2c3f2005758d438c6f2815ab2495d5d *R/tabasco.R
 62bc8a0693a71adb1457b0a102d8951a *R/taxa2dist.R
 90ec6192eb43f8fd29bc93485f4ef293 *R/taxondive.R
-dcfdf0eb68a8acfa6a8b0cfb6fcac0f5 *R/text.cca.R
+3f933def4e4dbe3792be7c977c5612b5 *R/text.cca.R
 1f4d9ba97695c0fa99456f427114b049 *R/text.decorana.R
 6a6e426f6e464bb7bdaa75d92674562c *R/text.metaMDS.R
 974bdc93cd9b352d30debf3e93111136 *R/text.ordiplot.R
 846003f5f9de23241805042ac459ed1d *R/text.orditkplot.R
 0fc7a75cf414d76cc751cc33ed5d6384 *R/tolerance.R
-207fe2c0d2f5da03f65e287f93ea2d15 *R/tolerance.cca.R
-4c4be7182a861de34673afccd1bd372c *R/treedist.R
-350a6ba06c34f2efc74c6aa503f8a7ab *R/treedive.R
+f9d58b5156b1961cc1e338a596b7a36d *R/tolerance.cca.R
+7b45ffae615add899174090372c90188 *R/treedist.R
+1400038a7df6468da830bc75782d3873 *R/treedive.R
 cf0f2cbf17bbd944d455f71918ab88eb *R/treeheight.R
-4f9be34a32cdc1e7694bbbdc9a1fd6d3 *R/tsallis.R
-66c89c58df24894b292790214842d992 *R/tsallisaccum.R
+26fffea5380da4106dfe1f97681524cd *R/tsallis.R
+29175d8b46c44e66add86564d73218a3 *R/tsallisaccum.R
+78a5b5292f78b0fd84b943dceddceb97 *R/update.nullmodel.R
 a0682e3051c01f8f5823b158c018d18f *R/varpart.R
 8d09b6b6390c2866234763beae855cf3 *R/varpart2.R
 77fef5d5355715c9928edd3b9995d415 *R/varpart3.R
 7f4f5d715a9b3e1a916f72ffbfebcc19 *R/varpart4.R
-550d6c38da0321003e97fdbea68ae31b *R/vectorfit.R
-7695b046ee79a9b3140a22d2bed6e048 *R/vegan-defunct.R
-b43aca5a7abcce2f36629ed4036add72 *R/vegan-deprecated.R
-722ab25ac95b6c419b29a94347916f23 *R/veganCovEllipse.R
+21b6af9e9d96c2fb4c642894efc82227 *R/vectorfit.R
+6f433537ff5ce5811a0ca8c4ac4c729d *R/vegan-defunct.R
+593e3e9774284bfc0362a5c0b0b2fbcc *R/vegan-deprecated.R
+129a1cf5e913a365ffd679b63378811b *R/veganCovEllipse.R
+5656cc97f30992a5e02dba21b2846485 *R/veganMahatrans.R
 a3b54cd895f7ef425d2827501f03a1ff *R/vegandocs.R
-916e31172356ad50ea304e46570cddbc *R/vegdist.R
-cbf0bc1d54508fe56afcc8bd789abd77 *R/vegemite.R
+0f163ee6f1ede80946907518f7cd52ea *R/vegdist.R
+b0194270cdab1ba7f2b190a7077f78c5 *R/vegemite.R
 5d6047d7f63f04ae9ff40179c534aa0b *R/veiledspec.R
 1f6deab4b61a9be48be43e80703cd4b6 *R/vif.cca.R
 322254f8bc3b02f7a971058cbdfa9edd *R/wascores.R
@@ -431,11 +437,13 @@ ecfd48e2f4df6bcd683a87203dd80e12 *R/weights.cca.R
 73babeed9df14635d99b1a619a1286e4 *R/weights.rda.R
 4138f57726620d493f218e5e3da0013c *R/wisconsin.R
 17cbf4b5c186fe577cf361f0254df1d6 *R/zzz.R
-b0c5bd6da3c6d704eaf46677e2260ff1 *build/vignette.rds
-b92ea8ce0eb1f5860d535645fd5286b7 *data/BCI.rda
-31eb8ea8153321cb2a5e9040ef12d50f *data/dune.env.rda
-51793132b98f7dd3c963f2500a190eaa *data/dune.rda
-4ea1caae5860aea35328b4d7102e55cf *data/dune.taxon.rda
+fd211597259383a5219e578e7c63bd1e *README.md
+28cb7a9da307b57fc09d0281f336bb40 *build/vignette.rds
+0f283f2be37fdfec65ec6e5b0146889c *data/BCI.rda
+412ea5cf443401fe54f0b14c14c45806 *data/dune.env.rda
+b0a8834b45c79fc017717838d700f0f6 *data/dune.phylodis.rda
+339a47050fe72465c659e82378562781 *data/dune.rda
+7a9f931df676f8a3f1489a2119304915 *data/dune.taxon.rda
 442171925629c4ef13f9d70601ca9cb3 *data/mite.env.rda
 c51905bd025ccea2737527b6fca4a081 *data/mite.pcnm.rda
 272a07c3f4162668600425cb1a043e10 *data/mite.rda
@@ -443,149 +451,153 @@ c51905bd025ccea2737527b6fca4a081 *data/mite.pcnm.rda
 89b86c533283705a67a0d1f65c69a485 *data/pyrifos.rda
 ee3c343418d7cf2e435028adf93205f1 *data/sipoo.rda
 f87df84297865b5faf31e232e97a0f94 *data/varechem.rda
-82153b3e47807b926b77cef49900f422 *data/varespec.rda
-9e1416aeb7c96ca4beda07b598707c38 *inst/ChangeLog
-93e2dddfd4f2cd4fef866eb446ed128a *inst/NEWS.Rd
+7136b8666250a538d60c88869390a085 *data/varespec.rda
+d157d49a41a6136c8149caf411fa4916 *inst/ChangeLog
+c4c282c929c236c076808d5a65d56cdf *inst/NEWS.Rd
 9abfab8b05c34dd283379a7d87500ffb *inst/ONEWS
-4ffa0736ba10dddfed202a2a0ef51983 *inst/doc/FAQ-vegan.pdf
-e98d4ad5d4d34bfbdf934da7deff70aa *inst/doc/NEWS.html
-a6d028bb062957daf8ebbb3029dec88a *inst/doc/decision-vegan.R
-fce7a85b3e7f348fb12812758dc45d5c *inst/doc/decision-vegan.Rnw
-7e9332fe0291fa99607406188fec330d *inst/doc/decision-vegan.pdf
-5e47f7bb1e894e5042bfc6b2d1f3a132 *inst/doc/diversity-vegan.R
-658b2d71438cb7f1a7315b7a893b993c *inst/doc/diversity-vegan.Rnw
-1398e7d95b307a526828e3b9e1c35a92 *inst/doc/diversity-vegan.pdf
-99be07f681b826342c48620d57933b76 *inst/doc/intro-vegan.R
-66c024cfa42524d1649f7033286c52b0 *inst/doc/intro-vegan.Rnw
-06227f76f0ab7ca87c9e308c6a46f8e3 *inst/doc/intro-vegan.pdf
+9e43d1d42c1676c304078899396cccd7 *inst/doc/FAQ-vegan.pdf
+051443e01135d5f8b06127cf4efd120a *inst/doc/NEWS.html
+e3e19be6e4226ef4b943c5dd46c3e161 *inst/doc/decision-vegan.R
+09c81618a5a91cbfc5e8c3d969dc63fd *inst/doc/decision-vegan.Rnw
+e8ac356ccf0904c3189aad62d093d8d8 *inst/doc/decision-vegan.pdf
+91c198ab4bae32c5bab4b770b9dd3f42 *inst/doc/diversity-vegan.R
+9008f797d0ed0ee26368e148c6138c52 *inst/doc/diversity-vegan.Rnw
+64b54e4da74903f72a2bc4f0d16007c4 *inst/doc/diversity-vegan.pdf
+42c6873fda4c73ed0ccdeddef41563b2 *inst/doc/intro-vegan.R
+ddee3279ac0982a3da0bcf9fc10947ac *inst/doc/intro-vegan.Rnw
+5b01bd1f1d4ce30257785815ddf710bf *inst/doc/intro-vegan.pdf
 a1c35ea488b715441cd2269eb6998945 *inst/doc/partitioning.pdf
-e2d32a2a53e75e8be60574c7e1cc3239 *man/BCI.Rd
-33db614085aa448f4241cd79ddc62461 *man/CCorA.Rd
-525d1213753747626c919c22d14073f5 *man/MDSrotate.Rd
+5037564d03aeac297d52c412762ffed8 *man/BCI.Rd
+d4d97e3b71561f61bd9f1f0686a57434 *man/CCorA.Rd
+e540cd18b5f99d385a8d2945052dc70e *man/MDSrotate.Rd
 fd218be03aa2591e5123d11780ccba1a *man/MOStest.Rd
 f2823a48acb6f861404b6682b3f52a45 *man/RsquareAdj.Rd
 73f141f28a1aca961cd4e992d8610934 *man/SSarrhenius.Rd
-d7499407f3dd1cc977ea094eaa2af35e *man/add1.cca.Rd
-ee4d14cea6c51078a02a43978f4f5e9d *man/adipart.Rd
-a2fa01618dd236031de91527f7902ce9 *man/adonis.Rd
-0443d02a7f37af2e14e745c3c1eb3aee *man/anosim.Rd
-79ade6b5344d65d7cb6b4306dde6576f *man/anova.cca.Rd
+32f805196e58b526c3a2bab5c87116b3 *man/add1.cca.Rd
+582f8fe9738d853476565f57e72862de *man/adipart.Rd
+caf191d6c5c1e618e11cb8d7441407b4 *man/adonis.Rd
+9a341d0716f7d6cc46b427d7cc017d2d *man/anosim.Rd
+193d8a15c966cc0d5d9a71008a29eca7 *man/anova.cca.Rd
 c57af27fa11dadcd48981fcf42b2d221 *man/as.mlm.Rd
 8e3718248ff8d48e724654ab17caa2e2 *man/beals.Rd
-5a89017522b4837379cfc0c7f6ab1b7e *man/betadisper.Rd
+de6584621a76810c11f7ae392a24c261 *man/betadisper.Rd
 1336f0afb69a05bee9f6e7706d81d038 *man/betadiver.Rd
 b04c2fae35dba2d97cb248814d5e2fe9 *man/bgdispersal.Rd
-890f6ec54dcf7962cbaa197ef7be8339 *man/bioenv.Rd
+860b9c7f2325f500c27f3c903831efae *man/bioenv.Rd
 1eab4a6369fa1d203a4a3f41f4ee4c06 *man/biplot.rda.Rd
 88602656153ee95f10335487273e132d *man/capscale.Rd
 d3c1067cb5e4dc6f6b9a1c7d94e70ab5 *man/cascadeKM.Rd
 460a434181bc442836ad7f287289e815 *man/cca.Rd
-97385598d1197eeaf8c987bf34329013 *man/cca.object.Rd
-3d77fcd7ce12271fcabbaf0f080c030c *man/clamtest.Rd
+9a2708af1831b9ddce1004971b6f4efc *man/cca.object.Rd
+d808372c406b8d0ede9e26623655ac08 *man/clamtest.Rd
+362992febcb1479b750a995203626e40 *man/commsim.Rd
 335d0f7691ad9d0c48fffce9f9db6201 *man/contribdiv.Rd
 c41033fb9c572365490cc23b9870c950 *man/decorana.Rd
-00a34002a7464d1008b2ba63526a4afe *man/decostand.Rd
-bda32a146ba37c0193a850b9358e4ef8 *man/density.adonis.Rd
+e485bac4360ba510f8db3c9d361701f8 *man/decostand.Rd
 22e3451a1cc9e294c2ad0e1a4531b136 *man/designdist.Rd
 c01e0664652fbc8ef4963059bee4e422 *man/deviance.cca.Rd
 f58b474141a1b0fdf438bfe6dd8da0c9 *man/dispindmorisita.Rd
+70c0ef1ef267a37e2677476a43b72265 *man/dispweight.Rd
 f3f742efa7511a4c33108a00b512ebd9 *man/distconnected.Rd
-3de81ae8ba2466dd19e0466df3002dd7 *man/diversity.Rd
-31a227bd7e2dd4cf92c9b936e1f49963 *man/dune.Rd
-5bdeafda9c2d62feec3bde6b4cd49e3b *man/dune.taxon.Rd
+acd876aeea0d7640320815b9cdc5e898 *man/diversity.Rd
+08b96c1a45c11ffcb7f0da33a888421a *man/dune.Rd
+91fa409075b3bd64706c8ff380b3d01d *man/dune.taxon.Rd
 5f5f8c7df063606ccde6124c5dbe8add *man/eigenvals.Rd
-516c0b3d11d2746d15ead8919a35803c *man/envfit.Rd
-cffbfaef219e46846902deef9271bccd *man/eventstar.Rd
+c38ec974394c7d985645112387d6c48e *man/envfit.Rd
+d2cf422a3d7702ac6293fcd3ff046afc *man/eventstar.Rd
 5857c2307b1dfd69953a88bd3c384180 *man/fisherfit.Rd
-841b3f32510ed2c3f64186d623f858ae *man/goodness.cca.Rd
-4d5e44b51132481ab920292b2651041c *man/goodness.metaMDS.Rd
-81f199c3ba2c65a7b6f81cbb7cc9886d *man/humpfit.Rd
+9594c689994edba07e1712b9ea3b550d *man/goodness.cca.Rd
+afc00cd6ac8f9b56bffbbb77e369057d *man/goodness.metaMDS.Rd
+81f6bbc59aedfa21953278c285c250bf *man/humpfit.Rd
 c8fea575af3da292987d4f8c4aa831b0 *man/indpower.Rd
-cebefbf4e3090e7f7605c2f5ce303da2 *man/isomap.Rd
+2b1c8ca24c00022de093f85a8645d419 *man/isomap.Rd
 5f2c36639ab3e98a5fbb03bb8c8adda9 *man/kendall.global.Rd
 e473a6d2589993b85fc1176866fdde78 *man/linestack.Rd
 59ce2773a5d92535708137747a52f358 *man/make.cepnames.Rd
-e8a27e8ace7bd56776794ebfd8391d64 *man/mantel.Rd
-54f6de61cde7c8bd03110cf1dd1358e1 *man/mantel.correlog.Rd
-f0f939023236abf04e5af5e2a3652082 *man/metaMDS.Rd
+f8d6f3bd27a07dc00c6779405652ec07 *man/mantel.Rd
+85d798177d90587416f9ca88e2f445c9 *man/mantel.correlog.Rd
+e598d23fdc8a162bb793a3aa774559b9 *man/metaMDS.Rd
 4cfb02239809fa03b28e10ec8e8c9c6b *man/mite.Rd
 c50bd45c9e8c6e892d2dd8f7fe5f0bd9 *man/model.matrix.cca.Rd
-682de6d686378a35abbe1442129c1ce2 *man/monoMDS.Rd
-274abac4b2e57f30f1fb2bcb809c3c0f *man/mrpp.Rd
-382e99ee5b67d89f2f9ad440236929f5 *man/mso.Rd
-838a98b67e2015061513c32731732608 *man/multipart.Rd
-646fcb9015f0f3dc520ab2be0db5c042 *man/nestedtemp.Rd
+fef80c85e48be18dc8c62566da0c14b1 *man/monoMDS.Rd
+b897a6552d7524c853e91f9d8b972cb6 *man/mrpp.Rd
+181ca1c040aff6f79fca96d4c0b9708c *man/mso.Rd
+10d5049f8819e378f7f95fdb3858e6e7 *man/multipart.Rd
+0a86c6e08d0b55f9b3dc4b35878d9acb *man/nestedtemp.Rd
 c7f768b6f36aec4bc9d5b4c8f72c1141 *man/nobs.adonis.Rd
-90239dffda8fb82e8e8f3e6b46b0be7a *man/oecosimu.Rd
+d1b6a742f96e2dd5f422008221b05ae0 *man/nullmodel.Rd
+18d0fe4cbf14475524b7f99c9d7fc753 *man/oecosimu.Rd
 3e6f6e4c473e4ea91c18d34bf487ff0c *man/ordiarrows.Rd
-03aab4cb7ca71141281d2abd3e810231 *man/ordihull.Rd
+de06b800bfbded5bd5b17775f930a3c8 *man/ordihull.Rd
 8f8a34c5fcfcc1fe9f88ca16e84a1da6 *man/ordilabel.Rd
 994cfc973f88c682b741e48377e1b9b4 *man/ordiplot.Rd
-b88713c8f69a6a0e677d406b51d2062e *man/ordiplot3d.Rd
 61d4e0e9ab3c3cd566d541f6954b0cda *man/ordipointlabel.Rd
 d4d27a34b2e7d9d1b732a0d06cb9d9f4 *man/ordiresids.Rd
-2939ccc8e48c8c393ed8a8d6bd0ddd5d *man/ordistep.Rd
-4df251d6d187ce05663f58b0ef6fb742 *man/ordisurf.Rd
-42dfa56db8adbd0a44e716086dee6d7d *man/orditkplot.Rd
+da0b3d8e0681a5ddc2bea83fd1796048 *man/ordistep.Rd
+72b2485b893cc2cfb63ddecb095492f9 *man/ordisurf.Rd
+3887cd29dd4adc986e6cf6618136da95 *man/orditkplot.Rd
 8785cc44c56d1b24fbcbde8de9e325d5 *man/orditorp.Rd
 d971701b3c6f89b3a6b358a3966a43d2 *man/ordixyplot.Rd
-adc9628edf2079867649bbaa68daee53 *man/pcnm.Rd
-864ed25d069da12a2226310240f1f740 *man/permatfull.Rd
-807092c467db330149046d1dc9e9ab91 *man/permutations.Rd
-0ca5118e13c43995271c3a9175145ab5 *man/permutest.betadisper.Rd
+1d1d238c33b95eddfa4b497da9ba4c57 *man/pcnm.Rd
+d3fd306546c43339ad7d8fd985a28801 *man/permatfull.Rd
+1abd9ac0457eb66698e2a33e33115adf *man/permustats.Rd
+4a2ed8481b1f6805d343e83fda91e0ed *man/permutations.Rd
+10e7cc018db792a9c75f3cad7ca3e999 *man/permutest.betadisper.Rd
 47898b675bb6d36fce6961e6a70d8d57 *man/plot.cca.Rd
-7b4d950fcf9d3f4591a217ae9b5ccf7e *man/prc.Rd
+242e814ce0a68af785e499a9dbc5d078 *man/prc.Rd
 37cad2f61855e0cc430943ac98885069 *man/predict.cca.Rd
-80aa98f140babc312081f0a01ad3a77d *man/procrustes.Rd
+ebe62922b07c37fce551cc8c3a6231c1 *man/procrustes.Rd
 01a6ca946df5ad493adfb54003ad8e00 *man/pyrifos.Rd
 f61f64cc1be643149fd02f08a0cd7f9f *man/radfit.Rd
-3e70bfa0a8ae5d4c3c60dba77500b584 *man/rankindex.Rd
-64342c9ea7e7b2607d433c3346f9726a *man/raupcrick.Rd
+8b12fb04530537414e03e1a6fbccda7c *man/rankindex.Rd
+915c6ea3098d6ac9c3de6249606b2fe9 *man/raupcrick.Rd
 2867f5f71a47da498cbadf9aaa01b2b6 *man/read.cep.Rd
-dc7d7857e7a01ea099fc97c8a3a13239 *man/renyi.Rd
+0a09dd95ccf94b90d99a713f8b810bca *man/renyi.Rd
+eec06fd5cfdddadb56bca849f88b38f0 *man/reorder.hclust.Rd
 5c25a88ca55fabce5783509c706faad5 *man/scores.Rd
 8104fd642b527f76e159580e3d317fcf *man/screeplot.cca.Rd
-814fe1cad3b64291fd13772a6078ea9d *man/simper.Rd
-45cd418b2264b4eb6abc89cc11a7877f *man/simulate.rda.Rd
+fa4c03b6622b3cba08b633393560b70a *man/simper.Rd
+621f8a2810727ab3523fc0bd69a56dca *man/simulate.rda.Rd
 b34910fa6ed6c9bfbd90a7f7443a135f *man/sipoo.Rd
-d7dd63e022633049766cffdaf6cac723 *man/spantree.Rd
-2e0ddc50d04a9b8dae57ee475b3edc5c *man/specaccum.Rd
-53818a4edb1d52d425065bea76963021 *man/specpool.Rd
+37121fc0a195e97b3b1287678d175bab *man/spantree.Rd
+a998a73ff2783a45fce220dc18924174 *man/specaccum.Rd
+ff2fecf2a0726ffc824a1a626e462f3a *man/specpool.Rd
 5b9e51c85395f80f8504954e4175f877 *man/stepacross.Rd
+812fedada0ae3582c28f4f91bbcedc09 *man/stressplot.wcmdscale.Rd
 0aac5f5c8f58fc8fe1cb6c0ba819b196 *man/taxondive.Rd
 85f77fcf89b48586502c00baef8e5561 *man/tolerance.Rd
-bfe306a0cb659930e17e46d191f7629f *man/treedive.Rd
-fd154a9d281c586683c87fdf0d44ccad *man/tsallis.Rd
+a4b37297402220dee75997c4f49a729c *man/treedive.Rd
+fb3bbb6521943417c9ee47bab463b189 *man/tsallis.Rd
 033dd7d7917185cea81e4d7afcd59df9 *man/varechem.Rd
 e7717c542e5c0372ca2ff71bcc26d8b0 *man/varpart.Rd
-699122da39bdbbfbfeb6a1f8f078242c *man/vegan-defunct.Rd
-c8e9610be158d93af56d7db754e560d9 *man/vegan-deprecated.Rd
-c0b323b085b93d3f57bac7f38a7868de *man/vegan-internal.Rd
+0e0e4db86ab5afa92f6d5a921c5e14ff *man/vegan-defunct.Rd
+76c332552a660a95a4e652c251187da9 *man/vegan-deprecated.Rd
+8e32395af09dfb764c821b9f61416f23 *man/vegan-internal.Rd
 1798f9f9db805ac6e3a71c953eae8364 *man/vegan-package.Rd
-f6f284ceb3b9a39e7b079936f9400cc2 *man/vegandocs.Rd
-bb223296c8d1135c12cee29532d9e696 *man/vegdist.Rd
-982fdb71847d8b7338c7effc5432027d *man/vegemite.Rd
+18a565c35d567fb3fd1666462dfa6ed2 *man/vegandocs.Rd
+ad48b24429d673e1af3120d0cf6c3eb3 *man/vegdist.Rd
+a2cc1d837017b4de0b4bec617e29533d *man/vegemite.Rd
 c3209a8eff0fe638d3a43b25ea5bec16 *man/wascores.Rd
-7d16b5020895bf5ead253010c733e7e5 *man/wcmdscale.Rd
+9410503f25833dcbdd38f7072fb7cea1 *man/wcmdscale.Rd
 dd4512521b5b7a678f87c7f27d6b986c *src/cepin.f
 dd22a1632081402e62320a4c0d6b2aa9 *src/data2hill.c
 7703ffdb64c5f31de56bfd8253877ad6 *src/decorana.f
 87b05dd087e591f1f8e92ecbf1983207 *src/goffactor.c
 e19f79f4b3fef915a3ece2db284475f6 *src/monoMDS.f
-9eec9972cea872032eb9310491b93ab2 *src/nestedness.c
+a42c4629717137858295a1eb6f3e89de *src/nestedness.c
 0299086afe16bd7a4b57835d1b11f6d8 *src/ordering.f
 31bdbe9b08340e1662a62cf6e61ade6a *src/pnpoly.c
 b9b647fcf8a3e59e10b9351fae60ec06 *src/stepacross.c
-87233fad519f344865adfc74c92c2a1a *src/vegdist.c
-4ffa0736ba10dddfed202a2a0ef51983 *vignettes/FAQ-vegan.pdf
-7d9cb640d68ea4c935a7c3e1590c5532 *vignettes/FAQ-vegan.texi
+36ea09c9a6553010e786f0e787185d60 *src/vegdist.c
+9e43d1d42c1676c304078899396cccd7 *vignettes/FAQ-vegan.pdf
+f36ab9880f86e1026f7c76c387b46d7b *vignettes/FAQ-vegan.texi
 45ce50de9edf3aeacd8d11d1483f764c *vignettes/Makefile
-e98d4ad5d4d34bfbdf934da7deff70aa *vignettes/NEWS.html
-fce7a85b3e7f348fb12812758dc45d5c *vignettes/decision-vegan.Rnw
-73a0586b73a2d0634a05b768ca8492d1 *vignettes/decision-vegan.tex
-658b2d71438cb7f1a7315b7a893b993c *vignettes/diversity-vegan.Rnw
-429f9669fe0c242b122a61de0303e714 *vignettes/diversity-vegan.tex
-66c024cfa42524d1649f7033286c52b0 *vignettes/intro-vegan.Rnw
-ee3f2c8366052dea6b145c30f304430e *vignettes/intro-vegan.tex
-0c229cd8dbde571130ff2f4b516414e5 *vignettes/vegan.bib
+051443e01135d5f8b06127cf4efd120a *vignettes/NEWS.html
+09c81618a5a91cbfc5e8c3d969dc63fd *vignettes/decision-vegan.Rnw
+19bdb0e4c22aa8eb40337a0b78d1dd22 *vignettes/decision-vegan.tex
+9008f797d0ed0ee26368e148c6138c52 *vignettes/diversity-vegan.Rnw
+3ab6c7357c26930e8bf7501ada9038d4 *vignettes/diversity-vegan.tex
+ddee3279ac0982a3da0bcf9fc10947ac *vignettes/intro-vegan.Rnw
+0ab415a068116fb1417c809c53f83b94 *vignettes/intro-vegan.tex
+5b5c916bf21a82d716bef6ac87716b43 *vignettes/vegan.bib
 fd58fa43e5e36d0ddcddd26dac1c7e31 *vignettes/vegan.sty
diff --git a/NAMESPACE b/NAMESPACE
index f786902..e5c2072 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -5,30 +5,31 @@ useDynLib(vegan)
 
 export(CCorA, MOStest, RsquareAdj, SSarrhenius, SSgitay, SSgleason,
 SSlomolino, adipart, adonis, anosim, beals, betadisper, betadiver,
-bgdispersal, bioenv, bstick, cIndexKM, calibrate, capscale, cascadeKM,
-cca, commsimulator, contribdiv, clamtest, decorana, decostand, designdist,
-coverscale, dispindmorisita, distconnected, diversity, downweight,
-drarefy, eigengrad, eigenvals, envfit, estaccumR, estimateR, eventstar,
-factorfit, fisherfit, fitspecaccum, goodness, hiersimu, humpfit,
-indpower, inertcomp, initMDS, intersetcor, isomapdist, isomap,
-linestack, mantel, meandist, metaMDSdist, metaMDSiter, metaMDSredist,
-MDSrotate, metaMDS, monoMDS, mrpp, msoplot, mso, multipart,
-nestedbetajac, nestedbetasor, 
-nestedchecker, nesteddisc, nestedn0, nestednodf, nestedtemp, oecosimu,
-ordiR2step, ordiarrows, ordicloud, ordicluster,
-ordiellipse, ordigrid, ordihull, ordilabel, ordiplot3d,
-ordiplot, ordipointlabel, ordiresids, ordirgl, ordisegments,
-ordispider, ordisplom, ordistep, ordisurf, orditkplot, orditorp,
-ordixyplot, orglpoints, orglsegments, orglspider, orgltext, 
-pcnm, permatfull, permatswap, permutest, poolaccum, postMDS, prc,
-prestondistr, prestonfit, procrustes, protest, radfit, radlattice,
-rankindex, rarecurve, rarefy, raupcrick, rda, renyiaccum, renyi,
-rrarefy, scores, showvarparts, simper, spandepth, spantree, specaccum,
-specnumber, specpool2vect, specpool, spenvcor, stepacross, stressplot,
-swan, tabasco, taxa2dist, taxondive, tolerance, treedist, treedive, treeheight,
-tsallisaccum, tsallis, varpart, vectorfit, vegandocs, vegdist,
-vegemite, veiledspec, wascores, wcmdscale, wisconsin)
-
+bgdispersal, bioenv, bioenvdist, bstick, cIndexKM, calibrate, capscale,
+cascadeKM, cca, contribdiv, clamtest, commsim, decorana,
+decostand, designdist, coverscale, dispweight, dispindmorisita, distconnected,
+diversity, downweight, drarefy, eigengrad, eigenvals, envfit,
+estaccumR, estimateR, eventstar, factorfit, fisherfit, fitspecaccum,
+gdispweight,goodness, hiersimu, humpfit, indpower, inertcomp, initMDS,
+intersetcor, isomapdist, isomap, linestack, mantel, meandist,
+metaMDSdist, metaMDSiter, metaMDSredist, MDSrotate, metaMDS, monoMDS,
+mrpp, msoplot, mso, multipart, make.commsim, nestedbetajac, nestedbetasor, nestedchecker,
+nesteddisc, nestedn0, nestednodf, nestedtemp, nullmodel, oecosimu,
+ordiareatest,
+ordiR2step, ordiarrows, ordicloud, ordicluster, ordiellipse, ordigrid,
+ordihull, ordilabel, ordiplot, ordipointlabel, ordiresids,
+ordisegments, ordispider, ordisplom, ordistep, ordisurf,
+orditkplot, orditorp, ordixyplot, 
+pcnm, permatfull, permatswap, permustats, permutest,
+poolaccum, postMDS, prc, prestondistr, prestonfit, procrustes,
+protest, radfit, radlattice, rankindex, rarefy, rarecurve, raupcrick,
+rda, renyiaccum, renyi, rrarefy, scores,
+showvarparts, simper, spandepth,
+spantree, specaccum, specnumber, specpool2vect, specpool, spenvcor,
+stepacross, stressplot, swan, tabasco, taxa2dist, taxondive, tolerance,
+treedist, treedive, treeheight, tsallisaccum, tsallis, varpart,
+vectorfit, vegandocs, vegdist, vegemite, veiledspec, wascores,
+wcmdscale, wisconsin)
 ## export pasteCall for 'permute'
 export(pasteCall)
 ## export anova.cca for 'BiodiversityR': this should be fixed there
@@ -36,40 +37,52 @@ export(anova.cca)
 ## Export as.mcmc for coda
 export(as.mcmc.oecosimu, as.mcmc.permat)
 ## DEFUNCT: export names defined in vegan-defunct
-export(permuted.index2, getNumObs)
+export(metaMDSrotate)
 
 ## export regular functions with dot names
 
 export(as.fisher, as.mlm, as.preston, as.rad, fieller.MOStest,
 fisher.alpha, kendall.global, kendall.post, make.cepnames,
 mantel.correlog, mantel.partial, no.shared, rad.lognormal, rad.null,
-rad.preempt, rad.zipf, rad.zipfbrot, read.cep, rgl.isomap,
-rgl.renyiaccum, vif.cca)
+rad.preempt, rad.zipf, rad.zipfbrot, read.cep,
+vif.cca)
 
 ## Export panel functions
-export(panel.ordi, panel.ordi3d, prepanel.ordi3d)
+export(panel.ordi, panel.ordiarrows, panel.ordi3d, prepanel.ordi3d)
 
 ## Export .Depracated functions (to be removed later)
-export(metaMDSrotate)
+export(commsimulator)
+S3method(density, adonis)
+S3method(density, anosim)
+S3method(density, mantel)
+S3method(density, mrpp)
+S3method(density, oecosimu)
+S3method(density, permutest.cca)
+S3method(density, protest)
+S3method(densityplot, adonis)
+S3method(densityplot, oecosimu)
 
 ## do NOT export the following internal functions
 
 ## export(ade2vegancca, orderingKM, ordiArgAbsorber, ordiArrowMul,
 ## ordiGetData, ordimedian, ordiNAexclude, ordiNApredict,
 ## ordiParseFormula, ordiTerminfo, pregraphKM, simpleRDA2, varpart2,
-## varpart3, varpart4, veganCovEllipse)
+## varpart3, varpart4, veganCovEllipse, veganMahatrans)
 
-## Registration of S3 methods
+## Imports
 import(stats)
 import(graphics)
-import(lattice)
 import(permute)
-importFrom(utils, head, tail)
+importFrom(utils, head, tail, str)
 importFrom(tools, Rd2txt, startDynamicHelp)
-## nobs only exists in R 2.13.0 -- import from permute with older R
-if (getRversion() < "2.13.0") {
-    importFrom(permute, nobs)
-}
+import(lattice)
+import(parallel)
+import(tcltk)
+importFrom(MASS, isoMDS, sammon, Shepard, mvrnorm)
+importFrom(cluster, daisy)
+## 's' must be imported in mgcv < 1.8-0 (not needed later)
+importFrom(mgcv, gam, s, te)
+## Registration of S3 methods defined in vegan
 # adipart: vegan
 S3method(adipart, default)
 S3method(adipart, formula)
@@ -82,6 +95,7 @@ S3method(RsquareAdj, default)
 S3method(RsquareAdj, glm)
 S3method(RsquareAdj, lm)
 S3method(RsquareAdj, rda)
+S3method(RsquareAdj, capscale)
 # TukeyHSD: stats
 S3method(TukeyHSD, betadisper)
 # add1: stats
@@ -92,6 +106,8 @@ S3method(alias, cca)
 S3method(anova, betadisper)
 S3method(anova, cca)
 S3method(anova, prc)
+# as.hclust: stats
+S3method(as.hclust, spantree)
 ## Do not export as.mcmc now: would need import(coda)
 # as.mcmc: coda <======= rare
 #S3method(as.mcmc, oecosimu)
@@ -134,16 +150,9 @@ S3method(confint, MOStest)
 # cophenetic: stats
 S3method(cophenetic, spantree)
 # density: stats
-S3method(density, adonis)
-S3method(density, anosim)
-S3method(density, mantel)
-S3method(density, mrpp)
-S3method(density, oecosimu)
-S3method(density, permutest.cca)
-S3method(density, protest)
+S3method(density, permustats)
 # densityplot: lattice
-S3method(densityplot, adonis)
-S3method(densityplot, oecosimu)
+S3method(densityplot, permustats)
 # deviance: stats
 S3method(deviance, cca)
 S3method(deviance, rda)
@@ -152,6 +161,7 @@ S3method(deviance, radfit.frame)
 # drop1: stats
 S3method(drop1, cca)
 # eigenvals: vegan
+S3method(eigenvals, betadisper)
 S3method(eigenvals, cca)
 S3method(eigenvals, default)
 S3method(eigenvals, dudi)
@@ -187,11 +197,15 @@ S3method(head, summary.cca)
 # hiersimu: vegan
 S3method(hiersimu, default)
 S3method(hiersimu, formula)
+# methods for hclust object in base R: these would be better in R
+S3method(reorder, hclust)
+S3method(rev, hclust)
 # identify: graphics
 S3method(identify, ordiplot)
 # labels: base
 S3method(labels, envfit)
 # lines: graphics
+S3method(lines, fitspecaccum)
 S3method(lines, humpfit)
 S3method(lines, permat)
 S3method(lines, preston)
@@ -200,6 +214,7 @@ S3method(lines, procrustes)
 S3method(lines, radline)
 S3method(lines, radfit)
 S3method(lines, spantree)
+S3method(lines, specaccum)
 ## logLik: stats
 S3method(logLik, radfit)
 S3method(logLik, radfit.frame)
@@ -225,6 +240,28 @@ S3method(nobs, wcmdscale)
 # ordisurf: vegan
 S3method(ordisurf, default)
 S3method(ordisurf, formula)
+
+## permustats methods
+S3method(permustats, adonis)
+S3method(permustats, anosim)
+S3method(permustats, mantel)
+S3method(permustats, mrpp)
+S3method(permustats, oecosimu)
+S3method(permustats, ordiareatest)
+S3method(permustats, permutest.betadisper)
+S3method(permustats, permutest.cca)
+S3method(permustats, protest)
+## these return an error: no permutation data
+S3method(permustats, CCorA)
+S3method(permustats, envfit)
+S3method(permustats, factorfit)
+S3method(permustats, vectorfit)
+S3method(permustats, mso)
+
+S3method(print, permustats)
+S3method(summary, permustats)
+S3method(print, summary.permustats)
+
 # permutest: vegan
 S3method(permutest, betadisper)
 S3method(permutest, cca)
@@ -306,6 +343,7 @@ S3method(print, betadisper)
 S3method(print, bioenv)
 S3method(print, capscale)
 S3method(print, cca)
+S3method(print, commsim)
 S3method(print, decorana)
 S3method(print, eigenvals)
 S3method(print, envfit)
@@ -324,7 +362,9 @@ S3method(print, nesteddisc)
 S3method(print, nestedn0)
 S3method(print, nestednodf)
 S3method(print, nestedtemp)
+S3method(print, nullmodel)
 S3method(print, oecosimu)
+S3method(print, ordiareatest)
 S3method(print, permat)
 S3method(print, permutest.betadisper)
 S3method(print, permutest.cca)
@@ -336,11 +376,13 @@ S3method(print, radfit)
 S3method(print, radfit.frame)
 S3method(print, radline)
 S3method(print, specaccum)
+S3method(print, simmat)
 S3method(print, simper)
 S3method(print, summary.bioenv)
 S3method(print, summary.cca)
 S3method(print, summary.clamtest)
 S3method(print, summary.decorana)
+S3method(print, summary.dispweight)
 S3method(print, summary.humpfit)
 S3method(print, summary.isomap)
 S3method(print, summary.meandist)
@@ -359,6 +401,10 @@ S3method(print, wcmdscale)
 # see note on 'confint'
 S3method(profile, MOStest)
 S3method(profile, humpfit)
+## qqmath: lattice
+S3method(qqmath, permustats)
+## qqnorm: stats
+S3method(qqnorm, permustats)
 # radfit: vegan
 S3method(radfit, data.frame)
 S3method(radfit, default)
@@ -376,6 +422,7 @@ S3method(scores, cca)
 S3method(scores, decorana)
 S3method(scores, default)
 S3method(scores, envfit)
+S3method(scores, hclust)
 S3method(scores, lda)
 S3method(scores, metaMDS)
 S3method(scores, monoMDS)
@@ -394,15 +441,25 @@ S3method(screeplot, princomp)
 S3method(simulate, capscale)
 S3method(simulate, cca)
 S3method(simulate, rda)
+S3method(simulate, nullmodel)
+# str: utils
+S3method(str, nullmodel)
 # stressplot: vegan
 S3method(stressplot, default)
 S3method(stressplot, monoMDS)
+S3method(stressplot, wcmdscale)
+S3method(stressplot, capscale)
+S3method(stressplot, cca)
+S3method(stressplot, rda)
+S3method(stressplot, prcomp)
+S3method(stressplot, princomp)
 # summary: base
 S3method(summary, anosim)
 S3method(summary, bioenv)
 S3method(summary, cca)
 S3method(summary, clamtest)
 S3method(summary, decorana)
+S3method(summary, dispweight)
 S3method(summary, eigenvals)
 S3method(summary, humpfit)
 S3method(summary, isomap)
@@ -428,6 +485,8 @@ S3method(text, orditkplot)
 S3method(text, procrustes)
 # tolerance: vegan -- or analogue?? Gav?
 S3method(tolerance, cca)
+# update: stats
+S3method(update, nullmodel)
 # vif: car -- but not used as a S3method within vegan
 # because of car definition: could be defined as exported 'vif' generic
 # in vegan with namespace
diff --git a/R/CCorA.R b/R/CCorA.R
index 9a282da..bb6f81c 100644
--- a/R/CCorA.R
+++ b/R/CCorA.R
@@ -1,5 +1,5 @@
 `CCorA` <-
-    function(Y, X, stand.Y = FALSE, stand.X = FALSE, nperm = 0, ...)
+    function(Y, X, stand.Y = FALSE, stand.X = FALSE, permutations = 0, ...)
 {
     epsilon <- sqrt(.Machine$double.eps)
     ##
@@ -40,20 +40,15 @@
         }
         invisible(0)
     }
-    probPillai <- function(Y, X, n, S11.inv, S22.inv, s, df1, df2, epsilon,
-                           Fref, nperm, ...) {
+    probPillai <- function(Y.per, X, n, S11.inv, S22.inv, s, df1, df2, epsilon,
+                           Fref, permat, ...) {
         ## Permutation test for Pillai's trace in CCorA.
         ## Reference: Brian McArdle's unpublished graduate course notes.
-        nGE <- 1
-        for(i in 1:nperm) {
-            Y.per <- Y[permuted.index(n, ...),, drop=FALSE]
-            S12.per <- cov(Y.per,X)
-            gross.mat <- S12.per %*% S22.inv %*% t(S12.per) %*% S11.inv
-            Pillai.per <- sum(diag(gross.mat))
-            Fper  <- (Pillai.per*df2)/((s-Pillai.per)*df1)
-            if(Fper >= (Fref-epsilon)) nGE <- nGE+1
-        }
-        P <- nGE/(nperm+1)
+        S12.per <- cov(Y.per,X)
+        gross.mat <- S12.per %*% S22.inv %*% t(S12.per) %*% S11.inv
+        Pillai.per <- sum(diag(gross.mat))
+        Fper  <- (Pillai.per*df2)/((s-Pillai.per)*df1)
+        Fper >= (Fref-epsilon)
     }
     ## END: internal functions
     ##
@@ -161,21 +156,29 @@
     df2 <- (n - max(pp,qq) - 1)
     Fval  <- (PillaiTrace*df2)/((s-PillaiTrace)*df1)
     p.Pillai <- pf(Fval, s*df1, s*df2, lower.tail=FALSE)
+    permat <- getPermuteMatrix(permutations, n, ...)
+    nperm <- nrow(permat)
+    if (ncol(permat) != n)
+        stop(gettextf("'permutations' have %d columns, but data have %d rows",
+                      ncol(permat), n))
 
-    if(nperm > 0) {
-        p.perm <- probPillai(Y, X, n, S11.inv, S22.inv, s, df1, df2,
-                             epsilon, Fval, nperm, ...)
+    if (nperm > 0) {
+        p.perm <- sapply(1:nperm, function(indx, ...) 
+                         probPillai(Y[permat[indx,],] , X, n, S11.inv, S22.inv, s,
+                                    df1, df2, epsilon, Fval, nperm, ...))
+        p.perm <- (sum(p.perm) +1)/(nperm + 1)
     } else {
         p.perm <- NA
     }
-
+    
     out <- list(Pillai=PillaiTrace, Eigenvalues=Eigenvalues, CanCorr=K.svd$d,
                 Mat.ranks=c(RsquareX.Y$m, RsquareY.X$m), 
                 RDA.Rsquares=c(RsquareY.X$Rsquare, RsquareX.Y$Rsquare),
                 RDA.adj.Rsq=c(Rsquare.adj.Y.X, Rsquare.adj.X.Y),
                 nperm=nperm, p.Pillai=p.Pillai, p.perm=p.perm, Cy=Cy, Cx=Cx, 
                 corr.Y.Cy=corr.Y.Cy, corr.X.Cx=corr.X.Cx, corr.Y.Cx=corr.Y.Cx, 
-                corr.X.Cy=corr.X.Cy, call = match.call())
+                corr.X.Cy=corr.X.Cy, control = attr(permat, "control"),
+                call = match.call())
     class(out) <- "CCorA"
     out
 }
diff --git a/R/MDSrotate.R b/R/MDSrotate.R
index b49d959..09ad88e 100644
--- a/R/MDSrotate.R
+++ b/R/MDSrotate.R
@@ -15,45 +15,43 @@
     N <- NCOL(x)
     if (N < 2)
         stop(gettextf("needs at least 2 dimensions"))
-    vec <- drop(vec)
-    if (length(dim(vec)) > 1)
-        stop(gettextf("function works only with univariate 'vec'"))
+    vec <- as.matrix(vec)
+    NV <- NCOL(vec)
+    if (NV >= N)
+        stop(gettextf("You can have max %d vectors, but you had %d",
+             N-1, NV))
     if (!is.numeric(vec))
         stop(gettextf("'vec' must be numeric"))
     ## vectorfit finds the direction cosine. We rotate first axis to
     ## 'vec' which means that we make other axes orthogonal to 'vec'
     ## one by one
     if (na.rm)
-        keep <- !is.na(vec)
+        keep <- complete.cases(vec)
     else
-        keep <- !logical(length(vec))
-    ## scores must be orthogonal for the next loop to work
-    if (N > 2) {
-        pc <- prcomp(x[keep,])
-        x <- x %*% pc$rotation
-        if (!all(is.na(sp)))
-            sp <- sp %*% pc$rotation
-    }
+        keep <- !logical(NROW(vec))
     ## Rotation loop
-    for (k in 2:N) {
-        rot <- vectorfit(x[keep, c(1,k)], vec[keep], permutations=0)$arrows
-        rot <- drop(rot)
-        ## counterclockwise rotation matrix:
-        ## [cos theta   -sin theta]
-        ## [sin theta    cos theta]
-        rot <- rbind(rot, rev(rot))
-        rot[1,2] <- -rot[1,2]
-        ## Rotation of points and species scores
-        x[, c(1,k)] <- x[, c(1,k)] %*% rot
-        if (!all(is.na(sp)))
-            sp[, c(1,k)] <- sp[, c(1,k)] %*% rot
+    for(v in seq_len(NV)) {
+        for (k in (v+1):N) {
+            arrs <- vectorfit(x[keep,], vec[keep,v], permutations = 0)$arrows
+            rot <- arrs[c(v,k)]/sqrt(sum(arrs[c(v,k)]^2))
+            rot <- drop(rot)
+            ## counterclockwise rotation matrix:
+            ## [cos theta   -sin theta]
+            ## [sin theta    cos theta]
+            rot <- rbind(rot, rev(rot))
+            rot[1,2] <- -rot[1,2]
+            ## Rotation of points and species scores
+            x[, c(v,k)] <- x[, c(v,k)] %*% rot
+            if (!all(is.na(sp)))
+                sp[, c(v,k)] <- sp[, c(v,k)] %*% rot
+        }
     }
-    ## Rotate 2..N axes to PC
-    if (N > 2 && attr(object$points, "pc")) {
-        pc <- prcomp(x[,-1])
-        x[,-1] <- pc$x
+    ## Two or more free axes are (optionally) rotated to PCs
+    if (N - NV > 1 && attr(object$points, "pc")) {
+        pc <- prcomp(x[,-seq_len(NV)])
+        x[,-seq_len(NV)] <- pc$x
         if (!all(is.na(sp)))
-            sp[,-1] <- sp[,-1] %*% pc$rotation
+            sp[,-seq_len(NV)] <- sp[,-seq_len(NV)] %*% pc$rotation
     }
     ## '[] <-' retains attributes
     object$points[] <- x
diff --git a/R/RsquareAdj.R b/R/RsquareAdj.R
index a586558..00db6a7 100644
--- a/R/RsquareAdj.R
+++ b/R/RsquareAdj.R
@@ -33,6 +33,19 @@
     list(r.squared = R2, adj.r.squared = radj)
 }
 
+## dbRDA: Euclidean style distances with no imaginary component can be
+## handled as rda, but I have no idea how to handle objects with
+## imaginary inertia.
+
+`RsquareAdj.capscale` <-
+    function(x, ...)
+{
+    if (!is.null(x$CA$imaginary.chi))
+        list(r.squared = NA, adj.r.squared = NA)
+    else
+        NextMethod("RsquareAdj", x, ...)
+}
+
 ## cca result: no RsquareAdj
 RsquareAdj.cca <-
     function(x, ...)
diff --git a/R/SSarrhenius.R b/R/SSarrhenius.R
index 9b32eea..ce1499c 100644
--- a/R/SSarrhenius.R
+++ b/R/SSarrhenius.R
@@ -3,7 +3,7 @@ SSarrhenius <-
               function(mCall, data, LHS)
 {
     xy <- sortedXyData(mCall[["area"]], LHS, data)
-    value <- as.vector(coef(lm(log(xy[,"y"]) ~ log(xy[,"x"]))))
+    value <- as.vector(coef(lm(log(pmax(xy[,"y"],1)) ~ log(xy[,"x"]))))
     value[1] <- exp(value[1])
     names(value) <- mCall[c("k","z")]
     value
diff --git a/R/add1.cca.R b/R/add1.cca.R
index a70502e..396fa0a 100644
--- a/R/add1.cca.R
+++ b/R/add1.cca.R
@@ -1,6 +1,6 @@
 `add1.cca`<-
     function(object, scope, test = c("none", "permutation"),
-             pstep = 100, perm.max = 200, ...)
+             permutations = how(nperm = 199), ...)
 {
     if (inherits(object, "prc"))
         stop("'step'/'add1' cannot be used for 'prc' objects")
@@ -15,7 +15,7 @@
         if (!is.character(scope)) 
             scope <- add.scope(object, update.formula(object, scope))
         ns <- length(scope)
-        adds <- matrix(0, ns+1, 3)
+        adds <- matrix(0, ns+1, 2)
         adds[1, ] <- NA
         for (i in 1:ns) {
             tt <- scope[i]
@@ -27,11 +27,16 @@
             else
                 nfit <- update(object,
                                as.formula(paste(". ~ . +", tt)))
-            tmp <- anova(nfit, step = pstep, perm.max = perm.max, ...)
-            adds[i+1,] <- unlist(tmp[1,3:5])
+            tmp <- anova(nfit,  permutations = permutations, ...)
+            adds[i+1,] <- unlist(tmp[1,3:4])
         }
-        colnames(adds) <- colnames(tmp)[3:5]
+        colnames(adds) <- colnames(tmp)[3:4]
         out <- cbind(out, adds)
+        ## check for redundant (0 Df) terms
+        if (any(nas <- out[,1] < 1, na.rm = TRUE)) {
+            out[[3]][nas] <- NA
+            out[[4]][nas] <- NA
+        }
         class(out) <- cl
     }
     out
diff --git a/R/ade2vegancca.R b/R/ade2vegancca.R
index d44908b..55ecc42 100644
--- a/R/ade2vegancca.R
+++ b/R/ade2vegancca.R
@@ -1,4 +1,3 @@
-
 `ade2vegancca` <-
     function(object)
 {
@@ -6,16 +5,14 @@
     CCA <- list(eig = object$eig,
                 u = as.matrix(object$l1),
                 v = as.matrix(object$c1),
-                u.eig = as.matrix(object$li),
-                v.eig = as.matrix(object$co),
-                wa.eig = as.matrix(object$ls),
+                wa = sweep(as.matrix(object$ls), 2,
+                           1/sqrt(object$eig[1:nf]), "*"),
                 biplot = as.matrix(object$cor)[-1,],
                 rank = object$rank,
                 tot.chi = sum(object$eig),
                 QR = NA,
                 envcentre = NA,
                 Xbar = NA)
-    CCA$wa <- sweep(CCA$wa.eig, 2, 1/sqrt(object$eig[1:nf]), "*")
     out <- list(call = object$call,
                 grand.total = NA,
                 rowsum = object$lw,
diff --git a/R/adipart.default.R b/R/adipart.default.R
index 28691f2..7109aff 100644
--- a/R/adipart.default.R
+++ b/R/adipart.default.R
@@ -35,7 +35,7 @@ function(y, x, index=c("richness", "shannon", "simpson"),
     rval <- as.data.frame(rval[rev(1:length(rval))])
     l2 <- sapply(rval, function(z) length(unique(z)))
     if (any(l1 != l2))
-        warning("levels are not perfectly nested")
+        stop("levels are not perfectly nested")
 
     ## aggregate response matrix
     fullgamma <-if (nlevels(rhs[,nlevs]) == 1)
diff --git a/R/adonis.R b/R/adonis.R
index 479d020..4ad79e3 100644
--- a/R/adonis.R
+++ b/R/adonis.R
@@ -1,7 +1,7 @@
 `adonis` <-
     function(formula, data=NULL, permutations=999, method="bray", strata=NULL,
              contr.unordered="contr.sum", contr.ordered="contr.poly",
-             ...)
+             parallel = getOption("mc.cores"), ...)
 {
     ## formula is model formula such as Y ~ A + B*C where Y is a data
     ## frame or a matrix, and A, B, and C may be factors or continuous
@@ -90,22 +90,52 @@
           ) }
 
     ## Permutations
-    if (missing(strata))
-        strata <- NULL
-    p <- sapply(1:permutations,
-                function(x) permuted.index(n, strata=strata))
-
-
-    tH.s <- lapply(H.s, t)
-    ## Apply permutations for each term
-    ## This is the new f.test (2011-06-15) that uses fewer arguments
-    f.perms <- sapply(1:nterms, function(i) {
-        sapply(1:permutations, function(j) {
-            f.test(tH.s[[i]], G[p[,j], p[,j]], df.Exp[i], df.Res, tIH.snterm)
-        } )
-    })
-    ## Round to avoid arbitrary P-values with tied data
-    f.perms <- round(f.perms, 12)
+    p <- getPermuteMatrix(permutations, n, strata = strata)
+    permutations <- nrow(p)
+    if (permutations) {
+        tH.s <- lapply(H.s, t)
+        ## Apply permutations for each term
+        ## This is the new f.test (2011-06-15) that uses fewer arguments
+        ## Set first parallel processing for all terms
+        if (is.null(parallel))
+            parallel <- 1
+        hasClus <- inherits(parallel, "cluster")
+        isParal <- (hasClus || parallel > 1) && require(parallel)
+        isMulticore <- .Platform$OS.type == "unix" && !hasClus
+        if (isParal && !isMulticore && !hasClus) {
+            parallel <- makeCluster(parallel)
+        }
+        if (isParal) {
+            if (isMulticore) {
+                f.perms <-
+                    sapply(1:nterms, function(i)
+                           unlist(mclapply(1:permutations, function(j)
+                                           f.test(tH.s[[i]], G[p[j,], p[j,]],
+                                                  df.Exp[i], df.Res, tIH.snterm),
+                                           mc.cores = parallel)))
+            } else {
+                f.perms <-
+                    sapply(1:nterms, function(i)
+                           parSapply(parallel, 1:permutations, function(j)
+                                     f.test(tH.s[[i]], G[p[j,], p[j,]],
+                                            df.Exp[i], df.Res, tIH.snterm)))
+            }
+        } else {
+            f.perms <-
+                sapply(1:nterms, function(i) 
+                       sapply(1:permutations, function(j) 
+                              f.test(tH.s[[i]], G[p[j,], p[j,]],
+                                     df.Exp[i], df.Res, tIH.snterm)))
+        }
+        ## Close socket cluster if created here
+        if (isParal && !isMulticore && !hasClus)
+            stopCluster(parallel)
+        ## Round to avoid arbitrary P-values with tied data
+        f.perms <- round(f.perms, 12)
+        P <- (rowSums(t(f.perms) >= F.Mod)+1)/(permutations+1)
+    } else { # no permutations
+        f.perms <- P <- rep(NA, nterms)
+    }
     F.Mod <- round(F.Mod, 12)
     SumsOfSqs = c(SS.Exp.each, SS.Res, sum(SS.Exp.each) + SS.Res)
     tab <- data.frame(Df = c(df.Exp, df.Res, n-1),
@@ -113,12 +143,12 @@
                       MeanSqs = c(SS.Exp.each/df.Exp, SS.Res/df.Res, NA),
                       F.Model = c(F.Mod, NA,NA),
                       R2 = SumsOfSqs/SumsOfSqs[length(SumsOfSqs)],
-                      P = c((rowSums(t(f.perms) >= F.Mod)+1)/(permutations+1),
-                      NA, NA))
+                      P = c(P, NA, NA))
     rownames(tab) <- c(attr(attr(rhs.frame, "terms"), "term.labels")[u.grps],
                        "Residuals", "Total")
     colnames(tab)[ncol(tab)] <- "Pr(>F)"
-    attr(tab, "heading") <- "Terms added sequentially (first to last)\n"
+    attr(tab, "heading") <- c(howHead(attr(p, "control")),
+        "Terms added sequentially (first to last)\n")
     class(tab) <- c("anova", class(tab))
     out <- list(aov.tab = tab, call = match.call(),
                 coefficients = beta.spp, coef.sites = beta.sites,
diff --git a/R/anosim.R b/R/anosim.R
index 4cf8c20..e976191 100644
--- a/R/anosim.R
+++ b/R/anosim.R
@@ -1,6 +1,6 @@
 `anosim` <-
     function (dat, grouping, permutations = 999,
-              distance = "bray", strata) 
+              distance = "bray", strata = NULL, parallel = getOption("mc.cores")) 
 {
     if (inherits(dat, "dist")) 
         x <- dat
@@ -29,28 +29,51 @@
     take <- as.numeric(irow[within])
     cl.vec[within] <- levels(grouping)[grouping[take]]
     cl.vec <- factor(cl.vec, levels = c("Between", levels(grouping)))
+    ptest <- function(take, ...) {
+        cl.perm <- grouping[take]
+        tmp.within <- matched(irow, icol, cl.perm)
+        tmp.ave <- tapply(x.rank, tmp.within, mean)
+        -diff(tmp.ave)/div
+    }
+    permat <- getPermuteMatrix(permutations, N, strata = strata)
+    if (ncol(permat) != N)
+        stop(gettextf("'permutations' have %d columns, but data have %d rows",
+                      ncol(permat), N))
+    permutations <- nrow(permat)
+
     if (permutations) {
-        perm <- rep(0, permutations)
-        for (i in 1:permutations) {
-            take <- permuted.index(N, strata)
-            cl.perm <- grouping[take]
-            tmp.within <- matched(irow, icol, cl.perm)
-            tmp.ave <- tapply(x.rank, tmp.within, mean)
-            perm[i] <- -diff(tmp.ave)/div
+        ## Parallel processing
+        if (is.null(parallel))
+            parallel <- 1
+        hasClus <- inherits(parallel, "cluster")
+        if ((hasClus || parallel > 1)  && require(parallel)) {
+            if(.Platform$OS.type == "unix" && !hasClus) {
+                perm <- unlist(mclapply(1:permutations, function(i, ...)
+                                        ptest(permat[i,]),
+                                        mc.cores = parallel))
+            } else {
+                if (!hasClus) {
+                    parallel <- makeCluster(parallel)
+                }
+                perm <- parRapply(parallel, permat, ptest)
+                if (!hasClus)
+                    stopCluster(parallel)
+            }
+        } else {
+            perm <- sapply(1:permutations, function(i) ptest(permat[i,]))
         }
         p.val <- (1 + sum(perm >= statistic))/(1 + permutations)
-        sol$signif <- p.val
-        sol$perm <- perm
+    } else { # no permutations
+        p.val <- perm <- NA
     }
+    sol$signif <- p.val
+    sol$perm <- perm
     sol$permutations <- permutations
     sol$statistic <- as.numeric(statistic)
     sol$class.vec <- cl.vec
     sol$dis.rank <- x.rank
-    sol$dissimilarity <- attr(x, "method") 
-    if (!missing(strata)) {
-        sol$strata <- deparse(substitute(strata))
-        sol$stratum.values <- strata
-    }
+    sol$dissimilarity <- attr(x, "method")
+    sol$control <- attr(permat, "control")
     class(sol) <- "anosim"
     sol
 }
diff --git a/R/anova.cca.R b/R/anova.cca.R
index 5ac4f03..4da0f48 100644
--- a/R/anova.cca.R
+++ b/R/anova.cca.R
@@ -1,62 +1,73 @@
 `anova.cca` <-
-    function (object, alpha = 0.05, beta = 0.01, step = 100, perm.max = 9999, 
-              by = NULL, ...) 
+    function(object, ..., permutations = how(nperm=999), by = NULL,
+             model = c("reduced", "direct", "full"),
+             parallel = getOption("mc.cores"), strata = NULL,
+             cutoff = 1, scope = NULL)
 {
+    model <- match.arg(model)
+    ## permutation matrix
+    N <- nrow(object$CA$u)
+    permutations <- getPermuteMatrix(permutations, N, strata = strata)
+    seed <- attr(permutations, "seed")
+    control <- attr(permutations, "control")
+    nperm <- nrow(permutations)
+    ## see if this was a list of ordination objects
+    dotargs <- list(...)
+    ## we do not want to give dotargs to anova.ccalist, but we
+    ## evaluate 'parallel' and 'model' here
+    if (length(dotargs)) {
+        isCCA <- sapply(dotargs, function(z) inherits(z, "cca"))
+        if (any(isCCA)) {
+            dotargs <- dotargs[isCCA]
+            object <- c(list(object), dotargs)
+            sol <-
+                anova.ccalist(object,
+                              permutations = permutations,
+                              model = model,
+                              parallel = parallel)
+            attr(sol, "Random.seed") <- seed
+            attr(sol, "control") <- control
+            return(sol)
+        }
+    }
+    ## We only have a single model: check if it is empty
     if (is.null(object$CA) || is.null(object$CCA) ||
         object$CCA$rank == 0 || object$CA$rank == 0)
         return(anova.ccanull(object))
-    perm.max <- max(step-1, perm.max)
-    if (perm.max %% step == 0)
-        perm.max <- perm.max - 1
+    ## by cases
     if (!is.null(by)) {
-        by <- match.arg(by, c("axis", "terms", "margin"))
-        if (by == "axis") 
-            sol <- anova.ccabyaxis(object, alpha = alpha, beta = beta, 
-                                   step = step, perm.max = perm.max, by = NULL, 
-                                   ...)
-        else if (by == "margin") {
-            sol <- anova.ccabymargin(object, alpha = alpha, beta = beta,
-                                     step = step, perm.max = perm.max,
-                                     by = NULL, ...)
-            }
-        else {
-            mf <- match.call(expand.dots = FALSE)
-            if (!is.null(mf$...) && any(k <- pmatch(names(mf$...), 
-                                                    "permutations", nomatch = FALSE))) 
-                step <- unlist(mf$...[k == 1])
-            sol <- anova.ccabyterm(object, step = step, ...)
-        }
+        by <- match.arg(by, c("terms", "margin", "axis"))
+        sol <- switch(by,
+                      "terms" = anova.ccabyterm(object,
+                      permutations = permutations,
+                      model = model, parallel = parallel),
+                      "margin" = anova.ccabymargin(object,
+                      permutations = permutations,
+                      model = model, parallel = parallel,
+                      scope = scope),
+                      "axis" = anova.ccabyaxis(object,
+                      permutations = permutations,
+                      model = model, parallel = parallel,
+                      cutoff = cutoff))
+        attr(sol, "Random.seed") <- seed
+        attr(sol, "control") <- control
         return(sol)
     }
-    seed <- NULL
-    betaq <- c(beta/2, 1 - beta/2)
-    nperm <- 0
-    unsure <- TRUE
-    hits <- 0
-    while (unsure && nperm < perm.max) {
-        adj <- as.numeric(nperm == 0)
-        tst <- permutest.cca(object, step - adj, ...)
-        if (is.null(seed)) 
-            seed <- tst$Random.seed
-        nperm <- nperm + step - adj
-        hits <- hits + sum(tst$F.perm >= tst$F.0)
-        fork <- qbinom(betaq, nperm, alpha)
-        if (hits < fork[1] || hits > fork[2]) 
-            unsure <- FALSE
-    }
+    ## basic overall test: pass other arguments except 'strata'
+    ## because 'permutations' already is a permutationMatrix
+    tst <- permutest.cca(object, permutations = permutations,
+                         model = model, parallel = parallel, ...)
     Fval <- c(tst$F.0, NA)
-    Pval <- c((hits+1)/(nperm+1), NA)
-    nperm <- c(nperm, NA)
-    table <- data.frame(tst$df, tst$chi, Fval, nperm, Pval)
+    Pval <- (sum(tst$F.perm >= tst$F.0) + 1)/(tst$nperm + 1)
+    Pval <- c(Pval, NA)
+    table <- data.frame(tst$df, tst$chi, Fval, Pval)
     is.rda <- inherits(object, "rda")
-    colnames(table) <- c("Df", ifelse(is.rda, "Var", "Chisq"), 
-                         "F", "N.Perm", "Pr(>F)")
-    head <- paste("Permutation test for", tst$method, "under", 
-                  tst$model, "model\n")
-    if (!is.null(tst$strata)) 
-        head <- paste(head, "Permutations stratified within '", 
-                      tst$strata, "'\n", sep = "")
+    colnames(table) <- c("Df", ifelse(is.rda, "Variance", "ChiSquare"),
+                         "F", "Pr(>F)")
+    head <- paste0("Permutation test for ", tst$method, " under ",
+                  tst$model, " model\n", howHead(control))
     mod <- paste("Model:", c(object$call))
-    structure(table, heading = c(head, mod), Random.seed = seed, 
+    structure(table, heading = c(head, mod), Random.seed = seed,
+              control = control,
               class = c("anova.cca", "anova", "data.frame"))
 }
diff --git a/R/anova.ccabyaxis.R b/R/anova.ccabyaxis.R
deleted file mode 100644
index 86b533f..0000000
--- a/R/anova.ccabyaxis.R
+++ /dev/null
@@ -1,89 +0,0 @@
-`anova.ccabyaxis` <-
-    function (object, cutoff = 1,  ...) 
-{
-    cutoff <- cutoff + sqrt(.Machine$double.eps)
-    rnk <- object$CCA$rank
-    if (!max(rnk, 0)) 
-        stop("Needs a constrained ordination")
-    if (is.null(object$terms)) 
-        stop("Analysis is only possible for models fitted using formula")
-    ## Handle missing values in scores, both "omit" and "exclude" to
-    ## match dims with data.
-    if (!is.null(object$na.action)) {
-        u <- napredict(structure(object$na.action, class="exclude"),
-                       object$CCA$u)
-    } else {
-        u <- object$CCA$u
-    }
-    ## Get conditions
-    if (!is.null(object$pCCA)) {
-        CondMat <- qr.X(object$pCCA$QR)
-        ## deweight if CCA
-        if (!inherits(object, "rda"))
-            CondMat <- sweep(CondMat, 1, sqrt(object$rowsum), "/")
-    }
-    else
-        CondMat <- NULL
-    ## pad with NA rows if there is a subset
-    if (!is.null(object$subset)) {
-        lc <- matrix(NA, nrow=length(object$subset),
-                     ncol = NCOL(u))
-        lc[object$subset,]  <- u
-        if (!is.null(CondMat)) {
-            tmp <- matrix(NA, nrow=length(object$subset),
-                          ncol = NCOL(CondMat))
-            tmp[object$subset,] <- CondMat
-            CondMat <- tmp
-        }
-        object$call$subset <- object$subset
-    } else {
-        lc <- u
-    }
-    lc <- as.data.frame(lc)
-    axnam <- colnames(lc)
-    df <- c(rep(1, rnk), object$CA$rank)
-    chi <- c(object$CCA$eig, Residual = object$CA$tot.chi)
-    Fval <- c(chi[1:rnk]/df[1:rnk]/chi[rnk+1]*df[rnk+1], NA)
-    nperm <- c(numeric(rnk), NA)
-    Pval <- rep(NA, rnk+1)
-    out <- data.frame(df, chi, Fval, nperm, Pval)
-    environment(object$terms) <- environment()
-    fla <- paste(". ~ ", axnam[1], "+ Condition(",
-                 paste(axnam[-1], collapse="+"),")")
-    if (!is.null(CondMat)) {
-        fla <- paste(fla, " + Condition(CondMat)")
-        lc$CondMat <- CondMat
-    }
-    fla <- update(formula(object), fla)
-    sol <- anova(update(object, fla, data=lc),  ...)
-    out[c(1, rnk + 1), ] <- sol
-    seed <- attr(sol, "Random.seed")
-    attr(out, "names") <- attr(sol, "names")
-    .call <- pasteCall(object$call, "Model:")
-    attr(out, "heading") <- sub(" \n","", .call)
-    attr(out, "Random.seed") <- seed
-    bigseed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
-    bigperm <- out$N.Perm[1]
-    if (rnk > 1) {
-        for (.ITRM in 2:rnk) {
-            fla <- paste(".~", axnam[.ITRM], "+Condition(",
-                         paste(axnam[-(.ITRM)], collapse="+"),")")
-            if (!is.null(CondMat))
-                fla <- paste(fla, "+ Condition(CondMat)")
-            fla <- update(formula(object),  fla) 
-            sol <- update(object, fla, data = lc)
-            assign(".Random.seed", seed, envir = .GlobalEnv)
-            out[.ITRM, ] <- as.matrix(anova(sol, ...))[1,]
-            if (out[.ITRM, "N.Perm"] > bigperm) {
-                bigperm <- out[.ITRM, "N.Perm"]
-                bigseed <- get(".Random.seed", envir = .GlobalEnv, 
-                  inherits = FALSE)
-            }
-            if (out[.ITRM, "Pr(>F)"] > cutoff)
-                break
-        }
-    }
-    assign(".Random.seed", bigseed, envir = .GlobalEnv)
-    class(out) <- c("anova.cca", "anova", "data.frame")
-    out
-}
diff --git a/R/anova.ccabymargin.R b/R/anova.ccabymargin.R
deleted file mode 100644
index 351b385..0000000
--- a/R/anova.ccabymargin.R
+++ /dev/null
@@ -1,52 +0,0 @@
-`anova.ccabymargin` <-
-    function(object, step=100, scope, ...)
-{
-    if(inherits(object, "prc"))
-        stop("anova(..., by = 'margin') cannot be used for 'prc' results")
-    if (!missing(scope) && is.character(scope))
-        trms <- scope
-    else
-        trms <- drop.scope(object, scope)
-    alltrms <- labels(terms(object$terminfo))
-    keep <- trms %in% alltrms
-    trms <- trms[keep]
-    ntrms <- length(trms)
-    bigperm <- 0
-    for (.ITRM in 1:ntrms) {
-        fla <- formula(object)
-        ## Put all trms except current into Condition() and update
-        ## formula
-        if (length(alltrms) > 1) {
-            keeptrms <- alltrms[!(alltrms==trms[.ITRM])]
-            updfla <- paste("Condition(",paste(keeptrms, collapse="+"), ")")
-            fla <- update(fla, paste(". ~ . + ", updfla))
-        }
-        tmp <- update(object, fla)
-        tmp <- anova(tmp, step=step, ...)
-        ## Start every permutation from the same seed, but get the
-        ## seed of the longest simulation and reset the RNG to that
-        ## state when exiting the function
-        if (tmp[1,"N.Perm"] > bigperm) {
-            bigperm <- tmp[1, "N.Perm"]
-            bigseed <- get(".Random.seed", envir = .GlobalEnv,
-                           inherits = FALSE)
-        }
-        if (.ITRM == 1) {
-            seed <- attr(tmp, "Random.seed")
-            sol <- tmp
-        }
-        else {
-            sol <- rbind(sol[1:(.ITRM-1),], as.matrix(tmp[1,]), sol[.ITRM,])
-        }
-        assign(".Random.seed", seed, envir = .GlobalEnv)
-    }
-    ## Put RNG at the end of the longest simulation
-    if (bigperm > 0)
-        assign(".Random.seed", bigseed, envir = .GlobalEnv)
-    rownames(sol)[1:ntrms] <- trms
-    head <- attr(sol, "heading")
-    head[1] <- paste(head[1], "Marginal effects of terms\n", sep="")
-    head[2] <- paste("Model:", c(object$call))
-    attr(sol, "heading") <- head
-    sol
-}
diff --git a/R/anova.ccabyterm.R b/R/anova.ccabyterm.R
index e07a723..8b0e509 100644
--- a/R/anova.ccabyterm.R
+++ b/R/anova.ccabyterm.R
@@ -1,78 +1,171 @@
+### Implementation of by-cases for vegan 2.2 versions of
+### anova.cca. These are all internal functions that are not intended
+### to be called by users in normal sessions, but they should be
+### called from anova.cca (2.2). Therefore the user interface is rigid
+### and input is not checked. The 'permutations' should be a
+### permutation matrix.
+
+### by = terms builds models as a sequence of adding terms and submits
+### this to anova.ccalist
+
 `anova.ccabyterm` <-
-    function (object, step = 100, ...) 
+    function(object, permutations, model, parallel)
 {
-    ## Data set size may change during iteration if there are missing
-    ## values: use length(object$residual) to check this like step,
-    ## drop1.default, add1.default do.
-    n0 <- length(object$residuals)
-    trm <- terms(object)
-    call <- paste("Model:", c(object$call))
-    trmlab <- attr(trm, "term.labels")
-    trmlab <- trmlab[trmlab %in% attr(terms(object$terminfo), 
+    ## We need term labels but without Condition() terms
+    trms <- terms(object)
+    trmlab <- attr(trms, "term.labels")
+    trmlab <- trmlab[trmlab %in% attr(terms(object$terminfo),
                                       "term.labels")]
     ntrm <- length(trmlab)
-    ## 'adj' puts the result together with the permutations and reduces
-    ## number of simulations by one so that P = (hits+1)/(permutations+1).
-    ## The first step is reduced by adj.
-    adj <- (step %% 10) == 0
-    step <- step - adj
-    pchi <- matrix(0, nrow = ntrm + 1, ncol = step)
-    chi <- numeric(ntrm + 1)
-    df <- numeric(ntrm + 1)
-    names(df) <- c(trmlab, "Residual")
-    sim <- permutest.cca(object, permutations = step, ...)
-    pchi[ntrm + 1, ] <- sim$den
-    pchi[ntrm, ] <- sim$num
-    df[ntrm:(ntrm + 1)] <- sim$df
-    chi[ntrm:(ntrm + 1)] <- sim$chi
-    if (!is.null(object$call$data))
-        modelframe <- ordiGetData(object$call, globalenv())
+    m0 <- update(object, paste(".~.-", paste(trmlab, collapse="-")))
+    mods <- list(m0)
+    for(i in seq_along(trmlab)) {
+        fla <- paste(". ~ . + ", trmlab[i])
+        mods[[i+1]] <- update(mods[[i]], fla)
+    }
+    ## The result
+    sol <- anova.ccalist(mods, permutations = permutations,
+                         model = model, parallel = parallel)
+    ## Reformat
+    out <- data.frame(c(sol[-1,3], sol[ntrm+1,1]),
+                      c(sol[-1,4], sol[ntrm+1,2]),
+                      c(sol[-1,5], NA),
+                      c(sol[-1,6], NA))
+    isRDA <- inherits(object, "rda")
+    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
+                       "F", "Pr(>F)")
+    rownames(out) <- c(trmlab, "Residual")
+    head <- paste0("Permutation test for ", object$method, " under ",
+                   model, " model\n",
+                   "Terms added sequentially (first to last)\n",
+                   howHead(attr(permutations, "control")))
+    mod <- paste("Model:", c(object$call))
+    attr(out, "heading") <- c(head, mod)
+    class(out) <- c("anova","data.frame")
+    out
+}
+
+## by = margin: this is not a anova.ccalist case, but we omit each
+## term in turn and compare against the complete model.
+
+`anova.ccabymargin` <-
+    function(object, permutations, scope, ...)
+{
+    nperm <- nrow(permutations)
+    ## Refuse to handle models with missing data
+    if (!is.null(object$na.action))
+        stop("by = 'margin' models cannot handle missing data")
+    ## We need term labels but without Condition() terms
+    if (!is.null(scope) && is.character(scope))
+        trms <- scope
     else
-        modelframe <- model.frame(object)
-    for (.ITRM in ntrm:2) {
-        if (ntrm < 2) 
-            break
-        assign(".Random.seed", sim$Random.seed, envir = .GlobalEnv)
-        fla <- as.formula(paste(" . ~ . -", trmlab[.ITRM]))
-        object <- update(object, fla,
-                         if (!is.null(modelframe)) data = modelframe)
-        ## Change in data set due to missing values?
-        if (length(object$residuals) != n0)
-            stop("number of rows has changed: remove missing values?")
-        if (is.null(object$CCA)) 
+        trms <- drop.scope(object)
+    trmlab <- trms[trms %in% attr(terms(object$terminfo),
+                                      "term.labels")]
+    if(length(trmlab) == 0)
+        stop("the scope was empty: no available marginal terms")
+    ## baseline: all terms
+    big <- permutest(object, permutations, ...)
+    dfbig <- big$df[2]
+    chibig <- big$chi[2]
+    scale <- big$den/dfbig
+    ## Collect all marginal models. This differs from old version
+    ## (vegan 2.0) where other but 'nm' were partialled out within
+    ## Condition(). Now we only fit the model without 'nm' and compare
+    ## the difference against the complete model.
+    mods <- lapply(trmlab, function(nm, ...)
+           permutest(update(object, paste(".~.-", nm)),
+                     permutations, ...), ...)
+    ## Change in df
+    Df <- sapply(mods, function(x) x$df[2]) - dfbig
+    ## F of change
+    Chisq <- sapply(mods, function(x) x$chi[2]) - chibig
+    Fstat <- (Chisq/Df)/(chibig/dfbig)
+    ## Simulated F-values
+    Fval <- sapply(mods, function(x) x$num)
+    ## Had we an empty model we need to clone the denominator
+    if (length(Fval) == 1)
+        Fval <- matrix(Fval, nrow=nperm)
+    Fval <- sweep(-Fval, 1, big$num, "+")
+    Fval <- sweep(Fval, 2, Df, "/")
+    Fval <- sweep(Fval, 1, scale, "/")
+    ## Simulated P-values
+    Pval <- (colSums(sweep(Fval, 2, Fstat, ">=")) + 1)/(nperm + 1)
+    ## Collect results to anova data.frame
+    out <- data.frame(c(Df, dfbig), c(Chisq, chibig),
+                      c(Fstat, NA), c(Pval, NA))
+    isRDA <- inherits(object, "rda")
+    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
+                       "F", "Pr(>F)")
+    rownames(out) <- c(trmlab, "Residual")
+    head <- paste0("Permutation test for ", object$method, " under ",
+                   mods[[1]]$model, " model\n",
+                   "Marginal effects of terms\n",
+                   howHead(attr(permutations, "control")))
+    mod <- paste("Model:", c(object$call))
+    attr(out, "heading") <- c(head, mod)
+    class(out) <- c("anova", "data.frame")
+    out
+}
+
+### Marginal test for axes
+
+`anova.ccabyaxis` <-
+    function(object, permutations, model, parallel, cutoff = 1)
+{
+    nperm <- nrow(permutations)
+    ## Observed F-values and Df
+    eig <- object$CCA$eig
+    resdf <- nobs(object) - length(eig) - max(object$pCCA$rank, 0) - 1
+    Fstat <- eig/object$CA$tot.chi*resdf
+    Df <- rep(1, length(eig))
+    ## Marginal P-values
+    LC <- object$CCA$u
+    ## missing values?
+    if (!is.null(object$na.action))
+        LC <- napredict(structure(object$na.action,
+                                  class="exclude"), LC)
+    ## subset?
+    if (!is.null(object$subset)) {
+        tmp <- matrix(NA, nrow=length(object$subset),
+                      ncol = ncol(LC))
+        tmp[object$subset,] <- LC
+        LC <- tmp
+        object <- update(object, subset = object$subset)
+    }
+    LC <- as.data.frame(LC)
+    fla <- reformulate(names(LC))
+    Pvals <- rep(NA, length(eig))
+    environment(object$terms) <- environment()
+    for (i in 1:length(eig)) {
+        part <- paste("~ . +Condition(",
+                      paste(names(LC)[-i], collapse = "+"), ")")
+        upfla <- update(fla, part)
+        ## only one axis, and cannot partial out?
+        if (length(eig) == 1)
+            mod <- permutest(object, permutations, model = model,
+                             parallel = parallel)
+        else
+            mod <-
+                permutest(update(object, upfla, data = LC),
+                          permutations, model = model,
+                          parallel = parallel)
+        Pvals[i] <- (sum(mod$F.perm >= mod$F.0) + 1)/(nperm+1)
+        if (Pvals[i] > cutoff)
             break
-        sim <- permutest.cca(object, permutations = step, ...)
-        pchi[.ITRM, ] <- pchi[.ITRM, ] - sim$num
-        chi[.ITRM] <- chi[.ITRM] - sim$chi[1]
-        df[.ITRM] <- df[.ITRM] - sim$df[1]
-        pchi[.ITRM - 1, ] <- sim$num
-        chi[.ITRM - 1] <- sim$chi[1]
-        df[.ITRM - 1] <- sim$df[1]
     }
-    Fval <- chi/df/(chi[ntrm + 1]/df[ntrm + 1])
-    Fval[ntrm + 1] <- NA
-    pchi <- sweep(pchi, 1, df, "/")
-    pchi[-(ntrm + 1), ] <- sweep(pchi[-(ntrm + 1), , drop = FALSE], 
-                                 2, pchi[ntrm + 1, , drop = FALSE], "/")
-    ## Round to avoid arbitrary P values due to numerical precision
-    pchi <- round(pchi, 12)
-    Fval <- round(Fval, 12)
-    P <- rowSums(sweep(pchi[-(ntrm + 1), , drop = FALSE], 1, 
-                       Fval[-(ntrm + 1)], ">="))
-    P <- c((P + adj)/(step + adj), NA)
-    out <- data.frame(df, chi, Fval, c(rep(step, ntrm), NA), 
-                      P)
-    inertname <- if (sim$method == "cca") 
-        "Chisq"
-    else "Var"
-    colnames(out) <- c("Df", inertname, "F", "N.Perm", "Pr(>F)")
-    out <- out[out[, 1] > 0 | out[, 2] > sqrt(.Machine$double.eps), 
-               ]
-    head <- paste("Permutation test for", sim$method, "under", 
-                  sim$model, "model\nTerms added sequentially (first to last)\n")
-    if (!is.null(sim$strata)) 
-        head <- paste(head, "Permutations stratified within '", 
-                      sim$strata, "'\n", sep = "")
-    structure(out, heading = c(head, call), Random.seed = sim$Random.seed, 
-              class = c("anova.cca", "anova", "data.frame"))
+    out <- data.frame(c(Df, resdf), c(eig, object$CA$tot.chi),
+                      c(Fstat, NA), c(Pvals,NA))
+    rownames(out) <- c(names(eig), "Residual")
+    isRDA <- inherits(object, "rda")
+    colnames(out) <- c("Df", ifelse(isRDA, "Variance", "ChiSquare"),
+                       "F", "Pr(>F)")
+    head <- paste0("Permutation test for ", object$method, " under ",
+                   model, " model\n",
+                   "Marginal tests for axes\n",
+                   howHead(attr(permutations, "control")))
+    mod <- paste("Model:", c(object$call))
+    attr(out, "heading") <- c(head, mod)
+    class(out) <- c("anova", "data.frame")
+    out
 }
diff --git a/R/anova.ccalist.R b/R/anova.ccalist.R
new file mode 100644
index 0000000..d682fa0
--- /dev/null
+++ b/R/anova.ccalist.R
@@ -0,0 +1,90 @@
+`anova.ccalist` <-
+    function(object, permutations, model, parallel)
+{
+    ## 'object' *must* be a list of cca objects, and 'permutations'
+    ## *must* be a permutation matrix -- we assume that calling
+    ## function takes care of this, and this function is not directly
+    ## called by users.
+    nmodels <- length(object)
+    ## check that input is valid
+    ## 1. All models must be fitted with the same method
+    method <- sapply(object, function(z) z$method)
+    if (!all(method == method[1]))
+        stop("same ordination method must be used in all models")
+    else
+        method <- method[1]
+    ## 2. Same response
+    resp <- sapply(object, function(z) deparse(formula(z)[[2]]))
+    if (!all(resp == resp[1]))
+        stop("response must be same in all models")
+    ## 3. Same no. of observations
+    N <- sapply(object, nobs)
+    if (!all(N == N[1]))
+        stop("number of observations must be same in all models")
+    else
+        N <- N[1]
+    ## 4. Terms must be nested
+    trms <- lapply(object, function(z) labels(terms(z)))
+    o  <- order(sapply(trms, length))
+    for(i in 2:nmodels) 
+        if(!all(trms[[o[i-1]]] %in% trms[[o[i]]]))
+            stop("models must be nested")
+        
+    ## Check permutation matrix
+    nperm <- nrow(permutations)
+    ## check
+    if (ncol(permutations) != N)
+        stop(gettextf("permutation matrix has %d columns, but you have %d sites",
+                      ncol(permutations), N))
+    ## All models are evaluated in permutest.cca with identical
+    ## permutations so that the differences of single permutations can
+    ## be used to assess the significance of differences of fitted
+    ## models. This strictly requires nested models (not checked
+    ## here): all terms of the smaller model must be included in the
+    ## larger model. 
+    mods <- lapply(object, function(z)
+                   permutest.cca(z, permutations = permutations,
+                                 model = model, parallel = parallel))
+    dfs <- sapply(mods, function(z) z$df)
+    dev <- sapply(mods, function(z) z$chi)
+    resdf <- dfs[2,]
+    df <- -diff(resdf)
+    resdev <- dev[2,]
+    changedev <- -diff(resdev)
+    big <- which.min(resdf)
+    scale <- resdev[big]/resdf[big]
+    fval <- changedev/df/scale
+    ## Collect permutation results: denominator of F varies in each
+    ## permutation.
+    pscale <- mods[[big]]$den/resdf[big]
+    ## Numerator of F
+    pfvals <- sapply(mods, function(z) z$num)
+    if (is.list(pfvals))
+        pfvals <- do.call(cbind, pfvals)
+    pfvals <- apply(pfvals, 1, diff)
+    ## dropped to vector?
+    if (!is.matrix(pfvals))
+        pfvals <- matrix(pfvals, nrow=1, ncol=nperm)
+    pfvals <- sweep(pfvals, 1, df, "/")
+    pfvals <- sweep(pfvals, 2, pscale, "/")
+    pval <- rowSums(sweep(pfvals, 1, fval, ">="))
+    pval <- (pval + 1)/(nperm+1)
+    ## collect table
+    table <- data.frame(resdf, resdev, c(NA, df),
+                        c(NA,changedev), c(NA,fval), c(NA,pval))
+    isRDA <- method != "cca"
+    dimnames(table) <- list(1L:nmodels,
+                            c("Res.Df",
+                              ifelse(isRDA,"Res.Variance", "Res.ChiSquare"), 
+                              "Df",
+                              ifelse(isRDA,"Variance","ChiSquare"),
+                                     "F", "Pr(>F)"))
+    ## Collect header information
+    formulae <- sapply(object, function(z) deparse(formula(z)))
+    head <- paste0("Permutation tests for ", method, " under ",
+                  mods[[big]]$model, " model\n",
+                   howHead(attr(permutations, "control")))
+    topnote <- paste("Model ", format(1L:nmodels), ": ", formulae,
+                     sep = "", collapse = "\n")
+    structure(table, heading=c(head,topnote), class = c("anova", "data.frame"))
+}
diff --git a/R/as.hclust.spantree.R b/R/as.hclust.spantree.R
new file mode 100644
index 0000000..65d3ebc
--- /dev/null
+++ b/R/as.hclust.spantree.R
@@ -0,0 +1,158 @@
+### Casts a vegan spantree object into single linkage dendrogram of
+### class hclust. The non-trivial items in "hclust" object are a
+### 'merge' matrix for fusions of points and/or clusters, a 'height'
+### vector which gives the heights of each fusion, and an 'order'
+### vector that gives the order of leaves in the plotted
+### dendrogram. The 'height's are only sorted spantree segment
+### distances, but for 'merge' we need to establish cluster
+### memberships, and for 'order' we must traverse the tree.
+
+`as.hclust.spantree` <-
+    function(x, ...)
+{
+    ## Order by the lengths of spanning tree links
+    o <- order(x$dist)
+    npoints <- length(o) + 1
+    ## Ordered indices of dads and kids
+    dad <- (2:npoints)[o]
+    kid <- x$kid[o]
+    ## merge matrix of hclust has negative index when a single point
+    ## is added to a tree and a positive index when a group is joined
+    ## to a tree, and the group is numbered by the level it was
+    ## formed.
+    labs <- -seq_len(npoints)
+    merge <- matrix(0, nrow=npoints-1, ncol=2)
+    for(i in 1:nrow(merge)) {
+        merge[i, ] <- c(labs[dad[i]], labs[kid[i]])
+        ## update labs for the current group and its kids
+        labs[labs %in% labs[c(dad[i], kid[i])]] <- i
+    }
+
+    order <- hclustMergeOrder(merge)
+      
+    out <- list(merge = merge, height = x$dist[o], order = order,
+                labels = x$labels, method = "spantree", call =
+                match.call())
+    class(out) <- "hclust"
+    out
+}
+
+### Internal vegan function to get the 'order' from a merge matrix of
+### an hclust tree
+
+`hclustMergeOrder` <-
+    function(merge)
+{
+    ## Get order of leaves with recursive search from the root
+    order <- numeric(nrow(merge)+1)
+    ind <- 0
+    ## "<<-" updates data only within hclustMergeOrder, but outside
+    ## the visit() function.
+    visit <- function(i, j) {
+        if (merge[i,j] < 0) {
+            ind <<- ind+1
+            order[ind] <<- -merge[i,j]
+        } else {
+            visit(merge[i,j], 1)
+            visit(merge[i,j], 2)
+        }
+    }
+    visit(nrow(merge), 1)
+    visit(nrow(merge), 2)
+    return(order)
+}
+
+### Reorder an hclust tree. Basic R provides reorder.dendrogram, but
+### this function works with 'hclust' objects, and also differs in
+### implementation. We use either weighted mean, min or max or
+### sum. The dendrogram is always ordered in ascending order, so that
+### with max the left kid always has lower value. So with 'max' the
+### largest value is smaller in leftmost group. The choice 'sum'
+### hardly makes sense, but it is the default in
+### reorder.dendrogram. The ordering with 'mean' differs from
+### reorder.dendrogram which uses unweighted means, but here we weight
+### means by group sizes so that the mean of an internal node is the
+### mean of its leaves.
+
+`reorder.hclust` <-
+    function(x, wts,
+             agglo.FUN = c("mean", "min", "max", "sum", "uwmean"),
+             ...)
+{
+    agglo.FUN <- match.arg(agglo.FUN)
+    merge <- x$merge
+    nlev <- nrow(merge)
+    stats <- numeric(nlev)
+    counts <- numeric(nlev)
+    pair <- numeric(2)
+    pairw <- numeric(2)
+    ## Go through merge, order each level and update the statistic.
+    for(i in 1:nlev) {
+        for(j in 1:2) {
+            if (merge[i,j] < 0) {
+                pair[j] <- wts[-merge[i,j]]
+                pairw[j] <- 1
+            } else {
+                pair[j] <- stats[merge[i,j]]
+                pairw[j] <- counts[merge[i,j]]
+            }
+        }
+        ## reorder
+        merge[i,] <- merge[i, order(pair)]
+        ## statistic for this merge level
+        stats[i] <-
+            switch(agglo.FUN,
+                   "mean" = weighted.mean(pair, pairw),
+                   "min" = min(pair),
+                   "max" = max(pair),
+                   "sum" = sum(pair),
+                   "uwmean" = mean(pair))
+        counts[i] <- sum(pairw)
+    }
+    ## Get the 'order' of the reordered dendrogram
+    order <- hclustMergeOrder(merge)
+    x$merge <- merge
+    x$order <- order
+    x$value <- stats
+    x
+}
+
+### Trivial function to reverse the order of an hclust tree (why this
+### is not in base R?)
+
+`rev.hclust` <-
+    function(x)
+{
+    x$order <- rev(x$order)
+    x
+}
+
+### Get coordinates for internal or terminal nodes (leaves) that would
+### be used in plot.hclust
+
+`scores.hclust` <-
+    function(x, display = "internal", ...)
+{
+    extnam <- c("leaves", "terminal")
+    intnam <- c("internal")
+    display <- match.arg(display, c(extnam, intnam))
+    ## Terminal nodes (leaves): plot.hclust scales x-axis for n points
+    ## as 1..n. The y-value is the 'height' where the terminal node
+    ## was fused to the tree.
+    if(display %in% extnam) {
+        merge <- x$merge
+        y <- numeric(nrow(merge) + 1)
+        for(i in 1:nrow(merge))
+            for(j in 1:2)
+                if(merge[i,j] < 0)
+                    y[-merge[i,j]] <- x$height[i]
+        xx <- order(x$order)
+        xy <- cbind(`x` = xx, `height` = y)
+    } else {
+        ## Internal nodes are given in the order they were fused which
+        ## also is the order of 'height'
+        xx <- reorder(x, order(x$order), agglo.FUN = "uwmean")$value
+        xy <- cbind(`x`= xx, `height` = x$height)
+    }
+    xy
+}
diff --git a/R/as.ts.oecosimu.R b/R/as.ts.oecosimu.R
index b471895..3f94f7c 100644
--- a/R/as.ts.oecosimu.R
+++ b/R/as.ts.oecosimu.R
@@ -1,10 +1,8 @@
 `as.ts.oecosimu` <-
     function(x, ...)
 {
-    seqmethods <- c("swap", "tswap", "permat.swap", "permat.abuswap")
-    if (!(x$oecosimu$method %in% seqmethods))
-        stop("as.ts available only for sequential methods ",
-             paste(seqmethods, collapse=", "))
+    if  (!x$oecosimu$isSeq)
+        stop("as.ts available only for sequential methods")
     startval <- attr(x$oecosimu$simulated, "burnin") + 1 
     thin <- attr(x$oecosimu$simulated, "thin")
     out <- ts(t(x$oecosimu$simulated), start = startval, deltat=thin,
diff --git a/R/as.ts.permat.R b/R/as.ts.permat.R
index 458e4e1..b2e2256 100644
--- a/R/as.ts.permat.R
+++ b/R/as.ts.permat.R
@@ -4,7 +4,9 @@
     type <- match.arg(type, c("bray", "chisq"))
     out <- summary(x)[[type]]
     if (!is.ts(out)) {
-        seqmethods <- c("swap", "tswap", "abuswap")
+        seqmethods <- sapply(make.commsim(), function(z) make.commsim(z)$isSeq)
+        seqmethods <- names(seqmethods)[seqmethods]
+#        seqmethods <- c("swap", "tswap", "abuswap")
         stop("as.ts available only for sequential methods ",
             paste(seqmethods, collapse=", "))
     } 
diff --git a/R/betadisper.R b/R/betadisper.R
index 41549de..6d96f09 100644
--- a/R/betadisper.R
+++ b/R/betadisper.R
@@ -68,7 +68,7 @@
         n <- n - sum(gr.na)
         ## update labels
         labs <- labs[!gr.na]
-        warning("Missing observations due to 'group' removed.")
+        warning("missing observations due to 'group' removed")
     }
     ## remove NA's in d
     if(any(x.na <- apply(x, 1, function(x) any(is.na(x))))) {
@@ -78,7 +78,7 @@
         n <- n - sum(x.na)
         ## update labels
         labs <- labs[!x.na]
-        warning("Missing observations due to 'd' removed.")
+        warning("missing observations due to 'd' removed")
     }
     x <- x + t(x)
     x <- dblcen(x)
diff --git a/R/bioenv.default.R b/R/bioenv.default.R
index a377036..c321395 100644
--- a/R/bioenv.default.R
+++ b/R/bioenv.default.R
@@ -1,16 +1,23 @@
 `bioenv.default` <-
 function (comm, env, method = "spearman", index = "bray", upto = ncol(env), 
-              trace = FALSE, partial = NULL, ...) 
+          trace = FALSE, partial = NULL,
+          metric = c("euclidean", "mahalanobis", "manhattan", "gower"),
+          parallel = getOption("mc.cores"),
+          ...) 
 {
+    metric <- match.arg(metric)
+    method <- match.arg(method, eval(formals(cor)$method))
+    if (any(sapply(env, is.factor)) && metric != "gower")
+        stop("you have factors in 'env': only 'metric = \"gower\"' is allowed")
     if (is.null(partial)) {
-        corfun <- function(dx, dy, dz, method) {
-            cor(dx, dy, method=method)
+        corfun <- function(dx, dy, dz, method, ...) {
+            cor(dx, dy, method=method, ...)
         }
     } else {
-        corfun <- function(dx, dy, dz, method) {
-            rxy <- cor(dx, dy, method=method)
-            rxz <- cor(dx, dz, method=method)
-            ryz <- cor(dy, dz, method=method)
+        corfun <- function(dx, dy, dz, method, ...) {
+            rxy <- cor(dx, dy, method=method, ...)
+            rxz <- cor(dx, dz, method=method, ...)
+            ryz <- cor(dy, dz, method=method, ...)
             (rxy - rxz*ryz)/sqrt(1-rxz*rxz)/sqrt(1-ryz*ryz)
         }
     }
@@ -25,13 +32,29 @@ function (comm, env, method = "spearman", index = "bray", upto = ncol(env),
     n <- ncol(env)
     ntake <- 2^n - 1
     ndone <- 0
+    upto <- min(upto, n)
     if (n > 8 || trace) {
         if (upto < n) 
             cat("Studying", nall <- sum(choose(n, 1:upto)), "of ")
         cat(ntake, "possible subsets (this may take time...)\n")
         flush.console()
     }
-    x <- scale(env)
+    ## Check metric and adapt data and distance function
+    if (metric == "euclidean") {
+        x <- scale(env, scale = TRUE)
+        distfun <- function(x) dist(x)
+    } else if (metric == "mahalanobis") {
+        x <- as.matrix(scale(env, scale = FALSE))
+        distfun <- function(x) dist(veganMahatrans(x))
+    } else if (metric == "gower") {
+        x <- env
+        distfun <- function(x) daisy(x, metric = "gower")
+    } else if (metric == "manhattan") {
+        x <- decostand(env, "range")
+        distfun <- function(x) dist(x, "manhattan")
+    } else {
+        stop("unknown metric")
+    }
     best <- list()
     if (inherits(comm, "dist")) {
         comdis <- comm
@@ -45,6 +68,22 @@ function (comm, env, method = "spearman", index = "bray", upto = ncol(env),
     } else {
         comdis <- vegdist(comm, method = index)
     }
+    ## Prepare for parallel processing
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    isParal <- (hasClus || parallel > 1) && require(parallel)
+    isMulticore <- .Platform$OS.type == "unix" && !hasClus
+    if (isParal && !isMulticore && !hasClus) {
+        parallel <- makeCluster(parallel)
+    }
+    ## get the number of clusters
+    if (inherits(parallel, "cluster"))
+        nclus <- length(parallel)
+    else
+        nclus <- parallel
+    CLUSLIM <- 8
+    ## The proper loop
     for (i in 1:upto) {
         if (trace) {
             nvar <- choose(n, i)
@@ -55,9 +94,23 @@ function (comm, env, method = "spearman", index = "bray", upto = ncol(env),
         sets <- t(combn(1:n, i))
         if (!is.matrix(sets)) 
             sets <- as.matrix(t(sets))
-        est <- numeric(nrow(sets))
-        for (j in 1:nrow(sets)) est[j] <- corfun(comdis, dist(x[, 
-                                                                sets[j, ]]), partial, method = method)
+        if (isParal && nrow(sets) >= CLUSLIM*nclus) {
+            if (isMulticore) {
+                est <- unlist(mclapply(1:nrow(sets), function(j)
+                                       corfun(comdis,
+                                              distfun(x[,sets[j,],drop = FALSE]),
+                                              partial, method = method, ...),
+                                       mc.cores = parallel))
+            } else {
+                est <- parSapply(parallel, 1:nrow(sets), function(j)
+                                  corfun(comdis, distfun(x[,sets[j,],drop = FALSE]),
+                                         partial, method = method, ...))
+            }
+        } else {
+            est <- sapply(1:nrow(sets), function(j) 
+                          corfun(comdis, distfun(x[,sets[j,], drop=FALSE ]),
+                                 partial, method = method, ...))
+        }
         best[[i]] <- list(best = sets[which.max(est), ], est = max(est))
         if (trace) {
             ndone <- ndone + nvar
@@ -66,10 +119,28 @@ function (comm, env, method = "spearman", index = "bray", upto = ncol(env),
             flush.console()
         }
     }
-    out <- list(names = colnames(env), method = method, index = index, 
-                upto = upto, models = best, partial = partpart)
+    whichbest <- which.max(lapply(best, function(tmp) tmp$est))
+    out <- list(names = colnames(env), method = method, index = index,
+                metric = metric, upto = upto, models = best,
+                whichbest = whichbest,
+                partial = partpart, x = x, distfun = distfun)
     out$call <- match.call()
     out$call[[1]] <- as.name("bioenv")
     class(out) <- "bioenv"
     out
 }
+
+## Function to extract the environmental distances used within
+## bioenv. The default is to take the best model, but any model can be
+## specified by its number.
+
+`bioenvdist`  <-
+    function(x, which = "best")
+{
+    ## any non-numeric argument is regarded as "best"
+    if(!is.numeric(which))
+        which <- x$whichbest
+    x$distfun(x$x[, x$models[[which]]$best, drop = FALSE])
+}
+
+
diff --git a/R/bioenv.formula.R b/R/bioenv.formula.R
index deccf16..2d01869 100644
--- a/R/bioenv.formula.R
+++ b/R/bioenv.formula.R
@@ -7,12 +7,7 @@
     comm <- formula[[2]]
     comm <- eval(comm, data, parent.frame())
     formula[[2]] <- NULL
-    mf <- model.frame(formula, data, na.action = NULL)
-    if (any(sapply(mf, function(x) is.factor(x) || !is.numeric(x)))) 
-        stop("bioenv applies only to numeric variables")
-    env <- attr(mf, "terms")
-    attr(env, "intercept") <- 0
-    env <- model.matrix(env, mf)
+    env <- model.frame(formula, data, na.action = NULL)
     out <- bioenv(comm, env, ...)
     out$formula <- fla
     out$call <- match.call()
diff --git a/R/biplot.CCorA.R b/R/biplot.CCorA.R
index 40b5471..dc39e45 100644
--- a/R/biplot.CCorA.R
+++ b/R/biplot.CCorA.R
@@ -22,11 +22,11 @@
 
     epsilon <- sqrt(.Machine$double.eps)
 	if(length(which(x$Eigenvalues > epsilon)) == 1)
-		stop("Plot of axes (", paste(plot.axes, collapse=","),
-			") not drawn because the solution has a single dimension.")
+		stop("plot of axes (", paste(plot.axes, collapse=","),
+			") not drawn because the solution has a single dimension")
 	if(max(plot.axes) > length(which(x$Eigenvalues > epsilon)))
-		stop("Plot of axes (", paste(plot.axes, collapse=","),
-			") not drawn because the solution has fewer dimensions.")
+		stop("plot of axes (", paste(plot.axes, collapse=","),
+			") not drawn because the solution has fewer dimensions")
 
 	if (missing(xlabs))
 		xlabs <- rownames(x$Cy)
diff --git a/R/biplot.rda.R b/R/biplot.rda.R
index 1f5fc11..09e20e8 100644
--- a/R/biplot.rda.R
+++ b/R/biplot.rda.R
@@ -31,9 +31,9 @@ biplot.rda <- function(x, choices = c(1, 2), scaling = 2,
   if(length(type) < 2)
       type <- rep(type, 2)
   if (missing(xlim))
-      xlim <- range(g$species[, 1], g$sites[, 1])
+      xlim <- range(g$species[, 1], g$sites[, 1], na.rm = TRUE)
   if (missing(ylim))
-      ylim <- range(g$species[, 2], g$sites[, 2])
+      ylim <- range(g$species[, 2], g$sites[, 2], na.rm = TRUE)
   plot(g[[1]], xlim = xlim, ylim = ylim, type = "n", asp = 1,
        ...)
   abline(h = 0, lty = 3)
diff --git a/R/bstick.cca.R b/R/bstick.cca.R
index 709c126..f68b9fa 100644
--- a/R/bstick.cca.R
+++ b/R/bstick.cca.R
@@ -4,7 +4,7 @@
     if(!inherits(n, c("rda", "cca")))
         stop("'n' not of class \"cca\" or \"rda\"")
     if(!is.null(n$CCA) && n$CCA$rank > 0)
-        stop("'bstick' only for unconstrained models.")
+        stop("'bstick' only for unconstrained models")
     ## No idea how to define bstick for capscale with negative
     ## eigenvalues
     if (inherits(n, "capscale") && !is.null(n$CA$imaginary.rank))
diff --git a/R/cIndexKM.R b/R/cIndexKM.R
index d04bc8c..3701890 100644
--- a/R/cIndexKM.R
+++ b/R/cIndexKM.R
@@ -26,7 +26,11 @@
     {
         n <- sum(clsize)
         k <- length(clsize)
-        zgss$bgss/(k - 1)/(zgss$wgss/(n - k))
+        ## undefined 0/0 for one class (or fewer in error cases)
+        if (k <= 1)
+            NA
+        else
+            zgss$bgss/(k - 1)/(zgss$wgss/(n - k))
     }
 ################################################
     ssi <- function(centers, clsize) 
diff --git a/R/capscale.R b/R/capscale.R
index 77fbb11..2a1ecdf 100644
--- a/R/capscale.R
+++ b/R/capscale.R
@@ -124,20 +124,20 @@
         if (!is.null(sol$pCCA) && sol$pCCA$rank > 0) 
             comm <- qr.resid(sol$pCCA$QR, comm)
         if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
-            sol$CCA$v.eig <- t(comm) %*% sol$CCA$u/sqrt(k)
-            sol$CCA$v <- decostand(sol$CCA$v.eig, "normalize", MARGIN = 2)
+            v.eig <- t(comm) %*% sol$CCA$u/sqrt(k)
+            sol$CCA$v <- decostand(v.eig, "normalize", MARGIN = 2)
             comm <- qr.resid(sol$CCA$QR, comm)
         }
         if (!is.null(sol$CA) && sol$CA$rank > 0) {
-            sol$CA$v.eig <- t(comm) %*% sol$CA$u/sqrt(k)
-            sol$CA$v <- decostand(sol$CA$v.eig, "normalize", MARGIN = 2)
+            v.eig <- t(comm) %*% sol$CA$u/sqrt(k)
+            sol$CA$v <- decostand(v.eig, "normalize", MARGIN = 2)
         }
     } else {
         ## input data were dissimilarities, and no 'comm' defined:
         ## species scores make no sense and are made NA
-        sol$CA$v.eig[] <- sol$CA$v[] <- NA
+        sol$CA$v[] <- NA
         if (!is.null(sol$CCA))
-            sol$CCA$v.eig[] <- sol$CCA$v[] <- NA
+            sol$CCA$v[] <- NA
         sol$colsum <- NA
     }
     if (!is.null(sol$CCA) && sol$CCA$rank > 0) 
diff --git a/R/cca.default.R b/R/cca.default.R
index fcf8b5d..47bcbbb 100644
--- a/R/cca.default.R
+++ b/R/cca.default.R
@@ -68,11 +68,9 @@
                            1, 1/sqrt(rowsum), "*")
             CCA$v <- sweep(as.matrix(sol$v[, 1:rank, drop = FALSE]), 
                            1, 1/sqrt(colsum), "*")
-            CCA$u.eig <- sweep(CCA$u, 2, sol$d[1:rank], "*")
-            CCA$v.eig <- sweep(CCA$v, 2, sol$d[1:rank], "*")
-            CCA$wa.eig <- sweep(Xbar %*% sol$v[, 1:rank, drop = FALSE], 
-                                1, 1/sqrt(rowsum), "*")
-            CCA$wa <- sweep(CCA$wa.eig, 2, 1/sol$d[1:rank], "*")
+            wa.eig <- sweep(Xbar %*% sol$v[, 1:rank, drop = FALSE], 
+                            1, 1/sqrt(rowsum), "*")
+            CCA$wa <- sweep(wa.eig, 2, 1/sol$d[1:rank], "*")
             oo <- Q$pivot
             if (!is.null(pCCA$rank)) 
                 oo <- oo[-(1:pCCA$rank)] - ncol(Z.r)
@@ -92,15 +90,14 @@
                         QR = Q, Xbar = Xbar)
             u <- matrix(0, nrow=nrow(sol$u), ncol=0)
             v <- matrix(0, nrow=nrow(sol$v), ncol=0)
-            CCA$u <- CCA$u.eig <- CCA$wa <- CCA$wa.eig <- u
-            CCA$v <- CCA$v.eig <- v
+            CCA$u <- CCA$wa <- u
+            CCA$v <- v
             CCA$biplot <- matrix(0, 0, 0)
             CCA$alias <- colnames(Y.r)
         }
         Xbar <- qr.resid(Q, Xbar)
         if (exists("exclude.spec")) {
             attr(CCA$v, "na.action") <- exclude.spec
-            attr(CCA$v.eig, "na.action") <- exclude.spec
         }
         
     }
@@ -119,8 +116,6 @@
                       1, 1/sqrt(rowsum), "*")
         CA$v <- sweep(as.matrix(sol$v[, 1:rank, drop = FALSE]), 
                       1, 1/sqrt(colsum), "*")
-        CA$u.eig <- sweep(CA$u, 2, sol$d[1:rank], "*")
-        CA$v.eig <- sweep(CA$v, 2, sol$d[1:rank], "*")
         CA$rank <- rank
         CA$tot.chi <- sum(CA$eig)
         CA$Xbar <- Xbar
@@ -128,12 +123,11 @@
     } else {   # zero rank: no residual component
         CA <- list(eig = 0, rank = rank, tot.chi = 0,
                    Xbar = Xbar)
-        CA$u <- CA$u.eig <- matrix(0, nrow(sol$u), 0)
-        CA$v <- CA$v.eig <- matrix(0, nrow(sol$v), 0)
+        CA$u <- matrix(0, nrow(sol$u), 0)
+        CA$v <- matrix(0, nrow(sol$v), 0)
     }
     if (exists("exclude.spec")) {
         attr(CA$v, "na.action") <- exclude.spec
-        attr(CA$v.eig, "na.action") <- exclude.spec
     }
     call <- match.call()
     call[[1]] <- as.name("cca")
diff --git a/R/clamtest.R b/R/clamtest.R
index 60bbcc8..251fb31 100644
--- a/R/clamtest.R
+++ b/R/clamtest.R
@@ -1,127 +1,127 @@
-## CLAM, reproduction of software described in Chazdon et al. 2011
-## Ecology, 92, 1332--1343
-clamtest <- 
-function(comm, groups, coverage.limit = 10,
-specialization = 2/3, npoints = 20, alpha = 0.05/20) 
-{
-    ## inital checks
-    comm <- as.matrix(comm)
-    if (NROW(comm) < 2)
-        stop("'comm' must have at least 2 rows")
-    if (nrow(comm) > 2 && missing(groups))
-        stop("'groups' is missing")
-    if (nrow(comm) == 2 && missing(groups))
-        groups <- if (is.null(rownames(comm)))
-            c("Group.1", "Group.2") else rownames(comm)
-    if (length(groups) != nrow(comm))
-        stop("length of 'groups' must equal 'nrow(comm)'")
-    if (length(unique(groups)) != 2)
-        stop("number of groups must be 2")
-    glabel <- as.character(unique(groups))
-    if (is.null(colnames(comm)))
-        colnames(comm) <- paste("Species", 1:ncol(comm), sep=".")
-    if (any(colSums(comm) <= 0))
-        stop("'comm' contains zero sum columns")
-    spp <- colnames(comm)
-    ## reproduced from Chazdon et al. 2011, Ecology 92, 1332--1343
-    S <- ncol(comm)
-    if (nrow(comm) == 2) {
-        Y <- comm[glabel[1],]
-        X <- comm[glabel[2],]
-    } else {
-        Y <- colSums(comm[which(groups==glabel[1]),])
-        X <- colSums(comm[which(groups==glabel[2]),])
-    }
-    names(X) <- names(Y) <- NULL
-    #all(ct$Total_SG == Y)
-    #all(ct$Total_OG == X)
-    m <- sum(Y)
-    n <- sum(X)
-    if (sum(Y) <= 0 || sum(X) <= 0)
-        stop("zero group totals not allowed")
-    ## check if comm contains integer, especially for singletons
-    if (any(X[X>0] < 1) || any(Y[Y>0] < 1))
-        warning("<1 non integer values detected: analysis might not be meaningful")
-    if (abs(sum(X,Y) - sum(as.integer(X), as.integer(Y))) > 10^-6)
-        warning("non integer values detected")
-    C1 <- 1 - sum(X==1)/n
-    C2 <- 1 - sum(Y==1)/m
-    ## this stands for other than 2/3 cases
-    uu <- specialization/(1-specialization)
-    ## critical level
-    Zp <- qnorm(alpha, lower.tail=FALSE)
-    #p_i=a
-    #pi_i=b
-    ## function to calculate test statistic from Appendix D 
-    ## (Ecological Archives E092-112-A4)
-    ## coverage limit is count, not freq !!!
-    testfun <- function(p_i, pi_i, C1, C2, n, m) {
-        C1 <- ifelse(p_i*n < coverage.limit, C1, 1)
-        C2 <- ifelse(pi_i*m < coverage.limit, C2, 1)
-        Var <- C1^2*(p_i*(1-p_i)/n) + uu^2*C2^2*(pi_i*(1-pi_i)/m)
-        C1*p_i - C2*pi_i*uu - Zp*sqrt(Var)
-    }
-    ## root finding for iso-lines (instead of itarative search)
-    rootfun <- function(pi_i, C1, C2, n, m, upper) {
-        f <- function(p_i) testfun(p_i/n, pi_i/m, C1, C2, n, m)
-        if (length(unique(sign(c(f(1), f(upper))))) > 1)
-        ceiling(uniroot(f, lower=1, upper=upper)$root) else NA
-    }
-    ## sequences for finding Xmin and Ymin values
-    Xseq <- as.integer(trunc(seq(1, max(X), len=npoints)))
-    Yseq <- as.integer(trunc(seq(1, max(Y), len=npoints)))
-    ## finding Xmin and Ymin values for Xseq and Yseq
-    Xmins <- sapply(Yseq, function(z) rootfun(z, C1, C2, n, m, upper=max(X)))
-    Ymins <- sapply(Xseq, function(z) rootfun(z, C2, C1, m, n, upper=max(Y)))
-
-    ## needed to tweak original set of rules (extreme case reported
-    ## by Richard Telford failed here)
-    if (all(is.na(Xmins)))
-        Xmins[1] <- 1
-    if (all(is.na(Ymins)))
-        Ymins[1] <- 1
-
-    minval <- list(data.frame(x=Xseq[!is.na(Ymins)], y=Ymins[!is.na(Ymins)]),
-        data.frame(x=Xmins[!is.na(Xmins)], y=Yseq[!is.na(Xmins)]))
-
-    ## shared but too rare
-    Ymin <- Ymins[1]
-    Xmin <- Xmins[1]
-    sr <- X < Xmin & Y < Ymin
-
-    ## consequence of manually setting Xmin/Ymin resolved here
-    tmp1 <- if (Xmin==1)
-        list(x=1, y=Xmin) else approx(c(Xmin, 1), c(1, Ymin), xout=1:Xmin)
-    tmp2 <- if (Ymin==1)
-        list(x=1, y=Ymin) else approx(c(1, Ymin), c(Xmin, 1), xout=1:Ymin)
-
-    for (i in 1:S) {
-        if (X[i] %in% tmp1$x)
-            sr[i] <- Y[i] < tmp1$y[which(X[i]==tmp1$x)]
-        if (Y[i] %in% tmp2$x)
-            sr[i] <- X[i] < tmp2$y[which(Y[i]==tmp2$x)]
-    }
-    ## classification
-    a <- ifelse(X==0, 1, X)/n # \hat{p_i}
-    b <- ifelse(Y==0, 1, Y)/m # \hat{\pi_i}
-    specX <- !sr & testfun(a, b, C1, C2, n, m) > 0
-    specY <- !sr & testfun(b, a, C2, C1, m, n) > 0
-    gen <- !sr & !specX & !specY
-    ## crosstable
-    tmp <- ifelse(cbind(gen, specY, specX, sr), 1, 0)
-    types <- c("Generalist", paste("Specialist", glabel[1], sep="_"),
-        paste("Specialist", glabel[2], sep="_"), "Too_rare")
-    classes <- factor((1:4)[rowSums(tmp*col(tmp))], levels=1:4)
-    levels(classes) <- c("Generalist", paste("Specialist", glabel[1], sep="_"),
-        paste("Specialist", glabel[2], sep="_"), "Too_rare")
-    tab <- data.frame(Species=spp, y=Y, x=X, Classes=classes)
-    colnames(tab)[2:3] <- paste("Total", glabel, sep="_")
-    rownames(tab) <- NULL
-    class(tab) <- c("clamtest","data.frame")
-    attr(tab, "settings") <- list(labels = glabel,
-        coverage.limit = coverage.limit, specialization = specialization, 
-        npoints = npoints, alpha = alpha)
-    attr(tab, "minv") <- minval
-    attr(tab, "coverage") <- structure(c(C2, C1), .Names=glabel)
-    tab
-}
+## CLAM, reproduction of software described in Chazdon et al. 2011
+## Ecology, 92, 1332--1343
+clamtest <- 
+function(comm, groups, coverage.limit = 10,
+specialization = 2/3, npoints = 20, alpha = 0.05/20) 
+{
+    ## initial checks
+    comm <- as.matrix(comm)
+    if (NROW(comm) < 2)
+        stop("'comm' must have at least 2 rows")
+    if (nrow(comm) > 2 && missing(groups))
+        stop("'groups' is missing")
+    if (nrow(comm) == 2 && missing(groups))
+        groups <- if (is.null(rownames(comm)))
+            c("Group.1", "Group.2") else rownames(comm)
+    if (length(groups) != nrow(comm))
+        stop("length of 'groups' must equal 'nrow(comm)'")
+    if (length(unique(groups)) != 2)
+        stop("number of groups must be 2")
+    glabel <- as.character(unique(groups))
+    if (is.null(colnames(comm)))
+        colnames(comm) <- paste("Species", 1:ncol(comm), sep=".")
+    if (any(colSums(comm) <= 0))
+        stop("'comm' contains zero sum columns")
+    spp <- colnames(comm)
+    ## reproduced from Chazdon et al. 2011, Ecology 92, 1332--1343
+    S <- ncol(comm)
+    if (nrow(comm) == 2) {
+        Y <- comm[glabel[1],]
+        X <- comm[glabel[2],]
+    } else {
+        Y <- colSums(comm[which(groups==glabel[1]),])
+        X <- colSums(comm[which(groups==glabel[2]),])
+    }
+    names(X) <- names(Y) <- NULL
+    #all(ct$Total_SG == Y)
+    #all(ct$Total_OG == X)
+    m <- sum(Y)
+    n <- sum(X)
+    if (sum(Y) <= 0 || sum(X) <= 0)
+        stop("zero group totals not allowed")
+    ## check if comm contains integer, especially for singletons
+    if (any(X[X>0] < 1) || any(Y[Y>0] < 1))
+        warning("<1 non integer values detected: analysis might not be meaningful")
+    if (abs(sum(X,Y) - sum(as.integer(X), as.integer(Y))) > 10^-6)
+        warning("non integer values detected")
+    C1 <- 1 - sum(X==1)/n
+    C2 <- 1 - sum(Y==1)/m
+    ## this stands for other than 2/3 cases
+    uu <- specialization/(1-specialization)
+    ## critical level
+    Zp <- qnorm(alpha, lower.tail=FALSE)
+    #p_i=a
+    #pi_i=b
+    ## function to calculate test statistic from Appendix D 
+    ## (Ecological Archives E092-112-A4)
+    ## coverage limit is count, not freq !!!
+    testfun <- function(p_i, pi_i, C1, C2, n, m) {
+        C1 <- ifelse(p_i*n < coverage.limit, C1, 1)
+        C2 <- ifelse(pi_i*m < coverage.limit, C2, 1)
+        Var <- C1^2*(p_i*(1-p_i)/n) + uu^2*C2^2*(pi_i*(1-pi_i)/m)
+        C1*p_i - C2*pi_i*uu - Zp*sqrt(Var)
+    }
+    ## root finding for iso-lines (instead of iterative search)
+    rootfun <- function(pi_i, C1, C2, n, m, upper) {
+        f <- function(p_i) testfun(p_i/n, pi_i/m, C1, C2, n, m)
+        if (length(unique(sign(c(f(1), f(upper))))) > 1)
+        ceiling(uniroot(f, lower=1, upper=upper)$root) else NA
+    }
+    ## sequences for finding Xmin and Ymin values
+    Xseq <- as.integer(trunc(seq(1, max(X), len=npoints)))
+    Yseq <- as.integer(trunc(seq(1, max(Y), len=npoints)))
+    ## finding Xmin and Ymin values for Xseq and Yseq
+    Xmins <- sapply(Yseq, function(z) rootfun(z, C1, C2, n, m, upper=max(X)))
+    Ymins <- sapply(Xseq, function(z) rootfun(z, C2, C1, m, n, upper=max(Y)))
+
+    ## needed to tweak original set of rules (extreme case reported
+    ## by Richard Telford failed here)
+    if (all(is.na(Xmins)))
+        Xmins[1] <- 1
+    if (all(is.na(Ymins)))
+        Ymins[1] <- 1
+
+    minval <- list(data.frame(x=Xseq[!is.na(Ymins)], y=Ymins[!is.na(Ymins)]),
+        data.frame(x=Xmins[!is.na(Xmins)], y=Yseq[!is.na(Xmins)]))
+
+    ## shared but too rare
+    Ymin <- Ymins[1]
+    Xmin <- Xmins[1]
+    sr <- X < Xmin & Y < Ymin
+
+    ## consequence of manually setting Xmin/Ymin resolved here
+    tmp1 <- if (Xmin==1)
+        list(x=1, y=Xmin) else approx(c(Xmin, 1), c(1, Ymin), xout=1:Xmin)
+    tmp2 <- if (Ymin==1)
+        list(x=1, y=Ymin) else approx(c(1, Ymin), c(Xmin, 1), xout=1:Ymin)
+
+    for (i in 1:S) {
+        if (X[i] %in% tmp1$x)
+            sr[i] <- Y[i] < tmp1$y[which(X[i]==tmp1$x)]
+        if (Y[i] %in% tmp2$x)
+            sr[i] <- X[i] < tmp2$y[which(Y[i]==tmp2$x)]
+    }
+    ## classification
+    a <- ifelse(X==0, 1, X)/n # \hat{p_i}
+    b <- ifelse(Y==0, 1, Y)/m # \hat{\pi_i}
+    specX <- !sr & testfun(a, b, C1, C2, n, m) > 0
+    specY <- !sr & testfun(b, a, C2, C1, m, n) > 0
+    gen <- !sr & !specX & !specY
+    ## crosstable
+    tmp <- ifelse(cbind(gen, specY, specX, sr), 1, 0)
+    types <- c("Generalist", paste("Specialist", glabel[1], sep="_"),
+        paste("Specialist", glabel[2], sep="_"), "Too_rare")
+    classes <- factor((1:4)[rowSums(tmp*col(tmp))], levels=1:4)
+    levels(classes) <- c("Generalist", paste("Specialist", glabel[1], sep="_"),
+        paste("Specialist", glabel[2], sep="_"), "Too_rare")
+    tab <- data.frame(Species=spp, y=Y, x=X, Classes=classes)
+    colnames(tab)[2:3] <- paste("Total", glabel, sep="_")
+    rownames(tab) <- NULL
+    class(tab) <- c("clamtest","data.frame")
+    attr(tab, "settings") <- list(labels = glabel,
+        coverage.limit = coverage.limit, specialization = specialization, 
+        npoints = npoints, alpha = alpha)
+    attr(tab, "minv") <- minval
+    attr(tab, "coverage") <- structure(c(C2, C1), .Names=glabel)
+    tab
+}
diff --git a/R/commsim.R b/R/commsim.R
new file mode 100644
index 0000000..6e38787
--- /dev/null
+++ b/R/commsim.R
@@ -0,0 +1,24 @@
+## this is a function to create a commsim object, does some checks
+## there is a finite number of useful arguments here
+## but I added ... to allow for unforeseen algorithms,
+## or being able to reference to external objects
+commsim <- 
+function(method, fun, binary, isSeq, mode) 
+{
+    fun <- if (!missing(fun))
+        match.fun(fun) else stop("'fun' missing")
+    if (any(!(names(formals(fun)) %in% 
+        c("x", "n", "nr", "nc", "rs", "cs", "rf", "cf", "s", "fill", "thin", "..."))))
+            stop("unexpected arguments in 'fun'")
+    out <- structure(list(method = if (!missing(method))
+            as.character(method)[1L] else stop("'method' missing"),
+        binary = if (!missing(binary))
+            as.logical(binary)[1L] else stop("'binary' missing"),
+        isSeq = if (!missing(isSeq))
+            as.logical(isSeq)[1L] else stop("'isSeq' missing"),
+        mode = if (!missing(mode))
+            match.arg(as.character(mode)[1L],
+            c("integer", "double")) else stop("'mode' missing"),
+        fun = fun), class = "commsim")
+    out
+}
diff --git a/R/commsimulator.R b/R/commsimulator.R
deleted file mode 100644
index 691d801..0000000
--- a/R/commsimulator.R
+++ /dev/null
@@ -1,100 +0,0 @@
-"commsimulator" <-
-function (x, method, thin = 1) 
-{
-    method <- match.arg(method, 
-                        c("r0","r1","r2","r00","c0","swap", "tswap",
-                          "backtrack", "quasiswap"))
-    if (any(x > 1))
-        x <- ifelse(x > 0, 1, 0)
-    nr <- nrow(x)
-    nc <- ncol(x)
-    if (method %in% c("r0", "r1", "r2")) {
-        rs <- rowSums(x)
-        if (method == "r0")
-            p <- rep(1, nc)
-        else
-            p <- colSums(x)
-        if (method == "r2")
-            p <- p*p
-        out <- matrix(0, nrow=nr, ncol=nc)
-        for (i in 1:nr)
-            out[i,sample.int(nc, rs[i], prob=p)] <- 1 
-    }
-    else if (method == "r00") {
-        out <- numeric(nr*nc)
-        out[sample.int(length(out), sum(x))] <- 1
-        dim(out) <- dim(x)
-    }
-    else if (method == "c0") {
-        cs <- colSums(x)
-        out <- matrix(0, nrow=nr, ncol=nc)
-        for (j in 1:nc)
-            out[sample.int(nr, cs[j]), j] <- 1
-    } else if (method == "swap") {
-        x <- as.matrix(x)
-        out <- .C("swap", m = as.integer(x), as.integer(nrow(x)),
-                  as.integer(ncol(x)), as.integer(thin),
-                  PACKAGE = "vegan")$m
-        dim(out) <- dim(x)
-    } else if (method == "tswap") {
-        x <- as.matrix(x)
-        out <- .C("trialswap", m = as.integer(x), as.integer(nrow(x)),
-                  as.integer(ncol(x)), as.integer(thin),
-                  PACKAGE = "vegan")$m
-        dim(out) <- dim(x)
-    } else if (method == "quasiswap") {
-        out <- r2dtable(1, rowSums(x), colSums(x))[[1]]
-        out <- .C("quasiswap", m = as.integer(out), as.integer(nrow(x)),
-                  as.integer(ncol(x)), PACKAGE = "vegan")$m
-        dim(out) <- dim(x)
-    }
-    else if (method == "backtrack") {
-        fill <- sum(x)
-        rs <- rowSums(x) 
-        cs <- colSums(x) 
-        all <- matrix(1:(nr*nc), nrow=nr, ncol=nc)
-        out <- matrix(0, nrow=nr, ncol=nc)
-        free <- matrix(1:(nr*nc), nrow=nr)
-        icount <- numeric(length(rs))
-        jcount <- numeric(length(cs))
-        ## Fill: ordering by cell probabilities
-        prob <- outer(rs, cs, "*") 
-        ij <- sample(free, prob=prob)
-        i <- (ij - 1) %% nr + 1
-        j <- (ij - 1) %/% nr + 1
-        for (k in 1:length(ij)) {
-            if (icount[i[k]] < rs[i[k]] && jcount[j[k]] < cs[j[k]]) {
-            	out[ij[k]] <- 1
-            	icount[i[k]] <- icount[i[k]] + 1
-            	jcount[j[k]] <- jcount[j[k]] + 1
-            }
-        }
-        ## "Backtrack": remove a random presence and fill with !present
-        ndrop <- 1
-        for (i in 1:10000) {
-            oldout <- out
-            oldn <- sum(out)
-            drop <- sample(all[out==1], ndrop)
-            out[drop] <- 0
-            candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0
-            while (sum(candi) > 0) {
-                if (sum(candi) > 1)
-                    ij <- sample(all[candi], 1)
-                else
-                    ij <- all[candi]
-                out[ij] <- 1
-                candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0
-            }
-            if (sum(out) >= fill) break
-            if (oldn >= sum(out))
-                ndrop <- min(ndrop + 1, 4)
-            else
-                ndrop <- 1
-            if (oldn > sum(out))
-                out <- oldout
-        }
-    }
-    colnames(out) <- colnames(x)
-    rownames(out) <- rownames(x)
-    out
-}
diff --git a/R/confint.MOStest.R b/R/confint.MOStest.R
index ecb5aca..c3ccac5 100644
--- a/R/confint.MOStest.R
+++ b/R/confint.MOStest.R
@@ -1,6 +1,5 @@
 `confint.MOStest` <-
     function (object, parm = 1, level = 0.95, ...) 
 {
-    require(MASS) || stop("requires packages MASS")
     confint(profile(object), level = level, ...)
 }
diff --git a/R/contribdiv.R b/R/contribdiv.R
index 84f8093..07134f9 100644
--- a/R/contribdiv.R
+++ b/R/contribdiv.R
@@ -1,52 +1,52 @@
-## Contribution diversity
-## Lu, H.P., H.H. Wagner and X.Y. Chen (2007). 
-## A contribution diversity approach to evaluate species diversity. 
-## Basic and Applied Ecology 8: 1 -12.
-`contribdiv` <-
-    function(comm, index = c("richness", "simpson"), relative = FALSE,
-             scaled = TRUE, drop.zero = FALSE)
-{
-
-    index <- match.arg(index)
-
-    x <- comm[rowSums(comm) > 0, colSums(comm) > 0]
-    n <- nrow(x)
-    S <- ncol(x)
-
-    if (index == "richness") {
-        n.i <- colSums(x > 0)
-        S.k <- rowSums(x > 0)
-        alpha <- S.k / n
-        beta <- apply(x, 1, function(z) sum((n - n.i[z > 0]) / (n * n.i[z > 0])))
-        denom <- 1
-    } else {
-        P.ik <- decostand(x, "total")
-        P.i <- apply(P.ik, 2, function(z) sum(z) / n)
-        P.i2 <- matrix(P.i, n, S, byrow=TRUE)
-        alpha <- diversity(x, "simpson")
-        beta <- rowSums(P.ik * (P.ik - P.i2))
-        denom <- n
-    }
-    gamma <- alpha + beta
-    D <- sum(beta) / sum(gamma)
-    if (relative) {
-        denom <- if (scaled)
-            {denom * sum(gamma)} else 1
-        alpha <- (alpha - mean(alpha)) / denom
-        beta <- (beta - mean(beta)) / denom
-        gamma <- (gamma - mean(gamma)) / denom
-    }
-    rval <- data.frame(alpha = alpha, beta = beta, gamma = gamma)
-    if (!drop.zero && nrow(comm) != n) {
-        nas <- rep(NA, nrow(comm))
-        rval2 <- data.frame(alpha = nas, beta = nas, gamma = nas)
-        rval2[rowSums(comm) > 0, ] <- rval
-        rval <- rval2
-    }
-    attr(rval, "diff.coef") <- D
-    attr(rval, "index") <- index
-    attr(rval, "relative") <- relative
-    attr(rval, "scaled") <- scaled
-    class(rval) <- c("contribdiv", "data.frame")
-    rval
-}
+## Contribution diversity
+## Lu, H.P., H.H. Wagner and X.Y. Chen (2007). 
+## A contribution diversity approach to evaluate species diversity. 
+## Basic and Applied Ecology 8: 1-12.
+`contribdiv` <-
+    function(comm, index = c("richness", "simpson"), relative = FALSE,
+             scaled = TRUE, drop.zero = FALSE)
+{
+
+    index <- match.arg(index)
+
+    x <- comm[rowSums(comm) > 0, colSums(comm) > 0]
+    n <- nrow(x)
+    S <- ncol(x)
+
+    if (index == "richness") {
+        n.i <- colSums(x > 0)
+        S.k <- rowSums(x > 0)
+        alpha <- S.k / n
+        beta <- apply(x, 1, function(z) sum((n - n.i[z > 0]) / (n * n.i[z > 0])))
+        denom <- 1
+    } else {
+        P.ik <- decostand(x, "total")
+        P.i <- apply(P.ik, 2, function(z) sum(z) / n)
+        P.i2 <- matrix(P.i, n, S, byrow=TRUE)
+        alpha <- diversity(x, "simpson")
+        beta <- rowSums(P.ik * (P.ik - P.i2))
+        denom <- n
+    }
+    gamma <- alpha + beta
+    D <- sum(beta) / sum(gamma)
+    if (relative) {
+        denom <- if (scaled)
+            {denom * sum(gamma)} else 1
+        alpha <- (alpha - mean(alpha)) / denom
+        beta <- (beta - mean(beta)) / denom
+        gamma <- (gamma - mean(gamma)) / denom
+    }
+    rval <- data.frame(alpha = alpha, beta = beta, gamma = gamma)
+    if (!drop.zero && nrow(comm) != n) {
+        nas <- rep(NA, nrow(comm))
+        rval2 <- data.frame(alpha = nas, beta = nas, gamma = nas)
+        rval2[rowSums(comm) > 0, ] <- rval
+        rval <- rval2
+    }
+    attr(rval, "diff.coef") <- D
+    attr(rval, "index") <- index
+    attr(rval, "relative") <- relative
+    attr(rval, "scaled") <- scaled
+    class(rval) <- c("contribdiv", "data.frame")
+    rval
+}
diff --git a/R/decorana.R b/R/decorana.R
index 2664368..1afb43b 100644
--- a/R/decorana.R
+++ b/R/decorana.R
@@ -8,11 +8,11 @@
     ZEROEIG <- 1e-7 # consider as zero eigenvalue
     veg <- as.matrix(veg)
     if (any(rowSums(veg) <= 0)) 
-        stop("All row sums must be >0 in the community matrix: remove empty sites.")
+        stop("all row sums must be >0 in the community matrix: remove empty sites")
     if (any(veg < 0))
         stop("'decorana' cannot handle negative data entries")
     if (any(colSums(veg) <= 0)) 
-        warning("Some species were removed because they were missing in the data.")
+        warning("some species were removed because they were missing in the data")
     nr <- nrow(veg)
     nc <- ncol(veg)
     mk <- mk + 4
diff --git a/R/density.anosim.R b/R/density.anosim.R
deleted file mode 100644
index a84b3e5..0000000
--- a/R/density.anosim.R
+++ /dev/null
@@ -1,135 +0,0 @@
-### density & densityplot methods for vegan functions returning
-### statistics from permuted/simulated data. These are modelled after
-### density.oecosimu and densityplot.oecosimu (which are in their
-### separate files).
-
-## anosim
-
-`density.anosim` <-
-    function(x, ...)
-{
-    obs <- x$statistic
-    ## Put observed statistic among permutations
-    out <- density(c(obs, x$perm), ...)
-    out$call <- match.call()
-    out$observed <- obs
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## adonis can return a matrix of terms, hence we also have densityplot()
-
-`density.adonis` <-
-    function(x, ...)
-{
-    cols <- ncol(x$f.perms)
-    if (cols > 1)
-        warning("'density' is meaningful only with one term, you have ", cols)
-    obs <- x$aov.tab$F.Model
-    obs <- obs[!is.na(obs)]
-    out <- density(c(obs, x$f.perms), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-`densityplot.adonis` <-
-    function(x, data, xlab = "Null", ...)
-{
-    obs <- x$aov.tab$F.Model
-    obs <- obs[!is.na(obs)]
-    sim <- rbind(obs, x$f.perms)
-    nm <- rownames(x$aov.tab)[col(sim)]
-    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
-                xlab = xlab,
-                panel = function(x, ...) {
-                    panel.densityplot(x, ...)
-                    panel.abline(v = obs[panel.number()], ...)
-                },
-                ...)
-}
-
-## mantel
-
-`density.mantel` <-
-    function(x, ...)
-{
-    obs <- x$statistic
-    out <- density(c(obs, x$perm), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## mrpp
-
-`density.mrpp` <-
-    function(x, ...)
-{
-    obs <- x$delta
-    out <- density(c(obs, x$boot.deltas), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## anova.cca does not return permutation results, but permutest.cca
-## does. However, permutest.cca always finds only one statistic. Full
-## tables anova.cca are found by repeated calls to permutest.cca.
-
-`density.permutest.cca` <-
-    function(x, ...)
-{
-    obs <- x$F.0
-    out <- density(c(obs, x$F.perm), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-## protest
-
-`density.protest` <-
-    function(x, ...)
-{
-    obs <- x$t0
-    out <- density(c(obs, x$t), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
-
-#### plot method: the following copies stats::plot.density() code but
-#### adds one new argument to draw abline(v=...) for the observed
-#### statistic
-
-`plot.vegandensity` <-
-    function (x, main = NULL, xlab = NULL, ylab = "Density", type = "l", 
-    zero.line = TRUE, obs.line = TRUE, ...) 
-{
-    if (is.null(xlab)) 
-        xlab <- paste("N =", x$n, "  Bandwidth =", formatC(x$bw))
-    if (is.null(main)) 
-        main <- deparse(x$call)
-    ## change obs.line to col=2 (red) if it was logical TRUE
-    if (isTRUE(obs.line))
-        obs.line <- 2
-    plot.default(x, main = main, xlab = xlab, ylab = ylab, type = type,
-                 ...)
-    if (zero.line) 
-        abline(h = 0, lwd = 0.1, col = "gray")
-    if (is.character(obs.line) || obs.line)
-        abline(v = x$observed, col = obs.line)
-    invisible(NULL)
-}
diff --git a/R/density.oecosimu.R b/R/density.oecosimu.R
deleted file mode 100644
index 640df4c..0000000
--- a/R/density.oecosimu.R
+++ /dev/null
@@ -1,14 +0,0 @@
-`density.oecosimu` <-
-    function(x, ...)
-{
-    cols <- nrow(x$oecosimu$simulated)
-    if (cols > 1)
-        warning("'density' is meaningful only with one statistic, you have ", cols)
-    obs <- x$oecosimu$statistic
-    out <- density(rbind(obs, t(x$oecosimu$simulated)), ...)
-    out$observed <- obs
-    out$call <- match.call()
-    out$call[[1]] <- as.name("density")
-    class(out) <- c("vegandensity", class(out))
-    out
-}
diff --git a/R/densityplot.oecosimu.R b/R/densityplot.oecosimu.R
deleted file mode 100644
index 95b53be..0000000
--- a/R/densityplot.oecosimu.R
+++ /dev/null
@@ -1,14 +0,0 @@
-`densityplot.oecosimu` <-
-    function(x, data, xlab = "Simulated", ...)
-{
-    obs <- x$oecosimu$statistic
-    sim <- rbind(obs, t(x$oecosimu$simulated))
-    nm <- names(obs)[col(sim)]
-    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
-                xlab = xlab,
-                panel = function(x, ...) {
-                    panel.densityplot(x, ...)
-                    panel.abline(v = obs[panel.number()], ...)
-                },
-                ...)
-}
diff --git a/R/dispweight.R b/R/dispweight.R
new file mode 100644
index 0000000..29044ee
--- /dev/null
+++ b/R/dispweight.R
@@ -0,0 +1,59 @@
+`dispweight` <-
+    function(comm, groups, nsimul = 999, nullmodel = "c0_ind",
+             plimit = 0.05)
+{
+    ## no groups?
+    if (missing(groups))
+        groups <- rep(1, nrow(comm))
+    ## Remove empty levels of 'groups' or this fails cryptically (and
+    ## take care 'groups' is a factor)
+    groups <- factor(groups)
+    ## Statistic is the sum of squared differences by 'groups'
+    means <- apply(comm, 2, function(x) tapply(x, groups, mean))
+    ## handle 1-level factors: all sites belong to the same 'groups'
+    if (is.null(dim(means)))
+        means <- matrix(means, nrow=1, ncol = length(means),
+                        dimnames = list(levels(groups), names(means)))
+    ## expand to matrix of species means
+    fitted <- means[groups,]
+    dhat <- colSums((comm - fitted)^2/fitted, na.rm = TRUE)
+    ## Get df for non-zero blocks of species. Completely ignoring
+    ## all-zero blocks for species sounds strange, but was done in the
+    ## original paper, and we follow here. However, this was not done
+    ## for significance tests, and only concerns 'D' and 'weights'.
+    nreps <- table(groups)
+    div <- colSums(sweep(means > 0, 1, nreps - 1, "*"))
+    ## "significance" of overdispersion is assessed from Chi-square
+    ## evaluated separately for each species. This means fixing only
+    ## marginal totals for species but letting row marginals vary
+    ## freely, unlike in standard Chi-square where both margins are
+    ## fixed. In vegan this is achieved by nullmodel 'c0_ind'. Instead
+    ## of one overall simulation, nullmodel is generated separately
+    ## for each of 'groups'
+    chisq <- function(x) {
+        fitted <- colMeans(x)
+        colSums(sweep(x, 2, fitted)^2, na.rm = TRUE) / fitted
+    }
+    simulated <- matrix(0, nrow = ncol(comm), ncol = nsimul)
+    for (lev in levels(groups)) {
+        nm <- nullmodel(comm[groups == lev,], nullmodel)
+        if (nm$commsim$binary)
+            stop("'binary' nullmodel cannot be used")
+        tmp <- apply(simulate(nm, nsimul), 3, chisq)
+        ok <- !is.na(tmp)
+        simulated[ok] <- simulated[ok] + tmp[ok] 
+    }
+    ## p value based on raw dhat, then we divide
+    p <- (rowSums(dhat <= simulated) + 1) / (nsimul + 1)
+    dhat <- dhat/div
+    weights <- ifelse(p <= plimit, 1/dhat, 1)
+    comm <- sweep(comm, 2, weights, "*")
+    attr(comm, "D") <- dhat
+    attr(comm, "df") <- div
+    attr(comm, "p") <- p
+    attr(comm, "weights") <-  weights
+    attr(comm, "nsimul") <- nsimul
+    attr(comm, "nullmodel") <- nullmodel
+    class(comm) <- c("dispweight", class(comm))
+    comm
+}
diff --git a/R/drop1.cca.R b/R/drop1.cca.R
index 23ba23c..0dbb093 100644
--- a/R/drop1.cca.R
+++ b/R/drop1.cca.R
@@ -1,6 +1,6 @@
 `drop1.cca` <-
     function(object, scope, test = c("none", "permutation"),
-             pstep = 100, perm.max = 200, ...)
+             permutations = how(nperm = 199), ...)
 {
     if (inherits(object, "prc"))
         stop("'step'/'drop1' cannot be used for 'prc' objects")
@@ -13,10 +13,10 @@
             scope <- rn
         else if (!is.character(scope))
             scope <- drop.scope(scope)
-        adds <- anova(object, by = "margin", step = pstep,
-                      perm.max = perm.max, scope = scope, ...)
+        adds <- anova(object, by = "margin", scope = scope,
+                      permutations = permutations, ...)
         nr <- nrow(adds)
-        out <- cbind(out, rbind(NA, adds[rn,3:5]))
+        out <- cbind(out, rbind(NA, adds[rn,3:4]))
         class(out) <- cl
     }
     out
diff --git a/R/envfit.default.R b/R/envfit.default.R
index cae6342..c1d2527 100644
--- a/R/envfit.default.R
+++ b/R/envfit.default.R
@@ -1,5 +1,5 @@
 `envfit.default` <-
-    function (ord, env, permutations = 999, strata, choices = c(1, 2), 
+    function (ord, env, permutations = 999, strata = NULL, choices = c(1, 2), 
              display = "sites", w = weights(ord), na.rm = FALSE, ...) 
 {
     weights.default <- function(object, ...) NULL
@@ -16,30 +16,26 @@
         env <- droplevels(env[keep,, drop=FALSE]) ## drop any lost levels
         na.action <- structure(seq_along(keep)[!keep], class="omit")
     }
+    ## make permutation matrix for all variables handled in the next loop
+    nr <- nrow(X)
+    permat <-  getPermuteMatrix(permutations, nr, strata = strata)
+    if (ncol(permat) != nr)
+        stop(gettextf("'permutations' have %d columns, but data have %d rows",
+                      ncol(permat), nr))
+
     if (is.data.frame(env)) {
         vects <- sapply(env, is.numeric)
         if (any(!vects)) {  # have factors
             Pfac <- env[, !vects, drop = FALSE]
             P <- env[, vects, drop = FALSE]
-            if (length(P)) {
-                if (permutations) {
-                    if (!exists(".Random.seed", envir = .GlobalEnv, 
-                                inherits = FALSE)) {
-                        runif(1)
-                    }
-                    seed <- get(".Random.seed", envir = .GlobalEnv, 
-                                inherits = FALSE)
-                }
+            if (length(P)) { # also have vectors
                 vectors <- vectorfit(X, P, permutations, strata, 
                                      choices, w = w, ...)
             }
-            if (!is.null(seed)) {
-                assign(".Random.seed", seed, envir = .GlobalEnv)
-            }
             factors <- factorfit(X, Pfac, permutations, strata,
-                                 choices, w = w, ...)
+                                         choices, w = w, ...)
             sol <- list(vector = vectors, factors = factors)
-        }
+            }
         else vectors <- vectorfit(X, env, permutations, strata, 
                                   choices, w = w, ...)
     }
diff --git a/R/envfit.formula.R b/R/envfit.formula.R
index 8a9748f..684d5da 100644
--- a/R/envfit.formula.R
+++ b/R/envfit.formula.R
@@ -7,5 +7,5 @@
     X <- eval(X, data, parent.frame())
     formula[[2]] <- NULL
     P <- model.frame(formula, data, na.action = na.pass)
-    envfit(X, P, ...)
+    envfit.default(X, P, ...)
 }
diff --git a/R/estimateR.default.R b/R/estimateR.default.R
index 193f67b..5205a46 100644
--- a/R/estimateR.default.R
+++ b/R/estimateR.default.R
@@ -28,8 +28,9 @@
     }
     if (!identical(all.equal(x, round(x)), TRUE)) 
         stop("function accepts only integers (counts)")
-    freq <- x[x > 0]
     X <- x[x > 0]
+    N <- sum(X)
+    SSC <- 1 # (N-1)/N # do NOT use small-sample correction
     T.X <- table(X)
     S.obs <- length(X)
     S.rare <- sum(T.X[as.numeric(names(T.X)) <= 10])
@@ -41,9 +42,28 @@
     }
     a <- sapply(i, COUNT, X)
     G <- a[1]/a[2]
-    S.Chao1 <- S.obs + a[1] * (a[1] - 1) / (a[2] + 1)/ 2
+    ## EstimateS uses basic Chao only if a[2] > 0, and switches to
+    ## bias-corrected version only if a[2] == 0. However, we always
+    ## use bias-corrected form. The switching code is commented out so
+    ## that it is easy to put back.
+
+    ##if (a[2] > 0)
+    ##    S.Chao1 <- S.obs + SSC * a[1]^2/2/a[2]
+    ##else if (a[1] > 0)
+    ##
+    S.Chao1 <- S.obs + SSC * a[1]*(a[1]-1) / (a[2]+1)/2
+    ##else
+    ##    S.Chao1 <- S.obs
     Deriv.Ch1 <- gradF(a, i)
-    sd.Chao1 <- sqrt(a[2] * ((G^4)/4 + G^3 + (G^2)/2))
+    ##if (a[2] > 0)
+    ##    sd.Chao1 <- sqrt(a[2] * (SSC * (SSC * (G^4/4 + G^3) + G^2/2)))
+    ##else if (a[1] > 0)
+    sd.Chao1 <-
+        sqrt(SSC*(a[1]*(a[1]-1)/2/(a[2]+1) +
+                  SSC*(a[1]*(2*a[1]-1)^2/4/(a[2]+1)^2 +
+                       a[1]^2*a[2]*(a[1]-1)^2/4/(a[2]+1)^4)))
+    ##else
+    ##    sd.Chao1 <- 0
     C.ace <- 1 - a[1]/N.rare
     i <- 1:length(a)
     thing <- i * (i - 1) * a
diff --git a/R/eventstar.R b/R/eventstar.R
index 3963308..11137e6 100644
--- a/R/eventstar.R
+++ b/R/eventstar.R
@@ -1,18 +1,18 @@
-eventstar <- function(x, qmax=5) {
-    if (is.null(dim(x)))
-        x <- matrix(x, 1, length(x))
-    lossfun <- function(q, x)
-        tsallis(x, scales=q, norm=TRUE)
-    qstarfun <- function(x) {
-        optimize(lossfun, interval=c(0, qmax), x=x)$minimum
-    }
-    qs <- apply(x, 1, qstarfun)
-    Hs <- sapply(1:nrow(x), function(i) tsallis(x[i,], 
-        scales=qs[i], hill=FALSE))
-    S <- rowSums(x)
-    Es <- ifelse(qs==1, log(S), Hs/((S^(1-qs)-1)/(1-qs)))
-    Ds <- (1-(qs-1)*Hs)^(1/(1-qs))
-    out <- data.frame(qstar=qs, Estar=Es, Hstar=Hs, Dstar=Ds)
-    rownames(out) <- rownames(x)
-    out
-}
+eventstar <- function(x, qmax=5) {
+    if (is.null(dim(x)))
+        x <- matrix(x, 1, length(x))
+    lossfun <- function(q, x)
+        tsallis(x, scales=q, norm=TRUE)
+    qstarfun <- function(x) {
+        optimize(lossfun, interval=c(0, qmax), x=x)$minimum
+    }
+    qs <- apply(x, 1, qstarfun)
+    Hs <- sapply(1:nrow(x), function(i) tsallis(x[i,], 
+        scales=qs[i], hill=FALSE))
+    S <- rowSums(x)
+    Es <- ifelse(qs==1, log(S), Hs/((S^(1-qs)-1)/(1-qs)))
+    Ds <- (1-(qs-1)*Hs)^(1/(1-qs))
+    out <- data.frame(qstar=qs, Estar=Es, Hstar=Hs, Dstar=Ds)
+    rownames(out) <- rownames(x)
+    out
+}
diff --git a/R/factorfit.R b/R/factorfit.R
index cca6a76..f93d3ce 100644
--- a/R/factorfit.R
+++ b/R/factorfit.R
@@ -1,5 +1,5 @@
-"factorfit" <-
-    function (X, P, permutations = 0, strata, w,  ...) 
+`factorfit` <-
+    function (X, P, permutations = 0, strata = NULL, w,  ...) 
 {
     P <- as.data.frame(P)
     ## Check that all variables are factors, and coerce if necessary
@@ -18,16 +18,20 @@
         w <- rep(w, NR)
     r <- NULL
     pval <- NULL
-    totvar <- .C("goffactor", as.double(X), as.integer(rep(0, 
-                                                           NR)), as.double(w), as.integer(NR), as.integer(NC), as.integer(1), 
+    totvar <- .C("goffactor", as.double(X), as.integer(rep(0, NR)),
+                 as.double(w), as.integer(NR), as.integer(NC), as.integer(1), 
                  double(1), double(1), double(1), var = double(1), PACKAGE = "vegan")$var
     sol <- centroids.cca(X, P, w)
     var.id <- rep(names(P), sapply(P, nlevels))
+    ## make permutation matrix for all variables handled in the next loop
+    permat <- getPermuteMatrix(permutations, NR, strata = strata)
+    permutations <- nrow(permat)
+
     for (i in 1:length(P)) {
         A <- as.integer(P[[i]])
         NL <- nlevels(P[[i]])
-        invar <- .C("goffactor", as.double(X), as.integer(A - 
-                                                          1), as.double(w), as.integer(NR), as.integer(NC), 
+        invar <- .C("goffactor", as.double(X), as.integer(A - 1), as.double(w),
+                    as.integer(NR), as.integer(NC), 
                     as.integer(NL), double(NL), double(NL), double(NL), 
                     var = double(1), PACKAGE = "vegan")$var
         r.this <- 1 - invar/totvar
@@ -35,16 +39,17 @@
         if (permutations) {
             A <- as.integer(P[[i]])
             NL <- nlevels(P[[i]])
-            tmp <- rep(NA, permutations)
-            for (i in 1:permutations) {
-                indx <- permuted.index(length(A), strata)
+            ptest <- function(indx, ...) {
                 take <- A[indx]
-                invar <- .C("goffactor", as.double(X), as.integer(take - 
-                                                                  1), as.double(w), as.integer(NR), as.integer(NC), 
+                invar <- .C("goffactor", as.double(X),
+                            as.integer(take -  1), as.double(w),
+                            as.integer(NR), as.integer(NC), 
                             as.integer(NL), double(NL), double(NL), double(NL), 
                             var = double(1), PACKAGE = "vegan")$var
-                tmp[i] <- 1 - invar/totvar
+                1 - invar/totvar
             }
+            tmp <- sapply(1:permutations,
+                          function(indx,...) ptest(permat[indx,], ...))
             pval.this <- (sum(tmp >= r.this) + 1)/(permutations + 1)
             pval <- c(pval, pval.this)
         }
@@ -57,10 +62,7 @@
         names(pval) <- names(P)
     out <- list(centroids = sol, r = r, permutations = permutations, 
                 pvals = pval, var.id = var.id)
-    if (!missing(strata)) {
-        out$strata <- deparse(substitute(strata))
-        out$stratum.values <- strata
-    }
+    out$control <- attr(permat, "control")
     class(out) <- "factorfit"
     out
 }
diff --git a/R/fitspecaccum.R b/R/fitspecaccum.R
index e7bc7f3..bed0f02 100644
--- a/R/fitspecaccum.R
+++ b/R/fitspecaccum.R
@@ -15,6 +15,7 @@ fitspecaccum <-
         x <- object$individuals
     else
         x <- object$sites
+    hasWeights <- !is.null(object$weights)
     NLSFUN <- function(y, x, model, ...) {
         switch(model,
         "arrhenius" = nls(y ~ SSarrhenius(x, k, z),  ...),
@@ -28,7 +29,10 @@ fitspecaccum <-
         "weibull" = nls(y ~ SSweibull(x, Asym, Drop, lrc, par), ...))
     }
     mods <- lapply(seq_len(NCOL(SpeciesRichness)),
-                  function(i, ...) NLSFUN(SpeciesRichness[,i], x, model, ...))
+                  function(i, ...)
+                   NLSFUN(SpeciesRichness[,i],
+                          if (hasWeights) object$weights[,i] else x,
+                          model, ...), ...)
     object$fitted <- drop(sapply(mods, fitted))
     object$residuals <- drop(sapply(mods, residuals))
     object$coefficients <- drop(sapply(mods, coef))
@@ -44,8 +48,22 @@ fitspecaccum <-
     function(x, col = par("fg"), lty = 1, 
              xlab = "Sites", ylab = x$method, ...)
 {
-    fv <- fitted(x)
+    if (is.null(x$weights))
+        fv <- fitted(x)
+    else
+        fv <- sapply(x$models, predict, newdata = list(x = x$effort))
     matplot(x$sites, fv, col = col, lty = lty, pch = NA,
             xlab = xlab, ylab = ylab, type = "l", ...)
     invisible()
 }
+
+`lines.fitspecaccum` <-
+    function(x, col = par("fg"), lty = 1, ...)
+{
+    if (is.null(x$weights))
+        fv <- fitted(x)
+    else
+        fv <- sapply(x$models, predict, newdata= list(x = x$effort))
+    matlines(x$sites, fv, col = col, lty = lty, pch = NA, type = "l", ...)
+    invisible()
+}
diff --git a/R/fitted.radfit.R b/R/fitted.radfit.R
index 958f4a7..4b0a746 100644
--- a/R/fitted.radfit.R
+++ b/R/fitted.radfit.R
@@ -1,7 +1,13 @@
 `fitted.radfit` <-
     function(object, ...)
 {
-    sapply(object$models, fitted)
+    out <- sapply(object$models, fitted)
+    if (!length(object$y))
+        out <- numeric(length(object$models))
+    if (length(object$y) <= 1) 
+        out <- structure(as.vector(out), dim = c(1, length(object$models)),
+                         dimnames = list(names(object$y), names(object$models)))
+    out
 }
 
 `fitted.radfit.frame` <-
diff --git a/R/gdispweight.R b/R/gdispweight.R
new file mode 100644
index 0000000..df94062
--- /dev/null
+++ b/R/gdispweight.R
@@ -0,0 +1,52 @@
+### Clarke's dispweight is based on the hypothesis that count data
+### should follow Poisson distribution, and species overdispersed to
+### the Poisson should be downweighted. The basic model assesses the
+### expected values of species and their overdispersion with respect to class
+### means for a single factor and then estimates the significance of
+### the overdispersion using individual-based simulation within these
+### same classes. Function gdispweight generalizes this by allowing a
+### formula that specifies any fitted model, but estimates the
+### significance of the overdispersion analytically from Pearson
+### residuals.
+
+`gdispweight` <-
+    function(formula, data, plimit = 0.05)
+{
+    ## We do not handle missing values (yet?)
+    op <- options(na.action = "na.fail")
+    on.exit(op)
+    ## extract response data
+    comm <- eval(formula[[2]])
+    ## extract rhs
+    if (missing(data))
+        data <- environment(formula)
+    x <- model.matrix(delete.response(terms(formula, data = data)),
+                      data = data)
+    ## Quasi-Poisson
+    family <- quasipoisson()
+    V <- family$variance
+    ## fit models to all species separately and extract results
+    mods <- lapply(comm, function(y) glm.fit(x, y, family = family))
+    y <- sapply(mods, '[[', "y")
+    mu <- sapply(mods, fitted)
+    wts <- sapply(mods, '[[',  "prior.weights")
+    res <- (y-mu) * sqrt(wts) / sqrt(V(mu))
+    df <- sapply(mods, df.residual)
+    ## the same stats as in Clarke's original, but parametrically
+    stat <- colSums(res^2)
+    p <- pchisq(stat, df, lower.tail = FALSE)
+    dhat <- stat/df
+    w <- ifelse(p < plimit, 1/dhat, 1)
+    ## do not upweight underdispersed species
+    w <- ifelse(w > 1, 1, w)
+    ## done
+    comm <- sweep(comm, 2, w, "*")
+    class(comm) <- c("dispweight", class(comm))
+    attr(comm, "D") <- dhat
+    attr(comm, "df") <- df
+    attr(comm, "p") <- p
+    attr(comm, "weights") <- w
+    attr(comm, "nsimul") <- NA
+    attr(comm, "nullmodel") <- NA
+    comm
+}
diff --git a/R/getPermuteMatrix.R b/R/getPermuteMatrix.R
new file mode 100644
index 0000000..4909608
--- /dev/null
+++ b/R/getPermuteMatrix.R
@@ -0,0 +1,33 @@
+### Interface to the permute package
+
+### input can be (1) a single number giving the number of
+### permutations, (2) a how() structure for control parameter in
+### permute::shuffleSet, or (3) a permutation matrix which is returned
+### as is. In addition, there can be a 'strata' argument which will
+### modify case (1). The number of shuffled items must be given in 'N'.
+
+`getPermuteMatrix` <-
+    function(perm, N,  strata = NULL)
+{
+    ## 'perm' is either a single number, a how() structure or a
+    ## permutation matrix
+    if (length(perm) == 1) {
+        perm <- how(nperm = perm) 
+    }
+    ## apply 'strata', but only if possible: ignore silently other cases
+    if (!missing(strata) && !is.null(strata)) {
+        if (inherits(perm, "how") && is.null(getBlocks(perm)))
+            setBlocks(perm) <- strata
+    }
+    ## now 'perm' is either a how() or a matrix
+    if (inherits(perm, "how"))
+        perm <- shuffleSet(N, control = perm)
+    ## now 'perm' is a matrix (or always was). If it is a plain
+    ## matrix, set minimal attributes for printing. This is a dirty
+    ## kluge: should be handled more cleanly.
+    if (is.null(attr(perm, "control")))
+        attr(perm, "control") <-
+            structure(list(within=list(type="supplied matrix"),
+                           nperm = nrow(perm)), class = "how")
+    perm
+}
diff --git a/R/goodness.metaMDS.R b/R/goodness.metaMDS.R
index 47e362d..a290dc2 100644
--- a/R/goodness.metaMDS.R
+++ b/R/goodness.metaMDS.R
@@ -3,7 +3,6 @@
 {
     if (inherits(object, "monoMDS"))
         return(NextMethod("goodness", object, ...))
-    require(MASS) || stop("Needs MASS package")
     if (missing(dis))
         dis <- metaMDSredist(object)
     if(attr(dis, "Size") != nrow(object$points))
diff --git a/R/hiersimu.default.R b/R/hiersimu.default.R
index ce07bed..57ccccf 100644
--- a/R/hiersimu.default.R
+++ b/R/hiersimu.default.R
@@ -32,7 +32,7 @@ relative = FALSE, drop.highest = FALSE, nsimul=99, ...)
     rval <- as.data.frame(rval[rev(1:length(rval))])
     l2 <- sapply(rval, function(z) length(unique(z)))
     if (any(l1 != l2))
-        warning("levels are not perfectly nested")
+        stop("levels are not perfectly nested")
 
     ## aggregate response matrix
     fullgamma <-if (nlevels(rhs[,nlevs]) == 1)
diff --git a/R/howHead.R b/R/howHead.R
new file mode 100644
index 0000000..ff5735a
--- /dev/null
+++ b/R/howHead.R
@@ -0,0 +1,51 @@
+### Make a compact summary of permutations. This copies Gav Simpson's
+### permute:::print.how, but only displays non-default choices in how().
+`howHead` <- function(x, ...)
+{
+    ## print nothing if this is not 'how'
+    if (is.null(x) || !inherits(x, "how"))
+        return()
+    ## collect header
+    head <- NULL
+    ## blocks
+    if (!is.null(getBlocks(x))) 
+        head <- paste0(head, paste("Blocks: ", x$blocks.name, "\n"))
+    ## plots
+    plotStr <- getStrata(x, which = "plots")
+    if (!is.null(plotStr)) {
+        plots <- getPlots(x)
+        ptype <- getType(x, which = "plots")
+        head <- paste0(head, paste0("Plots: ", plots$plots.name, ", "))
+        head <- paste0(head, paste("plot permutation:", ptype))
+        if(getMirror(x, which = "plots") == "Yes")
+            head <- paste(head, "mirrored")
+        if (isTRUE(all.equal(ptype, "grid"))) {
+            nr <- getRow(x, which = "plots")
+            nc <- getCol(x, which = "plots")
+            head <- paste0(head, sprintf(ngettext(nr, " %d row", " %d rows"),
+                                        nr))
+            head <- paste0(head, sprintf(ngettext(nc, " %d column",
+                                                 " %d columns"), nc))
+        }
+        head <- paste0(head, "\n")
+    }
+    ## the fine level (within plots if any)
+    type <- getType(x, which = "within")
+    head <- paste0(head, "Permutation: ", type)
+    if (isTRUE(type %in% c("series", "grid"))) {
+        if(getMirror(x, which = "within") == "Yes")
+            head <- paste(head, "mirrored")
+        if(getConstant(x) == "Yes")
+            head <- paste0(head, " constant permutation within each Plot")
+    }
+    if (isTRUE(all.equal(type, "grid"))) {
+        nr <- getRow(x, which = "plots")
+        nc <- getCol(x, which = "plots")
+        head <- paste0(head, sprintf(ngettext(nr, " %d row", " %d rows"),
+                                    nr))
+        head <- paste0(head, sprintf(ngettext(nc, " %d column",
+                                             " %d columns"), nc))
+    }
+    head <- paste0(head, "\nNumber of permutations: ", getNperm(x),  "\n")
+    head
+}
diff --git a/R/indpower.R b/R/indpower.R
index 415be23..0e774b0 100644
--- a/R/indpower.R
+++ b/R/indpower.R
@@ -1,25 +1,25 @@
-indpower <-
-function(x, type=0)
-{
-    x <- as.matrix(x)
-    x <- ifelse(x > 0, 1, 0)
-    if (NCOL(x) < 2)
-        stop("provide at least 2 columns for 'x'")
-    if (!(type %in% 0:2))
-        stop("'type' must be in c(0, 1, 2)")
-    n <- nrow(x)
-    j <- crossprod(x) ## faster t(x) %*% x
-    ip1 <- sweep(j, 1, diag(j), "/")
-    ip2 <- 1 - sweep(-sweep(j, 2, diag(j), "-"), 1, n - diag(j), "/")
-    out <- switch(as.character(type),
-        "0" = sqrt(ip1 * ip2),
-        "1" = ip1,
-        "2" = ip2)
-    cn <- if (is.null(colnames(out)))
-        1:ncol(out) else colnames(out)
-    rn <- if (is.null(rownames(out)))
-        1:ncol(out) else rownames(out)
-    colnames(out) <- paste("t", cn, sep=".")
-    rownames(out) <- paste("i", rn, sep=".")
-    out
-}
+indpower <-
+function(x, type=0)
+{
+    x <- as.matrix(x)
+    x <- ifelse(x > 0, 1, 0)
+    if (NCOL(x) < 2)
+        stop("provide at least 2 columns for 'x'")
+    if (!(type %in% 0:2))
+        stop("'type' must be in c(0, 1, 2)")
+    n <- nrow(x)
+    j <- crossprod(x) ## faster t(x) %*% x
+    ip1 <- sweep(j, 1, diag(j), "/")
+    ip2 <- 1 - sweep(-sweep(j, 2, diag(j), "-"), 1, n - diag(j), "/")
+    out <- switch(as.character(type),
+        "0" = sqrt(ip1 * ip2),
+        "1" = ip1,
+        "2" = ip2)
+    cn <- if (is.null(colnames(out)))
+        1:ncol(out) else colnames(out)
+    rn <- if (is.null(rownames(out)))
+        1:ncol(out) else rownames(out)
+    colnames(out) <- paste("t", cn, sep=".")
+    rownames(out) <- paste("i", rn, sep=".")
+    out
+}
diff --git a/R/make.commsim.R b/R/make.commsim.R
new file mode 100644
index 0000000..814576d
--- /dev/null
+++ b/R/make.commsim.R
@@ -0,0 +1,436 @@
+## this lists all known algos in vegan and more
+## if method is commsim object, it is returned
+## if it is character, switch returns the right one, else stop with error
+## so it can be used instead of match.arg(method) in other functions
+## NOTE: very very long -- but it can be a central repository of algos
+## NOTE 2: storage mode coercions are avoided here
+## (with no apparent effect on speed), it should be 
+## handled by nullmodel and commsim characteristics
+make.commsim <- 
+function(method)
+{
+    algos <- list(
+        "r00" = commsim(method="r00", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- matrix(0L, nr * nc, n)
+            for (k in seq_len(n))
+                out[sample.int(nr * nc, s), k] <- 1L
+            dim(out) <- c(nr, nc, n)
+            out
+        }),
+        "c0" = commsim(method="c0", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            J <- seq_len(nc)
+            for (k in seq_len(n))
+                for (j in J)
+                    out[sample.int(nr, cs[j]), j, k] <- 1L
+            out
+        }),
+        "r0" = commsim(method="r0", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            I <- seq_len(nr)
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, sample.int(nc, rs[i]), k] <- 1L
+            out
+        }),
+        "r0_old" = commsim(method="r0_old", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            I <- seq_len(nr)
+            p <- rep(1, nc)
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, sample.int(nc, rs[i], prob = p), k] <- 1L
+            out
+        }),
+        "r1" = commsim(method="r1", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            I <- seq_len(nr)
+            storage.mode(cs) <- "double"
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, sample.int(nc, rs[i], prob=cs), k] <- 1L
+            out
+        }),
+        "r2" = commsim(method="r2", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            p <- cs * cs
+            I <- seq_len(nr)
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, sample.int(nc, rs[i], prob=p), k] <- 1L
+            out
+        }),
+        "quasiswap" = commsim(method="quasiswap", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            for (k in seq_len(n))
+                out[,,k] <- .C("quasiswap", 
+                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+            out
+        }),
+        "swap" = commsim(method="swap", binary=TRUE, isSeq=TRUE, 
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            out[,,1] <- .C("swap", 
+                m = x, nr, nc, thin, PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("swap", 
+                    m = out[,,k], nr, nc, thin, 
+                    PACKAGE = "vegan")$m
+            out
+        }),
+        "tswap" = commsim(method="tswap", binary=TRUE, isSeq=TRUE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            out[,,1] <- .C("trialswap", 
+                m = x, nr, nc, thin, PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("trialswap", 
+                    m = out[,,k], nr, nc, thin, PACKAGE = "vegan")$m
+            out
+        }),
+        "backtrack" = commsim(method="backtrack", binary=TRUE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            btrfun <- function() {
+                all <- matrix(as.integer(1:(nr * nc)), nrow = nr, ncol = nc)
+                out <- matrix(0L, nrow = nr, ncol = nc)
+                free <- matrix(as.integer(1:(nr * nc)), nrow = nr)
+                icount <- integer(length(rs))
+                jcount <- integer(length(cs))
+                prob <- outer(rs, cs, "*")
+                ij <- sample(free, prob = prob)
+                i <- (ij - 1)%%nr + 1
+                j <- (ij - 1)%/%nr + 1
+                for (k in 1:length(ij)) {
+                    if (icount[i[k]] < rs[i[k]] && jcount[j[k]] < cs[j[k]]) {
+                        out[ij[k]] <- 1L
+                        icount[i[k]] <- icount[i[k]] + 1L
+                        jcount[j[k]] <- jcount[j[k]] + 1L
+                    }
+                }
+                ndrop <- 1
+                for (i in 1:10000) {
+                    oldout <- out
+                    oldn <- sum(out)
+                    drop <- sample(all[out == 1L], ndrop)
+                    out[drop] <- 0L
+                    candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0L
+                    while (sum(candi) > 0) {
+                        if (sum(candi) > 1) 
+                          ij <- sample(all[candi], 1)
+                        else ij <- all[candi]
+                        out[ij] <- 1L
+                        candi <- outer(rowSums(out) < rs, colSums(out) < cs, "&") & out == 0
+                    }
+                    if (sum(out) >= fill) 
+                        break
+                    if (oldn >= sum(out)) 
+                        ndrop <- min(ndrop + 1, 4)
+                    else ndrop <- 1
+                    if (oldn > sum(out)) 
+                        out <- oldout
+                }
+                out
+            }
+            out <- array(0L, c(nr, nc, n))
+            for (k in seq_len(n))
+                out[, , k] <- btrfun()
+            out
+        }),
+        "r2dtable" = commsim(method="r2dtable", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            out
+        }),
+        "swap_count" = commsim(method="swap_count", binary=FALSE, isSeq=TRUE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(0L, c(nr, nc, n))
+            out[,,1] <- .C("swapcount", 
+                m = x, nr, nc, thin, PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("swapcount", 
+                    m = out[,,k], nr, nc, thin, PACKAGE = "vegan")$m
+            out
+        }),
+        "quasiswap_count" = commsim(method="quasiswap_count", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(unlist(r2dtable(n, rs, cs)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            for (k in seq_len(n))
+                out[,,k] <- .C("rswapcount", 
+                    m = out[,,k], nr, nc, fill, PACKAGE = "vegan")$m
+            out
+        }),
+        "swsh_samp" = commsim(method="swsh_samp", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            nz <- x[x > 0]
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "double"
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                out[,,k][out[,,k] > 0] <- sample(nz) # we assume that length(nz)>1
+            }
+            out
+        }),
+        "swsh_both" = commsim(method="swsh_both", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            nz <- as.integer(x[x > 0])
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+                out[,,k][out[,,k] > 0] <- indshuffle(nz - 1L) + 1L  # we assume that length(nz)>1
+            }
+            out
+        }),
+        "swsh_samp_r" = commsim(method="swsh_samp_r", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "double"
+            I <- seq_len(nr)
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                for (i in I) {
+                    nz <- x[i,][x[i,] > 0]
+                    if (length(nz) == 1)
+                        out[i,,k][out[i,,k] > 0] <- nz
+                    if (length(nz) > 1)
+                        out[i,,k][out[i,,k] > 0] <- sample(nz)
+                }
+            }
+            out
+        }),
+        "swsh_samp_c" = commsim(method="swsh_samp_c", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "double"
+            J <- seq_len(nc)
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = as.integer(out[,,k]), nr, nc, PACKAGE = "vegan")$m
+                for (j in J) {
+                    nz <- x[,j][x[,j] > 0]
+                    if (length(nz) == 1)
+                        out[,j,k][out[,j,k] > 0] <- nz
+                    if (length(nz) > 1)
+                        out[,j,k][out[,j,k] > 0] <- sample(nz)
+                }
+            }
+            out
+        }),
+        "swsh_both_r" = commsim(method="swsh_both_r", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            I <- seq_len(nr)
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = out[,,k], nr, nc, PACKAGE = "vegan")$m
+                for (i in I) {
+                    nz <- as.integer(x[i,][x[i,] > 0])
+                    if (length(nz) == 1)
+                        out[i,,k][out[i,,k] > 0] <- nz
+                    if (length(nz) > 1)
+                        out[i,,k][out[i,,k] > 0] <- indshuffle(nz - 1L) + 1L
+                }
+            }
+            out
+        }),
+        "swsh_both_c" = commsim(method="swsh_both_c", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            J <- seq_len(nc)
+            out <- array(unlist(r2dtable(fill, rf, cf)), c(nr, nc, n))
+            storage.mode(out) <- "integer"
+            for (k in seq_len(n)) {
+                out[,,k] <- .C("quasiswap", 
+                    m = out[,,k], nr, nc,  PACKAGE = "vegan")$m
+                for (j in J) {
+                    nz <- as.integer(x[,j][x[,j] > 0])
+                    if (length(nz) == 1)
+                        out[,j,k][out[,j,k] > 0] <- nz
+                    if (length(nz) > 1)
+                        out[,j,k][out[,j,k] > 0] <- indshuffle(nz - 1L) + 1L
+                }
+            }
+            out
+        }),
+        "abuswap_r" = commsim(method="abuswap_r", binary=FALSE, isSeq=TRUE,
+        mode="double",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(0, c(nr, nc, n))
+            out[,,1] <- .C("abuswap", 
+                m = x, nr, nc, thin, 1L, PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("abuswap", 
+                    m = out[,,k], nr, nc, thin, 1L, PACKAGE = "vegan")$m
+            out
+        }),
+        "abuswap_c" = commsim(method="abuswap_c", binary=FALSE, isSeq=TRUE,
+        mode="double",
+        fun=function(x, n, nr, nc, cs, rs, rf, cf, s, fill, thin) {
+            out <- array(0, c(nr, nc, n))
+            out[,,1] <- .C("abuswap", 
+                m = x, nr, nc, thin, 0L, PACKAGE = "vegan")$m
+            for (k in seq_len(n-1))
+                out[,,k+1] <- .C("abuswap", 
+                    m = out[,,k], nr, nc, thin, 0L, PACKAGE = "vegan")$m
+            out
+        }),
+        "r00_samp" = commsim(method="r00_samp", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- matrix(0, nr * nc, n)
+            for (k in seq_len(n))
+                out[, k] <- sample(x)
+            dim(out) <- c(nr, nc, n)
+            out
+        }),
+        "c0_samp" = commsim(method="c0_samp", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0, c(nr, nc, n))
+            J <- seq_len(nc)
+            for (k in seq_len(n))
+                for (j in J)
+                    out[, j, k] <- sample(x[,j])
+            out
+        }),
+        "r0_samp" = commsim(method="r0_samp", binary=FALSE, isSeq=FALSE,
+        mode="double",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            out <- array(0, c(nr, nc, n))
+            I <- seq_len(nr)
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, , k] <- sample(x[i,])
+            out
+        }),
+        "r00_ind" = commsim(method="r00_ind", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- matrix(0L, nr * nc, n)
+            for (k in seq_len(n))
+                out[, k] <- indshuffle(x)
+            dim(out) <- c(nr, nc, n)
+            out
+        }),
+        "c0_ind" = commsim(method="c0_ind", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- array(0L, c(nr, nc, n))
+            J <- seq_len(nc)
+            for (k in seq_len(n))
+                for (j in J)
+                    out[, j, k] <- indshuffle(x[,j])
+            out
+        }),
+        "r0_ind" = commsim(method="r0_ind", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- array(0L, c(nr, nc, n))
+            I <- seq_len(nr)
+            for (k in seq_len(n))
+                for (i in I)
+                    out[i, , k] <- indshuffle(x[i,])
+            out
+        }),
+        "r00_both" = commsim(method="r00_both", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- matrix(0L, nr * nc, n)
+            for (k in seq_len(n)) {
+                out[,k][x > 0] <- indshuffle(x[x > 0] - 1L) + 1L
+                out[,k] <- sample(out[,k])
+            }
+            dim(out) <- c(nr, nc, n)
+            out
+        }),
+        "c0_both" = commsim(method="c0_both", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- array(0L, c(nr, nc, n))
+            J <- seq_len(nc)
+            for (k in seq_len(n))
+                for (j in J) {
+                    out[,j,k][x[,j] > 0] <- indshuffle(x[,j][x[,j] > 0] - 1L) + 1L
+                    out[,j,k] <- sample(out[,j,k])
+                }
+            out
+        }),
+        "r0_both" = commsim(method="r0_both", binary=FALSE, isSeq=FALSE,
+        mode="integer",
+        fun=function(x, n, nr, nc, rs, cs, rf, cf, s, fill, thin) {
+            indshuffle <- function(x) {
+                drop(rmultinom(1, sum(x), rep(1, length(x))))
+            }
+            out <- array(0L, c(nr, nc, n))
+            I <- seq_len(nr)
+            for (k in seq_len(n))
+                for (i in I) {
+                    out[i,,k][x[i,] > 0] <- indshuffle(x[i,][x[i,] > 0] - 1L) + 1L
+                    out[i,,k] <- sample(out[i,,k])
+                }
+            out
+        })
+    )
+    if (missing(method))
+        return(names(algos))
+    if (inherits(method, "commsim"))
+        return(method)
+    method <- match.arg(method, sort(names(algos)))
+    algos[[method]]
+}
diff --git a/R/mantel.R b/R/mantel.R
index f0de5d8..a7ce767 100644
--- a/R/mantel.R
+++ b/R/mantel.R
@@ -1,6 +1,6 @@
-"mantel" <-
+`mantel` <-
   function (xdis, ydis, method = "pearson", permutations = 999, 
-            strata, na.rm = FALSE) 
+            strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores")) 
 {
     xdis <- as.dist(xdis)
     ydis <- as.vector(as.dist(ydis))
@@ -17,19 +17,12 @@
                       spearman = "Spearman's rank correlation rho",
                       variant)
     N <- attr(xdis, "Size")
-    if (length(permutations) == 1) {
-        if (permutations > 0) {
-            arg <- if (missing(strata)) NULL else strata
-            permat <- t(replicate(permutations,
-                                  permuted.index(N, strata = arg)))
-        }
-    } else {
-        permat <- as.matrix(permutations)
-        if (ncol(permat) != N)
-            stop(gettextf("'permutations' have %d columns, but data have %d observations",
-                          ncol(permat), N))
-        permutations <- nrow(permutations)
-    }
+    permat <- getPermuteMatrix(permutations, N, strata = strata)
+    if (ncol(permat) != N)
+        stop(gettextf("'permutations' have %d columns, but data have %d observations",
+                      ncol(permat), N))
+    permutations <- nrow(permat)
+
     if (permutations) {
         perm <- numeric(permutations)
         ## asdist as an index selects lower diagonal like as.dist,
@@ -40,19 +33,36 @@
             permvec <- (xmat[take, take])[asdist]
             drop(cor(permvec, ydis, method = method, use = use))
         }
-        perm <- sapply(1:permutations, function(i, ...) ptest(permat[i,], ...) )
+        ## Parallel processing
+        if (is.null(parallel))
+            parallel <- 1
+        hasClus <- inherits(parallel, "cluster")
+        if ((hasClus || parallel > 1)  && require(parallel)) {
+            if(.Platform$OS.type == "unix" && !hasClus) {
+                perm <- do.call(rbind,
+                               mclapply(1:permutations,
+                                        function(i, ...) ptest(permat[i,],...),
+                                        mc.cores = parallel))
+            } else {
+                if (!hasClus) {
+                    parallel <- makeCluster(parallel)
+                }
+                perm <- parRapply(parallel, permat, ptest)
+                if (!hasClus)
+                    stopCluster(parallel)
+            }
+        } else {
+            perm <- sapply(1:permutations, function(i, ...) ptest(permat[i,], ...))
+        }
         signif <- (sum(perm >= statistic) + 1)/(permutations + 1)
-     }
+    }
     else {
         signif <- NA
         perm <- NULL
     }
     res <- list(call = match.call(), method = variant, statistic = statistic, 
-                signif = signif, perm = perm, permutations = permutations)
-    if (!missing(strata)) {
-        res$strata <- deparse(substitute(strata))
-        res$stratum.values <- strata
-    }
+                signif = signif, perm = perm, permutations = permutations,
+                control = attr(permat, "control"))
     class(res) <- "mantel"
     res
 }
diff --git a/R/mantel.partial.R b/R/mantel.partial.R
index eee21e5..3097cea 100644
--- a/R/mantel.partial.R
+++ b/R/mantel.partial.R
@@ -1,6 +1,6 @@
-"mantel.partial" <-
+`mantel.partial` <-
   function (xdis, ydis, zdis, method = "pearson", permutations = 999, 
-            strata, na.rm = FALSE) 
+            strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores")) 
 {
     part.cor <- function(rxy, rxz, ryz) {
         (rxy - rxz * ryz)/sqrt(1-rxz*rxz)/sqrt(1-ryz*ryz)
@@ -23,17 +23,44 @@
                       spearman = "Spearman's rank correlation rho",
                       variant)
     statistic <- part.cor(rxy, rxz, ryz)
+    N <- attr(xdis, "Size")
+    permat <- getPermuteMatrix(permutations, N, strata = strata)
+    if (ncol(permat) != N)
+        stop(gettextf("'permutations' have %d columns, but data have %d observations",
+                      ncol(permat), N))
+    permutations <- nrow(permat)
+
     if (permutations) {
         N <- attr(xdis, "Size")
         perm <- rep(0, permutations)
         xmat <- as.matrix(xdis)
         asdist <- row(xmat) > col(xmat)
-        for (i in 1:permutations) {
-            take <- permuted.index(N, strata)
+        ptest <- function(take, ...) {
             permvec <- (xmat[take, take])[asdist]
             rxy <- cor(permvec, ydis, method = method, use = use)
             rxz <- cor(permvec, zdis, method = method, use = use)
-            perm[i] <- part.cor(rxy, rxz, ryz)
+            part.cor(rxy, rxz, ryz)
+        }
+        ## parallel processing
+        if (is.null(parallel))
+            parallel <- 1
+        hasClus <- inherits(parallel, "cluster")
+        if ((hasClus || parallel > 1)  && require(parallel)) {
+            if(.Platform$OS.type == "unix" && !hasClus) {
+                perm <- do.call(rbind,
+                               mclapply(1:permutations,
+                                        function(i, ...) ptest(permat[i,],...),
+                                        mc.cores = parallel))
+            } else {
+                if (!hasClus) {
+                    parallel <- makeCluster(parallel)
+                }
+                perm <- parRapply(parallel, permat, ptest)
+                if (!hasClus)
+                    stopCluster(parallel)
+            }
+        } else {
+            perm <- sapply(1:permutations, function(i, ...) ptest(permat[i,], ...))
         }
         signif <- (sum(perm >= statistic)+1)/(permutations + 1)
     }
@@ -42,7 +69,8 @@
         perm <- NULL
     }
     res <- list(call = match.call(), method = variant, statistic = statistic, 
-                signif = signif, perm = perm, permutations = permutations)
+                signif = signif, perm = perm, permutations = permutations,
+                control = attr(permat, "control"))
     if (!missing(strata)) {
         res$strata <- deparse(substitute(strata))
         res$stratum.values <- strata
diff --git a/R/metaMDSiter.R b/R/metaMDSiter.R
index 7221ef7..72a3437 100644
--- a/R/metaMDSiter.R
+++ b/R/metaMDSiter.R
@@ -1,10 +1,9 @@
 `metaMDSiter` <-
     function (dist, k = 2, trymax = 20, trace = 1, plot = FALSE, 
-              previous.best, engine = "monoMDS", maxit = 200, ...) 
+              previous.best, engine = "monoMDS", maxit = 200,
+              parallel = getOption("mc.cores"), ...) 
 {
     engine <- match.arg(engine, c("monoMDS", "isoMDS"))
-    if (engine == "isoMDS")
-        require(MASS) || stop("Needs package MASS (function isoMDS)")
     EPS <- 0.05
     if (engine == "monoMDS")
         EPS <- EPS/100 # monoMDS stress (0,1), isoMDS (0,100) 
@@ -12,7 +11,19 @@
     RMSELIM <- 0.005
     SOL <- FALSE
     converged <- FALSE
+    ## set tracing for engines
     isotrace <- max(0, trace - 1)
+    monotrace <- engine == "monoMDS" && trace > 1
+    monostop <- function(mod) {
+        if (mod$maxits == 0)
+            return(NULL)
+        lab <- switch(mod$icause,
+                      "no. of iterations >= maxit",
+                      "stress < smin",
+                      "stress ratio > sratmax",
+                      "scale factor of the gradient < sfgrmin")
+        cat("   ", mod$iters, "iterations: ", lab, "\n")
+    }
     ## Previous best or initial configuration 
     if (!missing(previous.best) && !is.null(previous.best)) {
         ## check if previous.best is from metaMDS or isoMDS
@@ -56,38 +67,90 @@
     }
     if (trace) 
         cat("Run 0 stress", s0$stress, "\n")
+    if (monotrace)
+        monostop(s0)
     tries <- 0
-    while(tries < trymax) {
-        tries <- tries + 1
-        stry <- switch(engine,
-                       "monoMDS" = monoMDS(dist, k = k, maxit = maxit, ...),
-                       "isoMDS" = isoMDS(dist, initMDS(dist, k = k), k = k,
-                       maxit = maxit, tol = 1e-07, trace = isotrace))
-        if (trace) {
-            cat("Run", tries, "stress", stry$stress, "\n")
-        }
-        if ((s0$stress - stry$stress) > -EPS) {
-            pro <- procrustes(s0, stry, symmetric = TRUE)
-            if (plot && k > 1) 
-                plot(pro)
-            if (stry$stress < s0$stress) {
-                s0 <- stry
-                if (trace) 
-                    cat("... New best solution\n")
+    ## Prepare for parallel processing
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    isParal <- (hasClus || parallel > 1) && require(parallel)
+    isMulticore <- .Platform$OS.type == "unix" && !hasClus
+    if (isParal && !isMulticore && !hasClus) {
+        parallel <- makeCluster(parallel)
+        clusterEvalQ(parallel, library(vegan))
+    }
+    ## get the number of clusters
+    if (inherits(parallel, "cluster"))
+        nclus <- length(parallel)
+    else
+        nclus <- parallel
+    ## proper iterations
+    while(tries < trymax && !converged) {
+        init <- replicate(nclus, initMDS(dist, k = k))
+        if (nclus > 1) isotrace <- FALSE
+        if (isParal) {
+            if (isMulticore) {
+                stry <-
+                    mclapply(1:nclus, function(i)
+                             switch(engine,
+                                    "monoMDS" = monoMDS(dist, init[,,i], k = k,
+                                    maxit = maxit, ...),
+                                    "isoMDS" = isoMDS(dist, init[,,i], k = k,
+                                    maxit = maxit, tol = 1e-07,
+                                    trace = isotrace)),
+                             mc.cores = parallel)
+            } else {
+                stry <-
+                    parLapply(parallel, 1:nclus, function(i)
+                              switch(engine,
+                                     "monoMDS" = monoMDS(dist, init[,,i], k = k,
+                                     maxit = maxit, ...),
+                                     "isoMDS" = isoMDS(dist, init[,,i], k = k,
+                                     maxit = maxit, tol = 1e-07, trace = isotrace)))
             }
-            summ <- summary(pro)
-            if (trace) 
-                cat("... procrustes: rmse", summ$rmse, " max resid", 
-                    max(summ$resid), "\n")
-            if (summ$rmse < RMSELIM && max(summ$resid) < RESLIM) {
+        } else {
+            stry <- list(switch(engine,
+                                "monoMDS" = monoMDS(dist, init[,,1], k = k,
+                                maxit = maxit, ...),
+                                "isoMDS" = isoMDS(dist, init[,,1], k = k,
+                                maxit = maxit, tol = 1e-07, trace = isotrace)))
+        }
+        ## analyse results of 'nclus' tries
+        for (i in 1:nclus) {
+            tries <- tries + 1
+            if (trace)
+                cat("Run", tries, "stress", stry[[i]]$stress, "\n")
+            if (monotrace)
+                monostop(stry[[i]])
+            if ((s0$stress - stry[[i]]$stress) > -EPS) {
+                pro <- procrustes(s0, stry[[i]], symmetric = TRUE)
+                if (plot && k > 1) 
+                    plot(pro)
+                if (stry[[i]]$stress < s0$stress) {
+                    s0 <- stry[[i]]
+                    ## New best solution has not converged unless
+                    ## proved later
+                    converged <- FALSE
+                    if (trace) 
+                        cat("... New best solution\n")
+                }
+                summ <- summary(pro)
                 if (trace) 
-                    cat("*** Solution reached\n\n")
-                converged <- TRUE
-                break
+                    cat("... procrustes: rmse", summ$rmse, " max resid", 
+                        max(summ$resid), "\n")
+                if (summ$rmse < RMSELIM && max(summ$resid) < RESLIM) {
+                    if (trace) 
+                        cat("*** Solution reached\n")
+                    converged <- TRUE
+                }
             }
+            flush.console()
         }
-        flush.console()
     }
+    ## stop socket cluster
+    if (isParal && !isMulticore && !hasClus)
+        stopCluster(parallel)
     if (!missing(previous.best) && inherits(previous.best, "metaMDS")) {
         tries <- tries + previous.best$tries
     }
diff --git a/R/mrpp.R b/R/mrpp.R
index d169cc1..7e9e3b2 100644
--- a/R/mrpp.R
+++ b/R/mrpp.R
@@ -1,6 +1,7 @@
-"mrpp" <-
-function (dat, grouping, permutations = 999, distance = "euclidean", 
-    weight.type = 1, strata) 
+`mrpp` <-
+    function (dat, grouping, permutations = 999, distance = "euclidean", 
+              weight.type = 1, strata = NULL,
+              parallel = getOption("mc.cores")) 
 {
     classmean <- function(ind, dmat, indls) {
         sapply(indls, function(x)
@@ -37,23 +38,48 @@ function (dat, grouping, permutations = 999, distance = "euclidean",
     ## significance test for it. Keep the item in reserve for
     ## possible later re-inclusion.
     CS <- NA
-    if (missing(strata)) 
-        strata <- NULL
-    perms <- sapply(1:permutations, function(x) grouping[permuted.index(N, 
-        strata = strata)])
-    m.ds <- numeric(permutations)
-    m.ds <- apply(perms, 2, function(x) mrpp.perms(x, dmat, indls, 
-        w))
-    p <- (1 + sum(del >= m.ds))/(permutations + 1)
-    r2 <- 1 - del/E.del
-    out <- list(call = match.call(), delta = del, E.delta = E.del, CS = CS,
-        n = ncl, classdelta = classdel,
-        Pvalue = p, A = r2, distance = distance, weight.type = weight.type, 
-        boot.deltas = m.ds, permutations = permutations)
-    if (!is.null(strata)) {
-        out$strata <- deparse(substitute(strata))
-        out$stratum.values <- strata
+    permutations <- getPermuteMatrix(permutations, N, strata = strata)
+    if (ncol(permutations) != N)
+        stop(gettextf("'permutations' have %d columns, but data have %d rows",
+                      ncol(permutations), N))
+
+    control <- attr(permutations, "control")
+    if(nrow(permutations)) {
+        perms <- apply(permutations, 1, function(indx) grouping[indx])
+        permutations <- ncol(perms)
+
+        ## Parallel processing
+        if (is.null(parallel))
+            parallel <- 1
+        hasClus <- inherits(parallel, "cluster")
+        if ((hasClus || parallel > 1)  && require(parallel)) {
+            if(.Platform$OS.type == "unix" && !hasClus) {
+                m.ds <- unlist(mclapply(1:permutations, function(i, ...)
+                                        mrpp.perms(perms[,i], dmat, indls, w),
+                                        mc.cores = parallel))
+            } else {
+                if (!hasClus) {
+                    parallel <- makeCluster(parallel)
+                }
+                m.ds <- parCapply(parallel, perms, function(x)
+                                  mrpp.perms(x, dmat, indls, w))
+                if (!hasClus)
+                    stopCluster(parallel)
+            }
+        } else {
+            m.ds <- apply(perms, 2, function(x) mrpp.perms(x, dmat, indls, w))
+        }
+        p <- (1 + sum(del >= m.ds))/(permutations + 1)
+        r2 <- 1 - del/E.del
+    } else { # no permutations
+        m.ds <- p <- r2 <- NA
+        permutations <- 0
     }
+    out <- list(call = match.call(), delta = del, E.delta = E.del, CS = CS,
+                n = ncl, classdelta = classdel, Pvalue = p, A = r2,
+                distance = distance, weight.type = weight.type,
+                boot.deltas = m.ds, permutations = permutations,
+                control = control)
     class(out) <- "mrpp"
     out
 }
diff --git a/R/mso.R b/R/mso.R
index 57ceda1..ba1ec7b 100644
--- a/R/mso.R
+++ b/R/mso.R
@@ -1,6 +1,6 @@
 `mso` <-
     function (object.cca, object.xy, grain = 1, round.up = FALSE,
-              permutations = FALSE) 
+              permutations = 0) 
 {
     if (inherits(object.cca, "mso")) {
         rm <- which(class(object.cca) == "mso")
@@ -58,27 +58,28 @@
                                 H), mean)
         object$vario <- cbind(object$vario, All = test$ca, CA = test$ca)
     }
-    if (permutations) {
-        ##require(base)
+    permat <- getPermuteMatrix(permutations, nrow(object$CA$Xbar))
+    nperm <- nrow(permat)
+    if (nperm) {
         object$H.test <- matrix(0, length(object$H), nrow(object$vario))
         for (i in 1:nrow(object$vario)) {
             object$H.test[, i] <- as.numeric(object$H == object$vario$H[i])
         }
-        xdis <- dist(object$CA$Xbar)^2
-        N <- attr(xdis, "Size")
-        statistic <- abs(cor(as.vector(xdis), object$H.test))
-        perm <- matrix(0, length(statistic), permutations)
-        for (i in 1:permutations) {
-            take <- sample(N, N)
-            permvec <- as.vector(as.dist(as.matrix(xdis)[take, 
-                                                         take]))
-            perm[, i] <- abs(cor(permvec, object$H.test))
+        xdis <- as.matrix(dist(object$CA$Xbar)^2)
+        ## taking lower triangle is faster than as.dist() because it
+        ## does not set attributes
+        ltri <- lower.tri(xdis)
+        statistic <- abs(cor(as.vector(xdis[ltri]), object$H.test))
+        permfunc <- function(k) {
+            permvec <- as.vector(xdis[k,k][ltri])
+            abs(cor(permvec, object$H.test))
         }
-        object$vario$CA.signif <- apply((perm >= matrix(statistic, 
-                                         nrow(perm), ncol(perm)))/permutations, 1, sum)
+        perm <- sapply(1:nperm, function(take) permfunc(permat[take,]))
+        object$vario$CA.signif <-
+            (rowSums(sweep(perm, 1, statistic, ">=")) + 1)/(nperm + 1)
+        attr(object$vario, "control") <- attr(permat, "control")
     }
     object$call <- match.call()
     class(object) <- c("mso", class(object))
     object
 }
-
diff --git a/R/multipart.default.R b/R/multipart.default.R
index eee0343..abf83e5 100644
--- a/R/multipart.default.R
+++ b/R/multipart.default.R
@@ -37,7 +37,7 @@
     rval <- as.data.frame(rval[rev(1:length(rval))])
     l2 <- sapply(rval, function(z) length(unique(z)))
     if (any(l1 != l2))
-        warning("levels are not perfectly nested")
+        stop("levels are not perfectly nested")
 
     ## aggregate response matrix
     fullgamma <-if (nlevels(rhs[,nlevs]) == 1)
diff --git a/R/nestedchecker.R b/R/nestedchecker.R
index a05b7e9..1349eae 100644
--- a/R/nestedchecker.R
+++ b/R/nestedchecker.R
@@ -4,6 +4,7 @@ function(comm)
     cb <- sum(designdist(comm, "(A-J)*(B-J)", "binary"))
     sppairs <- ncol(comm)*(ncol(comm)-1)/2
     out <- list("C.score" = cb/sppairs, statistic = cb)
+    names(out$statistic) <- "checkerboards"
     class(out) <- "nestedchecker"
     out
 }
diff --git a/R/nesteddisc.R b/R/nesteddisc.R
index 29ae199..64af484 100644
--- a/R/nesteddisc.R
+++ b/R/nesteddisc.R
@@ -28,7 +28,8 @@
     ## Function to evaluate discrepancy
     FUN <- function(x) sum(comm[col(comm)[,x] <= rowSums(comm)] == 0) 
     Ad <- FUN(x)
-    ## Go through all le-items and permute ties
+    ## Go through all le-items and permute ties. Functions allPerms
+    ## and shuffleSet are in permute package.
     for (i in 1:length(le)) {
         if (le[i] > 1) {
             take <- x
@@ -49,7 +50,7 @@
             ## duplicated orders
             else {
                 ties <- TRUE
-                perm <- t(replicate(niter, permuted.index(le[i])))
+                perm <- shuffleSet(le[i], niter)
                 perm <- perm + cle[i]
             }
             vals <- sapply(1:nrow(perm), function(j) {
@@ -64,6 +65,7 @@
         }
     }
     out <- list(statistic=Ad, ties = ties, order = k[x])
+    names(out$statistic) <- "discrepancy"
     class(out) <- "nesteddisc"
     out
 }
diff --git a/R/nestedn0.R b/R/nestedn0.R
index 50e95f2..e70c1cf 100644
--- a/R/nestedn0.R
+++ b/R/nestedn0.R
@@ -8,6 +8,7 @@ function(comm)
     for (i in 1:ncol(comm))
         n0[i] <- sum(comm[,i] == 0 & R > spmin[i])
     out <- list(spmin = spmin, n0 = n0, statistic = sum(n0))
+    names(out$statistic) <- "N0"
     class(out) <- "nestedn0"
     out
 }
diff --git a/R/nestednodf.R b/R/nestednodf.R
index 574f58e..bf10bbe 100644
--- a/R/nestednodf.R
+++ b/R/nestednodf.R
@@ -1,5 +1,5 @@
 `nestednodf` <- 
-    function(comm, order = TRUE, weighted = FALSE) 
+    function(comm, order = TRUE, weighted = FALSE, wbinary = FALSE) 
 {
     bin.comm <- ifelse(comm > 0, 1, 0)
     rfill <- rowSums(bin.comm)
@@ -34,8 +34,12 @@
                 next
             if (weighted) {
                 second <- comm[j, ]
-                N.paired.rows[counter] <-
-                    sum(first - second >= 0 & second > 0)/sum(second > 0)
+                if (!wbinary) 
+                    N.paired.rows[counter] <-
+                        sum(first - second > 0 & second > 0)/sum(second > 0)
+                else
+                    N.paired.rows[counter] <-
+                        sum(first - second >= 0 & second > 0)/sum(second > 0)
             }
             else {
                 N.paired.rows[counter] <-
@@ -52,8 +56,12 @@
                 next
             if (weighted) {
                 second <- comm[, j]
-                N.paired.cols[counter] <-
-                    sum(first - second >= 0 & second > 0)/sum(second > 0)
+                if (!wbinary)
+                    N.paired.cols[counter] <-
+                        sum(first - second > 0 & second > 0)/sum(second > 0)
+                else
+                    N.paired.cols[counter] <-
+                        sum(first - second >= 0 & second > 0)/sum(second > 0)
             }
             else {
                 N.paired.cols[counter] <-
diff --git a/R/nestedtemp.R b/R/nestedtemp.R
index 714338b..b356079 100644
--- a/R/nestedtemp.R
+++ b/R/nestedtemp.R
@@ -88,6 +88,7 @@
     temp <- 100*sum(u)/prod(dim(comm))/0.04145
     out <- list(comm = comm, u = u, r = r, c = c, p = p,
                 fill=fill,  statistic = temp, smooth=smo)
+    names(out$statistic) <- "temperature"
     class(out) <- "nestedtemp"
     out
 }
diff --git a/R/nullmodel.R b/R/nullmodel.R
new file mode 100644
index 0000000..a98f6a8
--- /dev/null
+++ b/R/nullmodel.R
@@ -0,0 +1,46 @@
+## this thing creates an environment
+## the whole point is to create all possible inputs for 
+## commsim functions only once and reuse them as necessary
+## also helps keeping track of updating process for sequential algorithms
+## method$mode can be evaluated and use storage mode accordingly
+nullmodel <- 
+function(x, method)
+{
+    x <- as.matrix(x)
+    if (is.null(dim(x)) || length(dim(x)) != 2L)
+        stop("'x' must be a matrix-like object")
+    if (any(is.na(x)))
+        stop("'NA' values not allowed")
+    if (any(x<0))
+        stop("negative values not allowed")
+    method <- make.commsim(method)
+    if (method$binary)
+        x <- ifelse(x > 0, 1L, 0L)
+    int <- method$mode == "integer"
+    if (int && abs(sum(x) - sum(as.integer(x))) > 10^-6)
+        stop("non integer values not allowed")
+    if (int)
+        x <- round(x, 0) # round up to closest integer
+    storage.mode(x) <- method$mode
+    out <- list(
+        data=x,
+        nrow=as.integer(dim(x)[1L]),
+        ncol=as.integer(dim(x)[2L]),
+        rowSums=rowSums(x),
+        colSums=colSums(x),
+        rowFreq=as.integer(rowSums(x > 0)),
+        colFreq=as.integer(colSums(x > 0)),
+        totalSum=ifelse(int, as.integer(sum(x)), as.double(sum(x))),
+        fill=as.integer(sum(x > 0)),
+        commsim=method,
+        state=if (method$isSeq) x else NULL,
+        iter=if (method$isSeq) as.integer(0L) else NULL
+        )
+#    storage.mode(out$x) <- method$mode
+    storage.mode(out$rowSums) <- method$mode
+    storage.mode(out$colSums) <- method$mode
+    out <- list2env(out, parent=emptyenv())
+    class(out) <- c("nullmodel", "environment")
+#    class(out) <- "nullmodel"
+    out
+}
diff --git a/R/oecosimu.R b/R/oecosimu.R
index ba5b17e..49171f3 100644
--- a/R/oecosimu.R
+++ b/R/oecosimu.R
@@ -2,101 +2,142 @@
     function(comm, nestfun, method, nsimul=99,
              burnin=0, thin=1, statistic = "statistic",
              alternative = c("two.sided", "less", "greater"),
-             ...)
+             batchsize = NA,
+             parallel = getOption("mc.cores"), ...)
 {
     alternative <- match.arg(alternative)
     nestfun <- match.fun(nestfun)
-    if (!is.function(method)) {
-        method <- match.arg(method, c("r00", "r0", "r1", "r2", "c0",
-                                  "swap", "tswap", "backtrack", "quasiswap",
-                                  "r2dtable"))
-        if (method == "r2dtable") {
-            nr <- rowSums(comm)
-            nc <- colSums(comm)
-            permfun <- function(z) r2dtable(1, nr, nc)[[1]]
+    if (length(statistic) > 1)
+        stop("only one 'statistic' is allowed")
+    if (!is.na(batchsize))
+        batchsize <- batchsize * 1024 * 1024
+    applynestfun <-
+        function(x, fun = nestfun, statistic = "statistic", ...) {
+            tmp <- fun(x, ...)
+            if (is.list(tmp))
+                tmp[[statistic]]
+            else
+                tmp
+    }
+    if (inherits(comm, "simmat")) {
+        x <- comm
+        method <- attr(x, "method")
+        nsimul <- dim(x)[3]
+        if (nsimul == 1)
+            stop("only one simulation in ", sQuote(deparse(substitute(comm))))
+        comm <- attr(comm, "data")
+        simmat_in <- TRUE
+    } else {
+        simmat_in <- FALSE
+        if (inherits(comm, "nullmodel")) {
+            nm <- comm
+            comm <- comm$data
+        } else {
+            nm <- nullmodel(comm, method)
+            if (nm$commsim$binary) {
+                ## sometimes people do not realize that null model
+                ## makes their data binary
+                if (max(abs(comm - nm$data)) > 0.1)
+                    warning("nullmodel transformed 'comm' to binary data")
+                comm <- nm$data
+            }
+        }
+        method <- nm$commsim$method
+    }
+    ## Check the number of batches needed to run the requested number
+    ## of simulations without exceeding arg 'batchsize', and find the
+    ## size of each batch.
+    if (!simmat_in && !is.na(batchsize)) {
+        commsize <- object.size(comm)
+        totsize <- commsize * nsimul
+        if (totsize > batchsize) { 
+            nbatch <- ceiling(unclass(totsize/batchsize))
+            batches <- diff(round(seq(0, nsimul, by = nsimul/nbatch)))
+        } else {
+            nbatch <- 1
         }
     } else {
-        permfun <- match.fun(method)
-        method <- "custom"
+        nbatch <- 1
     }
-    quant <- method %in% c("r2dtable", "custom")
-    ## binarize data with binary null models before getting statistics
-    if (!quant)
-        comm <- ifelse(comm > 0, 1, 0)
+    if (nbatch == 1)
+        batches <- nsimul
+    
     ind <- nestfun(comm, ...)
-
-    if (is.list(ind))
-        indstat <- ind[[statistic]]
-    else
-        indstat <- ind
-    n <- length(indstat)
-    simind <- matrix(0, nrow=n, ncol=nsimul)
-
-    ## permutation for binary data
-    if (!quant) {
-        if (method %in% c("swap", "tswap")){
-            checkbrd <- 1
-            if (method == "tswap") {
-                checkbrd <- sum(designdist(comm, "(J-A)*(J-B)", "binary"))
-                M <- ncol(comm)
-                N <- nrow(comm)
-                checkbrd <- M*(M-1)*N*(N-1)/4/checkbrd
-                thin <- round(thin*checkbrd)
+    indstat <-
+        if (is.list(ind))
+            ind[[statistic]]
+        else
+            ind
+    ## burnin of sequential models
+    if (!simmat_in && nm$commsim$isSeq) {
+        ## estimate thinning for "tswap" (trial swap)
+        if (nm$commsim$method == "tswap") {
+            checkbrd <-sum(designdist(comm, "(J-A)*(J-B)", 
+                                      "binary"))
+            M <- nm$ncol
+            N <- nm$nrow
+            checkbrd <- M * (M - 1) * N * (N - 1)/4/checkbrd
+            thin <- round(thin * checkbrd)
+            burnin <- round(burnin * checkbrd)
+        }
+        if (burnin > 0)
+            nm <- update(nm, burnin)
+    }
+    ## start with empty simind
+    simind <- NULL
+    ## Go to parallel processing if 'parallel > 1' or 'parallel' could
+    ## be a pre-defined socket cluster or 'parallel = NULL'.
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    if ((hasClus || parallel > 1)  && require(parallel)) {
+        if(.Platform$OS.type == "unix" && !hasClus) {
+            for (i in seq_len(nbatch)) {
+                ## simulate if no simmat_in
+                if(!simmat_in)
+                    x <- simulate(nm, nsim = batches[i], thin = thin)
+                tmp <- mclapply(seq_len(batches[i]),
+                                function(j)
+                                applynestfun(x[,,j], fun=nestfun,
+                                             statistic = statistic, ...),
+                                mc.cores = parallel)
+                simind <- cbind(simind, do.call(cbind, tmp))
             }
-            attr(simind, "thin") <- thin
-            attr(simind, "burnin") <- burnin
-            x <- comm
-            if (burnin > 0)
-                x <- commsimulator(x, method= method, thin = round(checkbrd) * burnin)
-            for(i in 1:nsimul) {
-                x <- commsimulator(x, method = method, thin = thin)
-                tmp <- nestfun(x, ...)
-                if (is.list(tmp))
-                    simind[,i] <- tmp[[statistic]]
-                else
-                    simind[,i] <- tmp
+        } else {
+            ## if hasClus, do not set up and stop a temporary cluster
+            if (!hasClus) {
+                parallel <- makeCluster(parallel)
+                ## make vegan functions available: others may be unavailable
+                clusterEvalQ(parallel, library(vegan))
             }
-        }
-        else {
-            for (i in 1:nsimul) {
-                x <- commsimulator(comm, method=method)
-                tmp <- nestfun(x,...)
-                if (is.list(tmp))
-                    simind[,i] <- tmp[[statistic]]
-                else
-                    simind[,i] <- tmp
+            for(i in seq_len(nbatch)) {
+                if (!simmat_in)
+                    x <- simulate(nm, nsim = batches[i], thin = thin)
+                simind <- cbind(simind,
+                                parApply(parallel, x, 3, function(z)
+                                         applynestfun(z, fun = nestfun,
+                                                      statistic = statistic, ...)))
             }
+            if (!hasClus)
+                stopCluster(parallel)
         }
-    ## permutation for count data
     } else {
-        if (!all(dim(comm) == dim(permfun(comm))))
-            stop("permutation function is not compatible with community matrix")
-        ## sequential algorithms
-        if (burnin > 0 || thin > 1) {
-            if (burnin > 0) {
-                m <- permfun(comm, burnin=burnin, thin=1)
-            }  else m <- comm
-            for (i in 1:nsimul) {
-                tmp <- nestfun(permfun(m, burnin=0, thin=thin), ...)
-                if (is.list(tmp))
-                    simind[, i] <- tmp[[statistic]]
-                else simind[, i] <- tmp
-            }
-            attr(simind, "thin") <- thin
-            attr(simind, "burnin") <- burnin
-        ## not sequential algorithms
-        } else {
-            for (i in 1:nsimul) {
-                tmp <- nestfun(permfun(comm), ...)
-                if (is.list(tmp)) {
-                    simind[, i] <- tmp[[statistic]]
-                } else simind[, i] <- tmp
-            }
-            attr(simind, "thin") <- NULL
-            attr(simind, "burnin") <- NULL
+        for(i in seq_len(nbatch)) {
+            ## do not simulate if x was already a simulation
+            if(!simmat_in)
+                x <- simulate(nm, nsim = batches[i], thin = thin)
+            simind <- cbind(simind, apply(x, 3, applynestfun, fun = nestfun,
+                                          statistic = statistic, ...))
         }
     }
-    ## end of addition
+    
+    simind <- matrix(simind, ncol = nsimul)
+
+    if (attr(x, "isSeq")) {
+        attr(simind, "thin") <- attr(x, "thin")
+        attr(simind, "burnin") <- burnin
+    }
+    
     sd <- apply(simind, 1, sd, na.rm = TRUE)
     means <- rowMeans(simind, na.rm = TRUE)
     z <- (indstat - means)/sd
@@ -113,7 +154,7 @@
                 less = pless,
                 greater = pmore)
     p <- pmin(1, (p + 1)/(nsimul + 1))
-
+    
     ## ADDITION: if z is NA then it is not correct to calculate p values
     ## try e.g. oecosimu(dune, sum, "permat")
     if (any(is.na(z)))
@@ -121,19 +162,12 @@
 
     if (is.null(names(indstat)) && length(indstat) == 1)
         names(indstat) <- statistic
-    ## $oecosimu cannot be added to a data frame, but this gives
-    ## either an error or a mess
-    if (is.data.frame(ind))
-        ind <- as.list(ind)
-    if (!is.list(ind))
-        ind <- list(statistic = ind)
-    if (method == "custom")
-        attr(method, "permfun") <- permfun
-    ind$oecosimu <- list(z = z, means = means, pval = p, simulated=simind,
-                         method=method,
-                         statistic = indstat, alternative = alternative)
-    attr(ind, "call") <- match.call()
-    class(ind) <- c("oecosimu", class(ind))
-    ind
+    oecosimu <- list(z = z, means = means, pval = p, simulated=simind,
+                     method=method, statistic = indstat,
+                     alternative = alternative, isSeq = attr(x, "isSeq"))
+    out <- list(statistic = ind, oecosimu = oecosimu)
+    attr(out, "call") <- match.call()
+    class(out) <- "oecosimu"
+    out
 }
 
diff --git a/R/ordiR2step.R b/R/ordiR2step.R
index 7592fbd..b7f8286 100644
--- a/R/ordiR2step.R
+++ b/R/ordiR2step.R
@@ -4,7 +4,7 @@
 
 `ordiR2step` <-
     function(object, scope, direction = c("both", "forward"),
-             Pin = 0.05, R2scope = TRUE, pstep = 100, perm.max = 1000,
+             Pin = 0.05, R2scope = TRUE, permutations = how(nperm=499),
              trace = TRUE, ...)
 {
     direction <- match.arg(direction)
@@ -78,7 +78,7 @@
             if (R2scope) R2.adds[best] <= R2.all else TRUE) {
             ## Second criterion: added variable is significant
             tst <- add1(object, scope = adds[best], test="permu",
-                        pstep = pstep, perm.max = perm.max,
+                        permutations = permutations,
                         alpha = Pin, trace = FALSE, ...)
             if (trace) {
                 print(tst[-1,])
@@ -95,7 +95,7 @@
         }
     }
     if (NROW(anotab) > 0) {
-        anotab <- rbind(anotab, "<All variables>" = c(R2.all, rep(NA, 5)))
+        anotab <- rbind(anotab, "<All variables>" = c(R2.all, rep(NA, 4)))
         class(anotab) <- c("anova", class(anotab))
         object$anova <- anotab
     }
diff --git a/R/ordiareatest.R b/R/ordiareatest.R
new file mode 100644
index 0000000..d57d9cd
--- /dev/null
+++ b/R/ordiareatest.R
@@ -0,0 +1,69 @@
+#' Permutation test for the area of convex hull or ellipse in ordination
+#'
+#' Finds if the area covered by a convex hull or fitted ellipse is
+#' smaller than expected under null hypothesis using permutation test.
+#'
+#' @param ord 2-d ordination
+#' @param groups factor defining groups
+#' @param area area of convex hull or of an ellipse
+#' @param permutations number, permutation matrix or a
+#' \code{\link[permute]{how}} definition.
+#' @param parallel parallel processing
+#' @param \dots other parameters passed to area functions
+#'
+#' @author Jari Oksanen
+`ordiareatest` <-
+    function(ord, groups, area = c("hull", "ellipse"), permutations = 999,
+             parallel = getOption("mc.cores"), ...)
+{
+    ## Function to find area
+    area <- match.arg(area)
+    areafun <- if (area == "hull") ordihull else ordiellipse
+    areafun <- match.fun(areafun)
+    ## Observed statistics
+    obs <- summary(areafun(ord, groups, draw = "none", ...))["Area",]
+    ## permutations
+    pfun <- function(take, ...)
+        summary(areafun(ord, groups[take], draw = "none", ...))["Area",]
+    perm <- getPermuteMatrix(permutations, length(groups))
+    nperm <- nrow(perm)
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    if ((hasClus || parallel > 1) && require(parallel)) {
+        if(.Platform$OS.type == "unix" && !hasClus) {
+            areas <- do.call(cbind,
+                             mclapply(1:permutations,
+                                      function(i, ...) pfun(perm[i,],...),
+                                        mc.cores = parallel))
+            } else {
+                if (!hasClus) {
+                    parallel <- makeCluster(parallel)
+                }
+                areas <- parApply(parallel, perm, MARGIN=1, pfun)
+                if (!hasClus)
+                    stopCluster(parallel)
+            }
+    } else {
+        areas <- sapply(1:permutations, function(i, ...) pfun(perm[i,], ...))
+    }
+    signif <- (rowSums(areas <= obs) + 1)/(nperm + 1)
+    out <- list("areas" = obs, "pvalues" = signif, "permutations" = areas,
+                nperm = nperm, control = attr(perm, "control"), "kind" = area)
+    class(out) <- "ordiareatest"
+    out
+}
+
+### print method
+
+`print.ordiareatest` <-
+    function(x, ...)
+{
+    qu <- apply(x$permutations, 1, quantile, probs=c(0.05, 0.5))
+    m <- cbind("Area" = x$areas, t(qu), "Pr(<sim)" = x$pvalues)
+    cat("\n")
+    cat(gettextf("Permutation test for the size of ordination %ss\nAlternative hypothesis: observed area is smaller than random %s\n\n", x$kind, x$kind))
+    cat(howHead(x$control), "\n")
+    printCoefmat(m, tst.ind=1:3)
+    invisible(x)
+}
diff --git a/R/ordiellipse.R b/R/ordiellipse.R
index fe0164d..888fae4 100644
--- a/R/ordiellipse.R
+++ b/R/ordiellipse.R
@@ -34,10 +34,10 @@
     if (label)
         cntrs <- names <- NULL
     ## Remove NA scores
-    kk <- complete.cases(pts)
+    kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
         gr <- out[groups == is & kk]
-        if (length(gr) > 2) {
+        if (length(gr) > 1) {
             X <- pts[gr, ]
             W <- w[gr]
             mat <- cov.wt(X, W)
diff --git a/R/ordihull.R b/R/ordihull.R
index 58b12e8..42d678b 100644
--- a/R/ordihull.R
+++ b/R/ordihull.R
@@ -30,7 +30,7 @@
     if (label)
         cntrs <- names <- NULL
     ## Remove NA scores
-    kk <- complete.cases(pts)
+    kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
         gr <- out[groups == is & kk]
         if (length(gr) > 1) {
diff --git a/R/ordilabel.R b/R/ordilabel.R
index 918c4af..f6b3ff6 100644
--- a/R/ordilabel.R
+++ b/R/ordilabel.R
@@ -18,6 +18,8 @@
         ord <- order(priority)
         x <- x[ord, ]
         labels <- labels[ord]
+    } else {
+        ord <- seq_along(labels)
     }
     em <- strwidth("m", cex = cex, ...)
     ex <- strheight("x", cex = cex, ...)
@@ -28,12 +30,16 @@
             col <- border
         else
             col <- par("fg")
+    col <- rep(col, length=nrow(x))[ord]
+    if(!is.null(border))
+        border <- rep(border, length=nrow(x))[ord]
+    fill <- rep(fill, length=nrow(x))[ord]
     for (i in 1:nrow(x)) {
         ordiArgAbsorber(x[i,1] + c(-1,1,1,-1)*w[i], x[i,2] + c(-1,-1,1,1)*h[i],
-                        col = fill, border = border, xpd = xpd,
+                        col = fill[i], border = border[i], xpd = xpd,
                         FUN = polygon, ...)
         ordiArgAbsorber(x[i,1], x[i,2], labels = labels[i], cex = cex,
-                        col = col, xpd = xpd, FUN = text, ...)
+                        col = col[i], xpd = xpd, FUN = text, ...)
     }
     invisible(x)
 }
diff --git a/R/ordiplot3d.R b/R/ordiplot3d.R
deleted file mode 100644
index 3f7ad32..0000000
--- a/R/ordiplot3d.R
+++ /dev/null
@@ -1,92 +0,0 @@
-`ordiplot3d` <-
-    function (object, display = "sites", choices = 1:3, ax.col = 2, 
-              arr.len = 0.1, arr.col = 4, envfit, xlab, ylab, zlab, ...) 
-{
-    require(scatterplot3d) || stop("Requires package 'scatterplot3d'")
-    x <- scores(object, display = display, choices = choices, ...)
-    if (missing(xlab)) xlab <- colnames(x)[1]
-    if (missing(ylab)) ylab <- colnames(x)[2]
-    if (missing(zlab)) zlab <- colnames(x)[3]
-    ### scatterplot3d does not allow setting equal aspect ratio. We
-    ### try to compensate this by setting equal limits for all axes
-    ### and hoping the graph is more or less square so that the lines
-    ### come correctly out.
-    rnge <- apply(x, 2, range)
-    scl <- c(-0.5, 0.5) * max(apply(rnge, 2, diff))
-    pl <- ordiArgAbsorber(x[, 1], x[, 2], x[, 3],  
-                          xlab = xlab, ylab = ylab, zlab = zlab,
-                          xlim = mean(rnge[,1]) + scl,
-                          ylim = mean(rnge[,2]) + scl,
-                          zlim = mean(rnge[,3]) + scl,
-                          FUN = "scatterplot3d", ...)
-    pl$points3d(range(x[, 1]), c(0, 0), c(0, 0), type = "l", 
-                col = ax.col)
-    pl$points3d(c(0, 0), range(x[, 2]), c(0, 0), type = "l", 
-                col = ax.col)
-    pl$points3d(c(0, 0), c(0, 0), range(x[, 3]), type = "l", 
-                col = ax.col)
-    if (!missing(envfit) ||
-        (!is.null(object$CCA) && object$CCA$rank > 0)) {
-        if (!missing(envfit)) 
-            object <- envfit
-        bp <- scores(object, dis = "bp", choices = choices, ...)
-        cn <- scores(object, dis = "cn", choices = choices, ...)
-        if (!is.null(cn) && !any(is.na(cn))) {
-            bp <- bp[!(rownames(bp) %in% rownames(cn)), , drop = FALSE]
-            cn.xyz <- pl$xyz.convert(cn)
-            points(cn.xyz, pch = "+", cex = 2, col = arr.col)
-        }
-        if (!is.null(bp) && nrow(bp) > 0) {
-            tmp <- pl$xyz.convert(bp)
-            mul <- ordiArrowMul(cbind(tmp$x, tmp$y), fill=1)
-            bp.xyz <- pl$xyz.convert(bp * mul)
-            orig <- pl$xyz.convert(0, 0, 0)
-            arrows(orig$x, orig$y, bp.xyz$x, bp.xyz$y, length = arr.len, 
-                   col = arr.col)
-        }
-    }
-    ## save the location of the origin
-    pl$origin <- matrix(unlist(pl$xyz.convert(0, 0, 0)), nrow=1)
-    ## Add function that flattens 3d envfit object so that it can be
-    ## projected on the created 3d graph
-    xyz2xy <- pl$xyz.convert
-    envfit.convert <- function(object) {
-        if (!is.null(object$vectors)) {
-            rn <- rownames(object$vectors$arrows)
-            arr <- object$vectors$arrows[, choices, drop = FALSE]
-            arr <- sapply(xyz2xy(arr), cbind)
-            if (!is.matrix(arr))
-                arr <- matrix(arr, ncol = 2)
-            arr <- sweep(arr, 2, pl$origin)
-            rownames(arr) <- rn
-            object$vectors$arrows <- arr
-        }
-        if (!is.null(object$factors)) {
-            rn <- rownames(object$factors$centroids)
-            object$factors$centroids <-
-                object$factors$centroids[ ,choices, drop = FALSE]
-            object$factors$centroids <-
-                sapply(xyz2xy(object$factors$centroids), cbind)
-            if (!is.matrix(object$factors$centroids))
-                object$factors$centroids <-
-                    matrix(object$factors$centroids, ncol = 2)
-            rownames(object$factors$centroids) <- rn
-        }
-        object
-    }
-    pl$envfit.convert <- envfit.convert
-    ## save projected coordinates of points
-    tmp <- pl$xyz.convert(x)
-    pl$points <- cbind(tmp$x, tmp$y)
-    rownames(pl$points) <- rownames(x)
-    if (exists("bp.xyz")) {
-        pl$arrows <- cbind(bp.xyz$x, bp.xyz$y)
-        rownames(pl$arrows) <- rownames(bp)
-    }
-    if (exists("cn.xyz")) {
-        pl$centroids <- cbind(cn.xyz$x, cn.xyz$y)
-        rownames(pl$centroids) <- rownames(cn)
-    }
-    class(pl) <- c("ordiplot3d", "ordiplot")
-    invisible(pl)
-}
diff --git a/R/ordipointlabel.R b/R/ordipointlabel.R
index 92ca1e4..6f780b4 100644
--- a/R/ordipointlabel.R
+++ b/R/ordipointlabel.R
@@ -15,7 +15,7 @@
         if(isTRUE(all.equal(length(display), 1L))) {
             xy[[1]] <- .checkSelect(select, xy[[1]])
         } else {
-            warning("'select' does not apply when plotting more than one set of scores.\n'select' was ignored.")
+            warning("'select' does not apply when plotting more than one set of scores--\n'select' was ignored")
         }
     }
     if (length(display) > 1) {
diff --git a/R/ordirgl.R b/R/ordirgl.R
deleted file mode 100644
index 9894cb6..0000000
--- a/R/ordirgl.R
+++ /dev/null
@@ -1,57 +0,0 @@
-"ordirgl" <-
-    function (object, display = "sites", choices = 1:3, type = "p", 
-              ax.col = "red", arr.col = "yellow", text, envfit, ...) 
-{
-    if (!require(rgl)) 
-        stop("Requires package 'rgl'")
-    x <- scores(object, display = display, choices = choices, 
-                ...)
-    if (ncol(x) < 3) 
-        stop("3D display needs three dimensions...")
-    rgl.clear()
-    if (type == "p") 
-        rgl.points(x[, 1], x[, 2], x[, 3], ...)
-    else if (type == "t") {
-        if (missing(text)) 
-            text <- rownames(x)
-        rgl.texts(x[, 1], x[, 2], x[, 3], text, adj = 0.5, ...)
-    }
-    rgl.lines(range(x[, 1]), c(0, 0), c(0, 0), col = ax.col)
-    rgl.lines(c(0, 0), range(x[, 2]), c(0, 0), col = ax.col)
-    rgl.lines(c(0, 0), c(0, 0), range(x[, 3]), col = ax.col)
-    rgl.texts(1.1 * max(x[, 1]), 0, 0, colnames(x)[1], col = ax.col, 
-              adj = 0.5)
-    rgl.texts(0, 1.1 * max(x[, 2]), 0, colnames(x)[2], col = ax.col, 
-              adj = 0.5)
-    rgl.texts(0, 0, 1.1 * max(x[, 3]), colnames(x)[3], col = ax.col, 
-              adj = 0.5)
-    if (!missing(envfit) ||
-        (!is.null(object$CCA) && object$CCA$rank > 0)) {
-        if (!missing(envfit)) 
-            object <- envfit
-        bp <- scores(object, dis = "bp", choices = choices)
-        cn <- scores(object, dis = "cn", choices = choices)
-        if (!is.null(cn) && !any(is.na(cn))) {
-            bp <- bp[!(rownames(bp) %in% rownames(cn)), , drop = FALSE]
-            rgl.texts(cn[, 1], cn[, 2], cn[, 3], rownames(cn), 
-                      col = arr.col, adj = 0.5)
-            rgl.points(cn[, 1], cn[, 2], cn[, 3], size = 5, col = arr.col)
-        }
-        if (!is.null(bp) && nrow(bp) > 0) {
-            mul <- c(range(x[, 1]), range(x[, 2]), range(x[, 
-                                                           3]))/c(range(bp[, 1]), range(bp[, 2]), range(bp[, 
-                                                                                                           3]))
-            mul <- mul[is.finite(mul) & mul > 0]
-            mul <- min(mul)
-            bp <- bp * mul
-            for (i in 1:nrow(bp)) {
-                rgl.lines(c(0, bp[i, 1]), c(0, bp[i, 2]), c(0, 
-                                                            bp[i, 3]), col = arr.col)
-                rgl.texts(1.1 * bp[i, 1], 1.1 * bp[i, 2], 1.1 * 
-                          bp[i, 3], rownames(bp)[i], col = arr.col,
-                          adj = 0.5)
-            }
-        }
-    }
-    invisible()
-}
diff --git a/R/ordispider.R b/R/ordispider.R
index ba94827..128c5de 100644
--- a/R/ordispider.R
+++ b/R/ordispider.R
@@ -1,8 +1,10 @@
 `ordispider` <-
     function (ord, groups, display = "sites", w = weights(ord, display),
+              spiders = c("centroid", "median"),
               show.groups, label = FALSE, ...)
 {
     weights.default <- function(object, ...) NULL
+    spiders <- match.arg(spiders)
     if (inherits(ord, "cca") && missing(groups)) {
         lc <- scores(ord, display = "lc", ...)
         wa <- scores(ord, display = "wa", ...)
@@ -30,21 +32,29 @@
         groups <- groups[take]
         w <- w[take]
     }
+    if (spiders == "median" && sd(w) > sqrt(.Machine$double.eps))
+        warning("weights are ignored with 'median' spiders")
     out <- seq(along = groups)
     inds <- names(table(groups))
     if (label) 
     cntrs <- names <- NULL
-    ## 'kk' removes NA scores
-    kk <- complete.cases(pts)
+    ## 'kk' removes NA scores and NA groups
+    kk <- complete.cases(pts) & !is.na(groups)
     for (is in inds) {
         gr <- out[groups == is & kk]
-        if (length(gr) > 1) {
-            X <- pts[gr, ]
+        if (length(gr)) {
+            X <- pts[gr, , drop = FALSE]
             W <- w[gr]
-            ave <- apply(X, 2, weighted.mean, w = W)
+            if (length(gr) > 1) {
+                ave <- switch(spiders,
+                              "centroid" = apply(X, 2, weighted.mean, w = W),
+                              "median" = ordimedian(X, rep(1, nrow(X))))
+                ordiArgAbsorber(ave[1], ave[2], X[, 1], X[, 2],
+                                FUN = segments, ...)
+            } else {
+                ave <- X
+            }
             spids[,gr] <- ave
-            ordiArgAbsorber(ave[1], ave[2], X[, 1], X[, 2],
-                            FUN = segments, ...)
             if (label) {
                 cntrs <- rbind(cntrs, ave)
                 names <- c(names, is)
diff --git a/R/ordistep.R b/R/ordistep.R
index 693397f..8e46db5 100644
--- a/R/ordistep.R
+++ b/R/ordistep.R
@@ -1,6 +1,6 @@
 `ordistep` <-
     function(object, scope, direction =c("both", "backward", "forward"),
-             Pin = 0.05, Pout = 0.1, pstep = 100, perm.max = 1000,
+             Pin = 0.05, Pout = 0.1, permutations = how(nperm = 199),
              steps=50, trace = TRUE, ...)
 {
     if (!inherits(object, "cca"))
@@ -43,17 +43,18 @@
         change <- NULL
         ## Consider dropping
         if (backward && length(scope$drop)) {
-            aod <- drop1(object, scope = scope$drop, test="perm", pstep = pstep,
-                         perm.max = perm.max, alpha = Pout, trace = trace, ...)
+            aod <- drop1(object, scope = scope$drop, test="perm",
+                         permutations = permutations,
+                         alpha = Pout, trace = trace, ...)
             aod <- aod[-1,]
-            o <- order(-aod[,5], aod[,4], aod[,2])
+            o <- order(-aod[,4], aod[,2])
             aod <- aod[o,]
             rownames(aod) <- paste("-", rownames(aod), sep = " ")
             if (trace) {
                 cat("\n")
                 print(aod)
             }
-            if (is.na(aod[1,5]) || aod[1,5] > Pout) {
+            if (is.na(aod[1,4]) || aod[1,4] > Pout) {
                 anotab <- rbind(anotab, aod[1,])
                 change <- rownames(aod)[1]
                 object <- eval.parent(update(object, paste("~  .", change)))
@@ -67,17 +68,18 @@
         }
         ## Consider adding
         if (forward && length(scope$add)) {
-            aod <- add1(object, scope = scope$add, test = "perm", pstep = pstep,
-                        perm.max = perm.max, alpha = Pin, trace = trace, ...)
+            aod <- add1(object, scope = scope$add, test = "perm",
+                        permutations = permutations,
+                        alpha = Pin, trace = trace, ...)
             aod <- aod[-1,]
-            o <- order(aod[,5], aod[,4], aod[,2])
+            o <- order(aod[,4], aod[,2])
             aod <- aod[o,]
             rownames(aod) <- paste("+", rownames(aod), sep = " ")
             if (trace) {
                 cat("\n")
                 print(aod)
             }
-            if (!is.na(aod[1,5]) && aod[1,5] <= Pin) {
+            if (!is.na(aod[1,4]) && aod[1,4] <= Pin) {
                 anotab <- rbind(anotab, aod[1,])
                 change <- rownames(aod)[1]
                 object <- eval.parent(update(object, paste( "~  .",change)))
diff --git a/R/ordisurf.R b/R/ordisurf.R
index 755416a..b0548d9 100644
--- a/R/ordisurf.R
+++ b/R/ordisurf.R
@@ -25,7 +25,7 @@
 {
     weights.default <- function(object, ...) NULL
     if(!missing(thinplate)) {
-        warning("Use of 'thinplate' is deprecated and will soon be removed;\nuse 'isotropic' instead.")
+        warning("use of 'thinplate' is deprecated and will soon be removed;\nuse 'isotropic' instead")
         isotropic <- thinplate
     }
     ## GRID no user-definable - why 31?
@@ -33,7 +33,6 @@
     w <- eval(w)
     if (!is.null(w) && length(w) == 1)
         w <- NULL
-    require(mgcv) || stop("Requires package 'mgcv'")
     X <- scores(x, choices = choices, display = display, ...)
     ## The original name of 'y' may be lost in handling NA: save for
     ## plots
@@ -48,9 +47,9 @@
     x2 <- X[, 2]
     ## handle fx - allow vector of length up to two
     if(!(missfx <- missing(fx)) && missing(knots))
-        warning("Requested fixed d.f. splines but without specifying 'knots'.\nSwitching to 'fx = FALSE'.")
+        warning("requested fixed d.f. splines but without specifying 'knots':\nswitching to 'fx = FALSE'")
     if (length(fx) > 2L)
-        warning("Length of 'fx' supplied exceeds '2'. Using the first two.")
+        warning("length of 'fx' supplied exceeds '2': using the first two")
     ## expand fx robustly, no matter what length supplied
     fx <- rep(fx, length.out = 2)
     ## can't have `fx = TRUE` and `select = TRUE`
@@ -59,17 +58,17 @@
             warning("'fx = TRUE' requested; using 'select = FALSE'")
             select <- FALSE
         } else if(!miss.select && isTRUE(select)){
-            stop("Fixed d.f. splines ('fx = TRUE') incompatible with 'select = TRUE'")
+            stop("fixed d.f. splines ('fx = TRUE') incompatible with 'select = TRUE'")
         }
     }
     ## handle knots - allow vector of length up to two
     if (length(knots) > 2L)
-        warning("Length of 'knots' supplied exceeds '2'. Using the first two.")
+        warning("length of 'knots' supplied exceeds '2': using the first two")
     ## expand knots robustly, no matter what length supplied
     knots <- rep(knots, length.out = 2)
     ## handle the bs - we only allow some of the possible options
     if (length(bs) > 2L)
-        warning("Number of basis types supplied exceeds '2'. Only using the first two.")
+        warning("number of basis types supplied exceeds '2': only using the first two")
     bs <- rep(bs, length.out = 2)
     ## check allowed types
     BS <- c("tp","ts","cr","cs","ds","ps","ad")
@@ -83,7 +82,7 @@
     }
     ## can't use "cr", "cs", "ps" in 2-d smoother with s()
     if(isTRUE(isotropic) && any(bs %in% c("cr", "cs", "ps"))) {
-        stop("Bases \"cr\", \"cs\", and \"ps\" not allowed in isotropic smooths.")
+        stop("bases \"cr\", \"cs\", and \"ps\" not allowed in isotropic smooths")
     }
     ## Build formula
     if (knots[1] <= 0) {
diff --git a/R/orglpoints.R b/R/orglpoints.R
deleted file mode 100644
index fea1525..0000000
--- a/R/orglpoints.R
+++ /dev/null
@@ -1,8 +0,0 @@
-"orglpoints" <-
-    function (object, display = "sites", choices = 1:3, ...) 
-{
-    x <- scores(object, display = display, choices = choices, ...)
-    rgl.points(x[,1], x[,2], x[,3], ...)
-    invisible()
-}
-
diff --git a/R/orglsegments.R b/R/orglsegments.R
deleted file mode 100644
index 28f2381..0000000
--- a/R/orglsegments.R
+++ /dev/null
@@ -1,17 +0,0 @@
-"orglsegments" <-
-    function (object, groups, display = "sites", choices = 1:3,...) 
-{
-    pts <- scores(object, display = display, choices = choices, ...)
-    inds <- names(table(groups))
-    for (is in inds) {
-        X <- pts[groups == is, , drop = FALSE]
-        if (nrow(X) > 1) {
-            for (i in 2:nrow(X)) {
-                rgl.lines(c(X[i-1,1],X[i,1]), c(X[i-1,2],X[i,2]), 
-                          c(X[i-1,3],X[i,3]), ...)
-            }
-        }
-    }
-    invisible()
-}
-
diff --git a/R/orglspider.R b/R/orglspider.R
deleted file mode 100644
index 1b29c2c..0000000
--- a/R/orglspider.R
+++ /dev/null
@@ -1,34 +0,0 @@
-"orglspider" <-
-    function (object, groups, display = "sites",
-              w = weights(object, display), choices = 1:3, ...) 
-{
-    weights.default <- function(object, ...) NULL
-    if (inherits(object, "cca") && missing(groups)) {
-        lc <- scores(object, display = "lc", choices = choices, ...)
-        wa <- scores(object, display = "wa", choices = choices, ...)
-        for (i in 1:nrow(lc)) rgl.lines(c(lc[i, 1], wa[i, 1]), 
-                                        c(lc[i, 2], wa[i, 2]), c(lc[i, 3], wa[i, 3]), ...)
-    }
-    else {
-        pts <- scores(object, display = display, choices = choices,  ...)
-        out <- seq(along = groups)
-        w <- eval(w)
-        if (length(w) == 1) 
-            w <- rep(1, nrow(pts))
-        if (is.null(w)) 
-            w <- rep(1, nrow(pts))
-        inds <- names(table(groups))
-        for (is in inds) {
-            gr <- out[groups == is]
-            if (length(gr) > 1) {
-                X <- pts[gr, ]
-                W <- w[gr]
-                ave <- apply(X, 2, weighted.mean, w = W)
-                for (i in 1:length(gr))
-                    rgl.lines(c(ave[1], X[i,1]), c(ave[2], X[i, 2]),
-                              c(ave[3], X[i, 3]),  ...)
-            }
-        }
-    }
-    invisible()
-}
diff --git a/R/orgltext.R b/R/orgltext.R
deleted file mode 100644
index e21679f..0000000
--- a/R/orgltext.R
+++ /dev/null
@@ -1,11 +0,0 @@
-"orgltext" <-
-    function (object, text, display = "sites", choices = 1:3, justify = "center",  
-              adj = 0.5, ...) 
-{
-    x <- scores(object, display = display, choices = choices, 
-                ...)
-    if (missing(text)) 
-        text <- rownames(x)
-    rgl.texts(x[, 1], x[, 2], x[, 3], text, adj = adj,  ...)
-    invisible()
-}
diff --git a/R/permatfull.R b/R/permatfull.R
index 451ff9a..eb9bae4 100644
--- a/R/permatfull.R
+++ b/R/permatfull.R
@@ -1,77 +1,54 @@
 ## permatfull function
 `permatfull` <-
-function(m, fixedmar="both", shuffle="both", strata=NULL, mtype="count", times=99)
+function(m, fixedmar="both", shuffle="both", 
+strata=NULL, mtype="count", times=99, ...)
 {
-## internal function
-indshuffle <- function(x)
-{
-   N <- length(x)
-   n <- sum(x)
-   out <- numeric(N)
-   names(out) <- 1:N
-   y <- table(sample(1:N, n, replace = TRUE))
-   out[names(out) %in% names(y)] <- y
-   names(out) <- NULL
-   out
-}
-bothshuffle <- function(x, y=1)
-{
-    x[x!=0] <- indshuffle(x[x!=0] - y) + y
-    sample(x)
-}
-    if (!identical(all.equal(m, round(m)), TRUE))
-       stop("function accepts only integers (counts)")
     mtype <- match.arg(mtype, c("prab", "count"))
     shuffle <- match.arg(shuffle, c("ind", "samp", "both"))
-    count <- mtype == "count"
     fixedmar <- match.arg(fixedmar, c("none", "rows", "columns", "both"))
-    sample.fun <- switch(shuffle,
-        "ind"=indshuffle,
-        "samp"=sample,
-        "both"=bothshuffle)
     m <- as.matrix(m)
-    n.row <- nrow(m)
-    n.col <- ncol(m)
-    if (mtype == "prab") m <- ifelse(m > 0, 1, 0)
-
-    if (is.null(strata))
-        str <- as.factor(rep(1, n.row))
-        else str <- as.factor(strata)[drop = TRUE]
-
-    levels(str) <- 1:length(unique(str))
-    str <- as.numeric(str)
+    str <- if (is.null(strata))
+        1 else as.integer(as.factor(strata)[drop = TRUE])
+    levstr <- unique(str)
     nstr <- length(unique(str))
-    if (any(tapply(str,list(str),length) == 1))
+    if (!is.null(strata) && any(table(str) < 2))
         stop("strata should contain at least 2 observations")
-    perm <- list()
-    perm[[1]] <- matrix(0, n.row, n.col)
-    for (k in 1:times)
-        perm[[k]] <- perm[[1]]
-    for (j in 1:nstr) {
-    id <- which(str == j)
-        if (fixedmar == "none")
-            for (i in 1:times)
-                if (count) perm[[i]][id,] <- matrix(sample.fun(array(m[id,])), length(id), n.col)
-                else perm[[i]][id,] <- commsimulator(m[id,], method="r00")
-        if (fixedmar == "rows")
-            for (i in 1:times)
-                if (count) perm[[i]][id,] <- t(apply(m[id,], 1, sample.fun))
-                else perm[[i]][id,] <- commsimulator(m[id,], method="r0")
-        if (fixedmar == "columns")
-            for (i in 1:times)
-                if (count) perm[[i]][id,] <- apply(m[id,], 2, sample.fun)
-                else perm[[i]][id,] <- commsimulator(m[id,], method="c0")
-        if (fixedmar == "both")
-            for (i in 1:times)
-                if (count) perm[[i]][id,] <- r2dtable(1, apply(m[id,], 1, sum), apply(m[id,], 2, sum))[[1]]
-                else perm[[i]][id,] <- commsimulator(m[id,], method="quasiswap")
+    ALGO <- switch(fixedmar,
+        "none" = "r00",
+        "rows" = "r0",
+        "columns" = "c0",
+        "both" = ifelse(mtype=="prab", "quasiswap", "r2dtable"))
+    if (mtype=="count") {
+        if (fixedmar!="both")
+            ALGO <- paste(ALGO, shuffle, sep="_")
+    }
+    if (is.null(strata)) {
+        tmp <- simulate(nullmodel(m, ALGO), nsim=times, ...)
+        perm <- vector("list", times)
+        for (i in seq_len(times))
+            perm[[i]] <- tmp[,,i]
+    } else {
+        perm <- vector("list", times)
+        tmp <- vector("list", length(unique(strata)))
+        for (j in seq_len(nstr)) {
+            tmp[[j]] <- simulate(nullmodel(m[strata==levstr[j],], ALGO), 
+                nsim=times, ...)
+        }
+        for (i in seq_len(times)) {
+            perm[[i]] <- array(0, dim(m))
+            for (j in seq_len(nstr)) {
+                perm[[i]][strata==levstr[j],] <- tmp[[j]][,,i]
+            }
         }
+    }
     if (fixedmar == "both")
         shuffle <- NA
+    if (mtype == "prab")
+        m <- ifelse(m > 0, 1, 0)
     out <- list(call=match.call(), orig=m, perm=perm)
     attr(out, "mtype") <- mtype
     attr(out, "ptype") <- "full"
-    attr(out, "method") <- NA
+    attr(out, "method") <- ALGO
     attr(out, "fixedmar") <- fixedmar
     attr(out, "times") <- times
     attr(out, "shuffle") <- shuffle
diff --git a/R/permatswap.R b/R/permatswap.R
index 41daa0f..8882e93 100644
--- a/R/permatswap.R
+++ b/R/permatswap.R
@@ -1,34 +1,22 @@
 ## permatswap function
 `permatswap` <-
 function(m, method="quasiswap", fixedmar="both", shuffle="both", strata=NULL,
-         mtype="count", times=99, burnin = 0, thin = 1)
+mtype="count", times=99, burnin = 0, thin = 1, ...)
 {
-## internal function
-indshuffle <- function(x)
-{
-   N <- length(x)
-   n <- sum(x)
-   out <- numeric(N)
-   names(out) <- 1:N
-   y <- table(sample(1:N, n, replace = TRUE))
-   out[names(out) %in% names(y)] <- y
-   names(out) <- NULL
-   out
-}
-bothshuffle <- function(x, y=1)
-{
-    x[x!=0] <- indshuffle(x[x!=0] - y) + y
-    sample(x)
-}
-    if (!identical(all.equal(m, round(m)), TRUE))
-       stop("function accepts only integers (counts)")
     mtype <- match.arg(mtype, c("prab", "count"))
     fixedmar <- match.arg(fixedmar, c("rows", "columns", "both"))
     shuffle <- match.arg(shuffle, c("samp", "both"))
     count <- mtype == "count"
+    m <- as.matrix(m)
+    str <- if (is.null(strata))
+        1 else as.integer(as.factor(strata)[drop = TRUE])
+    levstr <- unique(str)
+    nstr <- length(unique(str))
+    if (!is.null(strata) && any(table(str) < 2))
+        stop("strata should contain at least 2 observations")
+    ## evaluating algo type
     if (count) {
         method <- match.arg(method, c("swap", "quasiswap", "swsh", "abuswap"))
-        ## warning if swapcount is to be used
         if (method == "swap") {
             warning("quantitative swap method may not yield random null models, use only to study its properties")
             isSeq <- TRUE
@@ -43,122 +31,56 @@ bothshuffle <- function(x, y=1)
                 isSeq <- TRUE
             } else {
                 isSeq <- FALSE
-                if (fixedmar != "both")
+                if (method != "swsh" && fixedmar != "both")
                     stop("'fixedmar' must be \"both\"")
             }
         }
+        if (method %in% c("swap", "quasiswap"))
+            ALGO <- paste(method, "count", sep="_")
+        if (method == "abuswap")
+            ALGO <- paste(method, substr(fixedmar, 1, 1), sep="_")
+        if (method == "swsh") {
+            if (fixedmar=="both")
+                stop("if 'method=\"swsh\"', 'fixedmar' must not be \"both\"")
+            ALGO <- if (fixedmar=="none") {
+                paste(method, shuffle, sep="_")
+            } else {
+                paste(method, shuffle, substr(fixedmar, 1, 1), sep="_")
+            }
+        }
     } else {
         if (fixedmar != "both")
             stop("if 'mtype=\"prab\"', 'fixedmar' must be \"both\"")
         method <- match.arg(method, c("swap", "quasiswap", "tswap", "backtracking"))
         isSeq <- method != "quasiswap"
+        ALGO <- method
+    }
+    if (is.null(strata)) {
+        tmp <- simulate(nullmodel(m, ALGO), 
+            nsim=times, burnin=burnin, thin=thin, ...)
+        perm <- vector("list", times)
+        for (i in seq_len(times))
+            perm[[i]] <- tmp[,,i]
+    } else {
+        perm <- vector("list", times)
+        tmp <- vector("list", length(unique(strata)))
+        for (j in seq_len(nstr)) {
+            tmp[[j]] <- simulate(nullmodel(m[strata==levstr[j],], ALGO), 
+                nsim=times, burnin=burnin, thin=thin, ...)
         }
-
-    m <- as.matrix(m)
-    att <- attributes(m)
-    n.row <- nrow(m)
-    n.col <- ncol(m)
-    if (mtype == "prab") m <- ifelse(m > 0, 1, 0)
-
-    if (is.null(strata))
-        str <- as.factor(rep(1, n.row))
-        else str <- as.factor(strata)[drop = TRUE]
-
-    levels(str) <- 1:length(unique(str))
-    str <- as.numeric(str)
-    nstr <- length(unique(str))
-    if (any(tapply(str,list(str),length) == 1))
-        stop("strata should contain at least 2 observations")
-
-    perm <- list()
-    perm[[1]] <- matrix(0, n.row, n.col)
-    if (times > 1)
-        for (i in 2:times)
-            perm[[i]] <- perm[[1]]
-
-    for (j in 1:nstr) {
-        id <- which(str == j)
-        temp <- m[id,]
-        nn.row <- nrow(m[id,])
-        nn.col <- ncol(m[id,])
-        if (isSeq) {
-            if (count) {
-                if (burnin > 0) {
-                    if (method == "swap")
-                        temp <- .C("swapcount", m = as.double(temp),
-                            as.integer(nn.row), as.integer(nn.col),
-                            as.integer(burnin), PACKAGE = "vegan")$m
-                    if (method == "abuswap")
-                       temp <- .C("abuswap", m = as.double(temp),
-                            as.integer(nn.row), as.integer(nn.col),
-                            as.integer(burnin), as.integer(direct), PACKAGE = "vegan")$m
-                }
-            } else {
-                if (burnin > 0)
-                    temp <- commsimulator(temp, method=method, thin = burnin)
-            }
-            for (i in 1:times) {
-                if (count) {
-                    if (method == "swap")
-                        perm[[i]][id,] <- .C("swapcount",
-                                    m = as.double(temp),
-                                    as.integer(nn.row),
-                                    as.integer(nn.col),
-                                    as.integer(thin),
-                                    PACKAGE = "vegan")$m
-                    if (method == "abuswap")
-                        perm[[i]][id,] <- .C("abuswap",
-                                    m = as.double(temp),
-                                    as.integer(nn.row),
-                                    as.integer(nn.col),
-                                    as.integer(thin),
-                                    as.integer(direct),
-                                    PACKAGE = "vegan")$m
-	            } else {
-                    perm[[i]][id,] <- commsimulator(temp, method=method, thin=thin)
-                }
-            temp <- perm[[i]][id,]
-            } # for i end
-        } else {
-            if (method != "swsh") {
-                r2tabs <- r2dtable(times, rowSums(m[id,]), colSums(m[id,]))
-            } else {
-                tempPos <- temp[temp > 0]
-            }
-            for (i in 1:times) {
-                if (count) {
-                    if (method != "swsh") {
-                        ms <- sum(m[id,] > 0)
-                        tmp <- r2tabs[[i]]
-                        ## if fills are equal, no need to restore fill
-                        if (sum(tmp > 0) != ms) {
-                            tmp <- .C("rswapcount",
-                                        m = as.double(tmp),
-                                        as.integer(nn.row),
-                                        as.integer(nn.col),
-                                        as.integer(ms),
-                                        PACKAGE="vegan")$m
-                        }
-                        perm[[i]][id,] <- matrix(tmp, nrow(perm[[i]][id,]), ncol(perm[[i]][id,]))
-                    } else { # method == "swsh"
-                        tmp <- commsimulator(temp, method="quasiswap")
-                        if (shuffle == "samp") {
-                            tmp[tmp > 0] <- sample(tempPos)
-                        } else {
-                            tmp[tmp > 0] <- bothshuffle(tempPos)
-                        }
-                        perm[[i]][id,] <- tmp
-                    }
-                } else perm[[i]][id,] <- commsimulator(temp, method=method)
+        for (i in seq_len(times)) {
+            perm[[i]] <- array(0, dim(m))
+            for (j in seq_len(nstr)) {
+                perm[[i]][strata==levstr[j],] <- tmp[[j]][,,i]
             }
-            burnin <- 0
-            thin <- 0
         }
-    } # for j end
+    }
+    if (mtype == "prab")
+        m <- ifelse(m > 0, 1, 0)
     out <- list(call=match.call(), orig=m, perm=perm)
     attr(out, "mtype") <- mtype
     attr(out, "ptype") <- "swap"
-    attr(out, "method") <- method
+    attr(out, "method") <- ALGO
     attr(out, "fixedmar") <- if (method == "swsh") "none" else fixedmar
     attr(out, "times") <- times
     attr(out, "shuffle") <- if (method == "swsh") shuffle else NA
diff --git a/R/permustats.R b/R/permustats.R
new file mode 100644
index 0000000..8bebc46
--- /dev/null
+++ b/R/permustats.R
@@ -0,0 +1,272 @@
+### Functions to extract permutation statististic or null model
+### results from various vegan objects.
+
+## extract items as 'statistic' and 'permutations'. Specific methods
+## towards the end of this file
+
+`permustats` <-
+    function(x, ...)
+{
+    UseMethod("permustats")
+}
+
+## something like str()
+`print.permustats` <-
+    function(x, ...)
+{
+    print(str(x))
+    invisible(x)
+}
+
+### modelled after print.oecosimu (should perhaps have oecosimu() args
+### like 'alternative'
+
+`summary.permustats` <- function(object, interval = 0.95, ...) {
+    nalt <- length(object$alternative)
+    nstat <- length(object$statistic)
+    ## Replicate alternative to length of statistic
+    if ((nalt < nstat) && identical(nalt, 1L)) {
+        object$alternative <- rep(object$alternative, length.out = nstat)
+    }
+    TAB <- c("two.sided", "greater", "less")
+    compint <- (1 - interval) / 2
+    PROBS <- list(two.sided = c(compint, 0.5, interval + compint),
+                  greater = c(NA, 0.5, interval),
+                  less = c(1 - interval, 0.5, NA))
+    alt <- match(object$alternative, TAB)
+    probs <- PROBS[alt]
+    ## take care that permutations are in a column matrix
+    permutations <- as.matrix(object$permutations)
+    object$means <- colMeans(permutations)
+    sd <- apply(permutations, 2, sd)
+    object$z <-
+        (object$statistic - object$means)/sd
+    qFun <- function(i, sim, probs) {
+        quantile(sim[, i], probs = probs[[i]], na.rm = TRUE)
+    }
+    object$quantile <- lapply(seq_along(probs), qFun, sim = permutations, probs = probs)
+    object$quantile <- do.call("rbind", object$quantile)
+    dimnames(object$quantile) <- list(NULL, c("lower", "median", "upper"))
+    object$interval <- interval
+    ## not (yet) P-values...
+    class(object) <- "summary.permustats"
+    object
+}
+
+`print.summary.permustats` <- function(x, ...) {
+    m <- cbind("statistic" = x$statistic,
+               "z" = x$z,
+               "mean" = x$means,
+               x$quantile)
+    cat("\n")
+    printCoefmat(m, tst.ind = 1:ncol(m), na.print = "", ...)
+    writeLines(strwrap(paste0("(Interval (Upper - Lower) = ", x$interval, ")", sep = ""),
+                       initial = "\n"))
+    invisible(x)
+}
+
+### densityplot
+
+`densityplot.permustats` <-
+    function(x, data, xlab = "Permutations", ...)
+{
+    obs <- x$statistic
+    sim <- rbind(x$statistic, as.matrix(x$permutations))
+    nm <- names(obs)[col(sim)]
+    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
+                xlab = xlab,
+                panel = function(x, ...) {
+                    panel.densityplot(x, ...)
+                    panel.abline(v = obs[panel.number()], ...)
+                },
+                ...)
+}
+
+### simple density: normally densityplot should be used (or I suggest
+### so), but we also offer basic density. This can be either with or
+### without observed statistic.
+
+`density.permustats` <-
+    function(x, observed = TRUE, ...)
+{
+    ## only works with statistic
+    if (length(x$statistic) > 1)
+        stop(gettextf("only works with one statistic: you got %d",
+                      length(x$statistic)))
+    p <- x$permutations
+    if (observed)
+        p <- c(x$statistic, p)
+    out <- density(p)
+    out$call <- match.call()
+    out$call[[1]] <- as.name("density")
+    out
+}
+
+### QQ-plot against Guaussian distribution
+
+`qqnorm.permustats` <-
+    function(y, observed = TRUE, ...)
+{
+    ## only works with statistic
+    if (length(y$statistic) > 1)
+        stop(gettextf("only works with one statistic: you got %d",
+                      length(y$statistic)))
+    p <- y$permutations
+    if (observed)
+        p <- c(y$statistic, p)
+    q <- qqnorm(p, ...)
+    if (observed)
+        abline(h = y$statistic, ...)
+    invisible(q)
+}
+
+`qqmath.permustats` <-
+    function(x, data, observed = TRUE, ylab = "Permutations", ...)
+{
+    obs <- x$statistic
+    if (observed)
+        sim <- rbind(x$statistic, as.matrix(x$permutations))
+    else
+        sim <- as.matrix(x$permutations)
+    nm <- names(obs)[col(sim)]
+    qqmath( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
+                ylab = ylab,
+                panel = function(x, ...) {
+                    panel.qqmath(x, ...)
+                    if (observed)
+                        panel.abline(h = obs[panel.number()], ...)
+                },
+                ...)
+}
+
+###
+### specific methods to extract permustats
+###
+
+`permustats.anosim` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = structure(x$statistic, names="R"),
+        "permutations" = x$perm,
+        "alternative" = "greater"),
+              class="permustats")
+}
+
+`permustats.adonis` <-
+    function(x, ...)
+{
+    tab <- x$aov.tab
+    k <- !is.na(tab$F.Model)
+    structure(list(
+        "statistic" = structure(tab$F.Model[k], names = rownames(tab)[k]),
+        "permutations" = x$f.perms,
+        "alternative" = "greater"),
+              class="permustats")
+}
+
+`permustats.mantel` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = structure(x$statistic, names="r"),
+        "permutations" = x$perm,
+        "alternative" = "greater"),
+              class="permustats")
+}
+
+`permustats.mrpp` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = structure(x$delta, names="delta"),
+        "permutations" = x$boot.deltas,
+        "alternative" = "less"),
+              class="permustats")
+}
+
+`permustats.oecosimu` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = x$oecosimu$statistic,
+        "permutations" = t(x$oecosimu$simulated),
+        "alternative" = x$oecosimu$alternative),
+              class="permustats")
+}
+
+`permustats.ordiareatest` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = x$areas,
+        "permutations" = t(x$permutations),
+        "alternative" = "less"),
+              class = "permustats")
+}
+
+`permustats.permutest.cca` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = structure(x$F.0, names = "F"),
+        "permutations" = x$F.perm,
+        "alternative" = "greater"),
+              class="permustats")
+}
+
+`permustats.protest` <-
+    function(x, ...)
+{
+    structure(list(
+        "statistic" = structure(x$t0, names = "r"),
+        "permutations" = x$t,
+        "alternative" = "greater"),
+              class="permustats")
+}
+
+### the following do not return permutation data
+`permustats.CCorA` <-
+    function(x, ...)
+{
+    stop("no permutation data available")
+}
+
+`permustats.envfit` <-
+    function(x, ...)
+{
+    stop("no permutation data available")
+}
+
+`permustats.factorfit` <-
+    function(x, ...)
+{
+    stop("no permutation data available")
+}
+
+`permustats.vectorfit` <-
+    function(x, ...)
+{
+    stop("no permutation data available")
+}
+
+`permustats.mso` <-
+    function(x, ...)
+{
+    stop("no permutation data available")
+}
+
+`permustats.permutest.betadisper` <-
+    function(x, ...)
+{
+    ntypes <- NCOL(x$perm)
+    alt <- if (ntypes > 1) {
+        c("greater", rep("two.sided", ntypes - 1))
+    } else {
+        "greater"
+    }
+    structure(list("statistic" = x$statistic,
+                   "permutations" = x$perm,
+                   "alternative" = alt),
+              class ="permustats")
+}
diff --git a/R/permuted.index.R b/R/permuted.index.R
index f8a60ee..3fb2837 100644
--- a/R/permuted.index.R
+++ b/R/permuted.index.R
@@ -1,6 +1,7 @@
 "permuted.index" <-
     function (n, strata) 
 {
+    .Deprecated("permute package (shuffle or shuffleSet)")
     if (missing(strata) || is.null(strata)) 
         out <- sample.int(n, n)
     else {
diff --git a/R/permutest.betadisper.R b/R/permutest.betadisper.R
index 6f668b9..cf54f99 100644
--- a/R/permutest.betadisper.R
+++ b/R/permutest.betadisper.R
@@ -1,5 +1,6 @@
 `permutest.betadisper` <- function(x, pairwise = FALSE,
-                                   control = how(nperm = 999), ...)
+                                   permutations = 999,
+                                   parallel = getOption("mc.cores"), ...)
 {
     t.statistic <- function(x, y) {
         m <- length(x)
@@ -11,12 +12,52 @@
         pooled <- sqrt(((m-1)*xvar + (n-1)*yvar) / (m+n-2))
         (xbar - ybar) / (pooled * sqrt(1/m + 1/n))
     }
-    
+
+    permFun <- function(idx) {
+        if (!is.matrix(idx)) {
+            dim(idx) <- c(1, length(idx))
+        }
+        R <- nrow(idx)
+        Fperm <- matrix(nrow = R, ncol = 1)
+        if (pairwise) {                 # set up object to hold t stats
+            Tperm <- matrix(ncol = n.pairs, nrow = R)
+            Jseq <- seq_len(n.pairs)
+        }
+        rdf <- nobs - p                 # residual degrees of freedom
+        ## iterate
+        for (i in seq_len(R)) {         # iterate
+            take <- idx[i, ]                # current permutation from set
+            p.resid <- resids[take]         # permute residuals
+            f <- qr.fitted(mod.Q, p.resid)  # create new data
+            mss <- sum((f - mean(f))^2)
+            r <- qr.resid(mod.Q, p.resid)
+            rss <- sum(r^2)
+            resvar <- rss / rdf
+            Fperm[i, ] <- (mss / (p - 1)) / resvar
+
+            ## pairwise tests
+            if(pairwise) {
+                for(j in Jseq) {
+                    grp1 <- x$distance[take][group == combin[1, j]]
+                    grp2 <- x$distance[take][group == combin[2, j]]
+                    Tperm[i, j] <- t.statistic(grp1, grp2)
+                }
+            }
+        }
+
+        ## bind on pairwise stats if any
+        if (pairwise) {
+            Fperm <- cbind(Fperm, Tperm)
+        }
+        Fperm
+    }
+
     if(!inherits(x, "betadisper"))
         stop("Only for class \"betadisper\"")
+
     ## will issue error if only a single group
     mod.aov <- anova(x)
-    nobs <- length(x$distances)
+    nobs <- length(x$distances) ## number of observations
     mod <- lm(x$distances ~ x$group)
     mod.Q <- mod$qr
     p <- mod.Q$rank
@@ -24,73 +65,90 @@
 
     ## extract groups
     group <- x$group
-    
-    ## get set of permutations - shuffleSet checks design
-    perms <- shuffleSet(length(group), control = control)
-
-    ## number of permutations being performed, possibly adjusted after
-    ## checking in shuffleSet
-    nperm <- nrow(perms)
+    ## permutations is either a single number, a how() structure or a
+    ## permutation matrix
+    permutations <- getPermuteMatrix(permutations, nobs)
+    nperm <- nrow(permutations)
 
-    ## set-up objects to hold permuted results
-    res <- numeric(length = nperm + 1)
-    res[1] <- summary(mod)$fstatistic[1]
-    
     ## pairwise comparisons
     if(pairwise) {
-        ## unique pairings
-        combin <- combn(levels(x$group), 2)
+        combin <- combn(levels(x$group), 2) # unique pairings
         n.pairs <- ncol(combin)
-        t.stats <- matrix(0, ncol = n.pairs, nrow = nperm + 1)
-        t.stats[1,] <- apply(combn(levels(group), 2), 2, function(z) {
-            t.statistic(x$distances[group == z[1]],
-                        x$distances[group == z[2]])})
     }
 
-    ## begin loop over shuffleSet perms
-    for(i in seq_len(nperm)) {
-        perm <- perms[i,] ## take current permutation from set
-        perm.resid <- resids[perm] ## permute residuals
-        f <- qr.fitted(mod.Q, perm.resid) ## create new data
-        mss <- sum((f - mean(f))^2)
-        r <- qr.resid(mod.Q, perm.resid)
-        rss <- sum(r^2)
-        rdf <- nobs - p
-        resvar <- rss / rdf
-        res[i+1] <- (mss / (p - 1)) / resvar
-        
-        ## pairwise comparisons
-        if(pairwise) {
-            for(j in seq_len(n.pairs)) {
-                grp1 <- x$distance[perm][group == combin[1, j]]
-                grp2 <- x$distance[perm][group == combin[2, j]]
-                t.stats[i+1, j] <- t.statistic(grp1, grp2)
+    ## Parallel processing of permutations
+    if (is.null(parallel)) {
+        parallel <- 1
+    }
+    hasClus <- inherits(parallel, "cluster")
+    if ((hasClus || parallel > 1L) && requireNamespace("parallel")) {
+        if (.Platform$OS.type == "unix" && !hasClus) {
+            Pstats <- do.call("rbind",
+                           mclapply(seq_len(nperm),
+                                    function(x) permFun(permutations[x, , drop = FALSE]),
+                                    mc.cores = parallel))
+        } else {
+            ## if hasClus, don't set up and top a temporary cluster
+            if (!hasClus) {
+                parallel <- makeCluster(parallel)
+            }
+            Pstats <- parRapply(parallel, permutations, function(x) permFun(x))
+            if (!hasClus) {
+                stopCluster(parallel)
             }
         }
+    } else {
+        Pstats <- permFun(permutations)
+    }
+
+    ## Process results
+    F0 <- summary(mod)$fstatistic[1]
+    Fstats <- round(Pstats[, 1], 12)    # allow empty dim to be dropped
+    statistic <- F0 <- round(F0, 12)
+    names(statistic) <- "Overall (F)"
+
+    ## pairwise comparisons
+    if(pairwise) {
+        T0 <- apply(combn(levels(group), 2), 2, function(z) {
+            t.statistic(x$distances[group == z[1]],
+                        x$distances[group == z[2]])})
+        Tstats <- round(Pstats[, -1, drop = FALSE], 12)
+        T0 <- round(T0, 12)
+        statistic <- c(statistic, T0)
     }
 
     ## compute permutation p-value
-    pval <- sum(res >= res[1]) / length(res)
-    
+    pval <- (sum(Fstats >= F0) + 1) / (length(Fstats) + 1)
+
     if(pairwise) {
         df <- apply(combin, 2, function(z) {
             length(x$distances[group == z[1]]) +
                 length(x$distance[group == z[2]]) - 2})
-        pairwise <- list(observed = 2 * pt(-abs(t.stats[1,]), df),
-                         permuted = apply(t.stats, 2,
-                         function(z) sum(abs(z) >= abs(z[1]))/length(z)))
-        names(pairwise$observed) <- names(pairwise$permuted) <-
-            apply(combin, 2, paste, collapse = "-")
+        pairp <- (colSums(sweep(abs(Tstats), 2, abs(T0), '>=')) + 1) /
+            (NROW(Tstats) + 1)
+        pairp <- list(observed = 2 * pt(-abs(T0), df),
+                         permuted = pairp)
+        tnames <- apply(combin, 2, paste, collapse = "-")
+        names(pairp$observed) <- names(pairp$permuted) <- tnames
+        names(statistic)[-1] <- paste(tnames, "(t)")
     } else {
-        pairwise <- NULL
+        pairp <- NULL
     }
-    
+
     retval <- cbind(mod.aov[, 1:4], c(nperm, NA), c(pval, NA))
     dimnames(retval) <- list(c("Groups", "Residuals"),
                              c("Df", "Sum Sq", "Mean Sq", "F", "N.Perm",
                                "Pr(>F)"))
-    retval <- list(tab = retval, pairwise = pairwise,
-                   groups = levels(group), control = control)
+    retval <- list(tab = retval,
+                   pairwise = pairp,
+                   groups = levels(group),
+                   statistic = statistic,
+                   perm = if (pairwise) {
+                       structure(cbind(Fstats, Tstats), dimnames = list(NULL, names(statistic)))
+                   } else {
+                       structure(Fstats, names = names(statistic))
+                   },
+                   control = attr(permutations, "control"))
     class(retval) <- "permutest.betadisper"
     retval
 }
diff --git a/R/permutest.cca.R b/R/permutest.cca.R
index 33c11a7..1a9a0ff 100644
--- a/R/permutest.cca.R
+++ b/R/permutest.cca.R
@@ -5,13 +5,67 @@ permutest.default <- function(x, ...)
     stop("No default permutation test defined")
 
 `permutest.cca` <-
-    function (x, permutations = 99,
-              model = c("reduced", "direct", "full"), first = FALSE,
-              strata, ...) 
+    function (x, permutations = how(nperm=99),
+              model = c("reduced", "direct"), first = FALSE,
+              strata = NULL, parallel = getOption("mc.cores") , ...)
 {
+    ## do something sensible with insensible input (no constraints)
+    if (is.null(x$CCA)) {
+        sol <- list(call = match.call(), testcall = x$call, model = NA,
+                    F.0 = NA, F.perm = NA, chi = c(0, x$CA$tot.chi),
+                    num = 0, den = x$CA$tot.chi,
+                    df = c(0, nrow(x$CA$u) - max(x$pCCA$rank,0) - 1),
+                    nperm = 0, method = x$method, first = FALSE,
+                    Random.seed = NA)
+        class(sol) <- "permutest.cca"
+        return(sol)
+    }
     model <- match.arg(model)
     isCCA <- !inherits(x, "rda")
     isPartial <- !is.null(x$pCCA)
+    ## Function to get the F statistics in one loop
+    getF <- function (indx, ...)
+    {
+        if (!is.matrix(indx))
+            dim(indx) <- c(1, length(indx))
+        R <- nrow(indx)
+        mat <- matrix(0, nrow = R, ncol = 3)
+        for (i in seq_len(R)) {
+            take <- indx[i,]
+            Y <- E[take, ]
+            if (isCCA)
+                wtake <- w[take]
+            if (isPartial) {
+                if (isCCA) {
+                    XZ <- .C("wcentre", x = as.double(Z), as.double(wtake),
+                             as.integer(N), as.integer(Zcol),
+                             PACKAGE = "vegan")$x
+                    dim(XZ) <- c(N, Zcol)
+                    QZ <- qr(XZ)
+                }
+                Y <- qr.resid(QZ, Y)
+            }
+            if (isCCA) {
+                XY <- .C("wcentre", x = as.double(X), as.double(wtake),
+                         as.integer(N), as.integer(Xcol),
+                         PACKAGE = "vegan")$x
+                dim(XY) <- c(N, Xcol)
+                Q <- qr(XY)
+            }
+            tmp <- qr.fitted(Q, Y)
+            if (first)
+                cca.ev <- La.svd(tmp, nv = 0, nu = 0)$d[1]^2
+            else cca.ev <- sum(tmp * tmp)
+            if (isPartial || first) {
+                tmp <- qr.resid(Q, Y)
+                ca.ev <- sum(tmp * tmp)
+            }
+            else ca.ev <- Chi.tot - cca.ev
+            mat[i,] <- cbind(cca.ev, ca.ev, (cca.ev/q)/(ca.ev/r))
+        }
+        mat
+    }
+    ## end getF()
     if (first) {
         Chi.z <- x$CCA$eig[1]
         q <- 1
@@ -21,18 +75,16 @@ permutest.default <- function(x, ...)
         names(Chi.z) <- "Model"
         q <- x$CCA$qrank
     }
+    ## Set up
     Chi.xz <- x$CA$tot.chi
     names(Chi.xz) <- "Residual"
     r <- nrow(x$CA$Xbar) - x$CCA$QR$rank - 1
-    if (model == "full") 
+    if (model == "full")
         Chi.tot <- Chi.xz
     else Chi.tot <- Chi.z + Chi.xz
-    if (!isCCA) 
+    if (!isCCA)
         Chi.tot <- Chi.tot * (nrow(x$CCA$Xbar) - 1)
     F.0 <- (Chi.z/q)/(Chi.xz/r)
-    F.perm <- numeric(permutations)
-    num <- numeric(permutations)
-    den <- numeric(permutations)
     Q <- x$CCA$QR
     if (isCCA) {
         w <- x$rowsum # works with any na.action, weights(x) won't
@@ -47,10 +99,10 @@ permutest.default <- function(x, ...)
             Z <- sweep(Z, 1, sqrt(w), "/")
         }
     }
-    if (model == "reduced" || model == "direct") 
+    if (model == "reduced" || model == "direct")
         E <- x$CCA$Xbar
     else E <- x$CA$Xbar
-    if (isPartial && model == "direct") 
+    if (isPartial && model == "direct")
         E <- E + Y.Z
     ## Save dimensions
     N <- nrow(E)
@@ -59,44 +111,34 @@ permutest.default <- function(x, ...)
         if (isPartial)
             Zcol <- ncol(Z)
     }
-    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
-        runif(1)
-    seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
-    for (i in 1:permutations) {
-        take <- permuted.index(N, strata)
-        Y <- E[take, ]
-        if (isCCA)
-            wtake <- w[take]
-        if (isPartial) {
-            if (isCCA) {
-                XZ <- .C("wcentre", x = as.double(Z), as.double(wtake),
-                         as.integer(N), as.integer(Zcol),
-                         PACKAGE = "vegan")$x
-                dim(XZ) <- c(N, Zcol)
-                QZ <- qr(XZ)
+    permutations <- getPermuteMatrix(permutations, N, strata = strata)
+    nperm <- nrow(permutations)
+    ## Parallel processing (similar as in oecosimu)
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    if ((hasClus || parallel > 1)  && require(parallel)) {
+        if(.Platform$OS.type == "unix" && !hasClus) {
+            tmp <- do.call(rbind,
+                           mclapply(1:nperm,
+                                    function(i) getF(permutations[i,]),
+                                    mc.cores = parallel))
+        } else {
+            ## if hasClus, do not set up and stop a temporary cluster
+            if (!hasClus) {
+                parallel <- makeCluster(parallel)
             }
-            Y <- qr.resid(QZ, Y)
-        }
-        if (isCCA) {
-            XY <- .C("wcentre", x = as.double(X), as.double(wtake),
-                     as.integer(N), as.integer(Xcol),
-                     PACKAGE = "vegan")$x
-            dim(XY) <- c(N, Xcol)
-            Q <- qr(XY)
-        }
-        tmp <- qr.fitted(Q, Y)
-        if (first) 
-            cca.ev <- La.svd(tmp, nv = 0, nu = 0)$d[1]^2
-        else cca.ev <- sum(tmp * tmp)
-        if (isPartial || first) {
-            tmp <- qr.resid(Q, Y)
-            ca.ev <- sum(tmp * tmp)
+            tmp <- parRapply(parallel, permutations, function(i) getF(i))
+            tmp <- matrix(tmp, ncol=3, byrow=TRUE)
+            if (!hasClus)
+                stopCluster(parallel)
         }
-        else ca.ev <- Chi.tot - cca.ev
-        num[i] <- cca.ev
-        den[i] <- ca.ev
-        F.perm[i] <- (cca.ev/q)/(ca.ev/r)
+    } else {
+        tmp <- getF(permutations)
     }
+    num <- tmp[,1]
+    den <- tmp[,2]
+    F.perm <- tmp[,3]
     ## Round to avoid arbitrary ordering of statistics due to
     ## numerical inaccuracy
     F.0 <- round(F.0, 12)
@@ -105,8 +147,10 @@ permutest.default <- function(x, ...)
     Call[[1]] <- as.name("permutest")
     sol <- list(call = Call, testcall = x$call, model = model,
                 F.0 = F.0, F.perm = F.perm,  chi = c(Chi.z, Chi.xz),
-                num = num, den = den, df = c(q, r), nperm = permutations,
-                method = x$method, first = first,  Random.seed = seed)
+                num = num, den = den, df = c(q, r), nperm = nperm,
+                method = x$method, first = first)
+    sol$Random.seed <- attr(permutations, "seed")
+    sol$control <- attr(permutations, "control")
     if (!missing(strata)) {
         sol$strata <- deparse(substitute(strata))
         sol$stratum.values <- strata
diff --git a/R/persp.tsallisaccum.R b/R/persp.tsallisaccum.R
index d5dedf1..9a002f5 100644
--- a/R/persp.tsallisaccum.R
+++ b/R/persp.tsallisaccum.R
@@ -1,5 +1,5 @@
-persp.tsallisaccum <- 
-function(x, theta = 220, phi = 15, col = heat.colors(100), zlim, ...) 
-{
-persp.renyiaccum(x, theta = theta, phi = phi, col = col, zlim = zlim, ...)
-}
+persp.tsallisaccum <- 
+function(x, theta = 220, phi = 15, col = heat.colors(100), zlim, ...) 
+{
+persp.renyiaccum(x, theta = theta, phi = phi, col = col, zlim = zlim, ...)
+}
diff --git a/R/plot.cca.R b/R/plot.cca.R
index 91d7425..833580c 100644
--- a/R/plot.cca.R
+++ b/R/plot.cca.R
@@ -1,12 +1,12 @@
 `plot.cca` <-
-    function (x, choices = c(1, 2), display = c("sp", "wa", "cn"), 
-              scaling = 2, type, xlim, ylim,  const, ...) 
+    function (x, choices = c(1, 2), display = c("sp", "wa", "cn"),
+              scaling = 2, type, xlim, ylim,  const, ...)
 {
     TYPES <- c("text", "points", "none")
     g <- scores(x, choices, display, scaling, const)
     if (length(g) == 0 || all(is.na(g)))
       stop("nothing to plot: requested scores do not exist")
-    if (!is.list(g)) 
+    if (!is.list(g))
         g <- list(default = g)
     ## Take care that there are names
     for (i in seq_len(length(g))) {
@@ -15,20 +15,20 @@
                                          prefix = substr(names(g)[i], 1, 3))
     }
     if (!is.null(g$centroids)) {
-        if (is.null(g$biplot)) 
+        if (is.null(g$biplot))
             g$biplot <- scores(x, choices, "bp", scaling)
         if (!is.na(g$centroids)[1]) {
             bipnam <- rownames(g$biplot)
             cntnam <- rownames(g$centroids)
             g$biplot <- g$biplot[!(bipnam %in% cntnam), , drop = FALSE]
-            if (nrow(g$biplot) == 0) 
+            if (nrow(g$biplot) == 0)
                 g$biplot <- NULL
         }
     }
     if (missing(type)) {
         nitlimit <- 80
         nit <- max(nrow(g$spe), nrow(g$sit), nrow(g$con), nrow(g$def))
-        if (nit > nitlimit) 
+        if (nit > nitlimit)
             type <- "points"
         else type <- "text"
     }
@@ -53,36 +53,44 @@
             }
         return(invisible(pl))
     }
-    if (missing(xlim))
-        xlim <- range(g$spe[, 1], g$sit[, 1], g$con[, 1], g$default[,1],
+    if (missing(xlim)) {
+        xlim <- range(g$species[, 1], g$sites[, 1], g$constraints[, 1],
+                      g$biplot[, 1],
+                      if (length(g$centroids) > 0 && is.na(g$centroids)) NA else g$centroids[, 1],
+                      g$default[, 1],
                       na.rm = TRUE)
+    }
     if (!any(is.finite(xlim)))
         stop("no finite scores to plot")
-    if (missing(ylim))
-        ylim <- range(g$spe[, 2], g$sit[, 2], g$con[, 2], g$default[,2],
+    if (missing(ylim)) {
+        ylim <- range(g$species[, 2], g$sites[, 2], g$constraints[, 2],
+                      g$biplot[, 2],
+                      if (length(g$centroids) > 0 && is.na(g$centroids)) NA else g$centroids[, 2],
+                      g$default[, 2],
                       na.rm = TRUE)
-    plot(g[[1]], xlim = xlim, ylim = ylim, type = "n", asp = 1, 
+    }
+    plot(g[[1]], xlim = xlim, ylim = ylim, type = "n", asp = 1,
          ...)
     abline(h = 0, lty = 3)
     abline(v = 0, lty = 3)
     if (!is.null(g$species)) {
-        if (type == "text") 
-            text(g$species, rownames(g$species), col = "red", 
+        if (type == "text")
+            text(g$species, rownames(g$species), col = "red",
                  cex = 0.7)
-        else if (type == "points") 
+        else if (type == "points")
             points(g$species, pch = "+", col = "red", cex = 0.7)
     }
     if (!is.null(g$sites)) {
-        if (type == "text") 
+        if (type == "text")
             text(g$sites, rownames(g$sites), cex = 0.7)
-        else if (type == "points") 
+        else if (type == "points")
             points(g$sites, pch = 1, cex = 0.7)
     }
     if (!is.null(g$constraints)) {
-        if (type == "text") 
-            text(g$constraints, rownames(g$constraints), cex = 0.7, 
+        if (type == "text")
+            text(g$constraints, rownames(g$constraints), cex = 0.7,
                  col = "darkgreen")
-        else if (type == "points") 
+        else if (type == "points")
             points(g$constraints, pch = 2, cex = 0.7, col = "darkgreen")
     }
     if (!is.null(g$biplot) && nrow(g$biplot) > 0 && type != "none") {
@@ -91,24 +99,24 @@
         }
         else mul <- 1
         attr(g$biplot, "arrow.mul") <- mul
-        arrows(0, 0, mul * g$biplot[, 1], mul * g$biplot[, 2], 
+        arrows(0, 0, mul * g$biplot[, 1], mul * g$biplot[, 2],
                length = 0.05, col = "blue")
         biplabs <- ordiArrowTextXY(mul * g$biplot, rownames(g$biplot))
         text(biplabs, rownames(g$biplot), col = "blue")
         axis(3, at = c(-mul, 0, mul), labels = rep("", 3), col = "blue")
         axis(4, at = c(-mul, 0, mul), labels = c(-1, 0, 1), col = "blue")
     }
-    if (!is.null(g$centroids) && !is.na(g$centroids) && type != 
+    if (!is.null(g$centroids) && !is.na(g$centroids) && type !=
         "none") {
-        if (type == "text") 
+        if (type == "text")
             text(g$centroids, rownames(g$centroids), col = "blue")
-        else if (type == "points") 
+        else if (type == "points")
             points(g$centroids, pch = "x", col = "blue")
     }
     if (!is.null(g$default) && type != "none") {
-        if (type == "text") 
+        if (type == "text")
             text(g$default, rownames(g$default), cex = 0.7)
-        else if (type == "points") 
+        else if (type == "points")
             points(g$default, pch = 1, cex = 0.7)
     }
     class(g) <- "ordiplot"
diff --git a/R/plot.clamtest.R b/R/plot.clamtest.R
index 146438d..e1b314b 100644
--- a/R/plot.clamtest.R
+++ b/R/plot.clamtest.R
@@ -1,39 +1,39 @@
-plot.clamtest <- function(x, xlab, ylab, main,
-    pch=21:24, col.points=1:4, col.lines=2:4, lty=1:3,
-    position="bottomright", ...) {
-    summ <- summary(x)
-    glabel <- summ$labels
-    if (missing(main))
-        main <- "Species Classification"
-    if (missing(xlab))
-        xlab <- paste(glabel[2], "(abundance + 1)")
-    if (missing(ylab))
-        ylab <- paste(glabel[1], "(abundance + 1)")
-    Y <- x[,2]
-    X <- x[,3]
-    minval <- summ$minv
-    ## plot the dots
-    rr <- range(X+1,Y+1)
-    plot(X+1, Y+1, log = "xy", xaxt = "n", yaxt = "n",
-        col=col.points[as.integer(x$Classes)],
-        pch=pch[as.integer(x$Classes)], 
-        xlab=xlab, ylab=ylab, main=main,
-        xlim=rr, ylim=rr, ...)
-    axis(1, c(1,10,100,1000,10000))
-    axis(2, c(1,10,100,1000,10000))
-    ## too rare threshold
-    Ymin <- minval[[1]][1,2]
-    Xmin <- minval[[2]][1,1]
-    lines(rep(Xmin, 2)+1, c(0, 1)+1, col=col.lines[1], lty=lty[1])
-    lines(c(0, 1)+1, rep(Ymin, 2)+1, col=col.lines[1], lty=lty[1])
-    tmp <- approx(c(Xmin, 1), c(1, Ymin))
-    lines(tmp$x+1, tmp$y+1, col=col.lines[1], lty=lty[1])
-    ## Y vs. gen threshold
-    lines(minval[[1]]+1, col=col.lines[2], lty=lty[2])
-    ## X vs. gen threshold
-    lines(minval[[2]]+1, col=col.lines[3], lty=lty[3])
-    if (!is.null(position))
-        legend(position, col=col.points, pch=pch, 
-            legend=rownames(summ$summary))
-    invisible(x)
-}
+plot.clamtest <- function(x, xlab, ylab, main,
+    pch=21:24, col.points=1:4, col.lines=2:4, lty=1:3,
+    position="bottomright", ...) {
+    summ <- summary(x)
+    glabel <- summ$labels
+    if (missing(main))
+        main <- "Species Classification"
+    if (missing(xlab))
+        xlab <- paste(glabel[2], "(abundance + 1)")
+    if (missing(ylab))
+        ylab <- paste(glabel[1], "(abundance + 1)")
+    Y <- x[,2]
+    X <- x[,3]
+    minval <- summ$minv
+    ## plot the dots
+    rr <- range(X+1,Y+1)
+    plot(X+1, Y+1, log = "xy", xaxt = "n", yaxt = "n",
+        col=col.points[as.integer(x$Classes)],
+        pch=pch[as.integer(x$Classes)], 
+        xlab=xlab, ylab=ylab, main=main,
+        xlim=rr, ylim=rr, ...)
+    axis(1, c(1,10,100,1000,10000))
+    axis(2, c(1,10,100,1000,10000))
+    ## too rare threshold
+    Ymin <- minval[[1]][1,2]
+    Xmin <- minval[[2]][1,1]
+    lines(rep(Xmin, 2)+1, c(0, 1)+1, col=col.lines[1], lty=lty[1])
+    lines(c(0, 1)+1, rep(Ymin, 2)+1, col=col.lines[1], lty=lty[1])
+    tmp <- approx(c(Xmin, 1), c(1, Ymin))
+    lines(tmp$x+1, tmp$y+1, col=col.lines[1], lty=lty[1])
+    ## Y vs. gen threshold
+    lines(minval[[1]]+1, col=col.lines[2], lty=lty[2])
+    ## X vs. gen threshold
+    lines(minval[[2]]+1, col=col.lines[3], lty=lty[3])
+    if (!is.null(position))
+        legend(position, col=col.points, pch=pch, 
+            legend=rownames(summ$summary))
+    invisible(x)
+}
diff --git a/R/plot.contribdiv.R b/R/plot.contribdiv.R
index eec9462..ba59253 100644
--- a/R/plot.contribdiv.R
+++ b/R/plot.contribdiv.R
@@ -1,22 +1,22 @@
-plot.contribdiv <-
-function(x, sub, xlab, ylab, ylim, col, ...) {
-    y <- x[,c(1,3)]
-    if (missing(ylab))
-        ylab <- paste("Diversity components (", attr(x, "index"), ")", sep = "")
-    if (missing(xlab))
-        xlab <- "Sites"
-    if (missing(sub))
-        sub <- paste("Differentiation coefficient = ", round(attr(x, "diff.coef"),3), sep = "")
-    if (missing(ylim))
-        ylim <- c(0, max(y))
-    if (missing(col))
-        col <- c("lightgrey", "darkgrey")
-    matplot(y, type = "n", sub=sub, xlab=xlab, ylab=ylab, axes = FALSE,
-            bty = "n", ...)
-    polygon(c(1,1:nrow(y),nrow(y)), c(0,y$gamma,0), col=col[1])
-    polygon(c(1,1:nrow(y),nrow(y)), c(0,y$alpha,0), col=col[2])
-    axis(side = 1)
-    axis(side = 2)
-    box()
-    invisible(x)
-}
+plot.contribdiv <-
+function(x, sub, xlab, ylab, ylim, col, ...) {
+    y <- x[,c(1,3)]
+    if (missing(ylab))
+        ylab <- paste("Diversity components (", attr(x, "index"), ")", sep = "")
+    if (missing(xlab))
+        xlab <- "Sites"
+    if (missing(sub))
+        sub <- paste("Differentiation coefficient = ", round(attr(x, "diff.coef"),3), sep = "")
+    if (missing(ylim))
+        ylim <- c(0, max(y))
+    if (missing(col))
+        col <- c("lightgrey", "darkgrey")
+    matplot(y, type = "n", sub=sub, xlab=xlab, ylab=ylab, axes = FALSE,
+            bty = "n", ...)
+    polygon(c(1,1:nrow(y),nrow(y)), c(0,y$gamma,0), col=col[1])
+    polygon(c(1,1:nrow(y),nrow(y)), c(0,y$alpha,0), col=col[2])
+    axis(side = 1)
+    axis(side = 2)
+    box()
+    invisible(x)
+}
diff --git a/R/plot.envfit.R b/R/plot.envfit.R
index cf5fab5..0accc5d 100644
--- a/R/plot.envfit.R
+++ b/R/plot.envfit.R
@@ -17,12 +17,14 @@
         } else {
             ## input vector: either vectors or factors must be NULL,
             ## and the existing set of labels is replaced
-            if (!is.null(labs$v) && !is.null(labs$f))
+            if (!is.null(x$vectors) && !is.null(x$factors))
                 stop("needs a list with both 'vectors' and 'factors' labels")
-            if (!is.null(labs$v))
-                labs$v <- labels
-            else
+            ## need to handle the case where both sets of labels are NULL
+            ## such as when used with the default interface and single x
+            if (!is.null(x$factors))
                 labs$f <- labels
+            else
+                labs$v <- labels
         }
     }
     vect <- NULL
@@ -102,7 +104,7 @@
         xlim <- range(xstack[,1] + sw, xstack[,2] - sw)
         ylim <- range(xstack[,2] + sh, xstack[,2] - sh)
         plot.window(xlim = xlim, ylim = ylim, asp = 1, ...)
-        ## Re-evaluate arrow.mul, set its text and re-evaluate limits again 
+        ## Re-evaluate arrow.mul, set its text and re-evaluate limits again
         if (!is.null(vect)) {
             arrow.mul <- ordiArrowMul(vect, at = at, fill = 1)
             vect <- arrow.mul * vect
@@ -119,7 +121,7 @@
         alabs <- colnames(vect)
         title(..., ylab = alabs[2], xlab = alabs[1])
     }
-    
+
     if (!is.null(vect)) {
         arrows(at[1], at[2], vect[, 1], vect[, 2], len = 0.05,
                col = col)
diff --git a/R/plot.spantree.R b/R/plot.spantree.R
index 30869b5..2e21718 100644
--- a/R/plot.spantree.R
+++ b/R/plot.spantree.R
@@ -3,8 +3,6 @@
               ...) 
 {
     FUNname <- deparse(substitute(FUN))
-    if (length(FUNname) && FUNname %in% c("sammon", "isoMDS")) 
-        require(MASS) || stop(FUNname, "requires package MASS")
     FUN <- match.fun(FUN)
     n <- length(x$kid) + 1
     if (missing(ord)) {
diff --git a/R/points.cca.R b/R/points.cca.R
index 7c0990d..245a084 100644
--- a/R/points.cca.R
+++ b/R/points.cca.R
@@ -4,7 +4,7 @@
 {
     formals(arrows) <- c(formals(arrows), alist(... = ))
     if (length(display) > 1)
-        stop("Only one 'display' item can be added in one command.")
+        stop("only one 'display' item can be added in one command")
     pts <- scores(x, choices = choices, display = display, scaling = scaling,
                   const)
     if (!missing(select))
diff --git a/R/poolaccum.R b/R/poolaccum.R
index 7e17ba5..c1fd436 100644
--- a/R/poolaccum.R
+++ b/R/poolaccum.R
@@ -10,6 +10,9 @@
     ## specpool() is slow, but the vectorized versions below are
     ## pretty fast
     for (i in 1:permutations) {
+        ## It is a bad practice to replicate specpool equations here:
+        ## if we change specpool, this function gets out of sync. You
+        ## should be ashamed, Jari Oksanen!
         take <- sample.int(n, n)
         tmp <- apply(x[take,] > 0, 2, cumsum)
         S[,i] <- rowSums(tmp > 0)
@@ -20,7 +23,8 @@
         boot[,i] <- 2*S[,i] - m + rowSums(exp(sweep(log1p(-sweep(tmp, 1, N, "/")), 1, N, "*") ))
         a1 <- rowSums(tmp == 1)
         a2 <- rowSums(tmp == 2)
-        chao[, i] <- S[,i] + ifelse(a2 > 0, a1*a1/2/a2, 0)
+        chao[, i] <- S[,i] + ifelse(a2 > 0, (N-1)/N*a1*a1/2/a2,
+                                    (N-1)/N*a1*(a1-1)/2)
         jack1[,i] <- S[,i] + a1 * (N-1)/N
         jack2[,i] <- S[,i] + a1*(2*N-3)/N - a2*(N-2)^2/N/(N-1)
     }
diff --git a/R/print.CCorA.R b/R/print.CCorA.R
index d078195..ef1beae 100644
--- a/R/print.CCorA.R
+++ b/R/print.CCorA.R
@@ -10,11 +10,13 @@
     cat("Pillai's trace: ", format(x$Pillai, ...), "\n")
     cat("\n")
     cat("Significance of Pillai's trace:\n")
+
+    cat("from F-distribution:  ", format.pval(x$p.Pillai), "\n")
     if (x$nperm > 0) {
-        cat("based on", x$nperm, "permutations: ")
+        cat("based on permutations: ")
         cat(x$p.perm,"\n")
+        cat(howHead(x$control), "\n")
     }
-    cat("from F-distribution: ", format.pval(x$p.Pillai), "\n\n")
     out <- rbind("Eigenvalues" = x$EigenValues, "Canonical Correlations" = x$CanCorr)
     colnames(out) <- colnames(x$Cy)
     printCoefmat(out, ...)
diff --git a/R/print.anosim.R b/R/print.anosim.R
index 5a0d029..0c86e31 100644
--- a/R/print.anosim.R
+++ b/R/print.anosim.R
@@ -1,4 +1,4 @@
-"print.anosim" <-
+`print.anosim` <-
     function (x, digits = max(3, getOption("digits") - 3), ...) 
 {
     cat("\nCall:\n")
@@ -10,10 +10,8 @@
     if (nperm) {
         cat("      Significance:", format.pval(x$signif), 
             "\n\n")
-        cat("Based on ", nperm, " permutations")
+        cat(howHead(x$control))
     }
-    if (!is.null(x$strata)) 
-        cat(", stratified within", x$strata)
-    cat("\n\n")
+    cat("\n")
     invisible(x)
 }
diff --git a/R/print.bioenv.R b/R/print.bioenv.R
index 62bf2a9..0aec96e 100644
--- a/R/print.bioenv.R
+++ b/R/print.bioenv.R
@@ -1,11 +1,12 @@
-"print.bioenv" <-
+`print.bioenv` <-
     function (x, ...) 
 {
     cat("\nCall:\n")
     cat(deparse(x$call), "\n")
     cat("\nSubset of environmental variables with best correlation to community data.\n\n")
-    cat("Correlations:     ", x$method, "\n")
-    cat("Dissimilarities:  ", x$index, "\n\n")
+    cat("Correlations:   ", x$method, "\n")
+    cat("Dissimilarities:", x$index, "\n")
+    cat("Metric:         ", x$metric, "\n\n") 
     i <- which.max(lapply(x$models, function(tmp) tmp$est))
     cat("Best model has", i, "parameters (max.", x$upto, "allowed):\n")
     cat(paste(x$names[x$models[[i]]$best], collapse = " "))
diff --git a/R/print.cca.R b/R/print.cca.R
index bcfd764..1dd943c 100644
--- a/R/print.cca.R
+++ b/R/print.cca.R
@@ -42,18 +42,18 @@
             "deleted due to missingness\n")
     if (!is.null(x$CCA) && x$CCA$rank > 0) {
         cat("\nEigenvalues for constrained axes:\n")
-        print(x$CCA$eig, digits = digits, ...)
+        print(zapsmall(x$CCA$eig, digits = digits), ...)
     }
     if (!is.null(x$CA) && x$CA$rank > 0) {
         ax.lim <- 8
         ax.trig <- 16
         cat("\nEigenvalues for unconstrained axes:\n")
         if (x$CA$rank > ax.trig) {
-            print(x$CA$eig[1:ax.lim], digits = digits, ...)
+            print(zapsmall(x$CA$eig[1:ax.lim], digits = digits), ...)
             cat("(Showed only", ax.lim, "of all", x$CA$rank, 
                 "unconstrained eigenvalues)\n")
         }
-        else print(x$CA$eig, digits = digits, ...)
+        else print(zapsmall(x$CA$eig, digits = digits), ...)
     }
     cat("\n")
     invisible(x)
diff --git a/R/print.commsim.R b/R/print.commsim.R
new file mode 100644
index 0000000..df6697f
--- /dev/null
+++ b/R/print.commsim.R
@@ -0,0 +1,11 @@
+print.commsim <- function(x, ...) {
+    cat("An object of class", dQuote(class(x)[1L]), "\n")
+    isSeq <- ifelse(x$isSeq, "sequential", "non-sequential")
+    if(x$binary)
+        kind <- "binary"
+    else
+        kind <- ifelse(x$mode == "integer", "count", "abundance")
+    cat(sQuote(x$method), " method (", 
+        kind, ", ", isSeq, ", ", x$mode, " mode)\n\n", sep="")
+    invisible(x)
+}
diff --git a/R/print.factorfit.R b/R/print.factorfit.R
index dcce494..d8bb692 100644
--- a/R/print.factorfit.R
+++ b/R/print.factorfit.R
@@ -1,4 +1,4 @@
-"print.factorfit" <-
+`print.factorfit` <-
     function (x, ...) 
 {
     cat("Centroids:\n")
@@ -7,10 +7,7 @@
     out <- cbind(r2 = x$r, "Pr(>r)" = x$pvals)
     if (x$permutations) {
         printCoefmat(out, has.Pvalue = TRUE, ...)
-        cat("P values based on", x$permutations, "permutations")
-        if (!is.null(x$strata)) 
-            cat(", stratified within", x$strata)
-        cat(".\n")
+        cat(howHead(x$control))
     }
     else  printCoefmat(out, na.print = "", ...)
     invisible(x)
diff --git a/R/print.mantel.R b/R/print.mantel.R
index cce7f89..a01c7b1 100644
--- a/R/print.mantel.R
+++ b/R/print.mantel.R
@@ -16,11 +16,9 @@
     out <- quantile(x$perm, c(0.9, 0.95, 0.975, 0.99))
     cat("Upper quantiles of permutations (null model):\n")
     print(out, digits = 3)
-    cat("\nBased on", nperm, "permutations")
-    if (!is.null(x$strata)) 
-      cat(", stratified within", x$strata)
+    cat(howHead(x$control))
   }
-  cat("\n\n")
+  cat("\n")
   invisible(x)
 }
 
diff --git a/R/print.mantel.correlog.R b/R/print.mantel.correlog.R
index 539db90..61f76ee 100644
--- a/R/print.mantel.correlog.R
+++ b/R/print.mantel.correlog.R
@@ -1,9 +1,9 @@
-'print.mantel.correlog' <- function(x, ...)
-{
-    cat('\nMantel Correlogram Analysis\n')
-    cat('\nCall:\n','\n')
-    cat(deparse(x$call),'\n')
-    cat('\n')
-	printCoefmat(x$mantel.res, P.values=TRUE, signif.stars=TRUE, Pvalues = TRUE)
-    invisible(x) 
+'print.mantel.correlog' <- function(x, ...)
+{
+    cat('\nMantel Correlogram Analysis\n')
+    cat('\nCall:\n','\n')
+    cat(deparse(x$call),'\n')
+    cat('\n')
+	printCoefmat(x$mantel.res, P.values=TRUE, signif.stars=TRUE, Pvalues = TRUE)
+    invisible(x) 
 }
\ No newline at end of file
diff --git a/R/print.mrpp.R b/R/print.mrpp.R
index a30743c..632e417 100644
--- a/R/print.mrpp.R
+++ b/R/print.mrpp.R
@@ -17,17 +17,18 @@ function (x, digits = max(3, getOption("digits") - 3), ...)
         cat(formatC(x$CS, digits = digits), "\n")
     }
     cat("Chance corrected within-group agreement A: ")
-    cat(formatC(x$A, digits = digits), "\n")
+    if (!is.na(x$A))
+        cat(formatC(x$A, digits = digits), "\n")
+    else
+        cat("NA\n")
     cat("Based on observed delta", formatC(x$delta), "and expected delta",
         formatC(x$E.delta),"\n\n")
     nperm <- x$permutations
     if (nperm) {
         cat("Significance of delta:", format.pval(x$Pvalue), 
             "\n")
-        cat("Based on ", nperm, " permutations")
     }
-    if (!is.null(x$strata)) 
-        cat(", stratified within", x$strata)
-    cat("\n\n")
+    cat(howHead(x$control))
+    cat("\n")
     invisible(x)
 }
diff --git a/R/print.mso.R b/R/print.mso.R
index 83e3ba8..95a2174 100644
--- a/R/print.mso.R
+++ b/R/print.mso.R
@@ -1,9 +1,11 @@
 `print.mso` <-
     function(x,  digits = max(3, getOption("digits") - 3), ...)
 {
-    NextMethod(x, "print", digits = digits, ...)
+    NextMethod("print", x, digits = digits, ...)
     cat("mso variogram:\n\n")
     print(x$vario, digits = digits, ...)
+    if(!is.null(attr(x$vario, "control")))
+        cat("\n", howHead(attr(x$vario, "control")), "\n", sep="")
     invisible(x)
 }
 
diff --git a/R/print.nullmodel.R b/R/print.nullmodel.R
new file mode 100644
index 0000000..adda25a
--- /dev/null
+++ b/R/print.nullmodel.R
@@ -0,0 +1,14 @@
+print.nullmodel <- function(x, ...) {
+    isSeq <- ifelse(x$commsim$isSeq, "sequential", "non-sequential")
+    if (x$commsim$binary)
+        kind <- "binary"
+    else
+        kind <- ifelse(x$commsim$mode == "integer", "count", "abundance")
+    cat("An object of class", dQuote(class(x)[1L]), "\n")
+    cat(sQuote(x$commsim$method), " method (", 
+        kind, ", ", isSeq, ")\n", sep="")
+    cat(x$nrow, "x", x$ncol, "matrix\n")
+    if (x$commsim$isSeq)
+        cat("Iterations =", x$iter, "\n\n") else cat("\n")
+    invisible(x)
+}
diff --git a/R/print.oecosimu.R b/R/print.oecosimu.R
index 23d413c..bbf317b 100644
--- a/R/print.oecosimu.R
+++ b/R/print.oecosimu.R
@@ -6,7 +6,7 @@
     cat(as.character(attr(x,"call")[[1]]), "object\n\n")
     writeLines(strwrap(pasteCall(attr(x, "call"))))
     cat("\n")
-    cat("simulation method", x$oecosimu$method, "with",
+    cat("nullmodel method", sQuote(x$oecosimu$method), "with", 
         ncol(x$oecosimu$simulated), "simulations\n")
     if (length(att <- attributes(x$oecosimu$simulated)) > 1) {
         att$dim <- NULL
@@ -20,21 +20,21 @@
     ## dim attribute is always there, but print all others
 
     cat("\n\n")
-    cl <- class(x)
-    if ((length(cl) > 1 && cl[2] != "list" ) &&
-        !any(cl %in% c("adipart", "hiersimu", "multipart"))) {
-            NextMethod("print", x)
+
+    if (!inherits(x, c("adipart", "hiersimu", "multipart")) &&
+        !inherits(x$statistic, c("numeric", "list"))) {
+            print(x$statistic)
             cat("\n")
     }
     probs <- switch(x$oecosimu$alternative,
                     two.sided = c(0.025, 0.5, 0.975),
-                    greater = c(0, 0.5, 0.95),
-                    less = c(0.05, 0.5, 1))
+                    greater = c(0.5, 0.95),
+                    less = c(0.05, 0.5))
     qu <- apply(x$oecosimu$simulated, 1, quantile, probs=probs, na.rm = TRUE)
     m <- cbind("statistic" = x$oecosimu$statistic,
                "z" = x$oecosimu$z, "mean" = x$oecosimu$means, t(qu),
                "Pr(sim.)"=x$oecosimu$pval)
-    printCoefmat(m, cs.ind = 3:6, ...)
+    printCoefmat(m, cs.ind = 3:(ncol(m)-1), ...)
     if (any(is.na(x$oecosimu$simulated))) {
         nacount <- rowSums(is.na(x$oecosimu$simulated))
         cat("\nNumber of NA cases removed from simulations:\n",
diff --git a/R/print.permat.R b/R/print.permat.R
index 1d7fc3c..43693d6 100644
--- a/R/print.permat.R
+++ b/R/print.permat.R
@@ -4,12 +4,12 @@ function(x, digits=3, ...)
 {
     cat("Object of class 'permat' with ", attr(x, "times"), " simulations\n", sep="")
     cat("\nMatrix type:", attr(x, "mtype"), "\nPermutation type:", attr(x, "ptype"))
+    cat("\nMethod: ", attr(x, "method"), sep = "")
     if (attr(x, "ptype") == "swap") {
-        cat("\nMethod: ", attr(x, "method"), sep = "")
-        if (attr(x, "method") != "quasiswap") {
+        if (!is.na(attr(x, "burnin")))
             cat(", burnin: ", attr(x, "burnin"), sep = "")
+        if (!is.na(attr(x, "thin")))
             cat(", thin: ", attr(x, "thin"), sep = "")
-        }
     }
     cat("\nRestricted:", attr(x, "is.strat"), "\nFixed margins:", attr(x, "fixedmar"))
     if (!is.na(attr(x, "shuffle"))) {
@@ -20,8 +20,4 @@ function(x, digits=3, ...)
     }
     cat("\n")
     invisible(x)
-#    cat("\n\nMatrix dimensions:", nrow(x$orig), "rows,", ncol(x$orig), "columns")
-#    cat("\nSum of original matrix:", sum(x$orig))
-#    cat("\nFill of original matrix:", round(sum(x$orig>0)/(nrow(x$orig)*ncol(x$orig)),digits))
-#    cat("\nNumber of permuted matrices:", attr(x, "times"),"\n")
 }
diff --git a/R/print.permutest.betadisper.R b/R/print.permutest.betadisper.R
index 82ba329..a5b60c6 100644
--- a/R/print.permutest.betadisper.R
+++ b/R/print.permutest.betadisper.R
@@ -5,7 +5,7 @@
     cat("\n")
     writeLines(strwrap("Permutation test for homogeneity of multivariate dispersions\n"))
     ##cat("\n")
-    print(x$control)
+    cat(howHead(x$control))
     nc <- dim(x$tab)[2]
     cn <- colnames(x$tab)
     has.P <- substr(cn[nc], 1, 3) == "Pr("
@@ -21,7 +21,7 @@
         zap.i <- zap.i[!(zap.i %in% i)]
     if (length(i <- grep("N.Perm$", cn)))
         zap.i <- zap.i[!(zap.i %in% i)]
-    cat("Response: Distances", sep = "\n")
+    cat("\nResponse: Distances", sep = "\n")
     printCoefmat(x$tab, digits = digits,
                  signif.stars = getOption("show.signif.stars"),
                  has.Pvalue = has.P, P.values = has.P, cs.ind = NULL,
diff --git a/R/print.permutest.cca.R b/R/print.permutest.cca.R
index 13fa2ed..8f48257 100644
--- a/R/print.permutest.cca.R
+++ b/R/print.permutest.cca.R
@@ -1,7 +1,8 @@
-"print.permutest.cca" <-
+`print.permutest.cca` <-
     function (x, ...) 
 {
     cat("\nPermutation test for", x$method, "\n\n")
+    cat(howHead(x$control), "\n")
     writeLines(strwrap(pasteCall(x$testcall)))
     Pval <- (sum(x$F.perm >= x$F.0) + 1)/(x$nperm + 1)
     cat("Permutation test for ")
@@ -12,10 +13,6 @@
     cat("Pseudo-F:\t", x$F.0, "(with", paste(x$df, collapse = ", "),
         "Degrees of Freedom)\n")
     cat("Significance:\t", format.pval(Pval), 
-        "\n")
-    cat("Based on", x$nperm, "permutations under", x$model, "model")
-    if (!is.null(x$strata)) 
-        cat(",\nstratified within factor", x$strata)
-    cat(".\n\n")
+        "\n\n")
     invisible(x)
 }
diff --git a/R/print.protest.R b/R/print.protest.R
index 5d94c62..47c3bf5 100644
--- a/R/print.protest.R
+++ b/R/print.protest.R
@@ -1,4 +1,4 @@
-"print.protest" <-
+`print.protest` <-
   function(x, digits = max(3, getOption("digits") - 3), ...)
 {
   cat("\nCall:\n")
@@ -8,10 +8,8 @@
   cat("Correlation in a symmetric Procrustes rotation: ")
   cat(formatC(x$t0, digits = digits), "\n")
   cat("Significance:  ")
-  cat(format.pval(x$signif),"\n")
-  cat("Based on", x$permutations, "permutations")
-  if (!is.null(x$strata)) 
-    cat(", stratified within", x$strata)
-  cat(".\n\n")
+  cat(format.pval(x$signif),"\n\n")
+  cat(howHead(x$control))
+  cat("\n")
   invisible(x)
 }
diff --git a/R/print.simmat.R b/R/print.simmat.R
new file mode 100644
index 0000000..d815edd
--- /dev/null
+++ b/R/print.simmat.R
@@ -0,0 +1,18 @@
+print.simmat <- function(x, ...) {
+    isSeq <- ifelse(attr(x, "isSeq"), "sequential", "non-sequential")
+    if (attr(x, "binary"))
+        kind <- "binary"
+    else
+        kind <- ifelse(attr(x, "mode") == "integer", "count", "abundance")
+    d <- dim(x)
+    cat("An object of class", dQuote(class(x)[1L]), "\n")
+    cat(sQuote(attr(x, "method")), " method (", 
+        kind, ", ", isSeq, ")\n", sep="")
+    cat(d[1L], "x", d[2L], "matrix\n")
+    cat("Number of permuted matrices =", d[3L], "\n")
+    if (attr(x, "isSeq")) {
+        cat("Start = ", attr(x, "start"), ", End = ", attr(x, "end"), 
+            ", Thin = ", attr(x, "thin"), "\n\n", sep="") 
+        } else cat("\n")
+    invisible(x)
+}
diff --git a/R/print.specaccum.R b/R/print.specaccum.R
index 9be5ba0..c0bb1cf 100644
--- a/R/print.specaccum.R
+++ b/R/print.specaccum.R
@@ -1,4 +1,4 @@
-"print.specaccum" <-
+`print.specaccum` <-
     function(x, ...)
 {
     cat("Species Accumulation Curve\n")
@@ -6,10 +6,13 @@
     if (x$method == "random") {
         cat(", with ", ncol(x$perm), " permutations", sep="")
     }
+    if (!is.null(x$weights))
+        cat(", weighted")
     cat("\n")
     cat("Call:", deparse(x$call), "\n\n")
-    mat <- rbind(Sites = x$sites, Richness = x$richness, sd=x$sd)
+    mat <- rbind(Sites = x$sites, Individuals = x$individuals, Effort = x$effort,
+                 Richness = x$richness, sd=x$sd)
     colnames(mat) <- rep("", ncol(mat))
-    print(mat)
+    print(zapsmall(mat))
     invisible(x)
 }
diff --git a/R/print.summary.clamtest.R b/R/print.summary.clamtest.R
index 5468da7..9dae7db 100644
--- a/R/print.summary.clamtest.R
+++ b/R/print.summary.clamtest.R
@@ -1,13 +1,13 @@
-print.summary.clamtest <- function(x, digits=max(3, getOption("digits") - 3), ...) {
-    cat("Two Groups Species Classification Method (CLAM)\n\n")
-    cat("Specialization threshold =", x$specialization)
-    cat("\nAlpha level =", x$alpha)
-    cat("\n\nEstimated sample coverage:\n")
-    print(x$coverage, digits=digits)
-    cat("\nMinimum abundance for classification:\n")
-    print(structure(c(x$minv[[1]][1,2], x$minv[[2]][1,1]),
-        .Names=x$labels))
-    cat("\n")
-    printCoefmat(x$summary, digits=digits, ...)
-}
-
+print.summary.clamtest <- function(x, digits=max(3, getOption("digits") - 3), ...) {
+    cat("Two Groups Species Classification Method (CLAM)\n\n")
+    cat("Specialization threshold =", x$specialization)
+    cat("\nAlpha level =", x$alpha)
+    cat("\n\nEstimated sample coverage:\n")
+    print(x$coverage, digits=digits)
+    cat("\nMinimum abundance for classification:\n")
+    print(structure(c(x$minv[[1]][1,2], x$minv[[2]][1,1]),
+        .Names=x$labels))
+    cat("\n")
+    printCoefmat(x$summary, digits=digits, ...)
+}
+
diff --git a/R/print.summary.permat.R b/R/print.summary.permat.R
index bbb1a6f..0d88859 100644
--- a/R/print.summary.permat.R
+++ b/R/print.summary.permat.R
@@ -6,12 +6,12 @@ function(x, digits=2, ...)
     cat("Summary of object of class 'permat'\n\nCall: ")
     print(x$x$call)
     cat("\nMatrix type:", attr(x$x, "mtype"), "\nPermutation type:", attr(x$x, "ptype"))
+    cat("\nMethod: ", attr(x$x, "method"), sep = "")
     if (attr(x$x, "ptype") == "swap") {
-        cat("\nMethod: ", attr(x$x, "method"), sep = "")
-        if (attr(x$x, "method") != "quasiswap") {
+        if (!is.na(attr(x$x, "burnin")))
             cat(", burnin: ", attr(x$x, "burnin"), sep = "")
+        if (!is.na(attr(x$x, "thin")))
             cat(", thin: ", attr(x$x, "thin"), sep = "")
-        }
     }
     cat("\nRestricted:", attr(x$x, "is.strat"), "\nFixed margins:", attr(x$x, "fixedmar"))
     if (!is.na(attr(x$x, "shuffle"))) {
@@ -34,7 +34,7 @@ function(x, digits=2, ...)
         cat("\nSums within strata retained:", round(100 * sum(x$strsum) / n, digits), "%")
     cat("\n\nBray-Curtis dissimilarities among original and permuted matrices:\n")
     print(summary(x$bray))
-    cat("\nChi-squared for original matrix: ", round(attr(x$chisq, "chisq.orig"), digits), ")\n", sep = "")
+    cat("\nChi-squared for original matrix: ", round(attr(x$chisq, "chisq.orig"), digits), "\n", sep = "")
     cat("Chi-squared values among expected and permuted matrices:\n")
     print(summary(x$chisq))
 invisible(x)
diff --git a/R/print.vectorfit.R b/R/print.vectorfit.R
index c72b7ad..37c974d 100644
--- a/R/print.vectorfit.R
+++ b/R/print.vectorfit.R
@@ -5,10 +5,7 @@
     printCoefmat(out, na.print = "",
                  zap.ind = seq_len(ncol(out)-2), ...)
     if (x$permutations) {
-        cat("P values based on", x$permutations, "permutations")
-        if (!is.null(x$strata)) 
-            cat(", stratified within", x$strata)
-        cat(".\n")
+        cat(howHead(x$control))
     }
     invisible(x)
 }
diff --git a/R/protest.R b/R/protest.R
index 0671af0..1097d93 100644
--- a/R/protest.R
+++ b/R/protest.R
@@ -1,5 +1,6 @@
-"protest" <-
-    function (X, Y, scores = "sites", permutations = 999, strata, ...)
+`protest` <-
+    function (X, Y, scores = "sites", permutations = how(nperm = 999),
+              ...)
 {
     X <- scores(X, display = scores, ...)
     Y <- scores(Y, display = scores, ...)
@@ -16,28 +17,29 @@
     sol$symmetric <- TRUE
     sol$t0 <- sqrt(1 - sol$ss)
     N <- nrow(X)
-    perm <- rep(0, permutations)
-    for (i in 1:permutations) {
-        take <- permuted.index(N, strata)
-        ## avoid overhead of procrustes() and only evaluate the
-        ## statistic by svd (hand crafted from r2388 of the devel
-        ## branch).
-        perm[i] <- sum(svd(crossprod(X, Y[take,]), nv = 0, nu = 0)$d)
-    }
-    Pval <- (sum(perm >= sol$t0) + 1)/(permutations + 1)
-    if (!missing(strata)) {
-        strata <- deparse(substitute(strata))
-        s.val <- strata
-    }
-    else {
-        strata <- NULL
-        s.val <- NULL
-    }
+
+    ## Permutations: We only need the goodness of fit statistic from
+    ## Procrustes analysis, and therefore we only have the necessary
+    ## function here. This avoids a lot of overhead of calling
+    ## procrustes() for each permutation. The following gives the
+    ## Procrustes r directly.
+    procr <- function(X, Y) sum(svd(crossprod(X, Y), nv=0, nu=0)$d)
+
+    permutations <- getPermuteMatrix(permutations, N)
+    if (ncol(permutations) != N)
+        stop(gettextf("'permutations' have %d columns, but data have %d observations",
+                      ncol(permutations), N))
+    np <- nrow(permutations)
+
+    perm <- sapply(seq_len(np),
+                   function(i, ...) procr(X, Y[permutations[i,],]))
+
+    Pval <- (sum(perm >= sol$t0) + 1)/(np + 1)
+
     sol$t <- perm
     sol$signif <- Pval
-    sol$permutations <- permutations
-    sol$strata <- strata
-    sol$stratum.values <- s.val
+    sol$permutations <- np
+    sol$control <- attr(permutations, "control")
     sol$call <- match.call()
     class(sol) <- c("protest", "procrustes")
     sol
diff --git a/R/radfit.data.frame.R b/R/radfit.data.frame.R
index 1c4427a..f980500 100644
--- a/R/radfit.data.frame.R
+++ b/R/radfit.data.frame.R
@@ -3,6 +3,12 @@
 {
     ## x *must* have rownames
     rownames(x) <- rownames(x, do.NULL = TRUE)
+    ## remove empty rows with no species
+    nspec <- specnumber(x)
+    if (any(nspec == 0)) {
+        warning("removed empty rows with no species")
+        x <- x[nspec>0,, drop=FALSE]
+    }
     out <- apply(x, 1, radfit, ...)
     if (length(out) == 1)
         out <- out[[1]]
diff --git a/R/rankindex.R b/R/rankindex.R
index eb5d5bc..36c8282 100644
--- a/R/rankindex.R
+++ b/R/rankindex.R
@@ -1,14 +1,18 @@
 "rankindex" <-
 function (grad, veg, indices = c("euc", "man", "gow", "bra", 
-    "kul"), stepacross = FALSE, method = "spearman", ...) 
+    "kul"), stepacross = FALSE, method = "spearman",
+     metric = c("euclidean", "mahalanobis", "manhattan", "gower"), ...) 
 {
+    metric = match.arg(metric)
     grad <- as.data.frame(grad)
     if (any(sapply(grad, is.factor))) {
-        require(cluster) || stop("factors in 'grad' need package 'cluster'")
-        message("'grad' included factors: used cluster:::daisy")
         span <- daisy(grad)
     } else {
-        span <- vegdist(grad, "eucl")
+        span <- switch(metric,
+                       "euclidean" = dist(scale(grad, scale=TRUE)),
+                       "mahalanobis" = dist(veganMahatrans(scale(grad, scale=FALSE))),
+                       "manhattan" = dist(decostand(grad, "range"), "manhattan"),
+                       "gower" = daisy(grad, metric = "gower"))
     }
     veg <- as.matrix(veg)
     res <- numeric(length(indices))
diff --git a/R/rarecurve.R b/R/rarecurve.R
index b1782f6..a540d72 100644
--- a/R/rarecurve.R
+++ b/R/rarecurve.R
@@ -1,10 +1,18 @@
 `rarecurve` <-
     function(x, step = 1, sample, xlab = "Sample Size", ylab = "Species",
-             label = TRUE,...)
+             label = TRUE, col, lty, ...)
 {
+    ## sort out col and lty
+    if (missing(col))
+        col <- par("col")
+    if (missing(lty))
+        lty <- par("lty")
     tot <- rowSums(x)
     S <- specnumber(x)
     nr <- nrow(x)
+    ## rep col and lty to appropriate length
+    col <- rep(col, length.out = nr)
+    lty <- rep(lty, length.out = nr)
     ## Rarefy
     out <- lapply(seq_len(nr), function(i) {
         n <- seq(1, tot[i], by = step)
@@ -25,9 +33,9 @@
         abline(h = rare, lwd=0.5)
     }
     ## rarefaction curves
-    for(ln in seq_len(length(out))) {
+    for (ln in seq_len(length(out))) {
         N <- attr(out[[ln]], "Subsample")
-        lines(N, out[[ln]], ...)
+        lines(N, out[[ln]], col = col[ln], lty = lty[ln], ...)
     }
     ## label curves at their endpoitns
     if (label) {
diff --git a/R/raupcrick.R b/R/raupcrick.R
index b4a97b9..bb1b538 100644
--- a/R/raupcrick.R
+++ b/R/raupcrick.R
@@ -1,5 +1,5 @@
 `raupcrick` <-
-    function(comm, null = "r1", nsimul = 999, chase = FALSE)
+    function(comm, null = "r1", nsimul = 999, chase = FALSE, ...)
 {
     comm <- as.matrix(comm)
     comm <- ifelse(comm > 0, 1, 0)
@@ -12,7 +12,8 @@
     ## but is much slower
     sol <- oecosimu(comm, function(x) tcrossprod(x)[tri], method = null,
                     nsimul = nsimul,
-                    alternative = if (chase) "less" else "greater")
+                    alternative = if (chase) "less" else "greater",
+                    ...)
     ## Chase et al. way, or the standard way
     if (chase)
         out <- 1 - sol$oecosimu$pval
diff --git a/R/rda.default.R b/R/rda.default.R
index 1a009f7..8b7cff1 100644
--- a/R/rda.default.R
+++ b/R/rda.default.R
@@ -1,7 +1,7 @@
 `rda.default` <-
     function (X, Y, Z, scale = FALSE, ...) 
 {
-    ZERO <- 1e-04
+    ZERO <- 1e-05
     CCA <- NULL
     pCCA <- NULL
     CA <- NULL
@@ -39,7 +39,7 @@
         Y <- qr.fitted(Q, Xbar)
         sol <- svd(Y)
         ## it can happen that rank < qrank
-        rank <- min(rank, sum(sol$d > ZERO))
+        rank <- min(rank, sum(sol$d > (sol$d[1L] * ZERO)))
         sol$d <- sol$d/sqrt(NR)
         ax.names <- paste("RDA", 1:length(sol$d), sep = "")
         colnames(sol$u) <- ax.names
@@ -51,13 +51,9 @@
             CCA <- list(eig = sol$d[1:rank]^2)
             CCA$u <- as.matrix(sol$u)[, 1:rank, drop = FALSE]
             CCA$v <- as.matrix(sol$v)[, 1:rank, drop = FALSE]
-            CCA$u.eig <- sweep(as.matrix(CCA$u), 2, sol$d[1:rank], 
-                               "*")
-            CCA$v.eig <- sweep(as.matrix(CCA$v), 2, sol$d[1:rank], 
-                               "*")
-            CCA$wa.eig <- Xbar %*% sol$v[, 1:rank, drop = FALSE]
-            CCA$wa.eig <- CCA$wa.eig/sqrt(NR)
-            CCA$wa <- sweep(CCA$wa.eig, 2, 1/sol$d[1:rank], "*")
+            wa.eig <- Xbar %*% sol$v[, 1:rank, drop = FALSE]
+            wa.eig <- wa.eig/sqrt(NR)
+            CCA$wa <- sweep(wa.eig, 2, 1/sol$d[1:rank], "*")
             oo <- Q$pivot
             if (!is.null(pCCA$rank)) 
                 oo <- oo[-(1:pCCA$rank)] - ncol(Z.r)
@@ -78,8 +74,8 @@
                         QR = Q, Xbar = Xbar)
             u <- matrix(0, nrow=nrow(sol$u), ncol=0)
             v <- matrix(0, nrow=nrow(sol$v), ncol=0)
-            CCA$u <- CCA$u.eig <- CCA$wa <- CCA$wa.eig <- u
-            CCA$v <- CCA$v.eig <- v
+            CCA$u <- CCA$wa <- u
+            CCA$v <- v
             CCA$biplot <- matrix(0, 0, 0)
             CCA$alias <- colnames(Y.r)
         }
@@ -93,23 +89,19 @@
     names(sol$d) <- ax.names
     rownames(sol$u) <- rownames(X)
     rownames(sol$v) <- colnames(X)
-    rank <- min(Q$rank, sum(sol$d > ZERO))
+    rank <- min(Q$rank, sum(sol$d > (sol$d[1L] * ZERO)))
     if (rank) {
         CA <- list(eig = (sol$d[1:rank]^2))
         CA$u <- as.matrix(sol$u)[, 1:rank, drop = FALSE]
         CA$v <- as.matrix(sol$v)[, 1:rank, drop = FALSE]
-        CA$u.eig <- sweep(as.matrix(CA$u), 2, sol$d[1:rank], 
-                          "*")
-        CA$v.eig <- sweep(as.matrix(CA$v), 2, sol$d[1:rank], 
-                          "*")
         CA$rank <- rank
         CA$tot.chi <- sum(CA$eig)
         CA$Xbar <- Xbar
     } else {   # zero rank: no residual component
         CA <- list(eig = 0, rank = rank, tot.chi = 0,
                    Xbar = Xbar)
-        CA$u <- CA$u.eig <- matrix(0, nrow(sol$u), 0)
-        CA$v <- CA$v.eig <- matrix(0, nrow(sol$v), 0)
+        CA$u <- matrix(0, nrow(sol$u), 0)
+        CA$v <- matrix(0, nrow(sol$v), 0)
     }
     call <- match.call()
     call[[1]] <- as.name("rda")
diff --git a/R/read.cep.R b/R/read.cep.R
index 9f5abe8..19fc89c 100644
--- a/R/read.cep.R
+++ b/R/read.cep.R
@@ -12,7 +12,7 @@
   if (trace) 
     cat("File", file, "\n")
   if (file.access(file, 4) < 0) {
-    stop("File does not exist or is not readable.")
+    stop("file does not exist or is not readable")
   }
   on.exit(.Fortran("cepclose", PACKAGE = "vegan"))
   cep <- .Fortran("cephead", file = file, kind = integer(1), 
@@ -66,8 +66,8 @@
                         PACKAGE = "vegan"))
   if (cd$ier) {
     if (cd$ier == 1) 
-      stop("Too many non-zero entries: increase maxdata.")
-    else stop("Unknown and obscure error: don't know what to do.")
+      stop("too many non-zero entries: increase maxdata")
+    else stop("unknown and obscure error: I do not know what to do")
   }
   if (trace) 
     cat("Read", cd$nsp, "species, ", cd$nst, "sites.\n")
diff --git a/R/rgl.isomap.R b/R/rgl.isomap.R
deleted file mode 100644
index dda53bc..0000000
--- a/R/rgl.isomap.R
+++ /dev/null
@@ -1,10 +0,0 @@
-`rgl.isomap` <-
-    function(x, web = "white", ...)
-{
-    require(rgl) || stop("requires package 'rgl'")
-    ordirgl(x, ...)
-    z <- scores(x, ...)
-    net <- x$net
-    for (i in 1:nrow(net))
-        rgl.lines(z[net[i,],1], z[net[i,],2], z[net[i,],3], color=web)
-}
diff --git a/R/rgl.renyiaccum.R b/R/rgl.renyiaccum.R
deleted file mode 100644
index d4bcb28..0000000
--- a/R/rgl.renyiaccum.R
+++ /dev/null
@@ -1,31 +0,0 @@
-`rgl.renyiaccum` <-
-    function(x, rgl.height = 0.2,  ...)
-{
-    require(rgl) || stop("requires packages 'rgl'")
-    y <- x[,,1] * rgl.height
-    rgl.min = 0
-    rgl.max = max(y)
-    xp <- seq(0, 1, len = nrow(y))
-    z <- seq(0, 1, len = ncol(y))
-    ylim <- 1000 * range(y)
-    ylen <- ylim[2] - ylim[1] + 1
-    colorlut <- rainbow(ylen)
-    col <- colorlut[1000*y-ylim[1]+1]
-    rgl.bg(color = "white")
-    rgl.surface(xp, z, y, color=col)
-    y <- x[,,5] * rgl.height
-    ##rgl.surface(xp,z,y,color="grey", alpha=0.3)
-    rgl.surface(xp, z, y,  color="black", front="lines", back="lines")
-    y <- x[,,6] * rgl.height
-    ##rgl.surface(xp,z,y,color="grey",alpha=0.3)
-    rgl.surface(xp, z, y, color="black", front="lines", back="lines")
-    y <- x[,,6]*0 + rgl.min
-    rgl.surface(xp, z, y, alpha=0)
-    y <- x[,,6] * 0 + rgl.max
-    rgl.surface(xp, z, y, alpha=0)
-    labs <- pretty(c(rgl.min, range(x)))
-    rgl.bbox(color="#333377", emission="#333377", specular="#3333FF", shininess=5, alpha=0.8,
-             zlen=0, xlen=0, yat = rgl.height*labs, ylab=labs) 
-    rgl.texts(0, rgl.min, 0.5, "Scale", col = "darkblue")
-    rgl.texts(0.5, rgl.min, 0, "Sites", col="darkblue")
-}
diff --git a/R/scores.default.R b/R/scores.default.R
index 1a88482..ecfd1d8 100644
--- a/R/scores.default.R
+++ b/R/scores.default.R
@@ -39,11 +39,21 @@
     }
     else if (is.numeric(x)) {
         X <- as.matrix(x)
-        ## as.matrix() changes 1-row scores into 1-col matrix: this is
+        ## as.matrix() changes a score vector to 1-col matrix: this is
         ## a hack which may fail sometimes (but probably less often
         ## than without this hack):
-        if (ncol(X) == 1 && nrow(X) == length(choices))
-            X <- t(X)
+
+        ## Removed this hack after an issue raised by
+        ## vanderleidebastiani in github. He was worried for getting
+        ## an error when 'choices' were not given with genuinely 1-dim
+        ## (1-col) results. At a second look, it seems that this hack
+        ## will fail both with missing 'choices', and also often with
+        ## 'choices' given because 'choices' are only applied later,
+        ## so that nrow(X) > length(choices). Only vectors (dim arg
+        ## missing) should fail here. Let's see...
+        
+        ##if (ncol(X) == 1 && nrow(X) == length(choices))
+        ##    X <- t(X)
     }
     if (is.null(rownames(X))) {
         root <- substr(display, 1, 4)
diff --git a/R/scores.lda.R b/R/scores.lda.R
index c7ab23d..f96533d 100644
--- a/R/scores.lda.R
+++ b/R/scores.lda.R
@@ -1,7 +1,6 @@
 `scores.lda` <-
     function(x, display, ...)
 {
-    require(MASS) || stop("'lda' objects created in MASS need MASS for 'scores'")
     display <- match.arg(display,
                          c("sites", "species", "scores", "predictors", "x", "coef"),
                          several.ok = TRUE)
diff --git a/R/simper.R b/R/simper.R
index 3dbc0d4..5114d4f 100644
--- a/R/simper.R
+++ b/R/simper.R
@@ -1,10 +1,24 @@
 `simper` <-
-    function(comm, group, ...)
+    function(comm, group, permutations = 0, trace = FALSE,  
+             parallel = getOption("mc.cores"), ...)
 {
     if (any(rowSums(comm, na.rm = TRUE) == 0)) 
         warning("you have empty rows: results may be meaningless")
-    permutations <- 0
-    trace <- FALSE
+    pfun <- function(x, comm, comp, i, contrp) {
+        groupp <- group[perm[x,]]
+        ga <- comm[groupp == comp[i, 1], , drop = FALSE] 
+        gb <- comm[groupp == comp[i, 2], , drop = FALSE]
+        n.a <- nrow(ga)
+        n.b <- nrow(gb)
+        for(j in seq_len(n.b)) {
+            for(k in seq_len(n.a)) {
+                mdp <- abs(ga[k, , drop = FALSE] - gb[j, , drop = FALSE])
+                mep <- ga[k, , drop = FALSE] + gb[j, , drop = FALSE]
+                contrp[(j-1)*n.a+k, ] <- mdp / sum(mep)  
+            }
+        }
+        colMeans(contrp)
+    }
     comm <- as.matrix(comm)
     comp <- t(combn(unique(as.character(group)), 2))
     outlist <- NULL
@@ -12,11 +26,7 @@
     P <- ncol(comm)
     nobs <- nrow(comm)
     ## Make permutation matrix
-    if (length(permutations) == 1) {
-        perm <- shuffleSet(nobs, permutations, ...)
-    } else {  # permutations is a matrix
-        perm <- permutations
-    }
+    perm <- getPermuteMatrix(permutations, nobs, ...)
     ## check dims (especially if permutations was a matrix)
     if (ncol(perm) != nobs)
         stop(gettextf("'permutations' have %d columns, but data have %d rows",
@@ -25,39 +35,50 @@
     nperm <- nrow(perm)
     if (nperm > 0)
         perm.contr <- matrix(nrow=P, ncol=nperm)
-    for (i in 1:nrow(comp)) {
-        group.a <- comm[group == comp[i, 1], ]
-        group.b <- comm[group == comp[i, 2], ]
+    ## Parallel processing ?
+    if (is.null(parallel))
+        parallel <- 1
+    hasClus <- inherits(parallel, "cluster")
+    isParal <- (hasClus || parallel > 1) && require(parallel)
+    isMulticore <- .Platform$OS.type == "unix" && !hasClus
+    if (isParal && !isMulticore && !hasClus) {
+        parallel <- makeCluster(parallel)
+    }
+    for (i in seq_len(nrow(comp))) {
+        group.a <- comm[group == comp[i, 1], , drop = FALSE]
+        group.b <- comm[group == comp[i, 2], , drop = FALSE]
         n.a <- nrow(group.a)
         n.b <- nrow(group.b)
         contr <- matrix(ncol = P, nrow = n.a * n.b)
-        for (j in 1:n.b) {
-            for (k in 1:n.a) {
-                md <- abs(group.a[k, ] - group.b[j, ])
-                me <- group.a[k, ] + group.b[j, ]
+        for (j in seq_len(n.b)) {
+            for (k in seq_len(n.a)) {
+                md <- abs(group.a[k, , drop = FALSE] - group.b[j, , drop = FALSE])
+                me <- group.a[k, , drop = FALSE] + group.b[j, , drop = FALSE]
                 contr[(j-1)*n.a+k, ] <- md / sum(me)	
             }
         }
         average <- colMeans(contr)
         
+        ## Apply permutations
         if(nperm > 0){
             if (trace)
                 cat("Permuting", paste(comp[i,1], comp[i,2], sep = "_"), "\n")
             contrp <- matrix(ncol = P, nrow = n.a * n.b)
-            for(p in 1:nperm){
-                groupp <- group[perm[p,]]
-                ga <- comm[groupp == comp[i, 1], ] 
-                gb <- comm[groupp == comp[i, 2], ]
-                for(j in 1:n.b) {
-                    for(k in 1:n.a) {
-                        mdp <- abs(ga[k, ] - gb[j, ])
-                        mep <- ga[k, ] + gb[j, ]
-                        contrp[(j-1)*n.a+k, ] <- mdp / sum(mep)  
-                    }
-                }
-                perm.contr[ ,p] <- colMeans(contrp)
+
+            if (isParal) {
+                if (isMulticore){
+                    perm.contr <- mclapply(seq_len(nperm), function(d) 
+                        pfun(d, comm, comp, i, contrp), mc.cores = parallel)
+                    perm.contr <- do.call(cbind, perm.contr)
+                } else {
+                    perm.contr <- parSapply(parallel, seq_len(nperm), function(d) 
+                        pfun(d, comm, comp, i, contrp))
+                }  
+            } else {
+                perm.contr <- sapply(1:nperm, function(d) 
+                    pfun(d, comm, comp, i, contrp))
             }
-        p <- (apply(apply(perm.contr, 2, function(x) x >= average), 1, sum) + 1) / (nperm + 1)
+            p <- (rowSums(apply(perm.contr, 2, function(x) x >= average)) + 1) / (nperm + 1)
         } 
         else {
           p <- NULL
@@ -75,7 +96,11 @@
                     avb = avb, ord = ord, cusum = cusum, p = p)
         outlist[[paste(comp[i,1], "_", comp[i,2], sep = "")]] <- out
     }
+    ## Close socket cluster if created here
+    if (isParal && !isMulticore && !hasClus)
+        stopCluster(parallel)
     attr(outlist, "permutations") <- nperm
+    attr(outlist, "control") <- attr(perm, "control")
     class(outlist) <- "simper"
     outlist
 }
@@ -99,7 +124,9 @@
     function(object, ordered = TRUE, digits = max(3, getOption("digits") - 3), ...)
 {
     if (ordered) {
-        out <- lapply(object, function(z) data.frame(contr = z$average, sd = z$sd, ratio = z$ratio, av.a = z$ava, av.b = z$avb)[z$ord, ])
+        out <- lapply(object, function(z) 
+            data.frame(contr = z$average, sd = z$sd, ratio = z$ratio, 
+                       av.a = z$ava, av.b = z$avb)[z$ord, ])
         cusum <- lapply(object, function(z) z$cusum)
         for(i in 1:length(out)) {
             out[[i]]$cumsum <- cusum[[i]]
@@ -109,10 +136,13 @@
         } 
     } 
     else {
-        out <- lapply(object, function(z) data.frame(cbind(contr = z$average, sd = z$sd, 'contr/sd' = z$ratio, ava = z$ava, avb = z$avb, p = z$p)))
+        out <- lapply(object, function(z) 
+            data.frame(cbind(contr = z$average, sd = z$sd, 'contr/sd' = z$ratio, 
+                             ava = z$ava, avb = z$avb, p = z$p)))
     }
     attr(out, "digits") <- digits
     attr(out, "permutations") <- attr(object, "permutations")
+    attr(out, "control") <- attr(object, "control")
     class(out) <- "summary.simper"
     out
 }
@@ -139,7 +169,8 @@
                             symbols = c("***", "**", "*", ".", " ")), "legend")
         cat("---\nSignif. codes: ", leg, "\n")
     }
-    if ((np <- attr(x, "permutations")) > 0)
-        cat("P-values based on", np, "permutations\n")
+    if (!is.null(attr(x, "control")))
+        cat(howHead(attr(x, "control")))
     invisible(x)
 }
+
diff --git a/R/simulate.nullmodel.R b/R/simulate.nullmodel.R
new file mode 100644
index 0000000..f81556e
--- /dev/null
+++ b/R/simulate.nullmodel.R
@@ -0,0 +1,66 @@
+simulate.nullmodel <-
+function(object, nsim=1, seed = NULL, burnin=0, thin=1, ...)
+{
+    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
+        runif(1)
+    if (is.null(seed)) 
+        RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+    else {
+        R.seed <- get(".Random.seed", envir = .GlobalEnv)
+        set.seed(seed)
+        RNGstate <- structure(seed, kind = as.list(RNGkind()))
+        on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+    }
+    if (nsim < 1)
+        stop("'nsim' must be at least 1")
+    m <- object$data
+    if (object$commsim$isSeq) {
+        ## here is burnin, see update method
+        if (burnin > 0)
+            object <- update(object, burnin, ...)
+        x <- object$state
+    } else {
+        x <- m
+#        if (thin != 1)
+#            message("non-sequential model: 'thin' set to 1")
+        thin <- 1L
+#        if (burnin != 0)
+#            message("non-sequential model: 'burnin' set to 0")
+        burnin <- 0L
+    }
+    perm <- object$commsim$fun(x=x,
+        n=as.integer(nsim),
+        nr=object$nrow,
+        nc=object$ncol,
+        rs=object$rowSums,
+        cs=object$colSums,
+        rf=object$rowFreq,
+        cf=object$colFreq,
+        s=object$totalSum,
+        fill=object$fill,
+        thin=as.integer(thin), ...)
+    if (object$commsim$isSeq) {
+        Start <- as.integer(object$iter + 1L)
+        End <- as.integer(object$iter + nsim * thin)
+        state <- perm[,,nsim]
+        storage.mode(state) <- object$commsim$mode
+        assign("state", state, envir=object)
+        assign("iter", as.integer(End), envir=object)
+    } else {
+        Start <- 1L
+        End <- as.integer(nsim)
+    }
+    attr(perm, "data") <- m
+    attr(perm, "seed") <- RNGstate
+    attr(perm, "method") <- object$commsim$method
+    attr(perm, "binary") <- object$commsim$binary
+    attr(perm, "isSeq") <- object$commsim$isSeq
+    attr(perm, "mode") <- object$commsim$mode
+    attr(perm, "start") <- Start
+    attr(perm, "end") <- End
+    attr(perm, "thin") <- as.integer(thin)
+    class(perm) <- c("simmat", "array")
+    dimnames(perm) <- list(rownames(m), colnames(m),
+        paste("sim", seq_len(nsim), sep = "_"))
+    perm
+}
diff --git a/R/simulate.rda.R b/R/simulate.rda.R
index a39a833..70ed381 100644
--- a/R/simulate.rda.R
+++ b/R/simulate.rda.R
@@ -1,6 +1,13 @@
 `simulate.rda` <-
-    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full", ...) 
+    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full",
+             correlated = FALSE, ...) 
 {
+    ## Fail if there is no constrained component (it could be possible
+    ## to change the function to handle unconstrained ordination, too,
+    ## when rank < "full", but that would require redesign)
+    if (is.null(object$CCA))
+        stop("function can be used only with constrained ordination")
+    
     ## Handle RNG: code directly from stats::simulate.lm
     if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
         runif(1)
@@ -12,24 +19,72 @@
         RNGstate <- structure(seed, kind = as.list(RNGkind()))
         on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
     }
-    ## Proper simulation: very similar for simulate.lm, but produces a
-    ## response matrix.
-    if (nsim > 1)
-        .NotYetUsed("nsim")
+    ## indx can be an output of permute::shuffleSet in which case it
+    ## is a nsim x nrow matrix, or it may be a single vector, in which
+    ## case it will changed to shuffleSet
+    if (!is.null(indx))
+        if (is.vector(indx))
+            dim(indx) <- c(1, length(indx))
+    ## If nsim is missing, take it from indx (if given)
+    if (missing(nsim) && !is.null(indx))
+        nsim <- nrow(indx)
+    ## Check that dims match
+    if (!is.null(indx))
+        if(nrow(indx) != nsim)
+            stop(gettextf("'nsim' (%d) and no. of 'indx' rows (%d) do not match",
+                          nsim, nrow(indx)))
+    ## Proper simulation: very similar for simulate.lm, but produces
+    ## an array of response matrices
+
     ftd <- predict(object, type = "response", rank = rank)
     ## pRDA: add partial Fit to the constrained
     if (!is.null(object$pCCA))
         ftd <- ftd + object$pCCA$Fit
-    if (is.null(indx))
-        ans <- as.data.frame(ftd + matrix(rnorm(length(ftd), 
-               sd = outer(rep(1,nrow(ftd)), apply(object$CA$Xbar, 2, sd))), 
-               nrow = nrow(ftd)))
+    ## if(is.null(indx)), we have parametric Gaussian simulation and
+    ## need to generate sd matrices. The residuals sd is always taken
+    ## from the unconstrained (residual) component $CA$Xbar. If
+    ## species are uncorrelated, we need only species sd's, but if
+    ## correlated, we also need species covariances.
+    if (!correlated)
+        dev <- outer(rep(1, nrow(ftd)), apply(object$CA$Xbar, 2, sd))
     else
-        ans <- as.data.frame(ftd + object$CA$Xbar[indx,])
+        dev <- cov(object$CA$Xbar)
+    ## Generate an array
+    ans <- array(0, c(dim(ftd), nsim))
+    for (i in seq_len(nsim)) {
+        if (!is.null(indx))
+            ans[,,i] <- as.matrix(ftd + object$CA$Xbar[indx[i,],])
+        else if (!correlated)
+            ans[,,i] <- as.matrix(ftd + matrix(rnorm(length(ftd), sd = dev),
+                                               nrow = nrow(ftd)))
+        else {
+            ans[,,i] <- t(apply(ftd, 1,
+                                function(x) mvrnorm(1, mu = x, Sigma = dev))) 
+        }
+    }
+    ## set RNG attributes
     if (is.null(indx))
         attr(ans, "seed") <- RNGstate
-    else
-        attr(ans, "seed") <- indx
+    else 
+        attr(ans, "seed") <- "index"
+    ## set commsim attributes if nsim > 1, else return a 2-dim matrix
+    if (nsim == 1) {
+        ans <- ans[,,1]
+        attributes(ans) <- attributes(ftd)
+    } else {
+        dimnames(ans) <- list(rownames(ftd), colnames(ftd),
+                              paste("sim", seq_len(nsim), sep = "_"))
+        attr(ans, "data") <- round(ftd + object$CA$Xbar, 12)
+        attr(ans, "method") <- paste("simulate", ifelse(is.null(indx),
+                                                        "parametric", "index"))
+        attr(ans, "binary") <- FALSE
+        attr(ans, "isSeq") <- FALSE
+        attr(ans, "mode") <- "double"
+        attr(ans, "start") <- 1L
+        attr(ans, "end") <- as.integer(nsim)
+        attr(ans, "thin") <- 1L
+        class(ans) <- c("simulate.rda", "simmat", "array")
+    }
     ans
 }
 
@@ -39,8 +94,12 @@
 ### still guarantee that all marginal totals are positive.
 
 `simulate.cca` <-
-    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full", ...)
+    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full",
+             correlated = FALSE, ...)
 {
+    ## Fail if no CCA
+    if (is.null(object$CCA))
+        stop("function can be used only with constrained ordination")
     ## Handle RNG: code directly from stats::simulate.lm
     if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
         runif(1)
@@ -52,10 +111,16 @@
         RNGstate <- structure(seed, kind = as.list(RNGkind()))
         on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
     }
-    ## Proper simulation: very similar for simulate.lm, but produces a
-    ## response matrix.
-    if (nsim > 1)
-        .NotYetUsed("nsim")
+    ## Preparations like in simulate.rda()
+    if (!is.null(indx))
+        if (is.vector(indx))
+            dim(indx) <- c(1, length(indx))
+    if (missing(nsim) && !is.null(indx))
+        nsim <- nrow(indx)
+    if (!is.null(indx))
+        if(nrow(indx) != nsim)
+            stop(gettextf("'nsim' (%d) and no. of 'indx' rows (%d) do not match",
+                          nsim, nrow(indx)))   
     ## Need sqrt of rowsums for weighting
     sq.r <- sqrt(object$rowsum)
     ## Fitted value
@@ -65,21 +130,53 @@
         ftd <- ftd + object$pCCA$Fit
     ## Residual Xbar need weighting and back-weighting
     Xbar <- sweep(object$CA$Xbar, 1, sq.r, "*")
-    if (is.null(indx)) {
-        ans <- matrix(rnorm(length(ftd), 
-               sd = outer(rep(1,nrow(ftd)), apply(Xbar, 2, sd))), 
+    ## Simulation
+    if (correlated)
+        dev <- cov(Xbar)
+    else
+        dev <- outer(rep(1, nrow(ftd)), apply(Xbar, 2, sd))
+    ans <- array(0, c(dim(ftd), nsim))
+    for (i in seq_len(nsim)) {
+        if (is.null(indx)) {
+            if (correlated)
+                tmp <- mvrnorm(nrow(ftd), numeric(ncol(ftd)), Sigma = dev)
+            else
+                tmp <- matrix(rnorm(length(ftd), sd = dev), 
                           nrow = nrow(ftd))
-        ans <- as.data.frame(ftd + sweep(ans, 1, sq.r, "/"))
+            ans[,,i] <- as.matrix(ftd + sweep(tmp, 1, sq.r, "/"))
+        }
+        else 
+            ans[,,i] <- as.matrix(ftd + sweep(Xbar[indx[i,],], 1, sq.r, "/"))
     }
-    else 
-        ans <- as.data.frame(ftd + sweep(Xbar[indx,], 1, sq.r, "/"))
     ## From internal form to the original form with fixed marginal totals
     rc <- object$rowsum %o% object$colsum
-    ans <- (ans * sqrt(rc) + rc) * object$grand.total
+    for (i in seq_len(nsim))
+        ans[,,i] <- (ans[,,i] * sqrt(rc) + rc) * object$grand.total
+    ## RNG attributes
     if (is.null(indx))
         attr(ans, "seed") <- RNGstate
     else
-        attr(ans, "seed") <- indx
+        attr(ans, "seed") <- "index"
+    ## set commsim attributes if nsim > 1, else return a 2-dim matrix
+    if (nsim == 1) {
+        ans <- ans[,,1]
+        attributes(ans) <- attributes(ftd)
+    } else {
+        dimnames(ans) <- list(rownames(ftd), colnames(ftd),
+                              paste("sim", seq_len(nsim), sep = "_"))
+        obsdata <- ftd + object$CA$Xbar
+        obsdata <- (obsdata * sqrt(rc) + rc) * object$grand.total
+        attr(ans, "data") <- round(obsdata, 12)
+        attr(ans, "method") <- paste("simulate", ifelse(is.null(indx),
+                                                        "parametric", "index"))
+        attr(ans, "binary") <- FALSE
+        attr(ans, "isSeq") <- FALSE
+        attr(ans, "mode") <- "double"
+        attr(ans, "start") <- 1L
+        attr(ans, "end") <- as.integer(nsim)
+        attr(ans, "thin") <- 1L
+        class(ans) <- c("simulate.cca", "simmat", "array")
+    }    
     ans
 }
 
@@ -92,8 +189,14 @@
 ### component.
 
 `simulate.capscale` <-
-    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full", ...) 
+    function(object, nsim = 1, seed = NULL, indx = NULL, rank = "full",
+             correlated = FALSE, ...) 
 {
+    ## Fail if no CCA component
+    if (is.null(object$CCA))
+        stop("function can be used only with constrained ordination")
+    if (is.null(indx) && correlated)
+        warning("argument 'correlated' does not work and will be ignored")
     ## Handle RNG: code directly from stats::simulate.lm
     if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
         runif(1)
diff --git a/R/specpool.R b/R/specpool.R
index db85afd..ee03bcb 100644
--- a/R/specpool.R
+++ b/R/specpool.R
@@ -1,5 +1,5 @@
-"specpool" <-
-    function (x, pool) 
+`specpool` <-
+    function (x, pool, smallsample = TRUE) 
 {
     x <- as.matrix(x)
     if (missing(pool)) 
@@ -25,6 +25,10 @@
         n <- length(gr)
         if (n <= 0)
             next
+        if (smallsample)
+            ssc <- (n-1)/n
+        else
+            ssc <- 1
         X <- x[gr, , drop = FALSE]
         freq <- colSums(X > 0)
         p <- freq[freq > 0]/n
@@ -37,8 +41,9 @@
             a2 <- sum(freq == 2)
         else 0
         chao[is] <- S[is] + if(!is.na(a2) && a2 > 0)
-            a1 * a1/2/a2
-        else 0
+            ssc * a1 * a1/2/a2
+        else
+            ssc * a1 * (a1-1)/2
         jack.1[is] <- S[is] + a1 * (n - 1)/n
         jack.2[is] <- S[is] + a1 * (2 * n - 3)/n - a2 * (n - 
                                                          2)^2/n/(n - 1)
@@ -46,7 +51,11 @@
         aa <- if (!is.na(a2) && a2 > 0) 
             a1/a2
         else 0
-        var.chao[is] <- a2 * (0.5 + (1 + aa/4) * aa) * aa * aa
+        if (a2 > 0)
+            var.chao[is] <- a2 * ssc * (0.5 + ssc * (1 + aa/4) * aa) * aa * aa
+        else
+            var.chao[is] <-
+                ssc * (ssc * (a1*(2*a1-1)^2/4 - a1^4/chao[is]/4) + a1*(a1-1)/2)
         if (!is.na(a1) && a1 > 0) {
             jf <- table(rowSums(X[, freq == 1, drop = FALSE] > 
                                 0))
diff --git a/R/str.nullmodel.R b/R/str.nullmodel.R
new file mode 100644
index 0000000..8ce9c90
--- /dev/null
+++ b/R/str.nullmodel.R
@@ -0,0 +1,2 @@
+`str.nullmodel` <-
+    function(object, ...) str(as.list(object), ...)
diff --git a/R/stressplot.R b/R/stressplot.R
index ec97d9b..4a588a2 100644
--- a/R/stressplot.R
+++ b/R/stressplot.R
@@ -75,10 +75,16 @@
 `stressplot.default` <-
     function(object, dis, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
 {
-    require(MASS) || stop("Needs MASS package")
+    ## the default function only works with metaMDS or MASS::isoMDS results
+    if (!(inherits(object, "metaMDS") ||
+        all(c("points", "stress") %in% names(object))))
+        stop("can be used only with objects that are compatible with MASS::isoMDS results")
     if (missing(dis))
-        dis <- metaMDSredist(object)
-    if (attr(dis, "Size") != nrow(object$points))
+        if (inherits(object, "metaMDS"))
+            dis <- metaMDSredist(object)
+        else
+            stop("needs dissimilarities 'dis'")
+     if (attr(dis, "Size") != nrow(object$points))
         stop("Dimensions do not match in ordination and dissimilarities")
     shep <- Shepard(dis, object$points)
     stress <- sum((shep$y - shep$yf)^2)/sum(shep$y^2)
diff --git a/R/stressplot.wcmdscale.R b/R/stressplot.wcmdscale.R
new file mode 100644
index 0000000..0db46e5
--- /dev/null
+++ b/R/stressplot.wcmdscale.R
@@ -0,0 +1,187 @@
+### stressplot() methods for eigenvector ordinations wcmdscale, rda,
+### cca, capscale
+
+`stressplot.wcmdscale` <-
+    function(object, k = 2, pch,  p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    ## Check that original distances can be reconstructed: this
+    ## requires that all axes were calculated instead of 'k' first.
+    hasdims <- NCOL(object$points)
+    if (!is.null(object$negaxes))
+        hasdims <- hasdims + NCOL(object$negaxes)
+    if (hasdims < length(object$eig))
+        stop("observed distances cannot be reconstructed: all axes were not calculated")
+    ## Get the ordination distances in k dimensions
+    if (k > NCOL(object$points))
+        stop("'k' cannot exceed the number of real dimensions")
+    w <- sqrt(object$weights)
+    u <- diag(w) %*% object$points
+    odis <- dist(u[,1:k, drop = FALSE])
+    ## Reconstitute the original observed distances
+    dis <- dist(u)
+    if (!is.null(object$negaxes))
+        dis <- sqrt(dis^2 - dist(diag(w) %*% object$negaxes)^2)
+    ## additive constant is not implemented in wcmdscale (which
+    ## returns 'ac = NA'), but the next statement would take care of
+    ## that: we want to have the input distances as observed distances
+    ## so that we need to subtract 'ac' here, although ordination
+    ## distances 'odis' do not add up to 'dis' but to 'dis + ac'.
+    if (!is.na(object$ac))
+        dis <- dis - object$ac
+    ##Plot
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)
+}
+
+`stressplot.rda` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    ## Normalized scores to reconstruct data
+    u <- cbind(object$CCA$u, object$CA$u)
+    v <- cbind(object$CCA$v, object$CA$v)
+    ev <- c(object$CCA$eig, object$CA$eig)
+    ## normalizing constant
+    nr <- NROW(u)
+    const <- sqrt(ev * (nr-1))
+    u <- u %*% diag(const)
+    ## Distances
+    Xbar <- u %*% t(v)
+    Xbark <- u[, seq_len(k), drop = FALSE] %*% t(v[, seq_len(k), drop = FALSE])
+    if (!is.null(object$pCCA)) {
+        Xbar <- Xbar + object$pCCA$Fit
+        Xbark <- Xbark + object$pCCA$Fit
+    }
+    dis <- dist(Xbar)
+    odis <- dist(Xbark)
+    ## plot like above
+        ## Plot
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)
+}
+
+`stressplot.cca` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    ## Normalized scores to reconstruct data
+    u <- cbind(object$CCA$u, object$CA$u)
+    sev <- sqrt(c(object$CCA$eig, object$CA$eig))
+    w <- sqrt(object$rowsum)
+    u <- diag(w) %*% u %*% diag(sev)
+    v <- cbind(object$CCA$v, object$CA$v)
+    v <- diag(sqrt(object$colsum)) %*% v
+    ## Distances
+    Xbar <- u %*% t(v)
+    Xbark <- u[,seq_len(k), drop = FALSE] %*% t(v[,seq_len(k), drop = FALSE])
+    if (!is.null(object$pCCA)) {
+        Xbar <- Xbar + object$pCCA$Fit
+        Xbark <- Xbark + object$pCCA$Fit
+    }
+    dis <- dist(Xbar)
+    odis <- dist(Xbark)
+    ## Plot
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)
+}
+
+`stressplot.capscale` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    ## Scores to reconstruct data
+    u <- cbind(object$CCA$u, object$CA$u)
+    ev <- c(object$CCA$eig, object$CA$eig)
+    if (object$adjust == 1)
+        const <- sqrt(NROW(u) - 1)
+    else
+        const <- 1
+    u <- u %*% diag(sqrt(ev) * const)
+    ## Constrained ordination needs also scores 'v' to reconstruct
+    ## 'data', but these are not returned by capscale() which replaces
+    ## original 'v' with weighted sums of 'comm' data.
+    if (!is.null(object$CCA)) 
+        v <- svd(object$CCA$Xbar - object$CA$Xbar, nu = 0, nv = object$CCA$qrank)$v
+    else
+        v <- NULL
+    if (!is.null(object$CA))
+        v <- cbind(v, svd(object$CA$Xbar, nu = 0, nv = object$CA$rank)$v)
+    ## Reconstruct Xbar and Xbark
+    Xbar <- u %*% t(v)
+    Xbark <- u[,seq_len(k), drop = FALSE] %*% t(v[,seq_len(k), drop = FALSE])
+    if (!is.null(object$pCCA)) {
+        pFit <- object$pCCA$Fit/object$adjust
+        Xbar <- Xbar + pFit
+        Xbark <- Xbark + pFit
+    }
+    ## Distances
+    dis <- dist(Xbar)
+    odis <- dist(Xbark)
+    if (!is.null(object$CA$imaginary.u.eig))
+        dis <- sqrt(dis^2 - dist(object$CA$imaginary.u.eig)^2)
+    if (!is.null(object$ac))
+        dis <- dis - object$ac
+    ## plot like above
+        ## Plot
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)
+}
+
+## Standard R PCA functions
+
+`stressplot.prcomp` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    dis <- dist(object$x)
+    odis <- dist(object$x[, 1:k, drop = FALSE])
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)    
+}
+
+`stressplot.princomp` <-
+    function(object, k = 2, pch, p.col = "blue", l.col = "red", lwd = 2, ...)
+{
+    dis <- dist(object$scores)
+    odis <- dist(object$scores[, 1:k, drop = FALSE])
+    if (missing(pch))
+        if (length(dis) > 5000)
+            pch <- "."
+        else
+            pch <- 1
+    plot(dis, odis, pch = pch, col = p.col, xlab = "Observed Dissimilarity",
+         ylab = "Ordination Distance", ...)
+    abline(0, 1, col = l.col, lwd = lwd, ...)
+    invisible(odis)    
+}
diff --git a/R/summary.clamtest.R b/R/summary.clamtest.R
index d6443af..f7e586e 100644
--- a/R/summary.clamtest.R
+++ b/R/summary.clamtest.R
@@ -1,7 +1,7 @@
-summary.clamtest <- function(object, ...) {
-    structure(c(attr(object, "settings"), 
-        list(summary=cbind(Species=table(object$Classes), 
-            Proportion=table(object$Classes)/nrow(object)),
-        minv=attr(object, "minv"),
-        coverage=attr(object, "coverage"))), class="summary.clamtest")
-}
+summary.clamtest <- function(object, ...) {
+    structure(c(attr(object, "settings"), 
+        list(summary=cbind(Species=table(object$Classes), 
+            Proportion=table(object$Classes)/nrow(object)),
+        minv=attr(object, "minv"),
+        coverage=attr(object, "coverage"))), class="summary.clamtest")
+}
diff --git a/R/summary.dispweight.R b/R/summary.dispweight.R
new file mode 100644
index 0000000..b722d24
--- /dev/null
+++ b/R/summary.dispweight.R
@@ -0,0 +1,22 @@
+### summary methods extracts dispweight attributes, and prints a table
+### of dispersion statistics
+
+`summary.dispweight`  <-
+    function(object, ...)
+{
+    x <- attributes(object)
+    class(x) <- "summary.dispweight"
+    x
+}
+
+`print.summary.dispweight` <-
+    function(x, ...)
+{
+    tab <- with(x, cbind(D, weights, df, p))
+    colnames(tab) <- c("Dispersion", "Weight", "Df", "Pr(Disp.)")
+    printCoefmat(tab, cs.ind = NA, ...)
+    if (!is.na(x$nsimul))
+        cat(gettextf("Based on %d simulations on '%s' nullmodel\n",
+                     x$nsimul, x$nullmodel))
+    invisible(x)
+}
diff --git a/R/summary.permat.R b/R/summary.permat.R
index 8107524..51d07d4 100644
--- a/R/summary.permat.R
+++ b/R/summary.permat.R
@@ -3,6 +3,8 @@
     function(object, ...)
 {
     x <- object
+    ## calculations are much faster if x$orig is matrix instead of data.frame
+    x$orig <- data.matrix(x$orig)
     n <- attr(x, "times")
     ss <- sum(x$orig)
     fi <- sum(x$orig > 0)
@@ -32,7 +34,9 @@
     attr(chisq, "chisq.orig") <- sum((x$orig - E)^2 / E)
 #    attr(chisq, "df") <- (nr - 1) * (nc - 1)
     ## ts if sequential
-    seqmethods <- c("swap", "tswap", "abuswap")
+    seqmethods <- sapply(make.commsim(), function(z) make.commsim(z)$isSeq)
+    seqmethods <- names(seqmethods)[seqmethods]
+#    seqmethods <- c("swap", "tswap", "abuswap")
     if (attr(x, "method") %in% seqmethods) {
         startval <- attr(x, "burnin") + 1 
         dtime <- max(1, attr(x, "thin"))
diff --git a/R/tabasco.R b/R/tabasco.R
index b5c79ab..d30d2dc 100644
--- a/R/tabasco.R
+++ b/R/tabasco.R
@@ -19,32 +19,52 @@
                 sp.ind <- order(wascores(use, x))
         }
         else if (inherits(use, c("dendrogram", "hclust", "twins"))) {
+            ## "twins" and "dendrogram" are treated as "dendrogram",
+            ## but "hclust" is kept as "hclust": they differ in
+            ## reorder()
             if (inherits(use, "twins")) {
-                require(cluster) || stop("package cluster needed to handle 'use'")
-            }
-            if (!inherits(use, "dendrogram"))
                 use <- as.dendrogram(use)
+            }
             if (!is.null(site.ind))
                 stop("'site.ind' cannot be used with dendrogram")
+            ## The tree/dendrogam and input data must be ordered
+            ## identically. It could be regarded as a "user error" if
+            ## they are not, but this could be really frustrating and
+            ## give obscure errors, and therefore we take care of
+            ## identical ordering here
+            if (inherits(use, "hclust") && !is.null(use$labels))
+                x <- x[use$labels,]
+            else # dendrogram
+                x <- x[labels(use),]
             ## Reorder tree if Rowv specified
             if (isTRUE(Rowv)) {
                 ## order by first CA axis -- decorana() is fastest
                 tmp <- decorana(x, ira = 1)
+                ## reorder() command is equal to all, but "dendrogram"
+                ## will use unweighted mean and "hclust" weighted
+                ## mean.
                 use <- reorder(use, scores(tmp, dis="sites", choices = 1),
-                               agglo.FUN = mean)
+                               agglo.FUN = "mean")
             } else if (length(Rowv) > 1) {
                 ## Rowv is a vector
                 if (length(Rowv) != nrow(x))
                     stop(gettextf("Rowv has length %d, but 'x' has %d rows",
                                   length(Rowv), nrow(x)))
-                use <- reorder(use, Rowv, agglo.FUN = mean)
+                use <- reorder(use, Rowv, agglo.FUN = "mean")
+            }
+            if (inherits(use, "dendrogram")) { 
+                site.ind <- seq_len(nrow(x))
+                names(site.ind) <- rownames(x)
+                site.ind <- site.ind[labels(use)]
+            } else {
+                site.ind <- use$order
             }
-            site.ind <- seq_len(nrow(x))
-            names(site.ind) <- rownames(x)
-            site.ind <- site.ind[labels(use)]
             if (is.null(sp.ind)) 
                 sp.ind <- order(wascores(order(site.ind), x))
             pltree <- use
+            ## heatmap needs a "dendrogram"
+            if(!inherits(pltree, "dendrogram"))
+                pltree <- as.dendrogram(pltree)
         }
         else if (is.list(use)) {
             tmp <- scores(use, choices = 1, display = "sites")
@@ -66,24 +86,34 @@
     }
     ## see if sp.ind is a dendrogram or hclust tree
     if (inherits(sp.ind, c("hclust", "dendrogram", "twins"))) {
-        if (inherits(sp.ind, "twins"))
-            require("cluster") || stop("package cluster needed to handle 'sp.ind'")
-        if (!inherits(sp.ind, "dendrogram"))
+        if (inherits(sp.ind, "twins")) {
             sp.ind <- as.dendrogram(sp.ind)
+        }
         sptree <- sp.ind
+        ## Reorder data to match order in the dendrogam (see 'use' above)
+        if (inherits(sptree, "hclust"))
+            x <- x[, sptree$labels]
+        else # dendrogram
+            x <- x[, labels(sptree)]
         ## Consider reordering species tree
         if (isTRUE(Colv) && !is.null(site.ind)) {
             sptree <- reorder(sptree, wascores(order(site.ind), x),
-                                  agglo.FUN = mean)
+                                  agglo.FUN = "mean")
         } else if (length(Colv) > 1) {
             if (length(Colv) != ncol(x))
                 stop(gettextf("Colv has length %d, but 'x' has %d columns",
                               length(Colv), ncol(x)))
-            sptree <- reorder(sptree, Colv, agglo.FUN = mean)
+            sptree <- reorder(sptree, Colv, agglo.FUN = "mean")
+        }
+        if (inherits(sptree, "dendrogram")) {
+            sp.ind <- seq_len(ncol(x))
+            names(sp.ind) <- colnames(x)
+            sp.ind <- sp.ind[labels(sptree)]
+        } else {
+            sp.ind <- sptree$order
         }
-        sp.ind <- seq_len(ncol(x))
-        names(sp.ind) <- colnames(x)
-        sp.ind <- sp.ind[labels(sptree)]
+        if (!inherits(sptree, "dendrogram"))
+            sptree <- as.dendrogram(sptree)
         ## reverse: origin in the upper left corner
         sptree <- rev(sptree)
     }
diff --git a/R/text.cca.R b/R/text.cca.R
index 90cfc6a..dc22d22 100644
--- a/R/text.cca.R
+++ b/R/text.cca.R
@@ -4,16 +4,19 @@
 {
     formals(arrows) <- c(formals(arrows), alist(... = ))
     if (length(display) > 1)
-        stop("Only one 'display' item can be added in one command.")
+        stop("only one 'display' item can be added in one command")
     pts <- scores(x, choices = choices, display = display, scaling = scaling,
                   const)
+    ## store rownames of pts for use later, otherwise if user supplies
+    ## labels, the checks in "cn" branch fail and "bp" branch will
+    ## be entered even if there should be no "bp" plotting
+    cnam <- rownames(pts)
     if (!missing(labels))
         rownames(pts) <- labels
     if (!missing(select))
         pts <- .checkSelect(select, pts)
     if (display == "cn") {
-        cnam <- rownames(pts)
-        text(pts, labels = cnam, ...)
+        text(pts, labels = rownames(pts), ...)
         pts <- scores(x, choices = choices, display = "bp", scaling = scaling,
                       const)
         bnam <- rownames(pts)
diff --git a/R/tolerance.cca.R b/R/tolerance.cca.R
index 8776801..87e961d 100644
--- a/R/tolerance.cca.R
+++ b/R/tolerance.cca.R
@@ -27,7 +27,7 @@ tolerance.cca <- function(x, choices = 1:2,
                           which = c("species","sites"),
                           scaling = 2, useN2 = FALSE, ...) {
     if(inherits(x, "rda"))
-        stop("Tolerances only available for unimodal ordinations.")
+        stop("tolerances only available for unimodal ordinations")
     if(missing(which))
         which <- "species"
     ## reconstruct species/response matrix Y - up to machine precision!
diff --git a/R/treedist.R b/R/treedist.R
index 3c694c0..cc73349 100644
--- a/R/treedist.R
+++ b/R/treedist.R
@@ -1,5 +1,5 @@
 `treedist` <-
-    function(x, tree, relative = TRUE,  match.force = FALSE, ...)
+    function(x, tree, relative = TRUE,  match.force = TRUE, ...)
 {
     n <- nrow(x)
     ABJ <- matrix(0, n , n)
diff --git a/R/treedive.R b/R/treedive.R
index 1497e42..fcc3ad6 100644
--- a/R/treedive.R
+++ b/R/treedive.R
@@ -1,17 +1,17 @@
 `treedive` <-
-    function(comm, tree, match.force = FALSE)
+    function(comm, tree, match.force = TRUE, verbose = TRUE)
 {
     if (!inherits(tree, c("hclust", "spantree")))
         stop("'clus' must be an 'hclust' or 'spantree' result object")
     m <- as.matrix(cophenetic(tree))
     ## Check tree/comm match by names
     if (match.force || ncol(comm) != ncol(m)) {
-        if (match.force)
+        if (match.force && verbose)
             message("Forced matching of 'tree' labels and 'comm' names")
-        else
+        else if (verbose)
             message("Dimensions do not match between 'comm' and 'tree'")
         fnd <- colnames(comm) %in% tree$labels
-        if (!all(fnd)) {
+        if (!all(fnd) && verbose) {
             warning("not all names of 'comm' found in 'tree'")
             comm <- comm[, fnd]
         }
diff --git a/R/tsallis.R b/R/tsallis.R
index 3fab4dd..70a1b3e 100644
--- a/R/tsallis.R
+++ b/R/tsallis.R
@@ -1,44 +1,44 @@
-tsallis <-
-function (x, scales = seq(0, 2, 0.2), norm=FALSE, hill=FALSE)
-{
-    if (norm && hill)
-        stop("'norm = TRUE' and 'hill = TRUE' should not be used at the same time")
-    x <- as.matrix(x)
-    n <- nrow(x)
-    p <- ncol(x)
-    if (p == 1) {
-        x <- t(x)
-        n <- nrow(x)
-        p <- ncol(x)
-    }
-    x <- decostand(x, "total", 1)
-    m <- length(scales)
-    result <- array(0, dim = c(n, m))
-    dimnames(result) <- list(sites = rownames(x), scale = scales)
-    for (a in 1:m) {
-        if (scales[a] != 1 && scales[a] != 0) {
-                result[, a] <- (1-(apply(x^scales[a], 1, sum)))/(scales[a] - 1)
-        }
-        else {
-            if (scales[a] == 1) result[, a] <- diversity(x, "shannon")
-            if (scales[a] == 0) result[, a] <- rowSums(x > 0) - 1
-        }
-        if (norm) {
-            ST <- rowSums(x > 0)
-            if (scales[a] == 1) result[, a] <- result[, a] / log(ST)
-            else result[, a] <- result[, a] / ((ST^(1-scales[a]) - 1) / (1 - scales[a]))
-        }
-        if (hill) {
-            result[, a] <- if (scales[a] == 1) {
-                exp(result[, a])
-            } else {
-                (1 - (scales[a] - 1) * result[, a])^(1/(1-scales[a]))
-            }
-        }
-    }
-    result <- as.data.frame(result)
-    if (any(dim(result) == 1)) 
-        result <- unlist(result, use.names = TRUE)
-    class(result) <- c("tsallis", "renyi", class(result))
-    result
-}
+tsallis <-
+function (x, scales = seq(0, 2, 0.2), norm=FALSE, hill=FALSE)
+{
+    if (norm && hill)
+        stop("'norm = TRUE' and 'hill = TRUE' should not be used at the same time")
+    x <- as.matrix(x)
+    n <- nrow(x)
+    p <- ncol(x)
+    if (p == 1) {
+        x <- t(x)
+        n <- nrow(x)
+        p <- ncol(x)
+    }
+    x <- decostand(x, "total", 1)
+    m <- length(scales)
+    result <- array(0, dim = c(n, m))
+    dimnames(result) <- list(sites = rownames(x), scale = scales)
+    for (a in 1:m) {
+        if (scales[a] != 1 && scales[a] != 0) {
+                result[, a] <- (1-(apply(x^scales[a], 1, sum)))/(scales[a] - 1)
+        }
+        else {
+            if (scales[a] == 1) result[, a] <- diversity(x, "shannon")
+            if (scales[a] == 0) result[, a] <- rowSums(x > 0) - 1
+        }
+        if (norm) {
+            ST <- rowSums(x > 0)
+            if (scales[a] == 1) result[, a] <- result[, a] / log(ST)
+            else result[, a] <- result[, a] / ((ST^(1-scales[a]) - 1) / (1 - scales[a]))
+        }
+        if (hill) {
+            result[, a] <- if (scales[a] == 1) {
+                exp(result[, a])
+            } else {
+                (1 - (scales[a] - 1) * result[, a])^(1/(1-scales[a]))
+            }
+        }
+    }
+    result <- as.data.frame(result)
+    if (any(dim(result) == 1)) 
+        result <- unlist(result, use.names = TRUE)
+    class(result) <- c("tsallis", "renyi", class(result))
+    result
+}
diff --git a/R/tsallisaccum.R b/R/tsallisaccum.R
index 44af3eb..fcbaeaf 100644
--- a/R/tsallisaccum.R
+++ b/R/tsallisaccum.R
@@ -1,48 +1,48 @@
-tsallisaccum <-
-function (x, scales = seq(0, 2, 0.2), permutations = 100, raw = FALSE,
-          subset, ...)
-{
-    if (!missing(subset))
-        x <- subset(x, subset)
-    x <- as.matrix(x)
-    n <- nrow(x)
-    p <- ncol(x)
-    if (p == 1) {
-        x <- t(x)
-        n <- nrow(x)
-        p <- ncol(x)
-    }
-    m <- length(scales)
-    result <- array(dim = c(n, m, permutations))
-    dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, 
-        permutation = c(1:permutations))
-    for (k in 1:permutations) {
-        result[, , k] <- as.matrix(tsallis((apply(x[sample(n), 
-            ], 2, cumsum)), scales = scales, ...))
-    }
-    if (raw) {
-        if (m == 1) {
-            result <- result[, 1, ]
-        }
-    }
-    else {
-        tmp <- array(dim = c(n, m, 6))
-        for (i in 1:n) {
-            for (j in 1:m) {
-                tmp[i, j, 1] <- mean(result[i, j, 1:permutations])
-                tmp[i, j, 2] <- sd(result[i, j, 1:permutations])
-                tmp[i, j, 3] <- min(result[i, j, 1:permutations])
-                tmp[i, j, 4] <- max(result[i, j, 1:permutations])
-                tmp[i, j, 5] <- quantile(result[i, j, 1:permutations], 
-                  0.025)
-                tmp[i, j, 6] <- quantile(result[i, j, 1:permutations], 
-                  0.975)
-            }
-        }
-        result <- tmp
-        dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, 
-            c("mean", "stdev", "min", "max", "Qnt 0.025", "Qnt 0.975"))
-    }
-    class(result) <- c("tsallisaccum", "renyiaccum", class(result))
-    result
-}
+tsallisaccum <-
+function (x, scales = seq(0, 2, 0.2), permutations = 100, raw = FALSE,
+          subset, ...)
+{
+    if (!missing(subset))
+        x <- subset(x, subset)
+    x <- as.matrix(x)
+    n <- nrow(x)
+    p <- ncol(x)
+    if (p == 1) {
+        x <- t(x)
+        n <- nrow(x)
+        p <- ncol(x)
+    }
+    m <- length(scales)
+    result <- array(dim = c(n, m, permutations))
+    dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, 
+        permutation = c(1:permutations))
+    for (k in 1:permutations) {
+        result[, , k] <- as.matrix(tsallis((apply(x[sample(n), 
+            ], 2, cumsum)), scales = scales, ...))
+    }
+    if (raw) {
+        if (m == 1) {
+            result <- result[, 1, ]
+        }
+    }
+    else {
+        tmp <- array(dim = c(n, m, 6))
+        for (i in 1:n) {
+            for (j in 1:m) {
+                tmp[i, j, 1] <- mean(result[i, j, 1:permutations])
+                tmp[i, j, 2] <- sd(result[i, j, 1:permutations])
+                tmp[i, j, 3] <- min(result[i, j, 1:permutations])
+                tmp[i, j, 4] <- max(result[i, j, 1:permutations])
+                tmp[i, j, 5] <- quantile(result[i, j, 1:permutations], 
+                  0.025)
+                tmp[i, j, 6] <- quantile(result[i, j, 1:permutations], 
+                  0.975)
+            }
+        }
+        result <- tmp
+        dimnames(result) <- list(pooled.sites = c(1:n), scale = scales, 
+            c("mean", "stdev", "min", "max", "Qnt 0.025", "Qnt 0.975"))
+    }
+    class(result) <- c("tsallisaccum", "renyiaccum", class(result))
+    result
+}
diff --git a/R/update.nullmodel.R b/R/update.nullmodel.R
new file mode 100644
index 0000000..a777eea
--- /dev/null
+++ b/R/update.nullmodel.R
@@ -0,0 +1,40 @@
+update.nullmodel <-
+function(object, nsim=1, seed = NULL, ...)
+{
+    if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) 
+        runif(1)
+    if (is.null(seed)) 
+        RNGstate <- get(".Random.seed", envir = .GlobalEnv)
+    else {
+        R.seed <- get(".Random.seed", envir = .GlobalEnv)
+        set.seed(seed)
+        RNGstate <- structure(seed, kind = as.list(RNGkind()))
+        on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv))
+    }
+    if (object$commsim$isSeq) {
+        perm <- object$commsim$fun(x=object$state,
+            n=1L,
+            nr=object$nrow,
+            nc=object$ncol,
+            rs=object$rowSums,
+            cs=object$colSums,
+            rf=object$rowFreq,
+            cf=object$colFreq,
+            s=object$totalSum,
+            fill=object$fill,
+            thin=as.integer(nsim), ...)
+        state <- perm[,,1L]
+        storage.mode(state) <- object$commsim$mode
+        iter <- as.integer(object$iter + nsim)
+#        assign("state", state, envir=object)
+#        assign("iter", iter, envir=object)
+#        attr(state, "iter") <- iter
+        out <- nullmodel(state, object$commsim)
+        out$iter <- iter
+    } else {
+#        state <- NULL
+        out <- object
+    }
+#    invisible(state)
+    out
+}
diff --git a/R/vectorfit.R b/R/vectorfit.R
index f54d66e..e328220 100644
--- a/R/vectorfit.R
+++ b/R/vectorfit.R
@@ -1,5 +1,5 @@
 "vectorfit" <-
-    function (X, P, permutations = 0, strata, w, ...) 
+    function (X, P, permutations = 0, strata = NULL, w, ...) 
 {
     if (missing(w) || is.null(w)) 
         w <- 1
@@ -24,29 +24,37 @@
     if (is.null(colnames(X))) 
         colnames(heads) <- paste("Dim", 1:nc, sep = "")
     else colnames(heads) <- colnames(X)
+    ## make permutation matrix for all variables handled in the next loop
+    nr <- nrow(X)
+    permat <- getPermuteMatrix(permutations, nr, strata = strata)
+    if (ncol(permat) != nr)
+        stop(gettextf("'permutations' have %d columns, but data have %d rows",
+                          ncol(permat), nr))
+    permutations <- nrow(permat)
+
     if (permutations) {
-        nr <- nrow(X)
-        permstore <- matrix(nrow = permutations, ncol = ncol(P))
-        for (i in 1:permutations) {
-            indx <- permuted.index(nrow(P), strata)
+        ptest <- function(indx, ...) {
             take <- P[indx, , drop = FALSE]
             take <- .C("wcentre", x = as.double(take), as.double(w),
                        as.integer(nrow(take)), as.integer(ncol(take)),
                        PACKAGE = "vegan")$x
             dim(take) <- dim(P)
             Hperm <- qr.fitted(Q, take)
-            permstore[i, ] <- diag(cor(Hperm, take))^2
+            diag(cor(Hperm, take))^2
         }
-        permstore <- sweep(permstore, 2, r, ">=")
-        pvals <- (apply(permstore, 2, sum) + 1)/(permutations + 1)
+        permstore <- sapply(1:permutations, function(indx, ...) ptest(permat[indx,], ...))
+        ## Single variable is dropped to a vector, and otherwise
+        ## permutations are the matrix columns and variables are rows
+        if (!is.matrix(permstore))
+            permstore <- matrix(permstore, ncol=permutations)
+        permstore <- sweep(permstore, 1, r, ">=")
+        validn <- rowSums(is.finite(permstore))
+        pvals <- (rowSums(permstore, na.rm = TRUE) + 1)/(validn + 1)
     }
     else pvals <- NULL
     sol <- list(arrows = heads, r = r, permutations = permutations, 
                 pvals = pvals)
-    if (!missing(strata)) {
-        sol$strata <- deparse(substitute(strata))
-        sol$stratum.values <- strata
-    }
+    sol$control <- attr(permat, "control")
     class(sol) <- "vectorfit"
     sol
 }
diff --git a/R/vegan-defunct.R b/R/vegan-defunct.R
index e16d6e6..927dc92 100644
--- a/R/vegan-defunct.R
+++ b/R/vegan-defunct.R
@@ -1,8 +1,8 @@
 ## "new" permutation code was moved to package 'permute' in R 2.0-0.
 ## Here we list as defunct those functions that are not in 'permute'.
 
-`permuted.index2` <- function (n, control = permControl()) 
-    .Defunct("permute::shuffle", package="vegan")
-
-`getNumObs` <- function(object, ...) 
-    .Defunct("nobs", package = "vegan")
+## Defunct since vegan 2.2-0: use MDSrotate() instead.
+`metaMDSrotate` <-
+    function(object, vec, na.rm = FALSE, ...)
+{
+    .Defunct(new="MDSrotate", "vegan")
+}
diff --git a/R/vegan-deprecated.R b/R/vegan-deprecated.R
index ba8f4c4..e90a8af 100644
--- a/R/vegan-deprecated.R
+++ b/R/vegan-deprecated.R
@@ -1,6 +1,204 @@
-`metaMDSrotate` <-
-    function(object, vec, na.rm = FALSE, ...)
+### rewritten commsimulator
+
+## Deprecated front-end kept for backward compatibility: implements
+## the old commsimulator() API on top of nullmodel()/simulate().
+## Returns one simulated binary community matrix with the attributes
+## (e.g. dimnames) of the input.
+"commsimulator" <-
+function (x, method, thin = 1) 
+{
+    ## Do not yet warn on deprecation to allow smooth transition
+    ##.Deprecated("nullmodel", package="vegan")
+    method <- match.arg(method, 
+                        c("r0","r1","r2","r00","c0","swap", "tswap",
+                          "backtrack", "quasiswap"))
+    ## old "r0" corresponds to the "r0_old" null model in commsim()
+    if (method == "r0")
+        method <- "r0_old"
+    x <- as.matrix(x)
+    out <- simulate(nullmodel(x, method), nsim = 1, thin = thin)
+    ## drop the third (simulation) dimension: keep a single matrix
+    out <- out[,,1]
+    ## restore the input's attributes so output looks like old API
+    attributes(out) <- attributes(x)
+    out
+}
+
+### density and densityplot
+
+### density & densityplot methods for vegan functions returning
+### statistics from permuted/simulated data. These are modelled after
+### density.oecosimu and densityplot.oecosimu (which are in their
+### separate files).
+
+## anosim
+
+## Deprecated: kernel density of the permuted anosim statistics with
+## the observed statistic included; returns a "vegandensity" object
+## that plot.vegandensity() can display with the observed value marked.
+`density.anosim` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<anosim.result>))",
+                package="vegan")
+    obs <- x$statistic
+    ## Put observed statistic among permutations
+    out <- density(c(obs, x$perm), ...)
+    out$call <- match.call()
+    out$observed <- obs
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## adonis can return a matrix of terms, hence we also have densityplot()
+
+## Deprecated: kernel density of permuted F values from an adonis
+## result. Only meaningful for a single term; warns otherwise.
+`density.adonis` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<adonis.result>))",
+                package="vegan")
+    cols <- ncol(x$f.perms)
+    if (cols > 1)
+        warning("'density' is meaningful only with one term, you have ", cols)
+    obs <- x$aov.tab$F.Model
+    ## drop NA entries of the aov table (rows without an F value)
+    obs <- obs[!is.na(obs)]
+    ## observed value is pooled with the permutations for the density
+    out <- density(c(obs, x$f.perms), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## Deprecated: lattice densityplot of permuted F values for each term
+## of an adonis result, one panel per term, with a vertical line at
+## the observed statistic.
+`densityplot.adonis` <-
+    function(x, data, xlab = "Null", ...)
+{
+    .Deprecated("densityplot(permustats(<adonis.result>))",
+                package="vegan")
+    obs <- x$aov.tab$F.Model
+    ## drop NA entries of the aov table (rows without an F value)
+    obs <- obs[!is.na(obs)]
+    ## first row holds the observed values, the rest are permutations
+    sim <- rbind(obs, x$f.perms)
+    ## term names replicated to match the columns of 'sim'
+    nm <- rownames(x$aov.tab)[col(sim)]
+    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
+                xlab = xlab,
+                panel = function(x, ...) {
+                    panel.densityplot(x, ...)
+                    ## mark the observed statistic of this panel
+                    panel.abline(v = obs[panel.number()], ...)
+                },
+                ...)
+}
+
+## mantel
+
+## Deprecated: kernel density of permuted Mantel statistics with the
+## observed statistic included; returns a "vegandensity" object.
+`density.mantel` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<mantel.result>))",
+                package="vegan")
+    obs <- x$statistic
+    out <- density(c(obs, x$perm), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## mrpp
+
+## Deprecated: kernel density of the bootstrapped mrpp deltas with
+## the observed delta included; returns a "vegandensity" object.
+`density.mrpp` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<mrpp.result>))",
+                package="vegan")
+    obs <- x$delta
+    out <- density(c(obs, x$boot.deltas), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## anova.cca does not return permutation results, but permutest.cca
+## does. However, permutest.cca always finds only one statistic. Full
+## tables anova.cca are found by repeated calls to permutest.cca.
+
+## Deprecated: kernel density of permuted F values from a
+## permutest.cca result (which holds a single statistic), with the
+## observed F.0 included; returns a "vegandensity" object.
+`density.permutest.cca` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<permutest.result>))",
+                package="vegan")
+    obs <- x$F.0
+    out <- density(c(obs, x$F.perm), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## protest
+
+## Deprecated: kernel density of permuted protest statistics (x$t)
+## with the observed statistic (x$t0) included; returns a
+## "vegandensity" object.
+`density.protest` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<protest.result>))",
+                package="vegan")
+    obs <- x$t0
+    out <- density(c(obs, x$t), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+#### plot method: the following copies stats::plot.density() code but
+#### adds one new argument to draw abline(v=...) for the observed
+#### statistic
+
+#### plot method: the following copies stats::plot.density() code but
+#### adds one new argument to draw abline(v=...) for the observed
+#### statistic
+
+## 'obs.line' may be TRUE (drawn in red), FALSE (omitted), or a
+## colour specification used directly for the vertical line.
+`plot.vegandensity` <-
+    function (x, main = NULL, xlab = NULL, ylab = "Density", type = "l", 
+    zero.line = TRUE, obs.line = TRUE, ...) 
+{
+    if (is.null(xlab)) 
+        xlab <- paste("N =", x$n, "  Bandwidth =", formatC(x$bw))
+    if (is.null(main)) 
+        main <- deparse(x$call)
+    ## change obs.line to col=2 (red) if it was logical TRUE
+    if (isTRUE(obs.line))
+        obs.line <- 2
+    plot.default(x, main = main, xlab = xlab, ylab = ylab, type = type,
+                 ...)
+    if (zero.line) 
+        abline(h = 0, lwd = 0.1, col = "gray")
+    ## draw the observed-statistic line unless obs.line was FALSE
+    if (is.character(obs.line) || obs.line)
+        abline(v = x$observed, col = obs.line)
+    invisible(NULL)
+}
+
+## Deprecated: kernel density of simulated oecosimu statistics with
+## the observed statistic included. Only meaningful for a single
+## statistic; warns otherwise. Returns a "vegandensity" object.
+`density.oecosimu` <-
+    function(x, ...)
+{
+    .Deprecated("densityplot(permustats(<oecosimu.result>))",
+                package="vegan") 
+    cols <- nrow(x$oecosimu$simulated)
+    if (cols > 1)
+        warning("'density' is meaningful only with one statistic, you have ", cols)
+    obs <- x$oecosimu$statistic
+    ## simulated values are stored with statistics as rows: transpose
+    ## and prepend the observed statistic
+    out <- density(rbind(obs, t(x$oecosimu$simulated)), ...)
+    out$observed <- obs
+    out$call <- match.call()
+    ## make the stored call print as a plain density() call
+    out$call[[1]] <- as.name("density")
+    class(out) <- c("vegandensity", class(out))
+    out
+}
+
+## Deprecated: lattice densityplot of simulated oecosimu statistics,
+## one panel per statistic, with a vertical line at the observed value.
+`densityplot.oecosimu` <-
+    function(x, data, xlab = "Simulated", ...)
+{
+    .Deprecated("densityplot(permustats(<oecosimu.result>))",
+                package="vegan")
+    obs <- x$oecosimu$statistic
+    ## first row holds the observed values, the rest are simulations
+    sim <- rbind(obs, t(x$oecosimu$simulated))
+    ## statistic names replicated to match the columns of 'sim'
+    nm <- names(obs)[col(sim)]
+    densityplot( ~ as.vector(sim) | factor(nm, levels = unique(nm)),
+                xlab = xlab,
+                panel = function(x, ...) {
+                    panel.densityplot(x, ...)
+                    ## mark the observed statistic of this panel
+                    panel.abline(v = obs[panel.number()], ...)
+                },
+                ...)
}
diff --git a/R/veganCovEllipse.R b/R/veganCovEllipse.R
index 9692f57..bf81ee8 100644
--- a/R/veganCovEllipse.R
+++ b/R/veganCovEllipse.R
@@ -5,5 +5,8 @@
     theta <- (0:npoints) * 2 * pi/npoints
     Circle <- cbind(cos(theta), sin(theta))
     ## scale, center and cov must be calculated separately
-    t(center + scale * t(Circle %*% chol(cov)))
+    Q <- chol(cov, pivot = TRUE)
+    ## pivot takes care of cases when points are on a line
+    o <- attr(Q, "pivot")
+    t(center + scale * t(Circle %*% Q[,o]))
 }
diff --git a/R/veganMahatrans.R b/R/veganMahatrans.R
new file mode 100644
index 0000000..1a86bf2
--- /dev/null
+++ b/R/veganMahatrans.R
@@ -0,0 +1,20 @@
+### Internal function for Mahalanobis transformation of the matrix.
+### Mahalanobis transformation of matrix X is M = X S^(-1/2) where S
+### is the covariance matrix. The inverse square root of S is found
+### via eigen decomposition S = G L G^T, where G is the matrix of
+### eigenvectors, and L is the diagonal matrix of eigenvalues. Thus
+### S^(-1/2) = G L^(-1/2) G^T. This is an internal function so that
+### input must be correct: 'x' must be a centred matrix (not a
+### data.frame, not raw data).
+## Internal: Mahalanobis transformation M = X S^(-1/2) via eigen
+## decomposition of the covariance matrix (see the file header
+## comment above). 'x' must already be a centred matrix; 's2' is the
+## covariance matrix (computed from 'x' when missing); eigenvalues
+## <= 'tol' are dropped to handle rank-deficient covariance.
+`veganMahatrans` <-
+    function (x, s2, tol = 1e-8) 
+{
+    ## NOTE(review): 'n' is assigned but never used below
+    n <- nrow(x)
+    if (missing(s2))
+        s2 <- cov(x) 
+    e <- eigen(s2, symmetric = TRUE)
+    ## keep only eigenvalues above tolerance (positive-definite part)
+    k <- e$values > tol
+    ## sisqr = G L^(-1/2) G^T, i.e. S^(-1/2) on the retained subspace
+    sisqr <- e$vectors[,k, drop=FALSE] %*%
+        (sqrt(1/e$values[k]) * t(e$vectors[,k, drop = FALSE]))
+    x %*% sisqr 
+}
diff --git a/R/vegdist.R b/R/vegdist.R
index f82784e..5e0b5fc 100644
--- a/R/vegdist.R
+++ b/R/vegdist.R
@@ -7,7 +7,8 @@
         method <- "euclidean"
     METHODS <- c("manhattan", "euclidean", "canberra", "bray", 
                  "kulczynski", "gower", "morisita", "horn", "mountford", 
-                 "jaccard", "raup", "binomial", "chao", "altGower", "cao")
+                 "jaccard", "raup", "binomial", "chao", "altGower", "cao",
+                 "mahalanobis")
     method <- pmatch(method, METHODS)
     inm <- METHODS[method]
     if (is.na(method)) 
@@ -25,6 +26,8 @@
                 dQuote(inm))
     if (method == 6) # gower, but no altGower
         x <- decostand(x, "range", 2, na.rm = TRUE, ...)
+    if (method == 16) # mahalanobis
+        x <- veganMahatrans(scale(x, scale = FALSE))
     if (binary) 
         x <- decostand(x, "pa")
     N <- nrow(x <- as.matrix(x))
diff --git a/R/vegemite.R b/R/vegemite.R
index eb763a0..16b89a0 100644
--- a/R/vegemite.R
+++ b/R/vegemite.R
@@ -11,7 +11,6 @@
         }
         else if (inherits(use, c("hclust", "twins"))) {
             if (inherits(use, "twins")) {
-                require(cluster) || stop("package cluster needed for 'use'")
                 use <- as.hclust(use)
             }
             if (is.null(site.ind)) 
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3848d40
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+# vegan: an R package for community ecologists
+
+## Build status
+
+Linux       | Windows
+------------|------------
+[![Build Status](https://travis-ci.org/vegandevs/vegan.svg?branch=master)](https://travis-ci.org/vegandevs/vegan) | [![Build status](https://ci.appveyor.com/api/projects/status/n7c2srupr55uhh4u/branch/master?svg=true)](https://ci.appveyor.com/project/gavinsimpson/vegan/branch/master)
diff --git a/build/vignette.rds b/build/vignette.rds
index e6bf2f2..891f9d5 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/data/BCI.rda b/data/BCI.rda
index 2fadbf1..abeffeb 100644
Binary files a/data/BCI.rda and b/data/BCI.rda differ
diff --git a/data/dune.env.rda b/data/dune.env.rda
index 4f7b108..a8b724c 100644
Binary files a/data/dune.env.rda and b/data/dune.env.rda differ
diff --git a/data/dune.phylodis.rda b/data/dune.phylodis.rda
new file mode 100644
index 0000000..634312a
Binary files /dev/null and b/data/dune.phylodis.rda differ
diff --git a/data/dune.rda b/data/dune.rda
index ba371b6..cad31c8 100644
Binary files a/data/dune.rda and b/data/dune.rda differ
diff --git a/data/dune.taxon.rda b/data/dune.taxon.rda
index de8ad70..d29dc3a 100644
Binary files a/data/dune.taxon.rda and b/data/dune.taxon.rda differ
diff --git a/data/varespec.rda b/data/varespec.rda
index 39fdb5b..d6383f9 100644
Binary files a/data/varespec.rda and b/data/varespec.rda differ
diff --git a/inst/ChangeLog b/inst/ChangeLog
index 4c030db..0680df6 100644
--- a/inst/ChangeLog
+++ b/inst/ChangeLog
@@ -1,319 +1,1674 @@
-$Date: 2013-12-12 12:06:44 +0200 (Thu, 12 Dec 2013) $
+VEGAN DEVEL VERSIONS at https://github.com/vegandevs/vegan
 
-VEGAN RELEASE VERSIONS at http://cran.r-project.org/
+Version 2.1-43 (opened September 11, 2014)
+
+	* cca, rda, capscale: remove u.eig, v.eig and wa.eig items or
+	scores scaled by eigenvectors. We have had a warning of their
+	eventual removal since vegan 1.6-8, and now we finally did
+	so. Scaling of scores happens when scores() function access
+	normalized score items of the results.
+
+	* commsimulator: commented out deprecation warning at the moment,
+	because this triggered warnings in CRAN packages bipartite and
+	metacom. Contacted the maintainers of these packages, and will put
+	back the warning in 2.2-1.
+
+	* getPermuteMatrix: works also when 'strata' are
+	missing. Triggered an error in CRAN package mpmcorrelogram.
+
+	* permustats: new function to extract permutation results from
+	vegan objects, with support functions summary, density,
+	densityplot, qqnorm and qqmath. Deprecated previous density and
+	densityplot methods directly accessing the same results.
 
-Version 2.0-10 (released December 12, 2013)
-
-	* r2815: update email in simper.Rd.
-	* merge 2809,2810: treat all non-numeric variables as factors
-	inenvfit.
-	* merge 2713 man/: remove references to very old R versions in man
-	files (R/ part of this rev not applied)
-	* merge 2708: adapt quantilesto test direction.
-	* merge 2679: add collector curve to renyiaccum.
-	* merge 2678: renyiaccum can plot one scale. 
-	* merge 2641: subset in renyi/spec/tsallisaccum.
-	* merge 2630,1,2: fisherfit new algo and delete profile & confint.
-	* merge 2628,9: plot vectorfit *should* work with constant
-	(non-variable) vectors: partial conflict, needs checking.
-	* merge 2627: zap zeros in print.cca.
-	* merge 2626: nestednodf fill and consinstency in quantitative
-	data.
-	* merge 2527: adapt permutest.betadisper to the CRAN release of
-	permute 0.8-0.
-	* merge 2451, 2454, 2455, 2465: weighted specaccum.
-	* conflicts (not applied): r2625 (oecosimu), 2638 (oecosimu.Rd)
-
-Version 2.0-9 (released September 25, 2013)
-
-	* merge 2618: a typo.
-	* merge 2613: 'medoid' to 'median' in betadisper output and doc.
-	* merge 2605, 7: FAQ about La.svd errors in cca/rda/capscale.
-	* merge 2604: print.vectorfit zaps zeros.
-	* merge 2600: remove tools::: in vegandocs().
-	* merge 2599: remove workarounds for R <= 2.13.0.
-	* merge 2598: remove workarounds for R <= 2.12.0.
-	* merge 2597: move vignettes from inst/doc to vignettes/; depend
-	on R 2.14.0.
-	* merge 2593: remove stats::: in anova.ccabyaxis().
-	* merge 2592: expand varpart.Rd to avoid questions.
-	* merge 2590: typos.
-	* merge r2588: remove unneeded utils::: in vegandocs (some remain). 
-	* merge 2584,6: vegan Depends on lattice, avoid lattice:::
-	* merge r2583,7: remove cross-references to disappear in R-3.0-2.
-	* merge r2580,1: literature refs in specaccum.Rd and
-	betadisper.Rd.
-	* merge r2571: aspell fixes in Rd files (stressplot.wcmdscale.Rd,
-	commsim.Rd and nullmodel.Rd did not exist in 2.0 and were
-	excluded).
-	* merge r2570: aspell fixes in R files.
-	* merge r2568: aspell fixes in Rd files.
-	* merge r2564: line wrapping in betadiver(help=TRUE).
-	* merge r2562, 2563, 2565-7, 2572-6: edit and reformat Rnw file;
-	decision-vegan.Rnw required hand-editing due to conflicts in
-	text on parallel processing that was not merged.
-	* merge r2558: ordiktplot bmp device in all platforms. 
-	
-Version 2.0-8 (released July 10, 2013)
-
-	* merge r2540: remove hard-coded inconsolata fonts.
-	* merge r2539: stressplot return data in input order.
-	* merge r2538: use expression(R^2) in stressplot.
-	* merge r2537, 2541: add plot.ordipointlabel.
-	* merge r2535, 2536: better positioning of arrow labels.
-	* merge r2534: lwd in ordisurf.
-	* merge r2533: ordipointlabel uses ordiArgAbsorber.
-	* merge r2532: clean up ordilabel.
-	* merge r2504: notation in adipart.Rd.
-	* merge r2498: adapt raupcrick to r2495.
-	* merge r2497: avoid visible ~ in .Rnw.
-	* merge r2496: use crossprod() in indpower.
-	* merge r2495: fix twisted test direction in oecosimu.
-	* merge r2490 thru 2493: ordisurf new options.
-	* merge r2484 thru 2486: betadisper fixes (centroid with one
-	group, call it medoid).
-	
-Version 2.0-7 (released March 19, 2013)
-
-	* merge r2476: take back part of 2434 and return matrix from
-	wcmdscale unless eigenvalues are requested.
-	* merge r2469, 2470: do not use u.eig & v.eig.
-	* merge r2468: tabasco checks against negative data.
-	* merge r2462: fix "asymp" model in fitspecaccum.
-	* merge r2458: restructure fitspecaccum.
-	* merge r2453: nestedtemp failed with fill < 0.38%.
-	* merge r2452: plot/text.cca gained axis.bp = TRUE argument. 
-	* merge r2448 (partly), 2449: print.monoMDS clearer about
-	convergence. Only this part merged: metaMDSiter untouched.
-	* handcraft r2447: metaMDSiter gained argument 'maxit'.
-	* merge r2443: edit cca.object.Rd.
-	* merge r2434: return wcmdscale object always with non-default
-	arguments.
-	* merge r2432: edit cca.Rd.
-	* merge r2431,2433,2435 thru 2442: add tabasco().
-
-Version 2.0-6 (released February 11, 2013)
-
-	* merge 2420, 2425: cca cross references.
-	* merge 2417 thru 2419: predict & fitted upgraded for cca etc.  
-	* merge 2414: adjustment in predict.capscale.
-	* merge 2408: FAQ on random effects in cca etc.
-	* merge 2404,2411: refer Legendre & Legendre for the vegan rda
-	algorithm.
-	* merge 2934 (partial, mc): oecosimu handles nestedfun() returning
-	data.frame. Not merged: checking length of statistic when setting
-	its name.
-	* merge 2932,3: matching names in treedive() and treedist().
-	* merge 2388 (partial), 2389, 2390: faster protest, the part of
-	evaluating the test statistic hand crafted, because
-	parallelizations caused conflicts here.
-	* merge 2385, 7: protest print, faster sum of squares in procrustes. 
-	* merge 2384: new FAQ entries.
-	* merge 2383: remove alias to print.wcmdscale.
-	* merge 2374,2376,2378,2382: orditorp gains argument select
-	* merge 2372: ordilabel uses ordiArgAbsorber when plotting
-	* merge 2369: clamtest fix and border cases.
-	* merge 2367: rectify mantel, summary.anosim print.
-	* merge 2362: doc on r2357 for capscale.
-	* merge 2361: adjust imaginary axes similarly as real eigenvalue.
-	* merge 2358: print etc. for wcmdscale (no stressplot parts of
-	2358). 
-	* merge 2357: print additive constant in capscale.
-	* merge 2350: capscale species score scaling bug fix.
-	* merge 2349: multipart print bug fix.
-	* merge 2345: FAQ update (github, metric NMDS space).
-	* merge 2342: unused factor levels in envfit/factorfit.
-	* merge 2341 (partial): visible space in pdf of Rnw files.
-	* merge 2336: stricter monoMDS convergence criteria.
-	* merge 2334,5: scores.monoMDS,metaMDS updates.
-	* merge 2332: add plot.nestednodf().
-	* merge 2330,1: ordiR2step() gained arg 'R2scope' and handles
-	partial RDA.
-	* merge 2328,9: add 'legend' arg to msoplot().
-	* merge 2325: fix rotation of axes in 2-d plot.procrustes().
-	* merge 2319, 2323: clamtest bug fixes and clarifications.
-	* merge 2318: dispindmorista gains p-values of output.
-	
-Version 2.0-5 (released October 8, 2012)
-
-	* merge r2309: anova.cca.Rd edits.
-	* merge r2307: no line breaks within \code{} in Rd.
-	* merge r2305: proofread Rd files a..b.
-	* merge r2299: fix broken \link{}s in docs. 
-	* merge r2297: upgrade docs for L&L 2012 (3r ed.)
-	* merge r2291 thru 2296, 2298, 2300: radfit upgrade.
-	* merge r2287,8: scoping in anova.ccabyaxis and anova.ccabyterm.
-	* merge r2285: add predict.radfit.
-	* merge r2262, 2268:2270, and also r1950: mantel and
-	mantel.partial gained argument na.rm = FALSE. This needed hand
-	editing of merges, and also merging old r1950: beware and test.
-	* merge r2271: tweak varpart.Rd. (plot coloured circles).
-	* merge r2267: tweak vegdist.Rd (ref to vegdist).
-	* merge r2260: streamline adonis (internal changes).
-	* merge r2258: protect betadisper against changes in R-devel API.
-	* merge r2254: stylistic in examples of Rd files.
-	* merge r2252,56,57,61,64,66: add density methods for vegan
-	permutation functions and add plot.densityvegan to display these.
-	* merge r2250: do not use paste0 in envift.Rd (fails in R 2.14).
-	* merge r2249: fix vignette building with TeXLive 2012.
-	* merge r2246: remove dead code from cIndexKM() and R-devel CMD
-	check warning.
-	* merge r2244: more portable doc/Makefile
-	* merge r2237 thru 2240: add labels.envfit() and "labels" arg to
-	plot.envfit(). 
-	* merge r2227 thru 2235: adipart, hiersimu, multipart code
-	refactoring (especially formula method) and making to inherit from
-	oecosimu in printing the results.  The merge did not apply quite
-	cleanly, but oecosimu, print.oecosimu and NAMESPACE needed manual
-	editing (beware).
-	* merge r2225: biplot.rda 'type' fix.
-
-Version 2.0-4 (released June 18, 2012)
-
-	* merge r2215: plot.envfit() gains args 'bg' for background colour
-	of labels.
-	* cherry-pick from r2213: warn on empty rows in simper (picked
-	github revision cce42b3).
-	* merge r2206,7: check 'newdata' dims in predict(..., type =
-	"response"|"working") for CA results.
-	* merge r2195-7, 2204: bias adjusted betadisper.
-	* merge r2191-2193: standardise handling of 'select' arg in
-	those plotting functions that support it. Adds non-exported
-	function .checkSelect().
-	* merge r2182,2182,2199,2201: FAQ about data size in NMDS.
-	* merge r2178, 2180: ordipointlabel gains 'select' argument.
-	* merge r2173-2176, 2185: ordihull labels, semintransparent
-	colours in ordihull & ordiellipse.
-	* merge r2172,2179,2181,2184,2186,2187: metaMDS warns about too
-	good stress.
-	* merge r2170: bioenv accepts dissimilarities as input.
-	* merge r2167: warn about unequal aspect ratio in ordiplot3d.
-	* merge r2162: set equal axis scales for ordiplot3d.
-	* merge r2157:2160,2167,2168,2208: ordiplot3d returns
-	envfit.convert().
-	* merge r2156: betadisper example adapted for default spatial
-	median.
-	* merge r2150: monoMDS checks that the number of dissimilarities
-	is sufficient for the requested analysis. The decostand.Rd fix of
-	r2150 was not yet merged.
-	* merge r2149: drarefy & rrarefy check that input data are
-	integers.
-	* merge r2148 (partial): format references. However, scoverage()
-	was not merged yet, and its changes have not been merged.
-	* merge r2144: hiersimu and multipart do not assume constant
-	gamma.  Similar change was made in r2132 to adipart.
-	* merge r2143: formula method for adipart/hiersimu/multipart.
-	NAMESPACE needed manual merge (edited patch from diff).
-	* merge r2139: clamtest coverage threshold fix.
-	* merge r2137: explain data transformation in pyrifos.Rd.
-	* merge r2135: print.adipart displays null model method.
-	* merge r2132: adipart bug fix: assumed constant gamma in
-	permutations.
-	* merge r2129: envfit failed with empty factor levels.
-	* merge r2128: anova(<prc-object>, by = ...) failed.
-	* merge r2127: more configurable msoplot.
-	* merge r2125: typo in anova.cca.Rd.
-	* merge r2123: r2121 for adonis.
-	* merge r2121: doc location/dispersion mix-up in simper, mrpp &
-	anosim.
-
-Version 2.0-3 (released March 3, 2012)
-
-	* merge r2115: simper fixes from github EDiLD/vegan pull request
-	#6 by Eduard Szöcs (proportions instead of percentages etc.)
-	* merge r2113: tweak simper.Rd formatting.
-	* commit r2112: deactivate permutation tests in simper.
-	* merge r2110: centroids.cca fix for 2-level factors as the only
-	constraint.
-	* merge r2106: merge scores and vegdist doc updates from github.
-	* merge r2105: pacify -pedantic C compiler in vegdist.
-	* merge r2104: add Cao index in vegdist.
-	* merge r2103: fix bug in permutation p-values in simper.
-	* merge r2101: fix stress description and remove refs to
-	ecodist::nmds in monoMDS.Rd.
-	* merge r2100: fix print.summary.simper.
-	* merge r2098: fix summary.simper(..., order=FALSE) when
-	permutations = 0.
-	* merge r2097: add print.summary.simper.
-	* copy simper.R & simper.Rd at r2092.
-	* merge r2089: scores.default fixed with non-existing scores.
-	* merge r2080: use droplevels in betadisper.
-	* merge r2079: do not use .Internal().
-	* merge r2078, 2084: simper NAMESPACE.
-	* merge r2071,2: dimnames fix in indopower & expand example.
-	* merge r2068: broken url in renyi.Rd.
-	* merge r2065: number of iterations is an argument in nesteddisc.
-	* merge r2060: adonis tells terms were added sequentially.
-	* mrege r2057: add .Rinstignore to silense R 2.15.0 checks.
-	* merge r2056: use inconsolata fonts in vignettes.
-	* merge r2053: implement adjusted R2 for partial RDA.
-	* merge r2052: do not scale constraints in varpart[234].
-	* merge r2049: capscale robust for zeroed centroids.
-	* merge r2044: ordistep robust for complete aliasing.
-	* merge r2043: anova.ccabymargin robust for zero effects.
-	* merge r2035,6,7: more robust plot.cca.
-	* merge r2030: resurrect old ChangeLog entries.
-	* merge r2027,28,33: tweak oecosimu printed output.
-	* merge r2018,19, 20, 21: add nestedbetasor() & nestedbetajac().
-	* merge r2016: empty factor levels in betadisper() fixed in
-	ordimedian(). 
-	* merge r2015: remove Bob O'Hara's old email address.
-	* r2014: typo in diversity.Rd.
-	* merge r2008,9,11,12: add rarecurve.
-	* merge r2007: FAQ update for arrow scaling.
-	* merge r2004: metaMDS(..., noshare=0) triggers stepacross(), but
-	'noshare = FALSE' never does.
-	* merge r2001: explain scaling of arrows in envfit() plot() --
-	this looks like being a FAQ.
-
-Version 2.0-2 (opened October 20, 2011)
-
-	* merge r1991 by hand: change sapply(H.s t) to use lapply in adonis
-	to fix a further bug in implementing speed-ups in f.test
-	* merge r1988 by hand: calling f.test without transposed matrix
-	bug in adonis.
-	* merge r1985: ordiarrows, ordisegments gained arg 'order.by'.
-	* merge r1974,1975,1980,1982: FAQ update.
-	* merge r1970: ordispider returns invisible plotting structure.
-	* merge r1969: ordiplot3d.Rd tells about xyz.convert().
-	* merge r1964: clarify stress scaling in iso/monoMDS.
-	* merge r1961: consinstent 'noshrink' defaults in metaMDSdist()
-	and metaMDS(..., engine="isoMDS")
-	* merge r1959: capscale zero-rank constraints bug fix.
-
-Version 2.0-1 (released October 20, 2011)
-
-	* merge r1945: add plot & lines for as.preston & as.fisher.
-	* merge r1944: R 2.14.0 (r52709) gives a message() when sd() is
-	used for matrices -- now fixed for rda.default, capscale and
-	simulate.rda/cca/capscale. 
-	* merge r1939: slacker and faster nesteddisc.
-	* merge r1928: permutest.cca result could not be update()d.
- 	* merge r1927: reset 'tries' when 'previous.best' was a different
-	model.
-	* merge r1916: ordisurf.Rd references.
-	* merge r1914: example speed-up.
-	* r1897, 1840, 1825, 1823: copy clamtest (at r1897),
-	summary.clamtest, print.summary.clamtest, plot.clamtest (all at
-	r1823) and clamtest.Rd (at r1897)
-	* merge r1872: raupcrick doc fixes.
-	* merge r1869: meandist re-ordering bug fix.
-	* merge r1846: tweak speed (a bit).
-	* merge r1845: faster centroids.cca.
-	* merge r1843: tiny speed-up in permutest.cca(..., first=TRUE) 
-	* merge r1838: a bit faster example(MDSrotate).
-	* merge r1835: add raupcrick.
-	* merge r1811: permatswap bug fix in nestedness.c.
-	* merge r1810: -pedatic -Wall fixes in monoMDS.f, ordering.f
+	* ordirgl, ordiplot3d: 3D functions using rgl or scatterplot3d
+	packages were removed from vegan and moved to a new CRAN package
+	vegan3d (released Oct 7, 2014).
 	
+	* scores.hclust: combined documentation with other hclust methods.
+	
+	* scoverage: removed from vegan.
+
+	* dispweight: veganified so that uses nullmodels. Added
+	gdispweight(): a new generalized method taking any glm() formula
+	instead of one grouping.
+	
+	* betadisper Permutation tests via the `permutest()` method
+	are now parallelised. The number of cores to parallelise over
+	is specified by argument `parallel` in common with the
+	implementation for `cca()`.
+
+	Fix a couple of bugs in the examples; number of permutations
+	was specified using `control` which is not an argument to the
+	`permutest()` method for `betadisper()`.
+
+	* text.cca: was incorrectly testing if factor constraints were
+	in the biplot scores when user-supplied labels for factors were
+	given.
+
+	* ccanova: removed from vegan. These were backup functions of old
+	anova.cca, and were completely rewritten for new vegan.
+
+	* metaMDSrotate: made defunct (replaced with MDSrotate).
+
+Version 2.1-42 (closed September 11, 2014)
+
+	* Opened a new version to prepare release 2.2-0.
+
+	* Moved main development from R-Forge to GitHub.com.
+
+	* NAMESPACE, DESCRIPTION: adapted to current R CMD check that has
+	more stringent tests on attaching and importing external packages
+	and using their functions.
+
+	* getPermuteMatrix: routines have a common API based on the new
+	function.
+
+Version 2.1-41 (closed September 4, 2014)
+	
+	* ordiellipse, ordihull, ordispider: can now handle (omit) NA
+	cases in 'groups'. They were able to omit to NA cases in scores,
+	but having NA in 'groups' triggered really cryptic error
+	messages. 
+
+	* adipart, multipart, hiersimu: it is now an error to provide
+	non-nested sampling hierarchy (used to be a warning).
+
+	* new version opened with the CRAN release of vegan_2.0-10 on
+	December 12, 2013.
+
+	* anosim, mantel, mantel.partial: R CMD check told that "... may
+	be used in an incorrect context". The dots were added in r2765,
+	and now removed.
+
+	* ordistep: add1.cca reported P=0 for redundant terms with 0 Df,
+	and this caused an error in ordistep.
+
+	* as.hclust.spantree: a new function to cast a "spantree" result
+	object to an "hclust" tree.
+
+	* hclust: add reorder() and rev() methods for standard "hclust"
+	trees of R.  I have no clue why base R does not have these
+	methods, but I provide them now in vegan. An additional reason for
+	providing these methods is that reorder(<dendrogram-object>, wts,
+	agglo.FUN = mean) will use unweighted mean of merged groups even
+	when these are of very unequal sizes. The reorder method provided
+	here will use group sizes as weights and the value of the group
+	will be the mean of its leaves (terminal nodes).
+
+	Add scores() method to extract coordinates of internal nodes or
+	leaves from a plotted hclust() tree. The function is whimsical and
+	may be removed before release (it is documented separately to make
+	this easier).
+
+	* biplot.rda: failed in axis scaling with negative 'scaling'
+	values when some species had zero variance (and hence species
+	scores was 0/0 = NaN).
+	
+	* cascadeKM: Calinski index for one group will now be NA instead
+	of randomly chosen Inf, -Inf or NaN which can cause confusion (see
+	http://stackoverflow.com/questions/21022848/r-produces-different-result-after-io-on-file).
+	
+	* ordiellipse: failed if all points were on a line. Now handles
+	these cases by drawing a line through the points, and issuing a
+	warning from chol(): "the matrix is either rank-deficient or
+	indefinite". Earlier we required at least three points for an
+	ellipse, but these could still be on a line and fail. Now we
+	accept two points, and draw the line with a warning. The problem
+	with three points was reported by Paul Bacquet (Louvain,
+	Belgium). 
+
+	* radfit: plotting of radfit frames of several communities failed
+	if there were one-species or no-species (empty) rows. Part of this
+	was fitted.radfit that now returns sensible and consistent output
+	for these cases as well, and radfit.data.frame() completely
+	removes empty rows from the data (with a warning).
+
+	* RsquareAdj: return list(NA, NA) for capscale objects with
+	imaginary component, and use rda method if there is no imaginary
+	component. 
+
+	* tabasco: "hclust" objects (use, sp.ind) are reordered using
+	weighted means. This is a better method than the unweighted means
+	used for reordering of dendrograms. Earlier "hclust" objects were
+	changed to dendrograms, but now we provide reorder.hclust() and
+	rev.hclust() in vegan, and can use improved method of ordering the
+	table. Dendrogram and hclust labels must match the community data
+	names, and now the community matrix is internally reordered to
+	match the dendrograms. This requires that the clusterings do have
+	labels attributes.
+
+	* treedive, treedist: default is now match.force = TRUE, and
+	treedive() gained new argument 'verbose' to turn off most messages
+	-- which is practical in oecosimu().
+
+	* BCI: names checked after http://www.theplantlist.org, but kept
+	the old (alphabetic) order of species. The changes are: Abarema
+	macradenium -> A. macradenia, Apeiba aspera -> A. glabra,
+	Aspidosperma cruenta -> A. desmanthum, Cassipourea ellipitica ->
+	C. guianensis, Chlorophora tinctoria -> Maclura t., Coccoloba
+	manzanillensis -> C. manzinellensis, Cupania sylvatica ->
+	C. seemannii, Dipteryx panamensis -> D. oleifera, Eugenia
+	coloradensis -> E. florida, Eugenia oerstedeana -> E. oerstediana,
+	Inga marginata -> I. semialata, Lonchocarpus latifolius ->
+	L. heptaphyllus (this is ambiguous, since Hebestichma cubense is
+	another alternative), Maquira costaricana -> Maquira guianensis
+	var. costaricana, Phoebe cinnamomifolia -> Cinnamomum triplinerve,
+	Swartzia simplex var. ochnaceae -> var. continentalis, Tabebuia
+	guayacan -> Handroanthus g. Guarea is ambiguous: data have three
+	taxa (fuzzy, grandifolia and guidonia), but theplantlist.org says
+	grandifolia is an ill. synonym of guidonia. This change allows
+	matching 206 of 225 BCI species with
+	http://datadryad.org/resource/doi:10.5061/dryad.63q27. In
+	addition, there are two vars of Swartzia simplex in BCI which
+	could be matched at species level.
+	
+	* dune data sets: use 4+4 letter CEP names for species instead of
+	old 3+3 names. Botanical nomenclature was updated: Leontodon
+	autumnalis -> Scorzoneroides (Leoaut -> Scorautu), Potentilla
+	palustris -> Comarum (Potpal -> Comapalu). Rows are now arranged
+	numerically, and species names alphabetically, vascular plants
+	first, and then the two bryophytes.
+
+	Added data 'dune.phylodis' which are ages of dune species
+	extracted from
+	http://datadryad.org/resource/doi:10.5061/dryad.63q27 (Zanne AE et
+	al. 2014, Nature doi:10.1038/nature12872, published online Dec 22,
+	2013).
+
+	dune.taxon was updated to APG III.
+
+	* varespec: use 4+4 letter CEP names instead of 3+dot+3.
+	Nomenclature was cautiously fixed. Most important changes: Ledum
+	palustre -> Rhododendron tomentosum (Led.pal -> Rhodtome),
+	Cetraria nivalis -> Flavocetraria nivalis (Cet.niv ->
+	Flavniva). In addition, Dip.mon was corrected to Diphcomp
+	(Diphasiastrum complanatum, should perhaps be Lycopodium c.), and
+	Barbilophozia lycopodioides to B. hatcheri.
+
+	* plot.envfit: when `envfit()` is called as
+	`envfit(ord, foo[, "bar"])`, there are no useful rownames on the
+	objects returned. Hence the logic in `plot.envfit()` when called
+	with argument `labels` was failing.
+
+	* rarecurve: line colour and type for each sample can now be
+	specified through formal arguments `col` and `lty`. Incidental
+	wish of http://stackoverflow.com/q/22714775/429846.
+
+	* simper: now doesn't fail with obscure error when groups have a
+        single member.
+
+Version 2.1-40 (closed December 12, 2013)	
+	
+	* anova.cca: Function is now based on the new code, but the old is
+	available in a function called ccanova. The API changes:
+	arguments 'step' and 'perm.max' are gone and replaced with
+	'permutations' that accepts a how() object defining permutations,
+	or a single number like previously or a permutation matrix. The
+	new anova.cca adds an option of analysing a sequence of ordination
+	models. This also means that '...' is now the second argument and
+	the names of all arguments must be written in full. This change
+	can be expected to cause trouble outside vegan. Currently the
+	tests in examples are passed, but tests fail (looks like being
+	caused by the change in API). 
+
+	* add1.cca, drop1.cca, ordistep, ordiR2step: functions were
+	adapted to new anova.cca and this brought along similar changes in
+	API. 
+
+	* envfit: function assumed that environmental variables are either
+	factors or numeric, and choked if they were neither but, say,
+	character strings. Now the function tries to coerce all
+	non-numeric variables into factors, including character strings
+	and logical. This is one possible reason for problems reported in
+	https://stat.ethz.ch/pipermail/r-sig-ecology/2013-December/004217.html,
+	although the reported case is irreproducible and we cannot be
+	sure. Also fixed handling of tied values in assessing the P-values
+	in vectorfit.
+
+	* nestednodf: vegan 2.1-36 (release 2.0-10) changed the function
+	so that weighted analysis of binary data was equal to unweighted
+	binary analysis, but this broke consistency with the original
+	software and publication by Almeida-Neto & Ulrich. The fix was now
+	made optional, and the default is to follow published method by
+	adding argument 'wbinary' (defaults FALSE). Based on the
+	suggestion by Matt Barbour in GitHub.
+
+	* ordispider: can now use spatial medians as centres instead of
+	the default centroids. The kind of centre is defined by new
+	argument 'spiders'.
+
+	* tests for cca/rda/capscale: commented out test that failed with
+	the anova.cca. Known issue was that 'by = "term"' and 'by =
+	"axis"' stop with error with missing data. This was designed and
+	can be re-designed to handle missing data by listwise deletion
+	so that models will have the same number of observations for every
+	variable.  An unknown issue was that 'by = "margin"' failed in
+	capscale(). This may be a scoping issue and needs inspection. We
+	keep the old vegan-tests.Rout.save file so that we are reminded of
+	the problems at every check.
+	
+Version 2.1-39 (closed December 3, 2013)
+
+	* anova.cca: started to rewrite the anova.cca family of functions
+	for permute package. At the first stage, a temporary development
+	function anovacca was created. The user interface was changed, and
+	the function no more adapts the number of iterations for the
+	P-value, and arguments 'step' and 'perm.max' were removed.
+	Instead, permute package is used to create a permutation matrix
+	used in all cases with fixed number of permutations.
+
+	In addition to the overall test, the function allows now testing
+	a sequence of models (anova.ccalist). Specific tests provided are
+	by = "term" (anovacca.byterm) which is fitted as a sequence of
+	models in anova.ccalist. Case by = "margin" directly calls
+	permutest.cca and gets the significances from differences of
+	residual variation similarly as anova.ccalist. Case by = "axis"
+	is implemented as a marginal model.
+
+	Simple permutations give identical results for "term" and "axis"
+	cases, but by = "margin" is different. Marginal models were
+	implemented as partial models with other parameters partialled out
+	in vegan 2.0 and earlier, but the current implementation is an
+	anova.ccalist model where the parameter in question was removed
+	and model compared against the complete model.
+
+	* commsim: documentation (commsim.Rd) was restructured so that
+	nullmodels were collected under separate sections with a brief
+	introductory text and shorter specific text of the
+	algorithm. Hopefully this makes easier for an outsider to grasp
+	the width of the choices.
+
+	* oecosimu: change printed quantiles to match the direction of the
+	test as changed in r2495.
+
+	* permutest.betadisper: updated to the new permute API and operates
+	similarly to permutest.cca in respect of how the permutation test
+	can be defined. It currently doesn't support the parallel processing
+	of the cca method, however.
+
+	* tests: vegan examples and vegan-tests have been out of sync for
+	a long time. These have not been updated because most of the
+	changes seem to be triggered by switching to R 3.0-x, and we have
+	not had time to analyse the reasons. The differences also seem to
+	be platform specific, and Linux and MacOS give slightly different
+	results. In particular, there seem to be differences in
+	permutations, constrained ordination, in particular in capscale()
+	and rounding of output. We have also introduced some changes in
+	output that were not yet synced.
+
+	* janitorial: vegan has been dependent on R >= 2.14.0 since
+	version 2.1-34 (r2597 Wed 28-Aug-2013). Now superfluous references
+	and tests for the older R version were removed in R code and
+	documentation. FIXME: cca() returns residuals.zombie item that was
+	supposed to be needed in R < 2.13.0. This was not yet removed: we
+	must first check that this can be safely done.
+
+	vegan also now depends on permute version 0.7-8 or later.
+
+Version 2.1-38 (closed November 10, 2013)
+
+	* DESCRIPTION: depends on permute >= 0.7-5, where the
+	permute::how() result object can be updated.
+
+	* bioenv: returns now the number of the best model ('whichbest'),
+	the standardized environmental data ('x') and the distance
+	function used for the environmental data ('distfun'). New function
+	bioenvdist() uses these to re-calculate the environmental
+	distances for the best model, or any other model selected by its
+	number.
+
+	* permutest.cca: permutation test uses 'permute' package. The old
+	interface was retained, and 'permutations' can be a single number
+	or a permutation matrix (as previously), but now it can also be a
+	how() object of the 'permute' package. The argument 'strata' was
+	also retained, but it is planned to be deprecated in the future,
+	and it is recommended that users switch to defining 'blocks' in
+	how(). 
+
+	* renyiaccum: plot() works also when only one index ('scales') was
+	used. The function gained new argument 'collector = FALSE' to
+	accumulate sites in the order they are in the data set in addition
+	to the summary statistics of permutations. This can be used to
+	analyse the randomness of the particular order of sites (looks
+	like my student would need this).
+	
+Version 2.1-37 (closed November 5, 2013)
+
+	* anova.cca: added new function anova.ccalist() to compare a
+	sequence of models. The function is still experimental ("proof of
+	the concept") and unexported. If this stays in vegan, it should
+	eventually be called from anova.cca(). This would bring along a
+	change of API to anova.cca(object, ..., alpha=...): the dots must
+	follow the first argument which turns off positional and partial
+	matching of arguments so that the function can collect the "cca"
+	models. We must decide whether the new function is worth such a
+	change that can make life harder for ordinary users.
+
+	One potential advantage is that the code in anova.ccaby* functions
+	could be simplified to anova.ccabylist() calls.
+
+	The function is based on calling permutest.cca for each model with
+	identical permutations. We can then compare the change in model
+	for each permutation and collect the test statistics for
+	differences. This requires that the models really are nested so
+	that residual deviance certainly decreases in bigger model
+	(testing theory requires nesting, but this is commonly violated by
+	users: here nesting is necessary). 
+
+	* parallel: default cluster defined by setDefaultCluster() is no
+	longer used in functions with parallel processing. Using default
+	cluster would need querying an unexported environment
+	parallel:::.reg, and this gives a NOTE in R CMD check.
+
+	* specaccum, renyiaccum, tsallisaccum: gained argument to select a
+	'subset' of sites (looks like my student would need them).
+	
+Version 2.1-36 (closed October 14, 2013)
+
+	* opened with the release of vegan 2.0-9.
+
+	* decostand(..., "normalize") uses now .Machine$double.eps to
+	replace zero sum of squares instead of matrix minimum.
+
+	* envfit: if a variable is constant, no fitted vector can be
+	calculated and results will be given as NA (with warnings).
+	Plotting of all vectors will fail in such cases because no finite
+	scale was found for arrows. Fixed in ordiArrowMul.
+
+	* envfit: try to produce something sensible if fitted variable has
+	constant values (invariable vector, one-level factor). Report
+	arrow heads as all zero, and R2=0 for vectors, and the centroid to
+	the data centroid and R2=0 for factors. Tied values are now
+	treated differently in factorfit: now they support null
+	hypothesis, previously they decreased the P-values. 
+
+	* fisherfit: completely rewritten and estimates of standard error
+	removed: I could find no justification for these. Actually, it
+	seems that the value of Fisher alpha as estimated in the function
+	was independent of the abundance distribution of species, but will
+	be defined by the number of species (S) and number of individuals
+	(N). Now the Fisher alpha is estimated from the relationship S =
+	alpha*(1 + log(N/alpha)) using function uniroot(). Because of
+	this, standard errors cannot be estimated and they were
+	removed. In addition, functions confint.fisherfit,
+	profile.fisherfit and plot.profile.fisherfit were removed. The
+	estimation of standard errors was also removed in function
+	fisher.alpha (that only calls fisherfit).
+
+	* nestednodf: matrix fill was wrongly calculated in weighted
+	analysis. The nominator was length of 'comm', and if input was a
+	data frame that was the number of columns instead of the number of
+	cells. The fill was correct in non-weighted analysis because there
+	data were transformed to a matrix, and the length of a matrix is
+	the number of cells (unlike in data frames).
+
+	* nestednodf: weighted analysis gave all statistics as zero if
+	binary data were supplied. Some ">" comparisons were changed to
+	">=" and now weighted analysis of binary data gives same results
+	as non-weighted analysis. However, this can change results of
+	weighted analysis of quantitative data. The change needs
+	endorsement by the function author Gustavo Carvalho.
+
+	* oecosimu: warns user if the specified nullmodel 'method' changes
+	quantitative input data to binary data under cover. Some people
+	have not noticed this.
+	
+Version 2.1-35 (closed September 25, 2013)
+
+	* ordilabel: colour arguments ('col', 'fill', 'border') can now be
+	vectors and will be recycled if needed. May need care if used with
+	'select', but should work with 'priority'. This was needed for a
+	function under development and testing: when labelling over lines,
+	it may be nice if the border has the same colour as the line
+	covered.
+
+	* MDSrotate: can now rotate a solution to more than one vector,
+	provided that the number of dimensions is higher than the number
+	of vectors. Because fitted vectors usually are correlated, only
+	the first vector is aligned to dimension 1, but second vector and
+	further are oblique to the corresponding axis. In any case they
+	will have zero correlation to all subsequent dimensions.
+
+	* simulate.rda, simulate.cca: gained argument 'correlated' for
+	using covariances of species in generating correlated multivariate
+	normal residuals in parametric simulations. The covariances are
+	estimated from the residual ordination of species. The argument
+	defaults FALSE which implements the old parametric simulation
+	where each species is simulated independently.  The argument has
+	no effect in capscale() which stores no information on species.
+	
+Version 2.1-34 (closed September 5, 2013)
+
+	* DESCRIPTION: dependent on R >= 2.14.0.
+	
+	* DESCRIPTION, vignettes: R 3.0.2-to-be checks with --as-cran
+	requires (with a NOTE) the vignettes source files (Rnw) to be in
+	vignettes/ directory.  Because Makefile is not executed in
+	inst/doc if vignettes/ directory is present, all other sources had
+	to be moved to vignettes/ as well with vignettes/.install_extras
+	to move those to inst/doc after building. This also made
+	/.Rinstignore unnecessary. The vignettes directory was introduced
+	and made recommended in R 2.14.0 so that this version of vegan
+	depends on R >= 2.14.0. 
+
+	* tweaks for R < 2.12.1 removed: code to fix buggy naming of
+	columns in qr.X before 2.12.1 (as.mlm.cca/rda, intersetcor), and
+	change in the cmdscale() output (eigenvals.default).
+
+	* tweak for R < 2.13.0 removed: change in cmdscale() output
+	(capscale).
+
+	* vegandocs: does not use unexported tools:::httpdPort. Now only
+	remaining ':::' case is querying default cluster in the
+	environment parallel:::.reg.
+	
+Version 2.1-33 (closed August 28, 2013)
+
+	* DESCRIPTION: new dependence on lattice. Passes new strict R
+	checks with NOTE on ':::' calls to tools:::httpdPort (to launch
+	reading vegan NEWS.html in existing browser window) and
+	parallel:::.reg to access the defaultCluster in an unexported
+	environment within 'parallel' Namespace.
+	
+Version 2.1-32 (closed August 19, 2013)
+
+	* opened a new version with the CRAN release of vegan 2.0-8.
+
+	* merged Eduard Szöcs's code on dispersion weighting of
+	overdispersed species following Clarke, K. R., M. G. Chapman,
+	P. J. Somerfield, and H. R. Needham.  2006. Dispersion-based
+	Weighting of Species Counts in Assemblage Analyses. _Marine
+	Ecology Progress Series_, 320, 11–27. The basic development was
+	made in github.com and merged to R-Forge.
+
+	* nullmodel: replaced internal indshuffle() function in
+	make.commsim() with much faster stats::rmultinom(). The
+	rmultinom() function takes argument 'n' for the number of random
+	vectors, and using this could be still faster, but we only
+	generate one vector in time. Using 'n' would require better
+	analysis of individual nullmodels. The commit changes random
+	sequences, but passes tests.
+
+	* oecosimu: Gained argument 'batchsize' to set the maximum size of
+	simulated nullmodels (in Mb). If a larger object would be
+	produced, the analysis is broken into several batches to avoid
+	exceeding the maximum size. This avoids exhausting memory which
+	can make whole R unresponsive and analysis very, very slow. In
+	general, the argument is needed with large data sets and/or large
+	number of simulations.
+
+	* orditkplot: bmp has been available in unix-alike OSes since
+	2008, or a month after writing orditkplot. Thanks to Brian Ripley
+	for informing us.
+
+	* vignettes: vignettes use now standard article style instead of
+	hacked jss style. Decision and diversity vignettes are in two
+	columns, but intro in one (R output did not fit in one
+	column). The common packages, macros and definitions were moved to
+	new vegan.sty which is written so that it should work both with
+	amsart and article, and with one and two columns. Figures are now
+	in standard figure environment, but intro redefines this to use
+	sidecaption figures.  
+	
+Version 2.1-31 (closed July 10, 2013)
+
+	* Dependencies: Vegan now depends on a version of permute equal
+	to 0.7-4 or later.
+
+	* betadisper, permutest.betadisper: Modified to use the new
+	permute package API (from version 0.7-3 onwards). The `permutest`
+	method gains a new argument `permutations`, which takes a matrix
+	of permutations to be used.
+
+	* protest: modified to use the new permute API. Gains argument
+	`control` which describes the design. As a result, `strata`
+	argument has been removed and `permutations` argument can only
+	be used to supply your own matrix of permutations. The number of
+	permutations and other features of the design are set via `control`
+	and function `how()` from permute.
+
+	* ordipointlabel: now uses `ordiArgAbsorber()` to stop the warnings
+	about non-graphical parameters being passed to plotting functions.
+
+	* ordisurf: can now pass in a line width for the contours via
+	argument `lwd.cl`.
+
+	* ordiArrowTextXY: New (internal) support function that finds
+	coordinates of text box at the point of the arrow so that the
+	arrow point just touches the text.  This should improve
+	positioning of the arrow labels and avoid writing labels over the
+	arrows.
+
+	* plot.envfit, plot.cca, text.cca: use ordiArrowTextXY() for arrow
+	labels instead of expanding arrow heads by 10%.
+
+	* plot.envfit: plot(..., add = FALSE) estimated string (text)
+	dimensions after plot.new() but before plot.window(). Since
+	plot.new() sets xlim, ylim to c(0,1) and then plot.window resets
+	the limits to the data values, string dimensions in user units
+	were poorly estimated. This became evident with new positioning of
+	arrow text based on string dimensions.
+
+        * plot.ordipointlabel: gains a plot method that is very similar to
+        `plot.orditkplot()` but which does not mess with graphical
+        parameters. This allows it to fit more naturally into a standard
+        R workflow (it plays nicely with `layout()` for example).
+
+	* stressplot: metaMDS and monoMDS stressplot() use now
+	expression(R^2) instead of ascii R2.  The stressplot.monoMDS()
+	function returns the plotting structures in the original input
+	order.
+
+	* vignettes: Brian Ripley urged us to remove
+	\usepackage{inconsolata} as this package is on the way to be
+	removed from CTAN.
+
+Version 2.1-30 (closed June 12, 2013)
+
+	* bioenv: can now use Mahalanobis, Manhattan and Gower distances
+	for environmental variables.  The Mahalanobis distances are based
+	on orthogonalized data, Manhattan distances give the direct sum of
+	differences of environmental variables, and Gower distances can
+	also handle factor variables. This involves adding internal
+	function veganMahatrans() for Mahalanobis transformation.  The
+	change was triggered by a recent email by Lydia Beaudrot (UC
+	Davis) to implement Mahalanobis distances, and at the same time I
+	also implemented Robby Marotte's suggestion of using Gower distances
+	(vegan Forum item in R-Forge in July 2012). The output is changed
+	to show the 'metric' and the name of the 'method' is fully
+	expanded. It no longer fails if 'upto' is too large. Passes "..." to
+	cor() like documented, but never done.
+
+	* rankindex: can now use Mahalanobis or Manhattan
+	distances. Scales automatically gradient variables to unit
+	variance (Euclidean) or range (Manhattan), and the scaling is
+	already inbuilt in Mahalanobis and Gower.
+
+	* vegdist: added Mahalanobis distance.
+
+Version 2.1-29 (closed April 19, 2013)
+
+	* ordisurf: significant changes were made to this function:
+
+	 - The default for `method` and `select` were changed to `"REML"`
+	and `TRUE` respectively.
+
+	 - Argument `thinplate` is deprecated in favour of `isotropic`. A
+	warning is now issued if `thinplate` is used.
+
+	 - The spline basis for the smoother can now be specified from a
+	subset of those implemented in the mgcv package. This is achieved
+	via the `bs` argument, which defaults to `"tp"` for thin plate
+	regression splines.
+
+	 - Argument `knots` and `bs` can now be a vector of length two, one
+	per ordination dimension considered. This is only of use with
+	anisotropic surfaces with `isotropic = FALSE`.
+
+	 - New argument `fx`; indicates whether the smoothers are fixed
+	degrees of freedom regression splines (`fx = FALSE`) or a
+	penalised regression spline (`fx = TRUE`). Can be a vector of
+	length 2 for anisotropic surfaces (`isotropic = FALSE`).
+
+	 - The number of locations in each ordination dimension at which the
+	fitted surface is evaluated can now be specified via new argument
+	`npoints`.
+
+	 - The formula passed to `gam` is now built in greater detail. When
+	the model is printed the user can see exactly how the smoother was
+	constructed.
+ 
+	* oecosimu: the interpretation argument "alternative" was really
+	twisted. We now changed the test direction and have a much clearer
+	explanatory text in printed output. The issue was raised by Juan
+	Manuel Barreneche (jumanbar) in GitHub (issue #14). NB. The
+	direction of the test changed from previous versions.
+
+	* raupcrick: adapted to the change in oecosimu() to define the
+	direction of the test.
+
+Version 2.1-28 (closed April 19, 2013)
+
+	* betadisper: failed with type = "centroid" when there was only
+	one group (i.e., in estimating the overall beta diversity in the
+	data). Reported by Pierre Legendre.
+
+	Now correctly reports distance to "medoid" in the print method
+	when type = "median". Reported by Pierre Legendre. The print
+	method also now shows only the first 8 eigenvalues.
+
+	* eigenvals: new method for class "betadisper".
+
+	* rda: eigenvalues are now regarded as zero if they are very small
+	compared to the first eigenvalue. Earlier we used fixed limit of
+	1e-4, but now the limit is first eigenvalues * 1e-5. Similar
+	change was not made in cca, since there the theoretical maximum of
+	the eigenvalue is 1, and comparison to an absolute minimum
+	threshold of 1e-4 makes sense.
+	
+Version 2.1-27 (closed March 17, 2013)
+
+	* cca/rda/capscale: removed references to 'u.eig' and 'v.eig'
+	items in support functions. These now use 'u' and 'v' items and
+	scale these with 'eig' (or diag(sqrt(eig))) when needed. This
+	prepares for removing items 'u.eig', 'v.eig' and 'wa.eig' in vegan
+	2.3 and 2.4. Concerns fitted.capscale, goodness.cca and
+	goodness.rda. The documentation has warned about removing these
+	*.eig items for years, and most vegan functions already avoided
+	using them. We have no idea if any external packages depend on
+	these and therefore we delay the removal till vegan 2.3 and 2.4
+	release. However, we warn about this in NEWS for 2.0-7 and also
+	say this clearly in cca.object documentation.
+
+	* nestedtemp: function failed if the matrix fill was < 0.38%,
+	because the fill line parameter was outside the original
+	estimation bracket. Now bracket is moved up if the estimation
+	fails. The problem was reported by Carsten Dormann (Univ Freiburg)
+	and Benjamin A. Sikes (Lincoln University, NZ).
+	
+	* specaccum: gained argument 'w' for weights to give the sampling
+	effort. This resurrects a feature that was introduced in r1505 (22
+	Feb 2011) and 1507, and removed in r1606 (26 May 2011). It seems
+	that BiodiversityR::balanced.specaccum() provides the same feature
+	but we still try it here.  The feature was resurrected after a
+	user query by Bastien Mérigot (Univ Montpellier II). The working
+	of the new argument is still untested with specaccum() support
+	functions.
+
+	* fitspecaccum: model = "asymp" was actually fitting logistic
+	regression (the same as model = "logis").
+
+	* text.cca, points.cca: gained argument 'axis.bp' (defaults TRUE)
+	to suppress drawing axes for scaled biplot arrows. Only effective
+	if 'bp' scores were requested.
+
+	* wcmdscale: setting only weights 'w' does not force full
+	"wcmdscale" output but returns only the matrix of coordinates of
+	real axes. Similarly, setting 'add = TRUE' would not force
+	"wcmdscale", but 'add' is not implemented.
+	
+Version 2.1-26 (opened February 11, 2013)
+
+	* New version opened with the release of vegan_2.0-6 on February
+	11, 2013.
+
+	* metaMDS: exposed argument 'maxit' in metaMDSiter() so that users
+	can set the number of iterations in monoMDS() or isoMDS().
+	metaMDS(..., trace = 2) show the stopping criterion used in with
+	engine = "monoMDS". After wishes by Jon Bakker, Univ Washington
+	(U.S.A.).
+
+	* tabasco: a sister function of vegemite() to display a compact
+	community table using heatmap(). Both vegemite() and tabasco() can
+	handle cluster::agnes() trees.
+
+	* wcmdscale: return a full "wcmdscale" object if any argument is
+	set to non-default value. This also implies that if weights 'w'
+	are set, the result will be full "wcmdscale" object with a
+	"weights" item, instead of a simple matrix with no information
+	about weights used.
+	
+Version 2.1-25 (closed February 11, 2013)
+
+	* FAQ: new entry on impossibility of using random effects in cca,
+	rda, capscale & adonis, and telling how to do this
+	approximately. Please comment and correct.
+
+	* oecosimu: changed the structure of the oecosimu() result
+	object. It now returns a list of two items: "statistic" is the
+	observed statistic, and "oecosimu" which contains the simulation
+	records. In previous version the function returned the statistic
+	and added item "oecosimu" there. Previously, the result object was
+	of type c("oecosimu", class(statistic)) as the original
+	"statistic" object was returned amended with an "oecosimu" item,
+	but now it is only of type "oecosimu" with the original
+	"statistic" as a separate item. So "statistic" is now one level
+	deeper instead of being the main object.
+
+	* predict.cca, predict.rda: In rev2412, 2413 we studied including
+	partial (pCCA) component in predict(..., type="response") and
+	predict(..., type="working") models, but then adopted a policy of
+	never having partial component, but always returning only the
+	component requested for. Therefore warning messages of ignoring
+	pCCA component were removed. This log entry was made to emphasize
+	that there is a policy decision, and an alternative policy can be
+	tracked in the repository.
+
+	* fitted.[cca,rda,capscale]: can now return partial component with
+	argument 'model = "pCCA"'.
+
+	* simulate: the simulate functions for "rda" and "cca" return
+	objects with original row and column names. Function capscale()
+	already did so, and simulate.nullmodel() was changed so in vegan
+	2.1-24 (r2396).
+	
+Version 2.1-24 (closed February 2, 2013)
+
+	* simulate.nullmodel: output array inherits dimnames after the
+	input data matrix, this is often required by oecosimu.
+	Dimension names are now only stored once for all nsim
+	simulations, so it is an improvement over the implementation
+	in commsimulator.
+
+	* oecosimu: the 'oecosimu' list cannot be added to the result if
+	the nestfun() returns a data frame. In that case, the 'oecosimu'
+	list is treated like a variable, and this gives either an error of
+	wrong length or if the length matches, a mess.  Now data frame is
+	silently turned into a list which also means that it will not be
+	printed with the output. The name of the 'statistic' is not used
+	for unnamed vector output where it would only name the first item
+	(like would be the case if the 'statistic' was extracted from a
+	data frame). It is now checked that only one 'statistic' is given
+	and a comprehensible error message is issued instead of the
+	current confusing one. The changes were triggered when testing
+	picante::pd().
+
+	* protest: huge speed-up. Instead of calling procrustes() in every
+	permutation step, we only calculate the goodness of fit statistic
+	in svd(). This avoids a huge overhead of procrustes(). In a test
+	with a 160 x 12 matrix (RDA scores from Bryce Canyon data
+	'bryceveg') with 9999 permutations, the time went down from 12 sec
+	to 0.8 sec in my desktop. The analysis prints now also the 'ss'
+	term (residual sum of squares) which for symmetric analysis is
+	equal to squared m12.
+
+	* procrustes: marginally faster way of getting sum of squares of a
+	matrix. This should not influence the results, but one metaMDS()
+	trace result has a small difference in nearly-zero rmse (was
+	1.094382e-06, is 1.09439e-06) in my desktop.
+
+	* treedive, treedist: treedive() did not correctly match data and
+	tree when the tree contained species that did not occur in the
+	data. Function treedist() tries to match tree and data when their
+	sizes differ, and argument 'match.force' was added to force
+	matching even when sizes do not differ.
+	
+Version 2.1-23 (closed January 25, 2013)
+
+	* clamtest: Richard Telford reported an extreme case 
+	{rbind(a=c(1,0,5,10,4),b=c(0,10,5,2,1))} where
+	clamtest failed due to inadequately setting up minimum
+	abundance thresholds for rare species. The issue is solved
+	by hard coding the minimum values to be 1 when no suitable 
+	solution is found. Also, clamtest failed when the community matrix 
+	had no column names, this is now fixed.
+
+	* capscale: It was wrongly assumed that eigenvalues could be used
+	in normalization of species scores, but this worked only with
+	Euclidean distances. Now normalization is done explicitly with
+	decostand() function. This change means that scaling of species
+	scores will change, and graphs can look different than
+	previously. All analyses should be redone. Function now displays
+	the value of the additive constant with 'add = TRUE'.
+
+	* stressplot: added stressplot() methods for wcmdscale(),
+	capscale(), cca(), rda(), prcomp() and princomp() results.  These
+	also work with constrained ordination. These methods display the
+	ordination distances in given number of dimensions (defaults 'k =
+	2') against original observed distances.  These original distances
+	are found from the full space solution, and in capscale() and
+	wcmdscale() they are correct for the imaginary axes. The weights
+	are used in wcmdscale() and cca() so that their distances differ
+	from plotted ordinations, but agree with eigenvalues. Partial
+	models (p-dbRDA, pRDA, pCCA) add the partial component both to the
+	original dissimilarities and the fit.
+
+	The row scores (u) alone will not correctly estimate original
+	dissimilarities in constrained (or partial) ordination. In
+	unconstrained ordination we can get the distances as dist(u %*%
+	diag(sqrt(eig))), but in constrained ordination this will not give
+	the observed dissimilarities with all axes. Currently we get the
+	ordination distances from a (low-rank) approximation of the data
+	as dist(u %*% diag(sqrt(eig)) %*% t(v)). However, it is not sure
+	that this the right thing to do, but perhaps we should acknowledge
+	the fact row ordination with constraints does not approximate
+	distances. So this may change.
+
+	* wcmdscale: added method functions print(), plot() and
+	scores(). Now class "wcmdscale" results also return the function
+	call and dimensions have names.
+
+	* ordilabel: was missing ordiArgAbsorber() on the plotting calls
+	to text() and polygon(). Thus lots of warnings were raised in use.
+
+	* orditorp: added argument select, to choose which of the rows of
+	scores are plotted, so matches with ordilabel() and
+	ordipointlabel().
+	
+Version 2.1-22 (closed January 8, 2013)
+
+	* multipart: argument global was printed as TRUE regardless of the
+	actual argument value. This did not affect calculations. Reported
+	by Valerie Coudrain.
+
+	* monoMDS, metaMDS: Default convergence criteria were changed in
+	monoMDS. Most importantly, now scale factor of the gradient is
+	sfgrmin = 1e-7. The former limit 1e-5 was much too slack with
+	large data sets and iterations stopped too early without getting
+	close to the solution. In addition, scores() ignore now requests
+	to scores beyond those calculated instead of failing, and
+	scores.metaMDS() does not drop dimensions.
+
+	* metaMDS: Iteration sometimes finds a false convergence, or an
+	identical solution to the previous best although a better solution
+	can exist. These are undetected except sometimes in parallel
+	processing. Therefore we first label convergence false when we
+	find a new best solution, and then study if it really
+	converged. This is a rare effect, and can only be seen with
+	parallel processing. An example is
+
+	set.seed(7)
+	metaMDS(BCI, k=4, parallel=2)
+
+	which will converge in run 7 with the old code although run 8
+	would be better, but converges in run 18 with the new code.
+	
+	* nestednodf: added plot() method modelled after
+	plot.nestedtemp().
+	
+	* ordiR2step: gained argument 'R2scope' (default TRUE) which can
+	be used to turn off the criterion of stopping when the adj-R2 of
+	the current model exceeds that of scope.  This option allows model
+	building when the 'scope' would be overdetermined (number of
+	predictors higher than number of observations). Pierre Legendre
+	needed this option for some checks with huge AEM/PCNM scopes.
+
+	* envfit, plot.envfit: Plotting an object fitted by envfit() would fail
+	if p.max was used and there were un-used levels for one or more factor
+	constraints. The un-used levels could result from deletion of
+	observations with missing values or simply the result of supplying
+	a subset of a larger data set to envfit(). Both cases are now handled
+	through the use of droplevels().
+
+Version 2.1-21 (closed November 19, 2012)
+
+	* New version opened with the CRAN release of vegan 2.0-5 on Oct
+	8, 2012.
+
+	* dispindmorisita: output gained a new column for Chi-squared
+	based probabilities that the null hypothesis (random distribution)
+	is true.
+
+	* clamtest: output was wrong when some of the possible
+	species groups were missing (bug report submitted by R Telford).
+
+	* procrustes: plot() of two-dimensional solutions often drew
+	original axes in a wrong angle. The problem was reported by
+	Elizabeth Ottesen (MIT).
+
+	* msoplot: gained legend argument for positioning the legend
+	according to user needs.
+
+Version 2.1-20 (closed October 8, 2012)
+
+	* anova.cca: Dr Sven Neulinger (Christian Albrecht University,
+	Kiel, Germany) reported several problems with anova.cca cases. All
+	these were problems in scoping. Two problems solved with this
+	commit were: (1) anova.cca(..., by = "axis") always failed in
+	partial analysis (with conditions), (2) anova.cca(..., by =
+	"term") failed in partial models when there was no 'data='
+	argument, but the variables were in the global workspace. The
+	first fix also seems to allow anova(<prc.object>, by = "axis")
+	that used to fail. In addition, there is one unsolved problem with
+	search order: stats function C() is found instead of variable 'C'
+	and this gives an error message "object is not a matrix".
+
+	* radfit: The methods are now more consistent over different
+	levels of radfit models (radline, radfit, radfit.frame).  The
+	common methods to all include now AIC(), coef, deviance(),
+	logLik(), predict() and fitted(). The radfit() objects gained
+	points() and lines() methods. The data frame method also works
+	with matrices.  The predict() method gained new argument to change
+	the expected 'total' size of communities. The fitted() returns now
+	named vectors or matrices. It seems that radlattice() never
+	displayed BIC contrary to documentation and labelling in graph.
+	This has been wrong since the introduction of radlattice in r551
+	(2008-11-09).  Now radlattice also check that it gets the "radfit"
+	object it can handle.
+
+	* Rd: documentation files upgraded to the third English edition
+	(2012) of Legendre & Legendre.
+	
+Version 2.1-19 (closed September 16, 2012)
+
+	* adonis: Small changes in calculations. Simplified calculations
+	of matrix G as centred distance matrix, and does not keep n x n
+	matrices that are not needed (A) or used only once (identity
+	matrix I). These can make calculations marginally faster and
+	reduce the memory usage, but probably there are no observable
+	effects in most data sets. The new centring is probably more
+	accurate than older, and therefore the last significant digits can
+	slightly change (magnitude 1e-12 in tests).
+
+	* betadisper: An effective R stats .C function was used for double
+	centring, but it was removed from the API in r60360 | ripley |
+	2012-08-22 07:59:00 UTC (Wed, 22 Aug 2012). This removal stopped
+	betadisper() with error. Now we have a less efficient R code for
+	the same purpose. However, the effects in timing should be
+	negligible.
+ 
+	* density methods: all vegan functions that return simulated or
+	permuted statistics have now density() methods that directly
+	access the returned statistic. The functions return an object of
+	class "vegandensity" that inherits from class "density". The
+	object is identical to class "density", but it is amended with
+	item "observed" that contains the observed statistic. The observed
+	statistic is also put among permuted values when estimating the
+	density. This can cause a pimple in density lines when the
+	observed statistic is very different from simulated values, but it
+	is consistent with the permutation tests.
+
+	The function has a plot.vegandensity() function that is similar to
+	plot.density(), but it also draws a vertical line for the observed
+	statistic.
+
+	The density methods were made available for adonis, anosim,
+	mantel & partial.mantel, mrpp, permutest.cca and procrustes. The
+	anova.cca function does not return permuted statistics.
+
+	All density methods handle only one statistic. Function adonis()
+	can return a matrix of permuted F-values for each term, and it
+	gained a densityplot method (lattice package) that can handle all
+	these simultaneously.
+	
+	Functions adipart, hiersimu and multipart were made
+	oecosimu-compliant earlier and simultaneously they also gained the
+	density and densityplot methods.
+
+	* mantel, mantel.partial: Gained argument na.rm (defaults FALSE)
+	to remove missing values from dissimilarities. To implement this,
+	there were some internal changes in functions (that should not
+	influence the results): The functions mixed cor.test() and cor(),
+	but now only use cor(). Function cor.test() was only used to get the
+	textual presentation of the correlation 'method', but this is now
+	found internally.
+
+	* protest: do not return the observed statistic as one of the
+	permuted values but separately.
+
+	* radfit: gained a predict method which works for single models
+	('radline'), radfit, and radfit.frame. All predict functions
+	accept 'newdata' which need not be integer, but extrapolation may
+	fail for some models.  Needs still documentation. The function was
+	provided due to a user request.
+	
+Version 2.1-18 (closed August 20, 2012)
+
+	* cIndexKM: internal function count() triggered a warning in R CMD
+	check with R-devel because the function used a .C() call to an
+	unloaded package "cclust". count() was never called in the
+	current cIndexKM() and was removed together with the following
+	currently unused functions: withinss(), varwithinss(),
+	maxmindist(), vargss(). These deletions should have absolutely no
+	visible effects.
+
+	* envfit: the plot() method gained argument 'labels' to change the
+	default labels. The default labels are displayed with the new
+	labels() function.
+
+	* vignettes: building vignettes failed in CRAN and R-Forge. These
+	sites use TeXLive 2012 distribution which was not yet used by any
+	vegan developer when vegan 2.0-4 was released. Ubuntu Linux still
+	stocks TeXLive 2009, and new version is promised first for the
+	12.10 release, and many other Linuxes are just as old. TeXLive
+	2012 for MacOS was released in July 2012 and with that we could
+	pin down the problem. There is hardly any user visible changes
+	except that building vegan succeeds with vignettes.
+	
+Version 2.1-17 (closed July 30, 2012)
+
+	* New version opened with the CRAN release of vegan 2.0-4 on June
+	18, 2012.
+
+	* biplot.rda: bug in specification of `type` argument if not supplied
+	by the user; should have been a vector of length == 2.
+
+	* adipart, hiersimu, multipart: default and formula methods of
+	these functions were identical (also for the calculations) except
+	in interpreting the input. Now the formula method only interprets
+	the formula and calls the default method for the actual
+	calculations without replicating its code. The "call" attribute of
+	these functions now returns the generic function name without
+	".default", ".formula" suffix. 
+
+	Functions use now print.oecosimu() for displaying results and
+	their specific print.*() functions were deleted. This involved
+	changes in attributes: the printed attributes are now in
+	object$oecosimu$simulated instead of object.
+
+	* oecosimu: returns "call" attribute similarly as adipart(),
+	hiersimu() and multipart(). The print.oecosimu() output changed,
+	and shows the call. print.oecosimu() is able to display adipart(),
+	hiersimu() and multipart() results, but does not show all
+	information that those dedicated functions showed about options.
+
+	* Formula methods for adipart/multipart/hiersimu functions
+	use a new internal (hierParseFormula) to interpret the formula.
+
+Version 2.1-16 (closed June 18, 2012)
+
+	* envfit: plot() gained new argument 'bg' that triggers labelling
+	with ordilabel() using the colour given in 'bg' as the background.
+
+	* simper: added parallel processing for permutation tests in
+	accordance with other vegan functions and with similar user
+	interface. The code was developed by Eduard Szöcs in
+	http://github.com.
+
+	* predict.cca: number of rows must match in the original data and
+	'newdata' of cca() result, because original row weights are used
+	in scaling the results.  Now the match is checked, and
+	non-matching 'newdata' is ignored with warning.  Earlier this gave
+	an error.  Reported by Glenn De'ath.
+
+	* betadisper: the method is biased with small, unequal group
+	sizes. Bias corrected version can now be used with new argument
+	'bias.adjust' (defaults 'FALSE'). The problem was analysed and
+	bias correction developed by Adrian Stier and Ben Bolker.
+
+	* .checkSelect: standardise those plotting functions that have a
+	'select' argument that controls which rows of the scores are
+	plotted. All these functions now use .checkSelect() to check and
+	apply 'select' as appropriate.
+
+	* ordipointlabel: gains argument 'select' which allows some rows
+	of the plotted scores to be skipped in the same manner as for
+	text.cca(). This only applies when a single set of scores is
+	plotted. Otherwise it is ignored and a warning issued.
+
+	* ordihull, ordiellipse: defaults to use semitransparent fill
+	colour with 'draw = "polygon"', and gain argument 'alpha' to set
+	the transparency.
+
+	* ordihull: gained explicit 'col' argument and adds labels after
+	drawing convex hulls so that filled hulls (with 'draw = "polygon")
+	do not cover labels. With these changes, the behaviour of
+	ordihull() is similar to ordiellipse(). The labels are centred
+	more correctly.
+
+	* metaMDS: A warning is issued on too good stress (zero or nearly
+	zero).  This is often a symptom of insufficient data.  In general,
+	you need n > 2*k + 1 points for k dimensions, and Kruskal's advice
+	is to have n > 4*k + 1.  With low number of points there can be
+	several complete (zero stress) but different results, and no two
+	convergent solution can be found.  The warning is issued also when
+	convergence was obtained, and information on dimensions is
+	printed.  FAQ gained an entry on the issue.
+
+	* bioenv: accepts now dissimilarities or a square matrix that can
+	be interpreted as dissimilarities instead of a community data frame.
+	This allows using other dissimilarities than those in vegdist(). 
+
+	* update.nullmodel: explicit assignment is required to update
+	the input object (nm <- update(nm, ...)).
+
+	* ordiplot3d: the returned envfit.convert() function did not
+	recognize 'choices'.
+
+Version 2.1-15 (closed May 11, 2012)
+
+	* rrarefy, drarefy: check the data are integers -- the functions
+	do not give sensible results with real values.
+
+	* monoMDS: checks now that there is a sufficient number of non-NA
+	dissimilarities for the analysis. People really try to use NMDS
+	with too small data sets. The change was triggered by a user who
+	had tried to find a six-dimensional solution for seven points
+	(21 dissimilarities, 42 scores) using 100,000 random starts in
+	metaMDS. With over-defined models there is an infinite number of
+	different solutions with nearly zero stress, and no convergence is
+	found. 
+
+	* ordiplot3d: function returns the projected coordinates of the
+	origin, and function envfit.convert() that can project a
+	three-dimensional envfit() result to the current plot. Unlike
+	originally assumed, the function will not set equal aspect ratio
+	for all axes.  We try to compensate this by setting equal scaling
+	to all axes.
+	
+Version 2.1-14 (opened March 9, 2012)
+
+	* Opened a new version with the CRAN release of vegan 2.0-3 on
+	March 3, 2012.
+
+	* Warton, Wright & Wang (Methods Ecol Evol 3, 89-101; 2012) had a
+	paper where they analysed the confusion of location (differences
+	between groups) and dispersion (variability within groups) in
+	dissimilarity-based analyses.  We have warned on this for long in
+	vegan, but now we can get support from this paper. Explicit
+	warnings added to anosim, mrpp, simper (where this is worst) and
+	adonis (where this was already analysed by Marti Anderson when
+	introducing the method).
+
+	* adipart, multipart, hiersimu: permutation tests assumed constant
+	full gamma diversity in all simulations even when the null model
+	could produce variable gamma diversities. The default method
+	("r2dtable") had constant gamma diversity.
+
+	* adipart, multipart, hiersimu: these are now generic functions
+	with default and formula methods. The formula method is identical
+	to the previous function, the default method can take two matrices
+	as input, but the second argument describing the hierarchy can be
+	missing. In this case a trivial two-level hierarchy will be
+	assumed (each row is a separate group, all rows are in same
+	group).
+
+	* anova of prc() objects by "axis", "terms" or "margin" failed due
+	to NAMESPACE issues. Reported as issue #7 by Eduard Szöcs in
+	github.com.
+
+	* clamtest: wrongly used frequencies instead of the counts
+	when calculating sample coverage to test x < coverage.limit. 
+	No detectable differences were produced when rerunning 
+	examples from Chazdon et al. 2011 (Ecology, 92, 1332--1343)
+	and vegan help page.
+
+	* envfit: failed if some of the environmental variables were
+	factors with unused factor levels. Fixed in centroids.cca.
+	Reported as issue #8 in github.com by Eduard Szöcs.
+
+	* msoplot: expose 'ylim' as an argument. Previously, 'ylim' was
+	set internally and user could not change its setting, although
+	y-axis maximum was sometimes so low that standard errors were
+	outside the plot, and legend covered lines. The default setting of
+	'ylim' was improved, and the function follows R idiom more
+	closely. Reported in a private email to J.O. by Ricardo Pita.
+
+	* scoverage: new function for sample coverage based correction for
+	calculation of relative frequencies in count community matrices
+	(Good 1953, Biometrika 40, 237--264).
+	
+Version 2.1-13 (closed March 9, 2012)
+
+	* cca/rda/capscale: names of levels could be dropped if a
+	two-class factor was used as the only constraint. Noticed in an
+	email of Sascha Kirchner (Univ Helsinki, Finland).
+	
+	* scores: expand description of the default method to avoid
+	confusion with specific vegan methods.
+	
+	* scores.monoMDS: did not know 'choices' and hence plot() was also
+	unable to choose dimensions. 
+	
+	* vegdist: Added Cao dissimilarity (CYd). Thanks to Yong Cao for
+	consultation. 
+	
+Version 2.1-12 (closed February 23, 2012)
+
+	* scores.default: failed if users asked scores of non-existing
+	axes. This was reported as an error in ordiplot() when the user
+	tried to plot 2-dim graph of 1-dim solution in
+	https://stat.ethz.ch/pipermail/r-sig-ecology/2012-February/002764.html
+
+	* simper: new function to implement "similarity percentages" of
+	Clarke (Austral. J. Ecol. 18, 117-143; 1993) contributed by Eduard
+	Szöcs (Uni Landau, Germany).
+
+Version 2.1-11 (closed February 9, 2012)
+
+	* indpower: now can handle input objects without dimnames.  This
+	caused problems with oecosimu, because nullmodel objects have no
+	dimnames to save memory.  Extended example on indpower help page
+	shows the p-value and heterogeneity calculations suggested in
+	Halme et al. 2009.
+
+	* adonis, anosim, mantel, mantel.partial, mrpp, permutest.cca: do
+	not need clusterEvalQ(parallel, library(vegan)) for socket
+	clusters. 
+
+	* adonis: added missing 'mc.cores=' for multicore parallel
+	processing. 
+
+	* bioenv: implemented parallel processing.
+
+	* metaMDS: implemented parallel processing which runs iterations
+	(tries) in batches of 'parallel' iterations. However, it seems
+	that this does not work completely with monoMDS: basic results are
+	OK, but 'diss' and 'dist' vectors are scrambled (tested in Linux &
+	R 2.15.0) which is evident if you try to run stressplot() on the
+	result.  The problematic behaviour can also be generated directly
+	with monoMDS():
+
+	mods <- mclapply(1:8, function(i) monoMDS(d), mc.cores=2)
+	stressplot(mods[[1]])
+
+	The 'diss' and 'dist' seem to contain random rubbish as soon as
+	mc.cores > 1.  On the other hand, there are no similar problems
+	with isoMDS() -- but it does not directly return 'diss' and
+	'dist'.  If this cannot be solved, the parallel processing (r2069)
+	will be reverted.
+
+	* nesteddisc: new argument 'niter' to give the number of
+	iterations to reorder tied columns.
+
+	* renyi.Rd: fixed a broken link reported by Arne Erpenbach (Uni
+	Frankfurt, Germany). 
+
+Version 2.1-10 (closed February 5, 2012)
+
+	* adonis: print info that terms are added sequentially -- this
+	seems to confuse users. Make this change in adonis() instead of
+	print.adonis to be prepared to add other policies.
+
+	* adonis, anosim, mantel, mantel.partial, mrpp: implemented
+	parallel processing.
+
+	* RsquareAdj: implemented adjusted R2 for partial RDA results.
+	The adjusted R2 of model rda(Y ~ X1 + Condition(X2)) is defined so
+	that it is the same as component '[a] = X1|X2' in
+	varpart(). Removed some dead code from RsquareAdj.cca().
+
+	* varpart: do not scale constraints to unit sd -- this makes
+	constant columns (like all zero) into NaN and causes an error in
+	simpleRDA2.  Not scaling may help in problems like that reported
+	in "[vegan-help][5477] Nested factors in function "varpart"?" by
+	Katie Shelef on 26 Jan 2012.
+
+	* use inconsolata fonts in vignettes.
+
+	* added .Rinstignore file to list inst/doc files that should not
+	be installed (Makefile, tex, bib, sty). Background: R 2.13.0
+	mandated to put vignettes to their specific vignettes/ directory,
+	but this is incompatible with R 2.12.* where vegan also should
+	work.  The inst/doc directory can still be used (it is now
+	'deprecated'), but system files (such as Makefile) and
+	intermediate files (such as tex) are silently copied to the
+	installation.  R 2.15.0 (under development) added R CMD check test
+	for these extra files.  File .Rinstignore allows maintaining R
+	2.12.* compatibility and silences R 2.15.0 tests.
+	
+Version 2.1-9 (closed January 22, 2012)
+
+	* public launch of parallel processing in vegan. First step was to
+	explain the implementation in decision-vegan.Rnw. 
+
+	* DESCRIPTION: vegan suggests 'parallel'. The 'parallel' package
+	was released with R 2.14.0. If you need to check or use vegan with
+	older R, you should set environmental variable
+	_R_CHECK_FORCE_SUGGESTS_=FALSE (see, e.g., discussion
+	https://stat.ethz.ch/pipermail/r-devel/2011-December/062827.html).
+
+	* oecosimu, permutest.cca: new parallel block which honours
+	setDefaultCluster() in R-devel (becoming R 2.15.0) and
+	automatically uses parallel processing with socket clusters if
+	setDefaultCluster was defined. Tested in R (unstable) (2012-01-16
+	r58122) with full features, in R 2.14.1 without setDefaultCluster,
+	and in R 2.13.1 (2011-07-08) without parallel processing
+
+	* anova.ccabymargin failed if none of the terms was analysed (all
+	were aliased) and no permutations were performed.  This would
+	happen with, e.g.,
+	
+	A <- dune.env$Management
+	anova(rda(dune ~  Management + A, dune.env), by = "margin")
+
+	or in general if all marginal effects were aliased. 
+
+	* capscale: plotting failed for mod <- capscale(dune ~
+	Condition(Management) + A1, dune.env). The centroids for
+	Management were completely removed leaving a zero-row matrix of
+	centroids, and this caused an error in plot() and would probably
+	fail elsewhere.  The same problem can appear with completely
+	aliased classes, or when class centroids are nearly zero.
+
+	* ordistep: handle cases where the marginal effects for
+	adding/dropping are completely aliased. This should avoid problems
+	like that reported in
+	https://stat.ethz.ch/pipermail/r-help/2012-January/300167.html
+
+Version 2.1-8 (closed January 8, 2012)
+
+	* betadisper: failed with an error in internal function
+	betadisper() if there were empty levels. This could happen when
+	'groups' was a factor with empty levels, and was reported in 
+	https://stat.ethz.ch/pipermail/r-sig-ecology/2011-November/002525.html
+	The behaviour is now corrected in ordimedian() which will return NA
+	for empty factor levels. 
+
+	* nestedbetasor, nestedbetajac: New functions that implement
+	decomposition of Sorensen and Jaccard beta diversities into
+	components of turnover and nestedness following Baselga (Global
+	Ecology and Biogeography 19, 134-143; 2010). These are documented
+	with nestedness indices and are ready to be used with oecosimu().
+
+	* oecosimu: more informative text on 'alternative' hypotheses in
+	the printed output. Return also the mean of simulations and show
+	that in the printed output.
+
+	* plot.cca: works with degenerate solutions where constraints are
+	aliased and 'biplot' scores have zero rows. Stops with a
+	comprehensible error message if a user requests non-existing
+	scores (such as "bp" scores for unconstrained ordination).
+	
+	* rarecurve: new function to draw rarefaction curves for each
+	plot, optionally with sample size vertical and corresponding
+	horizontal richness lines. Soil microbiologists with sequencing
+	data seem to want these, and I have seen them used elsewhere as
+	well. 
+	
+Version 2.1-7 (closed November 19, 2011)
+
+	* adonis: speed up implemented in r1636 was not passing the
+	transposed matrices to internal f.test function. Reported by
+	Nicholas Lewin-Koh.
+
+	* metaMDS: arguments 'noshare = 0' and 'noshare = FALSE' are now
+	different: zero is taken as the numeric threshold and always
+	triggers stepacross(), whereas FALSE is logical and never triggers
+	stepacross.
+
+	* vegan 2.0-2 was released on November 15, based on this version.
+
+Version 2.1-6 (closed November 12, 2011)
+
+	* FAQ: correct mark-up of hyper links (@uref{} instead of @url{}),
+	add new entries on RDA scaling, scaling of NMDS stress and scaling
+	of environmental arrows in cca/rda/capscale/envfit, plus some
+	minor updates of old entries.
+
+	* ordiarrows, ordisegments: gained argument 'order.by' that can be
+	used to order the points within groups before drawing the arrows
+	or segments. This message in R-sig-ecology seems to need this: 
+	https://stat.ethz.ch/pipermail/r-sig-ecology/2011-November/002464.html
+
+	* ordispider: returns invisibly the coordinates to which each
+	point is connected. Triggered by an email query of this
+	functionality. 
+
+	* ordiplot3d: expanded example to show how to use xyz.convert() to
+	add points as per
+	https://stat.ethz.ch/pipermail/r-help/2011-October/293955.html
+
+	* oecosimu: new proposition for implementing parallel processing
+	following suggestions of Peter Solymos.  The only relevant
+	argument is now 'parallel' which can be either the number of
+	parallel processes (defaults getOption("mc.cores", 1)) or a
+	pre-defined socket cluster or NULL in which case it is taken as
+	the default cluster defined by setDefaultCluster (this last option
+	only works in R-to-be-2.15.0 unstable, and using NULL is a user
+	error in R 2.14.0 and hence undocumented). The 'parallel' defaults
+	to 1 (no parallel processing), but if the user sets the "mc.cores"
+	option, all parallel processing functions will automatically use
+	that number of parallel processes.  (The "mc.cores" argument is
+	used by the 'parallel' package, but it is normally unset.) If
+	'parallel' is a socket cluster or there is a default cluster
+	('parallel = NULL'), this will be used without setting up and
+	closing the cluster. This (1) saves time, (2) allows using of
+	other packages than 'vegan' if user has given command
+	'clusterEvalQ(library(foo))', and (3) makes unix-like OS
+	(incl. MacOS X and Linux) to use the socket processing instead of
+	forking.
+
+	* permutest.cca: parallel processing modelled after oecosimu().
+
+Version 2.1-5 (closed October 30, 2011)
+
+	* opened a new version with the release of vegan 2.0-1 on Oct 20,
+	2011.
+
+	* metaMDSdist: the default value of "noshare" was inconsistent
+	with metaMDS(), and therefore stressplot() could fail for
+	engine="isoMDS". Usually metaMDS() sets the 'noshare' depending on
+	the engine, but metaMDSdist() is called directly from
+	capscale(..., metaMDSdist = TRUE) and metaMDSredist (for
+	stressplot), and now these default to extended dissimilarities.
+	The problem was reported by Falk Hildebrand
+
+	* capscale: could fail if constrained component had zero rank,
+	typically in partial models where constrained component was
+	completely aliased. This was observed when checking an R-News
+	query of October 19, 2011
+	(https://stat.ethz.ch/pipermail/r-help/2011-October/293077.html)
+	
+Version 2.1-4 (opened October 20, 2011)
+
+	* adonis, anosim, CCorA, envfit (factorfit, vectorfit), mantel,
+	mantel.partial, mrpp, protest: user interface changed and
+	'permutations' can now be a matrix where each row gives permuted
+	indices. Internally first find a permutation matrix or use the
+	given permutation matrix, and then find the statistics with single
+	{ls}apply. Functions adonis and mrpp already worked like this, but
+	they gained the option of matrix input. This makes the functions
+	ready both for the 'permute' package and for parallelization
+	(replace {ls}apply with mclapply, par{SL}apply).  Function
+	envfit() was much simplified by generating a common permutation
+	matrix in envfit.default() and using that as the input to
+	vectorfit() and factorfit(). The anova.cca* cases should also be
+	made to use a single generated permutation matrix, as
+	permutest.cca() allows this.
+
+Version 2.1-3 (closed October 16, 2011)
+
+	* added plot.preston, lines.preston and plot.fisher (that also can
+	add points and lines). These are similar as plot.prestonfit and
+	plot.fisherfit, but without the fitted model. Among other things,
+	they can be used to add alternative models to fisherfit and
+	prestonfit models.
+
+	* sd() function for matrix or data.frame columns was deprecated in
+	R r57185 (R-to-be-2.15.0). The reason seems to be that users were
+	confused when median() did not work on data.frames, and as a
+	solution the R developers decided to take care that mean() or sd()
+	will not work either (it would be nice to understand how these
+	people think). Fixed in rda.default, capscale and
+	simulate.rda/cca/capscale. It seems that this was also implemented
+	in soon released R 2.14.0 as r57209 | maechler | 2011-10-10
+	19:28:33 +0300 (Mon, 10 Oct 2011), but as message() instead of a
+	warning(). 
+	
+	* nesteddisc: use only max 200 tries to reorder columns: tracing
+	showed that in most cases an improved ordering is found rather
+	quickly, and trying up to 1000 times takes awfully long. Now
+	faster, and usually as good as earlier, but slacker.
+
+	* simulate.rda/cca: implemented 'nsim' or an option to generate an
+	array of simulated matrices inheriting from "simmat" object and
+	using print.simmat() for a compact display. If 'nsim = 1', similar
+	2-dim matrix is returned as before so that cca(simulate(mod))
+	still works. For 'nsim > 1', the 'indx' argument should have
+	'nsim' rows, but if 'nsim' is missing, number of rows in 'indx'
+	will give the number of simulations, and 'indx' can be made with
+	permute::shuffleSet().  Implemented for rda() and cca() results,
+	but not for capscale() where simulate returns a "dist" object
+	which is nasty to pack into an array.
+
+	* oecosimu: An attempt to set 'parallel' processing in evaluating
+	the statistic, and only evaluating the statistic -- the simulation
+	of null models is not influenced. Both "multicore" (fork) and
+	"snow" (socket) style parallelization are implemented. 
+
+	* permutest.cca: implemented 'parallel' processing in
+	permutest.cca.  The parallelization only works in R 2.14.0 (alpha)
+	and later with the 'parallel' package. Function permutest.cca gets
+	new arguments 'parallel' (defaulting to 1) that gives the number
+	of parallel processes, and 'kind' that selects the parallelization
+	style which is either "snow" (large overhead, but works in all
+	OS's) and "multicore" (faster, but only works in unix-like systems
+	like Linux and MacOS X). The arguments are silently ignored if the
+	system is not capable of parallel processing. The functionality
+	cannot be included cleanly: it depends on the package 'parallel',
+	but suggesting 'parallel' fails R CMD check in the current R
+	release (2.13.2) which does not yet have 'parallel'. So we get
+	warnings: 'library' or 'require' "call not declared from:
+	parallel", and "permutest.cca: no visible global function
+	definition for ‘mclapply’". However, with these warnings,
+	the function passes tests in R 2.13.2.
+
+	* permutest.cca: the user interface changed so that argument
+	'permutations' can be either the number of permutations (like
+	previously), or a matrix of permutations like produced by
+	permute::shuffleSet(). This was done to move RNG outside
+	parallelized code. This will also allow much simpler and
+	anova.cca* code. Currently, the 'strata' argument will not work,
+	but this will be fixed "real soon now".
+
+Version 2.1-2 (opened October 4, 2011)
+
+	* permutest.cca could not be update()d, because "permutest.cca"
+	was not exported from NAMESPACE -- only "permutest" was
+	exported. Another buglet (and this calls for checking other 'call'
+	items that return non-exported calls).
+
+	* metaMDS did not reset 'tries' when the analysis was started from
+	'previous.best' of a different model (except when no. of dims 'k'
+	changed). I think this was a bug(let). Fixed in metaMDSiter.R.
+	
+	* commsimulator is going to be deprecated: it is no longer used in
+	oecosimu() or elsewhere in other functions. Currently, functions
+	make.commsim(), nullmodel() and simulate.nullmodel() do the same,
+	and more. As the first step, its documentation in oecosimu.Rd is
+	moved to vegan-deprecated.Rd.
+	
+	* examples: cut down some excessively time consuming examples.
+	Profiling of all vegan examples showed that 25% of total time was
+	spent in anova.cca, and 12.6% in ordistep, but they probably are
+	sufficiently documented more quickly. 
+
+Version 2.1-1 (opened September 20, 2011)
+
+	* oecosimu: the 'comm' argument can be either 1) community data,
+	2) a nullmodel object or 3) a simmat object. If 'comm' is a
+	nullmodel, simulation method is found from the nullmodel object,
+	and if 'comm' is a simmat object, its matrices are analysed
+	without simulations within oecosimu(), and different statistics
+	can be swept out based on the same set of simulated matrices.
+
+	* permatfull/swap is using the new simulate.nullmodel(...)
+	infrastructure. permatfull1 and permatswap1 removed from vegan 
+	devel.
+
+	* Value of 'mode' is set to "double" for the following
+	null model algorithms in make.commsim: abuswap_r, abuswap_c,
+	r00_samp, r0_samp, c0_samp. These can take any nonnegative real 
+	valued matrix as input. nullmodel function now handles storage
+	mode reliably.
+
+	* meandist bug fix: tapply() function used to find mean group x
+	group dissimilarities could reorder the class levels and return a
+	confused matrix. This could happen in particular when the
+	'grouping' was a vector of integers which then were ordered
+	alphabetically so that "1" < "10" < "2". Now uses internally more
+	stable way of applying tapply() which should not have the danger of
+	reordering the levels.  Incidentally, this also seems to be
+	faster. The problem was found by Dr Miguel Alvarez (Univ Bonn).
+
+	* nestedness.c: changed interface in "swapcount" and "rswapcount"
+	which now require integer data matrix. The first argument in .C
+	call should now be defined as as.integer() instead of old
+	as.double().
+
+	* str.nullmodel: new function to display the *str*ucture of the
+	"nullmodel". The "nullmodel" is an environment and therefore does
+	not show in usual str() although its items can be accessed with
+	$-notation. No documentation, but alias in nullmodel.Rd.
+
+	* New functions: commsim is used to define Null Model Algorithms
+	via a function that returns n x m x nsim array of simulated
+	matrices based on structural constraints. make.commsim contains
+	Null Model Algorithms already defined in vegan from commsimulator
+	and permat* functions (and some more).  The nullmodel function
+	creates an environment, where statistics of the input matrix are
+	stored. The environment also stores updated status of sequential
+	algorithms and current number of iterations. The update and
+	simulate methods are used to update the nullmodel (for sequential
+	algorithms) or simulate random matrices, respectively. The
+	simulate method returns the n x m x nsim array (simmat class).
+	Efficiency gains are sometimes high (because marginal statistics
+	are calculated only once by nullmodel), but not significant in most
+	cases.  Most advantageously, this implementation can unite the
+	commsimulator and permat* branches and can serve as basis for
+	further extensions.  Current intent is to investigate how this low
+	level infrastructure can be used within oecosimu and permat*
+	functions without breaking current vegan functionality.
+
+Version 2.1-0 (closed September 20, 2011)
+
+	* New major version opened with the release of vegan_2.0-0 on
+	September 8, 2011.
+
+	* nestedness.c: isDiag* uses now switch(sX) where sX is the number
+	of non-empty cells. The "swapcount" method uses new isDiagFill
+	which finds the largest swappable element that does not change the
+	fill, and "rswapcount" uses isDiag which finds both the largest
+	swappable element and the change in fill. The swap and trialswap also
+	find first the fill of the 2x2 submatrix, and continue only if
+	fill == 2. The measurable effects are small again (perhaps 1%).
+
+	* tests: added tests for commsimulator, permatswap1 and
+	permatfull1 before starting the adventures with nestedness.c.
+
+	* commsimulator: a bit less overhead -- all attributes set
+	simultaneously instead of setting separately dim, rownames and
+	colnames. The results should be identical(), running should be
+	marginally faster or neutral.
+
+	* permatswap: there was a bug in internal C routine, and therefore
+	not all permissible swaps were performed. Based on limited
+	analysis, the effects of this bug seem to be negligible.  It is,
+	however, recommended to re-run all analyses. The C code was made
+	faster by getting quickly out from isDiag* if there are only 0 or
+	1 filled items, because there is nothing to swap. Tests show that
+	the C code indeed is ca 10% faster in permatswap(BCI,
+	method="swa", thin=1000, times=999), but only a 20-25% of time was
+	spent in C, and the new permatswap/permatswap1 spends 2.2x longer
+	in other parts. With faster C code the net slowdown is 1.7x.
+
+	* various attempts of speed-up (often in vain): The speed-up
+	started with permatfull/permatswap which appeared to be by far the
+	slowest functions in R CMD check --timings. Later this proved to
+	be a feature of checking these functions in MacBook Air/MacOS X:
+	the functions were not slow in Linux, nor in MacBook when the very
+	same tests were run outside R CMD check. However, several changes
+	were made: 
+
+	- permatfull/permatswap avoid data.frame/matrix casting, and save
+	results in matrices. This was based on misleading test statistics
+	in MacOS, and it is not sure what are the real effects.
+	- commsimulator: smaller overhead.
+	- sample.int was used in place of sample in commsimulator,
+	nesteddisc, permuted.index and poolaccum (the effects may be
+	measurable for permuted.index, but not large).
+	- centroids.cca: much faster. This was perhaps real -- profiling
+	(not timing) showed that cca.formula/rda.formula have a large
+	overhead over cca.default/rda.default, and centroids.cca was
+	responsible for a lot of that -- most is due to ordiParseFormula.
+	- permutest.cca uses La.svd, but the effects are non-measurable
+	and only concern case first = TRUE.
+	- MDSrotate example: envfit does not do permutations.
+
+	* New functions: permatfull1 and permatswap1. Both functions
+	return a single permuted matrix. These functions are now called
+	repeatedly by the corresponding permatfull and permatswap
+	functions.
+
+	* New function: clamtest (with summary and plot methods).  The
+	method uses a multinomial model based on estimated species
+	relative abundance in two habitats, it minimizes bias due to
+	differences in sampling intensities between two habitat types as
+	well as bias due to insufficient sampling within each habitat. The
+	method permits a robust statistical classification of habitat
+	specialists and generalists, without excluding rare species a
+	priori. Based on Chazdon et al. 2011 (Ecology, 92, 1332--1343).
+
+	* raupcrick: new function to implement Raup-Crick (dissimilarity/
+	probability) index with unequal sampling probabilities of species.
+	Brian Inouye informed about their paper (Chase et al., Ecosphere
+	2:art24 [doi:10.1890/ES10-00117.1]; 2011) where they showed that
+	Raup & Crick said that we should use sampling probabilities
+	proportional to species frequencies in assessing their index, but
+	vegdist(x, "raup") uses equal probabilities. Unequal sampling
+	probabilities cannot be directly implemented in vegan, but the
+	Chase et al. method can be implemented as oecosimu(x, function(x)
+	designdist(x, "J"), method="r1"). Basically, the current function
+	uses this, but with boosted code that is much faster than
+	designdist(). 
+
 Version 2.0-0 (released September 8, 2011)
 
 	* opened the release candidate of vegan_2.0-0 on September 3,
-	2011 in the devel branch pkg/vegan at r1785.
-	* Release: Copied the devel pkg/vegan branch to release
-	branches/2.0 at r1796 (inst/doc/vegan.bib at r1798), and removed
-	tests/ directory.
-	* merge r1800: Abstract to diversity-vegan.Rnw.
+	2011. 
+
 	* some old functions used attributes(x)$which instead of more
 	correct attr(x, "which"), and in addition postMDS() used
 	attributes(x)$names instead of names(). Concerns anosim(),
@@ -322,11 +1677,10 @@ Version 2.0-0 (released September 8, 2011)
 	since the first vegan release on 6/9/01, and it was changed day
 	before it turned ten-years-old. After this, wisconsin() is the
 	only unchanged function from the first release.
+
 	* vegan-defunct: put definitions of removed "new" permutation
 	functions to vegan-defunct and have a vegan-defunct.Rd for them. 
 
-VEGAN DEVEL VERSIONS at http://vegan.r-forge.r-project.org/
-
 Version 1.92-2 (closed September 3, 2011)
 
 	* monoMDS: saves and displays info on the dissimilarities used,
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
index f8124dc..94a250e 100644
--- a/inst/NEWS.Rd
+++ b/inst/NEWS.Rd
@@ -2,6 +2,290 @@
 \title{vegan News}
 \encoding{UTF-8}
 
+\section{Changes in version 2.2-0}{
+
+  \subsection{GENERAL}{
+    \itemize{
+  
+      \item Several \pkg{vegan} functions can now use parallel
+      processing for slow and repeating calculations. All these
+      functions have argument \code{parallel}. The argument can be an
+      integer giving the number of parallel processes. In unix-alikes
+      (Mac OS, Linux) this will launch \code{"multicore"} processing
+      and in Windows it will set up \code{"snow"} clusters as described
+      in the documentation of the \pkg{parallel} package. If \code{option}
+      \code{"mc.cores"} is set to an integer > 1, this will be used to
+      automatically start parallel processing. Finally, the argument
+      can also be a previously set up \code{"snow"} cluster which will
+      be used both in Windows and in unix-alikes. \pkg{Vegan} vignette
+      on Design decision explains the implementation (use
+      \code{vegandocs("decission")}), and the \pkg{parallel} package has more
+      extensive documentation on parallel processing in \R.
+
+      The following functions use parallel processing in analysing
+      permutation statistics: \code{adonis}, \code{anosim},
+      \code{anova.cca} (and \code{permutest.cca}), \code{mantel} (and
+      \code{mantel.partial}), \code{mrpp}, \code{ordiareatest},
+      \code{permutest.betadisper} and \code{simper}. In addition,
+      \code{bioenv} can compare several candidate sets of models in
+      parallel, \code{metaMDS} can launch several random starts in
+      parallel, and \code{oecosimu} can evaluate test statistics for
+      several null models in parallel.
+
+      \item All permutation tests are based on the \pkg{permute} package
+      which offers strong tools for restricted permutation. All these
+      functions have argument \code{permutations}. The default usage of
+      simple non-restricted permutations is achieved by giving a single
+      integer number. Restricted permutations can be defined using the
+      \code{how} function of the \pkg{permute} package. Finally, the
+      argument can be a permutation matrix where rows define
+      permutations. It is possible to use external or user constructed
+      permutations.
+
+      See \code{help(permutations)} for a brief introduction on
+      permutations in \pkg{vegan}, and \pkg{permute} package for the
+      full documentation. The vignette of the \pkg{permute} package can
+      be read from \pkg{vegan} with command
+      \code{vegandocs("permutations")}.
+
+      The following functions use the \pkg{permute} package:
+      \code{CCorA}, \code{adonis}, \code{anosim}, \code{anova.cca} (plus
+      associated \code{permutest.cca}, \code{add1.cca},
+      \code{drop1.cca}, \code{ordistep}, \code{ordiR2step}),
+      \code{envfit} (plus associated \code{factorfit} and
+      \code{vectorfit}), \code{mantel} (and \code{mantel.partial}),
+      \code{mrpp}, \code{mso}, \code{ordiareatest},
+      \code{permutest.betadisper}, \code{protest} and \code{simper}.
+
+      \item Community null model generation has been completely
+      redesigned and rewritten. The communities are constructed with
+      new \code{nullmodel} function and defined in a low level
+      \code{commsim} function. The actual null models are generated
+      with a \code{simulate} function that builds an array of null
+      models. The new null models include a wide array of quantitative
+      models in addition to the old binary models, and users can plug
+      in their own generating functions. The basic tool invoking and
+      analysing null models is \code{oecosimu}. The null models are
+      often used only for the analysis of nestedness, but the
+      implementation in \code{oecosimu} allows analysing any
+      statistic, and null models are better seen as an alternative to
+      permutation tests.
+  
+    } %end itemize
+  } % end general
+
+  \subsection{INSTALLATION}{
+    \itemize{
+
+      \item \pkg{vegan} package dependencies and namespace imports
+      were adapted to changes in \R, and no more trigger warnings and
+      notes in package tests.
+
+      \item Three-dimensional ordination graphics using
+      \pkg{scatterplot3d} for static plots and \pkg{rgl} for dynamic
+      plots were removed from \pkg{vegan} and moved to a companion
+      package \pkg{vegan3d}. The package is available in CRAN.
+
+     } %end itemize
+   } % end installation
+
+  \subsection{NEW FUNCTIONS}{
+    \itemize{
+
+      \item Function \code{dispweight} implements dispersion weighting
+      of Clarke et al. (\emph{Marine Ecology Progress Series}, 320,
+      11--27).  In addition, we implemented a new method for
+      generalized dispersion weighting \code{gdispweight}. Both
+      methods downweight species that are significantly
+      over-dispersed.
+
+      \item New \code{hclust} support functions \code{reorder},
+      \code{rev} and \code{scores}. Functions \code{reorder} and
+      \code{rev} are similar as these functions for \code{dendrogram}
+      objects in base \R. However, \code{reorder} can use (and defaults
+      to) weighted mean. In weighted mean the node average is always the
+      mean of member leaves, whereas the \code{dendrogram} uses always
+      unweighted means of joined branches.
+
+      \item Function \code{ordiareatest} supplements \code{ordihull} and
+      \code{ordiellipse} and provides a randomization test for the
+      one-sided alternative hypothesis that convex hulls or ellipses in
+      two-dimensional ordination space have smaller areas than with
+      randomized groups.
+
+      \item Function \code{permustats} extracts and inspects permutation
+      results with support functions \code{summary}, \code{density},
+      \code{densityplot}, \code{qqnorm} and \code{qqmath}. The
+      \code{density} and \code{qqnorm} are standard \R{} tools that only
+      work with one statistic, and \code{densityplot} and \code{qqmath}
+      are \pkg{lattice} graphics that work with univariate and
+      multivariate statistics. The results of following functions can be
+      extracted: \code{anosim}, \code{adonis}, \code{mantel} (and
+      \code{mantel.partial}), \code{mrpp}, \code{oecosimu},
+      \code{permutest.cca} (but not the corresponding \code{anova}
+      methods), \code{permutest.betadisper}, and \code{protest}.
+
+      \item \code{stressplot} functions display the ordination distances
+      at given number of dimensions against original distances.  The
+      method functins are similar to \code{stressplot} for
+      \code{metaMDS}, and always use the inherent distances of each
+      ordination method. The functions are available for the results
+      \code{capscale}, \code{cca}, \code{princomp}, \code{prcomp},
+      \code{rda}, and \code{wcmdscale}.
+
+    } % end itemize
+  } % end new functions
+
+  \subsection{BUG FIXES}{
+    \itemize{
+
+      \item \code{cascadeKM} of only one group will be \code{NA} instead
+      of a random value. 
+
+      \item \code{ordiellipse} can handle points exactly on a line,
+      including only two points (with a warning).
+
+      \item plotting \code{radfit} results for several species failed if
+      any of the communities had no species or had only one species.
+
+      \item \code{RsquareAdj} for \code{capscale} with negative
+      eigenvalues will now report \code{NA} instead of using biased
+      method of \code{rda} results.
+
+      \item \code{simper} failed when a group had only a single member.
+
+      }% end itemize
+  } % end bug fixes
+
+  \subsection{NEW FEATURES}{
+    \itemize{
+
+      \item \code{anova.cca} functions were re-written to use the
+      \pkg{permute} package. Old results may not be exactly
+      reproduced, and models with missing data may fail in several
+      cases. There is a new option of analysing a sequence of models
+      against each other.
+
+      \item \code{simulate} functions for \code{cca} and \code{rda}
+      can return several simulations in a \code{nullmodel} compatible
+      object. The functions can produce simulations with correlated
+      errors (also for \code{capscale}) in parametric simulation with
+      Gaussian error.
+
+      \item \code{bioenv} can use Manhattan, Gower and Mahalanobis
+      distances in addition to the default Euclidean. New helper
+      function \code{bioenvdist} can extract the dissimilarities
+      applied in best model or any other model.
+
+      \item \code{metaMDS(..., trace = 2)} will show convergence
+      information with the default \code{monoMDS} engine.
+
+      \item Function \code{MDSrotate} can rotate a \eqn{k}-dimensional
+      ordination to \eqn{k-1} variables. When these variables are
+      correlated (like usually is the case), the vectors can also be
+      correlated to previously rotated dimensions, but will be
+      uncorrelated to all later ones.
+
+      \item \pkg{vegan} 2.0-10 changed the weighted \code{nestednodf}
+      so that weighted analysis of binary data was equivalent to
+      binary analysis. However, this broke the equivalence to the
+      original method. Now the function has an argument \code{wbinary}
+      to select the method of analysis. The problem was reported and a
+      fix submitted by Vanderlei Debastiani (Universidade Federal do
+      Rio Grande do Sul, Brasil).
+
+      \item \code{ordiellipse}, \code{ordihull} and \code{ordispider}
+      can handle missing values in \code{groups}.
+
+      \item \code{ordispider} can now use spatial medians instead of
+      means. 
+
+      \item \code{rankindex} can use Manhattan, Gower and Mahalanobis
+      distance in addition to the default Euclidean.
+
+      \item User can set colours and line types in function
+      \code{rarecurve} for plotting rarefaction curves.
+
+      \item \code{spantree} gained a support function \code{as.hclust}
+      to change the minimum spanning tree into an \code{hclust} tree.
+
+      \item \code{fitspecaccum} can do weighted analysis. Gained
+      \code{lines} method.
+
+      \item Functions for extrapolated number of species or for the size
+      of species pool using Chao method were modified following Chiu et
+      al., \emph{Biometrics} 70, 671--682 (2014).
+
+      Incidence based \code{specpool} can now use (and defaults to)
+      small sample correction with number of sites as the sample
+      size. Function uses basic Chao extrapolation based on the ratio of
+      singletons and doubletons, but switches now to bias corrected Chao
+      extrapolation if there are no doubletons (species found
+      twice). The variance formula for bias corrected Chao was derived
+      following the supporting
+      \href{http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo}{online material}
+      and differs slightly from Chiu et al. (2014).
+
+      The \code{poolaccum} function was changed similarly, but the small
+      sample correction is used always.
+
+      The abundance based \code{estimateR} uses bias corrected Chao
+      extrapolation, but earlier it estimated its variance with classic
+      Chao model. Now we use the widespread
+      \href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{approximate
+	equation} for variance.
+
+      With these changes these functions are more similar to
+            \href{http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB}{EstimateS}.
+
+      \item \code{tabasco} uses now \code{reorder.hclust} for
+      \code{hclust} object for better ordering than previously when it
+      cast trees to \code{dendrogram} objects.
+
+      \item \code{treedive} and \code{treedist} default now to
+       \code{match.force = TRUE} and can be silenced with
+      \code{verbose = FALSE}.
+
+      \item \code{vegdist} gained Mahalanobis distance.
+
+      \item Nomenclature updated in plant community data with the help
+      of \pkg{Taxonstand} and \pkg{taxize} packages. The taxonomy of
+      the \code{dune} data was adapted to the same sources and APG
+      III.  \code{varespec} and \code{dune} use 8-character names (4
+      from genus + 4 from species epithet). New data set on
+      phylogenetic distances for \code{dune} was extracted from Zanne
+      et al. (\emph{Nature} 506, 89--92; 2014).
+
+      \item User configurable plots for \code{rarecurve}.
+
+    } %end itemize
+  } % end new features
+
+  \subsection{DEPRECATED AND DEFUNCT}{
+    \itemize{
+
+      \item \code{strata} are deprecated in permutations. It is still
+      accepted but will be phased out in next releases. Use \code{how}
+      of \pkg{permute} package.
+
+      \item \code{cca}, \code{rda} and \code{capscale} do not return
+      scores scaled by eigenvalues: use \code{scores} function to
+      extract scaled results.
+
+      \item \code{commsimulator} is deprecated. Replace
+      \code{commsimulator(x, method)} with
+      \code{simulate(nullmodel(x, method))}.
+
+      \item \code{density} and \code{densityplot} for permutation
+      results are deprecated: use \code{permustats} with its
+      \code{density} and \code{densityplot} method.
+
+    } %end itemize
+  } % end deprecated
+  
+
+} % end version 2.2-0
+
 \section{Changes in version 2.0-10}{
 
   \subsection{GENERAL}{
diff --git a/inst/doc/FAQ-vegan.pdf b/inst/doc/FAQ-vegan.pdf
index 34554d2..0379210 100644
Binary files a/inst/doc/FAQ-vegan.pdf and b/inst/doc/FAQ-vegan.pdf differ
diff --git a/inst/doc/NEWS.html b/inst/doc/NEWS.html
index 4694118..b61ebb8 100644
--- a/inst/doc/NEWS.html
+++ b/inst/doc/NEWS.html
@@ -1,13 +1,344 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html><head><title>R: vegan News</title>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-<link rel="stylesheet" type="text/css" href="R.css">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><title>R: vegan News</title>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<link rel="stylesheet" type="text/css" href="R.css" />
 </head><body>
 
-<table width="100%" summary="page for NEWS"><tr><td>NEWS</td><td align="right">R Documentation</td></tr></table>
+<table width="100%" summary="page for NEWS"><tr><td>NEWS</td><td style="text-align: right;">R Documentation</td></tr></table>
 
 <h2>vegan News</h2>
 
+<h3>Changes in version 2.2-0</h3>
+
+
+
+<h4>GENERAL</h4>
+
+
+<ul>
+<li><p> Several <span class="pkg">vegan</span> functions can now use parallel
+processing for slow and repeating calculations. All these
+functions have argument <code>parallel</code>. The argument can be an
+integer giving the number of parallel processes. In unix-alikes
+(Mac OS, Linux) this will launch <code>"multicore"</code> processing
+and in Windows it will set up <code>"snow"</code> clusters as described
+in the documentation of the <span class="pkg">parallel</span> package. If <code>option</code>
+<code>"mc.cores"</code> is set to an integer > 1, this will be used to
+automatically start parallel processing. Finally, the argument
+can also be a previously set up <code>"snow"</code> cluster which will
+be used both in Windows and in unix-alikes. <span class="pkg">Vegan</span> vignette
+on Design decision explains the implementation (use
+<code>vegandocs("decission")</code>, and <span class="pkg">parallel</span> package has more
+extensive documentation on parallel processing in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>.
+</p>
+<p>The following functions use parallel processing in analysing
+permutation statistics: <code>adonis</code>, <code>anosim</code>,
+<code>anova.cca</code> (and <code>permutest.cca</code>), <code>mantel</code> (and
+<code>mantel.partial</code>), <code>mrpp</code>, <code>ordiareatest</code>,
+<code>permutest.betadisper</code> and <code>simper</code>. In addition,
+<code>bioenv</code> can compare several candidate sets of models in
+parallel, <code>metaMDS</code> can launch several random starts in
+parallel, and <code>oecosimu</code> can evaluate test statistics for
+several null models in parallel.
+</p>
+</li>
+<li><p> All permutation tests are based on the <span class="pkg">permute</span> package
+which offers strong tools for restricted permutation. All these
+functions have argument <code>permutations</code>. The default usage of
+simple non-restricted permutations is achieved by giving a single
+integer number. Restricted permutations can be defined using the
+<code>how</code> function of the <span class="pkg">permute</span> package. Finally, the
+argument can be a permutation matrix where rows define
+permutations. It is possible to use external or user constructed
+permutations.
+</p>
+<p>See <code>help(permutations)</code> for a brief introduction on
+permutations in <span class="pkg">vegan</span>, and <span class="pkg">permute</span> package for the
+full documentation. The vignette of the <span class="pkg">permute</span> package can
+be read from <span class="pkg">vegan</span> with command
+<code>vegandocs("permutations")</code>.
+</p>
+<p>The following functions use the <span class="pkg">permute</span> package:
+<code>CCorA</code>, <code>adonis</code>, <code>anosim</code>, <code>anova.cca</code> (plus
+associated <code>permutest.cca</code>, <code>add1.cca</code>,
+<code>drop1.cca</code>, <code>ordistep</code>, <code>ordiR2step</code>),
+<code>envfit</code> (plus associated <code>factorfit</code> and
+<code>vectorfit</code>), <code>mantel</code> (and <code>mantel.partial</code>),
+<code>mrpp</code>, <code>mso</code>, <code>ordiareatest</code>,
+<code>permutest.betadisper</code>, <code>protest</code> and <code>simper</code>.
+</p>
+</li>
+<li><p> Community null model generation has been completely
+redesigned and rewritten. The communities are constructed with
+new <code>nullmodel</code> function and defined in a low level
+<code>commsim</code> function. The actual null models are generated
+with a <code>simulate</code> function that builds an array of null
+models. The new null models include a wide array of quantitative
+models in addition to the old binary models, and users can plug
+in their own generating functions. The basic tool invoking and
+analysing null models is <code>oecosimu</code>. The null models are
+often used only for the analysis of nestedness, but the
+implementation in <code>oecosimu</code> allows analysing any
+statistic, and null models are better seen as an alternative to
+permutation tests.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>INSTALLATION</h4>
+
+
+<ul>
+<li> <p><span class="pkg">vegan</span> package dependencies and namespace imports
+were adapted to changes in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>, and no more trigger warnings and
+notes in package tests.
+</p>
+</li>
+<li><p> Three-dimensional ordination graphics using
+<span class="pkg">scatterplot3d</span> for static plots and <span class="pkg">rgl</span> for dynamic
+plots were removed from <span class="pkg">vegan</span> and moved to a companion
+package <span class="pkg">vegan3d</span>. The package is available in CRAN.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>NEW FUNCTIONS</h4>
+
+
+<ul>
+<li><p> Function <code>dispweight</code> implements dispersion weighting
+of Clarke et al. (<em>Marine Ecology Progress Series</em>, 320,
+11–27).  In addition, we implemented a new method for
+generalized dispersion weighting <code>gdispweight</code>. Both
+methods downweight species that are significantly
+over-dispersed.
+</p>
+</li>
+<li><p> New <code>hclust</code> support functions <code>reorder</code>,
+<code>rev</code> and <code>scores</code>. Functions <code>reorder</code> and
+<code>rev</code> are similar as these functions for <code>dendrogram</code>
+objects in base <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>. However, <code>reorder</code> can use (and defaults
+to) weighted mean. In weighted mean the node average is always the
+mean of member leaves, whereas the <code>dendrogram</code> uses always
+unweighted means of joined branches.
+</p>
+</li>
+<li><p> Function <code>ordiareatest</code> supplements <code>ordihull</code> and
+<code>ordiellipse</code> and provides a randomization test for the
+one-sided alternative hypothesis that convex hulls or ellipses in
+two-dimensional ordination space have smaller areas than with
+randomized groups.
+</p>
+</li>
+<li><p> Function <code>permustats</code> extracts and inspects permutation
+results with support functions <code>summary</code>, <code>density</code>,
+<code>densityplot</code>, <code>qqnorm</code> and <code>qqmath</code>. The
+<code>density</code> and <code>qqnorm</code> are standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> tools that only
+work with one statistic, and <code>densityplot</code> and <code>qqmath</code>
+are <span class="pkg">lattice</span> graphics that work with univariate and
+multivariate statistics. The results of following functions can be
+extracted: <code>anosim</code>, <code>adonis</code>, <code>mantel</code> (and
+<code>mantel.partial</code>), <code>mrpp</code>, <code>oecosimu</code>,
+<code>permutest.cca</code> (but not the corresponding <code>anova</code>
+methods), <code>permutest.betadisper</code>, and <code>protest</code>.
+</p>
+</li>
+<li> <p><code>stressplot</code> functions display the ordination distances
+at given number of dimensions against original distances.  The
+method functions are similar to <code>stressplot</code> for
+<code>metaMDS</code>, and always use the inherent distances of each
+ordination method. The functions are available for the results
+<code>capscale</code>, <code>cca</code>, <code>princomp</code>, <code>prcomp</code>,
+<code>rda</code>, and <code>wcmdscale</code>.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>BUG FIXES</h4>
+
+
+<ul>
+<li> <p><code>cascadeKM</code> of only one group will be <code>NA</code> instead
+of a random value. 
+</p>
+</li>
+<li> <p><code>ordiellipse</code> can handle points exactly on a line,
+including only two points (with a warning).
+</p>
+</li>
+<li><p> plotting <code>radfit</code> results for several species failed if
+any of the communities had no species or had only one species.
+</p>
+</li>
+<li> <p><code>RsquareAdj</code> for <code>capscale</code> with negative
+eigenvalues will now report <code>NA</code> instead of using biased
+method of <code>rda</code> results.
+</p>
+</li>
+<li> <p><code>simper</code> failed when a group had only a single member.
+</p>
+</li></ul>
+
+ 
+
+
+<h4>NEW FEATURES</h4>
+
+
+<ul>
+<li> <p><code>anova.cca</code> functions were re-written to use the
+<span class="pkg">permute</span> package. Old results may not be exactly
+reproduced, and models with missing data may fail in several
+cases. There is a new option of analysing a sequence of models
+against each other.
+</p>
+</li>
+<li> <p><code>simulate</code> functions for <code>cca</code> and <code>rda</code>
+can return several simulations in a <code>nullmodel</code> compatible
+object. The functions can produce simulations with correlated
+errors (also for <code>capscale</code>) in parametric simulation with
+Gaussian error.
+</p>
+</li>
+<li> <p><code>bioenv</code> can use Manhattan, Gower and Mahalanobis
+distances in addition to the default Euclidean. New helper
+function <code>bioenvdist</code> can extract the dissimilarities
+applied in best model or any other model.
+</p>
+</li>
+<li> <p><code>metaMDS(..., trace = 2)</code> will show convergence
+information with the default <code>monoMDS</code> engine.
+</p>
+</li>
+<li><p> Function <code>MDSrotate</code> can rotate a <i>k</i>-dimensional
+ordination to <i>k-1</i> variables. When these variables are
+correlated (like usually is the case), the vectors can also be
+correlated to previously rotated dimensions, but will be
+uncorrelated to all later ones.
+</p>
+</li>
+<li> <p><span class="pkg">vegan</span> 2.0-10 changed the weighted <code>nestednodf</code>
+so that weighted analysis of binary data was equivalent to
+binary analysis. However, this broke the equivalence to the
+original method. Now the function has an argument <code>wbinary</code>
+to select the method of analysis. The problem was reported and a
+fix submitted by Vanderlei Debastiani (Universidade Federal do
+Rio Grande do Sul, Brasil).
+</p>
+</li>
+<li> <p><code>ordiellipse</code>, <code>ordihull</code> and <code>ordispider</code>
+can handle missing values in <code>groups</code>.
+</p>
+</li>
+<li> <p><code>ordispider</code> can now use spatial medians instead of
+means. 
+</p>
+</li>
+<li> <p><code>rankindex</code> can use Manhattan, Gower and Mahalanobis
+distance in addition to the default Euclidean.
+</p>
+</li>
+<li><p> User can set colours and line types in function
+<code>rarecurve</code> for plotting rarefaction curves.
+</p>
+</li>
+<li> <p><code>spantree</code> gained a support function <code>as.hclust</code>
+to change the minimum spanning tree into an <code>hclust</code> tree.
+</p>
+</li>
+<li> <p><code>fitspecaccum</code> can do weighted analysis. Gained
+<code>lines</code> method.
+</p>
+</li>
+<li><p> Functions for extrapolated number of species or for the size
+of species pool using Chao method were modified following Chiu et
+al., <em>Biometrics</em> 70, 671–682 (2014).
+</p>
+<p>Incidence based <code>specpool</code> can now use (and defaults to)
+small sample correction with number of sites as the sample
+size. Function uses basic Chao extrapolation based on the ratio of
+singletons and doubletons, but switches now to bias corrected Chao
+extrapolation if there are no doubletons (species found
+twice). The variance formula for bias corrected Chao was derived
+following the supporting
+<a href="http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo">online material</a>
+and differs slightly from Chiu et al. (2014).
+</p>
+<p>The <code>poolaccum</code> function was changed similarly, but the small
+sample correction is used always.
+</p>
+<p>The abundance based <code>estimateR</code> uses bias corrected Chao
+extrapolation, but earlier it estimated its variance with classic
+Chao model. Now we use the widespread
+<a href="http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB">approximate
+equation</a> for variance.
+</p>
+<p>With these changes these functions are more similar to
+<a href="http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB">EstimateS</a>.
+</p>
+</li>
+<li> <p><code>tabasco</code> uses now <code>reorder.hclust</code> for
+<code>hclust</code> object for better ordering than previously when it
+cast trees to <code>dendrogram</code> objects.
+</p>
+</li>
+<li> <p><code>treedive</code> and <code>treedist</code> default now to
+<code>match.force = TRUE</code> and can be silenced with
+<code>verbose = FALSE</code>.
+</p>
+</li>
+<li> <p><code>vegdist</code> gained Mahalanobis distance.
+</p>
+</li>
+<li><p> Nomenclature updated in plant community data with the help
+of <span class="pkg">Taxonstand</span> and <span class="pkg">taxize</span> packages. The taxonomy of
+the <code>dune</code> data was adapted to the same sources and APG
+III.  <code>varespec</code> and <code>dune</code> use 8-character names (4
+from genus + 4 from species epithet). New data set on
+phylogenetic distances for <code>dune</code> was extracted from Zanne
+et al. (<em>Nature</em> 506, 89–92; 2014).
+</p>
+</li>
+<li><p> User configurable plots for <code>rarecurve</code>.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>DEPRECATED AND DEFUNCT</h4>
+
+
+<ul>
+<li> <p><code>strata</code> are deprecated in permutations. It is still
+accepted but will be phased out in next releases. Use <code>how</code>
+of <span class="pkg">permute</span> package.
+</p>
+</li>
+<li> <p><code>cca</code>, <code>rda</code> and <code>capscale</code> do not return
+scores scaled by eigenvalues: use <code>scores</code> function to
+extract scaled results.
+</p>
+</li>
+<li> <p><code>commsimulator</code> is deprecated. Replace
+<code>commsimulator(x, method)</code> with
+<code>simulate(nullmodel(x, method))</code>.
+</p>
+</li>
+<li> <p><code>density</code> and <code>densityplot</code> for permutation
+results are deprecated: use <code>permustats</code> with its
+<code>density</code> and <code>densityplot</code> method.
+</p>
+</li></ul>
+ 
+ 
+
+
 <h3>Changes in version 2.0-10</h3>
 
 
@@ -102,11 +433,11 @@ using the sampling effort as weights.
 
 <ul>
 <li><p> This version is released due to changes in programming
-interface and testing procedures in <font face="Courier New,Courier" color="#666666"><b>R</b></font> 3.0.2. If you are using an
-older version of <font face="Courier New,Courier" color="#666666"><b>R</b></font>, there is no need to upgrade <span class="pkg">vegan</span>. There
+interface and testing procedures in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 3.0.2. If you are using an
+older version of <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>, there is no need to upgrade <span class="pkg">vegan</span>. There
 are no new features nor bug fixes. The only user-visible changes
 are in documentation and in output messages and formatting. Because
-of <font face="Courier New,Courier" color="#666666"><b>R</b></font> changes, this version is dependent on <font face="Courier New,Courier" color="#666666"><b>R</b></font> version 2.14.0
+of <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> changes, this version is dependent on <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> version 2.14.0
 or newer and on <span class="pkg">lattice</span> package.
 </p>
 </li></ul>
@@ -122,7 +453,7 @@ or newer and on <span class="pkg">lattice</span> package.
 
 <ul>
 <li><p> This is a maintenance release that fixes some issues
-raised by changed in <font face="Courier New,Courier" color="#666666"><b>R</b></font> toolset for processing vignettes.  In
+raised by changed in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> toolset for processing vignettes.  In
 the same we also fix some typographic issues in the vignettes.
 </p>
 </li></ul>
@@ -168,7 +499,7 @@ be used to replot the saved result.
 
 <ul>
 <li> <p><code>tabasco()</code> is a new function for graphical display
-of community data matrix.  Technically it is an interface to <font face="Courier New,Courier" color="#666666"><b>R</b></font>
+of community data matrix.  Technically it is an interface to <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>
 <code>heatmap</code>, but its use is closer to <span class="pkg">vegan</span> function
 <code>vegemite</code>. The function can reorder the community data
 matrix similarly as <code>vegemite</code>, for instance, by ordination
@@ -357,7 +688,7 @@ statistic is evaluated within the function.
 <code>plot</code> etc. of the results. These methods are only used if
 the full <code>wcmdscale</code> result is returned with, e.g., argument
 <code>eig = TRUE</code>. The default is still to return only a matrix of
-scores similarly as the standard <font face="Courier New,Courier" color="#666666"><b>R</b></font> function <code>cmdscale()</code>,
+scores similarly as the standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> function <code>cmdscale()</code>,
 and in that case the new methods are not used.
 </p>
 </li></ul>
@@ -456,13 +787,13 @@ in the data.  All functions now have methods <code>AIC</code>,
 version of LaTeX (TeXLive 2012).
 </p>
 </li>
-<li> <p><font face="Courier New,Courier" color="#666666"><b>R</b></font> versions later than 2.15-1 (including development
+<li> <p><span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> versions later than 2.15-1 (including development
 version) report warnings and errors when installing and checking
 <span class="pkg">vegan</span>, and you must upgrade <span class="pkg">vegan</span> to this version.
 The warnings concern functions <code>cIndexKM</code> and
 <code>betadisper</code>, and the error occurs in <code>betadisper</code>.
 These errors and warnings were triggered by internal changes in
-<font face="Courier New,Courier" color="#666666"><b>R</b></font>.
+<span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>.
 </p>
 </li></ul>
 
@@ -593,8 +924,8 @@ argument can be used only with one set of points.
 <li><p> Added new nestedness functions <code>nestedbetasor</code> and
 <code>nestedbetajac</code> that implement multiple-site dissimilarity
 indices and their decomposition into turnover and nestedness
-components following Baselga (<EM>Global Ecology and
-Biogeography</EM> 19, 134–143; 2010).
+components following Baselga (<em>Global Ecology and
+Biogeography</em> 19, 134–143; 2010).
 </p>
 </li>
 <li><p> Added function <code>rarecurve</code> to draw rarefaction curves
@@ -604,8 +935,8 @@ for each curve.
 </p>
 </li>
 <li><p> Added function <code>simper</code> that implements
-“similarity percentages” of Clarke (<EM>Australian
-Journal of Ecology</EM> 18, 117–143; 1993).  The method compares
+“similarity percentages” of Clarke (<em>Australian
+Journal of Ecology</em> 18, 117–143; 1993).  The method compares
 two or more groups and decomposes the average between-group
 Bray-Curtis dissimilarity index to contributions by individual
 species.  The code was developed in 
@@ -677,8 +1008,8 @@ effect of constraining term to adjusted <i>R-squared</i>.
 </p>
 </li>
 <li><p> Added Cao dissimilarity (CYd) as a new dissimilarity
-method in <code>vegdist</code> following Cao et al., <EM>Water
-Envir Res</EM> 69, 95–106 (1997). The index should be good for
+method in <code>vegdist</code> following Cao et al., <em>Water
+Envir Res</em> 69, 95–106 (1997). The index should be good for
 data with high beta diversity and variable sampling
 intensity. Thanks to consultation to Yong Cao (Univ Illinois,
 USA).
@@ -757,7 +1088,7 @@ with no <code>groups</code> they are the LC scores.
 <ul>
 <li> <p><code>clamtest</code>: new function to classify species as
 generalists and specialists in two distinct habitats (CLAM test of
-Chazdon et al., <EM>Ecology</EM> 92, 1332–1343; 2011).  The test is
+Chazdon et al., <em>Ecology</em> 92, 1332–1343; 2011).  The test is
 based on multinomial distribution of individuals in two habitat
 types or sampling units, and it is applicable only to count data
 with no over-dispersion.
@@ -777,7 +1108,7 @@ frequencies.  <span class="pkg">Vegan</span> has Raup-Crick index as a choice in
 <code>vegdist</code>, but that uses equal sampling probabilities for
 species and analytic equations. The new <code>raupcrick</code>
 function uses simulation with <code>oecosimu</code>. The function
-follows Chase et al. (2011) <EM>Ecosphere</EM> 2:art24
+follows Chase et al. (2011) <em>Ecosphere</em> 2:art24
 [<a href="http://www.esajournals.org/doi/abs/10.1890/ES10-00117.1">doi:10.1890/ES10-00117.1</a>],
 and was developed with the consultation of Brian Inouye.
 </p>
@@ -810,12 +1141,12 @@ submatrix if all cells were filled.
 <code>update</code>d because of a ‘<span class="file">NAMESPACE</span>’ issue.
 </p>
 </li>
-<li> <p><font face="Courier New,Courier" color="#666666"><b>R</b></font> 2.14.0 changed so that it does not accept using
+<li> <p><span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 2.14.0 changed so that it does not accept using
 <code>sd()</code> function for matrices (which was the behaviour at
-least since <font face="Courier New,Courier" color="#666666"><b>R</b></font> 1.0-0), and several <span class="pkg">vegan</span> functions were
+least since <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 1.0-0), and several <span class="pkg">vegan</span> functions were
 changed to adapt to this change (<code>rda</code>, <code>capscale</code>,
 <code>simulate</code> methods for <code>rda</code>, <code>cca</code> and
-<code>capscale</code>). The change in <font face="Courier New,Courier" color="#666666"><b>R</b></font> 2.14.0 does not influence the
+<code>capscale</code>). The change in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 2.14.0 does not influence the
 results but you probably wish to upgrade <span class="pkg">vegan</span> to avoid
 annoying warnings.
 </p>
@@ -849,11 +1180,11 @@ early in tries, and the results are equally good in most cases.
 <li><p> Peter Minchin joins the <span class="pkg">vegan</span> team.
 </p>
 </li>
-<li> <p><span class="pkg">vegan</span> implements standard <font face="Courier New,Courier" color="#666666"><b>R</b></font> ‘<span class="file">NAMESPACE</span>’. In
+<li> <p><span class="pkg">vegan</span> implements standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> ‘<span class="file">NAMESPACE</span>’. In
 general, <code>S3</code> methods are not exported which means that you
 cannot directly use or see contents of functions like
 <code>cca.default</code>, <code>plot.cca</code> or <code>anova.ccabyterm</code>. To
-use these functions you should rely on <font face="Courier New,Courier" color="#666666"><b>R</b></font> delegation and simply
+use these functions you should rely on <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> delegation and simply
 use <code>cca</code> and for its result objects use <code>plot</code> and
 <code>anova</code> without suffix <code>.cca</code>. To see the contents of
 the function you can use <code>:::</code>, such as
@@ -900,7 +1231,7 @@ an environmental vector.
 <li> <p><code>eventstar</code> finds the minimum of the evenness profile
 on the Tsallis entropy, and uses this to find the corresponding
 values of diversity, evenness and numbers equivalent following
-Mendes et al. (<EM>Ecography</EM> 31, 450-456; 2008). The code was
+Mendes et al. (<em>Ecography</em> 31, 450-456; 2008). The code was
 contributed by Eduardo Ribeira Cunha and Heloisa Beatriz Antoniazi
 Evangelista and adapted to <span class="pkg">vegan</span> by Peter Solymos.
 </p>
@@ -909,9 +1240,9 @@ Evangelista and adapted to <span class="pkg">vegan</span> by Peter Solymos.
 the species accumulation results from <code>specaccum</code>. The
 function can use new self-starting species accumulation models
 in <span class="pkg">vegan</span> or other self-starting non-linear regression
-models in <font face="Courier New,Courier" color="#666666"><b>R</b></font>. The function can fit Arrhenius, Gleason, Gitay,
+models in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>. The function can fit Arrhenius, Gleason, Gitay,
 Lomolino (in <span class="pkg">vegan</span>), asymptotic, Gompertz,
-Michaelis-Menten, logistic and Weibull (in base <font face="Courier New,Courier" color="#666666"><b>R</b></font>) models. The
+Michaelis-Menten, logistic and Weibull (in base <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>) models. The
 function has <code>plot</code> and <code>predict</code> methods.
 </p>
 </li>
@@ -920,7 +1251,7 @@ function has <code>plot</code> and <code>predict</code> methods.
 <code>SSlomolino</code>. These can be used with <code>fitspecaccum</code> or
 directly in non-linear regression with <code>nls</code>. These functions
 were implemented because they were found good for species-area
-models by Dengler (<EM>J. Biogeogr.</EM> 36, 728-744; 2009).
+models by Dengler (<em>J. Biogeogr.</em> 36, 728-744; 2009).
 </p>
 </li></ul>
 
diff --git a/inst/doc/decision-vegan.R b/inst/doc/decision-vegan.R
index 5e9f01a..5aaded0 100644
--- a/inst/doc/decision-vegan.R
+++ b/inst/doc/decision-vegan.R
@@ -11,7 +11,29 @@ require(vegan)
 
 
 ###################################################
-### code chunk number 2: decision-vegan.Rnw:50-61
+### code chunk number 2: decision-vegan.Rnw:84-85 (eval = FALSE)
+###################################################
+## options(mc.cores = 2)
+
+
+###################################################
+### code chunk number 3: decision-vegan.Rnw:126-137 (eval = FALSE)
+###################################################
+## ## start up and define meandist()
+## library(vegan)
+## data(sipoo)
+## meandist <- 
+##     function(x) mean(vegdist(x, "bray"))
+## library(parallel)
+## clus <- makeCluster(4)
+## clusterEvalQ(clus, library(vegan))
+## mbc1 <- oecosimu(dune, meandist, "r2dtable", 
+##                  parallel = clus)
+## stopCluster(clus)
+
+
+###################################################
+### code chunk number 4: decision-vegan.Rnw:241-252
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 data(sipoo)
@@ -28,7 +50,7 @@ arrows(x,y, r, f(r, mod$p), lwd=4)
 
 
 ###################################################
-### code chunk number 3: decision-vegan.Rnw:418-422
+### code chunk number 5: decision-vegan.Rnw:609-613
 ###################################################
 library(vegan)
 data(varespec)
@@ -37,41 +59,41 @@ orig <- cca(varespec ~ Al + K, varechem)
 
 
 ###################################################
-### code chunk number 4: a
+### code chunk number 6: a
 ###################################################
 plot(orig, dis=c("lc","bp"))
 
 
 ###################################################
-### code chunk number 5: decision-vegan.Rnw:431-432
+### code chunk number 7: decision-vegan.Rnw:622-623
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, dis=c("lc","bp"))
 
 
 ###################################################
-### code chunk number 6: decision-vegan.Rnw:441-443
+### code chunk number 8: decision-vegan.Rnw:632-634
 ###################################################
 i <- sample(nrow(varespec))
 shuff <- cca(varespec[i,] ~ Al + K, varechem)
 
 
 ###################################################
-### code chunk number 7: decision-vegan.Rnw:446-447
+### code chunk number 9: decision-vegan.Rnw:637-638
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(shuff, dis=c("lc","bp"))
 
 
 ###################################################
-### code chunk number 8: a
+### code chunk number 10: a
 ###################################################
 plot(procrustes(scores(orig, dis="lc"), 
                 scores(shuff, dis="lc")))
 
 
 ###################################################
-### code chunk number 9: decision-vegan.Rnw:460-461
+### code chunk number 11: decision-vegan.Rnw:651-652
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(scores(orig, dis="lc"), 
@@ -79,7 +101,7 @@ plot(procrustes(scores(orig, dis="lc"),
 
 
 ###################################################
-### code chunk number 10: decision-vegan.Rnw:469-472
+### code chunk number 12: decision-vegan.Rnw:660-663
 ###################################################
 tmp1 <- rda(varespec ~ Al + K, varechem)
 i <- sample(nrow(varespec)) # Different shuffling
@@ -87,7 +109,7 @@ tmp2 <- rda(varespec[i,] ~ Al + K, varechem)
 
 
 ###################################################
-### code chunk number 11: decision-vegan.Rnw:475-477
+### code chunk number 13: decision-vegan.Rnw:666-668
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(scores(tmp1, dis="lc"), 
@@ -95,21 +117,21 @@ plot(procrustes(scores(tmp1, dis="lc"),
 
 
 ###################################################
-### code chunk number 12: decision-vegan.Rnw:494-496
+### code chunk number 14: decision-vegan.Rnw:685-687
 ###################################################
 orig
 shuff
 
 
 ###################################################
-### code chunk number 13: decision-vegan.Rnw:501-502
+### code chunk number 15: decision-vegan.Rnw:692-693
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(procrustes(orig, shuff))
 
 
 ###################################################
-### code chunk number 14: decision-vegan.Rnw:515-520
+### code chunk number 16: decision-vegan.Rnw:706-711
 ###################################################
 tmp1 <- rda(varespec ~ ., varechem)
 tmp2 <- rda(varespec[i,] ~ ., varechem)
@@ -119,7 +141,7 @@ max(residuals(proc))
 
 
 ###################################################
-### code chunk number 15: decision-vegan.Rnw:532-535
+### code chunk number 17: decision-vegan.Rnw:723-726
 ###################################################
 data(dune)
 data(dune.env)
@@ -127,14 +149,14 @@ orig <- cca(dune ~ Moisture, dune.env)
 
 
 ###################################################
-### code chunk number 16: decision-vegan.Rnw:540-541
+### code chunk number 18: decision-vegan.Rnw:731-732
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, dis="lc")
 
 
 ###################################################
-### code chunk number 17: a
+### code chunk number 19: a
 ###################################################
 plot(orig, display="wa", type="points")
 ordispider(orig, col="red")
@@ -142,7 +164,7 @@ text(orig, dis="cn", col="blue")
 
 
 ###################################################
-### code chunk number 18: decision-vegan.Rnw:565-566
+### code chunk number 20: decision-vegan.Rnw:756-757
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 plot(orig, display="wa", type="points")
diff --git a/inst/doc/decision-vegan.Rnw b/inst/doc/decision-vegan.Rnw
index cc42763..6e00ca8 100644
--- a/inst/doc/decision-vegan.Rnw
+++ b/inst/doc/decision-vegan.Rnw
@@ -7,7 +7,7 @@
 \author{Jari Oksanen}
 \title{Design decisions and implementation details in vegan}
 
-\date{\footnotesize{$ $Id: decision-vegan.Rnw 2616 2013-09-11 08:34:17Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
 \Sexpr{packageDescription("vegan", field="Version")}
 in \Sexpr{R.version.string} on \today}}
@@ -35,6 +35,197 @@ another document.
 
 \tableofcontents
 
+\section{Parallel processing}
+
+Several \pkg{vegan} functions can perform parallel processing using
+the standard \R{} package \pkg{parallel}. 
+The \pkg{parallel} package in \R{} implements
+the functionality of earlier contributed packages \pkg{multicore} and
+\pkg{snow}.  The \pkg{multicore} functionality forks the analysis to
+multiple cores, and \pkg{snow} functionality sets up a socket cluster
+of workers.  The \pkg{multicore} functionality only works in unix-like
+systems (such as MacOS and Linux), but \pkg{snow} functionality works
+in all operating systems.  \pkg{Vegan} can use either method, but
+defaults to \pkg{multicore} functionality when this is available,
+because its forked clusters are usually faster.  This chapter
+describes both the user interface and internal implementation for the
+developers.
+
+\subsection{User interface}
+\label{sec:parallel:ui}
+
+The functions that are capable of parallel processing have argument
+\code{parallel}.  The normal default is \code{parallel = 1} which
+means that no parallel processing is performed.  It is possible to set
+parallel processing as the default in \pkg{vegan} (see
+\S\,\ref{sec:parallel:default}). 
+
+For parallel processing, the \code{parallel} argument can be either
+
+\begin{enumerate}
+\item An integer in which case the given number of parallel processes
+  will be launched (value $1$ launches non-parallel processing). In
+  unix-like systems (\emph{e.g.}, MacOS, Linux) these will be forked
+  \code{multicore} processes. In Windows socket clusters will be set up,
+  initialized and closed.
+\item A previously created socket cluster. This saves time as the
+  cluster is not set up and closed in the function.  If the argument is a
+  socket cluster, it will also be used in unix-like systems. Setting
+  up a socket cluster is discussed in \S\,\ref{sec:parallel:socket}.
+\end{enumerate}
+
+\subsubsection{Using parallel processing as default}
+\label{sec:parallel:default}
+
+If the user sets option \code{mc.cores}, its value will be used as the
+default value of the \code{parallel} argument in \pkg{vegan}
+functions.  The following command will set up parallel processing to
+all subsequent \pkg{vegan} commands:
+<<eval=false>>=
+options(mc.cores = 2)
+@ 
+
+The \code{mc.cores} option is defined in the \pkg{parallel} package,
+but it is usually unset in which case \pkg{vegan} will default to
+non-parallel computation.  The \code{mc.cores} option can be set by
+the environmental variable \code{MC_CORES} when the \pkg{parallel}
+package is loaded.
+
+\R{} allows\footnote{Since \R{} version 2.15.0.}
+setting up a default socket cluster (\code{setDefaultCluster}), but
+this will not be used in \pkg{vegan}. 
+
+\subsubsection{Setting up socket clusters}
+\label{sec:parallel:socket}
+
+If socket clusters are used (and they are the only alternative in
+Windows), it is often wise to set up a cluster before calling
+parallelized code and give the pre-defined cluster as the value of
+the \code{parallel} argument in \pkg{vegan}.  If you want to use
+socket clusters in unix-like systems (MacOS, Linux), this can be only
+done with pre-defined clusters.
+
+If socket cluster is not set up in Windows, \pkg{vegan} will create and
+close the cluster within the function body. This involves following commands:
+\begin{Schunk}
+\begin{Soutput}
+clus <- makeCluster(4)
+## perform parallel processing
+stopCluster(clus)
+\end{Soutput}
+\end{Schunk}
+The first command sets up the cluster, in this case with four
+cores, and the second command stops the cluster.
+
+Most parallelized \pkg{vegan} functions work similarly in socket and
+fork clusters, but in \code{oecosimu} the parallel processing is used
+to evaluate user-defined functions, and their arguments and data must
+be made known to the socket cluster.  For example, if you want to run
+in parallel the \code{meandist} function of the \code{oecosimu}
+example with a pre-defined socket cluster, you must use:
+<<eval=false>>=
+## start up and define meandist()
+library(vegan)
+data(sipoo)
+meandist <- 
+    function(x) mean(vegdist(x, "bray"))
+library(parallel)
+clus <- makeCluster(4)
+clusterEvalQ(clus, library(vegan))
+mbc1 <- oecosimu(dune, meandist, "r2dtable", 
+                 parallel = clus)
+stopCluster(clus)
+@ 
+Socket clusters are used for parallel processing in Windows, but you
+do not need to pre-define the socket cluster in \code{oecosimu} if you
+only need \pkg{vegan} commands.  However, if you need some other
+contributed packages, you must pre-define the socket cluster also in
+Windows with appropriate \code{clusterEvalQ} calls.
+
+If you pre-set the cluster, you can also use \pkg{snow} style socket
+clusters in unix-like systems.
+
+\subsubsection{Random number generation}
+
+\pkg{Vegan} does not use parallel processing in random number
+generation, and you can set the seed for the standard random number
+generator. Setting the seed for the parallelized generator (L'Ecuyer)
+has no effect in \pkg{vegan}.
+
+\subsubsection{Does it pay off?}
+
+Parallelized processing has a considerable overhead, and the analysis
+is faster only if the non-parallel code is really slow (takes several
+seconds in wall clock time). The overhead is particularly large in
+socket clusters (in Windows). Creating a socket cluster and evaluating
+\code{library(vegan)} with \code{clusterEvalQ} can take two seconds or
+longer, and only pays off if the non-parallel analysis takes ten
+seconds or longer. Using pre-defined clusters will reduce the
+overhead. Fork clusters (in unix-like operating systems) have a
+smaller overhead and can be faster, but they also have an overhead.
+
+Each parallel process needs memory, and for a large number of
+processes you need much memory.  If the memory is exhausted, the
+parallel processes can stall and  take much longer than
+non-parallel processes (minutes instead of seconds).
+
+If the analysis is fast, and function runs in, say, less than five
+seconds, parallel processing is rarely useful.  Parallel processing is
+useful only in slow analyses: large number of replications or
+simulations, slow evaluation of each simulation. The danger of memory
+exhaustion must always be remembered.
+
+The benefits and potential problems of parallel processing depend on
+your particular system: it is best to rely on your own experience. 
+
+\subsection{Internals for developers}
+
+The implementation of the parallel processing should accord with the
+description of the user interface above (\S\,\ref{sec:parallel:ui}).
+Function \code{oecosimu} can be used as a reference implementation,
+and similar interpretation and order of interpretation of arguments
+should be followed.  All future implementations should be consistent
+and all must be changed if the call heuristic changes.
+
+The value of the \code{parallel} argument can be \code{NULL}, a
+positive integer or a socket cluster.  Integer $1$ means that no
+parallel processing is performed.  The ``normal'' default is
+\code{NULL} which in  the ``normal'' case is interpreted as $1$.  Here
+``normal'' means that \R{} is run with default settings without
+setting \code{mc.cores} or environmental variable \code{MC_CORES}.  
+
+Function \code{oecosimu} interprets the \code{parallel} arguments in
+the following way:
+\begin{enumerate} 
+\item \code{NULL}: The function is called with argument \code{parallel
+    = getOption("mc.cores")}. The option \code{mc.cores} is normally
+  unset and then the default is \code{parallel = NULL}.  
+\item Integer: An integer value is taken as the number of created
+  parallel processes.  In unix-like systems this is the number of
+  forked multicore processes, and in Windows this is the number of
+  workers in socket clusters.  In Windows, the socket cluster is
+  created, and if needed \code{library(vegan)} is evaluated in the
+  cluster (this is not necessary if the function only uses internal
+  functions), and the cluster is stopped after parallel processing.
+\item Socket cluster: If a socket cluster is given, it will be used in
+  all operating systems, and  the cluster is not stopped
+  within the function.
+\end{enumerate}
+
+This gives the following precedence order for parallel processing
+(highest to lowest):
+\begin{enumerate}
+  \item Explicitly given argument value of \code{parallel} will always
+    be used.
+  \item If \code{mc.cores} is set, it will be used. In Windows this
+    means creating and stopping socket clusters. Please note
+    that the \code{mc.cores} is only set from the environmental
+    variable \code{MC_CORES} when you load the \pkg{parallel} package,
+    and it is always unset before first
+    \code{require(parallel)}.
+ \item The fall back behaviour is no parallel processing. 
+\end{enumerate}
+
 \section{Nestedness and Null models}
 
 Some published indices of nestedness and null models of communities
@@ -237,8 +428,8 @@ weighted averaging scores have somewhat wider dispersion.
     in the functions \code{prcomp} and \code{princomp}, and the
     one used in the \pkg{vegan} function \code{rda} 
     and the proprietary software \proglang{Canoco}
-    scores in terms of orthonormal species ($u_{ik}$) and site scores
-    ($v_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
+    scores in terms of orthonormal species ($v_{ik}$) and site scores
+    ($u_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
     species standard deviations ($s_j$). In \code{rda},
     $\mathrm{const} = \sqrt[4]{(n-1) \sum \lambda_k}$.  Corresponding
     negative scaling in \pkg{vegan}
diff --git a/inst/doc/decision-vegan.pdf b/inst/doc/decision-vegan.pdf
index d6c1bd2..2812360 100644
Binary files a/inst/doc/decision-vegan.pdf and b/inst/doc/decision-vegan.pdf differ
diff --git a/inst/doc/diversity-vegan.R b/inst/doc/diversity-vegan.R
index 5225d29..a56a00f 100644
--- a/inst/doc/diversity-vegan.R
+++ b/inst/doc/diversity-vegan.R
@@ -196,33 +196,33 @@ boxplot(mod)
 
 
 ###################################################
-### code chunk number 29: diversity-vegan.Rnw:611-612
+### code chunk number 29: diversity-vegan.Rnw:639-640
 ###################################################
 specpool(BCI)
 
 
 ###################################################
-### code chunk number 30: diversity-vegan.Rnw:617-619
+### code chunk number 30: diversity-vegan.Rnw:645-647
 ###################################################
 s <- sample(nrow(BCI), 25)
 specpool(BCI[s,])
 
 
 ###################################################
-### code chunk number 31: diversity-vegan.Rnw:630-631
+### code chunk number 31: diversity-vegan.Rnw:658-659
 ###################################################
 estimateR(BCI[k,])
 
 
 ###################################################
-### code chunk number 32: diversity-vegan.Rnw:667-669
+### code chunk number 32: diversity-vegan.Rnw:698-700
 ###################################################
 veiledspec(prestondistr(BCI[k,]))
 veiledspec(BCI[k,])
 
 
 ###################################################
-### code chunk number 33: diversity-vegan.Rnw:683-684
+### code chunk number 33: diversity-vegan.Rnw:714-715
 ###################################################
 smo <- beals(BCI)
 
@@ -237,7 +237,7 @@ plot(beals(BCI, species=j, include=FALSE), BCI[,j],
 
 
 ###################################################
-### code chunk number 35: diversity-vegan.Rnw:697-698
+### code chunk number 35: diversity-vegan.Rnw:728-729
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 j <- which(colnames(BCI) == "Ceiba.pentandra")
diff --git a/inst/doc/diversity-vegan.Rnw b/inst/doc/diversity-vegan.Rnw
index ff84ad6..903a141 100644
--- a/inst/doc/diversity-vegan.Rnw
+++ b/inst/doc/diversity-vegan.Rnw
@@ -9,7 +9,7 @@
 
 \title{Vegan: ecological diversity} \author{Jari Oksanen} 
 
-\date{\footnotesize{$ $Id: diversity-vegan.Rnw 2807 2013-12-05 11:50:52Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan \Sexpr{packageDescription("vegan", field="Version")}
   in \Sexpr{R.version.string} on \today}}
 
@@ -573,9 +573,12 @@ species is related to the number of rare species, or species seen only
 once or twice.
 
 Function \code{specpool} implements the following models to estimate
-the pool size $S_p$ \citep{SmithVanBelle84, Chao87}:
+the pool size $S_p$ \citep{SmithVanBelle84, Chao87, ChiuEtal14}:
 \begin{align}
-S_p &= S_o + \frac{f_1^2}{2 f_2} & \text{Chao}\\
+\label{eq:chao-basic}
+S_p &= S_o + \frac{f_1^2}{2 f_2} \frac{N-1}{N} & \text{Chao}\\
+\label{eq:chao-bc}
+S_p &= S_o + \frac{f_1 (f_1 -1)}{2 (f_2+1)}  \frac{N-1}{N} & \text{Chao bias-corrected}\\
 S_p &= S_o + f_1 \frac{N-1}{N}  & \text{1st order Jackknife}\\
 S_p & = S_o + f_1 \frac{2N-3}{N} \nonumber \\ & + f_2 \frac{(N-2)^2}{N(N-1)}
 & \text{2nd order Jackknife}\\
@@ -587,12 +590,36 @@ and $p_i$ are proportions of species.  The idea in jackknife seems to
 be that we missed about as many species as we saw only once, and the
 idea in bootstrap that if we repeat sampling (with replacement) from
 the same data, we miss as many species as we missed originally.
+\citet{ChiuEtal14} introduced the small-sample correction term
+$\frac{N}{N-1}$, but it was not originally used \citep{Chao87}.
+
+The variance of the estimator of the basic Chao estimate is \citep{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-basic}
+s^2 = f_2 \left(A^2 \frac{G^4}{4} + A^2 G^3 + A \frac{G^2}{2} \right),\\
+\text{where}\; A = \frac{N-1}{N}\;\text{and}\; G = \frac{f_1}{f_2} 
+\end{multline}
+The variance of bias-corrected Chao estimate can be approximated by
+replacing the terms of eq.~\ref{eq:var-chao-basic} with the
+corresponding terms in eq.~\ref{eq:chao-bc}:
+\begin{multline}
+\label{eq:var-chao-bc}
+s^2 = A \frac{f_1(f_1-1)}{2(f_2+1)} + A^2 \frac{f_1(2 f_1+1)^2}{(f_2+1)^2}\\
+ + A^2 \frac{f_1^2 f_2 (f_1 -1)^2}{4 (f_2 + 1)^4}
+\end{multline}
+If we apply the bias-correction in the special case where there are no
+doubletons ($f_2 = 0$), the variance is 
+\citep[who omit small-sample correction in some terms]{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-bc0}
+s^2 = \frac{1}{4} A^2 f_1 (2f_1 -1)^2 + \frac{1}{2} A f_1 (f_1-1) - \frac{1}{4}A^2 \frac{f_1^4}{S_p}
+\end{multline}
+Function \code{specpool} uses eq.~\ref{eq:chao-basic} and estimates
+its variance with eq.~\ref{eq:var-chao-basic} when $f_2 > 0$. When
+$f_2 = 0$, \code{specpool} applies eq.~\ref{eq:chao-bc} which reduces
+to $\frac{N-1}{N} \frac{1}{2} f_1 (f_1 - 1)$, and its variance
+estimator eq.~\ref{eq:var-chao-bc0}.
 
-The variance the estimator of \citet{Chao87} is:
-\begin{equation}
-s^2 = f_2 \left(\frac{G^4}{4} + G^3 + \frac{G^2}{2} \right), \,
-\text{where}\quad G = \frac{f_1}{f_2}
-\end{equation}
 The variance of the first-order jackknife is based on the number of
 ``singletons'' $r$ (species occurring only once in the data) in sample
 plots \citep{SmithVanBelle84}:
@@ -604,8 +631,9 @@ Variance of the second-order jackknife is not evaluated in
 For the variance of bootstrap estimator, it is practical to define a
 new variable $q_i = (1-p_i)^N$ for each species \citep{SmithVanBelle84}:
 \begin{multline}
-s^2 = \sum_{i=1}^{S_o} q_i (1-q_i) + 2 \sum \sum Z_p ,\\ \text{where}\; Z_p = \dots
+s^2 = \sum_{i=1}^{S_o} q_i (1-q_i)  \\ +2 \sum_{i \neq j}^{S_o} \left[(Z_{ij}/N)^N - q_i q_j \right]
 \end{multline}
+where $Z_{ij}$ is the number of sites where both species are absent.
 
 The extrapolated richness values for the whole BCI data are:
 <<>>=
@@ -629,9 +657,12 @@ species with low frequencies.  Function \code{estimateR} implements
 two of these methods:
 <<>>=
 estimateR(BCI[k,])
-@
-Chao's method is similar as above, but uses another, ``unbiased''
-equation. \textsc{ace} is based on rare species also:
+@ 
+Chao's method is similar to the bias-corrected model
+eq.~\ref{eq:chao-bc} with its variance estimator
+eq.~\ref{eq:var-chao-bc}, but it uses counts of individuals instead of
+incidences, and does not use small sample correction.  \textsc{ace} is
+based on rare species also:
 \begin{equation}
 \begin{split}
 S_p &= S_\mathrm{abund} + \frac{S_\mathrm{rare}}{C_\mathrm{ACE}} +
diff --git a/inst/doc/diversity-vegan.pdf b/inst/doc/diversity-vegan.pdf
index ccb277a..14a1d29 100644
Binary files a/inst/doc/diversity-vegan.pdf and b/inst/doc/diversity-vegan.pdf differ
diff --git a/inst/doc/intro-vegan.R b/inst/doc/intro-vegan.R
index 99957b9..36f9c34 100644
--- a/inst/doc/intro-vegan.R
+++ b/inst/doc/intro-vegan.R
@@ -92,7 +92,7 @@ points(ord, disp="sites", pch=21, col="red", bg="yellow", cex=1.3)
 ###################################################
 ### code chunk number 12: intro-vegan.Rnw:248-250
 ###################################################
-ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=1000)
+ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999)
 ord.fit
 
 
@@ -151,44 +151,45 @@ anova(ord)
 
 
 ###################################################
-### code chunk number 21: intro-vegan.Rnw:340-341
+### code chunk number 21: intro-vegan.Rnw:334-335
 ###################################################
-anova(ord, by="term", permu=200)
+anova(ord, by="term", permutations=199)
 
 
 ###################################################
-### code chunk number 22: intro-vegan.Rnw:347-348
+### code chunk number 22: intro-vegan.Rnw:340-341
 ###################################################
-anova(ord, by="mar")
+anova(ord, by="mar", permutations=199)
 
 
 ###################################################
 ### code chunk number 23: a
 ###################################################
-anova(ord, by="axis", perm=500)
+anova(ord, by="axis", permutations=499)
 
 
 ###################################################
-### code chunk number 24: intro-vegan.Rnw:365-367
+### code chunk number 24: intro-vegan.Rnw:353-355
 ###################################################
 ord <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)
 ord
 
 
 ###################################################
-### code chunk number 25: intro-vegan.Rnw:372-373
+### code chunk number 25: intro-vegan.Rnw:360-361
 ###################################################
-anova(ord, by="term", perm=500)
+anova(ord, by="term", permutations=499)
 
 
 ###################################################
-### code chunk number 26: intro-vegan.Rnw:378-379
+### code chunk number 26: intro-vegan.Rnw:369-371
 ###################################################
-anova(ord, by="term", perm=500, strata=Moisture)
+how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture))
+anova(ord, by="term", permutations = how)
 
 
 ###################################################
-### code chunk number 27: intro-vegan.Rnw:383-384
+### code chunk number 27: intro-vegan.Rnw:375-376
 ###################################################
 detach(dune.env)
 
diff --git a/inst/doc/intro-vegan.Rnw b/inst/doc/intro-vegan.Rnw
index 8a253ce..1be1c38 100644
--- a/inst/doc/intro-vegan.Rnw
+++ b/inst/doc/intro-vegan.Rnw
@@ -6,7 +6,7 @@
 \title{Vegan: an introduction to ordination} 
 \author{Jari Oksanen}
 
-\date{\footnotesize{$ $Id: intro-vegan.Rnw 2597 2013-08-28 08:56:55Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
 \Sexpr{packageDescription("vegan", field="Version")}
 in \Sexpr{R.version.string} on \today}}
@@ -246,7 +246,7 @@ Function \code{envfit} can be called with a \code{formula}
 interface, and it optionally can assess the ``significance'' of the
 variables using permutation tests:
 <<>>=
-ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=1000)
+ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999)
 ord.fit
 @
 The result can be drawn directly or added to an ordination diagram
@@ -330,33 +330,21 @@ to give its name in full, because \proglang{R} automatically chooses the
 correct \code{anova} variant for the result of constrained
 ordination.
 
-The \code{anova.cca} function tries to be clever and lazy: it
-automatically stops if the observed permutation significance probably
-differs from the targeted critical value ($0.05$ as default), but it
-will continue long in uncertain cases.  You must set \code{step} and
-\code{perm.max} to same values to override this behaviour.
-
 It is also possible to analyse terms separately:
 <<>>=
-anova(ord, by="term", permu=200)
+anova(ord, by="term", permutations=199)
 @
-In this case, the function is unable to automatically select the
-number of iterations. This test is sequential: the terms are analysed
+This test is sequential: the terms are analysed
 in the order they happen to be in the model. You can also analyse
 significances of marginal effects (``Type III effects''):
 <<>>=
-anova(ord, by="mar")
+anova(ord, by="mar", permutations=199)
 @
 
 Moreover, it is possible to analyse significance of each axis:
 <<a>>=
-anova(ord, by="axis", perm=500)
+anova(ord, by="axis", permutations=499)
 @
-Now the automatic selection works, but typically some of your axes
-will be very close to the critical value, and it may be useful to set
-a lower \code{perm.max} than the default $10000$ (typically you use
-higher limits than in these examples: we used lower limits to save
-time when this document is automatically generated with this package).
 
 \subsection{Conditioned or partial ordination}
 
@@ -370,13 +358,17 @@ This partials out the effect of \code{Moisture} before analysing the
 effects of \code{A1} and \code{Management}.  This also influences
 the significances of the terms:
 <<>>=
-anova(ord, by="term", perm=500)
+anova(ord, by="term", permutations=499)
 @
 If we had a designed experiment, we may wish to restrict the
 permutations so that the observations only are permuted within levels
-of \code{strata}:
+of \code{Moisture}. Restricted permutation is based on the powerful
+\pkg{permute} package. Function \code{how()} can be used to define
+permutation schemes. In the following, we set the levels with
+\code{plots} argument:
 <<>>=
-anova(ord, by="term", perm=500, strata=Moisture)
+how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture))
+anova(ord, by="term", permutations = how)
 @
 
 %%%%%%%%%%%%%%%%%%%
diff --git a/inst/doc/intro-vegan.pdf b/inst/doc/intro-vegan.pdf
index 254576b..9cedf1c 100644
Binary files a/inst/doc/intro-vegan.pdf and b/inst/doc/intro-vegan.pdf differ
diff --git a/man/BCI.Rd b/man/BCI.Rd
index fd51327..f2d64fb 100644
--- a/man/BCI.Rd
+++ b/man/BCI.Rd
@@ -8,9 +8,15 @@
 }
 \usage{data(BCI)}
 \format{
-  A data frame with 50 plots (rows) of 1 hectare with counts of trees on each
-  plot with total of 225 species (columns). Full Latin names are used
-  for tree species.
+  
+  A data frame with 50 plots (rows) of 1 hectare with counts of trees
+  on each plot with total of 225 species (columns). Full Latin names
+  are used for tree species. The names were updated against
+  \url{http://www.theplantlist.org} in Jan 2014 (see ChangeLog 2.1-41
+  for details) which allows matching 206 of species against
+  \url{http://datadryad.org/resource/doi:10.5061/dryad.63q27} (Zanne
+  et al., 2014).
+
  }
 \details{
   Data give the numbers of trees at least 10 cm in
@@ -28,11 +34,24 @@
 \source{
   \url{http://www.sciencemag.org/cgi/content/full/295/5555/666/DC1}
 }
+
 \references{
+  
   Condit, R, Pitman, N, Leigh, E.G., Chave, J., Terborgh, J., Foster,
-  R.B., \enc{Nuñez}{Nunez}, P., Aguilar, S., Valencia, R., Villa, G., Muller-Landau,
-  H.C., Losos, E. & Hubbell, S.P. (2002). Beta-diversity in tropical
-  forest trees. \emph{Science} 295, 666--669.
+  R.B., \enc{Nuñez}{Nunez}, P., Aguilar, S., Valencia, R., Villa, G.,
+  Muller-Landau, H.C., Losos, E. & Hubbell, S.P. (2002).
+  Beta-diversity in tropical forest trees. \emph{Science} 295,
+  666--669.
+
+  Zanne A.E., Tank D.C., Cornwell, W.K., Eastman J.M., Smith, S.A.,
+  FitzJohn, R.G., McGlinn, D.J., O’Meara, B.C., Moles, A.T., Reich,
+  P.B., Royer, D.L., Soltis, D.E., Stevens, P.F., Westoby, M., Wright,
+  I.J., Aarssen, L., Bertin, R.I., Calaminus, A., Govaerts, R.,
+  Hemmings, F., Leishman, M.R., Oleksyn, J., Soltis, P.S., Swenson,
+  N.G., Warman, L. & Beaulieu, J.M. (2014) Three keys to the radiation
+  of angiosperms into freezing environments. \emph{Nature}
+  doi:10.1038/nature12872 (published online Dec 22, 2013).
+
 }
 \seealso{\code{\link[BiodiversityR]{BCI.env}} in \pkg{BiodiversityR}
   package for environmental data  (coordinates are given below in the
diff --git a/man/CCorA.Rd b/man/CCorA.Rd
index e9a31d8..64c5b49 100644
--- a/man/CCorA.Rd
+++ b/man/CCorA.Rd
@@ -11,7 +11,7 @@ calculations in the case of very sparse and collinear matrices, and
 permutation test of Pillai's trace statistic. }
 
 \usage{
-CCorA(Y, X, stand.Y=FALSE, stand.X=FALSE, nperm = 0, ...)
+CCorA(Y, X, stand.Y=FALSE, stand.X=FALSE, permutations = 0, ...)
 
 \method{biplot}{CCorA}(x, plot.type="ov", xlabs, plot.axes = 1:2, int=0.5, 
    col.Y="red", col.X="blue", cex=c(0.7,0.9), ...)
@@ -22,8 +22,12 @@ CCorA(Y, X, stand.Y=FALSE, stand.X=FALSE, nperm = 0, ...)
   \item{X}{ Right matrix (object class: \code{matrix} or \code{data.frame}). }
   \item{stand.Y}{ Logical; should \code{Y} be standardized? }
   \item{stand.X}{ Logical; should \code{X} be standardized? }
-  \item{nperm}{ Numeric; number of permutations to evaluate the
-    significance of Pillai's trace, e.g. \code{nperm=99} or \code{nperm=999}.}
+  
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+  
   \item{x}{\code{CCoaR} result object.}
   \item{plot.type}{ A character string indicating which of the following 
     plots should be produced: \code{"objects"}, \code{"variables"}, \code{"ov"} 
@@ -101,6 +105,8 @@ to \code{\link{biplot.default}}; consult its help page for configuring biplots.
   \item{ corr.X.Cx }{ Scores of X variables in X biplot, computed as cor(X,Cx). }
   \item{ corr.Y.Cx }{ cor(Y,Cy) available for plotting variables Y in space of X manually. }
   \item{ corr.X.Cy }{ cor(X,Cx) available for plotting variables X in space of Y manually. }
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}.}
   \item{ call }{ Call to the CCorA function. }
 }
 
@@ -140,7 +146,7 @@ biplot(out, plot.type="biplots", xlabs = NULL) # Replace object names by numbers
 # Example using random numbers. No significant relationship is expected
 mat1 <- matrix(rnorm(60),20,3)
 mat2 <- matrix(rnorm(100),20,5)
-out2 = CCorA(mat1, mat2, nperm=99)
+out2 = CCorA(mat1, mat2, permutations=99)
 out2
 biplot(out2, "b")
 }
diff --git a/man/MDSrotate.Rd b/man/MDSrotate.Rd
index efac2f8..b898b4c 100644
--- a/man/MDSrotate.Rd
+++ b/man/MDSrotate.Rd
@@ -19,8 +19,10 @@ MDSrotate(object, vec, na.rm = FALSE, ...)
  \item{object}{ A result object from \code{\link{metaMDS}} or
     \code{\link{monoMDS}}.}
 
-  \item{vec}{ A continuous environmental variable (vector of the same
-    length as the number of points).}
+  \item{vec}{ A continuous environmental variable or a matrix of such
+    variables. The number of variables must be lower than the number of
+    dimensions, and the solution is rotated to these variables in the
+    order they appear in the matrix.}
 
   \item{na.rm}{ Remove missing values from the continuous variable
     \code{vec}.}
@@ -29,14 +31,22 @@ MDSrotate(object, vec, na.rm = FALSE, ...)
 
 }
 
-\details{ The orientation and rotation are undefined in
-  multidimensional scaling.  Functions \code{\link{metaMDS}} and
-  \code{\link{metaMDS}} can rotate their solutions to principal
-  components so that the dispersion of the points is highest on the
-  first dimension. Sometimes a different rotation is more intuitive,
-  and \code{MDSrotate} allows rotation of the result so that the first
-  axis is parallel to a given external variable.  
-}
+\details{ The orientation and rotation are undefined in multidimensional
+  scaling.  Functions \code{\link{metaMDS}} and \code{\link{monoMDS}}
+  can rotate their solutions to principal components so that the
+  dispersion of the points is highest on the first dimension. Sometimes
+  a different rotation is more intuitive, and \code{MDSrotate} allows
+  rotation of the result so that the first axis is parallel to a given
+  external variable or two first variables are completely in a
+  two-dimensional plane etc. If several external variables are supplied,
+  they are applied in the order they are in the matrix. First axis is
+  rotated to the first supplied variable, and the second axis to the
+  second variable. Because variables are usually correlated, the second
+  variable is not usually aligned with the second axis, but it is
+  uncorrelated to later dimensions. There must be at least one free
+  dimension: the number of external variables must be lower than the
+  number of dimensions, and all used environmental variables are
+  uncorrelated with that free dimension.}
 
 \value{ Function returns the original ordination result, but with
   rotated scores (both site and species if available), and the
diff --git a/man/add1.cca.Rd b/man/add1.cca.Rd
index c4167d6..87dff41 100644
--- a/man/add1.cca.Rd
+++ b/man/add1.cca.Rd
@@ -9,9 +9,9 @@ constrained ordination model.
 }
 \usage{
 \method{add1}{cca}(object, scope, test = c("none", "permutation"),
-    pstep = 100, perm.max = 200, ...)
+    permutations = how(nperm=199), ...)
 \method{drop1}{cca}(object, scope, test = c("none", "permutation"), 
-    pstep = 100, perm.max = 200, ...)
+    permutations = how(nperm=199), ...)
 }
 
 \arguments{
@@ -20,9 +20,12 @@ constrained ordination model.
   \item{scope}{ A formula giving the terms to be considered for adding
   or dropping; see \code{\link{add1}} for details.}
   \item{test}{ Should a permutation test be added using \code{\link{anova.cca}}. }
-  \item{pstep}{Number of permutations in one step, passed as argument
-  \code{step} to \code{\link{anova.cca}}.}
-  \item{perm.max}{ Maximum number of permutation in \code{\link{anova.cca}}. }
+ 
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+
   \item{\dots}{Other arguments passed to \code{\link{add1.default}},
   \code{\link{drop1.default}}, and \code{\link{anova.cca}}.}
 }
diff --git a/man/adipart.Rd b/man/adipart.Rd
index aabf01b..5df308a 100644
--- a/man/adipart.Rd
+++ b/man/adipart.Rd
@@ -162,16 +162,16 @@ cutter <- function (x, cut = seq(0, 10, by = 2.5)) {
         out[which(x > cut[i] & x <= cut[(i + 1)])] <- i
     return(out)}
 ## The hierarchy of sample aggregation
-levsm <- data.frame(
+levsm <- with(mite.xy, data.frame(
     l1=1:nrow(mite),
-    l2=cutter(mite.xy$y, cut = seq(0, 10, by = 2.5)),
-    l3=cutter(mite.xy$y, cut = seq(0, 10, by = 5)),
-    l4=cutter(mite.xy$y, cut = seq(0, 10, by = 10)))
+    l2=cutter(y, cut = seq(0, 10, by = 2.5)),
+    l3=cutter(y, cut = seq(0, 10, by = 5)),
+    l4=cutter(y, cut = seq(0, 10, by = 10))))
 ## Let's see in a map
 par(mfrow=c(1,3))
-plot(mite.xy, main="l1", col=as.numeric(levsm$l1)+1)
-plot(mite.xy, main="l2", col=as.numeric(levsm$l2)+1)
-plot(mite.xy, main="l3", col=as.numeric(levsm$l3)+1)
+plot(mite.xy, main="l1", col=as.numeric(levsm$l1)+1, asp = 1)
+plot(mite.xy, main="l2", col=as.numeric(levsm$l2)+1, asp = 1)
+plot(mite.xy, main="l3", col=as.numeric(levsm$l3)+1, asp = 1)
 par(mfrow=c(1,1))
 ## Additive diversity partitioning
 adipart(mite, index="richness", nsimul=19)
diff --git a/man/adonis.Rd b/man/adonis.Rd
index 0835e8b..d59520a 100644
--- a/man/adonis.Rd
+++ b/man/adonis.Rd
@@ -12,7 +12,7 @@
 \usage{
 adonis(formula, data, permutations = 999, method = "bray",
        strata = NULL, contr.unordered = "contr.sum",
-       contr.ordered = "contr.poly", ...)
+       contr.ordered = "contr.poly", parallel = getOption("mc.cores"), ...)
 }
 
 \arguments{  
@@ -24,8 +24,10 @@ adonis(formula, data, permutations = 999, method = "bray",
   Value below).} 
   \item{data}{ the data frame from which \code{A}, \code{B}, and
     \code{C} would be drawn.} 
-  \item{permutations}{ number of replicate permutations used for the
-    hypothesis tests (\eqn{F} tests).} 
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
   \item{method}{ the name of any method used in \code{\link{vegdist}} to
     calculate pairwise distances if the left hand side of the
     \code{formula} was a data frame or a matrix. } 
@@ -33,6 +35,10 @@ adonis(formula, data, permutations = 999, method = "bray",
   \item{contr.unordered, contr.ordered}{contrasts used for the design
     matrix (default in R is dummy or treatment contrasts for unordered
     factors). }
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
   \item{\dots}{Other arguments passed to \code{vegdist}.}
 }
 
@@ -132,9 +138,8 @@ overview of rules.
     your predictors. }   
   \item{f.perms}{ an \eqn{N} by \eqn{m} matrix of the null \eqn{F}
     statistics for each source of variation based on \eqn{N}
-    permutations of the data. The distribution of a single term can be
-    inspected with \code{\link{density.adonis}} function, or all terms
-    simultaneously with \code{densityplot.adonis}.}
+    permutations of the data. The permutations can be inspected with
+    \code{\link{permustats}} and its support functions.}
   \item{model.matrix}{The \code{\link{model.matrix}} for the right hand
     side of the formula.}
   \item{terms}{The \code{\link{terms}} component of the model.}
@@ -206,10 +211,10 @@ Y <- data.frame(Agropyron, Schizachyrium)
 mod <- metaMDS(Y)
 plot(mod)
 ### Hulls show treatment
-ordihull(mod, group=dat$NO3, show="0")
-ordihull(mod, group=dat$NO3, show="10", col=3)
+with(dat, ordihull(mod, group=NO3, show="0"))
+with(dat, ordihull(mod, group=NO3, show="10", col=3))
 ### Spider shows fields
-ordispider(mod, group=dat$field, lty=3, col="red")
+with(dat, ordispider(mod, group=field, lty=3, col="red"))
 
 ### Correct hypothesis test (with strata)
 adonis(Y ~ NO3, data=dat, strata=dat$field, perm=999)
diff --git a/man/anosim.Rd b/man/anosim.Rd
index 390df72..35a0487 100644
--- a/man/anosim.Rd
+++ b/man/anosim.Rd
@@ -10,7 +10,8 @@
   of sampling units.
 }
 \usage{
-anosim(dat, grouping, permutations = 999, distance = "bray", strata)
+anosim(dat, grouping, permutations = 999, distance = "bray", strata = NULL,
+    parallel = getOption("mc.cores"))
 }
 
 \arguments{
@@ -18,8 +19,10 @@ anosim(dat, grouping, permutations = 999, distance = "bray", strata)
     columns are response variable(s), or a dissimilarity object or a
     symmetric square matrix of dissimilarities.}
   \item{grouping}{Factor for grouping observations.}
-  \item{permutations}{Number of permutation to assess the significance
-    of the ANOSIM statistic. }
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
   \item{distance}{Choice of distance metric that measures the
     dissimilarity between two observations. See \code{\link{vegdist}} for
     options.  This will be used if \code{dat} was not a dissimilarity
@@ -27,6 +30,10 @@ anosim(dat, grouping, permutations = 999, distance = "bray", strata)
   \item{strata}{An integer vector or factor specifying the strata for
     permutation. If supplied, observations are permuted only within the
     specified strata.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
 }
 \details{
   Analysis of similarities (ANOSIM) provides a way to test statistically
@@ -54,7 +61,7 @@ anosim(dat, grouping, permutations = 999, distance = "bray", strata)
   permuting the grouping vector to obtain the empirical distribution
   of \eqn{R} under null-model.  See \code{\link{permutations}} for
   additional details on permutation tests in Vegan. The distribution
-  of simulated values can be inspected with the \code{density}
+  of simulated values can be inspected with the \code{\link{permustats}}
   function.
 
   The function has \code{summary} and \code{plot} methods.  These both
@@ -71,13 +78,15 @@ anosim(dat, grouping, permutations = 999, distance = "bray", strata)
   \item{statistic}{The value of ANOSIM statistic \eqn{R}}
   \item{signif}{Significance from permutation.}
   \item{perm}{Permutation values of \eqn{R}. The distribution of
-    permutation values can be inspected with function \code{\link{density.anosim}}.}
+    permutation values can be inspected with function \code{\link{permustats}}.}
   \item{class.vec}{Factor with value \code{Between} for dissimilarities
     between classes and class name for corresponding dissimilarity
     within class.}
   \item{dis.rank}{Rank of dissimilarity entry.}
   \item{dissimilarity}{The name of the dissimilarity index: the
     \code{"method"} entry of the \code{dist} object.}
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}.}
 }
 \references{
   Clarke, K. R. (1993). Non-parametric multivariate analysis of changes
diff --git a/man/anova.cca.Rd b/man/anova.cca.Rd
index 01fd184..baf242d 100644
--- a/man/anova.cca.Rd
+++ b/man/anova.cca.Rd
@@ -1,67 +1,100 @@
 \name{anova.cca}
 \alias{anova.cca}
-\alias{anova.ccanull}
-\alias{anova.ccabyaxis}
-\alias{anova.ccabyterm}
-\alias{anova.ccabymargin}
-\alias{anova.prc}
+%\alias{anova.ccanull}
+%\alias{anova.ccabyaxis}
+%\alias{anova.ccabyterm}
+%\alias{anova.ccabymargin}
+%\alias{anova.prc}
 \alias{permutest}
-\alias{permutest.default}
+%\alias{permutest.default}
 \alias{permutest.cca}
 
 \title{Permutation Test for Constrained Correspondence Analysis,
   Redundancy Analysis and Constrained Analysis of Principal Coordinates }
-\description{
+
+\description{ 
   The function performs an ANOVA like permutation test for Constrained
   Correspondence Analysis (\code{\link{cca}}), Redundancy Analysis
-  (\code{\link{rda}}) or distance-based Redundancy Analysis
-  (dbRDA, \code{\link{capscale}}) to assess the significance of constraints.
+  (\code{\link{rda}}) or distance-based Redundancy Analysis (dbRDA,
+  \code{\link{capscale}}) to assess the significance of constraints.
 }
-\usage{
-\method{anova}{cca}(object, alpha=0.05, beta=0.01, step=100, perm.max=9999,
-      by = NULL, ...)
-
-permutest(x, ...)
 
-\method{permutest}{cca}(x, permutations = 99,
-          model = c("reduced", "direct", "full"),
-          first = FALSE, strata, ...)
+\usage{
+\method{anova}{cca}(object, ..., permutations = how(nperm=999),
+     by = NULL, model = c("reduced", "direct", "full"), 
+     parallel = getOption("mc.cores"), strata = NULL,
+     cutoff = 1, scope = NULL)
+\method{permutest}{cca}(x, permutations = how(nperm = 99), 
+     model = c("reduced", "direct"), first = FALSE, strata = NULL, 
+     parallel = getOption("mc.cores"),  ...) 
 }
 
 \arguments{
-  \item{object,x}{A result object from \code{\link{cca}}. }
-  \item{alpha}{Targeted Type I error rate. }
-  \item{beta}{Accepted Type II error rate. }
-  \item{step}{Number of permutations during one step. }
-  \item{perm.max}{Maximum number of permutations. }
-  \item{by}{Setting \code{by = "axis"} will assess significance for each
-    constrained axis, and setting \code{by = "terms"} will assess
+
+  \item{object}{One or several result objects from \code{\link{cca}},
+    \code{\link{rda}} or \code{\link{capscale}}. If there are several
+    result objects, they are compared against each other in the order
+    they were supplied. For a single object, a test specified in
+    \code{by} or an overall test is given.}
+
+  \item{x}{A single ordination result object.}
+ 
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+
+  \item{by}{Setting \code{by = "axis"} will assess significance for
+    each constrained axis, and setting \code{by = "terms"} will assess
     significance for each term (sequentially from first to last), and
-    setting \code{by = "margin"} will assess the marginal effects of the
-    terms (each marginal term analysed in a model with all other
-    variables).}
-  \item{\dots}{Parameters passed to other functions. 
-    \code{anova.cca} passes all arguments to
-    \code{permutest.cca}. In \code{anova} with \code{by = "axis"} you
-    can use argument \code{cutoff} (defaults \code{1}) which stops
-    permutations after exceeding the given level. }
-  \item{permutations}{Number of permutations for assessing significance
-    of constraints.}
-  \item{model}{Permutation model (partial match).}
-  \item{first}{Assess only the significance of the first constrained
-    eigenvalue; will be passed from \code{anova.cca}. }
+    setting \code{by = "margin"} will assess the marginal effects of
+    the terms (each marginal term analysed in a model with all other
+    variables)}
+
+  \item{model}{Permutation model: \code{model="direct"} permutes 
+    community data, and \code{model="reduced"} permutes residuals
+    of the community data after Conditions (partial model).}
+
+  \item{parallel}{Use parallel processing with the given number of
+    cores.}
+
   \item{strata}{An integer vector or factor specifying the strata for
-    permutation. If supplied, observations are permuted only within the
-    specified strata.}
+    permutation. If supplied, observations are permuted only within
+    the specified strata. It is an error to use this when
+    \code{permutations} is a matrix, or a \code{\link[permute]{how}}
+    defines \code{blocks}. This is a legacy argument that will be
+    deprecated in the future: use 
+    \code{permutations = how(\dots, blocks)} instead. }
+
+  \item{cutoff}{Only effective with \code{by="axis"} where it stops
+    permutations after an axis exceeds the \code{cutoff}.}
+
+  \item{scope}{Only effective with \code{by="margin"} where it can be
+    used to select the marginal terms for testing. The default is to
+    test all marginal terms in \code{\link{drop.scope}}.}
+
+  \item{first}{Analyse only significance of the first axis.}
+
+  \item{\dots}{Parameters passed to other functions.  \code{anova.cca}
+    passes all arguments to \code{permutest.cca}. In \code{anova} with
+    \code{by = "axis"} you can use argument \code{cutoff} (defaults
+    \code{1}) which stops permutations after exceeding the given
+    level. }
 }
+
 \details{
-  Functions \code{anova.cca} and \code{permutest.cca} implement an ANOVA
-  like permutation test for the joint effect of constraints in
+
+  Functions \code{anova.cca} and \code{permutest.cca} implement 
+  ANOVA like permutation tests for the joint effect of constraints in
   \code{\link{cca}}, \code{\link{rda}} or \code{\link{capscale}}.
-  Functions \code{anova.cca} and \code{permutest.cca} differ in printout
-  style and in interface.
-  Function \code{permutest.cca} is the proper workhorse, but
-  \code{anova.cca} passes all parameters to \code{permutest.cca}.
+  Functions \code{anova.cca} and \code{permutest.cca} differ in
+  printout style and in interface.  Function \code{permutest.cca} is
+  the proper workhorse, but \code{anova.cca} passes all parameters to
+  \code{permutest.cca}.
+
+  Function \code{anova} can analyse a sequence of constrained
+  ordination models. The analysis is based on the differences in
+  residual deviances in permutations of nested models.
 
   The default test is for the sum of all constrained eigenvalues.
   Setting \code{first = TRUE} will perform a test for the first
@@ -75,7 +108,7 @@ permutest(x, ...)
   out}) and a test for the first constrained eigenvalues is
   performed (Legendre et al. 2011). 
   You can stop permutation tests after exceeding a given
-  significance level with argument \code{cutoff} to speed up
+   significance level with argument \code{cutoff} to speed up
   calculations in large models. Setting \code{by = "terms"} will
   perform separate significance test for each term (constraining
   variable). The terms are assessed sequentially from first to last,
@@ -89,65 +122,36 @@ permutest(x, ...)
   the current function only evaluates marginal terms. It will, for
   instance, ignore main effects that are included in interaction
   terms. In calculating pseudo-\eqn{F}, all terms are compared to the
-  same residual of the full model. Permutations for all axes or terms
-  will start from the same \code{\link{.Random.seed}}, and the seed
-  will be advanced to the value after the longest permutation at the
-  exit from the function.  
-  
-  In \code{anova.cca} the number of permutations is controlled by
-  targeted \dQuote{critical} \eqn{P} value (\code{alpha}) and accepted
-  Type II or rejection error (\code{beta}).  If the results of
-  permutations differ from the targeted \code{alpha} at risk level given
-  by \code{beta}, the permutations are terminated.  If the current
-  estimate of \eqn{P} does not differ significantly from \code{alpha} of
-  the alternative hypothesis, the permutations are continued with
-  \code{step} new permutations (at the first step, the number of
-  permutations is \code{step - 1}).  However, with \code{by="terms"} a
-  fixed number of permutations will be used, and this is given by
-  argument \code{permutations}, or if this is missing, by \code{step}.
+  same residual of the full model. 
   
   Community data are permuted with choice \code{model="direct"},
-  residuals after partial CCA/ RDA/ dbRDA with choice \code{model="reduced"} 
-  (default), and residuals after CCA/ RDA/ dbRDA under choice
-  \code{model="full"}.  If there is no partial CCA/ RDA/ dbRDA stage,
-  \code{model="reduced"} simply permutes the data and is equivalent to
-  \code{model="direct"}.  The test statistic is \dQuote{pseudo-\eqn{F}},
-  which is the ratio of constrained and unconstrained total Inertia
-  (Chi-squares, variances or something similar), each divided by their
-  respective ranks.  If there are no conditions (\dQuote{partial}
-  terms), the sum of all eigenvalues remains constant, so that
-  pseudo-\eqn{F} and eigenvalues would give equal results.  In partial
-  CCA/ RDA/ dbRDA, the effect of conditioning variables
-  (\dQuote{covariables}) is removed before permutation, and these
-  residuals are added to the non-permuted fitted values of partial CCA
-  (fitted values of \code{X ~ Z}).  Consequently, the total Chi-square
-  is not fixed, and test based on pseudo-\eqn{F} would differ from the
-  test based on plain eigenvalues. CCA is a weighted method, and
-  environmental data are re-weighted at each permutation step using
-  permuted weights.
-}
+  and residuals after partial CCA/ RDA/ dbRDA with choice
+  \code{model="reduced"} (default).   If there is no partial CCA/
+  RDA/ dbRDA stage, \code{model="reduced"} simply permutes the data
+  and is equivalent to \code{model="direct"}.  The test statistic is
+  \dQuote{pseudo-\eqn{F}}, which is the ratio of constrained and
+  unconstrained total Inertia (Chi-squares, variances or something
+  similar), each divided by their respective ranks.  If there are no
+  conditions (\dQuote{partial} terms), the sum of all eigenvalues
+  remains constant, so that pseudo-\eqn{F} and eigenvalues would give
+  equal results.  In partial CCA/ RDA/ dbRDA, the effect of
+  conditioning variables (\dQuote{covariables}) is removed before
+  permutation, and these residuals are added to the non-permuted
+  fitted values of partial CCA (fitted values of \code{X ~ Z}).
+  Consequently, the total Chi-square is not fixed, and test based on
+  pseudo-\eqn{F} would differ from the test based on plain
+  eigenvalues. CCA is a weighted method, and environmental data are
+  re-weighted at each permutation step using permuted weights.  }
 
 \value{ 
-  Function \code{permutest.cca} returns an object of class
-  \code{"permutest.cca"}, which has its own \code{print} method. The
-  distribution of permuted \eqn{F} values can be inspected with
-  \code{\link{density.permutest.cca}} function.  The function
-  \code{anova.cca} calls \code{permutest.cca} and fills an
-  \code{\link{anova}} table.
+  The function \code{anova.cca} calls \code{permutest.cca} and fills an
+  \code{\link{anova}} table.  
 }
 
 \note{
   Some cases of \code{anova} need access to the original data on
   constraints (at least \code{by = "term"} and \code{by = "margin"}),
   and they may fail if data are unavailable.
- 
-  The default permutation \code{model} changed from \code{"direct"} to
-  \code{"reduced"} in \pkg{vegan} version 1.15-0, and you must
-  explicitly set \code{model = "direct"} for compatibility with the old
-  version.
-
-  Tests \code{by = "terms"} and \code{by = "margin"} are consistent
-  only when \code{model = "direct"}.  
 }
 \references{
   Legendre, P. and Legendre, L. (2012). \emph{Numerical Ecology}. 3rd
@@ -157,13 +161,14 @@ permutest(x, ...)
   significance of canonical axes in redundancy analysis. 
   \emph{Methods in Ecology and Evolution} 2, 269--277.
 }
-\author{Jari  Oksanen}
-\seealso{\code{\link{cca}}, \code{\link{rda}}, \code{\link{capscale}}
-  to get something to analyse. Function \code{\link{drop1.cca}} calls
-  \code{anova.cca} with \code{by = "margin"}, and
-  \code{\link{add1.cca}} an analysis for single terms additions, which
-  can be used in automatic or semiautomatic model building (see
-  \code{\link{deviance.cca}}). }
+\author{Jari Oksanen}
+
+\seealso{\code{\link{anova.cca}}, \code{\link{cca}},
+  \code{\link{rda}}, \code{\link{capscale}} to get something to
+  analyse. Function \code{\link{drop1.cca}} calls \code{anova.cca}
+  with \code{by = "margin"}, and \code{\link{add1.cca}} an analysis
+  for single terms additions, which can be used in automatic or
+  semiautomatic model building (see \code{\link{deviance.cca}}). }
 
 \examples{
 data(varespec)
@@ -171,15 +176,6 @@ data(varechem)
 vare.cca <- cca(varespec ~ Al + P + K, varechem)
 ## overall test
 anova(vare.cca)
-## Test for axes
-anova(vare.cca, by="axis", perm.max=500)
-## Sequential test for terms
-anova(vare.cca, by="terms", permu=200)
-## Marginal or Type III effects
-anova(vare.cca, by="margin")
-## Marginal test knows 'scope'
-anova(vare.cca, by = "m", scope="P")
 }
 \keyword{ multivariate }
 \keyword{ htest }
-
diff --git a/man/betadisper.Rd b/man/betadisper.Rd
index d25a5bf..2c02b13 100644
--- a/man/betadisper.Rd
+++ b/man/betadisper.Rd
@@ -111,7 +111,7 @@ betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
   parts respectively. This is equation (3) in Anderson (2006). If the
   imaginary part is greater in magnitude than the real part, then we
   would be taking the square root of a negative value, resulting in
-  NaN. From \pkg{vegan} 1.12-12 \code{betadisper} takes the absolute
+  NaN.  Function takes the absolute
   value of the real distance minus the imaginary distance, before
   computing the square root. This is in line with the behaviour of Marti
   Anderson's PERMDISP2 programme. 
@@ -140,7 +140,7 @@ betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
   (Anderson \emph{et al} 2006). Function \code{\link{betadiver}}
   provides some popular dissimilarity measures for this purpose.
 
-  As noted in passing by Anderson (2001) and in a related
+  As noted in passing by Anderson (2006) and in a related
   context by O'Neill (2000), estimates of dispersion around a
   central location (median or centroid) that is calculated from the same data
   will be biased downward. This bias matters most when comparing diversity
@@ -200,9 +200,6 @@ betadisper(d, group, type = c("median","centroid"), bias.adjust = FALSE)
   analysis gives the correct error rates.
 }
 \references{
-  Anderson, M. J. (2001) A new method for non-parametric multivariate 
-  analysis of variance. \emph{Austral Ecology} \strong{26}, 32--46.
-
   Anderson, M.J. (2006) Distance-based tests for homogeneity of
   multivariate dispersions. \emph{Biometrics} \strong{62}, 245--253.
 
@@ -240,7 +237,7 @@ mod
 anova(mod)
 
 ## Permutation test for F
-permutest(mod, pairwise = TRUE)
+permutest(mod, pairwise = TRUE, permutations = 99)
 
 ## Tukey's Honest Significant Differences
 (mod.HSD <- TukeyHSD(mod))
@@ -279,7 +276,7 @@ groups[c(2,20)] <- NA
 dis[c(2, 20)] <- NA
 mod2 <- betadisper(dis, groups) ## warnings
 mod2
-permutest(mod2, control = how(nperm = 100))
+permutest(mod2, permutations = 99)
 anova(mod2)
 plot(mod2)
 boxplot(mod2)
@@ -288,7 +285,7 @@ plot(TukeyHSD(mod2))
 ## Using group centroids
 mod3 <- betadisper(dis, groups, type = "centroid")
 mod3
-permutest(mod3, control = how(nperm = 100))
+permutest(mod3, permutations = 99)
 anova(mod3)
 plot(mod3)
 boxplot(mod3)
diff --git a/man/bioenv.Rd b/man/bioenv.Rd
index faefe00..34414d1 100644
--- a/man/bioenv.Rd
+++ b/man/bioenv.Rd
@@ -3,6 +3,7 @@
 \alias{bioenv.default}
 \alias{bioenv.formula}
 \alias{summary.bioenv}
+\alias{bioenvdist}
 
 \title{Best Subset of Environmental Variables with
   Maximum (Rank) Correlation with Community Dissimilarities }
@@ -13,8 +14,11 @@
 }
 \usage{
 \method{bioenv}{default}(comm, env, method = "spearman", index = "bray",
-       upto = ncol(env), trace = FALSE, partial = NULL, ...)
+       upto = ncol(env), trace = FALSE, partial = NULL, 
+       metric = c("euclidean", "mahalanobis", "manhattan", "gower"),
+       parallel = getOption("mc.cores"), ...)
 \method{bioenv}{formula}(formula, data, ...)
+bioenvdist(x, which = "best")
 }
 
 \arguments{
@@ -29,19 +33,44 @@
   \item{trace}{Trace the calculations }
   \item{partial}{Dissimilarities partialled out when inspecting
     variables in \code{env}.}
+  \item{metric}{Metric used for computing environmental distances. See 
+    Details.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
+  \item{x}{\code{bioenv} result object.}
+  \item{which}{The number of the model for which the environmental
+    distances are evaluated, or the \code{"best"} model.}
   \item{...}{Other arguments passed to \code{\link{cor}}.}
 }
 \details{
+  
   The function calculates a community dissimilarity matrix using
   \code{\link{vegdist}}.  Then it selects all possible subsets of
   environmental variables, \code{\link{scale}}s the variables, and
   calculates Euclidean distances for this subset using
-  \code{\link{dist}}.  Then it finds the correlation between community
-  dissimilarities and environmental distances, and for each size of
-  subsets, saves the best result. 
-  There are \eqn{2^p-1} subsets of \eqn{p} variables, and an exhaustive
-  search may take a very, very, very long time (parameter \code{upto} offers a
-  partial relief). 
+  \code{\link{dist}}.  The function finds the correlation between
+  community dissimilarities and environmental distances, and for each
+  size of subsets, saves the best result.  There are \eqn{2^p-1}
+  subsets of \eqn{p} variables, and an exhaustive search may take a
+  very, very, very long time (parameter \code{upto} offers a partial
+  relief).
+
+  The argument \code{metric} defines distances in the given set of
+  environmental variables.  With \code{metric = "euclidean"}, the
+  variables are scaled to unit variance and Euclidean distances are
+  calculated. With \code{metric = "mahalanobis"}, the Mahalanobis
+  distances are calculated: in addition to scaling to unit variance,
+  the matrix of the current set of environmental variables is also
+  made orthogonal (uncorrelated). With \code{metric = "manhattan"},
+  the variables are scaled to unit range and Manhattan distances are
+  calculated, so that the distances are sums of differences of
+  environmental variables.  With \code{metric = "gower"}, the Gower
+  distances are calculated using function
+  \code{\link[cluster]{daisy}}. This allows also using factor
+  variables, but with continuous variables the results are equal to
+  \code{metric = "manhattan"}.
 
   The function can be called with a model \code{\link{formula}} where
   the LHS is the data matrix and RHS lists the environmental variables.
@@ -53,6 +82,10 @@
   class \code{\link{dist}}. The
   \code{partial} item can be used with any correlation \code{method},
   but it is strictly correct only for Pearson.
+
+  Function \code{bioenvdist} recalculates the environmental distances
+  used within the function. The default is to calculate distances for
+  the best model, but the number of any model can be given.
   
   Clarke & Ainsworth (1993) suggested this method to be used for
   selecting the best subset of environmental variables in interpreting
@@ -68,10 +101,12 @@
   Windows).  In addition, Clarke & Ainsworth suggested a novel method of
   rank correlation which is not available in the current function.
 }
+
 \value{
   The function returns an object of class \code{bioenv} with a
   \code{summary} method.
 }
+
 \references{
   Clarke, K. R & Ainsworth, M. 1993. A method of linking multivariate
   community structure to environmental variables. \emph{Marine Ecology
@@ -79,17 +114,17 @@
 }
 \author{ Jari Oksanen }
 
-\note{
-  If you want to study the \sQuote{significance} of \code{bioenv}
+\note{ If you want to study the \sQuote{significance} of \code{bioenv}
   results, you can use function \code{\link{mantel}} or
   \code{\link{mantel.partial}} which use the same definition of
-  correlation. 
-  However, \code{bioenv} standardizes environmental variables to unit standard
-  deviation using function \code{\link{scale}} and you must do the same
-  in \code{\link{mantel}} for comparable results. Further, \code{bioenv}
-  selects variables to maximize the Mantel correlation, and significance
-  tests based on \emph{a priori} selection of variables are biased. 
-  }
+  correlation.  However, \code{bioenv} standardizes environmental
+  variables depending on the used metric, and you must do the same in
+  \code{\link{mantel}} for comparable results (the standardized data are
+  returned as item \code{x} in the result object). It is safest to use
+  \code{bioenvdist} to extract the environmental distances that really
+  were used within \code{bioenv}. NB., \code{bioenv} selects variables
+  to maximize the Mantel correlation, and significance tests based on
+  \emph{a priori} selection of variables are biased.  }
 
 \seealso{\code{\link{vegdist}}, \code{\link{dist}}, \code{\link{cor}}
   for underlying routines, \code{\link{monoMDS}} and
diff --git a/man/cca.object.Rd b/man/cca.object.Rd
index 05e0346..a83241a 100644
--- a/man/cca.object.Rd
+++ b/man/cca.object.Rd
@@ -95,10 +95,11 @@
      \code{CCA} components. Only in \code{CCA}.}
     \item{\code{tot.chi}}{Total inertia or the sum of all eigenvalues of the
       component.}
-    \item{\code{imaginary.chi}, \code{imaginary.rank}}{The sum and
-     rank (number) of negative eigenvalues in
-     \code{\link{capscale}}. Only in \code{CA} and only if negative
-     eigenvalues were found in \code{\link{capscale}}.}
+    \item{\code{imaginary.chi}, \code{imaginary.rank},
+     \code{imaginary.u.eig}}{The sum, rank (number) of negative
+     eigenvalues and scaled site scores for imaginary axes in
+     \code{\link{capscale}}. Only in the \code{CA} component and only
+     if negative eigenvalues were found in \code{\link{capscale}}.}
     \item{\code{u}}{(Weighted) orthonormal site scores.  Please note that
       scaled scores are not stored in the \code{cca} object, but they
       are made when the object is accessed with functions like
@@ -106,22 +107,18 @@
       \code{\link{plot.cca}}, or their \code{rda} variants.   Only in
       \code{CCA} and \code{CA}.  In the \code{CCA} component these are
       the so-called linear combination scores. }
-    \item{\code{u.eig}}{\code{u} scaled by eigenvalues.  There is no
-      guarantee that any \code{.eig} variants of scores will be kept in
-      the future releases.}
+    
     \item{\code{v}}{(Weighted) orthonormal species scores.  If missing species
       were omitted from the analysis, this will contain
       attribute \code{\link{na.action}} that lists the
       omitted species. Only in \code{CCA} and \code{CA}.}
-    \item{\code{v.eig}}{\code{v} weighted by eigenvalues.}
+    
     \item{\code{wa}}{Site scores found as weighted averages (\code{cca}) or
       weighted sums (\code{rda}) of 
       \code{v} with weights \code{Xbar}, but the multiplying effect of
       eigenvalues  removed. These often are known as WA scores in
       \code{cca}. Only in  \code{CCA}.}
-    \item{\code{wa.eig}}{The direct result of weighted averaging or weighted
-      summation  (matrix multiplication)
-      with the resulting eigenvalue inflation.}
+
     \item{\code{wa.excluded, u.excluded}}{WA scores for rows removed by
       \code{na.action = na.exclude} in \code{CCA} and \code{CA}
       components if these could be calculated.}
@@ -176,6 +173,16 @@
       \code{\link{capscale}}, section \dQuote{Notes}.}
   }
 }
+
+\note{
+  In old versions of \pkg{vegan} the object also included scores
+  scaled by eigenvalues (\code{u.eig}, \code{v.eig} and \code{wa.eig}),
+  but these were removed in \pkg{vegan} 2.2-0. The scores are scaled
+  when they are accessed with \code{\link{scores}} function. It is
+  advisable to always use \code{\link{scores}} in accessing the
+  results instead of directly accessing the elements of the
+  \code{cca} object.
+}
   
 \seealso{The description here provides a hacker's interface.  User
   level functions for further analysis and handling of \code{cca}
diff --git a/man/clamtest.Rd b/man/clamtest.Rd
index 3d65d76..c6417c5 100644
--- a/man/clamtest.Rd
+++ b/man/clamtest.Rd
@@ -1,150 +1,151 @@
-\name{clamtest}
-\alias{clamtest}
-\alias{summary.clamtest}
-\alias{plot.clamtest}
-
-\title{
-Multinomial Species Classification Method (CLAM)
-}
-
-\description{
-The CLAM statistical approach for classifying generalists and
-specialists in two distinct habitats is described in Chazdon et al. (2011).
-}
-\usage{
-clamtest(comm, groups, coverage.limit = 10, specialization = 2/3, 
-   npoints = 20, alpha = 0.05/20)
-\method{summary}{clamtest}(object, ...)
-\method{plot}{clamtest}(x, xlab, ylab, main,  pch = 21:24, col.points = 1:4, 
-   col.lines = 2:4, lty = 1:3, position = "bottomright", ...)
-}
-\arguments{
-  \item{comm}{
-Community matrix, consisting of counts.
-}
-  \item{groups}{
-A vector identifying the two habitats. Must have exactly
-two unique values or levels. Habitat IDs in the grouping vector
-must match corresponding rows in the community matrix \code{comm}.
-}
-  \item{coverage.limit}{
-Integer, below this limit the sample coverage based correction
-is applied to rare species. Sample coverage is calculated separately 
-for the two habitats. Sample relative abundances are used for species 
-with higher than or equal to \code{coverage.limit} total counts per habitat.
-}
-  \item{specialization}{
-Numeric, specialization threshold value between 0 and 1.
-The value of \eqn{2/3} represents \sQuote{supermajority} rule,
-while a value of \eqn{1/2} represents a \sQuote{simple majority} rule
-to assign shared species as habitat specialists.
-}
-  \item{npoints}{
-Integer, number of points used to determine the boundary lines
-in the plots.
-}
-\item{alpha}{ Numeric, nominal significance level for individual
-  tests.  The default value reduces the conventional limit of
-  \eqn{0.05} to account for overdispersion and multiple testing for
-  several species simultaneously. However, the is no firm reason for
-  exactly this limit.  }
-  \item{x, object}{
-Fitted model object of class \code{"clamtest"}.
-}
-  \item{xlab, ylab}{
-Labels for the plot axes.
-}
-  \item{main}{
-Main title of the plot.
-}
-  \item{pch, col.points}{
-Symbols and colors used in plotting species groups.
-}
-  \item{lty, col.lines}{
-Line types and colors for boundary lines in plot to separate species groups.
-}
-  \item{position}{
-Position of figure legend, see \code{\link{legend}} for specification details.
-Legend not shown if \code{position = NULL}.
-}
-  \item{\dots}{
-Additional arguments passed to methods.
-}
-}
-
-\details{ The method uses a multinomial model based on estimated
-  species relative abundance in two habitats (A, B). It minimizes bias
-  due to differences in sampling intensities between two habitat types
-  as well as bias due to insufficient sampling within each
-  habitat. The method permits a robust statistical classification of
-  habitat specialists and generalists, without excluding rare species
-  \emph{a priori} (Chazdon et al. 2011).  Based on a user-defined
-  \code{specialization} threshold, the model classifies species into
-  one of four groups: (1) generalists; (2) habitat A specialists; (3)
-  habitat B specialists; and (4) too rare to classify with confidence.
-  } 
-
-\value{ A data frame (with class attribute \code{"clamtest"}),
-  with columns: 
-  \itemize{ 
-    \item{\code{Species}:}{ species name (column names from \code{comm}),} 
-    \item{\code{Total_*A*}:}{ total count in habitat A,} 
-    \item{\code{Total_*B*}:}{ total count in habitat B,} 
-    \item{\code{Classes}:}{ species classification, a factor with
-       levels \code{Generalist}, \code{Specialist_*A*},
-       \code{Specialist_*B*}, and \code{Too_rare}.}  
-}
-  \code{*A*} and \code{*B*} are placeholders for habitat names/labels found in the
-  data.
-
-The \code{summary} method returns descriptive statistics of the results.
-The \code{plot} method returns values invisibly and produces a bivariate
-scatterplot of species total abundances in the two habitats. Symbols and
-boundary lines are shown for species groups.
-}
-\references{
-Chazdon, R. L., Chao, A., Colwell, R. K., Lin, S.-Y., Norden, N., 
-Letcher, S. G., Clark, D. B., Finegan, B. and Arroyo J. P.(2011). 
-A novel statistical method for classifying habitat
-generalists and specialists. \emph{Ecology} \bold{92}, 1332--1343.
-}
-\author{
-Peter Solymos \email{solymos at ualberta.ca}
-}
-\note{
-The code was tested against standalone CLAM software provided
-on the website of Anne Chao (\url{http://chao.stat.nthu.edu.tw/softwarece.html});
-minor inconsistencies were found, especially for finding the
-threshold for 'too rare' species.
-These inconsistencies are probably due to numerical differences between the
-two implementation. The current \R implementation uses 
-root finding for iso-lines instead of iterative search.
-
-The original method (Chazdon et al. 2011) has two major problems:
-\enumerate{
-  
-  \item It assumes that the error distribution is multinomial. This is
-    a justified choice if individuals are freely distributed, and
-    there is no over-dispersion or clustering of individuals. In most
-    ecological data, the variance is much higher than multinomial
-    assumption, and therefore test statistic are too optimistic.
-
-  \item The original authors suggest that multiple testing adjustment
-    for multiple testing should be based on the number of points
-    (\code{npoints}) used to draw the critical lines on the plot,
-    whereas the adjustment should be based on the number of tests (i.e.,
-    tested species). The function uses the same numerical values as
-    the original paper, but there is no automatic connection between
-    \code{npoints} and \code{alpha} arguments, but you must work out
-    the adjustment yourself.
-}
-}
-\examples{
-data(mite)
-data(mite.env)
-sol <- clamtest(mite, mite.env$Shrub=="None", alpha=0.005)
-summary(sol)
-head(sol)
-plot(sol)
-}
-\keyword{ htest }
+\name{clamtest}
+\alias{clamtest}
+\alias{summary.clamtest}
+\alias{plot.clamtest}
+
+\title{
+Multinomial Species Classification Method (CLAM)
+}
+
+\description{
+The CLAM statistical approach for classifying generalists and
+specialists in two distinct habitats is described in Chazdon et al. (2011).
+}
+\usage{
+clamtest(comm, groups, coverage.limit = 10, specialization = 2/3, 
+   npoints = 20, alpha = 0.05/20)
+\method{summary}{clamtest}(object, ...)
+\method{plot}{clamtest}(x, xlab, ylab, main,  pch = 21:24, col.points = 1:4, 
+   col.lines = 2:4, lty = 1:3, position = "bottomright", ...)
+}
+\arguments{
+  \item{comm}{
+Community matrix, consisting of counts.
+}
+  \item{groups}{
+A vector identifying the two habitats. Must have exactly
+two unique values or levels. Habitat IDs in the grouping vector
+must match corresponding rows in the community matrix \code{comm}.
+}
+  \item{coverage.limit}{
+Integer, the sample coverage based correction 
+is applied to rare species with counts below this limit. 
+Sample coverage is calculated separately 
+for the two habitats. Sample relative abundances are used for species 
+with higher than or equal to \code{coverage.limit} total counts per habitat.
+}
+  \item{specialization}{
+Numeric, specialization threshold value between 0 and 1.
+The value of \eqn{2/3} represents \sQuote{supermajority} rule,
+while a value of \eqn{1/2} represents a \sQuote{simple majority} rule
+to assign shared species as habitat specialists.
+}
+  \item{npoints}{
+Integer, number of points used to determine the boundary lines
+in the plots.
+}
+\item{alpha}{ Numeric, nominal significance level for individual
+  tests.  The default value reduces the conventional limit of
+  \eqn{0.05} to account for overdispersion and multiple testing for
+  several species simultaneously. However, there is no firm reason for
+  exactly this limit.  }
+  \item{x, object}{
+Fitted model object of class \code{"clamtest"}.
+}
+  \item{xlab, ylab}{
+Labels for the plot axes.
+}
+  \item{main}{
+Main title of the plot.
+}
+  \item{pch, col.points}{
+Symbols and colors used in plotting species groups.
+}
+  \item{lty, col.lines}{
+Line types and colors for boundary lines in plot to separate species groups.
+}
+  \item{position}{
+Position of figure legend, see \code{\link{legend}} for specification details.
+Legend not shown if \code{position = NULL}.
+}
+  \item{\dots}{
+Additional arguments passed to methods.
+}
+}
+
+\details{ The method uses a multinomial model based on estimated
+  species relative abundance in two habitats (A, B). It minimizes bias
+  due to differences in sampling intensities between two habitat types
+  as well as bias due to insufficient sampling within each
+  habitat. The method permits a robust statistical classification of
+  habitat specialists and generalists, without excluding rare species
+  \emph{a priori} (Chazdon et al. 2011).  Based on a user-defined
+  \code{specialization} threshold, the model classifies species into
+  one of four groups: (1) generalists; (2) habitat A specialists; (3)
+  habitat B specialists; and (4) too rare to classify with confidence.
+  } 
+
+\value{ A data frame (with class attribute \code{"clamtest"}),
+  with columns: 
+  \itemize{ 
+    \item{\code{Species}:}{ species name (column names from \code{comm}),} 
+    \item{\code{Total_*A*}:}{ total count in habitat A,} 
+    \item{\code{Total_*B*}:}{ total count in habitat B,} 
+    \item{\code{Classes}:}{ species classification, a factor with
+       levels \code{Generalist}, \code{Specialist_*A*},
+       \code{Specialist_*B*}, and \code{Too_rare}.}  
+}
+  \code{*A*} and \code{*B*} are placeholders for habitat names/labels found in the
+  data.
+
+The \code{summary} method returns descriptive statistics of the results.
+The \code{plot} method returns values invisibly and produces a bivariate
+scatterplot of species total abundances in the two habitats. Symbols and
+boundary lines are shown for species groups.
+}
+\references{
+Chazdon, R. L., Chao, A., Colwell, R. K., Lin, S.-Y., Norden, N., 
+Letcher, S. G., Clark, D. B., Finegan, B. and Arroyo J. P.(2011). 
+A novel statistical method for classifying habitat
+generalists and specialists. \emph{Ecology} \bold{92}, 1332--1343.
+}
+\author{
+Peter Solymos \email{solymos at ualberta.ca}
+}
+\note{
+The code was tested against standalone CLAM software provided
+on the website of Anne Chao (\url{http://chao.stat.nthu.edu.tw/softwarece.html});
+minor inconsistencies were found, especially for finding the
+threshold for 'too rare' species.
+These inconsistencies are probably due to numerical differences between the
+two implementations. The current \R implementation uses 
+root finding for iso-lines instead of iterative search.
+
+The original method (Chazdon et al. 2011) has two major problems:
+\enumerate{
+  
+  \item It assumes that the error distribution is multinomial. This is
+    a justified choice if individuals are freely distributed, and
+    there is no over-dispersion or clustering of individuals. In most
+    ecological data, the variance is much higher than multinomial
+    assumption, and therefore test statistics are too optimistic.
+
+  \item The original authors suggest that the adjustment for multiple
+    testing should be based on the number of points
+    (\code{npoints}) used to draw the critical lines on the plot,
+    whereas the adjustment should be based on the number of tests (i.e.,
+    tested species). The function uses the same numerical values as
+    the original paper, but there is no automatic connection between
+    \code{npoints} and \code{alpha} arguments, so you must work out
+    the adjustment yourself.
+}
+}
+\examples{
+data(mite)
+data(mite.env)
+sol <- with(mite.env, clamtest(mite, Shrub=="None", alpha=0.005))
+summary(sol)
+head(sol)
+plot(sol)
+}
+\keyword{ htest }
diff --git a/man/commsim.Rd b/man/commsim.Rd
new file mode 100644
index 0000000..02cc443
--- /dev/null
+++ b/man/commsim.Rd
@@ -0,0 +1,413 @@
+\encoding{UTF-8}
+\name{commsim}
+\alias{commsim}
+\alias{make.commsim}
+\alias{print.commsim}
+\title{
+Create an Object for Null Model Algorithms
+}
+\description{
+The \code{commsim} function can be used to feed Null Model algorithms into
+\code{\link{nullmodel}} analysis.
+The \code{make.commsim} function returns various predefined algorithm types
+(see Details).
+These functions represent low level interface for community null model
+infrastructure in \pkg{vegan} with the intent of extensibility,
+and less emphasis on direct use by users.
+}
+\usage{
+commsim(method, fun, binary, isSeq, mode)
+make.commsim(method)
+\method{print}{commsim}(x, ...)
+}
+\arguments{
+  \item{method}{
+Character, name of the algorithm.
+}
+  \item{fun}{
+A function. For possible formal arguments of this function
+see Details.
+}
+  \item{binary}{
+Logical, if the algorithm applies to presence-absence or count matrices.
+}
+  \item{isSeq}{
+Logical, if the algorithm is sequential (needs burnin) or not.
+}
+  \item{mode}{
+Character, storage mode of the community matrix, either 
+\code{"integer"} or \code{"double"}.
+}
+  \item{x}{
+An object of class \code{commsim}.
+}
+  \item{\dots}{
+Additional arguments.
+}
+}
+\details{
+The function \code{fun} must return an array of \code{dim(nr, nc, n)},
+and must take some of the following arguments:
+\itemize{
+  \item{\code{x}: }{input matrix,}
+  \item{\code{n}: }{number of permuted matrices in output,}
+  \item{\code{nr}: }{number of rows,}
+  \item{\code{nc}: }{number of columns,}
+  \item{\code{rs}: }{vector of row sums,}
+  \item{\code{cs}: }{vector of column sums,}
+  \item{\code{rf}: }{vector of row frequencies (non-zero cells),}
+  \item{\code{cf}: }{vector of column frequencies (non-zero cells),}
+  \item{\code{s}: }{total sum of \code{x},}
+  \item{\code{fill}: }{matrix fill (non-zero cells),}
+  \item{\code{thin}: }{thinning value for sequential algorithms,}
+  \item{\code{...}: }{additional arguments.}
+}
+
+  Several null model algorithm are pre-defined and can be called by
+  their name. The predefined algorithms are described in detail in the
+  following chapters. The binary null models produce matrices of zeros
+  (absences) and ones (presences) also when input matrix is
+  quantitative. There are two types of quantitative data: Counts are
+  integers with a natural unit so that individuals can be shuffled, but
+  abundances can have real (floating point) values and do not have a
+  natural subunit for shuffling. All quantitative models can handle
+  counts, but only some are able to handle real values. Some of the null
+  models are sequential so that the next matrix is derived from the
+  current one. This makes models dependent on each other, and usually
+  you must thin these matrices and study the sequences for stability:
+  see \code{oecosimu} for details and instructions.
+
+  See Examples for structural constraints imposed by each algorithm and
+  defining your own null model.
+
+}
+%% commsimulator
+
+\section{Binary null models}{
+
+  All binary null models retain fill: the number of presences or
+  conversely the number of absences. The classic models may also
+  preserve column (species) frequencies (\code{c0}) or row frequencies
+  or species richness of each site (\code{r0}) and take into account
+  commonness and rarity of species (\code{r1}, \code{r2}).
+  \code{quasiswap} and \code{backtracking} preserve both row and column
+  frequencies. Two first of these are sequential but the two latter are
+  non-sequential and produce independent matrices. Basic algorithms are
+  reviewed by Wright et al. (1998).
+
+\itemize{
+  \item{\code{"r00"}: }{non-sequential algorithm for binary matrices
+    that only  maintains the number of presences (fill).}
+
+  \item{\code{"r0", "r0_old"}: }{non-sequential algorithm for binary
+    matrices that maintains the site (row) frequencies.
+    Methods \code{"r0"} and \code{"r0_old"} implement the
+    same method, but use different random number sequences; use
+    \code{"r0_old"} if you want to reproduce results in \pkg{vegan
+    2.0-0} or older using \code{commsimulator} (now deprecated).}
+
+  \item{\code{"r1"}: }{non-sequential algorithm for binary matrices
+    that maintains the site (row) frequencies, but uses column marginal
+    frequencies as probabilities of selecting species.}
+
+  \item{\code{"r2"}: }{non-sequential algorithm for binary matrices
+    that maintains the site (row) frequencies, and uses squared column
+    sums as probabilities of selecting species.}
+  
+  \item{\code{"c0"}: }{non-sequential algorithm for binary matrices
+    that maintains species frequencies (Jonsson 2001). }
+  
+  \item{\code{"swap"}: }{sequential algorithm for binary matrices that
+    changes the matrix structure, but does not influence marginal sums
+    (Gotelli & Entsminger 2003).  This inspects \eqn{2 \times 2}{2 by
+    2} submatrices so long that a swap can be done.}
+  
+  \item{\code{"tswap"}: }{sequential algorithm for binary matrices.
+    Same as the \code{"swap"} algorithm, but it tries a fixed
+    number of times and performs zero to many swaps at one step
+    (according to the \code{thin} argument in a later call). This
+    approach was suggested by \enc{Miklós}{Miklos} & Podani (2004)
+    because they found that ordinary swap may lead to biased
+    sequences, since some columns or rows may be more easily swapped.}
+
+  \item{\code{"quasiswap"}: }{non-sequential algorithm for binary
+    matrices that implements a method where matrix is first filled
+    honouring row and column totals, but with integers that may be
+    larger than one.  Then the method inspects random \eqn{2 \times
+    2}{2 by 2} matrices and performs a quasiswap on them. Quasiswap is
+    similar to ordinary swap, but it can reduce numbers above one
+    to ones maintaining marginal totals (\enc{Miklós}{Miklos} & Podani
+    2004).  This is the recommended algorithm if you want to retain both
+    species and row frequencies.}
+
+  \item{\code{"backtracking"}: }{non-sequential algorithm for binary
+    matrices that implements a filling method with constraints both
+    for row and column frequencies (Gotelli & Entsminger 2001).  The
+    matrix is first filled randomly using row and column frequencies
+    as probabilities. Typically row and column sums are reached before
+    all incidences are filled in. After that begins "backtracking",
+    where some of the points are removed, and then filling is started
+    again, and this backtracking is done so many times that all
+    incidences will be filled into matrix. The function may be very slow
+    for some matrices.}
+}
+}
+
+\section{Quantitative Models for Counts with Fixed Marginal Sums}{
+
+  These models shuffle individuals of counts but keep marginal sums
+  fixed, but marginal frequencies are not preserved. Algorithm
+  \code{r2dtable} uses standard \R function \code{\link{r2dtable}} also
+  used for simulated \eqn{P}-values in \code{\link{chisq.test}}.
+  Algorithm \code{quasiswap_count} uses the same, but retains the
+  original fill. Typically this means increasing numbers of zero cells
+  and the result is zero-inflated with respect to \code{r2dtable}. 
+
+\itemize{
+
+  \item{\code{"r2dtable"}: }{non-sequential algorithm for count
+    matrices.  This algorithm keeps matrix sum and row/column sums
+    constant. Based on \code{\link{r2dtable}}.}
+
+  \item{\code{"quasiswap_count"}: }{non-sequential algorithm for count
+    matrices.  This algorithm is similar as Carsten Dormann's
+    \code{\link[bipartite]{swap.web}} function in the package
+    \pkg{bipartite}. First, a random matrix is generated by the
+    \code{\link{r2dtable}} function retaining row and column sums.  Then
+    the original matrix fill is reconstructed by sequential steps to
+    increase or decrease matrix fill in the random matrix. These steps
+    are based on swapping \eqn{2 \times 2}{2 x 2} submatrices (see
+    \code{"swap_count"} algorithm for details) to maintain row and
+    column totals. }
+}
+}
+
+\section{Quantitative Swap Models}{
+
+  Quantitative swap models are similar to binary \code{swap}, but they
+  swap the largest permissible value. The models in this section all
+  maintain the fill and perform a quantitative swap only if this can be
+  done without changing the fill. Single step of swap often changes the
+  matrix very little. In particular, if cell counts are variable, high
+  values change very slowly. Checking the chain stability and
+  independence is even more crucial than in binary swap, and very strong
+  \code{thin}ning is often needed. These models should never be used
+  without inspecting their properties for the current data.
+
+ \itemize{ 
+
+   \item{\code{"swap_count"}: }{sequential algorithm for count matrices.
+    This algorithm find \eqn{2 \times 2}{2 x 2} submatrices that can be
+    swapped leaving column and row totals and fill unchanged. The
+    algorithm finds the largest value in the submatrix that can be
+    swapped (\eqn{d}). Swap means that the values in diagonal or
+    antidiagonal positions are decreased by \eqn{d}, while remaining
+    cells are increased by \eqn{d}. A swap is made only if fill does not
+    change.  }
+
+   \item{\code{"abuswap_r"}: }{sequential algorithm for count or
+    nonnegative real valued matrices with fixed row frequencies (see
+    also \code{\link{permatswap}}).  The algorithm is similar to
+    \code{swap_count}, but uses different swap value for each row of the
+    \eqn{2 \times 2}{2 x 2} submatrix. Each step changes the
+    corresponding column sums, but honours matrix fill, row sums, and
+    row/column frequencies (Hardy 2008; randomization scheme 2x).}
+
+  \item{\code{"abuswap_c"}: }{sequential algorithm for count or
+    nonnegative real valued matrices with fixed column frequencies (see
+    also \code{\link{permatswap}}).  The algorithm is similar to the
+    previous one, but operates on columns of \eqn{2 \times 2}{2 x 2}
+    submatrices. Each step changes the corresponding row sums, but
+    honours matrix fill,
+    column sums, and row/column frequencies (Hardy 2008; randomization
+    scheme 3x).}  }
+}
+
+\section{Quantitative Swap and Shuffle Models}{
+
+  Quantitative Swap and Shuffle methods (\code{swsh} methods) preserve
+  fill and column and row frequencies, and also either row or column
+  sums. The methods first perform a binary \code{quasiswap} and then
+  shuffle original quantitative data to non-zero cells. The
+  \code{samp} methods shuffle original non-zero cell values and can be
+  used also with non-integer data. The \code{both} methods
+  redistribute individuals randomly among non-zero cells and can only
+  be used with integer data. The shuffling is either free over the
+  whole matrix, or within rows (\code{r} methods) or within columns
+  (\code{c} methods). Shuffling within a row preserves row sums, and
+  shuffling within a column preserves column sums.
+
+\itemize{ 
+
+  \item{\code{"swsh_samp"}: }{non-sequential algorithm for
+    quantitative data (either integer counts or non-integer values).
+    Original non-zero values are shuffled.}
+
+  \item{\code{"swsh_both"}: }{non-sequential algorithm for count data. 
+    Individuals are shuffled freely over non-zero cells.}
+
+  \item{\code{"swsh_samp_r"}: }{non-sequential algorithm for
+    quantitative data.  Non-zero values (samples) are shuffled
+    separately for each row.}
+
+  \item{\code{"swsh_samp_c"}: }{non-sequential algorithm for
+    quantitative data.  Non-zero values (samples) are shuffled
+    separately for each column.}
+
+  \item{\code{"swsh_both_r"}: }{non-sequential algorithm for count matrices. 
+    Individuals are shuffled freely for non-zero values within each row.}
+
+  \item{\code{"swsh_both_c"}: }{non-sequential algorithm for count matrices. 
+    Individuals are shuffled freely for non-zero values within each column.}
+}
+}
+
+\section{Quantitative Shuffle Methods}{
+
+  Quantitative shuffle methods are generalizations of binary models
+  \code{r00}, \code{r0} and \code{c0}.  The \code{_ind} methods shuffle
+  individuals so that the grand sum, row sum or column sums are similar
+  as in the observed matrix. These methods are similar as
+  \code{r2dtable} but with still slacker constraints on marginal
+  sums. The \code{_samp} and \code{_both} methods first perform the
+  corresponding binary model with similar restrictions on marginal
+  frequencies, and then distribute quantitative values over non-zero
+  cells. The \code{_samp} models shuffle original cell values and can
+  therefore handle also non-count real values. The \code{_both} models
+  shuffle individuals among non-zero values. The shuffling is over the
+  whole matrix in \code{r00_}, and within row in \code{r0_} and within
+  column in \code{c0_} in all cases.
+
+\itemize{
+  \item{\code{"r00_ind"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps total sum constant,
+    individuals are shuffled among cells of the matrix.}
+
+  \item{\code{"r0_ind"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps row sums constant,
+    individuals are shuffled among cells of each row of the matrix.}
+
+  \item{\code{"c0_ind"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps column sums constant,
+    individuals are shuffled among cells of each column of the matrix.}
+
+  \item{\code{"r00_samp"}: }{non-sequential algorithm for count 
+    or nonnegative real valued (\code{mode = "double"}) matrices. 
+    This algorithm keeps total sum constant,
+    cells of the matrix are shuffled.}
+
+  \item{\code{"r0_samp"}: }{non-sequential algorithm for count 
+    or nonnegative real valued (\code{mode = "double"}) matrices. 
+    This algorithm keeps row sums constant,
+    cells within each row are shuffled.}
+
+  \item{\code{"c0_samp"}: }{non-sequential algorithm for count 
+    or nonnegative real valued (\code{mode = "double"}) matrices. 
+    This algorithm keeps column sums constant,
+    cells within each column are shuffled.}
+
+  \item{\code{"r00_both"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps total sum constant,
+    cells and individuals among cells of the matrix are shuffled.}
+
+  \item{\code{"r0_both"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps total sum constant,
+    cells and individuals among cells of each row are shuffled.}
+
+  \item{\code{"c0_both"}: }{non-sequential algorithm for count matrices. 
+    This algorithm keeps total sum constant,
+    cells and individuals among cells of each column are shuffled.}
+}
+}
+
+\value{
+An object of class \code{commsim} with elements 
+corresponding to the arguments (\code{method}, \code{binary}, 
+\code{isSeq}, \code{mode}, \code{fun}).
+
+If the input of \code{make.commsim} is a \code{commsim} object,
+it is returned without further evaluation. If this is not the case,
+the character \code{method} argument is matched against
+predefined algorithm names. An error message is issued
+if none such is found. If the \code{method} argument is missing,
+the function returns names of all currently available
+null model algorithms as a character vector.
+}
+\references{
+  Gotelli, N.J. & Entsminger, G.L. (2001). Swap and fill algorithms in
+  null model analysis: rethinking the knight's tour. \emph{Oecologia}
+  129, 281--291.
+
+  Gotelli, N.J. & Entsminger, G.L. (2003). Swap algorithms in null model
+  analysis. \emph{Ecology} 84, 532--535.
+
+  Hardy, O. J. (2008) Testing the spatial phylogenetic structure of
+  local communities: statistical performances of different null models
+  and test statistics on a locally neutral community.  \emph{Journal of
+  Ecology} 96, 914--926.
+
+  Jonsson, B.G. (2001) A null model for randomization tests of
+  nestedness in species assemblages. \emph{Oecologia} 127, 309--313.
+
+  \enc{Miklós}{Miklos}, I. & Podani, J. (2004). Randomization of
+  presence-absence matrices: comments and new algorithms. \emph{Ecology}
+  85, 86--92.
+
+  Patefield, W. M. (1981) Algorithm AS159.  An efficient method of
+  generating r x c tables with given row and column totals.
+  \emph{Applied Statistics} 30, 91--97.
+
+  Wright, D.H., Patterson, B.D., Mikkelson, G.M., Cutler, A. & Atmar,
+  W. (1998). A comparative analysis of nested subset patterns of species
+  composition. \emph{Oecologia} 113, 1--20.
+}
+
+\author{
+Jari Oksanen and Peter Solymos
+}
+
+\seealso{ See \code{\link{permatfull}}, \code{\link{permatswap}} for
+alternative specification of quantitative null models. Function
+\code{\link{oecosimu}} gives a higher-level interface for applying null
+models in hypothesis testing and analysis of models. Function
+\code{\link{nullmodel}} and \code{\link{simulate.nullmodel}} are used to
+generate arrays of simulated null model matrices.  }
+
+\examples{
+## write the r00 algorithm
+f <- function(x, n, ...) 
+    array(replicate(n, sample(x)), c(dim(x), n))
+(cs <- commsim("r00", fun=f, binary=TRUE, 
+    isSeq=FALSE, mode="integer"))
+
+## retrieving the sequential swap algorithm
+(cs <- make.commsim("swap"))
+
+## feeding a commsim object as argument
+make.commsim(cs)
+
+## structural constraints
+diagfun <- function(x, y) {
+    c(sum = sum(y) == sum(x),
+        fill = sum(y > 0) == sum(x > 0),
+        rowSums = all(rowSums(y) == rowSums(x)),
+        colSums = all(colSums(y) == colSums(x)),
+        rowFreq = all(rowSums(y > 0) == rowSums(x > 0)),
+        colFreq = all(colSums(y > 0) == colSums(x > 0)))
+}
+evalfun <- function(meth, x, n) {
+    m <- nullmodel(x, meth)
+    y <- simulate(m, nsim=n)
+    out <- rowMeans(sapply(1:dim(y)[3], 
+        function(i) diagfun(attr(y, "data"), y[,,i])))
+    z <- as.numeric(c(attr(y, "binary"), attr(y, "isSeq"),
+        attr(y, "mode") == "double"))
+    names(z) <- c("binary", "isSeq", "double")
+    c(z, out)
+}
+x <- matrix(rbinom(10*12, 1, 0.5)*rpois(10*12, 3), 12, 10)
+algos <- make.commsim()
+a <- t(sapply(algos, evalfun, x=x, n=10))
+print(as.table(ifelse(a==1,1,0)), zero.print = ".")
+}
+\keyword{ multivariate }
+\keyword{ datagen }
diff --git a/man/decostand.Rd b/man/decostand.Rd
index 1d520ee..7263fd3 100644
--- a/man/decostand.Rd
+++ b/man/decostand.Rd
@@ -10,7 +10,6 @@ methods for community ecologists.
 }
 \usage{
 decostand(x, method, MARGIN, range.global, logbase = 2, na.rm=FALSE, ...)
-
 wisconsin(x)
 }
 
@@ -87,7 +86,7 @@ wisconsin(x)
   \code{"decostand"} giving the name of applied standardization
   \code{"method"}.
 }
-\author{Jari Oksanen and Etienne \enc{Laliberté}{Laliberte}
+\author{Jari Oksanen, Etienne \enc{Laliberté}{Laliberte}
   (\code{method = "log"}).}
 \note{Common transformations can be made with standard \R functions.}
 
@@ -117,5 +116,5 @@ sptrans <- wisconsin(varespec)
 sptrans <- decostand(varespec, "chi.square")
 plot(procrustes(rda(sptrans), cca(varespec)))
 }
-\keyword{ multivariate}%-- one or more ...
+\keyword{ multivariate}
 \keyword{ manip }
diff --git a/man/density.adonis.Rd b/man/density.adonis.Rd
deleted file mode 100644
index c8acf6d..0000000
--- a/man/density.adonis.Rd
+++ /dev/null
@@ -1,114 +0,0 @@
-\name{density.adonis}
-\alias{density.adonis}
-\alias{density.anosim}
-\alias{density.mantel}
-\alias{density.mrpp}
-\alias{density.permutest.cca}
-\alias{density.protest}
-\alias{plot.vegandensity}
-\alias{densityplot.adonis}
-
-\title{
-  Kernel Density Estimation for Permutation Results in Vegan
-}
-
-\description{ 
-  The \code{density} functions can directly access the permutation
-  results of \pkg{vegan} functions, and \code{plot} can display the
-  densities. The \code{densityplot} method can access and display the
-  permutation results of functions that return permutations of several
-  statistics simultaneously.  
-}
-
-\usage{
-\method{density}{adonis}(x, ...)
-\method{plot}{vegandensity}(x, main = NULL, xlab = NULL, ylab = "Density", 
-   type = "l", zero.line = TRUE, obs.line = TRUE, ...)
-}
-
-\arguments{
-  \item{x}{The object to be handled. For \code{density} and
-     \code{densityplot} this is an object containing permutations. For
-     \code{plot} this is a result of \pkg{vegan} \code{density}
-     function.}
-  \item{main, xlab, ylab, type, zero.line}{Arguments of
-    \code{\link{plot.density}} and \code{\link[lattice]{densityplot}}
-    functions.}
-  \item{obs.line}{Draw vertical line for the observed
-    statistic. Logical value \code{TRUE} draws a red line, and
-    \code{FALSE} draws nothing. Alternatively, \code{obs.line} can be a
-    definition of the colour used for the line, either as a numerical
-    value from the \code{\link[grDevices]{palette}} or as the name of
-    the colour, or other normal definition of the colour.}
-  \item{\dots}{ Other arguments passed to the function. In
-    \code{density} these are passed to \code{\link{density.default}}.}
-}
-
-\details{ 
-
-  The \code{density} and \code{densityplot} function can directly access
-  permutation results of most \pkg{vegan} functions.  The \code{density}
-  function is identical to \code{\link{density.default}} and takes all
-  its arguments, but adds the observed statistic to the result as item
-  \code{"observed"}. The observed statistic is also put among the
-  permuted values so that the results are consistent with significance
-  tests. The \code{plot} method is similar to the default
-  \code{\link{plot.density}}, but can also add the observed statistic to
-  the graph as a vertical line.  The \code{densityplot} function is
-  based on the same function in the \pkg{lattice} package (see
-  \code{\link[lattice]{densityplot}}).
-
-  The density methods are available for \pkg{vegan} functions
-  \code{\link{adonis}}, \code{\link{anosim}}, \code{\link{mantel}},
-  \code{\link{mantel.partial}}, \code{\link{mrpp}},
-  \code{\link{permutest.cca}}, and \code{\link{protest}}.  The
-  \code{density} function for \code{\link{oecosimu}} is documented
-  separately, and it is also used for \code{\link{adipart}},
-  \code{\link{hiersimu}} and \code{\link{multipart}}.
-
-  All \pkg{vegan} \code{density} functions return an object of class
-  \code{"vegandensity"} inheriting from \code{\link{density}}, and can
-  be plotted with its \code{plot} method.  This is identical to the
-  standard \code{plot} of \code{densiy} objects, but can also add a
-  vertical line for the observed statistic.
-
-  Functions that can return several permuted statistics simultaneously
-  also have \code{\link[lattice]{densityplot}} method
-  (\code{\link{adonis}}, \code{\link{oecosimu}} and diversity 
-  partitioning functions based on \code{oecosimu}).  The standard
-  \code{\link{density}} can only handle univariate data, and a warning
-  is issued if the function is used for a model with several observed
-  statistics.  The \code{\link[lattice]{densityplot}} method is available
-  for \code{\link{adonis}} and \code{\link{oecosimu}} (documented
-  separately). NB, there is no \code{density} method for
-  \code{\link{anova.cca}}, but only for \code{\link{permutest.cca}}.
-
-}
-
-\value{
-  The \code{density} function returns the standard \code{\link{density}}
-  result object with one new item: \code{"observed"} for the observed
-  value of the statistic. The functions have a specific \code{plot}
-  method, but otherwise they use methods for
-  \code{\link{density.default}}, such as \code{print} and \code{lines}.
-}
-
-\author{
-  Jari Oksanen
-}
-
-\seealso{
-  \code{\link{density.default}}.
-}
-
-\examples{
-data(dune)
-data(dune.env)
-mod <- adonis(dune ~ Management, data = dune.env)
-plot(density(mod))
-mod <- adonis(dune ~ Management * Moisture, dune.env)
-densityplot(mod)
-}
-
-\keyword{ distribution }
-\keyword{ smooth }
diff --git a/man/dispweight.Rd b/man/dispweight.Rd
new file mode 100644
index 0000000..2804b29
--- /dev/null
+++ b/man/dispweight.Rd
@@ -0,0 +1,120 @@
+\encoding{UTF-8}
+\name{dispweight}
+\alias{dispweight}
+\alias{gdispweight}
+\alias{summary.dispweight}
+\title{Dispersion-based weighting of species counts}
+
+\description{Transform abundance data downweighting species that are 
+  overdispersed to the Poisson error.}
+
+\usage{
+dispweight(comm, groups, nsimul = 999, nullmodel = "c0_ind",
+    plimit = 0.05)
+gdispweight(formula, data, plimit = 0.05)
+\method{summary}{dispweight}(object, ...)
+}
+
+\arguments{
+  \item{comm}{Community data matrix.}
+  \item{groups}{Factor describing the group structure. If missing, all 
+     sites are regarded as belonging to one group. \code{NA} values are 
+     not allowed.}
+  \item{nsimul}{Number of simulations.}
+  \item{nullmodel}{The \code{\link{nullmodel}} used in
+    \code{\link{commsim}} within \code{groups}. The default
+    follows Clarke et al. (2006).}
+  \item{plimit}{Downweight species if their \eqn{p}-value is at or
+     below this limit.}
+  \item{formula, data}{Formula where the left-hand side is the
+    community data frame and right-hand side gives the explanatory
+    variables. The explanatory variables are found in the data frame
+    given in \code{data} or in the parent frame.}
+  \item{object}{Result object from \code{dispweight} or
+    \code{gdispweight}.}
+  \item{\dots}{Other parameters passed to functions.}
+}
+
+\details{
+
+The dispersion index (\eqn{D}) is calculated as ratio between variance
+and expected value for each species.  If the species abundances follow
+Poisson distribution, expected dispersion is \eqn{E(D) = 1}, and if
+\eqn{D > 1}, the species is overdispersed. The inverse \eqn{1/D} can
+be used to downweight species abundances.  Species are only
+downweighted when overdispersion is judged to be statistically
+significant (Clarke et al. 2006).
+
+Function \code{dispweight} implements the original procedure of Clarke
+et al. (2006). Only one factor can be used to group the sites and to
+find the species means. The significance of overdispersion is assessed
+freely distributing individuals of each species within factor
+levels. This is achieved by using \code{\link{nullmodel}}
+\code{"c0_ind"} (which accords to Clarke et al. 2006), but other
+nullmodels can be used, though they may not be meaningful (see
+\code{\link{commsim}} for alternatives). If a species is absent in
+some factor level, the whole level is ignored in calculation of
+overdispersion, and the number of degrees of freedom can vary among
+species. The reduced number of degrees of freedom is used as a divisor
+for overdispersion \eqn{D}, and such species have higher dispersion
+and hence lower weights in transformation.
+
+Function \code{gdispweight} is a generalized parametric version of
+\code{dispweight}. The function is based on \code{\link{glm}} with
+\code{\link{quasipoisson}} error \code{\link{family}}. Any
+\code{\link{glm}} model can be used, including several factors or
+continuous covariates. Function \code{gdispweight} uses the same test
+statistic as \code{dispweight} (Pearson Chi-square), but it does not
+ignore factor levels where species is absent, and the number of
+degrees of freedom is equal for all species. Therefore transformation
+weights can be higher than in \code{dispweight}. The
+\code{gdispweight} function evaluates the significance of
+overdispersion parametrically from Chi-square distribution
+(\code{\link{pchisq}}).
+
+Functions \code{dispweight} and \code{gdispweight} transform data, but
+they add information on overdispersion and weights as attributes of
+the result. The \code{summary} can be used to extract and print that
+information.  
+}
+
+\value{
+Function returns transformed data with the following new attributes:
+    \item{D}{Dispersion statistic.}
+    \item{df}{Degrees of freedom for each species.}
+    \item{p}{\eqn{p}-value of the Dispersion statistic \eqn{D}.}
+    \item{weights}{weights applied to community data.}
+    \item{nsimul}{Number of simulations used to assess the \eqn{p}-value,
+      or \code{NA} when simulations were not performed.}
+    \item{nullmodel}{The name of \code{\link{commsim}} null model, or
+      \code{NA} when simulations were not performed.}
+}
+
+\references{
+Clarke, K. R., M. G. Chapman, P. J. Somerfield, and
+H. R. Needham. 2006. Dispersion-based weighting of species counts in
+assemblage analyses. \emph{Marine Ecology Progress Series}, 320,
+11–27.
+}
+
+\author{
+  Eduard Szöcs \email{eduardszoesc at gmail.com} wrote the original
+  \code{dispweight}, Jari Oksanen significantly modified the code,
+  provided support functions and developed \code{gdispweight}.
+}
+
+
+\examples{
+data(mite, mite.env)
+## dispweight and its summary
+mite.dw <- with(mite.env, dispweight(mite, Shrub, nsimul = 99))
+summary(mite.dw)
+## generalized dispersion weighting
+mite.dw <- gdispweight(mite ~ Shrub + WatrCont, data = mite.env)
+rda(mite.dw ~ Shrub + WatrCont, data = mite.env)
+}
+
+
+\keyword{multivariate}
+\keyword{manip}
+
diff --git a/man/diversity.Rd b/man/diversity.Rd
index 324f33d..588d8cb 100644
--- a/man/diversity.Rd
+++ b/man/diversity.Rd
@@ -19,7 +19,7 @@ rarefy(x, sample, se = FALSE, MARGIN = 1)
 rrarefy(x, sample)
 drarefy(x, sample)
 rarecurve(x, step = 1, sample, xlab = "Sample Size", ylab = "Species",
-   label = TRUE, ...)
+          label = TRUE, col, lty, ...)
 fisher.alpha(x, MARGIN = 1, ...)
 specnumber(x, groups, MARGIN = 1)
 }
@@ -35,7 +35,11 @@ specnumber(x, groups, MARGIN = 1)
   \item{se}{Estimate standard errors.}
   \item{step}{Step size for sample sizes in rarefaction curves.}
   \item{xlab, ylab}{Axis labels in plots of rarefaction curves.}
-  \item{label}{Label rarefaction curves by rownames of \code{x} (logical).}
+  \item{label}{Label rarefaction curves by rownames of \code{x}
+    (logical).}
+  \item{col, lty}{plotting colour and line type, see
+    \code{\link{par}}. Can be a vector of length \code{nrow(x)}, one per
+    sample, and will be extended to such a length internally.}
   \item{groups}{A grouping factor: if given, finds the total number of
     species in each group.}
   \item{...}{Parameters passed to \code{\link{nlm}}, or to \code{\link{plot}}, 
diff --git a/man/dune.Rd b/man/dune.Rd
index 95c8baf..1f78b2c 100644
--- a/man/dune.Rd
+++ b/man/dune.Rd
@@ -10,13 +10,17 @@
 \description{
   The dune meadow vegetation data, \code{dune}, has cover class values
   of 30 species on 20 sites. The corresponding environmental data frame
-  \code{dune.env} has following entries:
-}
+  \code{dune.env} has following entries: } 
+
 \format{
-  For \code{dune}, a data frame of observations of 30 species at 20
-  sites.
+  \code{dune} is a data frame of observations of 30 species at 20
+  sites. The species names are abbreviated to 4+4 letters (see
+  \code{\link{make.cepnames}}). The following names are changed from
+  the original source (Jongman et al. 1987): \emph{Leontodon
+  autumnalis} to \emph{Scorzoneroides}, and \emph{Potentilla
+  palustris} to \emph{Comarum}.
   
-  For \code{dune.env}, a data frame of 20 observations on the following
+  \code{dune.env} is a data frame of 20 observations on the following
   5 variables:
   \describe{
     \item{A1:}{a numeric vector of thickness of soil A1 horizon.}
@@ -45,7 +49,6 @@
 }
 \examples{
 data(dune)
-
 data(dune.env)
 }
 \keyword{datasets}
diff --git a/man/dune.taxon.Rd b/man/dune.taxon.Rd
index e982b4d..0fa9826 100644
--- a/man/dune.taxon.Rd
+++ b/man/dune.taxon.Rd
@@ -1,33 +1,53 @@
+\encoding{UTF-8}
 \name{dune.taxon}
 \alias{dune.taxon}
+\alias{dune.phylodis}
 \docType{data}
-\title{Taxonomic Classification of Dune Meadow Species}
+\title{Taxonomic Classification and Phylogeny of Dune Meadow Species}
 \description{
   Classification table of the species in the \code{\link{dune}} data
   set.
 }
-\usage{data(dune.taxon)}
+\usage{
+  data(dune.taxon)
+  data(dune.phylodis)
+}
 \format{
-  A data frame with 30 species (rows) classified into five taxonomic
-  levels (columns).
+  \code{dune.taxon} is a data frame with 30 species (rows) classified
+  into five taxonomic levels (columns). \code{dune.phylodis} is a
+  \code{\link{dist}} object of estimated coalescence ages extracted
+  from \url{http://datadryad.org/resource/doi:10.5061/dryad.63q27}
+  (Zanne et al. 2014) using tools in packages \pkg{ape} and
+  \pkg{phylobase}.
 }
+
 \details{
-  The classification of vascular plants is adapted from AGP (2003), and
-  that of mosses from Hill et al. (2006).
+  The classification of vascular plants is based on APG (2009), and
+  that of mosses on Hill et al. (2006).
 }
 \references{
-  AGP [Angiosperm Phylogeny Group] (2003) An update of the Angiosperm
+  APG [Angiosperm Phylogeny Group] (2009) An update of the Angiosperm
   Phylogeny Group classification for the orders and families of flowering
-  plants: AGP II. \emph{Bot. J. Linnean Soc.} \strong{141}: 399--436.
+  plants: APG III. \emph{Bot. J. Linnean Soc.} \strong{161}: 105--121.
 
   Hill, M.O et al. (2006) An annotated checklist of the mosses of Europe
   and Macaronesia. \emph{J. Bryology} \strong{28}: 198--267.
+
+  Zanne A.E., Tank D.C., Cornwell, W.K., Eastman J.M., Smith, S.A.,
+  FitzJohn, R.G., McGlinn, D.J., O’Meara, B.C., Moles, A.T., Reich,
+  P.B., Royer, D.L., Soltis, D.E., Stevens, P.F., Westoby, M., Wright,
+  I.J., Aarssen, L., Bertin, R.I., Calaminus, A., Govaerts, R.,
+  Hemmings, F., Leishman, M.R., Oleksyn, J., Soltis, P.S., Swenson,
+  N.G., Warman, L. & Beaulieu, J.M. (2014) Three keys to the radiation
+  of angiosperms into freezing environments. \emph{Nature} 506, 89--92.
+
 }
-\note{
-  The data set was made to demonstrate \code{\link{taxondive}}, and will
-  probably be removed after a better example is found.
-}
-\examples{
-data(dune.taxon)
+
+\seealso{Functions \code{\link{taxondive}}, \code{\link{treedive}},
+  and \code{\link{treedist}} use these data sets. }
+
+\examples{ 
+  data(dune.taxon) 
+  data(dune.phylodis)
 }
 \keyword{datasets}
diff --git a/man/envfit.Rd b/man/envfit.Rd
index c0768c2..07e6677 100644
--- a/man/envfit.Rd
+++ b/man/envfit.Rd
@@ -16,14 +16,14 @@
   the factors show the averages of factor levels.
 }
 \usage{
-\method{envfit}{default}(ord, env, permutations = 999, strata, choices=c(1,2), 
-   display = "sites", w  = weights(ord), na.rm = FALSE, ...)
+\method{envfit}{default}(ord, env, permutations = 999, strata = NULL, 
+   choices=c(1,2),  display = "sites", w  = weights(ord), na.rm = FALSE, ...)
 \method{envfit}{formula}(formula, data, ...)
 \method{plot}{envfit}(x, choices = c(1,2), labels, arrow.mul, at = c(0,0), 
    axis = FALSE, p.max = NULL, col = "blue", bg, add = TRUE, ...)
 \method{scores}{envfit}(x, display, choices, ...)
-vectorfit(X, P, permutations = 0, strata, w, ...)
-factorfit(X, P, permutations = 0, strata, w, ...)
+vectorfit(X, P, permutations = 0, strata = NULL, w, ...)
+factorfit(X, P, permutations = 0, strata = NULL, w, ...)
 }
 
 \arguments{
@@ -37,8 +37,11 @@ factorfit(X, P, permutations = 0, strata, w, ...)
   \item{P}{Data frame, matrix or vector of environmental
     variable(s). These must be continuous for \code{vectorfit} and
     factors or characters for \code{factorfit}. }
-  \item{permutations}{ Number of permutations for assessing significance
-    of vectors or factors. Set to \code{0} to skip permutations.}
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices. Set \code{permutations = 0} to skip
+    permutations.}
   \item{formula, data}{Model  \code{\link{formula}} and data.  }
   \item{na.rm}{Remove points with missing values in ordination scores
     or environmental variables. The operation is casewise: the whole
@@ -159,6 +162,8 @@ factorfit(X, P, permutations = 0, strata, w, ...)
   \item{centroids}{Class centroids from \code{factorfit}.}
   \item{r}{Goodness of fit statistic: Squared correlation coefficient}
   \item{permutations}{Number of permutations.}
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}.}
   \item{pvals}{Empirical P-values for each variable.}
 
   Function \code{envfit} returns a list of class \code{envfit} with
diff --git a/man/eventstar.Rd b/man/eventstar.Rd
index c5378fd..6718748 100644
--- a/man/eventstar.Rd
+++ b/man/eventstar.Rd
@@ -1,124 +1,124 @@
-\encoding{UTF-8}
-\name{eventstar}
-\alias{eventstar}
-
-\title{
-Scale Parameter at the Minimum of the Tsallis Evenness Profile
-}
-\description{
-The function \code{eventstar} finds the minimum (\eqn{q^*}{q*}) of the 
-evenness profile based on the Tsallis entropy. This scale factor
-of the entropy represents a specific weighting of species
-relative frequencies that leads to minimum evenness of the
-community (Mendes et al. 2008).
-}
-\usage{
-eventstar(x, qmax = 5)
-}
-\arguments{
-  \item{x}{
-A community matrix or a numeric vector.
-}
-  \item{qmax}{
-Maximum scale parameter of the Tsallis entropy to be used in 
-finding the minimum of Tsallis based evenness
-in the range \code{c(0, qmax)}.
-}
-}
-\details{
-The function \code{eventstar} finds a characteristic value of the scale 
-parameter \eqn{q} of the Tsallis entropy corresponding to
-minimum of the evenness (equitability) profile based on Tsallis entropy.
-This value was proposed by Mendes et al. (2008) as \eqn{q^*}{q*}.
-
-The \eqn{q^\ast}{q*} index represents the scale parameter of
-the one parameter Tsallis diversity family that leads to
-the greatest deviation from the maximum equitability given the relative 
-abundance vector of a community.
-
-The value of \eqn{q^\ast}{q*} is found by identifying the minimum
-of the evenness profile over scaling factor \eqn{q}{q} by
-one-dimensional minimization. Because evenness profile is
-known to be a convex function, it is guaranteed that underlying
-\code{\link{optimize}} function will find a unique solution
-if it is in the range \code{c(0, qmax)}.
-
-The scale parameter value \eqn{q^\ast}{q*} is used to 
-find corresponding values of diversity (\eqn{H_{q^\ast}}{H.q*}), 
-evenness (\eqn{H_{q^\ast}(\max)}{H.q*(max)}),
-and numbers equivalent (\eqn{D_{q^\ast}}{D.q*}). For calculation
-details, see \code{\link{tsallis}} and Examples below.
-
-Mendes et al. (2008) advocated the use of \eqn{q^\ast}{q*}
-and corresponding diversity, evenness, and Hill numbers, because
-it is a unique value representing the diversity profile, and is
-is positively associated with rare species in the community,
-thus it is a potentially useful indicator of certain
-relative abundance distributions of the communities.
-}
-\value{
-A data frame with columns:
-\itemize{
-  \item{\code{qstar}}{ scale parameter value \eqn{q\ast}{q*}
-    corresponding to minimum value of Tsallis based evenness profile.}
-  \item{\code{Estar}}{ Value of evenness based on normalized Tsallis 
-    entropy at \eqn{q^\ast}{q*}.}
-  \item{\code{Hstar}}{ Value of Tsallis entropy at \eqn{q^\ast}{q*}.}
-  \item{\code{Dstar}}{ Value of Tsallis entropy at \eqn{q^\ast}{q*} 
-    converted to numbers equivalents
-    (also called as Hill numbers, effective number of species, 
-    \sQuote{true} diversity; cf. Jost 2007).}
-}
-See \code{\link{tsallis}} for calculation details.
-}
-\references{
-Mendes, R.S., Evangelista, L.R., Thomaz, S.M.,
-  Agostinho, A.A. and Gomes, L.C. (2008) A unified
-  index to measure ecological diversity and species
-  rarity. \emph{Ecography} \bold{31}, 450--456.
-
-Jost, L. (2007) Partitioning diversity into independent alpha and beta components.
-  \emph{Ecology} \bold{88}, 2427--2439.
-
-Tsallis, C. (1988) Possible generalization of Boltzmann-Gibbs statistics. 
-  \emph{J. Stat. Phis.} \bold{52}, 479--487.
-}
-\note{
-Values for \eqn{q^\ast}{q*} found by Mendes et al. (2008) ranged
-from 0.56 and 1.12 presenting low variability, so an
-interval between 0 and 5 should safely encompass
-the possibly expected \eqn{q^\ast}{q*} values in practice,
-but profiling the evenness and changing the value of
-the \code{qmax} argument is advised if output values
-near the range limits are found.
-}
-\author{
-Eduardo Ribeiro Cunha \email{edurcunha at gmail.com} and 
-Heloisa Beatriz Antoniazi Evangelista \email{helobeatriz at gmail.com}, 
-with technical input of Péter Sólymos.
-}
-\seealso{
-Tsallis entropy: \code{\link{tsallis}}
-}
-\examples{
-data(BCI)
-(x <- eventstar(BCI[1:5,]))
-## profiling
-y <- as.numeric(BCI[10,])
-(z <- eventstar(y))
-q <- seq(0, 2, 0.05)
-Eprof <- tsallis(y, scales=q, norm=TRUE)
-Hprof <- tsallis(y, scales=q)
-Dprof <- tsallis(y, scales=q, hill=TRUE)
-opar <- par(mfrow=c(3,1))
-plot(q, Eprof, type="l", main="Evenness")
-abline(v=z$qstar, h=tsallis(y, scales=z$qstar, norm=TRUE), col=2)
-plot(q, Hprof, type="l", main="Diversity")
-abline(v=z$qstar, h=tsallis(y, scales=z$qstar), col=2)
-plot(q, Dprof, type="l", main="Effective number of species")
-abline(v=z$qstar, h=tsallis(y, scales=z$qstar, hill=TRUE), col=2)
-par(opar)
-}
-\keyword{ optimize }
-\keyword{ multivariate }
-\keyword{ utilities }
+\encoding{UTF-8}
+\name{eventstar}
+\alias{eventstar}
+
+\title{
+Scale Parameter at the Minimum of the Tsallis Evenness Profile
+}
+\description{
+The function \code{eventstar} finds the minimum (\eqn{q^*}{q*}) of the 
+evenness profile based on the Tsallis entropy. This scale factor
+of the entropy represents a specific weighting of species
+relative frequencies that leads to minimum evenness of the
+community (Mendes et al. 2008).
+}
+\usage{
+eventstar(x, qmax = 5)
+}
+\arguments{
+  \item{x}{
+A community matrix or a numeric vector.
+}
+  \item{qmax}{
+Maximum scale parameter of the Tsallis entropy to be used in 
+finding the minimum of Tsallis based evenness
+in the range \code{c(0, qmax)}.
+}
+}
+\details{
+The function \code{eventstar} finds a characteristic value of the scale 
+parameter \eqn{q} of the Tsallis entropy corresponding to
+minimum of the evenness (equitability) profile based on Tsallis entropy.
+This value was proposed by Mendes et al. (2008) as \eqn{q^*}{q*}.
+
+The \eqn{q^\ast}{q*} index represents the scale parameter of
+the one parameter Tsallis diversity family that leads to
+the greatest deviation from the maximum equitability given the relative 
+abundance vector of a community.
+
+The value of \eqn{q^\ast}{q*} is found by identifying the minimum
+of the evenness profile over scaling factor \eqn{q}{q} by
+one-dimensional minimization. Because evenness profile is
+known to be a convex function, it is guaranteed that underlying
+\code{\link{optimize}} function will find a unique solution
+if it is in the range \code{c(0, qmax)}.
+
+The scale parameter value \eqn{q^\ast}{q*} is used to 
+find corresponding values of diversity (\eqn{H_{q^\ast}}{H.q*}), 
+evenness (\eqn{H_{q^\ast}(\max)}{H.q*(max)}),
+and numbers equivalent (\eqn{D_{q^\ast}}{D.q*}). For calculation
+details, see \code{\link{tsallis}} and Examples below.
+
+Mendes et al. (2008) advocated the use of \eqn{q^\ast}{q*}
+and corresponding diversity, evenness, and Hill numbers, because
+it is a unique value representing the diversity profile, and
+is positively associated with rare species in the community,
+thus it is a potentially useful indicator of certain
+relative abundance distributions of the communities.
+}
+\value{
+A data frame with columns:
+\itemize{
+  \item{\code{qstar}}{ scale parameter value \eqn{q^\ast}{q*}
+    corresponding to minimum value of Tsallis based evenness profile.}
+  \item{\code{Estar}}{ Value of evenness based on normalized Tsallis 
+    entropy at \eqn{q^\ast}{q*}.}
+  \item{\code{Hstar}}{ Value of Tsallis entropy at \eqn{q^\ast}{q*}.}
+  \item{\code{Dstar}}{ Value of Tsallis entropy at \eqn{q^\ast}{q*} 
+    converted to numbers equivalents
+    (also called as Hill numbers, effective number of species, 
+    \sQuote{true} diversity; cf. Jost 2007).}
+}
+See \code{\link{tsallis}} for calculation details.
+}
+\references{
+Mendes, R.S., Evangelista, L.R., Thomaz, S.M.,
+  Agostinho, A.A. and Gomes, L.C. (2008) A unified
+  index to measure ecological diversity and species
+  rarity. \emph{Ecography} \bold{31}, 450--456.
+
+Jost, L. (2007) Partitioning diversity into independent alpha and beta components.
+  \emph{Ecology} \bold{88}, 2427--2439.
+
+Tsallis, C. (1988) Possible generalization of Boltzmann-Gibbs statistics. 
+  \emph{J. Stat. Phys.} \bold{52}, 479--487.
+}
+\note{
+Values for \eqn{q^\ast}{q*} found by Mendes et al. (2008) ranged
+from 0.56 to 1.12 presenting low variability, so an
+interval between 0 and 5 should safely encompass
+the possibly expected \eqn{q^\ast}{q*} values in practice,
+but profiling the evenness and changing the value of
+the \code{qmax} argument is advised if output values
+near the range limits are found.
+}
+\author{
+Eduardo Ribeiro Cunha \email{edurcunha at gmail.com} and 
+Heloisa Beatriz Antoniazi Evangelista \email{helobeatriz at gmail.com}, 
+with technical input of Péter Sólymos.
+}
+\seealso{
+Tsallis entropy: \code{\link{tsallis}}
+}
+\examples{
+data(BCI)
+(x <- eventstar(BCI[1:5,]))
+## profiling
+y <- as.numeric(BCI[10,])
+(z <- eventstar(y))
+q <- seq(0, 2, 0.05)
+Eprof <- tsallis(y, scales=q, norm=TRUE)
+Hprof <- tsallis(y, scales=q)
+Dprof <- tsallis(y, scales=q, hill=TRUE)
+opar <- par(mfrow=c(3,1))
+plot(q, Eprof, type="l", main="Evenness")
+abline(v=z$qstar, h=tsallis(y, scales=z$qstar, norm=TRUE), col=2)
+plot(q, Hprof, type="l", main="Diversity")
+abline(v=z$qstar, h=tsallis(y, scales=z$qstar), col=2)
+plot(q, Dprof, type="l", main="Effective number of species")
+abline(v=z$qstar, h=tsallis(y, scales=z$qstar, hill=TRUE), col=2)
+par(opar)
+}
+\keyword{ optimize }
+\keyword{ multivariate }
+\keyword{ utilities }
diff --git a/man/goodness.cca.Rd b/man/goodness.cca.Rd
index ce275ac..398cfe0 100644
--- a/man/goodness.cca.Rd
+++ b/man/goodness.cca.Rd
@@ -32,7 +32,7 @@ vif.cca(object)
 
 \arguments{
   \item{object}{A result object from \code{\link{cca}},
-    \code{\link{rda}}, \code{\link{capscale}} or \code{\link{decorana}}. }
+    \code{\link{rda}} or \code{\link{capscale}}. }
   \item{display}{Display \code{"species"} or \code{"sites"}. }
   \item{choices}{Axes shown. Default is to show all axes of the \code{"model"}. }
   \item{model}{Show constrained (\code{"CCA"}) or unconstrained
@@ -52,7 +52,7 @@ vif.cca(object)
   Function \code{goodness} gives the diagnostic statistics for species
   or sites. The alternative statistics are the cumulative proportion of
   inertia accounted for by the axes, and the residual distance left
-  unaccounted for.  The conditional (``partialled out'') constraints are
+  unaccounted for.  The conditional (\dQuote{partialled out}) constraints are
   always regarded as explained and included in the statistics.
 
   Function \code{inertcomp} decomposes the inertia into partial,
@@ -124,7 +124,7 @@ vif.cca(object)
 }
 
 \seealso{\code{\link{cca}}, \code{\link{rda}}, \code{\link{capscale}},
-  \code{\link{decorana}}, \code{\link[car]{vif}}. }
+  \code{\link[car]{vif}}. }
 \examples{
 data(dune)
 data(dune.env)
diff --git a/man/goodness.metaMDS.Rd b/man/goodness.metaMDS.Rd
index c2100b7..79aac0f 100644
--- a/man/goodness.metaMDS.Rd
+++ b/man/goodness.metaMDS.Rd
@@ -66,13 +66,17 @@
 } 
 
 \value{ Function \code{goodness} returns a vector of values. Function
-  \code{stressplot} returns invisibly an object with itmes for
+  \code{stressplot} returns invisibly an object with items for
   original dissimilarities, ordination distances and fitted values.  }
 
 \author{Jari Oksanen. }
 
 \seealso{\code{\link{metaMDS}},  \code{\link{monoMDS}}, 
-  \code{\link[MASS]{isoMDS}}, \code{\link[MASS]{Shepard}}. }
+  \code{\link[MASS]{isoMDS}}, \code{\link[MASS]{Shepard}}. Similar
+  diagrams for eigenvector ordinations can be drawn with
+  \code{\link{stressplot.wcmdscale}}, \code{\link{stressplot.cca}},
+  \code{\link{stressplot.rda}} and \code{\link{stressplot.capscale}}.
+}
 
 \examples{
 data(varespec)
diff --git a/man/humpfit.Rd b/man/humpfit.Rd
index 9a9a75f..7c90ffe 100644
--- a/man/humpfit.Rd
+++ b/man/humpfit.Rd
@@ -144,6 +144,7 @@ humpfit(mass, spno, family = poisson, start)
 ##
 ## Data approximated from Al-Mufti et al. (1977)
 ##
+\donttest{
 mass <- c(140,230,310,310,400,510,610,670,860,900,1050,1160,1900,2480)
 spno <- c(1,  4,  3,  9, 18, 30, 20, 14,  3,  2,  3,  2,  5,  2)
 sol <- humpfit(mass, spno)
@@ -156,6 +157,7 @@ library(MASS)
 plot(profile(sol, parm=1:2))
 confint(sol, parm=c(1,2))
 }
+}
 \keyword{models }
 \keyword{regression }
 \keyword{nonlinear}
diff --git a/man/isomap.Rd b/man/isomap.Rd
index 96984e6..3bcd71c 100644
--- a/man/isomap.Rd
+++ b/man/isomap.Rd
@@ -3,7 +3,6 @@
 \alias{isomapdist}
 \alias{plot.isomap}
 \alias{summary.isomap}
-\alias{rgl.isomap}
 
 \title{ Isometric Feature Mapping Ordination }
 \description{
@@ -18,7 +17,6 @@ isomap(dist, ndim=10, ...)
 isomapdist(dist, epsilon, k, path = "shortest", fragmentedOK =FALSE, ...)
 \method{summary}{isomap}(object, axes = 4, ...)
 \method{plot}{isomap}(x, net = TRUE, n.col = "gray", type = "points", ...)
-rgl.isomap(x, web = "white", ...)
 }
 
 \arguments{
@@ -44,7 +42,6 @@ rgl.isomap(x, web = "white", ...)
     and \code{\link{ordiplot}} if \code{net = FALSE}, and pass
     extra arguments to these functions.}
 
-  \item{web}{Colour of the web in \pkg{rgl} graphics.}
   \item{\dots}{Other parameters passed to functions. }
 }
 \details{
@@ -72,12 +69,10 @@ rgl.isomap(x, web = "white", ...)
   The \code{plot} function uses internally \code{\link{ordiplot}},
   except that it adds text over net using \code{\link{ordilabel}}. The
   \code{plot} function passes extra arguments to these functions.  In
-  addition, function \code{rgl.isomap} can make dynamic 3D plots that
-  can be rotated on the screen. The functions is based on
-  \code{\link{ordirgl}}, but it adds the connecting lines. The function
-  passes extra arguments to \code{\link{scores}} or
-  \code{\link{ordirgl}} functions so that you can select axes, or define
-  colours and sizes of points.  }
+  addition, \pkg{vegan3d} package has function
+  \code{\link[vegan3d]{rgl.isomap}} to make dynamic 3D plots that can
+  be rotated on the screen.
+}
 
 \value{
   Function \code{isomapdist} returns a dissimilarity object similar to
@@ -131,10 +126,6 @@ lines(tr, pl, col="red")
 pl <- plot(isomap(dis, epsilon=0.45), main="isomap epsilon=0.45")
 lines(tr, pl, col="red")
 par(op)
-## The following command requires user interaction
-\dontrun{
-rgl.isomap(ord, size=4, color="hotpink")
-}
 }
 \keyword{ multivariate}
 
diff --git a/man/mantel.Rd b/man/mantel.Rd
index 5357df9..c2eebb5 100644
--- a/man/mantel.Rd
+++ b/man/mantel.Rd
@@ -13,17 +13,20 @@
 
 }
 \usage{
-mantel(xdis, ydis, method="pearson", permutations=999, strata,
-    na.rm = FALSE)
+mantel(xdis, ydis, method="pearson", permutations=999, strata = NULL,
+    na.rm = FALSE, parallel = getOption("mc.cores"))
 mantel.partial(xdis, ydis, zdis, method = "pearson", permutations = 999, 
-    strata, na.rm = FALSE)
+    strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
 }
 
 \arguments{
   \item{xdis, ydis, zdis}{ Dissimilarity matrices or a \code{dist} objects. }
   \item{method}{ Correlation method, as accepted by \code{\link{cor}}:
     \code{"pearson"}, \code{"spearman"} or \code{"kendall"}. }
-  \item{permutations}{Number of permutations in assessing significance. }
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
   \item{strata}{An integer vector or factor specifying the strata for
     permutation. If supplied, observations are permuted only within the
     specified strata.}
@@ -31,6 +34,11 @@ mantel.partial(xdis, ydis, zdis, method = "pearson", permutations = 999,
     correlation. Use this option with care: Permutation tests can
     be biased, in particular if two matrices had missing values in
     matching positions.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
+
 }
 \details{
   Mantel statistic is simply a correlation between entries of two
@@ -62,9 +70,11 @@ mantel.partial(xdis, ydis, zdis, method = "pearson", permutations = 999,
   \item{statistic}{The Mantel statistic.}
   \item{signif}{Empirical significance level from permutations.}
   \item{perm}{A vector of permuted values. The distribution of
-    permuted values can be inspected with \code{\link{density.mantel}} 
+    permuted values can be inspected with \code{\link{permustats}} 
     function.}
   \item{permutations}{Number of permutations.}
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}.}
 }
 \references{ The test is due to Mantel, of course, but the
   current implementation is based on Legendre and Legendre.
diff --git a/man/mantel.correlog.Rd b/man/mantel.correlog.Rd
index a5141c9..556dacc 100644
--- a/man/mantel.correlog.Rd
+++ b/man/mantel.correlog.Rd
@@ -1,172 +1,172 @@
-\encoding{UTF-8}
-\name{mantel.correlog}
-\alias{mantel.correlog}
-\alias{plot.mantel.correlog}
-\title{ Mantel Correlogram }
-
-\description{
-  Function \code{mantel.correlog} computes a multivariate
-  Mantel correlogram. Proposed by Sokal (1986) and Oden and Sokal
-  (1986), the method is also described in Legendre and Legendre (2012,
-  pp. 819--821).
-}
-
-\usage{
-mantel.correlog(D.eco, D.geo=NULL, XY=NULL, n.class=0, break.pts=NULL, 
-cutoff=TRUE, r.type="pearson", nperm=999, mult="holm", progressive=TRUE)
-\method{plot}{mantel.correlog}(x, alpha=0.05, ...)
-}
-
-\arguments{
-  \item{D.eco}{ An ecological distance matrix, with class
-  either \code{dist} or \code{matrix}. }
-  
-  \item{D.geo}{ A geographic distance matrix, with class either
-  \code{dist} or \code{matrix}. Provide either \code{D.geo} or
-  \code{XY}. Default: \code{D.geo=NULL}. }
-
-  \item{XY}{ A file of Cartesian geographic coordinates of the
-  points. Default: \code{XY=NULL}. }
-
-  \item{n.class}{ Number of classes. If \code{n.class=0}, the Sturges
-  equation will be used unless break points are provided. }
-
-  \item{break.pts}{ Vector containing the break points of the distance
-  distribution. Provide (n.class+1) breakpoints, that is, a list with
-  a beginning and an ending point. Default: \code{break.pts=NULL}. }
-
-  \item{cutoff}{ For the second half of the distance classes,
-  \code{cutoff = TRUE} limits the correlogram to the distance classes
-  that include all points. If \code{cutoff = FALSE}, the correlogram
-  includes all distance classes. }
-
-  \item{r.type}{ Type of correlation in calculation of the Mantel
-  statistic. Default: \code{r.type="pearson"}.  Other choices are
-  \code{r.type="spearman"} and \code{r.type="kendall"}, as in functions
-  \code{\link{cor}} and \code{\link{mantel}}. }
-
-  \item{nperm}{ Number of permutations for the tests of
-  significance. Default: \code{nperm=999}. For large data files,
-  permutation tests are rather slow. }
-
-  \item{mult}{ Correct P-values for multiple testing. The correction
-  methods are \code{"holm"} (default), \code{"hochberg"},
-  \code{"sidak"}, and other methods available in the
-  \code{\link{p.adjust}} function: \code{"bonferroni"} (best known, but
-  not recommended because it is overly conservative), \code{"hommel"},
-  \code{"BH"}, \code{"BY"}, \code{"fdr"}, and \code{"none"}. }
-
-  \item{progressive}{ Default: \code{progressive=TRUE} for progressive
-  correction of multiple-testing, as described in Legendre and Legendre
-  (1998, p. 721). Test of the first distance class: no correction;
-  second distance class: correct for 2 simultaneous tests; distance
-  class k: correct for k simultaneous tests. \code{progressive=FALSE}:
-  correct all tests for \code{n.class} simultaneous tests. }
-
-  \item{x}{ Output of \code{mantel.correlog}. }
-
-  \item{alpha}{ Significance level for the points drawn with black
-  symbols in the correlogram. Default: \code{alpha=0.05}. }
-
-  \item{...}{ Other parameters passed from other functions. }
-}
-
-\details{ A correlogram is a graph in which spatial correlation values
-  are plotted, on the ordinate, as a function of the geographic distance
-  classes among the study sites along the abscissa. In a Mantel
-  correlogram, a Mantel correlation (Mantel 1967) is computed between a
-  multivariate (e.g. multi-species) distance matrix of the user's choice
-  and a design matrix representing each of the geographic distance
-  classes in turn. The Mantel statistic is tested through a
-  permutational Mantel test performed by \code{vegan}'s
-  \code{\link{mantel}} function.
-
-  When a correction for multiple testing is applied, more permutations
-  are necessary than in the no-correction case, to obtain significant
-  p-values in the higher correlogram classes.
-
-  The \code{print.mantel.correlog} function prints out the
-  correlogram. See examples.  }
-
-\value{ 
-
-  \item{mantel.res }{A table with the distance classes as rows and the
-  class indices, number of distances per class, Mantel statistics
-  (computed using Pearson's r, Spearman's r, or Kendall's tau), and
-  p-values as columns. A positive Mantel statistic indicates positive
-  spatial correlation. An additional column with p-values corrected for
-  multiple testing is added unless \code{mult="none"}. }
-
-  \item{n.class }{The n umber of distance classes. }
-  
-  \item{break.pts }{The break points provided by the user or computed by
-    the program. }
-
-  \item{mult }{The name of the correction for multiple testing. No
-    correction: \code{mult="none"}. }  
-
-  \item{progressive }{A logical (\code{TRUE}, \code{FALSE}) value
-  indicating whether or not a progressive correction for multiple
-  testing was requested. } 
-
-  \item{n.tests }{The number of distance classes for which Mantel
-  tests have been computed and tested for significance. }
-
-  \item{call }{The function call. }  
-}
-
-\author{ Pierre Legendre, Université de Montréal }
-
-\references{
-
-  Legendre, P. and L. Legendre. 2012. Numerical ecology, 3rd English
-  edition. Elsevier Science BV, Amsterdam.
-
-  Mantel, N. 1967. The detection of disease clustering and a generalized
-  regression approach. Cancer Res. 27: 209-220.
-
-  Oden, N. L. and R. R. Sokal. 1986. Directional autocorrelation: an
-  extension of spatial correlograms to two dimensions. Syst. Zool. 35:
-  608-617.
-
-  Sokal, R. R. 1986. Spatial data analysis and historical
-  processes. 29-43 in: E. Diday et al. [eds.] Data analysis and
-  informatics, IV. North-Holland, Amsterdam.
-  
-  Sturges, H. A. 1926. The choice of a class interval. Journal of the 
-  American Statistical Association 21: 65–66.  }
-
-\examples{   
-# Mite data available in "vegan"
-data(mite)        
-data(mite.xy)  
-mite.hel <- decostand(mite, "hellinger")
-
-# Detrend the species data by regression on the site coordinates
-mite.hel.resid <- resid(lm(as.matrix(mite.hel) ~ ., data=mite.xy))
-
-# Compute the detrended species distance matrix
-mite.hel.D <- dist(mite.hel.resid)
-
-# Compute Mantel correlogram with cutoff, Pearson statistic
-mite.correlog <- mantel.correlog(mite.hel.D, XY=mite.xy, nperm=49)
-summary(mite.correlog)
-mite.correlog   
-# or: print(mite.correlog)
-# or: print.mantel.correlog(mite.correlog)
-plot(mite.correlog)
-
-# Compute Mantel correlogram without cutoff, Spearman statistic
-mite.correlog2 <- mantel.correlog(mite.hel.D, XY=mite.xy, cutoff=FALSE, 
-   r.type="spearman", nperm=49)
-summary(mite.correlog2)
-mite.correlog2
-plot(mite.correlog2)
-
-# NOTE: 'nperm' argument usually needs to be larger than 49.
-# It was set to this low value for demonstration purposes.
-
-}
-
-\keyword{ multivariate }
-
+\encoding{UTF-8}
+\name{mantel.correlog}
+\alias{mantel.correlog}
+\alias{plot.mantel.correlog}
+\title{ Mantel Correlogram }
+
+\description{
+  Function \code{mantel.correlog} computes a multivariate
+  Mantel correlogram. Proposed by Sokal (1986) and Oden and Sokal
+  (1986), the method is also described in Legendre and Legendre (2012,
+  pp. 819--821).
+}
+
+\usage{
+mantel.correlog(D.eco, D.geo=NULL, XY=NULL, n.class=0, break.pts=NULL, 
+cutoff=TRUE, r.type="pearson", nperm=999, mult="holm", progressive=TRUE)
+\method{plot}{mantel.correlog}(x, alpha=0.05, ...)
+}
+
+\arguments{
+  \item{D.eco}{ An ecological distance matrix, with class
+  either \code{dist} or \code{matrix}. }
+  
+  \item{D.geo}{ A geographic distance matrix, with class either
+  \code{dist} or \code{matrix}. Provide either \code{D.geo} or
+  \code{XY}. Default: \code{D.geo=NULL}. }
+
+  \item{XY}{ A file of Cartesian geographic coordinates of the
+  points. Default: \code{XY=NULL}. }
+
+  \item{n.class}{ Number of classes. If \code{n.class=0}, the Sturges
+  equation will be used unless break points are provided. }
+
+  \item{break.pts}{ Vector containing the break points of the distance
+  distribution. Provide (n.class+1) breakpoints, that is, a list with
+  a beginning and an ending point. Default: \code{break.pts=NULL}. }
+
+  \item{cutoff}{ For the second half of the distance classes,
+  \code{cutoff = TRUE} limits the correlogram to the distance classes
+  that include all points. If \code{cutoff = FALSE}, the correlogram
+  includes all distance classes. }
+
+  \item{r.type}{ Type of correlation in calculation of the Mantel
+  statistic. Default: \code{r.type="pearson"}.  Other choices are
+  \code{r.type="spearman"} and \code{r.type="kendall"}, as in functions
+  \code{\link{cor}} and \code{\link{mantel}}. }
+
+  \item{nperm}{ Number of permutations for the tests of
+  significance. Default: \code{nperm=999}. For large data files,
+  permutation tests are rather slow. }
+
+  \item{mult}{ Correct P-values for multiple testing. The correction
+  methods are \code{"holm"} (default), \code{"hochberg"},
+  \code{"sidak"}, and other methods available in the
+  \code{\link{p.adjust}} function: \code{"bonferroni"} (best known, but
+  not recommended because it is overly conservative), \code{"hommel"},
+  \code{"BH"}, \code{"BY"}, \code{"fdr"}, and \code{"none"}. }
+
+  \item{progressive}{ Default: \code{progressive=TRUE} for progressive
+  correction of multiple-testing, as described in Legendre and Legendre
+  (1998, p. 721). Test of the first distance class: no correction;
+  second distance class: correct for 2 simultaneous tests; distance
+  class k: correct for k simultaneous tests. \code{progressive=FALSE}:
+  correct all tests for \code{n.class} simultaneous tests. }
+
+  \item{x}{ Output of \code{mantel.correlog}. }
+
+  \item{alpha}{ Significance level for the points drawn with black
+  symbols in the correlogram. Default: \code{alpha=0.05}. }
+
+  \item{...}{ Other parameters passed from other functions. }
+}
+
+\details{ A correlogram is a graph in which spatial correlation values
+  are plotted, on the ordinate, as a function of the geographic distance
+  classes among the study sites along the abscissa. In a Mantel
+  correlogram, a Mantel correlation (Mantel 1967) is computed between a
+  multivariate (e.g. multi-species) distance matrix of the user's choice
+  and a design matrix representing each of the geographic distance
+  classes in turn. The Mantel statistic is tested through a
+  permutational Mantel test performed by \code{vegan}'s
+  \code{\link{mantel}} function.
+
+  When a correction for multiple testing is applied, more permutations
+  are necessary than in the no-correction case, to obtain significant
+  p-values in the higher correlogram classes.
+
+  The \code{print.mantel.correlog} function prints out the
+  correlogram. See examples.  }
+
+\value{ 
+
+  \item{mantel.res }{A table with the distance classes as rows and the
+  class indices, number of distances per class, Mantel statistics
+  (computed using Pearson's r, Spearman's r, or Kendall's tau), and
+  p-values as columns. A positive Mantel statistic indicates positive
+  spatial correlation. An additional column with p-values corrected for
+  multiple testing is added unless \code{mult="none"}. }
+
+  \item{n.class }{The number of distance classes. }
+  
+  \item{break.pts }{The break points provided by the user or computed by
+    the program. }
+
+  \item{mult }{The name of the correction for multiple testing. No
+    correction: \code{mult="none"}. }  
+
+  \item{progressive }{A logical (\code{TRUE}, \code{FALSE}) value
+  indicating whether or not a progressive correction for multiple
+  testing was requested. } 
+
+  \item{n.tests }{The number of distance classes for which Mantel
+  tests have been computed and tested for significance. }
+
+  \item{call }{The function call. }  
+}
+
+\author{ Pierre Legendre, Université de Montréal }
+
+\references{
+
+  Legendre, P. and L. Legendre. 2012. Numerical ecology, 3rd English
+  edition. Elsevier Science BV, Amsterdam.
+
+  Mantel, N. 1967. The detection of disease clustering and a generalized
+  regression approach. Cancer Res. 27: 209-220.
+
+  Oden, N. L. and R. R. Sokal. 1986. Directional autocorrelation: an
+  extension of spatial correlograms to two dimensions. Syst. Zool. 35:
+  608-617.
+
+  Sokal, R. R. 1986. Spatial data analysis and historical
+  processes. 29-43 in: E. Diday et al. [eds.] Data analysis and
+  informatics, IV. North-Holland, Amsterdam.
+  
+  Sturges, H. A. 1926. The choice of a class interval. Journal of the 
+  American Statistical Association 21: 65–66.  }
+
+\examples{   
+# Mite data available in "vegan"
+data(mite)        
+data(mite.xy)  
+mite.hel <- decostand(mite, "hellinger")
+
+# Detrend the species data by regression on the site coordinates
+mite.hel.resid <- resid(lm(as.matrix(mite.hel) ~ ., data=mite.xy))
+
+# Compute the detrended species distance matrix
+mite.hel.D <- dist(mite.hel.resid)
+
+# Compute Mantel correlogram with cutoff, Pearson statistic
+mite.correlog <- mantel.correlog(mite.hel.D, XY=mite.xy, nperm=49)
+summary(mite.correlog)
+mite.correlog   
+# or: print(mite.correlog)
+# or: print.mantel.correlog(mite.correlog)
+plot(mite.correlog)
+
+# Compute Mantel correlogram without cutoff, Spearman statistic
+mite.correlog2 <- mantel.correlog(mite.hel.D, XY=mite.xy, cutoff=FALSE, 
+   r.type="spearman", nperm=49)
+summary(mite.correlog2)
+mite.correlog2
+plot(mite.correlog2)
+
+# NOTE: 'nperm' argument usually needs to be larger than 49.
+# It was set to this low value for demonstration purposes.
+
+}
+
+\keyword{ multivariate }
+
diff --git a/man/metaMDS.Rd b/man/metaMDS.Rd
index 59cc266..98e66d1 100644
--- a/man/metaMDS.Rd
+++ b/man/metaMDS.Rd
@@ -41,7 +41,8 @@ metaMDSdist(comm, distance = "bray", autotransform = TRUE,
     noshare = TRUE, trace = 1, commname, zerodist = "ignore", 
     distfun = vegdist, ...)
 metaMDSiter(dist, k = 2, trymax = 20, trace = 1, plot = FALSE, 
-    previous.best, engine = "monoMDS", maxit = 200, ...)   
+    previous.best, engine = "monoMDS", maxit = 200,
+    parallel = getOption("mc.cores"), ...)   
 initMDS(x, k=2)
 postMDS(X, dist, pc=TRUE, center=TRUE, halfchange, threshold=0.8,
     nthreshold=10, plot=FALSE, ...)
@@ -124,6 +125,14 @@ metaMDSredist(object, ...)
    passed to the \code{engine} function \code{\link{monoMDS}} or
    \code{\link[MASS]{isoMDS}}.}
 
+ \item{parallel}{Number of parallel processes or a predefined socket
+   cluster.  If you use pre-defined socket clusters (say,
+   \code{clus}), you must issue \code{clusterEvalQ(clus,
+   library(vegan))} to make available internal \pkg{vegan}
+   functions. With \code{parallel = 1} uses ordinary, non-parallel
+   processing. The parallel processing is done with \pkg{parallel}
+   package.}
+
  \item{dist}{Dissimilarity matrix used in multidimensional scaling. }
   \item{pc}{Rotate to principal components. }
   \item{center}{Centre the configuration. }
diff --git a/man/monoMDS.Rd b/man/monoMDS.Rd
index ffa4c62..dd6a953 100644
--- a/man/monoMDS.Rd
+++ b/man/monoMDS.Rd
@@ -56,8 +56,8 @@ monoMDS(dist, y, k = 2, model = c("global", "local", "linear", "hybrid"),
 
   \item{smin, sfgrmin, sratmax}{Convergence criteria: iterations stop
     when stress drops below \code{smin}, scale factor of the gradient
-    drops below \code{sfgrmin}, or stress ratio goes over
-    \code{sratmax} (but is still \eqn{< 1}).}
+    drops below \code{sfgrmin}, or stress ratio between two iterations
+    goes over \code{sratmax} (but is still \eqn{< 1}).}
 
   \item{x}{A \code{monoMDS} result.}
 
diff --git a/man/mrpp.Rd b/man/mrpp.Rd
index 027b2bc..11808c6 100644
--- a/man/mrpp.Rd
+++ b/man/mrpp.Rd
@@ -13,7 +13,7 @@ and between block dissimilarities.}
 
 \usage{
 mrpp(dat, grouping, permutations = 999, distance = "euclidean",
-     weight.type = 1, strata)
+     weight.type = 1, strata = NULL, parallel = getOption("mc.cores"))
 meandist(dist, grouping, ...)
 \method{summary}{meandist}(object, ...)
 \method{plot}{meandist}(x, kind = c("dendrogram", "histogram"),  cluster = "average", 
@@ -25,8 +25,11 @@ meandist(dist, grouping, ...)
     columns are response variable(s), or a dissimilarity object or a
     symmetric square matrix of dissimilarities.} 
   \item{grouping}{ Factor or numeric index for grouping observations.}
-  \item{permutations}{Number of permutations to assess the significance
-    of the MRPP statistic, \eqn{delta}.} 
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices. These are used to assess
+    the significance of the MRPP statistic, \eqn{delta}.} 
   \item{distance}{Choice of distance metric that measures the
     dissimilarity between two observations . See \code{\link{vegdist}} for
     options.  This will be used if \code{dat} was not a dissimilarity
@@ -35,6 +38,10 @@ meandist(dist, grouping, ...)
   \item{strata}{An integer vector or factor specifying the strata for
     permutation. If supplied, observations are permuted only within the
     specified strata.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
   \item{dist}{A \code{\link{dist}} object of dissimilarities, such as
     produced by functions \code{\link{dist}}, \code{\link{vegdist}} or
     \code{\link{designdist}}.}.
@@ -139,8 +146,10 @@ The function returns a list of class mrpp with following items:
   \item{weight.type}{The choice of group weights used.}
   \item{boot.deltas}{The vector of "permuted deltas," the deltas
     calculated from each of the permuted datasets. The distribution of
-    this item can be inspected with \code{\link{density.mrpp}} function.}
+    this item can be inspected with \code{\link{permustats}} function.}
   \item{permutations}{The number of permutations used.}
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}.}
 }
 \references{
   B. McCune and J. B. Grace. 2002. \emph{Analysis of Ecological
@@ -181,7 +190,7 @@ alternative.
 \examples{
 data(dune)
 data(dune.env)
-dune.mrpp <- mrpp(dune, dune.env$Management)
+dune.mrpp <- with(dune.env, mrpp(dune, Management))
 dune.mrpp
 
 # Save and change plotting parameters
@@ -189,7 +198,7 @@ def.par <- par(no.readonly = TRUE)
 layout(matrix(1:2,nr=1))
 
 plot(dune.ord <- metaMDS(dune), type="text", display="sites" )
-ordihull(dune.ord, dune.env$Management)
+with(dune.env, ordihull(dune.ord, Management))
 
 with(dune.mrpp, {
   fig.dist <- hist(boot.deltas, xlim=range(c(delta,boot.deltas)), 
diff --git a/man/mso.Rd b/man/mso.Rd
index f1e0650..310db85 100644
--- a/man/mso.Rd
+++ b/man/mso.Rd
@@ -13,7 +13,7 @@ of cca or rda results}
   partitioning of the \code{"cca"} object.  }
 
 \usage{
-mso(object.cca, object.xy, grain = 1, round.up = FALSE, permutations = FALSE)
+mso(object.cca, object.xy, grain = 1, round.up = FALSE, permutations = 0)
 msoplot(x, alpha = 0.05, explained = FALSE, ylim = NULL, legend = "topleft", ...)
 }
 \arguments{
@@ -27,9 +27,10 @@ msoplot(x, alpha = 0.05, explained = FALSE, ylim = NULL, legend = "topleft", ...
   \item{round.up}{ Determines the choice of breaks. If false, distances
     are rounded to the nearest multiple of grain. If true, distances are
     rounded to the upper multiple of grain.} 
-  \item{permutations}{ If false, suppresses the permutation test. If an
-    integer, determines the number of permutations for the Mantel test
-    of spatial independence of residual inertia.}
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
   \item{x}{A result object of \code{mso}.}
   \item{alpha}{ Significance level for the two-sided permutation test of
     the Mantel statistic for spatial independence of residual inertia
@@ -125,14 +126,14 @@ data(mite.env)
 data(mite.xy)
 
 mite.cca <- cca(log(mite + 1))
-mite.cca <- mso(mite.cca, mite.xy, grain =  1, permutations = 100)
+mite.cca <- mso(mite.cca, mite.xy, grain =  1, permutations = 99)
 msoplot(mite.cca)
 mite.cca
 
 ## Constrained ordination with test for residual autocorrelation
 ## and scale-invariance of species-environment relationships
 mite.cca <- cca(log(mite + 1) ~ SubsDens + WatrCont + Substrate + Shrub + Topo, mite.env)
-mite.cca <- mso(mite.cca, mite.xy, permutations = 100)
+mite.cca <- mso(mite.cca, mite.xy, permutations = 99)
 msoplot(mite.cca)
 mite.cca
 }
diff --git a/man/multipart.Rd b/man/multipart.Rd
index 1ea62c8..5b8cd35 100644
--- a/man/multipart.Rd
+++ b/man/multipart.Rd
@@ -105,11 +105,11 @@ cutter <- function (x, cut = seq(0, 10, by = 2.5)) {
         out[which(x > cut[i] & x <= cut[(i + 1)])] <- i
     return(out)}
 ## The hierarchy of sample aggregation
-levsm <- data.frame(
+levsm <- with(mite.xy, data.frame(
     l1=1:nrow(mite),
-    l2=cutter(mite.xy$y, cut = seq(0, 10, by = 2.5)),
-    l3=cutter(mite.xy$y, cut = seq(0, 10, by = 5)),
-    l4=cutter(mite.xy$y, cut = seq(0, 10, by = 10)))
+    l2=cutter(y, cut = seq(0, 10, by = 2.5)),
+    l3=cutter(y, cut = seq(0, 10, by = 5)),
+    l4=cutter(y, cut = seq(0, 10, by = 10))))
 ## Multiplicative diversity partitioning
 multipart(mite, levsm, index="renyi", scales=1, nsimul=19)
 multipart(mite ~ ., levsm, index="renyi", scales=1, nsimul=19)
diff --git a/man/nestedtemp.Rd b/man/nestedtemp.Rd
index 0f2c2b4..0ae7e48 100644
--- a/man/nestedtemp.Rd
+++ b/man/nestedtemp.Rd
@@ -24,7 +24,7 @@ nestedchecker(comm)
 nestedn0(comm)
 nesteddisc(comm, niter = 200)
 nestedtemp(comm, ...)
-nestednodf(comm, order = TRUE, weighted = FALSE)
+nestednodf(comm, order = TRUE, weighted = FALSE, wbinary = FALSE)
 nestedbetasor(comm)
 nestedbetajac(comm)
 \method{plot}{nestedtemp}(x, kind = c("temperature", "incidence"),
@@ -43,6 +43,8 @@ nestedbetajac(comm)
     returned accordingly.}
   \item{order}{Order rows and columns by frequencies.}
   \item{weighted}{Use species abundances as weights of interactions.}
+  \item{wbinary}{Modify original method so that binary data give the same 
+    result in weighted and unweighted analysis. }
   \item{\dots}{Other arguments to functions.}
 }
 
@@ -112,7 +114,14 @@ nestedbetajac(comm)
   al. 2008). With \code{weighted = TRUE}, the function finds the
   weighted version of the index (Almeida-Neto & Ulrich,
   2011). However, this requires quantitative null models for adequate
-  testing.
+  testing. Almeida-Neto & Ulrich (2011) say that you have positive
+  nestedness if values in the first row/column are higher than in the
+  second.  With this condition, weighted analysis of binary data will
+  always give zero nestedness. With argument \code{wbinary = TRUE},
+  equality of rows/colums also indicates nestedness, and binary data
+  will give identical results in weighted and unweighted analysis.
+  However, this can also influence the results of weighted analysis so
+  that the results may differ from Almeida-Neto & Ulrich (2011).
 
   Functions \code{nestedbetasor} and \code{nestedbetajac} find
   multiple-site dissimilarities and decompose these into components of
@@ -126,11 +135,11 @@ nestedbetajac(comm)
   \enc{Sørensen}{Sorensen} or Jaccard dissimilarity. The last one is
   the total beta diversity (Baselga 2010). The functions will treat
   data as presence/absence (binary) and they can be used with binary
-  null models (see \code{\link{commsimulator}}). The overall
-  dissimilarity is constant in all null models that fix species
-  (column) frequencies (\code{"c0"}), and all components are constant
-  if row columns are also fixed (e.g., model \code{"quasiswap"}), and
-  the functions are not meaningful with these null models.
+  null models (see \code{\link{nullmodel}}). The overall dissimilarity is constant in
+  all \code{\link{nullmodel}}s that fix species (column) frequencies
+  (\code{"c0"}), and all components are constant if row columns are
+  also fixed (e.g., model \code{"quasiswap"}), and the functions are
+  not meaningful with these null models.
 
 }
 
diff --git a/man/nullmodel.Rd b/man/nullmodel.Rd
new file mode 100644
index 0000000..964c033
--- /dev/null
+++ b/man/nullmodel.Rd
@@ -0,0 +1,168 @@
+\name{nullmodel}
+\alias{nullmodel}
+\alias{print.nullmodel}
+\alias{simulate.nullmodel}
+\alias{update.nullmodel}
+\alias{str.nullmodel}
+\alias{print.simmat}
+\title{
+Null Model and Simulation
+}
+\description{
+The \code{nullmodel} function creates an object,
+which can serve as a basis for Null Model simulation
+via the \code{\link{simulate}} method.
+The \code{\link{update}} method updates the nullmodel
+object without sampling (effective for sequential algorithms).
+}
+\usage{
+nullmodel(x, method)
+\method{print}{nullmodel}(x, ...)
+\method{simulate}{nullmodel}(object, nsim = 1, 
+seed = NULL, burnin = 0, thin = 1, ...)
+\method{update}{nullmodel}(object, nsim = 1, 
+seed = NULL, ...)
+\method{print}{simmat}(x, ...)
+}
+\arguments{
+  \item{x}{
+A community matrix.
+For the \code{print} method, it is an object to be printed.
+}
+  \item{method}{
+Character, specifying one of the null model algorithms
+listed on the help page of \code{\link{commsim}}.
+It can be a user supplied object of class \code{commsim}.
+}
+  \item{object}{
+An object of class \code{nullmodel} returned by
+the function \code{nullmodel}.
+}
+  \item{nsim}{
+Positive integer, the number of simulated matrices to return.
+For the \code{update} method, it is the number of
+burnin steps made for sequential algorithms
+to update the status of the input model \code{object}.
+}
+  \item{seed}{
+An object specifying if and how the random number 
+generator should be initialized ("seeded").
+Either \code{NULL} or an integer that will be 
+used in a call to \code{\link{set.seed}} before 
+simulating the matrices. 
+If set, the value is saved as the 
+\code{"seed"} attribute of the returned value. 
+The default, \code{NULL} will not change the 
+random generator state, and return 
+\code{\link{.Random.seed}} as the \code{"seed"}
+ attribute, see Value. 
+}
+  \item{burnin}{
+Nonnegative integer, specifying the number of steps
+discarded before starting simulation.
+Active only for sequential null model algorithms.
+Ignored for non-sequential null model algorithms.
+}
+  \item{thin}{
+Positive integer, number of simulation steps
+made between each returned matrix.
+Active only for sequential null model algorithms.
+Ignored for non-sequential null model algorithms.
+}
+  \item{\dots}{
+Additional arguments supplied to algorithms.
+}
+}
+\details{
+The purpose of the \code{nullmodel} function is to
+create an object, where all necessary statistics of the
+input matrix are calculated only once.
+This information is reused, but not recalculated
+in each step of the simulation process done by
+the \code{simulate} method.
+
+The \code{simulate} method carries out the simulation,
+the simulated matrices are stored in an array.
+For sequential algorithms, the method updates the state
+of the input \code{nullmodel} object.
+Therefore, it is possible to do diagnostic
+tests on the returned \code{simmat} object,
+and make further simulations, or use
+increased thinning value if desired.
+
+The \code{update} method makes burnin steps in case
+of sequential algorithms to update the status of the 
+input model without any attempt to return matrices.
+For non-sequential algorithms the method does nothing.
+
+\code{update} is the preferred way of making burnin iterations
+without sampling. Alternatively, burnin can be done
+via the \code{simulate} method. For convergence
+diagnostics, it is recommended to use the
+\code{simulate} method without burnin.
+The input nullmodel object is updated, so further
+samples can be simulated if desired without having
+to start the process all over again. See Examples.
+}
+\value{
+The function \code{nullmodel} returns an object of class \code{nullmodel}.
+It is a set of objects sharing the same environment:
+\itemize{
+  \item{\code{data}: }{original matrix in integer mode.}
+  \item{\code{nrow}: }{number of rows.}
+  \item{\code{ncol}: }{number of columns.}
+  \item{\code{rowSums}: }{row sums.}
+  \item{\code{colSums}: }{column sums.}
+  \item{\code{rowFreq}: }{row frequencies (number of nonzero cells).}
+  \item{\code{colFreq}: }{column frequencies (number of nonzero cells).}
+  \item{\code{totalSum}: }{total sum.}
+  \item{\code{fill}: }{number of nonzero cells in the matrix.}
+  \item{\code{commsim}: }{the \code{commsim} object as a result
+    of the \code{method} argument.}
+  \item{\code{state}: }{current state of the permutations, 
+    a matrix similar to the original.
+    It is \code{NULL} for non-sequential algorithms.}
+  \item{\code{iter}: }{current number of iterations 
+  for sequential algorithms.
+    It is \code{NULL} for non-sequential algorithms.}
+}
+
+The \code{simulate} method returns an object of class \code{simmat}.
+It is an array of simulated matrices (third dimension
+corresponding to \code{nsim} argument).
+
+The \code{update} method returns the current state (last updated matrix)
+invisibly, and update the input object for sequential algorithms.
+For non sequential algorithms, it returns \code{NULL}.
+}
+\author{
+Jari Oksanen and Peter Solymos
+}
+\seealso{
+\code{\link{commsim}}, \code{\link{make.commsim}}, 
+\code{\link{permatfull}}, \code{\link{permatswap}}
+}
+\examples{
+x <- matrix(rbinom(12*10, 1, 0.5)*rpois(12*10, 3), 12, 10)
+
+## non-sequential nullmodel
+(nm <- nullmodel(x, "r00"))
+(sm <- simulate(nm, nsim=10))
+
+## sequential nullmodel
+(nm <- nullmodel(x, "swap"))
+(sm1 <- simulate(nm, nsim=10, thin=5))
+(sm2 <- simulate(nm, nsim=10, thin=5))
+
+## sequential nullmodel with burnin and extra updating
+(nm <- nullmodel(x, "swap"))
+(sm1 <- simulate(nm, burnin=10, nsim=10, thin=5))
+(sm2 <- simulate(nm, nsim=10, thin=5))
+
+## sequential nullmodel with separate initial burnin
+(nm <- nullmodel(x, "swap"))
+nm <- update(nm, nsim=10)
+(sm2 <- simulate(nm, nsim=10, thin=5))
+}
+\keyword{ multivariate }
+\keyword{ datagen }
diff --git a/man/oecosimu.Rd b/man/oecosimu.Rd
index 238a359..6d3f640 100644
--- a/man/oecosimu.Rd
+++ b/man/oecosimu.Rd
@@ -1,148 +1,154 @@
 \encoding{UTF-8}
 \name{oecosimu}
 \alias{oecosimu}
-\alias{commsimulator}
 \alias{as.ts.oecosimu}
 \alias{as.mcmc.oecosimu}
-\alias{density.oecosimu}
-\alias{densityplot.oecosimu}
 
-\title{ Null Models for Biological Communities }
+\title{Evaluate Statistics with Null Models of Biological Communities }
 
 \description{
-  Null models generate random communities with different criteria to
-  study the significance of nestedness or other community patterns. The
-  function only simulates binary (presence/absence) models with
-  constraint for total number of presences, and optionally for numbers
-  of species and/or species frequencies.
+ 
+  Function evaluates a statistic or a vector of statistics in
+  community and evaluates its significance in a series of simulated
+  random communities.  The approach has been used traditionally for
+  the analysis of nestedness, but the function is more general and can
+  be used with any statistics evaluated with simulated
+  communities. Function \code{oecosimu} collects and evaluates the
+  statistics. The Null model communities are described in
+  \code{\link{make.commsim}} and \code{\link{permatfull}}/
+  \code{\link{permatswap}}, the definition of Null models in
+  \code{\link{nullmodel}}, and nestedness statistics in
+  \code{\link{nestednodf}} (which describes several alternative
+  statistics, including nestedness temperature, \eqn{N0}, checker
+  board units, nestedness discrepancy and NODF).
+
 }
 
 \usage{
 oecosimu(comm, nestfun, method, nsimul = 99, burnin = 0, thin = 1,
-   statistic = "statistic", alternative = c("two.sided", "less", "greater"),
-   ...)
-commsimulator(x, method, thin=1)
+   statistic = "statistic", alternative = c("two.sided", "less", "greater"), 
+   batchsize = NA, parallel = getOption("mc.cores"), ...)
 \method{as.ts}{oecosimu}(x, ...)
 \method{as.mcmc}{oecosimu}(x)
-\method{density}{oecosimu}(x, ...)
-\method{densityplot}{oecosimu}(x, data, xlab = "Simulated", ...)
 }
 
 \arguments{
-  \item{comm}{Community data.}
-  \item{x}{Community data for \code{commsimulator}, or an \code{oecosimu}
-    result object for \code{as.ts}, \code{as.mcmc}, \code{density} and 
-    \code{densityplot}.}
-  \item{nestfun}{Function to analyse nestedness. Some functions are
-  provided in \pkg{vegan}, but any function can be used if it accepts the
-  community as the first argument, and returns either a plain number or
-  the result in list item with the name defined in argument
-  \code{statistic}. See Examples for defining your own functions.}
-  \item{method}{Null model method. See details.}
-  \item{nsimul}{Number of simulated null communities.}
+  \item{comm}{Community data, or a Null model object generated by
+    \code{\link{nullmodel}} or an object of class \code{simmat} (array
+    of permuted matrices from \code{\link{simulate.nullmodel}}). If
+    \code{comm} is a community data, null model simulation
+    \code{method} must be specified.  If \code{comm} is a
+    \code{\link{nullmodel}}, the simulation \code{method} is ignored,
+    and if \code{comm} is a \code{simmat} object, all other arguments
+    are ignored except \code{nestfun}, \code{statistic} and
+    \code{alternative}.}
+  \item{nestfun}{Function analysed. Some nestedness functions are
+    provided in \pkg{vegan} (see \code{\link{nestedtemp}}), but any
+    function can be used if it accepts the community as the first
+    argument, and returns either a plain number or a vector or the
+    result in list item with the name defined in argument
+    \code{statistic}. See Examples for defining your own functions.}
+  \item{method}{Null model method: either a name (character string) of
+    a method defined in \code{\link{make.commsim}} or a
+    \code{\link{commsim}} function. This argument is ignored if
+    \code{comm} is a \code{\link{nullmodel}} or a \code{simmat}
+    object. See Details and Examples.}
+  \item{nsimul}{Number of simulated null communities (ignored if
+    \code{comm} is a \code{simmat} object).}
   \item{burnin}{Number of null communities discarded before proper
-  analysis in sequential methods \code{"swap"} and \code{"tswap"}.}
+    analysis in sequential methods (such as \code{"tswap"})
+    (ignored with non-sequential methods or when \code{comm} is a
+    \code{simmat} object).}
   \item{thin}{Number of discarded null communities between two
-  evaluations of nestedness statistic in sequential methods
-  \code{"swap"} and \code{"tswap"}.}
+    evaluations of nestedness statistic in sequential methods (ignored
+    with non-sequential methods or when \code{comm} is a \code{simmat}
+    object).}
   \item{statistic}{The name of the statistic returned by
-    \code{nestedfun}} 
+    \code{nestfun}.} 
   \item{alternative}{a character string specifying the alternative
     hypothesis, must be one of \code{"two.sided"} (default), \code{"greater"}
     or \code{"less"}. Please note that the \eqn{p}-value of two-sided
     test is approximately two times higher than in the corresponding
     one-sided test (\code{"greater"} or \code{"less"} depending on the
     sign of the difference).}
-  \item{data}{Ignored argument of the generic function.}
-  \item{xlab}{Label of the x-axis.}
+  \item{batchsize}{Size in Megabytes of largest simulation object. If
+    a larger structure would be produced, the analysis is broken
+    internally into batches. With default \code{NA} the analysis is
+    not broken into batches.  See Details.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.  If you define a \code{nestfun} in Windows that needs other
+    \R packages than \pkg{vegan} or \pkg{permute}, you must set up a
+    socket cluster before the call. See \code{\link{vegandocs}}
+    \code{decision-vegan} for details. }
+  \item{x}{An \code{oecosimu} result object.}
+
   \item{\dots}{Other arguments to functions.}
 }
 
 \details{
   
-  Function \code{oecosimu} is a wrapper that evaluates a nestedness
-  statistic using function given by \code{nestfun}, and then simulates
-  a series of null models using \code{commsimulator} or other
-  functions (depending on method argument), and evaluates the
+  Function \code{oecosimu} is a wrapper that evaluates a statistic
+  using function given by \code{nestfun}, and then simulates a series
+  of null models based on \code{nullmodel}, and evaluates the
   statistic on these null models. The \pkg{vegan} packages contains
   some nestedness functions that are described separately
   (\code{\link{nestedchecker}}, \code{\link{nesteddisc}},
-  \code{\link{nestedn0}}, \code{\link{nestedtemp}}), but many other
-  functions can be used as long as they are meaningful with binary or
-  quantitative community models.  An applicable function must return
-  either the statistic as a plain number, or as a list element
-  \code{"statistic"} (like \code{\link{chisq.test}}), or in an item
-  whose name is given in the argument \code{statistic}.  The statistic
-  can be a single number (like typical for a nestedness index), or it
-  can be a vector. The vector indices can be used to analyse site
-  (row) or species (column) properties, see \code{\link{treedive}} for
-  an example. Raup-Crick index (\code{\link{raupcrick}}) gives an
-  example of using a dissimilarities index.
+  \code{\link{nestedn0}}, \code{\link{nestedtemp}},
+  \code{\link{nestednodf}}), but many other functions can be used as
+  long as they are meaningful with simulated communities.  An
+  applicable function must return either the statistic as a plain
+  number or a vector, or as a list element \code{"statistic"} (like
+  \code{\link{chisq.test}}), or in an item whose name is given in the
+  argument \code{statistic}.  The statistic can be a single number
+  (like typical for a nestedness index), or it can be a vector. The
+  vector indices can be used to analyse site (row) or species (column)
+  properties, see \code{\link{treedive}} for an example. Raup-Crick
+  index (\code{\link{raupcrick}}) gives an example of using a
+  dissimilarity index.
 
-  Function \code{commsimulator} implements binary (presence/absence) 
-  null models for community composition.
-  The implemented models are \code{r00} which maintains the
-  number of presences but fills these anywhere so that neither species
-  (column) nor site (row) totals are preserved. Methods \code{r0},
-  \code{r1} and \code{r2} maintain the site (row) frequencies. Method \code{r0}
-  fills presences anywhere on the row with no respect to species (column)
-  frequencies, \code{r1} uses column marginal 
-  frequencies as probabilities, and \code{r2} uses squared column
-  sums. Methods \code{r1} and \code{r2} try to simulate original species
-  frequencies, but they are not strictly constrained. All these methods
-  are reviewed by Wright et al. (1998). Method \code{c0} maintains
-  species frequencies, but does not honour site (row) frequencies (Jonsson
-  2001).
+  The Null model type can be given as a name (quoted character string)
+  that is used to define a Null model in \code{\link{make.commsim}}.
+  These include all binary models described by Wright et al. (1998),
+  Jonsson (2001), Gotelli & Entsminger (2003), \enc{Miklós}{Miklos} &
+  Podani (2004), and some others. There are several quantitative Null
+  models, such as those discussed by Hardy (2008), and several that are
+  unpublished (see \code{\link{make.commsim}},
+  \code{\link{permatfull}}, \code{\link{permatswap}} for
+  discussion). The user can also define her own \code{\link{commsim}}
+  function (see Examples).
 
-  The other methods maintain both row and column frequencies.
-  Methods \code{swap} and \code{tswap} implement sequential methods,
-  where the matrix is changed only little in one step, but the changed
-  matrix is used as an input if the next step.
-  Methods \code{swap} and \code{tswap} inspect random 2x2 submatrices
-  and if they are checkerboard units, the order of columns is
-  swapped. This changes the matrix structure, but does not influence
-  marginal sums (Gotelli & Entsminger
-  2003). Method \code{swap} inspects submatrices so long that a swap
-  can be done. \enc{Miklós}{Miklos} & Podani (2004) suggest that this may lead into
-  biased sequences, since some columns or rows may be more easily
-  swapped, and they suggest trying a fixed number of times and
-  doing zero to many swaps at one step. This method is implemented by
-  method \code{tswap} or trial swap. Function \code{commsimulator} makes
-  only one trial swap in time (which probably does nothing),
-  but \code{oecosimu} estimates how many
-  submatrices are expected before finding a swappable checkerboard,
-  and uses that ratio to thin the results, so that on average one swap
-  will be found per step of \code{tswap}.  However, the checkerboard
-  frequency probably changes during swaps, but this is not taken into
-  account in estimating the \code{thin}.  One swap still changes the
-  matrix only little, and it may be useful to 
-  thin the results so that the statistic is only evaluated after
-  \code{burnin} steps (and \code{thin}ned). 
+  Function works by first defining a \code{\link{nullmodel}} with
+  given \code{\link{commsim}}, and then generating a series of
+  simulated communities with \code{\link{simulate.nullmodel}}. A
+  shortcut can be used for any of these stages and the input can be
+  \enumerate{
+    \item Community data (\code{comm}), Null model function
+      (\code{method}) and the number of simulations (\code{nsimul}).
+    \item A \code{\link{nullmodel}} object and the number of
+      simulations, and argument \code{method} is ignored.
+    \item A three-dimensional array of simulated communities generated
+      with \code{\link{simulate.nullmodel}}, and arguments
+      \code{method} and \code{nsimul} are ignored.  
+  }
+  The last case allows analysing several statistics with the same
+  simulations.
 
-  Methods \code{quasiswap} and \code{backtracking} are not sequential,
-  but each call produces a matrix that is independent of previous
-  matrices, and has the same marginal totals as the original data. The
-  recommended method is \code{quasiswap} which is much faster because
-  it is implemented in C. Method \code{backtracking} is provided for
-  comparison, but it is so slow that it may be dropped from future
-  releases of \pkg{vegan} (or also implemented in C).
-  Method \code{quasiswap} (\enc{Miklós}{Miklos} & Podani 2004)
-  implements a method where matrix is first filled 
-  honouring row and column totals, but with integers that may be larger than
-  one. Then the method inspects random 2x2 matrices and performs a
-  quasiswap on them. Quasiswap is similar to ordinary swap, but it also
-  can reduce numbers above one to ones maintaining marginal
-  totals.
-  Method \code{backtracking}
-  implements a filling method with constraints both for row and column
-  frequencies (Gotelli & Entsminger 2001). The matrix is first filled
-  randomly using row and column frequencies as probabilities. Typically
-  row and column sums are reached before all incidences are filled in.
-  After that begins \dQuote{backtracking}, where some of the
-  points are removed, and then filling is started again, and this
-  backtracking is done so may times that all incidences will be filled
-  into matrix. The \code{quasiswap} method is not sequential, but it produces
-  a random incidence matrix with given marginal totals. 
+  The function first generates simulations with given
+  \code{\link{nullmodel}} and then analyses these using the
+  \code{nestfun}.  With large data sets and/or large number of
+  simulations, the generated objects can be very large, and if the
+  memory is exhausted, the analysis can become very slow and the
+  system can become unresponsive. The simulation will be broken into
+  several smaller batches if the simulated \code{\link{nullmodel}}
+  object would be above the set \code{batchsize} to avoid memory
+  problems (see \code{\link{object.size}} for estimating the size of
+  the current data set). The parallel processing still increases the
+  memory needs.  The parallel processing is only used for evaluating
+  \code{nestfun}.  The main load may be in simulation of the
+  \code{\link{nullmodel}}, and \code{parallel} argument does not help
+  there.
 
   Function \code{as.ts} transforms the simulated results of sequential
   methods into a time series or a \code{\link{ts}} object. This allows
@@ -152,50 +158,40 @@ commsimulator(x, method, thin=1)
   \pkg{coda} package. The \pkg{coda} package provides functions for
   the analysis of stationarity, adequacy of sample size,
   autocorrelation, need of burn-in and much more for sequential
-  methods. Please consult the documentation of \pkg{coda} package.
-
-  Function \code{density} provides an interface to the
-  standard \code{\link{density}} function for the simulated
-  values. Function \code{densityplot} is an interface to the
-  \code{\link[lattice]{densityplot}} function of the \pkg{lattice}
-  package. The \code{density} can be used meaningfully only for single
-  statistics and must be plotted separately. The \code{densityplot}
-  function can handle multiple statistics, and it plots the results
-  directly. In addition to the density, the \code{densityplot} also
-  shows the observed value of the statistic (provided it is within the
-  graph limits). The \code{densityplot} function is defined as a
-  generic function in the \pkg{lattice} package and you must either
-  load the \pkg{lattice} library before calling \code{densityplot}, or
-  use the longer form \code{densityplot.oecosimu} when you first time
-  call the function.
-
-  As a result of \code{method = "r2dtable"} in \code{oecosimu}, quantitative
-  community null models are used to evaluate the statistic. This setting uses
-  the \code{\link{r2dtable}} function to generate random matrices with fixed
-  row and column totals (hypergeometric distribution). This null model is
-  used in diversity partitioning function (see \code{\link{adipart}}).
+  methods, and summary of the results. Please consult the
+  documentation of the \pkg{coda} package.
 
-  The \code{method} argument can be a function with first argument taking the 
-  community matrix, and optionally with \code{burnin} and \code{thin} argument.
-  The function must return a matrix-like object with same dimensions.
-  But be careful, blindly applying permuted matrices for null model testing
-  can be dangerous.
+  Function \code{\link{permustats}} provides support to the standard
+  \code{\link{density}}, \code{\link[lattice]{densityplot}},
+  \code{\link{qqnorm}} and \code{\link[lattice]{qqmath}} functions for
+  the simulated values.
 }
 
 \value{ 
-  Function \code{oecosimu} returns the result of \code{nestfun} added
-  with a component called \code{oecosimu}. The \code{oecosimu}
-  component contains the simulated values of the statistic (item
-  \code{simulated}), the name of the \code{method}, \eqn{P} value
-  (with given \code{alternative}), \eqn{z}-value of the statistic
-  based on simulation (also known as standardized effect size), and
-  the mean of simulations.  
+
+  Function \code{oecosimu} returns an object of class
+  \code{"oecosimu"}.  The result object has items \code{statistic} and
+  \code{oecosimu}.  The \code{statistic} contains the complete object
+  returned by \code{nestfun} for the original data.  The
+  \code{oecosimu} component contains the following items:
+  \item{statistic}{Observed values of the statistic.}
+  \item{simulated}{Simulated values of the statistic.}
+  \item{means}{Mean values of the statistic from simulations.}
+  \item{z}{\eqn{z}-values or the standardized effect sizes of the observed 
+     statistic based on simulations.}
+  \item{pval}{The \eqn{P}-values of the statistic based on simulations.}
+  \item{alternative}{The type of testing as given in argument \code{alternative}.}
+  \item{method}{The \code{method} used in \code{\link{nullmodel}}.}
+  \item{isSeq}{\code{TRUE} if \code{method} was sequential.}
+
 }
 
 \references{
-  Gotelli, N.J. & Entsminger, N.J. (2001). Swap and fill algorithms in
-  null model analysis: rethinking the knight's tour. \emph{Oecologia}
-  129, 281--291.
+  Hardy, O. J. (2008) 
+  Testing the spatial phylogenetic structure of local communities: 
+  statistical performances of different null models 
+  and test statistics on a locally neutral community. 
+  \emph{Journal of Ecology} 96, 914--926.
 
   Gotelli, N.J. & Entsminger, N.J. (2003). Swap algorithms in null model
   analysis. \emph{Ecology} 84, 532--535.
@@ -209,44 +205,34 @@ commsimulator(x, method, thin=1)
   Wright, D.H., Patterson, B.D., Mikkelson, G.M., Cutler, A. & Atmar,
   W. (1998). A comparative analysis of nested subset patterns of species
   composition. \emph{Oecologia} 113, 1--20.
-  }
-\author{ Jari Oksanen }
-\note{
-  Functions \code{commsimulator} and \code{oecosimu} do not have
-  default \code{nestfun} nor default \code{method}, because there is
-  no clear natural choice. If you use these methods, you must be able
-  to choose your own strategy. The choice of nestedness index is
-  difficult because the functions seem to imply very different
-  concepts of structure and randomness. The choice of swapping method
-  is also problematic. Method \code{r00} has some heuristic value of
-  being really random. However, it produces null models which are
-  different from observed communities in most respects, and a
-  \dQuote{significant} result may simply mean that not all species are
-  equally common (\code{r0} is similar with this respect). It is also
-  difficult to find justification for \code{r2}. The methods
-  maintaining both row and column totals only study the community
-  relations, but they can be very slow. Moreover, they regard marginal
-  totals as constraints instead of results of occurrence patterns. You
-  should evaluate timings in small trials (one cycle) before launching
-  an extensive simulation. One swap is fast, but it changes data only
-  little, and you may need long \code{burnin} and strong
-  \code{thin}ning in large matrices. You should plot the simulated
-  values to see that they are more or less stationary and there is no
-  trend. Method \code{quasiswap} is implemented
-  in C and it is much faster than \code{backtrack}.  Method
-  \code{backtrack} may be removed from later releases of \pkg{vegan}
-  because it is slow, but it is still included for comparison.
+}
+
+\author{Jari Oksanen and Peter Solymos}
 
+\note{
   If you wonder about the name of \code{oecosimu}, look at journal
-  names in the References (and more in \code{\link{nestedtemp}}).  }
+  names in the References (and more in \code{\link{nestedtemp}}).  
+
+  The internal structure of the function was radically changed in
+  \pkg{vegan 2.2-0} with introduction of \code{\link{commsim}} and
+  \code{\link{nullmodel}} and deprecation of
+  \code{\link{commsimulator}}. However, the results and the basic user
+  interface remain the same (except that \code{method = "r0_old"} must
+  be used to reproduce the old results of \code{method = "r0"}).  
+}
+
+\seealso{Function \code{oecosimu} currently defines null models with
+  \code{\link{commsim}} and generates the simulated null model
+  communities with \code{\link{nullmodel}} and
+  \code{\link{simulate.nullmodel}}. For other applications of
+  \code{oecosimu}, see \code{\link{treedive}} and
+  \code{\link{raupcrick}}.
 
-\seealso{ \code{\link{r2dtable}} generates table with given marginals but
-  with entries above one. Functions \code{\link{permatfull}} and
-  \code{\link{permatswap}} generate Null models for count data.
   Function \code{\link[labdsv]{rndtaxa}}
   (\pkg{labdsv} package) randomizes a community table. See also
   \code{\link{nestedtemp}} (that also discusses other nestedness
-  functions) and \code{\link{treedive}} for another application. }
+  functions) and \code{\link{treedive}} for another application. 
+}
 \examples{
 ## Use the first eigenvalue of correspondence analysis as an index
 ## of structure: a model for making your own functions.
@@ -262,22 +248,28 @@ plot(as.ts(out))
 lag.plot(as.ts(out))
 acf(as.ts(out))
 ## Density plot
-densityplot(out, as.table = TRUE)
+densityplot(permustats(out), as.table = TRUE, layout = c(1,4))
 ## Use quantitative null models to compare
 ## mean Bray-Curtis dissimilarities
 data(dune)
 meandist <- function(x) mean(vegdist(x, "bray"))
 mbc1 <- oecosimu(dune, meandist, "r2dtable")
 mbc1
-## Define a custom function that shuffles
-## cells in each rows
-f <- function(x) {
-    apply(x, 2, function(z) sample(z, length(z)))
+
+## Define your own null model as a 'commsim' function: shuffle cells
+## in each row
+foo <- function(x, n, nr, nc, ...) {
+   out <- array(0, c(nr, nc, n))
+   for (k in seq_len(n))
+      out[,,k] <- apply(x, 2, function(z) sample(z, length(z)))
+   out
 }
-mbc2 <- oecosimu(as.matrix(dune), meandist, f)
-mbc2
+cf <- commsim("myshuffle", foo, isSeq = FALSE, binary = FALSE, 
+   mode = "double")
+oecosimu(dune, meandist, cf)
 }
 \keyword{ multivariate }
 \keyword{ datagen }
+\keyword{ nonparametric }
 
 
diff --git a/man/ordihull.Rd b/man/ordihull.Rd
index e269604..a0ca353 100644
--- a/man/ordihull.Rd
+++ b/man/ordihull.Rd
@@ -9,6 +9,7 @@
 \alias{summary.ordihull}
 \alias{scores.ordihull}
 \alias{summary.ordiellipse}
+\alias{ordiareatest}
 
 \title{Display Groups or Factor Levels in Ordination Diagrams}
 
@@ -24,11 +25,14 @@ ordiellipse(ord, groups, display="sites", kind = c("sd","se"), conf,
          draw = c("lines","polygon", "none"), w = weights(ord, display),
          col = NULL, alpha = 127, show.groups, label = FALSE, ...)
 ordispider(ord, groups, display="sites", w = weights(ord, display),
-         show.groups, label = FALSE, ...)
+	 spiders = c("centroid", "median"),  show.groups, 
+         label = FALSE, ...)
 ordicluster(ord, cluster, prune = 0, display = "sites",
          w = weights(ord, display), ...)
 \method{summary}{ordihull}(object, ...)
 \method{summary}{ordiellipse}(object, ...)
+ordiareatest(ord, groups, area = c("hull", "ellipse"), permutations = 999,
+         parallel = getOption("mc.cores"), ...)
 }
 
 \arguments{
@@ -80,6 +84,9 @@ ordicluster(ord, cluster, prune = 0, display = "sites",
     corresponding value found from the Chi-squared distribution with
     2df. }
 
+  \item{spiders}{Whether the centres or spider bodies are calculated
+    as centroids (averages) or spatial medians.}
+
   \item{cluster}{Result of hierarchic cluster analysis, such as
     \code{\link{hclust}} or \code{\link[cluster]{agnes}}.}
 
@@ -92,6 +99,19 @@ ordicluster(ord, cluster, prune = 0, display = "sites",
     can be saved, and used for summaries (areas etc. of hulls and
     ellipses). }
 
+  \item{area}{Evaluate the area of convex hulls of \code{ordihull}, or of
+    ellipses of \code{ordiellipse}.}
+
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package.}
+
   \item{\dots}{Parameters passed to graphical functions or to
     \code{\link{scores}} to select axes and scaling etc. } 
 }
@@ -110,6 +130,12 @@ ordicluster(ord, cluster, prune = 0, display = "sites",
   An ellipsoid hull can be drawn with function
   \code{\link[cluster]{ellipsoidhull}} of package \pkg{cluster}.
 
+  Function \code{ordihull} and \code{ordiellipse} return invisibly an
+  object that has a \code{summary} method that returns the coordinates
+  of centroids and areas of the hulls or ellipses. Function
+  \code{ordiareatest} studies the one-sided hypothesis that these
+  areas are smaller than with randomized \code{groups}.
+
   Function \code{ordispider} draws a \sQuote{spider} diagram where
   each point is connected to the group centroid with
   \code{\link{segments}}.  Weighted centroids are used in the
@@ -147,10 +173,7 @@ ordicluster(ord, cluster, prune = 0, display = "sites",
   Function \code{ordihull} returns a list of coordinates of the hulls
   (which can be extracted with \code{scores}), and \code{ordiellipse}
   returns a list of covariance matrices and scales used in drawing the
-  ellipses.  These result objects have a \code{summary} method that
-  returns the coordinates of the centres of the ellipses or hulls and
-  their surface areas in user units.  With \code{draw = "none"} only
-  the result object is returned and nothing is drawn.
+  ellipses.
 
 }
 
diff --git a/man/ordiplot3d.Rd b/man/ordiplot3d.Rd
deleted file mode 100644
index fccb920..0000000
--- a/man/ordiplot3d.Rd
+++ /dev/null
@@ -1,228 +0,0 @@
-\name{ordiplot3d}
-\alias{ordiplot3d}
-\alias{ordirgl}
-\alias{orglpoints}
-\alias{orgltext}
-\alias{orglsegments}
-\alias{orglspider}
-
-\title{Three-Dimensional and Dynamic Ordination Graphics }
-\description{
-  Function \code{ordiplot3d} displays three-dimensional ordination
-  graphics using \code{\link[scatterplot3d]{scatterplot3d}}.  Function
-  \code{ordirgl} displays three-dimensional dynamic ordination graphs
-  which can be rotated and zoomed into using \code{\link[rgl]{rgl}}
-  package. Both work with all ordination
-  results form \code{vegan} and all ordination results known by
-  \code{\link{scores}} function. 
-}
-\usage{
-ordiplot3d(object, display = "sites", choices = 1:3, ax.col = 2,
-        arr.len = 0.1, arr.col = 4, envfit, xlab, ylab, zlab, ...)
-ordirgl(object, display = "sites", choices = 1:3, type = "p", 
-        ax.col = "red", arr.col = "yellow", text, envfit, ...)
-orglpoints(object, display = "sites", choices = 1:3, ...)
-orgltext(object, text, display = "sites", choices = 1:3, justify = "center", 
-        adj = 0.5, ...)
-orglsegments(object, groups, display = "sites", choices = 1:3, ...)
-orglspider(object, groups, display = "sites", w = weights(object, display),
-        choices = 1:3, ...)
-}
-\arguments{
-  \item{object}{An ordination result or any object known by \code{\link{scores}}. }
-  \item{display}{Display \code{"sites"} or \code{"species"} or other
-    ordination object recognized by \code{\link{scores}}. }
-  \item{choices}{Selected three axes. }
-    \item{arr.len}{'Length' (width) of arrow head passed to
-    \code{\link{arrows}} function. }
-  \item{arr.col}{Colour of biplot \code{\link{arrows}} and centroids of
-    environmental variables. }
-  \item{type}{The type of plots: \code{"p"} for points or \code{"t"} for
-    text labels.}
-  \item{ax.col}{Axis colour (concerns only the crossed axes through the
-    origin).}
-  \item{text}{Text to override the default with \code{type = "t"}.}
-  \item{envfit}{Fitted environmental variables from \code{\link{envfit}}
-    displayed in the graph.}
-  \item{xlab, ylab, zlab}{Axis labels passed to
-    \code{\link[scatterplot3d]{scatterplot3d}}. If missing, labels  are
-    taken from the
-    ordination result. Set to \code{NA} to suppress labels. }
-  \item{justify, adj}{Text justification passed to
-    \code{\link[rgl]{rgl.texts}}. One of these is used depending on the
-    version of \pkg{rgl} installed.}
-  \item{groups}{Factor giving the groups for which the graphical item is
-    drawn.}
-  \item{w}{Weights used to find the average within group. Weights are
-    used automatically for \code{\link{cca}}
-    and \code{\link{decorana}} results, unless undone by the
-    user. \code{w=NULL} sets equal weights to all points. }
-  \item{\dots}{Other parameters passed to graphical functions. }
-}
-\details{
-  Both function display three-dimensional ordination graphics. Function
-  \code{ordiplot3d} plots static scatter diagrams using
-  \code{\link[scatterplot3d]{scatterplot3d}}. Function \code{ordirgl}
-  plots dynamic graphics using OpenGL  in \code{\link[rgl]{rgl}}. Both
-  functions use most default settings of underlying graphical functions,
-  and you must consult their help pages to change graphics to suit your
-  taste (see \code{\link[scatterplot3d]{scatterplot3d}},
-  \code{\link[rgl]{rgl}},
-  \code{\link[rgl]{rgl.points}},\code{\link[rgl]{rgl.texts}}). Both
-  functions will display only one selected set of \code{\link{scores}},
-  typically either \code{"sites"} or \code{"species"}, but
-  for instance \code{\link{cca}} also has \code{"lc"} scores. In
-  constrained ordination (\code{\link{cca}}, \code{\link{rda}},
-  \code{\link{capscale}}), biplot arrows and centroids are always
-  displayed similarly as in two-dimensional plotting function
-  \code{\link{plot.cca}}.  Alternatively, it is possible to display
-  fitted environmental vectors or class centroids from
-  \code{\link{envfit}} in both graphs.  These are displayed similarly as
-  the results of constrained ordination, and they can be shown only for
-  non-constrained ordination. The user must remember to specify at least
-  three axes in \code{\link{envfit}} if the results are used with these
-  functions. 
-
-  Function \code{ordiplot3d} plots only points. However, it returns
-  invisibly an object inheriting from \code{\link{ordiplot}} so that
-  you can use \code{\link{identify.ordiplot}} to identify
-  \code{"points"} or \code{"arrows"}. The underlying
-  \code{\link[scatterplot3d]{scatterplot3d}} function accepts
-  \code{type = "n"} so that only the axes, biplot arrows and centroids
-  of environmental variables will be plotted, and the ordination
-  scores can be added with \code{\link{text.ordiplot}} or
-  \code{\link{points.ordiplot}}. Further, you can use any functions
-  from the \code{\link{ordihull}} family with the invisible result of
-  \code{\link{ordiplot3d}}, but you must remember to specify the
-  \code{display} as \code{"points"} or \code{"arrows"}. To change the
-  viewing angle, orientation etc.{} you must see
-  \code{\link[scatterplot3d]{scatterplot3d}}. Only one kind of scores
-  will be plotted.  See Examples for plotting both species and
-  site scores.
-
-  Function \code{ordigl} makes a dynamic three-dimensional graph that
-  can be rotated with mouse, and zoomed into with mouse buttons or wheel
-  (but Mac users with one-button mouse should see
-  \code{\link[rgl]{rgl.viewpoint}}), or try ctrl-button. MacOS X users
-  must start \code{X11} before calling \code{\link[rgl]{rgl}} commands.
-  Function \code{ordirgl} uses default settings, and you should consult the
-  underlying functions  \code{\link[rgl]{rgl.points}},
-  \code{\link[rgl]{rgl.texts}} to see how to control the
-  graphics. Function \code{ordirgl} always cleans its graphic window
-  before drawing.  Functions \code{orglpoints} adds points and
-  \code{orgltext} adds text to existing \code{ordirgl} windows.  In
-  addition, function \code{orglsegments} combines points within
-  \code{"groups"} with line segments similarly as
-  \code{\link{ordisegments}}. Function \code{orglspider} works similarly
-  as \code{\link{ordispider}}: it connects points to their weighted
-  centroid within \code{"groups"}, and in constrained ordination it can
-  connect \code{"wa"} or weighted averages scores to corresponding
-  \code{"lc"} or linear combination scores if \code{"groups"} is
-  missing. In addition, basic \code{rgl} functions 
-  \code{\link[rgl]{rgl.points}}, \code{\link[rgl]{rgl.texts}},
- \code{\link[rgl]{rgl.lines}} and many others can be used. 
-}
- 
-\value{
-
-  Function \code{ordiplot3d} returns invisibly an object of class
-  \code{"ordiplot3d"} inheriting from \code{\link{ordiplot}}. The
-  return object will contain the coordinates projected onto two
-  dimensions for \code{points}, and the projected coordinates of
-  \code{origin}, and possibly the projected coordinates of the heads
-  of \code{arrows} and \code{centroids} of environmental variables.
-  Functions like \code{\link{identify.ordiplot}},
-  \code{\link{points.ordiplot}}, \code{\link{text.ordiplot}} can use
-  this result, as well as \code{\link{ordihull}} and other functions
-  documented with the latter. The result will also contain the object
-  returned by \code{\link[scatterplot3d]{scatterplot3d}}, including
-  function \code{xyz.convert} which projects three-dimensional
-  coordinates onto the plane used in the current plot (see
-  Examples). In addition, there is a function \code{envfit.convert}
-  that projects a three-dimensional \code{\link{envfit}} object to the
-  current plot.
-
-  Function \code{ordirgl} returns nothing.
-
-}
-
-\author{Jari Oksanen }
-
-\section{Warning}{Function \code{ordirgl} uses OpenGL package
-  \code{\link[rgl]{rgl}}
-  which may not be functional in all platforms, and can crash R in some:
-  use \code{\link{save.image}} before trying \code{ordirgl}.
-  Mac users must start \code{X11} (and first install \code{X11} and some other
-  libraries) before being able to use \code{\link[rgl]{rgl}}. It seems
-  that \code{\link[rgl]{rgl.texts}} does  not always position the text
-  like supposed, and it may be safe to verify text location with
-  corresponding points.
-
-  Function \code{ordiplot3d} is based on
-  \code{\link[scatterplot3d]{scatterplot3d}} which does not allow
-  exactly setting equal aspect ratio for axes.  The function tries to
-  circumvent this by setting equal plotting ranges for all axes so that
-  the plot should be a cube.  Depending on the dimensions of plotting
-  device, this may fail, and the user should verify that the axes are
-  approximately equal.
-
-  Please note that \code{\link[scatterplot3d]{scatterplot3d}} sets
-  internally some graphical parameters (such as \code{mar} for margins)
-  and does not honour default settings.  It is advisable to study
-  carefully the documentation and examples of
-  \code{\link[scatterplot3d]{scatterplot3d}}.
-}
-
-\note{The user interface of \pkg{rgl} changed in version 0.65, but
-  the \code{ordirgl} functions do not yet fully use the new
-  capabilities.  However, they should work both in old and new versions
-  of \pkg{rgl}. 
-  }
-
-\seealso{   \code{\link[scatterplot3d]{scatterplot3d}},
-  \code{\link[rgl]{rgl}}, \code{\link[rgl]{rgl.points}},
-  \code{\link[rgl]{rgl.texts}}, \code{\link[rgl]{rgl.viewpoint}},
-  \code{\link{ordiplot}}, \code{\link{identify.ordiplot}}, 
-  \code{\link{text.ordiplot}}, \code{\link{points.ordiplot}},
-  \code{\link{ordihull}}, \code{\link{plot.cca}}, \code{\link{envfit}}.
-}
-\examples{
-## Examples are not run, because they need non-standard packages
-## 'scatterplot3d' and 'rgl' (and the latter needs user interaction).
-#####
-### Default 'ordiplot3d'
-\dontrun{
-data(dune)
-data(dune.env)
-ord <- cca(dune ~ A1 + Moisture, dune.env)
-ordiplot3d(ord)
-### A boxed 'pin' version
-ordiplot3d(ord, type = "h")
-### More user control
-pl <- ordiplot3d(ord, scaling = 3, angle=15, type="n")
-points(pl, "points", pch=16, col="red", cex = 0.7)
-### identify(pl, "arrows", col="blue") would put labels in better positions
-text(pl, "arrows", col="blue", pos=3)
-text(pl, "centroids", col="blue", pos=1, cex = 1)
-### Add species using xyz.convert function returned by ordiplot3d
-sp <- scores(ord, choices=1:3, display="species", scaling=3)
-text(pl$xyz.convert(sp), rownames(sp), cex=0.7, xpd=TRUE)
-### Two ways of adding fitted variables to ordination plots
-ord <- cca(dune)
-ef <- envfit(ord ~ Moisture + A1, dune.env, choices = 1:3)
-### 1. use argument 'envfit'
-ordiplot3d(ord, envfit = ef)
-### 2. use returned envfit.convert function for better user control
-pl3 <- ordiplot3d(ord)
-plot(pl3$envfit.convert(ef), at = pl3$origin)
-### envfit.convert() also handles different 'choices' of axes
-pl3 <- ordiplot3d(ord, choices = c(1,3,2))
-plot(pl3$envfit.convert(ef), at = pl3$origin)
-### ordirgl
-ordirgl(ord, size=2)
-ordirgl(ord, display = "species", type = "t")
-rgl.quit()
-}
-}
-\keyword{ hplot }
-\keyword{ dynamic }
diff --git a/man/ordistep.Rd b/man/ordistep.Rd
index 430774b..ba85e89 100644
--- a/man/ordistep.Rd
+++ b/man/ordistep.Rd
@@ -16,10 +16,10 @@
 }
 \usage{
 ordistep(object, scope, direction = c("both", "backward", "forward"),
-   Pin = 0.05, Pout = 0.1, pstep = 100, perm.max = 1000, steps = 50,
+   Pin = 0.05, Pout = 0.1, permutations = how(nperm = 199), steps = 50,
    trace = TRUE, ...)
 ordiR2step(object, scope, direction = c("both", "forward"),
-   Pin = 0.05, R2scope = TRUE, pstep = 100, perm.max = 1000,
+   Pin = 0.05, R2scope = TRUE, permutations = how(nperm = 499),
    trace = TRUE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -51,12 +51,12 @@ ordiR2step(object, scope, direction = c("both", "forward"),
   lower adjusted \eqn{R^2}{R2} than scope are accepted.
   }
 
-  \item{pstep}{
-  Number of permutations in one step. See \code{\link{add1.cca}}.
-}
-  \item{perm.max}{
-  Maximum number of permutation in \code{\link{anova.cca}}.
-}
+  \item{permutations}{a list of control values for the permutations as
+    returned by the function \code{\link[permute]{how}}, or the number
+    of permutations required, or a permutation matrix where each row
+    gives the permuted indices. This is passed to
+    \code{\link{anova.cca}}: see there for details.  }
+
   \item{steps}{
   Maximum number of iteration steps of dropping and adding terms.
 }
diff --git a/man/ordisurf.Rd b/man/ordisurf.Rd
index 2641c9b..356bbdc 100644
--- a/man/ordisurf.Rd
+++ b/man/ordisurf.Rd
@@ -269,15 +269,15 @@ data(varespec)
 data(varechem)
 vare.dist <- vegdist(varespec)
 vare.mds <- monoMDS(vare.dist)
-with(varechem, ordisurf(vare.mds, Baresoil, bubble = 5))
+ordisurf(vare.mds ~ Baresoil, varechem, bubble = 5)
 
 ## as above but without the extra penalties on smooth terms,
 ## and using GCV smoothness selection (old behaviour of `ordisurf()`):
-with(varechem, ordisurf(vare.mds, Baresoil,col = "blue", add = TRUE,
-                        select = FALSE, method = "GCV.Cp"))
+ordisurf(vare.mds ~ Baresoil, varechem, col = "blue", add = TRUE,
+                        select = FALSE, method = "GCV.Cp")
 
 ## Cover of Cladina arbuscula
-fit <- with(varespec, ordisurf(vare.mds, Cla.arb, family=quasipoisson)) 
+fit <- ordisurf(vare.mds ~ Cladarbu, varespec, family=quasipoisson) 
 ## Get fitted values
 calibrate(fit)
 
@@ -286,11 +286,9 @@ calibrate(fit)
 ## of the model not just to a linear surface. There are 2
 ## options available:
 ##  - option 1: `select = TRUE` --- the *default*
-with(varechem,
-     ordisurf(vare.mds, Baresoil, method = "REML", select = TRUE))
+ordisurf(vare.mds ~ Baresoil, varechem, method = "REML", select = TRUE)
 ##  - option 2: use a basis with shrinkage
-with(varechem,
-     ordisurf(vare.mds, Baresoil, method = "REML", bs = "ts"))
+ordisurf(vare.mds ~ Baresoil, varechem, method = "REML", bs = "ts")
 ## or bs = "cs" with `isotropic = FALSE`
 
 ## Plot method
@@ -299,27 +297,27 @@ plot(fit, what = "contour")
 ## Plotting the "gam" object
 plot(fit, what = "gam") ## 'col' and 'cex' not passed on
 ## or via plot.gam directly
+library(mgcv)
 plot.gam(fit, cex = 2, pch = 1, col = "blue")
 ## 'col' effects all objects drawn...
 
 ### controlling the basis functions used
 ## Use Duchon splines
-with(varechem, ordisurf(vare.mds, Baresoil, bs = "ds"))
+ordisurf(vare.mds ~ Baresoil, varechem, bs = "ds")
 
 ## A fixed degrees of freedom smooth, must use 'select = FALSE'
-with(varechem, ordisurf(vare.mds, Baresoil, knots = 4,
-                        fx = TRUE, select = FALSE))
+ordisurf(vare.mds ~ Baresoil, varechem, knots = 4,
+                        fx = TRUE, select = FALSE)
 
 ## An anisotropic smoother with cubic regression spline bases
-with(varechem, ordisurf(vare.mds, Baresoil, isotropic = FALSE,
-                        bs = "cr", knots = 4))
+ordisurf(vare.mds ~ Baresoil, varechem, isotropic = FALSE,
+                        bs = "cr", knots = 4)
 
 ## An anisotropic smoother with cubic regression spline with
 ## shrinkage bases & different degrees of freedom in each dimension
-with(varechem, ordisurf(vare.mds, Baresoil, isotropic = FALSE,
+ordisurf(vare.mds ~ Baresoil, varechem, isotropic = FALSE,
                         bs = "cs", knots = c(3,4), fx = TRUE,
-                        select = FALSE))
-
+                        select = FALSE)
 }
 \keyword{ multivariate }
 \keyword{ aplot }
diff --git a/man/orditkplot.Rd b/man/orditkplot.Rd
index 4044b7e..e3a4b89 100644
--- a/man/orditkplot.Rd
+++ b/man/orditkplot.Rd
@@ -144,7 +144,7 @@ orditkplot(x, display = "species", choices = 1:2, width, xlim, ylim,
 \seealso{ Function \code{\link{ordipointlabel}} is an automatic
   procedure with similar goals of avoiding overplotting. 	   
   See \code{\link{ordiplot}}, \code{\link{plot.cca}},
-  \code{\link{ordirgl}} and \code{\link{orditorp}} for alternative
+  \code{\link[vegan3d]{ordirgl}} and \code{\link{orditorp}} for alternative
   ordination plots, and \code{\link{scores}} for extracting ordination
   scores.  }
 \examples{
diff --git a/man/pcnm.Rd b/man/pcnm.Rd
index 7906479..682ada0 100644
--- a/man/pcnm.Rd
+++ b/man/pcnm.Rd
@@ -1,114 +1,114 @@
-\name{pcnm}
-\alias{pcnm}
-\alias{scores.pcnm}
-\title{ Principal Coordinates of Neighbourhood Matrix }
-\description{
-  This function computed classical PCNM by the principal coordinate
-  analysis of a truncated distance matrix. These are commonly used to
-  transform (spatial) distances to rectangular data that suitable for
-  constrained ordination or regression. 
-}
-\usage{
-pcnm(dis, threshold, w, dist.ret = FALSE)
-}
-
-\arguments{
-  \item{dis}{ A distance matrix. }
-  \item{threshold}{ A threshold value or truncation distance. If
-    missing, minimum distance giving connected network will be
-    used. This is found as the longest distance in the minimum spanning
-    tree of \code{dis}. }
-  \item{w}{Prior weights for rows.}
-  \item{dist.ret}{Return the distances used to calculate the PCNMs.}
-}
-
-\details{
-  Principal Coordinates of Neighbourhood Matrix (PCNM) map distances
-  between rows onto rectangular matrix on rows using a truncation
-  threshold for long distances (Borcard & Legendre 2002). If original
-  distances were Euclidean distances in two dimensions (like normal
-  spatial distances), they could be mapped onto two dimensions if there
-  is no truncation of distances. Because of truncation, there will be a
-  higher number of principal coordinates. The selection of truncation
-  distance has a huge influence on the PCNM vectors. The default is to
-  use the longest distance to keep data connected. The distances above
-  truncation threshold are given an arbitrary value of 4 times
-  threshold.  For regular data, the first PCNM vectors show a wide scale
-  variation and later PCNM vectors show smaller scale variation (Borcard
-  & Legendre 2002), but for irregular data the interpretation is not as
-  clear.
-
-  The PCNM functions are used to express distances in rectangular form
-  that is similar to normal explanatory variables used in, e.g.,
-  constrained ordination (\code{\link{rda}}, \code{\link{cca}} and
-  \code{\link{capscale}}) or univariate regression (\code{\link{lm}})
-  together with environmental variables (row weights should be supplied
-  with \code{\link{cca}}; see Examples). This is regarded as a more
-  powerful method than forcing rectangular environmental data into
-  distances and using them in partial mantel analysis
-  (\code{\link{mantel.partial}}) together with geographic distances
-  (Legendre et al. 2008, but see Tuomisto & Ruokolainen 2008).
-  
-  The function is based on \code{pcnm} function in Dray's unreleased
-  \pkg{spacemakeR} package. The differences are that the current
-  function uses \code{\link{spantree}} as an internal support
-  function. The current function also can use prior weights for rows by
-  using weighted metric scaling of \code{\link{wcmdscale}}. The use of
-  row weights allows finding orthonormal PCNMs also for correspondence
-  analysis (e.g., \code{\link{cca}}).
-  }
-
-\value{
-  A list of the following elements:
-  \item{values }{Eigenvalues obtained by the principal coordinates
-    analysis.} 
-  \item{vectors }{Eigenvectors obtained by the principal coordinates
-    analysis. They are scaled to unit norm. The vectors can be extracted 
-    with \code{scores} function. The default is to return all PCNM vectors,
-    but argument \code{choices} selects the given vectors.} 
- \item{threshold}{Truncation distance.}
- \item{dist}{The distance matrix where values above \code{threshold}
-    are replaced with arbitrary value of four times the
-    threshold. String \code{"pcnm"} is added to the \code{method}
-    attribute, and new attribute \code{threshold} is added to the
-    distances. This is returned only when \code{dist.ret = TRUE}.  }
-}
-
-\references{
-  Borcard D. and Legendre P. (2002) All-scale spatial analysis of
-  ecological data by means of principal coordinates of neighbour
-  matrices. \emph{Ecological Modelling} \bold{153}, 51--68.
-  
-  Legendre, P., Bordard, D and Peres-Neto, P. (2008) Analyzing or
-  explaining beta diversity? Comment. \emph{Ecology} \bold{89},
-  3238--3244.
-
-  Tuomisto, H. & Ruokolainen, K. (2008) Analyzing or explaining beta
-  diversity? A reply. \emph{Ecology} \bold{89}, 3244--3256.
-}
-
-\author{Jari Oksanen, based on the code of Stephane Dray.}
-\seealso{ \code{\link[vegan]{spantree}}. }
-\examples{
-## Example from Borcard & Legendre (2002)
-data(mite.xy)
-pcnm1 <- pcnm(dist(mite.xy))
-op <- par(mfrow=c(1,3))
-## Map of PCNMs in the sample plot
-ordisurf(mite.xy, scores(pcnm1, choi=1), bubble = 4, main = "PCNM 1")
-ordisurf(mite.xy, scores(pcnm1, choi=2), bubble = 4, main = "PCNM 2")
-ordisurf(mite.xy, scores(pcnm1, choi=3), bubble = 4, main = "PCNM 3")
-par(op)
-## Plot first PCNMs against each other
-ordisplom(pcnm1, choices=1:4)
-## Weighted PCNM for CCA
-data(mite)
-rs <- rowSums(mite)/sum(mite)
-pcnmw <- pcnm(dist(mite.xy), w = rs)
-ord <- cca(mite ~ scores(pcnmw))
-## Multiscale ordination: residual variance should have no distance
-## trend
-msoplot(mso(ord, mite.xy))
-}
-\keyword{ spatial }
-\keyword{ multivariate }
+\name{pcnm}
+\alias{pcnm}
+\alias{scores.pcnm}
+\title{ Principal Coordinates of Neighbourhood Matrix }
+\description{
+  This function computes classical PCNM by the principal coordinate
+  analysis of a truncated distance matrix. These are commonly used to
+  transform (spatial) distances to rectangular data that are suitable
+  for constrained ordination or regression. 
+}
+\usage{
+pcnm(dis, threshold, w, dist.ret = FALSE)
+}
+
+\arguments{
+  \item{dis}{ A distance matrix. }
+  \item{threshold}{ A threshold value or truncation distance. If
+    missing, minimum distance giving connected network will be
+    used. This is found as the longest distance in the minimum spanning
+    tree of \code{dis}. }
+  \item{w}{Prior weights for rows.}
+  \item{dist.ret}{Return the distances used to calculate the PCNMs.}
+}
+
+\details{
+  Principal Coordinates of Neighbourhood Matrix (PCNM) map distances
+  between rows onto rectangular matrix on rows using a truncation
+  threshold for long distances (Borcard & Legendre 2002). If original
+  distances were Euclidean distances in two dimensions (like normal
+  spatial distances), they could be mapped onto two dimensions if there
+  is no truncation of distances. Because of truncation, there will be a
+  higher number of principal coordinates. The selection of truncation
+  distance has a huge influence on the PCNM vectors. The default is to
+  use the longest distance to keep data connected. The distances above
+  truncation threshold are given an arbitrary value of 4 times
+  threshold.  For regular data, the first PCNM vectors show a wide scale
+  variation and later PCNM vectors show smaller scale variation (Borcard
+  & Legendre 2002), but for irregular data the interpretation is not as
+  clear.
+
+  The PCNM functions are used to express distances in rectangular form
+  that is similar to normal explanatory variables used in, e.g.,
+  constrained ordination (\code{\link{rda}}, \code{\link{cca}} and
+  \code{\link{capscale}}) or univariate regression (\code{\link{lm}})
+  together with environmental variables (row weights should be supplied
+  with \code{\link{cca}}; see Examples). This is regarded as a more
+  powerful method than forcing rectangular environmental data into
+  distances and using them in partial mantel analysis
+  (\code{\link{mantel.partial}}) together with geographic distances
+  (Legendre et al. 2008, but see Tuomisto & Ruokolainen 2008).
+  
+  The function is based on \code{pcnm} function in Dray's unreleased
+  \pkg{spacemakeR} package. The differences are that the current
+  function uses \code{\link{spantree}} as an internal support
+  function. The current function also can use prior weights for rows by
+  using weighted metric scaling of \code{\link{wcmdscale}}. The use of
+  row weights allows finding orthonormal PCNMs also for correspondence
+  analysis (e.g., \code{\link{cca}}).
+  }
+
+\value{
+  A list of the following elements:
+  \item{values }{Eigenvalues obtained by the principal coordinates
+    analysis.} 
+  \item{vectors }{Eigenvectors obtained by the principal coordinates
+    analysis. They are scaled to unit norm. The vectors can be extracted 
+    with \code{scores} function. The default is to return all PCNM vectors,
+    but argument \code{choices} selects the given vectors.} 
+ \item{threshold}{Truncation distance.}
+ \item{dist}{The distance matrix where values above \code{threshold}
+    are replaced with arbitrary value of four times the
+    threshold. String \code{"pcnm"} is added to the \code{method}
+    attribute, and new attribute \code{threshold} is added to the
+    distances. This is returned only when \code{dist.ret = TRUE}.  }
+}
+
+\references{
+  Borcard D. and Legendre P. (2002) All-scale spatial analysis of
+  ecological data by means of principal coordinates of neighbour
+  matrices. \emph{Ecological Modelling} \bold{153}, 51--68.
+  
+  Legendre, P., Borcard, D. and Peres-Neto, P. (2008) Analyzing or
+  explaining beta diversity? Comment. \emph{Ecology} \bold{89},
+  3238--3244.
+
+  Tuomisto, H. & Ruokolainen, K. (2008) Analyzing or explaining beta
+  diversity? A reply. \emph{Ecology} \bold{89}, 3244--3256.
+}
+
+\author{Jari Oksanen, based on the code of Stephane Dray.}
+\seealso{ \code{\link[vegan]{spantree}}. }
+\examples{
+## Example from Borcard & Legendre (2002)
+data(mite.xy)
+pcnm1 <- pcnm(dist(mite.xy))
+op <- par(mfrow=c(1,3))
+## Map of PCNMs in the sample plot
+ordisurf(mite.xy, scores(pcnm1, choi=1), bubble = 4, main = "PCNM 1")
+ordisurf(mite.xy, scores(pcnm1, choi=2), bubble = 4, main = "PCNM 2")
+ordisurf(mite.xy, scores(pcnm1, choi=3), bubble = 4, main = "PCNM 3")
+par(op)
+## Plot first PCNMs against each other
+ordisplom(pcnm1, choices=1:4)
+## Weighted PCNM for CCA
+data(mite)
+rs <- rowSums(mite)/sum(mite)
+pcnmw <- pcnm(dist(mite.xy), w = rs)
+ord <- cca(mite ~ scores(pcnmw))
+## Multiscale ordination: residual variance should have no distance
+## trend
+msoplot(mso(ord, mite.xy))
+}
+\keyword{ spatial }
+\keyword{ multivariate }
diff --git a/man/permatfull.Rd b/man/permatfull.Rd
index acf1a19..2460033 100644
--- a/man/permatfull.Rd
+++ b/man/permatfull.Rd
@@ -1,282 +1,292 @@
-\encoding{UTF-8}
-\name{permat}
-\alias{permatfull}
-\alias{permatswap}
-\alias{summary.permat}
-\alias{print.summary.permat}
-\alias{print.permat}
-\alias{plot.permat}
-\alias{lines.permat}
-\alias{as.ts.permat}
-\alias{as.mcmc.permat}
-
-\title{Matrix Permutation Algorithms for Presence-Absence and Count Data}
-
-\description{ Individual (for count data) or incidence (for
-presence-absence data) based null models can be generated for
-community level simulations. Options for preserving characteristics of
-the original matrix (rows/columns sums, matrix fill) and
-restricted permutations (based on strata) are discussed in the
-Details section.}
-
-\usage{
-permatfull(m, fixedmar = "both", shuffle = "both", strata = NULL, 
-    mtype = "count", times = 99)
-permatswap(m, method = "quasiswap", fixedmar="both", shuffle = "both",
-    strata = NULL, mtype = "count", times = 99, burnin = 0, thin = 1)
-\method{print}{permat}(x, digits = 3, ...)
-\method{summary}{permat}(object, ...)
-\method{print}{summary.permat}(x, digits = 2, ...)
-\method{plot}{permat}(x, type = "bray", ylab, xlab, col, lty,
-    lowess = TRUE, plot = TRUE, text = TRUE, ...)
-\method{lines}{permat}(x, type = "bray", ...)
-\method{as.ts}{permat}(x, type = "bray", ...)
-\method{as.mcmc}{permat}(x)
-}
-\arguments{
-  \item{m}{A community data matrix with plots (samples) as rows and
-    species (taxa) as columns.} 
-  \item{fixedmar}{character, stating which of the row/column sums should
-    be preserved (\code{"none", "rows", "columns", "both"}).} 
-  \item{strata}{Numeric vector or factor with length same as
-    \code{nrow(m)} for grouping rows within strata for restricted
-    permutations. Unique values or levels are used.} 
-  \item{mtype}{Matrix data type, either \code{"count"} for count data,
-    or \code{"prab"} for presence-absence type incidence data.} 
-  \item{times}{Number of permuted matrices.} 
-  \item{method}{Character for method used for the swap algorithm
-    (\code{"swap"}, \code{"tswap"}, \code{"quasiswap"},
-    \code{"backtrack"}) as described for function
-    \code{\link{commsimulator}}. If \code{mtype="count"} the
-    \code{"quasiswap"}, \code{"swap"}, \code{"swsh"} and
-    \code{"abuswap"} methods are available (see details).} 
-  \item{shuffle}{Character, indicating whether individuals
-    (\code{"ind"}), samples (\code{"samp"}) or both (\code{"both"})
-    should be shuffled, see details.} 
-  \item{burnin}{Number of null communities discarded before proper
-    analysis in sequential (\code{"swap", "tswap"}) methods.} 
-  \item{thin}{Number of discarded permuted matrices between two
-    evaluations in sequential (\code{"swap", "tswap"}) methods.} 
-  \item{x, object}{Object of class \code{"permat"}} 
-  \item{digits}{Number of digits used for rounding.}
-  \item{ylab, xlab, col, lty}{graphical parameters for the \code{plot}
-    method.} 
-  \item{type}{Character, type of plot to be displayed: \code{"bray"} for
-    Bray-Curtis dissimilarities, \code{"chisq"} for Chi-squared values.} 
-  \item{lowess, plot, text}{Logical arguments for the \code{plot}
-    method, whether a locally weighted regression curve should be drawn,
-    the plot should be drawn, and statistic values should be printed on
-    the plot.} 
-  \item{\dots}{Other arguments passed to methods.}
-}
-
-\details{
-  The function \code{permatfull} is useful when matrix fill is
-  allowed to vary, and matrix type is \code{count}.  The \code{fixedmar}
-  argument is used to set constraints for permutation.  If \code{none}
-  of the margins are fixed, cells are randomised within the matrix.  If
-  \code{rows} or \code{columns} are fixed, cells within rows or columns
-  are randomised, respectively.  If \code{both} margins are fixed, the
-  \code{\link{r2dtable}} function is used that is based on Patefield's
-  (1981) algorithm. For presence absence data, matrix fill should be
-  necessarily fixed, and \code{permatfull} is a wrapper for the function
-  \code{\link{commsimulator}}. The \code{r00, r0, c0, quasiswap}
-  algorithms of \code{\link{commsimulator}} are used for \code{"none",
-  "rows", "columns", "both"} values of the \code{fixedmar} argument,
-  respectively
-
-  The \code{shuffle} argument only have effect if the \code{mtype =
-  "count"} and \code{permatfull} function is used with \code{"none",
-  "rows", "columns"} values of \code{fixedmar}. All other cases for
-  count data are individual based randomisations. The \code{"samp"} and
-  \code{"both"} options result fixed matrix fill. The \code{"both"}
-  option means that individuals are shuffled among non zero cells
-  ensuring that there are no cell with zeros as a result, then cell
-  (zero and new valued cells) are shuffled.
-
-  The function \code{permatswap} is useful when with matrix fill
-  (i.e. the proportion of empty cells) and row/columns sums should be
-  kept constant. \code{permatswap} uses different kinds of swap
-  algorithms, and row and columns sums are fixed in all cases.  For
-  presence-absence data, the \code{swap} and \code{tswap} methods of
-  \code{\link{commsimulator}} can be used.  For count data, a special
-  swap algorithm ('swapcount') is implemented that results in permuted
-  matrices with fixed marginals and matrix fill at the same time.
-
-  The 'quasiswapcount' algorithm (\code{method="quasiswap"} and
-  \code{mtype="count"}) uses the same trick as Carsten Dormann's
-  \code{\link[bipartite]{swap.web}} function in the package
-  \pkg{bipartite}. First, a random matrix is generated by the
-  \code{\link{r2dtable}} function retaining row and column sums. Then
-  the original matrix fill is reconstructed by sequential steps to
-  increase or decrease matrix fill in the random matrix. These steps are
-  based on swapping 2x2 submatrices (see 'swapcount' algorithm for
-  details) to maintain row and column totals. This algorithm generates
-  independent matrices in each step, so \code{burnin} and \code{thin}
-  arguments are not considered. This is the default method, because this
-  is not sequential (as \code{swapcount} is) so independence of subsequent
-  matrices does not have to be checked.
-
-  The \code{swapcount} algorithm (\code{method="swap"} and
-  \code{mtype="count"}) tries to find 2x2 submatrices (identified by 2
-  random row and 2 random column indices), that can be swapped in order
-  to leave column and row totals and fill unchanged. First, the
-  algorithm finds the largest value in the submatrix that can be swapped
-  (\eqn{d}) and whether in diagonal or antidiagonal way. Submatrices
-  that contain values larger than zero in either diagonal or
-  antidiagonal position can be swapped. Swap means that the values in
-  diagonal or antidiagonal positions are decreased by \eqn{d}, while
-  remaining cells are increased by \eqn{d}. A swap is made only if fill
-  doesn't change. This algorithm is sequential, subsequent matrices are
-  not independent, because swaps modify little if the matrix is
-  large. In these cases many burnin steps and thinning is needed to get
-  independent random matrices. Although this algorithm is implemented in
-  C, large burnin and thin values can slow it down
-  considerably. WARNING: according to simulations, this algorithm seems
-  to be biased and non random, thus its use should be avoided!
-
-  The algorithm \code{"swsh"} in the function \code{permatswap} is a
-  hybrid algorithm. First, it makes binary quasiswaps to keep row and
-  column incidences constant, then non-zero values are modified
-  according to the \code{shuffle} argument (only \code{"samp"} and
-  \code{"both"} are available in this case, because it is applied only
-  on non-zero values).
-
-  The algorithm \code{"abuswap"} produces two kinds of null models
-  (based on \code{fixedmar="columns"} or \code{fixedmar="rows"}) as
-  described in Hardy (2008; randomization scheme 2x and 3x,
-  respectively).  These preserve column and row occurrences, and column
-  or row sums at the same time.
-
-  Constraints on row/column sums, matrix fill, total sum and sums within
-  strata can be checked by the \code{summary} method. \code{plot} method
-  is for visually testing the randomness of the permuted matrices,
-  especially for the sequential swap algorithms. If there are any
-  tendency in the graph, higher \code{burnin} and \code{thin} values can
-  help for sequential methods.  New lines can be added to existing plot
-  with the \code{lines} method.
-
-  Unrestricted and restricted permutations: if \code{strata} is
-  \code{NULL}, functions perform unrestricted permutations. Otherwise,
-  it is used for restricted permutations. Each strata should contain at
-  least 2 rows in order to perform randomization (in case of low row
-  numbers, swap algorithms can be rather slow). If the design is not
-  well balanced (i.e. same number of observations within each stratum),
-  permuted matrices may be biased because same constraints are forced on
-  submatrices of different dimensions. This often means, that the number
-  of potential permutations will decrease with their dimensions.  So the
-  more constraints we put, the less randomness can be expected.
-
-  The \code{plot} method is useful for graphically testing for trend and
-  independence of permuted matrices. This is especially important when
-  using sequential algorithms (\code{"swap", "tswap", "abuswap"}).
-
-  The \code{as.ts} method can be used to extract Bray-Curtis
-  dissimilarities or Chi-squared values as time series. This can further
-  used in testing independence (see Examples). The method \code{as.mcmc}
-  is useful for accessing diagnostic tools available in the \pkg{coda}
-  package.  }
-
-\value{ Functions \code{permatfull} and \code{permatswap} return an
-  object of class \code{"permat"} containing the the function call
-  (\code{call}), the original data matrix used for permutations
-  (\code{orig}) and a list of permuted matrices with length \code{times}
-  (\code{perm}).
-
-  The \code{summary} method returns various statistics as a list
-  (including mean Bray-Curtis dissimilarities calculated pairwise among
-  original and permuted matrices, Chi-square statistics, and check
-  results of the constraints; see Examples). Note that when
-  \code{strata} is used in the original call, summary calculation may
-  take longer.
-
-  The \code{plot} creates a plot as a side effect.
-
-  The \code{as.ts} method returns an object of class \code{"ts"}.  }
-
-
-\references{ Original references for presence-absence algorithms are
-  given on help page of \code{\link{commsimulator}}.
-
-  Hardy, O. J. (2008) Testing the spatial phylogenetic structure of
-  local communities: statistical performances of different null models
-  and test statistics on a locally neutral community. Journal of Ecology
-  96, 914--926. 
-
-  Patefield, W. M. (1981) Algorithm AS159. An efficient method of
-  generating r x c tables with given row and column totals.  
-  Applied Statistics 30, 91--97.
-}
-
-\author{\enc{Péter Sólymos}{Peter Solymos},
-\email{solymos at ualberta.ca} and Jari Oksanen}
-
-\seealso{ For other functions to permute matrices:
-\code{\link{commsimulator}}, \code{\link{r2dtable}},
-\code{\link{sample}}, \code{\link[bipartite]{swap.web}}.
-
-For the use of these permutation algorithms: \code{\link{oecosimu}},
-\code{\link{adipart}}, \code{\link{hiersimu}}.
-
-For time-series diagnostics: \code{\link{Box.test}},
-\code{\link{lag.plot}}, \code{\link{tsdiag}}, \code{\link{ar}},
-\code{\link{arima}} }
-
-\examples{
-## A simple artificial community data matrix.
-m <- matrix(c(
-    1,3,2,0,3,1,
-    0,2,1,0,2,1,
-    0,0,1,2,0,3,
-    0,0,0,1,4,3
-    ), 4, 6, byrow=TRUE)
-## Using the quasiswap algorithm to create a 
-## list of permuted matrices, where
-## row/columns sums and matrix fill are preserved:
-x1 <- permatswap(m, "quasiswap")
-summary(x1)
-## Unrestricted permutation retaining
-## row/columns sums but not matrix fill:
-x2 <- permatfull(m)
-summary(x2)
-## Unrestricted permutation of presence-absence type
-## not retaining row/columns sums:
-x3 <- permatfull(m, "none", mtype="prab")
-x3$orig  ## note: original matrix is binarized!
-summary(x3)
-## Restricted permutation,
-## check sums within strata:
-x4 <- permatfull(m, strata=c(1,1,2,2))
-summary(x4)
-
-## NOTE: 'times' argument usually needs to be >= 99
-## here much lower value is used for demonstration
-
-## Not sequential algorithm
-data(BCI)
-a <- permatswap(BCI, "quasiswap", times=19)
-## Sequential algorithm
-b <- permatswap(BCI, "abuswap", fixedmar="col",
-    burnin=0, thin=100, times=19)
-opar <- par(mfrow=c(2,2))
-plot(a, main="Not sequential")
-plot(b, main="Sequential")
-plot(a, "chisq")
-plot(b, "chisq")
-par(opar)
-## Extract Bray-Curtis dissimilarities
-## as time series
-bc <- as.ts(b)
-## Lag plot
-lag.plot(bc)
-## First order autoregressive model
-mar <- arima(bc, c(1,0,0))
-mar
-## Ljung-Box test of residuals
-Box.test(mar$residuals)
-## Graphical diagnostics
-tsdiag(mar)
-}
-
-\keyword{multivariate}
-\keyword{datagen}
+\encoding{UTF-8}
+\name{permat}
+\alias{permatfull}
+\alias{permatswap}
+\alias{summary.permat}
+\alias{print.summary.permat}
+\alias{print.permat}
+\alias{plot.permat}
+\alias{lines.permat}
+\alias{as.ts.permat}
+\alias{as.mcmc.permat}
+
+\title{Matrix Permutation Algorithms for Presence-Absence and Count Data}
+
+\description{ Individual (for count data) or incidence (for
+presence-absence data) based null models can be generated for
+community level simulations. Options for preserving characteristics of
+the original matrix (rows/columns sums, matrix fill) and
+restricted permutations (based on strata) are discussed in the
+Details section.}
+
+\usage{
+permatfull(m, fixedmar = "both", shuffle = "both", strata = NULL, 
+    mtype = "count", times = 99, ...)
+permatswap(m, method = "quasiswap", fixedmar="both", shuffle = "both",
+    strata = NULL, mtype = "count", times = 99, 
+    burnin = 0, thin = 1, ...)
+\method{print}{permat}(x, digits = 3, ...)
+\method{summary}{permat}(object, ...)
+\method{print}{summary.permat}(x, digits = 2, ...)
+\method{plot}{permat}(x, type = "bray", ylab, xlab, col, lty,
+    lowess = TRUE, plot = TRUE, text = TRUE, ...)
+\method{lines}{permat}(x, type = "bray", ...)
+\method{as.ts}{permat}(x, type = "bray", ...)
+\method{as.mcmc}{permat}(x)
+}
+\arguments{
+  \item{m}{A community data matrix with plots (samples) as rows and
+    species (taxa) as columns.} 
+  \item{fixedmar}{character, stating which of the row/column sums should
+    be preserved (\code{"none", "rows", "columns", "both"}).} 
+  \item{strata}{Numeric vector or factor with length same as
+    \code{nrow(m)} for grouping rows within strata for restricted
+    permutations. Unique values or levels are used.} 
+  \item{mtype}{Matrix data type, either \code{"count"} for count data,
+    or \code{"prab"} for presence-absence type incidence data.} 
+  \item{times}{Number of permuted matrices.} 
+  \item{method}{Character for method used for the swap algorithm
+    (\code{"swap"}, \code{"tswap"}, \code{"quasiswap"},
+    \code{"backtrack"}) as described for function
+    \code{\link{make.commsim}}. If \code{mtype="count"} the
+    \code{"quasiswap"}, \code{"swap"}, \code{"swsh"} and
+    \code{"abuswap"} methods are available (see details).} 
+  \item{shuffle}{Character, indicating whether individuals
+    (\code{"ind"}), samples (\code{"samp"}) or both (\code{"both"})
+    should be shuffled, see details.} 
+  \item{burnin}{Number of null communities discarded before proper
+    analysis in sequential (\code{"swap", "tswap"}) methods.} 
+  \item{thin}{Number of discarded permuted matrices between two
+    evaluations in sequential (\code{"swap", "tswap"}) methods.} 
+  \item{x, object}{Object of class \code{"permat"}} 
+  \item{digits}{Number of digits used for rounding.}
+  \item{ylab, xlab, col, lty}{graphical parameters for the \code{plot}
+    method.} 
+  \item{type}{Character, type of plot to be displayed: \code{"bray"} for
+    Bray-Curtis dissimilarities, \code{"chisq"} for Chi-squared values.} 
+  \item{lowess, plot, text}{Logical arguments for the \code{plot}
+    method, whether a locally weighted regression curve should be drawn,
+    the plot should be drawn, and statistic values should be printed on
+    the plot.} 
+  \item{\dots}{Other arguments passed to \code{\link{simulate.nullmodel}} 
+    or methods.}
+}
+
+\details{
+  The function \code{permatfull} is useful when matrix fill is
+  allowed to vary, and matrix type is \code{count}.  The \code{fixedmar}
+  argument is used to set constraints for permutation.  If \code{none}
+  of the margins are fixed, cells are randomised within the matrix.  If
+  \code{rows} or \code{columns} are fixed, cells within rows or columns
+  are randomised, respectively.  If \code{both} margins are fixed, the
+  \code{\link{r2dtable}} function is used that is based on Patefield's
+  (1981) algorithm. For presence absence data, matrix fill should be
+  necessarily fixed, and \code{permatfull} is a wrapper for the function
+  \code{\link{make.commsim}}. The \code{r00, r0, c0, quasiswap}
+  algorithms of \code{\link{make.commsim}} are used for \code{"none",
+  "rows", "columns", "both"} values of the \code{fixedmar} argument,
+  respectively.
+
+  The \code{shuffle} argument only has an effect if \code{mtype =
+  "count"} and the \code{permatfull} function is used with \code{"none",
+  "rows", "columns"} values of \code{fixedmar}. All other cases for
+  count data are individual based randomisations. The \code{"samp"} and
+  \code{"both"} options result in fixed matrix fill. The \code{"both"}
+  option means that individuals are shuffled among non zero cells
+  ensuring that there are no cells with zeros as a result, then cells
+  (zero and new valued cells) are shuffled.
+
+  The function \code{permatswap} is useful when both matrix fill
+  (i.e. the proportion of empty cells) and row/column sums should be
+  kept constant. \code{permatswap} uses different kinds of swap
+  algorithms, and row and column sums are fixed in all cases.  For
+  presence-absence data, the \code{swap} and \code{tswap} methods of
+  \code{\link{make.commsim}} can be used.  For count data, a special
+  swap algorithm ('swapcount') is implemented that results in permuted
+  matrices with fixed marginals and matrix fill at the same time.
+
+  The 'quasiswapcount' algorithm (\code{method="quasiswap"} and
+  \code{mtype="count"}) uses the same trick as Carsten Dormann's
+  \code{\link[bipartite]{swap.web}} function in the package
+  \pkg{bipartite}. First, a random matrix is generated by the
+  \code{\link{r2dtable}} function retaining row and column sums. Then
+  the original matrix fill is reconstructed by sequential steps to
+  increase or decrease matrix fill in the random matrix. These steps are
+  based on swapping 2x2 submatrices (see 'swapcount' algorithm for
+  details) to maintain row and column totals. This algorithm generates
+  independent matrices in each step, so \code{burnin} and \code{thin}
+  arguments are not considered. This is the default method, because this
+  is not sequential (as \code{swapcount} is) so independence of subsequent
+  matrices does not have to be checked.
+
+  The \code{swapcount} algorithm (\code{method="swap"} and
+  \code{mtype="count"}) tries to find 2x2 submatrices (identified by 2
+  random row and 2 random column indices), that can be swapped in order
+  to leave column and row totals and fill unchanged. First, the
+  algorithm finds the largest value in the submatrix that can be swapped
+  (\eqn{d}) and whether in diagonal or antidiagonal way. Submatrices
+  that contain values larger than zero in either diagonal or
+  antidiagonal position can be swapped. Swap means that the values in
+  diagonal or antidiagonal positions are decreased by \eqn{d}, while
+  remaining cells are increased by \eqn{d}. A swap is made only if fill
+  doesn't change. This algorithm is sequential, subsequent matrices are
+  not independent, because swaps modify little if the matrix is
+  large. In these cases many burnin steps and thinning is needed to get
+  independent random matrices. Although this algorithm is implemented in
+  C, large burnin and thin values can slow it down
+  considerably. WARNING: according to simulations, this algorithm seems
+  to be biased and non random, thus its use should be avoided!
+
+  The algorithm \code{"swsh"} in the function \code{permatswap} is a
+  hybrid algorithm. First, it makes binary quasiswaps to keep row and
+  column incidences constant, then non-zero values are modified
+  according to the \code{shuffle} argument (only \code{"samp"} and
+  \code{"both"} are available in this case, because it is applied only
+  on non-zero values). It also recognizes the \code{fixedmar}
+  argument which cannot be \code{"both"} (\pkg{vegan} versions <= 2.0
+  had this algorithm with \code{fixedmar = "none"}).
+
+  The algorithm \code{"abuswap"} produces two kinds of null models
+  (based on \code{fixedmar="columns"} or \code{fixedmar="rows"}) as
+  described in Hardy (2008; randomization scheme 2x and 3x,
+  respectively).  These preserve column and row occurrences, and column
+  or row sums at the same time. (Note that similar constraints
+  can be achieved by the non sequential \code{"swsh"} algorithm
+  with \code{fixedmar} argument set to \code{"columns"} or
+  \code{"rows"}, respectively.)
+
+  Constraints on row/column sums, matrix fill, total sum and sums within
+  strata can be checked by the \code{summary} method. \code{plot} method
+  is for visually testing the randomness of the permuted matrices,
+  especially for the sequential swap algorithms. If there are any
+  tendency in the graph, higher \code{burnin} and \code{thin} values can
+  help for sequential methods.  New lines can be added to existing plot
+  with the \code{lines} method.
+
+  Unrestricted and restricted permutations: if \code{strata} is
+  \code{NULL}, functions perform unrestricted permutations. Otherwise,
+  it is used for restricted permutations. Each stratum should contain at
+  least 2 rows in order to perform randomization (in case of low row
+  numbers, swap algorithms can be rather slow). If the design is not
+  well balanced (i.e. same number of observations within each stratum),
+  permuted matrices may be biased because same constraints are forced on
+  submatrices of different dimensions. This often means, that the number
+  of potential permutations will decrease with their dimensions.  So the
+  more constraints we put, the less randomness can be expected.
+
+  The \code{plot} method is useful for graphically testing for trend and
+  independence of permuted matrices. This is especially important when
+  using sequential algorithms (\code{"swap", "tswap", "abuswap"}).
+
+  The \code{as.ts} method can be used to extract Bray-Curtis
+  dissimilarities or Chi-squared values as time series. This can further
+  used in testing independence (see Examples). The method \code{as.mcmc}
+  is useful for accessing diagnostic tools available in the \pkg{coda}
+  package.  }
+
+\value{Functions \code{permatfull} and \code{permatswap} return an
+  object of class \code{"permat"} containing the function call
+  (\code{call}), the original data matrix used for permutations
+  (\code{orig}) and a list of permuted matrices with length \code{times}
+  (\code{perm}).
+
+  The \code{summary} method returns various statistics as a list
+  (including mean Bray-Curtis dissimilarities calculated pairwise among
+  original and permuted matrices, Chi-square statistics, and check
+  results of the constraints; see Examples). Note that when
+  \code{strata} is used in the original call, summary calculation may
+  take longer.
+
+  The \code{plot} creates a plot as a side effect.
+
+  The \code{as.ts} method returns an object of class \code{"ts"}.  }
+
+
+\references{ Original references for presence-absence algorithms are
+  given on help page of \code{\link{make.commsim}}.
+
+  Hardy, O. J. (2008) Testing the spatial phylogenetic structure of
+  local communities: statistical performances of different null models
+  and test statistics on a locally neutral community. Journal of Ecology
+  96, 914--926. 
+
+  Patefield, W. M. (1981) Algorithm AS159. An efficient method of
+  generating r x c tables with given row and column totals.  
+  Applied Statistics 30, 91--97.
+}
+
+\author{\enc{Péter Sólymos}{Peter Solymos},
+\email{solymos at ualberta.ca} and Jari Oksanen}
+
+\seealso{ For other functions to permute matrices:
+\code{\link{make.commsim}}, \code{\link{r2dtable}},
+\code{\link{sample}}, \code{\link[bipartite]{swap.web}}.
+
+For the use of these permutation algorithms: \code{\link{oecosimu}},
+\code{\link{adipart}}, \code{\link{hiersimu}}.
+
+For time-series diagnostics: \code{\link{Box.test}},
+\code{\link{lag.plot}}, \code{\link{tsdiag}}, \code{\link{ar}},
+\code{\link{arima}} 
+
+For underlying `low level' implementation:
+\code{\link{commsim}} and \code{\link{nullmodel}}.}
+
+\examples{
+## A simple artificial community data matrix.
+m <- matrix(c(
+    1,3,2,0,3,1,
+    0,2,1,0,2,1,
+    0,0,1,2,0,3,
+    0,0,0,1,4,3
+    ), 4, 6, byrow=TRUE)
+## Using the quasiswap algorithm to create a 
+## list of permuted matrices, where
+## row/columns sums and matrix fill are preserved:
+x1 <- permatswap(m, "quasiswap")
+summary(x1)
+## Unrestricted permutation retaining
+## row/columns sums but not matrix fill:
+x2 <- permatfull(m)
+summary(x2)
+## Unrestricted permutation of presence-absence type
+## not retaining row/columns sums:
+x3 <- permatfull(m, "none", mtype="prab")
+x3$orig  ## note: original matrix is binarized!
+summary(x3)
+## Restricted permutation,
+## check sums within strata:
+x4 <- permatfull(m, strata=c(1,1,2,2))
+summary(x4)
+
+## NOTE: 'times' argument usually needs to be >= 99
+## here much lower value is used for demonstration
+
+## Not sequential algorithm
+data(BCI)
+a <- permatswap(BCI, "quasiswap", times=19)
+## Sequential algorithm
+b <- permatswap(BCI, "abuswap", fixedmar="col",
+    burnin=0, thin=100, times=19)
+opar <- par(mfrow=c(2,2))
+plot(a, main="Not sequential")
+plot(b, main="Sequential")
+plot(a, "chisq")
+plot(b, "chisq")
+par(opar)
+## Extract Bray-Curtis dissimilarities
+## as time series
+bc <- as.ts(b)
+## Lag plot
+lag.plot(bc)
+## First order autoregressive model
+mar <- arima(bc, c(1,0,0))
+mar
+## Ljung-Box test of residuals
+Box.test(residuals(mar))
+## Graphical diagnostics
+tsdiag(mar)
+}
+
+\keyword{multivariate}
+\keyword{datagen}
diff --git a/man/permustats.Rd b/man/permustats.Rd
new file mode 100644
index 0000000..403d3d5
--- /dev/null
+++ b/man/permustats.Rd
@@ -0,0 +1,144 @@
+\name{permustats}
+\alias{permustats}
+\alias{permustats.adonis}
+\alias{permustats.anosim}
+\alias{permustats.CCorA}
+\alias{permustats.envfit}
+\alias{permustats.factorfit}
+\alias{permustats.mantel}
+\alias{permustats.mrpp}
+\alias{permustats.mso}
+\alias{permustats.oecosimu}
+\alias{permustats.ordiareatest}
+\alias{permustats.permutest.betadisper}
+\alias{permustats.permutest.cca}
+\alias{permustats.protest}
+\alias{permustats.vectorfit}
+\alias{summary.permustats}
+\alias{densityplot.permustats}
+\alias{density.permustats}
+\alias{qqnorm.permustats}
+\alias{qqmath.permustats}
+
+\title{
+  Extract, Analyse and Display Permutation Results
+}
+
+\description{
+  The \code{permustats} function extracts permutation results of
+  \pkg{vegan} functions. Its support functions can find quantiles and
+  standardized effect sizes, plot densities and Q-Q plots.
+}
+
+\usage{
+permustats(x, ...)
+\method{summary}{permustats}(object, interval = 0.95, ...)
+\method{densityplot}{permustats}(x, data, xlab = "Permutations", ...)
+\method{density}{permustats}(x, observed = TRUE, ...)
+\method{qqnorm}{permustats}(y, observed = TRUE, ...)
+\method{qqmath}{permustats}(x, data, observed = TRUE, ylab = "Permutations", ...)
+}
+
+\arguments{
+  \item{object, x, y}{The object to be handled.}
+  \item{interval}{numeric; the coverage interval reported.}
+  \item{xlab, ylab}{Arguments of
+    \code{\link[lattice]{densityplot}} and
+    \code{\link[lattice]{qqmath}} functions.}
+  \item{observed}{Add observed statistic among permutations.}
+  \item{data}{Ignored.}
+  \item{\dots}{ Other arguments passed to the function. In
+    \code{density} these are passed to \code{\link{density.default}}.}
+}
+
+\details{ 
+
+  The \code{permustats} function extracts permutation results and
+  observed statistics from several \pkg{vegan} functions that perform
+  permutations or simulations.  
+
+  The \code{summary} method of \code{permustats} estimates the \eqn{z}
+  values, also known as standardized effect sizes (SES) as the
+  difference of observed statistic and mean of permutations divided by
+  the standard deviation of permutations. It also prints the mean,
+  median, and limits which contain \code{interval} percent of permuted
+  values. With the default (\code{interval = 0.95}), for two-sided test
+  these are (2.5\%, 97.5\%) and for one-sided tests either 5\% or 95\%
+  quantile depending on the test direction. The mean, quantiles and
+  \eqn{z} values are evaluated from permuted values without observed
+  statistic.
+
+  The \code{density} and \code{densityplot} methods display the
+  kernel density estimates of permuted values. When observed value of
+  the statistic is included in the permuted values, the
+  \code{densityplot} method marks the observed statistic as a vertical
+  line. However the \code{density} method uses its standard \code{plot}
+  method and cannot mark the observed value.
+
+  The \code{qqnorm} and \code{qqmath} display Q-Q plots of
+  permutations, optionally together with the observed value (default)
+  which is shown as horizontal line in plots. \code{qqnorm} plots
+  permutation values against standard Normal variate. \code{qqmath}
+  defaults to the standard Normal as well, but can accept other
+  alternatives (see standard \code{\link[lattice]{qqmath}}).
+
+  Functions \code{\link{density}} and \code{\link{qqnorm}} are based on
+  standard \R methods and accept their arguments. They only handle one
+  statistic, and cannot be used when several test statistics were
+  evaluated. The \code{\link[lattice]{densityplot}} and
+  \code{\link[lattice]{qqmath}} are \pkg{lattice} graphics, and can be
+  used both for one and several statistics.  All these functions pass
+  arguments to their underlying functions; see their documentation.
+
+  The \code{permustats} can extract permutation statistics from the
+  results of \code{\link{adonis}}, \code{\link{anosim}},
+  \code{\link{mantel}}, \code{\link{mantel.partial}},
+  \code{\link{mrpp}}, \code{\link{oecosimu}}, \code{\link{ordiareatest}},
+  \code{\link{permutest.cca}}, \code{\link{protest}}, and
+  \code{\link{permutest.betadisper}}. NB, there is no \code{permustats}
+  method for \code{\link{anova.cca}}, but only for
+  \code{\link{permutest.cca}}.
+
+}
+
+\value{
+  The \code{permustats} function returns an object of class
+  \code{"permustats"}. This is a list of items \code{"statistic"} for
+  observed statistics, \code{permutations} which contains permuted
+  values, and \code{alternative} which contains text defining the
+  character of the test (\code{"two.sided"}, \code{"less"} or
+  \code{"greater"}). The \code{\link{qqnorm}} and
+  \code{\link{density}} methods return their standard result objects.
+}
+
+\author{
+  Jari Oksanen with contributions from Gavin L. Simpson
+  (\code{permustats.permutest.betadisper} method and related
+  modifications to \code{summary.permustats} and the \code{print}
+  method).
+}
+
+\seealso{
+  \code{\link{density}}, \code{\link[lattice]{densityplot}},
+  \code{\link{qqnorm}}, \code{\link[lattice]{qqmath}}.
+}
+
+\examples{
+data(dune)
+data(dune.env)
+mod <- adonis(dune ~ Management + A1, data = dune.env)
+## use permustats
+perm <- permustats(mod)
+summary(perm)
+densityplot(perm)
+qqmath(perm)
+
+## example of multiple types of statistic
+mod <- with(dune.env, betadisper(vegdist(dune), Management))
+pmod <- permutest(mod, nperm = 99, pairwise = TRUE)
+perm <- permustats(pmod)
+summary(perm, interval = 0.90)
+}
+
+\keyword{ distribution }
+\keyword{ smooth }
diff --git a/man/permutations.Rd b/man/permutations.Rd
index 0e7a1bb..2e3eca8 100644
--- a/man/permutations.Rd
+++ b/man/permutations.Rd
@@ -3,35 +3,58 @@
 
 \title{Permutation tests in Vegan}
 \description{
-  Unless stated otherwise, vegan currently provides for two types of
-  permutation test:
+  From version 2.2-0, \pkg{vegan} has significantly improved access to
+  restricted permutations which brings it into line with those offered
+  by Canoco. The permutation designs are modelled after the permutation
+  schemes of Canoco 3.1 (ter Braak, 1990).
+
+  \pkg{vegan} currently provides for the following features within
+  permutation tests:
   \enumerate{
-    \item{Free permutation of \emph{DATA}, also known as randomisation,
+    \item{Free permutation of \emph{DATA}, also known as randomisation,}
+    \item{Free permutation of \emph{DATA} within the levels of a
+      grouping variable,}
+    \item{Restricted permutations for line transects or time series,}
+    \item{Permutation of groups of samples whilst retaining the
+      within-group ordering,}
+    \item{Restricted permutations for spatial grids,}
+    \item{Blocking, samples are never permuted \emph{between} blocks,
       and}
-    \item{Free permutation of \emph{DATA} within the levels of a factor
-      variable.}
+    \item{Split-plot designs, with permutation of whole plots, split
+      plots, or both.}
   }
-  We use \emph{DATA} to mean either the observed data themselves or some
-  function of the data, for example the residuals of an ordination model
-  in the presence of covariables.
+  Above, we use \emph{DATA} to mean either the observed data themselves
+  or some function of the data, for example the residuals of an
+  ordination model in the presence of covariables.
+  
+  These capabilities are provided by functions from the \pkg{permute}
+  package. The user can request a particular type of permutation by
+  supplying the \code{permutations} argument of a function with an
+  object returned by \code{\link{how}}, which defines how samples should
+  be permuted. Alternatively, the user can simply specify the required
+  number of permutations and a simple randomisation procedure will be
+  performed. Finally, the user can supply a matrix of permutations (with
+  number of rows equal to the number of permutations and number of
+  columns equal to the number of observations in the data) and
+  \pkg{vegan} will use these permutations instead of generating new
+  permutations.
   
-  The second type of permutation test above is available if the function
-  providing the test accepts an argument \code{strata} or passes
-  additional arguments (via \code{\dots}) to
-  \code{\link{permuted.index}}.
-
-  The Null hypothesis for these two types of permutation test assumes
-  free exchangeability of \emph{DATA} (within the levels of
-  \code{strata} if specified). Dependence between observations, such as
-  that which arises due to spatial or temporal autocorrelation, or
-  more-complicated experimental designs, such as split-plot designs,
-  violates this fundamental assumption of the test and requires restricted
-  permutation test designs. The next major version of Vegan will include
-  infrastructure to handle these more complicated permutation designs.
-
-  Again, unless otherwise stated in the help pages for specific
-  functions, permutation tests in Vegan all follow the same
-  format/structure:
+  The majority of functions in \pkg{vegan} allow for the full range of
+  possibilities outlined above. Exceptions include
+  \code{\link{kendall.post}} and \code{\link{kendall.global}}.
+
+  The Null hypothesis for the first two types of permutation test listed
+  above assumes free exchangeability of \emph{DATA} (within the levels
+  of the grouping variable, if specified). Dependence between
+  observations, such as that which arises due to spatial or temporal
+  autocorrelation, or more-complicated experimental designs, such as
+  split-plot designs, violates this fundamental assumption of the test
+  and requires more complex restricted permutation test designs. It is
+  these designs that are available via the \pkg{permute} package and to
+  which \pkg{vegan} provides access from version 2.2-0 onwards.
+
+  Unless otherwise stated in the help pages for specific functions,
+  permutation tests in \pkg{vegan} all follow the same format/structure:
   \enumerate{
     \item{An appropriate test statistic is chosen. Which statistic is
       chosen should be described on the help pages for individual
@@ -40,63 +63,109 @@
       data and analysis/model and recorded. Denote this value
       \eqn{x_0}{x[0]}.}
     \item{The \emph{DATA} are randomly permuted according to one of the
-      above two schemes, and the value of the test statistic for this
+      above schemes, and the value of the test statistic for this
       permutation is evaluated and recorded.}
     \item{Step 3 is repeated a total of \eqn{n} times, where \eqn{n} is
       the number of permutations requested. Denote these values as
       \eqn{x_i}{x[i]}, where \eqn{i = 1, ..., n}{{i = 1, \ldots, n}.}}
-    \item{The values of the test statistic for the \eqn{n} permutations
-      of the \emph{DATA} are added to the value of the test statistic
-      for the observed data. These \emph{n + 1} values represent the
-      \emph{Null} or \emph{randomisation} distribution of the test
-      statistic. The observed value for the test statistic is included
-      in the Null distribution because under the Null hypothesis being
-      tested, the observed value is just a typical value of the test
-      statistic, inherently no different from the values obtained via
-      permutation of \emph{DATA}.}
-    \item{The number of times that a value of the test statistic in the
-      Null distribution is equal to or greater than the value of the
-      test statistic for the observed data is recorded. Note the point
-      mentioned in step 5 above; the Null distribution includes the
-      \strong{observed} value of the test statistic. Denote this count
-      as \eqn{N}.}
+    \item{Count the number of values of the test statistic,
+      \eqn{x_i}{x[i]}, in the Null distribution that are as extreme as
+      the test statistic for the observed data \eqn{x_0}{x[0]}. Denote this
+      count as \eqn{N}.
+
+      We use the phrase \emph{as extreme} to include cases where a
+      two-sided test is performed and large negative values of the test
+      statistic should be considered.}
     \item{The permutation p-value is computed as
-      \deqn{p = \frac{N}{n + 1}}{N / (n + 1)}}
+      \deqn{p = \frac{N + 1}{n + 1}}{(N + 1) / (n + 1)}}
   }
+  
   The above description illustrates why the default number of
-  permutations specified in Vegan functions takes values of 199 or 999
-  for example. Once the observed value of the test statistic is added to
-  this number of random permutations of \emph{DATA}, pretty p-values are
-  achievable because \eqn{n + 1} becomes 200 or 1000, for example.
+  permutations specified in \pkg{vegan} functions takes values of 199 or
+  999 for example. Pretty \emph{p} values are achieved because the
+  \eqn{+ 1} in the denominator results in division by 200 or 1000, for
+  the 199 or 999 random permutations used in the test.
 
-  The minimum achievable p-value is
-  \deqn{p_{\mathrm{min}} = \frac{1}{n +1}}{p[min] = 1 / (n + 1)}
+  The simple intuition behind the presence of \eqn{+ 1} in the numerator
+  and denominator is that these represent the inclusion of the observed
+  value of the statistic in the Null distribution (e.g. Manly 2006).
+  Phipson & Smyth (2010) present a more compelling explanation for the
+  inclusion of \eqn{+ 1} in the numerator and denominator of the
+  \emph{p} value calculation.
+
+  Fisher (1935) had in mind that a permutation test would involve
+  enumeration of all possible permutations of the data yielding an exact
+  test. However, doing this complete enumeration may not be feasible in
+  practice owing to the potentially vast number of arrangements of the
+  data, even in modestly-sized data sets with free permutation of
+  samples. As a result we evaluate the \emph{p} value as the tail
+  probability of the Null distribution of the test statistic directly
+  from the random sample of possible permutations. Phipson & Smyth
+  (2010) show that the naive calculation of the permutation \emph{p}
+  value is
+
+  \deqn{p = \frac{N}{n}}{p = (N / n)}
+
+  which leads to an invalid test with incorrect type I error rate. They
+  go on to show that by replacing the unknown tail probability (the
+  \emph{p} value) of the Null distribution with the biased estimator
+
+  \deqn{p = \frac{N + 1}{n + 1}}{p = (N + 1) / (n + 1)}
   
-  A more common definition, in ecological circles, for \eqn{N} would be
-  the number of \eqn{x_i}{x[i]} greater than or equal to
-  \eqn{x_0}{x[0]}. The permutation p-value would then be defined as
-  \deqn{p = \frac{N + 1}{n + 1}}{(N + 1) / (n + 1)}
-  The + 1 in the numerator of the above equation represents the observed
-  statistic \eqn{x_0}{x[0]}. The minimum p-value would then be defined as
-  \deqn{p_{\mathrm{min}} = \frac{0 + 1}{n +1}}{p[min] = 0 + 1 / (n + 1)}
-  However this definition discriminates between the observed
-  statistic and the other \eqn{x_i}{x[i]}. Under the Null hypothesis
-  there is no such distinction, hence we prefer the definintion used in
-  the numbered steps above.
-
-  One cannot simply increase the number of permutations
-  (\eqn{n}) to achieve a potentially lower p-value unless the number of
-  observations available permits such a number of permutations. This is
-  unlikely to be a problem for all but the smallest data sets when
-  free permutation (randomisation) is valid, but in designs where
-  \code{strata} is specified and there are a low number of observations
-  within each level of \code{strata}, there may not be as many actual
-  permutations of the data as you might want.
+  that the positive bias induced is of just the right size to
+  account for the  uncertainty in the estimation of the tail probability
+  from the set of randomly sampled permutations to yield a test with the
+  correct type I error rate.
+
+  The estimator described above is correct for the situation where
+  permutations of the data are sampled randomly \emph{without}
+  replacement. This is not strictly what happens in \pkg{vegan} because
+  permutations are drawn pseudo-randomly independent of one
+  another. Note that the actual chance of this happening in practice is
+  small but the functions in \pkg{permute} do not guarantee to generate
+  a unique set of permutations unless complete enumeration of
+  permutations is requested. This is not feasible for all but the
+  smallest of data sets or most restrictive of permutation designs, but in
+  such cases the chance of drawing a set of permutations with repeats is
+  lessened as the sample size, and thence the size of the set of all
+  possible permutations, increases.
+
+  Under the situation of sampling permutations with replacement then,
+  the tail probability \eqn{p} calculated from the biased estimator
+  described above is somewhat \strong{conservative}, being too large by
+  an amount that depends on the number of possible values that the test
+  statistic can take under permutation of the data (Phipson & Smyth,
+  2010). This represents a slight loss of statistical power for the
+  conservative \emph{p} value calculation used here. However, unless
+  sample sizes are small and the permutation design such that the
+  set of values that the test statistic can take is also small, this
+  loss of power is unlikely to be critical.
+
+  The minimum achievable p-value is
+
+  \deqn{p_{\mathrm{min}} = \frac{1}{n + 1}}{p[min] = 1 / (n + 1)}
+
+  and hence depends on the number of permutations evaluated. However,
+  one cannot simply increase the number of permutations (\eqn{n}) to
+  achieve a potentially lower p-value unless the number of observations
+  available permits such a number of permutations. This is unlikely to
+  be a problem for all but the smallest data sets when free permutation
+  (randomisation) is valid, but in restricted permutation designs with a
+  low number of observations, there may not be as many unique
+  permutations of the data as you might desire to reach the required
+  level of significance.
   
   It is currently the responsibility of the user to determine the total
-  number of possible permutations for their \emph{DATA}. No checks are
-  made within Vegan functions to ensure a sensible number of
-  permutations is chosen.
+  number of possible permutations for their \emph{DATA}. The number of
+  possible permutations allowed under the specified design can be
+  calculated using \code{\link[permute]{numPerms}} from the
+  \pkg{permute} package. Heuristics employed within the
+  \code{\link[permute]{shuffleSet}} function used by \pkg{vegan} can be
+  triggered to generate the entire set of permutations instead of a
+  random set. The settings controlling the triggering of the complete
+  enumeration step are contained within a permutation design created
+  using \code{\link[permute]{how}} and can be set by the user. See
+  \code{\link[permute]{how}} for details.
 
   Limits on the total number of permutations of \emph{DATA} are more
   severe in temporally or spatially ordered data or experimental designs
@@ -106,17 +175,43 @@
 
   In situations where only a low number of permutations is possible due
   to the nature of \emph{DATA} or the experimental design, enumeration
-  of all permutations becomes important and achievable
-  computationally. Currently, Vegan does not include functions to
-  perform complete enumeration of the set of possible
-  permutations. The next major release of Vegan will include such
-  functionality, however.
+  of all permutations becomes important and achievable computationally.
+
+  Above, we have provided only a brief overview of the capabilities of
+  \pkg{vegan} and \pkg{permute}. To get the best out of the new
+  functionality and for details on how to set up permutation designs
+  using \code{\link[permute]{how}}, consult the vignette
+  \emph{Restricted permutations; using the permute package} supplied
+  with \pkg{permute} and accessible via \code{vignette("permutations",
+  package = "permute")}.
 }
 
 \seealso{
-  \code{\link{permutest}}, \code{\link{permuted.index}}
+  \code{\link{permutest}} for the main interface in \pkg{vegan}. See
+  also \code{\link[permute]{how}} for details on permutation design
+  specification, \code{\link[permute]{shuffleSet}} for the code used to
+  generate a set of permutations, \code{\link[permute]{numPerms}} for
+  a function to return the size of the set of possible permutations
+  under the current design.
+}
+
+\references{
+
+  Manly, B. F. J. (2006). \emph{Randomization, Bootstrap and Monte Carlo
+  Methods in Biology}, Third Edition. Chapman and Hall/CRC.
+  
+  Phipson, B., & Smyth, G. K. (2010). Permutation P-values should never
+  be zero: calculating exact P-values when permutations are randomly
+  drawn. \emph{Statistical Applications in Genetics and Molecular
+    Biology}, \strong{9}, Article 39. DOI: 10.2202/1544-6115.1585
+  
+  ter Braak, C. J. F. (1990). \emph{Update notes: CANOCO version
+    3.1}. Wageningen: Agricultural Mathematics Group. (UR).
+
+  See also:
+
+  Davison, A. C., & Hinkley, D. V. (1997). \emph{Bootstrap Methods and
+    their Application}. Cambridge University Press.
 }
-%\references{
-%}
-\author{ Gavin Simpson }
+\author{ Gavin L. Simpson }
 \keyword{multivariate}
diff --git a/man/permutest.betadisper.Rd b/man/permutest.betadisper.Rd
index 3e33c6b..1a5c90e 100644
--- a/man/permutest.betadisper.Rd
+++ b/man/permutest.betadisper.Rd
@@ -10,15 +10,22 @@
 }
 \usage{
 \method{permutest}{betadisper}(x, pairwise = FALSE,
-         control = how(nperm = 999), \dots)
+          permutations = 999,
+          parallel = getOption("mc.cores"),
+          \dots)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{x}{an object of class \code{"betadisper"}, the result of a
     call to \code{betadisper}.}
   \item{pairwise}{logical; perform pairwise comparisons of group means?}
-  \item{control}{a list of control values for the permutations
-    as returned by the function \code{\link[permute]{how}}}
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing.}
   \item{\dots}{Arguments passed to other methods.}
 }
 \details{
@@ -79,11 +86,16 @@ mod
 anova(mod)
 
 ## Permutation test for F
-permutest(mod, pairwise = TRUE)
+pmod <- permutest(mod, permutations = 99, pairwise = TRUE)
 
 ## Tukey's Honest Significant Differences
 (mod.HSD <- TukeyHSD(mod))
 plot(mod.HSD)
+
+## Has permustats() method
+pstat <- permustats(pmod)
+densityplot(pstat)
+qqmath(pstat)
 }
 \keyword{methods}
 \keyword{multivariate}
diff --git a/man/prc.Rd b/man/prc.Rd
index 0682868..811a330 100644
--- a/man/prc.Rd
+++ b/man/prc.Rd
@@ -109,19 +109,24 @@ prc(response, treatment, time, ...)
 
 \seealso{\code{\link{rda}}, \code{\link{anova.cca}}.}
 \examples{
-# Chlorpyrifos experiment and experimental design
+## Chlorpyrifos experiment and experimental design: Pesticide
+## treatment in ditches (replicated) and followed over from 4 weeks
+## before to 24 weeks after exposure 
 data(pyrifos)
 week <- gl(11, 12, labels=c(-4, -1, 0.1, 1, 2, 4, 8, 12, 15, 19, 24))
 dose <- factor(rep(c(0.1, 0, 0, 0.9, 0, 44, 6, 0.1, 44, 0.9, 0, 6), 11))
+ditch <- gl(12, 1, length=132)
 # PRC
 mod <- prc(pyrifos, dose, week)
 mod            # RDA
 summary(mod)   # PRC
 logabu <- colSums(pyrifos)
 plot(mod, select = logabu > 100)
-# Permutations should be done only within one week, and we only
-# are interested on the first axis
-anova(mod, strata = week, first=TRUE, perm.max = 100)
+## Ditches are randomized, we have a time series, and are only
+## interested in the first axis
+ctrl <- how(plots = Plots(strata = ditch, type = "free"),
+    within = Within(type = "series"), nperm = 99)
+anova(mod, permutations = ctrl, first=TRUE)
 }
 \keyword{ multivariate }
 
diff --git a/man/procrustes.Rd b/man/procrustes.Rd
index 4753d6c..632125d 100644
--- a/man/procrustes.Rd
+++ b/man/procrustes.Rd
@@ -28,7 +28,7 @@ procrustes(X, Y, scale = TRUE, symmetric = FALSE, scores = "sites", ...)
 \method{residuals}{procrustes}(object, ...)
 \method{fitted}{procrustes}(object, truemean = TRUE, ...)
 \method{predict}{procrustes}(object, newdata, truemean = TRUE, ...)
-protest(X, Y, scores = "sites", permutations = 999, strata, ...)
+protest(X, Y, scores = "sites", permutations = how(nperm = 999), ...)
 }
 
 \arguments{
@@ -64,11 +64,10 @@ protest(X, Y, scores = "sites", permutations = 999, strata, ...)
     \code{truemean = FALSE}.}
   \item{newdata}{Matrix of coordinates to be rotated and translated to
      the target.}
-  \item{permutations}{Number of permutation to assess the significance
-    of the symmetric Procrustes statistic. }
-  \item{strata}{An integer vector or factor specifying the strata for
-    permutation. If supplied, observations are permuted only within the
-    specified strata.}
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
   \item{ar.col}{Arrow colour.}
   \item{len}{Width of the arrow head.}
   \item{labels}{Character vector of text labels. Rownames of the result 
@@ -162,12 +161,14 @@ protest(X, Y, scores = "sites", permutations = 999, strata, ...)
   \item{t0}{This and the following items are only in class
     \code{protest}:  Procrustes correlation from non-permuted solution.}
   \item{t}{Procrustes correlations from permutations. The distribution
-    of these correlations can be inspected with \code{\link{density.protest}} 
+    of these correlations can be inspected with \code{\link{permustats}}
     function.}
   \item{signif}{`Significance' of \code{t}}
   \item{permutations}{Number of permutations.}
-  \item{strata}{The name of the stratifying variable.}
-  \item{stratum.values}{Values of the stratifying variable.}
+  \item{control}{A list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}},
+    describing the permutation design.}
 }
 \references{
   Mardia, K.V., Kent, J.T. and Bibby,
@@ -186,7 +187,9 @@ protest(X, Y, scores = "sites", permutations = 999, strata, ...)
 
 \seealso{\code{\link{monoMDS}},  for obtaining
 objects for \code{procrustes}, and \code{\link{mantel}} for an
-alternative to \code{protest} without need of dimension reduction.} 
+alternative to \code{protest} without need of dimension reduction. See
+\code{\link[permute]{how}} for details on specifying the type of
+permutation required.} 
 
 \examples{
 data(varespec)
diff --git a/man/rankindex.Rd b/man/rankindex.Rd
index c85eaae..c7f57e4 100644
--- a/man/rankindex.Rd
+++ b/man/rankindex.Rd
@@ -8,7 +8,9 @@
 }
 \usage{
 rankindex(grad, veg, indices = c("euc", "man", "gow", "bra", "kul"),
-          stepacross = FALSE, method = "spearman", ...)
+          stepacross = FALSE, method = "spearman", 
+	  metric = c("euclidean", "mahalanobis", "manhattan", "gower"),
+	  ...)
 }
 
 \arguments{
@@ -23,18 +25,24 @@ rankindex(grad, veg, indices = c("euc", "man", "gow", "bra", "kul"),
     with no shared species are set \code{NA} using
     \code{\link{no.shared}} so that indices with no fixed
     upper limit can also be analysed.}
-  \item{method}{Correlation method used. }
+  \item{method}{Correlation method used.}
+  \item{metric}{Metric to evaluate the gradient separation. See Details.}
   \item{...}{Other parameters to \code{\link{stepacross}}.}
 }
 \details{
-  A good dissimilarity index for multidimensional scaling 
-  should have a high rank-order similarity with gradient separation.
-  The function compares most indices in \code{\link{vegdist}} against
-  gradient separation using rank correlation coefficients in
-  \code{\link{cor.test}}. The gradient separation between each
-  point is assessed as Euclidean distance for continuous variables, and
-  as Gower metric for mixed data using function
-  \code{\link[cluster]{daisy}} when \code{grad} has factors.
+  A good dissimilarity index for multidimensional scaling should have
+  a high rank-order similarity with gradient separation.  The function
+  compares most indices in \code{\link{vegdist}} against gradient
+  separation using rank correlation coefficients in
+  \code{\link{cor}}. The gradient separation between each point is
+  assessed using given \code{metric}. The default is to use Euclidean
+  distance of continuous variables scaled to unit variance, or to use
+  Gower metric for mixed data using function
+  \code{\link[cluster]{daisy}} when \code{grad} has factors. The other
+  alternatives are Mahalanobis distances which are based on
+  \code{grad} matrix scaled so that columns are orthogonal
+  (uncorrelated) and have unit variance, or Manhattan distances of
+  \code{grad} variables scaled to unit range.
 
   The \code{indices} argument can accept any dissimilarity 
   indices besides the ones calculated by the 
@@ -85,10 +93,9 @@ rankindex(grad, veg, indices = c("euc", "man", "gow", "bra", "kul"),
 \examples{
 data(varespec)
 data(varechem)
-## The next scales all environmental variables to unit variance.
-## Some would use PCA transformation.
-rankindex(scale(varechem), varespec)
-rankindex(scale(varechem), wisconsin(varespec))
+## The variables are automatically scaled
+rankindex(varechem, varespec)
+rankindex(varechem, wisconsin(varespec))
 ## Using non vegdist indices as functions
 funs <- list(Manhattan=function(x) dist(x, "manhattan"),
     Gower=function(x) cluster:::daisy(x, "gower"),
diff --git a/man/raupcrick.Rd b/man/raupcrick.Rd
index 5767755..4e2584e 100644
--- a/man/raupcrick.Rd
+++ b/man/raupcrick.Rd
@@ -10,7 +10,7 @@
   occurrence probabilities proportional to species frequencies.  }
 
 \usage{
-raupcrick(comm, null = "r1", nsimul = 999, chase = FALSE)
+raupcrick(comm, null = "r1", nsimul = 999, chase = FALSE, ...)
 }
 
 \arguments{
@@ -27,6 +27,8 @@ raupcrick(comm, null = "r1", nsimul = 999, chase = FALSE)
      recommended except for comparing the results against the Chase
      script).}
 
+ \item{\dots}{Other parameters passed to \code{\link{oecosimu}}.}
+
 }
 
 \details{Raup-Crick index is the probability that compared sampling
diff --git a/man/renyi.Rd b/man/renyi.Rd
index 0bd0ad3..25c60f0 100644
--- a/man/renyi.Rd
+++ b/man/renyi.Rd
@@ -5,7 +5,6 @@
 \alias{renyiaccum}
 \alias{plot.renyiaccum}
 \alias{persp.renyiaccum}
-\alias{rgl.renyiaccum}
 
 \title{Renyi and Hill Diversities and Corresponding Accumulation Curves }
 \description{
@@ -23,7 +22,6 @@ renyiaccum(x, scales = c(0, 0.5, 1, 2, 4, Inf), permutations = 100,
     type = "l", 
     ...)
 \method{persp}{renyiaccum}(x, theta = 220, col = heat.colors(100), zlim, ...)
-rgl.renyiaccum(x, rgl.height = 0.2, ...)
 }
 
 \arguments{
@@ -49,7 +47,6 @@ rgl.renyiaccum(x, rgl.height = 0.2, ...)
     and vector colours will be
     selected by the midpoint of a rectangle in \code{\link{persp}}. }
   \item{zlim}{Limits of vertical axis.}
-  \item{rgl.height}{Scaling of vertical axis.}
   \item{\dots}{Other arguments which are passed to \code{renyi} and
     to graphical functions.}
 
@@ -77,12 +74,13 @@ rgl.renyiaccum(x, rgl.height = 0.2, ...)
   Function \code{renyiaccum} is similar to \code{\link{specaccum}} but
   finds \enc{Rényi}{Renyi} or Hill diversities at given \code{scales}
   for random permutations of accumulated sites.  Its \code{plot}
-  function uses \pkg{lattice} function \code{\link[lattice]{xyplot}} to
-  display the accumulation curves for each value of \code{scales} in a
-  separate panel.  In addition, it has a \code{persp} method to plot the
-  diversity surface against scale and number and sites. Dynamic graphics
-  with \code{rgl.renyiaccum} use \pkg{rgl} package, and produces similar
-  surface as \code{persp} with a mesh showing the empirical confidence levels.
+  function uses \pkg{lattice} function \code{\link[lattice]{xyplot}}
+  to display the accumulation curves for each value of \code{scales}
+  in a separate panel.  In addition, it has a \code{persp} method to
+  plot the diversity surface against scale and number of
+  sites. Similar dynamic graphics can be made with
+  \code{\link[vegan3d]{rgl.renyiaccum}} in \pkg{vegan3d} package.
+  
 }
 \value{
   Function \code{renyi} returns a data frame of selected
@@ -112,7 +110,8 @@ rgl.renyiaccum(x, rgl.height = 0.2, ...)
 \seealso{\code{\link{diversity}} for diversity indices, and
   \code{\link{specaccum}} for ordinary species accumulation curves, and
   \code{\link[lattice]{xyplot}}, \code{\link{persp}} and
-  \code{\link[rgl]{rgl}} for controlling graphics. } 
+  \code{\link[vegan3d]{rgl.renyiaccum}}. 
+} 
 \examples{
 data(BCI)
 i <- sample(nrow(BCI), 12)
diff --git a/man/reorder.hclust.Rd b/man/reorder.hclust.Rd
new file mode 100644
index 0000000..1fbfad7
--- /dev/null
+++ b/man/reorder.hclust.Rd
@@ -0,0 +1,115 @@
+\name{reorder.hclust}
+\alias{reorder.hclust}
+\alias{rev.hclust}
+\alias{scores.hclust}
+
+\title{
+Reorder a Hierarchical Clustering Tree
+}
+
+\description{
+
+  Function takes a hierarchical clustering tree from
+  \code{\link{hclust}} and a vector of values and reorders the
+  clustering tree in the order of the supplied vector, maintaining the
+  constraints on the tree. This is a method of generic function
+  \code{\link{reorder}} and an alternative to reordering a
+  \code{"dendrogram"} object with \code{\link{reorder.dendrogram}}
+
+}
+
+\usage{
+\method{reorder}{hclust}(x, wts, 
+   agglo.FUN = c("mean", "min", "max", "sum", "uwmean"), ...)
+\method{rev}{hclust}(x)
+\method{scores}{hclust}(x, display = "internal", ...)
+}
+
+\arguments{
+  \item{x}{
+    hierarchical clustering from \code{\link{hclust}}.
+}
+  \item{wts}{
+    numeric vector for reordering.
+}
+  \item{agglo.FUN}{
+    a function for weights agglomeration, see below.
+}
+  \item{display}{
+  return \code{"internal"} nodes or \code{"terminal"} nodes (also
+  called \code{"leaves"}).
+}
+  \item{\dots}{
+    additional arguments (ignored).
+}
+}
+
+\details{
+  
+  Dendrograms can be ordered in many ways. The \code{reorder} function
+  reorders an \code{\link{hclust}} tree and provides an alternative to
+  \code{\link{reorder.dendrogram}} which can reorder a
+  \code{\link{dendrogram}}. The current function will also work
+  differently when the \code{agglo.FUN} is \code{"mean"}: the
+  \code{\link{reorder.dendrogram}} will always take the direct mean of
+  member groups ignoring their sizes, but this function will used
+  \code{\link{weighted.mean}} weighted by group sizes, so that the
+  group mean is always the mean of member leaves (terminal nodes). If
+  you want to ignore group sizes, you can use unweighted mean with
+  \code{"uwmean"}. 
+
+  The function accepts only a limited list of \code{agglo.FUN}
+  functions for assessing the value of \code{wts} for groups. The
+  ordering is always ascending, but the order of leaves can be
+  reversed with \code{rev}.
+
+  Function \code{scores} finds the coordinates of nodes as a two-column
+  matrix. For terminal nodes (leaves) this is the value at which the item
+  is merged to the tree, and the labels can still \code{hang} below this
+  level (see \code{\link{plot.hclust}}).
+
+}
+
+\value{
+  Reordered \code{\link{hclust}} result object with added item
+  \code{value} that gives the value of the statistic at each merge
+  level. 
+}
+
+\author{
+  Jari Oksanen
+}
+\note{
+  These functions should really be in base \R.
+}
+
+
+\seealso{
+  \code{\link{hclust}} for getting clustering trees,
+  \code{\link{as.hclust.spantree}} to change a \pkg{vegan} minimum
+  spanning tree to an \code{\link{hclust}} object, and
+  \code{\link{dendrogram}} and \code{\link{reorder.dendrogram}} for an
+  alternative implementation.
+}
+\examples{
+## reorder by water content of soil
+data(mite, mite.env)
+hc <- hclust(vegdist(wisconsin(sqrt(mite))))
+ohc <- with(mite.env, reorder(hc, WatrCont))
+plot(hc)
+plot(ohc)
+
+## label leaves by the observed value, and each branching point
+## (internal node) by the cluster mean
+with(mite.env, plot(ohc, labels=round(WatrCont), cex=0.7))
+ordilabel(scores(ohc), label=round(ohc$value), cex=0.7)
+
+## Slightly different from reordered 'dendrogram' which ignores group
+## sizes in assessing means.
+den <- as.dendrogram(hc)
+den <- with(mite.env, reorder(den, WatrCont, agglo.FUN = mean))
+plot(den)
+}
+
+\keyword{multivariate}
+
diff --git a/man/simper.Rd b/man/simper.Rd
index 22d6375..faf81b8 100644
--- a/man/simper.Rd
+++ b/man/simper.Rd
@@ -10,20 +10,33 @@
 }
 
 \usage{
-simper(comm, group,  ...)
-\method{summary}{simper}(object, ordered = TRUE, 
-     digits = max(3, getOption("digits") - 3), ...)
+simper(comm, group, permutations = 0, trace = FALSE, 
+    parallel = getOption("mc.cores"), ...)
+\method{summary}{simper}(object, ordered = TRUE,
+    digits = max(3,getOption("digits") - 3), ...)
 }
 
 \arguments{
   \item{comm}{Community data matrix.}
   \item{group}{Factor describing the group structure. Must have at
     least 2 levels.}
+  \item{permutations}{a list of control values for the permutations
+    as returned by the function \code{\link[permute]{how}}, or the
+    number of permutations required, or a permutation matrix where each
+    row gives the permuted indices.}
+  \item{trace}{Trace permutations.}
   \item{object}{an object returned by \code{simper}.}
   \item{ordered}{Logical; Should the species be ordered by their
     average contribution?}
   \item{digits}{Number of digits in output.}
-  \item{...}{Parameters passed to other functions.}
+  \item{parallel}{Number of parallel processes or a predefined socket
+    cluster.  With \code{parallel = 1} uses ordinary, non-parallel
+    processing. The parallel processing is done with \pkg{parallel}
+    package. See \code{\link{vegandocs}} \code{decision-vegan} for
+    details.}
+  \item{...}{Parameters passed to other functions. In \code{simper} the
+    extra parameters are passed to \code{\link[permute]{shuffleSet}} if
+    permutations are used.}
 }
 
 \details{ Similarity percentage, \code{simper} (Clarke 1993) is based
@@ -74,6 +87,9 @@ simper(comm, group,  ...)
   \item{ord}{An index vector to order vectors by their contribution or
     order \code{cusum} back to the original data order.}
   \item{cusum}{Ordered cumulative contribution.}
+  \item{p}{Permutation \eqn{p}-value. Probability of getting a larger
+    or equal average contribution in random permutation of the group
+    factor.}
 }
 
 \examples{
diff --git a/man/simulate.rda.Rd b/man/simulate.rda.Rd
index e55dabc..957c080 100644
--- a/man/simulate.rda.Rd
+++ b/man/simulate.rda.Rd
@@ -12,54 +12,78 @@
  works similarly as \code{simulate.lm}.  }
 
 \usage{
-\method{simulate}{rda}(object, nsim = 1, seed = NULL, indx = NULL, rank = "full", ...)
+\method{simulate}{rda}(object, nsim = 1, seed = NULL, indx = NULL,
+    rank = "full", correlated = FALSE, ...)
 }
 \arguments{
-  \item{object}{an object representing a fitted \code{\link{rda}} model.}
-  \item{nsim}{number of response vectors to simulate. (Not yet used, and 
-    values above 1 will give an error). }
+
+  \item{object}{an object representing a fitted \code{\link{rda}},
+    \code{\link{cca}} or \code{\link{capscale}} model.}
+
+  \item{nsim}{number of response matrices to be simulated. Only one
+    dissimilarity matrix is returned for \code{\link{capscale}}, and
+    larger \code{nsim} is an error.}
+
   \item{seed}{an object specifying if and how the random number
     generator should be initialized (\sQuote{seeded}). See 
     \code{\link{simulate}} for details. }
+
   \item{indx}{Index of residuals added to the fitted values, such as
-    produced by  \code{\link{permuted.index}},
-    \code{\link{shuffle}} or \code{\link{sample}}. The index can
-    have duplicate entries so that bootstrapping is allowed. If null,
-    parametric simulation is used and Gaussian error is added to the
-    fitted values.}
+    produced by \code{\link[permute]{shuffleSet}} or
+    \code{\link{sample}}.  The index can have duplicate entries so
+    that bootstrapping is allowed. If \code{nsim} \eqn{>1}, the output
+    should be compliant with \code{\link[permute]{shuffleSet}} with
+    one line for each simulation.  If \code{nsim} is missing, the
+    number of rows of \code{indx} is used to define the number of
+    simulations, but if \code{nsim} is given, it should match number
+    of rows in \code{indx}. If null, parametric simulation is used and
+    Gaussian error is added to the fitted values.}
+
   \item{rank}{The rank of the constrained component: passed to
     \code{\link{predict.rda}} or \code{\link{predict.cca}}. }
+
+  \item{correlated}{Are species regarded as correlated in parametric
+    simulation or when \code{indx} is not given? If
+    \code{correlated = TRUE}, multivariate Gaussian random error is
+    generated, and if \code{FALSE}, Gaussian random error is generated
+    separately for each species. The argument has no effect in
+    \code{\link{capscale}} which has no information on species.}
+
   \item{\dots}{additional optional arguments (ignored). }
 }
 
 \details{ The implementation follows \code{"lm"} method of
-  \code{\link{simulate}}, and adds Gaussian (Normal) error to the
-  fitted values (\code{\link{fitted.rda}}) using function
-  \code{\link{rnorm}}. The standard deviations are estimated
-  independently for each species (column) from the residuals after
-  fitting the constraints. Alternatively, the function can take a
-  permutation index that is used to add permuted residuals
-  (unconstrained component) to the fitted values. Raw data are used in
-  \code{\link{rda}}. Internal Chi-square transformed data in
-  \code{\link{cca}} within the function, but the returned data frame is 
-  similar to the original input data. The simulation is performed on
-  internal metric scaling data in \code{\link{capscale}}, but the
-  function returns the Euclidean distances calculated from the simulated
-  data.  The simulation uses only the real components, and the imaginary
-  dimensions are ignored.
-}
+  \code{\link{simulate}}, and adds Gaussian (Normal) error to the fitted
+  values (\code{\link{fitted.rda}}) using function \code{\link{rnorm}}
+  if \code{correlated = FALSE} or \code{\link[MASS]{mvrnorm}} if
+  \code{correlated = TRUE}. The standard deviations (\code{\link{rnorm}})
+  or covariance matrices for species (\code{\link[MASS]{mvrnorm}}) are
+  estimated from the residuals after fitting the constraints.
+  Alternatively, the function can take a permutation index that is used
+  to add permuted residuals (unconstrained component) to the fitted
+  values. Raw data are used in \code{\link{rda}}. Internal Chi-square
+  transformed data are used in \code{\link{cca}} within the function,
+  but the returned matrix is similar to the original input data. The
+  simulation is performed on internal metric scaling data in
+  \code{\link{capscale}}, but the function returns the Euclidean
+  distances calculated from the simulated data.  The simulation uses
+  only the real components, and the imaginary dimensions are ignored.  }
 
-\value{ Returns a data frame with similar additional arguments on
-  random number seed as \code{\link{simulate}}.  }
+\value{ If \code{nsim = 1}, returns a matrix or dissimilarities (in
+  \code{\link{capscale}}) with similar additional arguments on random
+  number seed as \code{\link{simulate}}. If \code{nsim > 1}, returns a
+  similar array as returned by \code{\link{simulate.nullmodel}} with
+  similar attributes.  }
 
 \author{Jari Oksanen}
 
 
 \seealso{ \code{\link{simulate}} for the generic case and for
-  \code{\link{lm}} objects. Functions \code{\link{fitted.rda}} and
-  \code{\link{fitted.cca}} return fitted values without the error
-  component.  
-}
+  \code{\link{lm}} objects, and \code{\link{simulate.nullmodel}} for
+  community null model simulation. Functions \code{\link{fitted.rda}}
+  and \code{\link{fitted.cca}} return fitted values without the error
+  component. See \code{\link{rnorm}} and \code{\link[MASS]{mvrnorm}}
+  (\pkg{MASS} package) for simulating Gaussian random error. }
 
 \examples{
 data(dune)
@@ -70,6 +94,8 @@ update(mod, simulate(mod) ~  .)
 ## An impression of confidence regions of site scores
 plot(mod, display="sites")
 for (i in 1:5) lines(procrustes(mod, update(mod, simulate(mod) ~ .)), col="blue")
+## Simulate a set of null communities with permutation of residuals
+simulate(mod, indx = shuffleSet(nrow(dune), 99))
 }
 \keyword{ models }
 \keyword{ datagen }
diff --git a/man/spantree.Rd b/man/spantree.Rd
index 9695a37..34a226a 100644
--- a/man/spantree.Rd
+++ b/man/spantree.Rd
@@ -1,6 +1,7 @@
 \name{spantree}
 \alias{spantree}
 \alias{cophenetic.spantree}
+\alias{as.hclust.spantree}
 \alias{plot.spantree}
 \alias{lines.spantree}
 \alias{spandepth}
@@ -13,6 +14,7 @@
 }
 \usage{
 spantree(d, toolong = 0)
+\method{as.hclust}{spantree}(x, ...)
 \method{cophenetic}{spantree}(x)
 spandepth(x)
 \method{plot}{spantree}(x, ord, cex = 0.7, type = "p", labels, dlim,
@@ -58,6 +60,15 @@ spandepth(x)
   corresponding link is \code{NA}. Connected subtrees can be identified
   using \code{\link{distconnected}}.
 
+  Minimum spanning tree is closely related to single linkage
+  clustering, a.k.a. nearest neighbour clustering, and in genetics as
+  neighbour joining tree available in \code{\link{hclust}} and
+  \code{\link[cluster]{agnes}} functions. The most important practical
+  difference is that minimum spanning tree has no concept of cluster
+  membership, but always joins individual points to each other. Function
+  \code{as.hclust} can change the \code{spantree} result into a
+  corresponding \code{\link{hclust}} object.
+
   Function \code{cophenetic} finds distances between all points along
   the tree segments. Function \code{spandepth} returns the depth of
   each node. The nodes of a tree are either leaves (with one link) or
@@ -132,6 +143,8 @@ plot(tr, type = "t")
 ## Depths of nodes
 depths <- spandepth(tr)
 plot(tr, type = "t", label = depths)
+## Plot as a dendrogram
+plot(as.hclust(tr))
 }
 \keyword{ multivariate}
 
diff --git a/man/specaccum.Rd b/man/specaccum.Rd
index 7c28e42..e4cabfb 100644
--- a/man/specaccum.Rd
+++ b/man/specaccum.Rd
@@ -3,9 +3,11 @@
 \alias{print.specaccum}
 \alias{summary.specaccum}
 \alias{plot.specaccum}
+\alias{lines.specaccum}
 \alias{boxplot.specaccum}
 \alias{fitspecaccum}
 \alias{plot.fitspecaccum}
+\alias{lines.fitspecaccum}
 \alias{predict.specaccum}
 \alias{predict.fitspecaccum}
 
@@ -45,8 +47,7 @@ fitspecaccum(object, model, method = "random", ...)
     the empirical dataset for the exact SAC}
   \item{gamma}{Method for estimating the total extrapolated number of species in the
     survey area by function \code{\link{specpool}}}
-  \item{w}{Weights giving the sampling effort (an experimental feature
-    that may be removed).}
+  \item{w}{Weights giving the sampling effort.}
   \item{subset}{logical expression indicating sites (rows) to keep: missing
     values are taken as \code{FALSE}.}
   \item{x}{A \code{specaccum} result object}
@@ -106,8 +107,28 @@ fitspecaccum(object, model, method = "random", ...)
   achieves this by applying function \code{\link{rarefy}} with number of individuals
   corresponding to average number of individuals per site.
 
-  The function has a \code{plot} method. In addition, \code{method = "random"} 
-  has \code{summary} and \code{boxplot} methods. 
+  Methods \code{"random"} and \code{"collector"} can take weights
+  (\code{w}) that give the sampling effort for each site.  The weights
+  \code{w} do not influence the order the sites are accumulated, but
+  only the value of the sampling effort so that not all sites are
+  equal. The summary results are expressed against sites even when the
+  accumulation uses weights (methods \code{"random"},
+  \code{"collector"}), or is based on individuals
+  (\code{"rarefaction"}).  The actual sampling effort is given as item
+  \code{Effort} or \code{Individuals} in the printed result. For
+  weighted \code{"random"} method the effort refers to the average
+  effort per site, or sum of weights per number of sites. With
+  weighted \code{method = "random"}, the averaged species richness is
+  found from linear interpolation of single random permutations.
+  Therefore at least the first value (and often several first) have
+  \code{NA} richness, because these values cannot be interpolated in
+  all cases but should be extrapolated.  The \code{plot} function
+  defaults to display the results as scaled to sites, but this can be
+  changed selecting \code{xvar = "effort"} (weighted methods) or
+  \code{xvar = "individuals"} (with \code{method = "rarefaction"}).
+ 
+  The \code{summary} and \code{boxplot} methods are available for
+  \code{method = "random"}.
 
   Function \code{predict} can return the values corresponding to
   \code{newdata} using linear (\code{\link{approx}}) or spline
@@ -133,7 +154,12 @@ fitspecaccum(object, model, method = "random", ...)
   (\code{\link{SSgompertz}}), \code{"michaelis-menten"})
   (\code{\link{SSmicmen}}), \code{"logis"} (\code{\link{SSlogis}}),
   \code{"weibull"} (\code{\link{SSweibull}}). See these functions for
-  model specification and details.
+  model specification and details. 
+
+  When weights \code{w} were used the fit is based on accumulated
+  effort and in \code{model = "rarefaction"} on accumulated number of
+  individuals.  The \code{plot} is still based on sites, unless other
+  alternative is selected with \code{xvar}.
 
   Function \code{predict} uses \code{\link{predict.nls}}, and you can
   pass all arguments to that function. In addition, \code{fitted},
@@ -154,6 +180,8 @@ fitspecaccum(object, model, method = "random", ...)
     is the number of sites corresponding to a certain number of
     individuals and generally not an integer, and the average
     number of individuals is also returned in item \code{individuals}.} 
+  \item{effort}{Average sum of weights corresponding to the number of
+    sites when model was fitted with argument \code{w}}
   \item{richness}{The number of species corresponding to number of
     sites.  With \code{method = "collector"} this is the observed
     richness, for other methods the average or expected richness.}
@@ -164,7 +192,9 @@ fitspecaccum(object, model, method = "random", ...)
   \item{perm}{Permutation results with \code{method = "random"} and
     \code{NULL} in other cases. Each column in \code{perm} holds one
     permutation.}
-
+  \item{weights}{Matrix of accumulated weights corresponding to the
+    columns of the \code{perm} matrix when model was fitted with
+    argument \code{w}.}
   \item{fitted, residuals, coefficients}{Only in \code{fitspecacum}:
      fitted values, residuals and nonlinear model coefficients. For
      \code{method = "random"} these are matrices with a column for
diff --git a/man/specpool.Rd b/man/specpool.Rd
index a821482..ee7251a 100644
--- a/man/specpool.Rd
+++ b/man/specpool.Rd
@@ -19,7 +19,7 @@
   is based on abundances (counts) on single sample site. 
 }
 \usage{
-specpool(x, pool)
+specpool(x, pool, smallsample = TRUE)
 estimateR(x, ...)
 specpool2vect(X, index = c("jack1","jack2", "chao", "boot","Species"))
 poolaccum(x, permutations = 100, minsize = 3)
@@ -33,6 +33,8 @@ estaccumR(x, permutations = 100)
     for \code{plot} function.}
   \item{pool}{A vector giving a classification for pooling the sites in
     the species data. If missing, all sites are pooled together.}
+  \item{smallsample}{Use small sample correction \eqn{(N-1)/N}, where
+    \eqn{N} is the number of sites within the \code{pool}.}
   \item{X, object}{A \code{specpool} result object.}
   \item{index}{The selected index of extrapolated richness.}
   \item{permutations}{Number of permutations of sampling order of sites.}
@@ -59,33 +61,39 @@ estaccumR(x, permutations = 100)
   sites in the collection.  The variants of extrapolated richness in
   \code{specpool} are:
   \tabular{ll}{
-    Chao
-    \tab \eqn{S_P = S_0 + a1^2/(2*a2)}
+     Chao
+    \tab \eqn{S_P = S_0 + \frac{a_1^2}{2 a_2}\frac{N-1}{N}}{S_P = S_0 + a1^2/(2*a2) * (N-1)/N}
+    \cr
+    Chao bias-corrected
+    \tab \eqn{S_P = S_0 + \frac{a_1(a_1-1)}{2(a_2+1)} \frac{N-1}{N}}{S_P = S_0 + a1*(a1-1)/(2*(a2+1)) * (N-1)/N}
     \cr
     First order jackknife
     \tab \eqn{S_P = S_0 + a_1 \frac{N-1}{N}}{S_P = S_0 + a1*(N-1)/N}
     \cr
     Second order jackknife
     \tab \eqn{S_P = S_0 + a_1 \frac{2N - 3}{N} - a_2 \frac{(N-2)^2}{N
-	(N-1)}}{S_P = S_0 + a1*(2*n-3)/n - a2*(n-2)^2/n/(n-1)}
+	(N-1)}}{S_P = S_0 + a1*(2*N-3)/N - a2*(N-2)^2/N/(N-1)}
     \cr
     Bootstrap
     \tab \eqn{S_P = S_0 + \sum_{i=1}^{S_0} (1 - p_i)^N}{S_P = S_0 + Sum
       (1-p_i)^N}
     }
+    \code{specpool} normally uses basic Chao equation, but when there
+    are no doubletons (\eqn{a2=0}) it switches to bias-corrected
+    version. In that case the Chao equation simplifies to
+    \eqn{S_0 + \frac{1}{2} a_1 (a_1-1) \frac{N-1}{N}}{S_0 + (N-1)/N * a1*(a1-1)/2}.
+
+    The abundance-based estimates in \code{estimateR} use counts
+    (numbers of individuals) of species in a single site. If called for
+    a matrix or data frame, the function will give separate estimates
+    for each site.  The two variants of extrapolated richness in
+    \code{estimateR} are bias-corrected Chao and ACE (O'Hara 2005, Chiu
+    et al. 2014).  The Chao estimate is similar as the bias corrected
+    one above, but \eqn{a_i} refers to the number of species with
+    abundance \eqn{i} instead of number of sites, and the small-sample
+    correction is not used. The ACE estimate is defined as:
 
-    The abundance-based estimates in \code{estimateR} use counts (frequencies) of
-    species in a single site. If called for a matrix or data frame, the
-    function will give separate estimates for each site.  The two
-    variants of extrapolated richness in \code{estimateR} are Chao
-    (unbiased variant) and ACE.  In the Chao estimate
-    \eqn{a_i} refers to number of species with abundance \eqn{i} instead
-    of incidence: 
     \tabular{ll}{
-    Chao
-    \tab \eqn{S_P = S_0 + \frac{a_1 (a_1 -1)}{2 (a_2 + 1)}}{S_P = S_0 +
-      a1*(a1-1)/(2*(a2+1))}
-    \cr
     ACE
     \tab \eqn{S_P = S_{abund} + \frac{S_{rare}}{C_{ace}}+ \frac{a_1}{C_{ace}}
       \gamma^2_{ace}}{S_P = S_abund + S_rare/C_ace + a1/C_ace * gamma^2}
@@ -108,16 +116,16 @@ estaccumR(x, permutations = 100)
 
     Functions estimate the standard errors of the estimates. These
     only concern the number of added species, and assume that there is
-    no variance in the observed richness.
-    The equations of standard errors are too complicated to be reproduced in
-    this help page, but they can be studied in the \R source code of the
-    function.
-    The standard error are based on the following sources: Chao (1987)
-    for the Chao estimate and Smith and van Belle (1984) for the
-    first-order Jackknife and the bootstrap (second-order jackknife is
-    still missing). 
-    The variance estimator of \eqn{S_{ace}}{S_ace} was
-    developed by Bob O'Hara (unpublished).
+    no variance in the observed richness.  The equations of standard
+    errors are too complicated to be reproduced in this help page, but
+    they can be studied in the \R source code of the function and are
+    discussed in the \code{\link{vignette}} \dQuote{diversity-vegan}
+    that can be read with the \code{\link{vegandocs}} command. The
+    standard errors are based on the following sources: Chiu et
+    al. (2014) for the Chao estimates and Smith and van Belle (1984)
+    for the first-order Jackknife and the bootstrap (second-order
+    jackknife is still missing).  For the variance estimator of
+    \eqn{S_{ace}}{S_ace} see O'Hara (2005).
 
   Functions \code{poolaccum} and \code{estaccumR} are similar to
   \code{\link{specaccum}}, but estimate extrapolated richness indices
@@ -127,12 +135,14 @@ estaccumR(x, permutations = 100)
   data. The functions share \code{summary} and \code{plot}
   methods. The \code{summary} returns quantile envelopes of
   permutations corresponding the given level of \code{alpha} and
-  standard deviation of permutations for each sample size. The
+  standard deviation of permutations for each sample size. NB., these
+  are not based on standard deviations estimated within \code{specpool}
+  or \code{estimateR}, but they are based on permutations. The
   \code{plot} function shows the mean and envelope of permutations
   with given \code{alpha} for models. The selection of models can be
   restricted and order changes using the \code{display} argument in
   \code{summary} or \code{plot}. For configuration of \code{plot}
-  command, see \code{\link[lattice]{xyplot}}
+  command, see \code{\link[lattice]{xyplot}}.
 }
 
 \value{
@@ -151,11 +161,18 @@ estaccumR(x, permutations = 100)
 \references{
   Chao, A. (1987). Estimating the population size for capture-recapture
   data with unequal catchability. \emph{Biometrics} 43, 783--791.
+
+  Chiu, C.H., Wang, Y.T., Walther, B.A. & Chao, A. (2014). Improved
+  nonparametric lower bound of species richness via a modified
+  Good-Turing frequency formula. \emph{Biometrics} 70, 671--682.
   
   Colwell, R.K. & Coddington, J.A. (1994). Estimating terrestrial
   biodiversity through
   extrapolation. \emph{Phil. Trans. Roy. Soc. London} B 345, 101--118.
 
+  O'Hara, R.B. (2005). Species richness estimators: how many species
+  can dance on the head of a pin? \emph{J. Anim. Ecol.} 74, 375--386.
+
   Palmer, M.W. (1990). The estimation of species richness by
   extrapolation. \emph{Ecology} 71, 1195--1198.
 
@@ -163,11 +180,18 @@ estaccumR(x, permutations = 100)
   species richness. \emph{Biometrics} 40, 119--129.
 }
 \author{Bob O'Hara (\code{estimateR}) and Jari Oksanen.}
-\note{
-  The functions are based on assumption that there is a species pool:
-  The community is closed so that there is a fixed pool size \eqn{S_P}.
-  Such cases may exist, although I have not seen them yet.  All indices
-  are biased for open communities.
+
+\note{ The functions are based on assumption that there is a species
+  pool: The community is closed so that there is a fixed pool size
+  \eqn{S_P}.  In general, the functions give only the lower limit of
+  species richness: the real richness is \eqn{S >= S_P}, and there is
+  a consistent bias in the estimates. Even the bias-correction in Chao
+  only reduces the bias, but does not remove it completely (Chiu et
+  al. 2014).
+
+  Optional small sample correction was added to \code{specpool} in
+  \pkg{vegan} 2.2-0. It was not used in the older literature (Chao
+  1987), but it is recommended recently (Chiu et al. 2014).
 
   See \url{http://viceroy.eeb.uconn.edu/EstimateS} for a more complete
   (and positive) discussion and alternative software for some platforms.
diff --git a/man/stressplot.wcmdscale.Rd b/man/stressplot.wcmdscale.Rd
new file mode 100644
index 0000000..d1465fd
--- /dev/null
+++ b/man/stressplot.wcmdscale.Rd
@@ -0,0 +1,94 @@
+\name{stressplot.wcmdscale}
+\alias{stressplot.wcmdscale}
+\alias{stressplot.cca}
+\alias{stressplot.rda}
+\alias{stressplot.capscale}
+\alias{stressplot.prcomp}
+\alias{stressplot.princomp}
+
+\title{
+  Display Ordination Distances Against Observed Distances in Eigenvector Ordinations
+}
+
+\description{
+  Functions plot ordination distances in given number of dimensions
+  against observed distances or distances in full space in eigenvector
+  methods. The display is similar as the Shepard diagram
+  (\code{\link{stressplot}} for non-metric multidimensional scaling
+  with \code{\link{metaMDS}} or \code{\link{monoMDS}}), but shows the
+  linear relationship of the eigenvector ordinations. The
+  \code{stressplot} methods are available for \code{\link{wcmdscale}},
+  \code{\link{rda}}, \code{\link{cca}}, \code{\link{capscale}},
+  \code{\link{prcomp}} and \code{\link{princomp}}. 
+}
+
+\usage{
+\method{stressplot}{wcmdscale}(object, k = 2, pch, p.col = "blue", l.col = "red",
+    lwd = 2, ...)
+}
+
+\arguments{
+  \item{object}{
+    Result object from eigenvector ordination (\code{\link{wcmdscale}},
+    \code{\link{rda}}, \code{\link{cca}}, \code{\link{capscale}})
+}
+  \item{k}{
+    Number of dimensions for which the ordination distances are displayed.
+}
+  \item{pch, p.col, l.col, lwd}{
+    Plotting character, point colour and line colour like in
+    default \code{\link{stressplot}}
+}
+  \item{\dots}{
+    Other parameters to functions, e.g. graphical parameters.
+}
+}
+
+\details{ The functions offer a similar display for eigenvector
+  ordinations as the standard Shepard diagram (\code{\link{stressplot}})
+  in non-metric multidimensional scaling. The ordination distances in
+  given number of dimensions are plotted against observed
+  distances. With metric distances, the ordination distances in full
+  space (with all ordination axes) are equal to observed distances, and
+  the fit line shows this equality. In general, the fit line does not go
+  through the points, but the points for observed distances approach the
+  fit line from below. However, with non-metric distances (in
+  \code{\link{wcmdscale}} or \code{\link{capscale}}) with negative
+  eigenvalues the ordination distances can exceed the observed distances
+  in real dimensions; the imaginary dimensions with negative eigenvalues
+  will correct these excess distances. If you have used
+  \code{\link{capscale}} with argument \code{add = TRUE} to avoid
+  negative eigenvalues, the ordination distances will exceed the
+  observed dissimilarities by the additive constant.
+
+  In partial ordination (\code{\link{cca}}, \code{\link{rda}} and
+  \code{\link{capscale}} with \code{Condition} in the formula), the
+  distances in the partial component are included both in the observed
+  distances and in ordination distances.  With \code{k=0}, the
+  ordination distances refer to the partial ordination.
+
+}
+
+\value{
+  Functions draw a graph and return invisibly the ordination distances.
+}
+
+\author{
+  Jari Oksanen.
+}
+
+\seealso{
+  \code{\link{stressplot}} and \code{\link{stressplot.monoMDS}} for
+  standard Shepard diagrams.
+}
+
+\examples{
+data(dune, dune.env)
+mod <- rda(dune)
+stressplot(mod)
+mod <- rda(dune ~ Management, dune.env)
+stressplot(mod, k=3)
+}
+
+\keyword{ multivariate }
+
diff --git a/man/treedive.Rd b/man/treedive.Rd
index a1f6837..c89d972 100644
--- a/man/treedive.Rd
+++ b/man/treedive.Rd
@@ -3,17 +3,18 @@
 \alias{treeheight}
 \alias{treedist}  
 
-\title{ Functional Diversity estimated from a Species Dendrogram}
-\description{
-  Functional diversity is defined as the total branch length in a trait
-  dendrogram connecting all species, but excluding the unnecessary root
-  segments of the tree (Petchey and Gaston 2006).
-}
+\title{Functional Diversity and Community Distances from Species Trees}
+
+\description{ Functional diversity is defined as the total branch
+  length in a trait dendrogram connecting all species, but excluding
+  the unnecessary root segments of the tree (Petchey and Gaston
+  2006). Tree distance is the increase in total branch length when
+  combining two sites.  }
 
 \usage{
-treedive(comm, tree, match.force = FALSE)
+treedive(comm, tree, match.force = TRUE, verbose = TRUE)
 treeheight(tree)
-treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
+treedist(x, tree, relative = TRUE, match.force = TRUE, ...)
 }
 
 \arguments{
@@ -25,6 +26,7 @@ treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
     matching only happens when dimensions differ (with a warning or
     message). The order of data must match to the order in \code{tree}
     if matching by names is not done.}
+  \item{verbose}{Print diagnostic messages and warnings.}
   \item{relative}{Use distances relative to the height of combined tree.}
   \item{\dots}{Other arguments passed to functions (ignored).}
 }
@@ -34,7 +36,8 @@ treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
   dendrogram that can be coerced to a correct type using
   \code{\link{as.hclust}}. When applied to a clustering of species
   traits, this is a measure of functional diversity (Petchey and Gaston
-  2002, 2006).
+  2002, 2006), and when applied to phylogenetic trees this is
+  phylogenetic diversity.
 
   Function \code{treedive} finds the \code{treeheight} for each site
   (row) of a community matrix. The function uses a subset of
@@ -44,7 +47,7 @@ treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
   calculating \code{\link{cophenetic}} distances from the input
   dendrogram, then reconstructing the dendrogram for the subset of the
   cophenetic distance matrix for species occurring in each
-  site. Diversity is 0 for one spcies, and \code{NA} for empty
+  site. Diversity is 0 for one species, and \code{NA} for empty
   communities.
 
   Function \code{treedist} finds the dissimilarities among
@@ -63,15 +66,17 @@ treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
   index attains its maximum value \eqn{2}. The dissimilarity is zero
   from a combined zero-height tree.
 
-  The functions need a dendrogram of species traits as an input. If
-  species traits contain \code{\link{factor}} or \code{\link{ordered}}
-  factor variables, it is recommended to use Gower distances for mixed
-  data (function \code{\link[cluster]{daisy}} in package \pkg{cluster}),
-  and usually the recommended clustering method is UPGMA 
-  (\code{method = "average"} in function \code{\link{hclust}}) 
-  (Podani and Schmera 2006).
-
-  It is possible to analyse the non-randomness of functional diversity
+  The functions need a dendrogram of species traits or phylogenies as an
+  input. If species traits contain \code{\link{factor}} or
+  \code{\link{ordered}} factor variables, it is recommended to use Gower
+  distances for mixed data (function \code{\link[cluster]{daisy}} in
+  package \pkg{cluster}), and usually the recommended clustering method
+  is UPGMA (\code{method = "average"} in function \code{\link{hclust}})
+  (Podani and Schmera 2006). Phylogenetic trees can be changed into
+  dendrograms using \code{\link[ape]{as.hclust.phylo}} (package
+  \pkg{ape}).
+
+  It is possible to analyse the non-randomness of tree diversity
   using \code{\link{oecosimu}}. This needs specifying an adequate Null
   model, and the results will change with this choice.
 }
@@ -112,19 +117,22 @@ treedist(x, tree, relative = TRUE, match.force = FALSE, ...)
 }
 
 \examples{
-## There is no data set on species properties yet, and therefore
-## the example uses taxonomy 
+
+## There is no data set on species properties yet, and we demonstrate
+## the methods using phylogenetic trees
 data(dune)
-data(dune.taxon)
-d <- taxa2dist(dune.taxon, varstep=TRUE)
-cl <- hclust(d, "aver")
+data(dune.phylodis)
+cl <- hclust(dune.phylodis)
 treedive(dune, cl)
 ## Significance test using Null model communities.
-## The current choice fixes only site totals.
-oecosimu(dune, treedive, "r0", tree = cl)
-## Clustering of tree distances
+## The current choice fixes numbers of species and picks species
+## proportionally to their overall frequency
+oecosimu(dune, treedive, "r1", tree = cl, verbose = FALSE)
+## Phylogenetically ordered community table
 dtree <- treedist(dune, cl)
-plot(hclust(dtree, "aver"))
+tabasco(dune, hclust(dtree), cl)
+## Use tree distances  in capscale
+capscale(dtree ~ 1, comm=dune)
 }
 
 \keyword{ univar }
diff --git a/man/tsallis.Rd b/man/tsallis.Rd
index 1eb4255..1a27da6 100644
--- a/man/tsallis.Rd
+++ b/man/tsallis.Rd
@@ -1,134 +1,134 @@
-\encoding{UTF-8}
-\name{tsallis}
-\alias{tsallis}
-\alias{tsallisaccum}
-\alias{persp.tsallisaccum}
-\title{Tsallis Diversity and Corresponding Accumulation Curves}
-\description{
-Function \code{tsallis} find Tsallis diversities with any scale or the corresponding evenness measures. Function \code{tsallisaccum} finds these statistics with accumulating sites.
-}
-\usage{
-tsallis(x, scales = seq(0, 2, 0.2), norm = FALSE, hill = FALSE)
-tsallisaccum(x, scales = seq(0, 2, 0.2), permutations = 100, 
-   raw = FALSE, subset, ...)
-\method{persp}{tsallisaccum}(x, theta = 220, phi = 15, col = heat.colors(100), zlim, ...)
-}
-
-\arguments{
-  \item{x}{Community data matrix or plotting object. }
-  \item{scales}{Scales of Tsallis diversity.}
-
-  \item{norm}{Logical, if \code{TRUE} diversity values are normalized
-    by their maximum (diversity value at equiprobability conditions).}
-
-  \item{hill}{Calculate Hill numbers.}
-  
-  \item{permutations}{Number of random permutations in accumulating
-    sites.}
-
-  \item{raw}{If \code{FALSE} then return summary statistics of
-    permutations, and if TRUE then returns the individual
-    permutations.}
-
-  \item{subset}{logical expression indicating sites (rows) to keep:
-    missing values are taken as \code{FALSE}.}
-
-  \item{theta, phi}{angles defining the viewing
-    direction. \code{theta} gives the azimuthal direction and
-    \code{phi} the colatitude.}
-  
-  \item{col}{Colours used for surface.}  \item{zlim}{Limits of
-  vertical axis.}  
-
-  \item{\dots}{Other arguments which are passed to \code{tsallis} and
-    to graphical functions.}
-
-} 
-
-\details{ The Tsallis diversity (also equivalent to Patil and Taillie
-diversity) is a one-parametric generalised entropy function, defined
-as:
-
-\deqn{H_q = \frac{1}{q-1} (1-\sum_{i=1}^S p_i^q)}{H.q = 1/(q-1)(1-sum(p^q))}
-
-where \eqn{q} is a scale parameter, \eqn{S} the number of species in
-the sample (Tsallis 1988, Tothmeresz 1995). This diversity is concave
-for all \eqn{q>0}, but non-additive (Keylock 2005). For \eqn{q=0} it
-gives the number of species minus one, as \eqn{q} tends to 1 this
-gives Shannon diversity, for \eqn{q=2} this gives the Simpson index
-(see function \code{\link{diversity}}).
-
-If \code{norm = TRUE}, \code{tsallis} gives values normalized by the
-maximum:
-
-\deqn{H_q(max) = \frac{S^{1-q}-1}{1-q}}{H.q(max) = (S^(1-q)-1)/(1-q)}
-
-where \eqn{S} is the number of species. As \eqn{q} tends to 1, maximum
-is defined as \eqn{ln(S)}.
-
-If \code{hill = TRUE}, \code{tsallis} gives Hill numbers (numbers
-equivalents, see Jost 2007):
-
-\deqn{D_q = (1-(q-1) H)^{1/(1-q)}}{D.q = (1-(q-1)*H)^(1/(1-q))}
-
-Details on plotting methods and accumulating values can be found on
-the help pages of the functions \code{\link{renyi}} and
-\code{\link{renyiaccum}}.  
-}
-
-\value{ 
-Function \code{tsallis} returns a data frame of selected
-indices. Function \code{tsallisaccum} with argument \code{raw = FALSE}
-returns a three-dimensional array, where the first dimension are the
-accumulated sites, second dimension are the diversity scales, and
-third dimension are the summary statistics \code{mean}, \code{stdev},
-\code{min}, \code{max}, \code{Qnt 0.025} and \code{Qnt 0.975}. With
-argument \code{raw = TRUE} the statistics on the third dimension are
-replaced with individual permutation results.  }
-
-\references{
-
-Tsallis, C. (1988) Possible generalization of Boltzmann-Gibbs
-  statistics.  \emph{J. Stat. Phis.} 52, 479--487.
-
-Tothmeresz, B. (1995) Comparison of different methods for diversity
-  ordering. \emph{Journal of Vegetation Science} \bold{6}, 283--290.
-
-Patil, G. P. and Taillie, C. (1982) Diversity as a concept and its
-  measurement.  \emph{J. Am. Stat. Ass.} \bold{77}, 548--567.
-
-Keylock, C. J. (2005) Simpson diversity and the Shannon-Wiener index
-  as special cases of a generalized entropy.  \emph{Oikos} \bold{109},
-  203--207.
-
-Jost, L (2007) Partitioning diversity into independent alpha and beta
-  components.  \emph{Ecology} \bold{88}, 2427--2439.
-}
-
-\author{\enc{Péter Sólymos}{Peter Solymos},
-\email{solymos at ualberta.ca}, based on the code of Roeland Kindt and
-Jari Oksanen written for \code{renyi}}
-
-\seealso{ Plotting methods and accumulation routines are based on
-functions \code{\link{renyi}} and \code{\link{renyiaccum}}. An object
-of class 'tsallisaccum' can be used with function
-\code{\link{rgl.renyiaccum}} as well. See also settings for
-\code{\link{persp}}.  }
-
-\examples{
-data(BCI)
-i <- sample(nrow(BCI), 12)
-x1 <- tsallis(BCI[i,])
-x1
-diversity(BCI[i,],"simpson") == x1[["2"]]
-plot(x1)
-x2 <- tsallis(BCI[i,],norm=TRUE)
-x2
-plot(x2)
-mod1 <- tsallisaccum(BCI[i,])
-plot(mod1, as.table=TRUE, col = c(1, 2, 2))
-persp(mod1)
-mod2 <- tsallisaccum(BCI[i,], norm=TRUE)
-persp(mod2,theta=100,phi=30)
-}
-\keyword{multivariate}
+\encoding{UTF-8}
+\name{tsallis}
+\alias{tsallis}
+\alias{tsallisaccum}
+\alias{persp.tsallisaccum}
+\title{Tsallis Diversity and Corresponding Accumulation Curves}
+\description{
+Function \code{tsallis} finds Tsallis diversities with any scale or the corresponding evenness measures. Function \code{tsallisaccum} finds these statistics with accumulating sites.
+}
+\usage{
+tsallis(x, scales = seq(0, 2, 0.2), norm = FALSE, hill = FALSE)
+tsallisaccum(x, scales = seq(0, 2, 0.2), permutations = 100, 
+   raw = FALSE, subset, ...)
+\method{persp}{tsallisaccum}(x, theta = 220, phi = 15, col = heat.colors(100), zlim, ...)
+}
+
+\arguments{
+  \item{x}{Community data matrix or plotting object. }
+  \item{scales}{Scales of Tsallis diversity.}
+
+  \item{norm}{Logical, if \code{TRUE} diversity values are normalized
+    by their maximum (diversity value at equiprobability conditions).}
+
+  \item{hill}{Calculate Hill numbers.}
+  
+  \item{permutations}{Number of random permutations in accumulating
+    sites.}
+
+  \item{raw}{If \code{FALSE} then return summary statistics of
+    permutations, and if TRUE then returns the individual
+    permutations.}
+
+  \item{subset}{logical expression indicating sites (rows) to keep:
+    missing values are taken as \code{FALSE}.}
+
+  \item{theta, phi}{angles defining the viewing
+    direction. \code{theta} gives the azimuthal direction and
+    \code{phi} the colatitude.}
+  
+  \item{col}{Colours used for surface.}  \item{zlim}{Limits of
+  vertical axis.}  
+
+  \item{\dots}{Other arguments which are passed to \code{tsallis} and
+    to graphical functions.}
+
+} 
+
+\details{ The Tsallis diversity (also equivalent to Patil and Taillie
+diversity) is a one-parametric generalised entropy function, defined
+as:
+
+\deqn{H_q = \frac{1}{q-1} (1-\sum_{i=1}^S p_i^q)}{H.q = 1/(q-1)(1-sum(p^q))}
+
+where \eqn{q} is a scale parameter, \eqn{S} the number of species in
+the sample (Tsallis 1988, Tothmeresz 1995). This diversity is concave
+for all \eqn{q>0}, but non-additive (Keylock 2005). For \eqn{q=0} it
+gives the number of species minus one, as \eqn{q} tends to 1 this
+gives Shannon diversity, for \eqn{q=2} this gives the Simpson index
+(see function \code{\link{diversity}}).
+
+If \code{norm = TRUE}, \code{tsallis} gives values normalized by the
+maximum:
+
+\deqn{H_q(max) = \frac{S^{1-q}-1}{1-q}}{H.q(max) = (S^(1-q)-1)/(1-q)}
+
+where \eqn{S} is the number of species. As \eqn{q} tends to 1, maximum
+is defined as \eqn{ln(S)}.
+
+If \code{hill = TRUE}, \code{tsallis} gives Hill numbers (numbers
+equivalents, see Jost 2007):
+
+\deqn{D_q = (1-(q-1) H)^{1/(1-q)}}{D.q = (1-(q-1)*H)^(1/(1-q))}
+
+Details on plotting methods and accumulating values can be found on
+the help pages of the functions \code{\link{renyi}} and
+\code{\link{renyiaccum}}.  
+}
+
+\value{ 
+Function \code{tsallis} returns a data frame of selected
+indices. Function \code{tsallisaccum} with argument \code{raw = FALSE}
+returns a three-dimensional array, where the first dimension are the
+accumulated sites, second dimension are the diversity scales, and
+third dimension are the summary statistics \code{mean}, \code{stdev},
+\code{min}, \code{max}, \code{Qnt 0.025} and \code{Qnt 0.975}. With
+argument \code{raw = TRUE} the statistics on the third dimension are
+replaced with individual permutation results.  }
+
+\references{
+
+Tsallis, C. (1988) Possible generalization of Boltzmann-Gibbs
+  statistics.  \emph{J. Stat. Phys.} 52, 479--487.
+
+Tothmeresz, B. (1995) Comparison of different methods for diversity
+  ordering. \emph{Journal of Vegetation Science} \bold{6}, 283--290.
+
+Patil, G. P. and Taillie, C. (1982) Diversity as a concept and its
+  measurement.  \emph{J. Am. Stat. Ass.} \bold{77}, 548--567.
+
+Keylock, C. J. (2005) Simpson diversity and the Shannon-Wiener index
+  as special cases of a generalized entropy.  \emph{Oikos} \bold{109},
+  203--207.
+
+Jost, L (2007) Partitioning diversity into independent alpha and beta
+  components.  \emph{Ecology} \bold{88}, 2427--2439.
+}
+
+\author{\enc{Péter Sólymos}{Peter Solymos},
+\email{solymos at ualberta.ca}, based on the code of Roeland Kindt and
+Jari Oksanen written for \code{renyi}}
+
+\seealso{ Plotting methods and accumulation routines are based on
+functions \code{\link{renyi}} and \code{\link{renyiaccum}}. An object
+of class 'tsallisaccum' can be used with function
+\code{\link[vegan3d]{rgl.renyiaccum}} as well. See also settings for
+\code{\link{persp}}.  }
+
+\examples{
+data(BCI)
+i <- sample(nrow(BCI), 12)
+x1 <- tsallis(BCI[i,])
+x1
+diversity(BCI[i,],"simpson") == x1[["2"]]
+plot(x1)
+x2 <- tsallis(BCI[i,],norm=TRUE)
+x2
+plot(x2)
+mod1 <- tsallisaccum(BCI[i,])
+plot(mod1, as.table=TRUE, col = c(1, 2, 2))
+persp(mod1)
+mod2 <- tsallisaccum(BCI[i,], norm=TRUE)
+persp(mod2,theta=100,phi=30)
+}
+\keyword{multivariate}
diff --git a/man/vegan-defunct.Rd b/man/vegan-defunct.Rd
index c27d2fb..09235d0 100644
--- a/man/vegan-defunct.Rd
+++ b/man/vegan-defunct.Rd
@@ -3,24 +3,7 @@
 %    removed from vegan, but here we document only those that were
 %    renamed and are not documented in 'permute'
  
-%\alias{permCheck}
-%\alias{numPerms}
-%\alias{print.permCheck}
-%\alias{print.summary.permCheck}
-%\alias{summary.permCheck}
-\alias{getNumObs}
-%\alias{getNumObs.default}
-%\alias{getNumObs.integer}
-%\alias{getNumObs.numeric}
-%\alias{allPerms}
-%\alias{print.allPerms}
-%\alias{summary.allPerms}
-%\alias{print.summary.allPerms}
-%\alias{permuplot}
-\alias{permuted.index2}
-%\alias{permControl}
-%\alias{print.permControl}
-%\alias{permute}
+\alias{metaMDSrotate}
 
 \alias{vegan-defunct}
 %------ NOTE:  ../R/vegan-deprecated.R   must be synchronized with this!
@@ -31,20 +14,13 @@
   they are no longer needed.
 }
 \usage{
-%-- Removed from vegan 2.0-0: now in package permute
-getNumObs(object, \dots)
-\method{getNumObs}{default}(object, \dots)
-\method{getNumObs}{numeric}(object, \dots)
-\method{getNumObs}{integer}(object, \dots)
-
-permuted.index2(n, control = permControl())
+metaMDSrotate(object, vec, na.rm = FALSE, ...)
 }
 
-\details{ The \dQuote{new} permutation functions were moved to the
-  \pkg{permute} package, and they are documented there.  The
-  \pkg{permute} package replaces \code{permuted.index2} with
-  \code{\link[permute]{shuffle}} and \code{getNumObs} with its specific
-  \code{\link[permute]{nobs-methods}}.
+\details{ 
+  Function \code{metaMDSrotate} is replaced with
+  \code{\link{MDSrotate}} which can handle \code{\link{monoMDS}}
+  results in addition to \code{\link{metaMDS}}.
 }
 
 \seealso{
diff --git a/man/vegan-deprecated.Rd b/man/vegan-deprecated.Rd
index 64dd6ab..f034d1a 100644
--- a/man/vegan-deprecated.Rd
+++ b/man/vegan-deprecated.Rd
@@ -1,5 +1,16 @@
+\encoding{UTF-8}
 \name{vegan-deprecated}
-\alias{metaMDSrotate}
+\alias{commsimulator}
+\alias{density.adonis}
+\alias{density.anosim}
+\alias{density.mantel}
+\alias{density.mrpp}
+\alias{density.permutest.cca}
+\alias{density.protest}
+\alias{plot.vegandensity}
+\alias{densityplot.adonis}
+\alias{density.oecosimu}
+\alias{densityplot.oecosimu}
 
 \alias{vegan-deprecated}
 %------ NOTE:  ../R/vegan-deprecated.R   must be synchronized with this!
@@ -10,14 +21,36 @@
   \pkg{vegan} only, and may be defunct as soon as the next release.
 }
 \usage{
-metaMDSrotate(object, vec, na.rm = FALSE, ...)
+commsimulator(x, method, thin=1)
+\method{density}{adonis}(x, ...)
+\method{plot}{vegandensity}(x, main = NULL, xlab = NULL, ylab = "Density", 
+   type = "l", zero.line = TRUE, obs.line = TRUE, ...)
+\method{densityplot}{adonis}(x, data, xlab = "Null", ...)
 }
 
 \arguments{
- \item{object}{A result object from \code{metaMDS}.}
- \item{vec}{A continuous site variable (vector).}
- \item{na.rm}{Remove missing values from continuous variable \code{vec}.}
-  \item{\dots}{Other parameters passed to functions.}
+ \item{x}{Community data for \code{commsimulator}, or an object to be
+   handled by \code{density} or \code{densityplot}}
+ ## commsimulator
+ \item{method}{Null model method: either a name (character string) of
+   a method defined in \code{\link{make.commsim}} or a
+   \code{\link{commsim}} function.}
+ \item{thin}{Number of discarded null communities between two
+   evaluations of nestedness statistic in sequential methods
+   \code{"swap"} and \code{"tswap"} (ignored with non-sequential
+   methods)}
+  ## density and densityplot
+  \item{main, xlab, ylab, type, zero.line}{Arguments of
+    \code{\link{plot.density}}, \code{\link[lattice]{densityplot}}.}
+  \item{obs.line}{Draw vertical line for the observed
+    statistic. Logical value \code{TRUE} draws a red line, and
+    \code{FALSE} draws nothing. Alternatively, \code{obs.line} can be a
+    definition of the colour used for the line, either as a numerical
+    value from the \code{\link[grDevices]{palette}} or as the name of
+    the colour, or other normal definition of the colour.}
+  \item{data}{Ignored.}
+  \item{\dots}{ Other arguments passed to functions. }
+
 }
 
 \details{
@@ -26,11 +59,149 @@ metaMDSrotate(object, vec, na.rm = FALSE, ...)
   %-- Or:
   %% explain *why* it's deprecated, and \code{\link{..}} to new
   
-  Function \code{metaMDSrotate} is replaced with
-  \code{\link{MDSrotate}} which can handle \code{\link{monoMDS}}
-  results in addition to \code{\link{metaMDS}}.
+  Function \code{commsimulator} is replaced with
+  \code{\link{make.commsim}} which defines the Null models, and
+  functions \code{\link{nullmodel}} and
+  \code{\link{simulate.nullmodel}} that check the input data and
+  generate the Null model communities.  Function \code{commsimulator}
+  was used to generate a single Null model for presence/absence
+  (binary) data. Below is a copy of its original documentation in
+  \code{\link{oecosimu}}, where it is now replaced with
+  \code{\link{make.commsim}}, \code{\link{nullmodel}} and
+  \code{\link{simulate.nullmodel}}.  Approximately the same
+  documentation for these models is found in
+  \code{\link{make.commsim}}.  (However, the random number sequences
+  for model \code{r0} differ, and you must use \code{method = "r0_old"} 
+  in \code{\link{make.commsim}} to reproduce the \code{commsimulator} 
+  results.)
+
+  Function \code{commsimulator} implements binary (presence/absence) 
+  null models for community composition.
+  The implemented models are \code{r00} which maintains the
+  number of presences but fills these anywhere so that neither species
+  (column) nor site (row) totals are preserved. Methods \code{r0},
+  \code{r1} and \code{r2} maintain the site (row) frequencies. Method \code{r0}
+  fills presences anywhere on the row with no respect to species (column)
+  frequencies, \code{r1} uses column marginal 
+  frequencies as probabilities, and \code{r2} uses squared column
+  sums. Methods \code{r1} and \code{r2} try to simulate original species
+  frequencies, but they are not strictly constrained. All these methods
+  are reviewed by Wright et al. (1998). Method \code{c0} maintains
+  species frequencies, but does not honour site (row) frequencies (Jonsson
+  2001). 
+
+  The other methods maintain both row and column frequencies.
+  Methods \code{swap} and \code{tswap} implement sequential methods,
+  where the matrix is changed only little in one step, but the changed
+  matrix is used as an input of the next step.
+  Methods \code{swap} and \code{tswap} inspect random 2x2 submatrices
+  and if they are checkerboard units, the order of columns is
+  swapped. This changes the matrix structure, but does not influence
+  marginal sums (Gotelli & Entsminger
+  2003). Method \code{swap} inspects submatrices so long that a swap
+  can be done. \enc{Miklós}{Miklos} & Podani (2004) suggest that this may lead into
+  biased sequences, since some columns or rows may be more easily
+  swapped, and they suggest trying a fixed number of times and
+  doing zero to many swaps at one step. This method is implemented by
+  method \code{tswap} or trial swap. Function \code{commsimulator} makes
+  only one trial swap in time (which probably does nothing),
+  but \code{oecosimu} estimates how many
+  submatrices are expected before finding a swappable checkerboard,
+  and uses that ratio to thin the results, so that on average one swap
+  will be found per step of \code{tswap}.  However, the checkerboard
+  frequency probably changes during swaps, but this is not taken into
+  account in estimating the \code{thin}.  One swap still changes the
+  matrix only little, and it may be useful to 
+  thin the results so that the statistic is only evaluated after
+  \code{burnin} steps (and \code{thin}ned). 
+
+  Methods \code{quasiswap} and \code{backtracking} are not sequential,
+  but each call produces a matrix that is independent of previous
+  matrices, and has the same marginal totals as the original data. The
+  recommended method is \code{quasiswap} which is much faster because
+  it is implemented in C. Method \code{backtracking} is provided for
+  comparison, but it is so slow that it may be dropped from future
+  releases of \pkg{vegan} (or also implemented in C).
+  Method \code{quasiswap} (\enc{Miklós}{Miklos} & Podani 2004)
+  implements a method where matrix is first filled 
+  honouring row and column totals, but with integers that may be larger than
+  one. Then the method inspects random 2x2 matrices and performs a
+  quasiswap on them. Quasiswap is similar to ordinary swap, but it also
+  can reduce numbers above one to ones maintaining marginal
+  totals.
+  Method \code{backtracking}
+  implements a filling method with constraints both for row and column
+  frequencies (Gotelli & Entsminger 2001). The matrix is first filled
+  randomly using row and column frequencies as probabilities. Typically
+  row and column sums are reached before all incidences are filled in.
+  After that begins \dQuote{backtracking}, where some of the
+  points are removed, and then filling is started again, and this
+  backtracking is done so many times that all incidences will be filled
+  into matrix. The \code{quasiswap} method is not sequential, but it produces
+  a random incidence matrix with given marginal totals. 
+
+  The \code{density} function can directly access permutation results
+  of the same function as \code{permustats}.  The \code{density}
+  function is identical to \code{\link{density.default}} and takes all
+  its arguments, but adds the observed statistic to the result as item
+  \code{"observed"}. The observed statistic is also put among the
+  permuted values so that the results are consistent with significance
+  tests. The \code{plot} method is similar to the default
+  \code{\link{plot.density}}, but can also add the observed statistic
+  to the graph as a vertical line.  In \code{\link{adonis}} it is also
+  possible to use the \code{densityplot} function directly.
+
+  The deprecated \code{density} and \code{densityplot} methods are
+  replaced with similar methods for \code{\link{permustats}}. The
+  \code{\link{permustats}} offers more powerful analysis tools for
+  permutations, including \code{\link{summary.permustats}} giving
+  \eqn{z} values (a.k.a. standardized effect sizes, SES), and Q-Q
+  plots (\code{\link{qqnorm.permustats}},
+  \code{\link{qqmath.permustats}}). Below is the old documentation: 
+
+  The density methods are available for \pkg{vegan} functions
+  \code{\link{adonis}}, \code{\link{anosim}}, \code{\link{mantel}},
+  \code{\link{mantel.partial}}, \code{\link{mrpp}},
+  \code{\link{permutest.cca}}, and \code{\link{protest}}.  The
+  \code{density} function for \code{\link{oecosimu}} is documented
+  separately, and it is also used for \code{\link{adipart}},
+  \code{\link{hiersimu}} and \code{\link{multipart}}.
+
+  All \pkg{vegan} \code{density} functions return an object of class
+  \code{"vegandensity"} inheriting from \code{\link{density}}, and can
+  be plotted with its \code{plot} method.  This is identical to the
+  standard \code{plot} of \code{density} objects, but can also add a
+  vertical line for the observed statistic.
+
+  Functions that can return several permuted statistics simultaneously
+  also have \code{\link[lattice]{densityplot}} method
+  (\code{\link{adonis}}, \code{\link{oecosimu}} and diversity
+  partitioning functions based on \code{oecosimu}).  The standard
+  \code{\link{density}} can only handle univariate data, and a warning
+  is issued if the function is used for a model with several observed
+  statistics
 
 }
+
+\references{
+  Gotelli, N.J. & Entsminger, G.L. (2001). Swap and fill algorithms in
+  null model analysis: rethinking the knight's tour. \emph{Oecologia}
+  129, 281--291.
+
+  Gotelli, N.J. & Entsminger, G.L. (2003). Swap algorithms in null model
+  analysis. \emph{Ecology} 84, 532--535.
+
+  Jonsson, B.G. (2001) A null model for randomization tests of
+  nestedness in species assemblages. \emph{Oecologia} 127, 309--313.
+
+  \enc{Miklós}{Miklos}, I. & Podani, J. (2004). Randomization of presence-absence
+  matrices: comments and new algorithms. \emph{Ecology} 85, 86--92.
+
+  Wright, D.H., Patterson, B.D., Mikkelson, G.M., Cutler, A. & Atmar,
+  W. (1998). A comparative analysis of nested subset patterns of species
+  composition. \emph{Oecologia} 113, 1--20.
+  }
+
 \seealso{
   \code{\link{Deprecated}}
 }
diff --git a/man/vegan-internal.Rd b/man/vegan-internal.Rd
index 02c6844..95b7ba1 100644
--- a/man/vegan-internal.Rd
+++ b/man/vegan-internal.Rd
@@ -3,7 +3,8 @@
 \alias{ordiParseFormula}
 \alias{ordiNAexclude}
 \alias{ordiNApredict}
-\alias{permuted.index}
+\alias{getPermuteMatrix}
+\alias{howHead}
 \alias{centroids.cca}
 \alias{ordiTerminfo}
 \alias{pasteCall}
@@ -12,6 +13,7 @@
 \alias{ordiArgAbsorber}
 \alias{veganCovEllipse}
 \alias{hierParseFormula}
+\alias{veganMahatrans}
 
 \title{Internal vegan functions}
 
@@ -31,9 +33,11 @@ ordiArrowTextXY(vect, labels, ...)
 ordiArgAbsorber(..., shrink, origin, scaling, triangular,
                 display, choices, const, FUN)
 centroids.cca(x, mf, wt)
-permuted.index(n, strata)
+getPermuteMatrix(perm, N, strata = NULL)
+howHead(x, ...)
 pasteCall(call, prefix = "Call:")
 veganCovEllipse(cov, center = c(0, 0), scale = 1, npoints = 100)
+veganMahatrans(x, s2, tol = 1e-8)
 hierParseFormula(formula, data)
 }
 
@@ -77,11 +81,19 @@ hierParseFormula(formula, data)
 
   \code{centroids.cca} finds the weighted centroids of variables.
 
-  \code{permuted.index} creates permuted index of length \code{n}
-  possibly stratified within \code{strata}. This is the basic \pkg{vegan}
-  permutation function that should be replaced with more powerful
-  \code{\link{shuffle}} in the future releases of \pkg{vegan}, and all
-  new functions should use \code{\link{shuffle}}.
+  \code{getPermuteMatrix} interprets user input and returns a
+  permutation matrix where each row gives indices of observations for
+  a permutation. The input \code{perm} can be a single number for the
+  number of simple permutations, a result of
+  \code{\link[permute]{how}} defining a permutation scheme or a
+  permutation matrix. Function \code{permuted.index} was used earlier
+  to generate permutations, but it is now deprecated.
+
+  \code{howHead} formats the permutation scheme of
+  \code{\link[permute]{how}} for display. The formatting is more
+  compact than the one used in \code{print} in the \pkg{permute}
+  package, and shows only non-default choices. This output is normally
+  used when printing the results of \pkg{vegan} permutations.
 
   \code{pasteCall} prints the function call so that it is nicely wrapped
   in \code{\link[utils]{Sweave}} output.
@@ -89,6 +101,12 @@ hierParseFormula(formula, data)
   \code{veganCovEllipse} finds the coordinates for drawing a
   covariance ellipse.
 
+  \code{veganMahatrans} transforms data matrix so that its Euclidean
+  distances are Mahalanobis distances. The input data \code{x} must be
+  a matrix centred by columns, and \code{s2} its covariance matrix. If
+  \code{s2} is not given, covariance matrix is found from \code{x}
+  within the function.
+
   \code{hierParseFormula} returns a list of one matrix (left hand side)
   and a model frame with factors representing hierarchy levels 
   (right hand side) to be used in \code{\link{adipart}}, 
diff --git a/man/vegandocs.Rd b/man/vegandocs.Rd
index 81a7dad..c7216a4 100644
--- a/man/vegandocs.Rd
+++ b/man/vegandocs.Rd
@@ -44,11 +44,11 @@ vegandocs(doc = c("NEWS", "ONEWS", "ChangeLog", "FAQ-vegan.pdf",
 
    \item \code{decision-vegan}: a \code{\link{vignette}} discussing
      design decisions in \pkg{vegan}.  Currently this discusses
-     implementing nestedness temperature (\code{\link{nestedtemp}}),
-     backtracking algorithm in community null models
-     (\code{\link{commsimulator}}), scaling of RDA results, and why WA
-     scores are used as default instead of LC scores in constrained
-     ordination.
+     parallel processing in \pkg{vegan}, implementing nestedness
+     temperature (\code{\link{nestedtemp}}), backtracking algorithm in
+     community null models (\code{\link{make.commsim}}), scaling of
+     RDA results, and why WA scores are used as default instead of LC
+     scores in constrained ordination.
 
    \item \code{partitioning}: Detailed description of variation
      partitioning schemes used in \code{\link{varpart}}.
diff --git a/man/vegdist.Rd b/man/vegdist.Rd
index 14e888d..43b74db 100644
--- a/man/vegdist.Rd
+++ b/man/vegdist.Rd
@@ -28,7 +28,7 @@
     \code{"euclidean"}, \code{"canberra"}, \code{"bray"}, \code{"kulczynski"},
      \code{"jaccard"}, \code{"gower"}, \code{"altGower"}, \code{"morisita"}, 
      \code{"horn"}, \code{"mountford"}, \code{"raup"} , \code{"binomial"}, 
-     \code{"chao"} or \code{"cao"}.}
+     \code{"chao"}, \code{"cao"} or \code{"mahalanobis"}.}
   \item{binary}{Perform presence/absence standardization before analysis
     using \code{\link{decostand}}.}
   \item{diag}{Compute diagonals. }
@@ -187,7 +187,10 @@
   and Crick originally suggested that sampling probabilities should be
   proportional to species frequencies (Chase et al. 2011). A simulation
   approach with unequal species sampling probabilities is implemented in
-  \code{\link{raupcrick}} function following Chase et al. (2011).
+  \code{\link{raupcrick}} function following Chase et al. (2011).  The
+  index can be also used for transposed data to give a probabilistic
+  dissimilarity index of species co-occurrence (identical to Veech
+  2013).
   
   Chao index tries to take into account the number of unseen species
   pairs, similarly as in \code{method = "chao"} in
@@ -209,6 +212,13 @@
   Morisita index can be used with genuine count data (integers) only. Its
   Horn--Morisita variant is able to handle any abundance data.
 
+  Mahalanobis distances are Euclidean distances of a matrix where
+  columns are centred, have unit variance, and are uncorrelated.  The
+  index is not commonly used for community data, but it is sometimes
+  used for environmental variables. The calculation is based on
+  transforming data matrix and then using Euclidean distances
+  following Mardia et al. (1979).
+
   Euclidean and Manhattan dissimilarities are not good in gradient
   separation without proper standardization but are still included for
   comparison and special needs.
@@ -272,10 +282,16 @@
 
   Krebs, C. J. (1999). \emph{Ecological Methodology.} Addison Wesley Longman.
 
+  Mardia, K.V., Kent, J.T. and Bibby, J.M. (1979). \emph{Multivariate analysis}.
+  Academic Press.
+
   Mountford, M. D. (1962). An index of similarity and its application to
   classification problems. In: P.W.Murphy (ed.),
   \emph{Progress in Soil Zoology}, 43--50. Butterworths.
 
+  Veech, J. A. (2013). A probabilistic model for analysing species
+  co-occurrence. \emph{Global Ecology and Biogeography} 22, 252--260. 
+
   Wolda, H. (1981). Similarity indices, sample size and
   diversity. \emph{Oecologia} 50, 296--302.
 }
diff --git a/man/vegemite.Rd b/man/vegemite.Rd
index 17f1a54..171bed4 100644
--- a/man/vegemite.Rd
+++ b/man/vegemite.Rd
@@ -81,10 +81,14 @@ coverscale(x, scale=c("Braun.Blanquet", "Domin", "Hult", "Hill", "fix","log"),
   also for species).  When \code{use} is an object from
   \code{\link{hclust}}, \code{\link[cluster]{agnes}} or a
   \code{\link{dendrogram}}, the sites are ordered similarly as in the
-  cluster dendrogram.  Function \code{tabasco} re-orders the
-  dendrogram if \code{Rowv = TRUE} or \code{Rowv} is a vector. Such
-  re-ordering is not available for \code{vegemite}, but it can be done
-  by hand using \code{\link{reorder.dendrogram}}.  In all cases where
+  cluster dendrogram.  Function \code{tabasco} re-orders the dendrogram
+  if \code{Rowv = TRUE} or \code{Rowv} is a vector. Such re-ordering is
+  not available for \code{vegemite}, but it can be done by hand using
+  \code{\link{reorder.dendrogram}} or \code{\link{reorder.hclust}}.
+  Please note that \code{\link{dendrogram}} and \code{\link{hclust}}
+  reordering can differ: unweighted means of merged branches are used in
+  \code{\link{dendrogram}}, but weighted means (= means of leaves of the
+  cluster) are used in \code{\link{reorder.hclust}}.  In all cases where
   species scores are missing, species are ordered by their weighted
   averages (\code{\link{wascores}}) on site order.
 
diff --git a/man/wcmdscale.Rd b/man/wcmdscale.Rd
index ee8c777..433e395 100644
--- a/man/wcmdscale.Rd
+++ b/man/wcmdscale.Rd
@@ -92,7 +92,9 @@ wcmdscale(d, k, eig = FALSE, add = FALSE, x.ret = FALSE, w)
 
 \seealso{The function is modelled after \code{\link{cmdscale}}, but adds
   weights (hence name) and handles negative eigenvalues differently.
-  Other multidimensional scaling methods are \code{\link{monoMDS}}, and
+  \code{\link{eigenvals.wcmdscale}} and
+  \code{\link{stressplot.wcmdscale}} are some specific methods. Other
+  multidimensional scaling methods are \code{\link{monoMDS}}, and
   \code{\link[MASS]{isoMDS}} and \code{\link[MASS]{sammon}} in package
   \pkg{MASS}.  }
 
diff --git a/src/nestedness.c b/src/nestedness.c
index 82d332a..86b659e 100644
--- a/src/nestedness.c
+++ b/src/nestedness.c
@@ -90,7 +90,7 @@ void quasiswap(int *m, int *nr, int *nc)
 void trialswap(int *m, int *nr, int *nc, int *thin)
 {
 
-    int i, a, b, c, d, row[2], col[2];
+    int i, a, b, c, d, row[2], col[2], sX;
 
     GetRNGstate();
 
@@ -101,13 +101,16 @@ void trialswap(int *m, int *nr, int *nc, int *thin)
 	b = INDX(row[0], col[1], *nr);
 	c = INDX(row[1], col[0], *nr);
 	d = INDX(row[1], col[1], *nr);
-	if (m[a] == 1 && m[d] == 1 && m[b] == 0 && m[c] == 0) {
+        /* only two filled items can be swapped */
+	sX = m[a] + m[b] + m[c] + m[d];
+	if (sX != 2)
+	    continue;
+	if (m[a] == 1 && m[d] == 1) {
 	    m[a] = 0;
 	    m[d] = 0;
 	    m[b] = 1;
 	    m[c] = 1;
-	} else if (m[c] == 1 && m[b] == 1 && m[d] == 0 &&
-		   m[a] == 0) {
+	} else if (m[c] == 1 && m[b] == 1) {
 	    m[a] = 1;
 	    m[d] = 1;
 	    m[b] = 0;
@@ -126,7 +129,7 @@ void trialswap(int *m, int *nr, int *nc, int *thin)
 void swap(int *m, int *nr, int *nc, int *thin)
 {
 
-    int i, a, b, c, d, row[2], col[2];
+    int i, a, b, c, d, row[2], col[2], sX;
 
     GetRNGstate();
 
@@ -138,14 +141,17 @@ void swap(int *m, int *nr, int *nc, int *thin)
 	    b = INDX(row[0], col[1], *nr);
 	    c = INDX(row[1], col[0], *nr);
 	    d = INDX(row[1], col[1], *nr);
-	    if (m[a] == 1 && m[d] == 1 && m[b] == 0 && m[c] == 0) {
+	    sX = m[a] + m[b] + m[c] + m[d];
+	    if (sX != 2)
+		continue;
+	    if (m[a] == 1 && m[d] == 1) {
 		m[a] = 0;
 		m[d] = 0;
 		m[b] = 1;
 		m[c] = 1;
 		break;
 	    } 
-	    if (m[c] == 1 && m[b] == 1 && m[d] == 0 && m[a] == 0) {
+	    if (m[c] == 1 && m[b] == 1) {
 		m[a] = 1;
 		m[d] = 1;
 		m[b] = 0;
@@ -169,50 +175,115 @@ void swap(int *m, int *nr, int *nc, int *thin)
  * way. The input is a 2x2 submatrix 'sm'.
 */
 
-double isDiag(double *sm)
+int isDiag(int *sm, int *change)
 {
     int i, sX;
-    double choose[2];
+    int retval;
 
     /* sX: number of non-zero cells */
     for (i = 0, sX = 0; i < 4; i++)
 	    if (sm[i] > 0)
 		    sX++;
 
-    /* Smallest diagonal and antidiagonal element */
-    choose[0] = (sm[1] < sm[2]) ? sm[1] : sm[2];
-    choose[1] = (sm[0] < sm[3]) ? -sm[0] : -sm[3]; 
-
-    if (sX == 4) {
-        /* Either choose could be returned, but RNG is not needed,
-	 * because sm already is in random order, and we always return
-	 * choose[0] */
-	    return choose[0];
-    } 
-    if ((sm[0] == 0 && sm[1] > 0 && sm[2] > 0 && sm[3] == 0) ||
-	(sm[0] == 0 && sm[1] > 0 && sm[2] > 0 && sm[3] > 0) ||
-	(sm[0] > 0 && sm[1] > 0 && sm[2] > 0 && sm[3] == 0))
-	    return choose[0];
-    if ((sm[0] > 0 && sm[1] == 0 && sm[2] == 0 && sm[3] > 0) ||
-	(sm[0] > 0 && sm[1] == 0 && sm[2] > 0 && sm[3] > 0) ||
-	(sm[0] > 0 && sm[1] > 0 && sm[2] == 0 && sm[3] > 0))
-	    return choose[1];
-    if (sX < 2 ||
-	(sm[0] == 0 && sm[1] == 0 && sm[2] > 0 && sm[3] > 0) ||
-	(sm[0] > 0 && sm[1] > 0 && sm[2] == 0 && sm[3] == 0) ||
-	(sm[0] == 0 && sm[1] > 0 && sm[2] == 0 && sm[3] > 0) ||
-	(sm[0] > 0 && sm[1] == 0 && sm[2] > 0 && sm[3] == 0))
-	    return 0; 
-    /* never reach this but pacify a pedantic compiler */
-    else
-	 return 0;
+    /* default values */
+    retval = 0;
+    *change = 0;
+    switch (sX) {
+    case 0:
+    case 1:
+	    /* nothing to swap*/
+	    break;
+    case 2:
+	    /* diagonal and antidiagonal swappable */
+	    if (sm[1] > 0 && sm[2] > 0) {
+		    retval = (sm[1] < sm[2]) ? sm[1] : sm[2];
+		    if (sm[1] != sm[2])
+			    *change = 1;
+	    }
+	    else if (sm[0] > 0 && sm[3] > 0) { 
+		    retval = (sm[0] < sm[3]) ? -sm[0] : -sm[3];
+		    if (sm[0] != sm[3])
+			    *change = 1;
+	    } 
+	    break;
+    case 3:
+	    /* always swappable: case depends on the empty corner */
+	    if (sm[0] == 0 || sm[3] == 0) {
+		    retval = (sm[1] < sm[2]) ? sm[1] : sm[2];
+		    if (sm[1] == sm[2])
+			    *change = -1;
+	    } else {
+		    retval = (sm[0] < sm[3]) ? -sm[0] : -sm[3];
+		    if (sm[0] == sm[3])
+			    *change = -1;
+	    }
+	    break;
+    case 4:
+	    /* always swappable: return diagonal case */
+	    retval = (sm[1] < sm[2]) ? sm[1] : sm[2];
+	    if (sm[1] == sm[2])
+		    *change = -2;
+	    else
+		    *change = -1;
+	    break;
+    }
+    return retval;
 }
 
-void swapcount(double *m, int *nr, int *nc, int *thin)
+
+/* isDiagFill: Largest swappable element and swap policies for
+ * fill-neutral swapping
+ */
+
+int isDiagFill(int *sm)
 {
-    int row[2], col[2], k, ij[4], changed, oldn, newn, 
+    int i, sX;
+    int retval;
+
+    /* sX: number of non-zero cells */
+    for (i = 0, sX = 0; i < 4; i++)
+	    if (sm[i] > 0)
+		    sX++;
+
+    retval = 0;
+    switch (sX) {
+    case 0:
+    case 1:
+	    /* nothing to swap*/
+	    break;
+    case 2:
+	    /* equal diagonal and antidiagonal fill-neutrally
+	     * swappable */
+	    if ((sm[0] == sm[3]) && (sm[1] == sm[2])) {
+		    if (sm[1] > 0)
+			    retval = (sm[1] < sm[2]) ? sm[1] : sm[2];
+		    else
+			    retval = (sm[0] < sm[3]) ? -sm[0] : -sm[3];
+	    }
+	    break;
+    case 3:
+	    /* fill-neutrally swappable if diagonal & antidiagonal
+	     * unequal */
+	    if ((sm[0] != sm[3]) && (sm[1] != sm[2])) {
+		    if (sm[0] == 0 || sm[3] == 0) {
+			    retval = (sm[1] < sm[2]) ? sm[1] : sm[2];
+		    } else {
+			    retval = (sm[0] < sm[3]) ? -sm[0] : -sm[3];
+		    }
+	    }
+	    break;
+    case 4:
+	    /* never swappable (minelement-1 always swappable) */
+	    break;
+    }
+    return retval;
+}
+
+void swapcount(int *m, int *nr, int *nc, int *thin)
+{
+    int row[2], col[2], k, ij[4], changed, 
 	pm[4] = {1, -1, -1, 1} ;
-    double sm[4], ev;
+    int sm[4], ev;
 
     GetRNGstate();
 
@@ -228,21 +299,11 @@ void swapcount(double *m, int *nr, int *nc, int *thin)
 	for (k = 0; k < 4; k ++)
 	    sm[k] = m[ij[k]];
 	/* The largest value that can be swapped */
-	ev = isDiag(sm);
-	if (ev != 0) {
-	    /* Check that the fill doesn't change*/
-	    for (k = 0, oldn = 0, newn = 0; k < 4; k++) {
-		if(sm[k] > 0)
-		    oldn++;
-		if (sm[k] + pm[k]*ev > 0)
-		    newn++;
-	    }
-	    /* Swap */
-	    if (oldn == newn) {
+	ev = isDiagFill(sm);
+ 	if (ev != 0) { 
 		for (k = 0; k < 4; k++)
-		    m[ij[k]] += pm[k]*ev;
+			m[ij[k]] += pm[k]*ev;
 		changed++;
-	    }
 	}
     }
 
@@ -256,11 +317,11 @@ void swapcount(double *m, int *nr, int *nc, int *thin)
  * is similar as quasiswap for presence/absence data.
  */
 
-void rswapcount(double *m, int *nr, int *nc, int *mfill)
+void rswapcount(int *m, int *nr, int *nc, int *mfill)
 {
     int row[2], col[2], i, k, ij[4], n, change, cfill,
        pm[4] = {1, -1, -1, 1} ;
-    double sm[4], ev;
+    int sm[4], ev;
 
     /* Get the current fill 'cfill' */
     n = (*nr) * (*nc);
@@ -283,15 +344,8 @@ void rswapcount(double *m, int *nr, int *nc, int *mfill)
 	for (k = 0; k < 4; k ++)
 	    sm[k] = m[ij[k]];
 	/* The largest value that can be swapped */
-	ev = isDiag(sm);
+	ev = isDiag(sm, &change);
 	if (ev != 0) {
-	    /* Check the change in fills */
-	    for (k = 0, change=0; k < 4; k++) {
-		if(sm[k] > 0)
-		    change--;
-		if (sm[k] + pm[k]*ev > 0)
-		    change++;
-	    }
 	    /* Fill does not change, but swap to bail out from
 	     * non-swappable configurations */
 	    if (change == 0) {
@@ -311,30 +365,46 @@ void rswapcount(double *m, int *nr, int *nc, int *mfill)
 
 /* 'isDiagSimple' needed for 'abuswap' */
 
-double isDiagSimple(double *sm)
+int isDiagSimple(double *sm)
 {
     int i, sX;
+    int retval = 0;
 
     /* sX: number of non-zero cells */
     for (i = 0, sX = 0; i < 4; i++)
 	if (sm[i] > 0)
 	    sX++;
-
-    if (sX == 4) {
-	return 1;
+    
+    switch(sX) {
+    case 0:
+    case 1:
+	    /* never swappable */
+	    retval = 0;
+	    break;
+    case 2:
+	    /* diagonal and antidiagonal swappable */
+	    if ((sm[1] > 0 && sm[2] > 0) || (sm[0] > 0 && sm[3] > 0))
+		    retval = 1;
+	    else
+		    retval = 0;
+	    break;
+    case 3:
+	    /* never swappable */
+	    retval = 0;
+	    break;
+    case 4:
+	    /* always swappable */
+	    retval = 1;
+	    break;
     }
-    if ((sm[0] == 0 && sm[1] > 0 && sm[2] > 0 && sm[3] == 0) ||
-	(sm[0] > 0 && sm[1] == 0 && sm[2] == 0 && sm[3] > 0))
-	return 1;
-    else
-	return 0;
+    return retval;
 }
 
 /* 'abuswap' to do Hardy 2008 J Ecol 96: 914-926 */
 
 void abuswap(double *m, int *nr, int *nc, int *thin, int *direct)
 {
-    int row[2], col[2], k, ij[4], changed, ev ;
+    int row[2], col[2], k, ij[4], changed, ev;
     double sm[4];
 
     GetRNGstate();
diff --git a/src/vegdist.c b/src/vegdist.c
index 1a4b847..2ee63ec 100644
--- a/src/vegdist.c
+++ b/src/vegdist.c
@@ -44,6 +44,7 @@
 #define CHAO 13
 #define GOWERDZ 14
 #define CAO 15
+#define MAHALANOBIS 16
 #define MATCHING 50
 #define NOSHARED 99
 
@@ -125,7 +126,9 @@ double veg_gowerDZ(double *x, int nr, int nc, int i1, int i2)
      return dist;
 }
 
-/* Euclidean distance: duplicates base R */
+/* Euclidean distance: duplicates base R. If Mahalanobis
+ * transformation was performed in the calling routine, this will
+ * give Mahalanobis distances. */
 
 double veg_euclidean(double *x, int nr, int nc, int i1, int i2)
 {
@@ -631,6 +634,7 @@ void veg_distance(double *x, int *nr, int *nc, double *d, int *diag, int *method
 	distfun = veg_manhattan;
 	break;
     case EUCLIDEAN:
+    case MAHALANOBIS:
 	distfun = veg_euclidean;
 	break;
     case CANBERRA:
diff --git a/vignettes/FAQ-vegan.pdf b/vignettes/FAQ-vegan.pdf
index 34554d2..0379210 100644
Binary files a/vignettes/FAQ-vegan.pdf and b/vignettes/FAQ-vegan.pdf differ
diff --git a/vignettes/FAQ-vegan.texi b/vignettes/FAQ-vegan.texi
index 1e17678..b5c53f5 100644
--- a/vignettes/FAQ-vegan.texi
+++ b/vignettes/FAQ-vegan.texi
@@ -8,7 +8,7 @@
 @setfilename FAQ- at pkg{vegan}.info
 @settitle @pkg{vegan} FAQ
 @setchapternewpage on
- at set FAQ_YEAR 2013
+ at set FAQ_YEAR 2014
 @afourpaper
 @c %**end of header
 
@@ -16,7 +16,6 @@
 @ifnottex
 This document contains answers to some of the most frequently asked
 questions about R package @pkg{vegan}. 
-This is version of $Date: 2013-09-11 11:59:36 +0300 (Wed, 11 Sep 2013) $.
 @end ifnottex
 
 @quotation
@@ -40,7 +39,6 @@ Copyright @copyright{} 2008-2013 Jari Oksanen
 @titlepage
 @title @pkg{vegan} @acronym{FAQ}
 @subtitle Frequently Asked Questions on R package @pkg{vegan}
- at subtitle Version of $Date: 2013-09-11 11:59:36 +0300 (Wed, 11 Sep 2013) $ 
 @author Jari Oksanen
 
 @vskip 0pt plus 1fill
@@ -62,30 +60,29 @@ Copyright @copyright{} 2008-2013 Jari Oksanen
 
 
 @menu
-* Introduction::                
-* Ordination::                  
-* Other analysis methods ::     
+* Introduction::
+* Ordination::
+* Other analysis methods ::
 @end menu
 
 @node Introduction, Ordination, Top, Top
 @chapter Introduction
 
 @menu
-* What is @pkg{vegan}?::        
-* What is R?::                  
-* How to obtain @pkg{vegan} and R?::  
-* What R packages @pkg{vegan} depends on?::  
-* What other packages are available for ecologists?::  
-* What other documentation is available for @pkg{vegan}?::  
-* Is there a Graphical User Interface (GUI) for @pkg{vegan}?::  
-* How to cite @pkg{vegan}?::    
-* How to build @pkg{vegan} from sources?::  
-* Are there binaries for devel versions?::  
-* Can I use @pkg{vegan} in Mac?::  
-* How to report a bug in @pkg{vegan}?::  
-* Is it a bug or a feature?::   
-* Can I contribute to @pkg{vegan}?::  
-* Can I have write access to @pkg{vegan} repository?::  
+* What is @pkg{vegan}?::
+* What is R?::
+* How to obtain @pkg{vegan} and R?::
+* What R packages @pkg{vegan} depends on?::
+* What other packages are available for ecologists?::
+* What other documentation is available for @pkg{vegan}?::
+* Is there a Graphical User Interface (GUI) for @pkg{vegan}?::
+* How to cite @pkg{vegan}?::
+* How to build @pkg{vegan} from sources?::
+* Are there binaries for devel versions?::
+* Can I use @pkg{vegan} in Mac?::
+* How to report a bug in @pkg{vegan}?::
+* Is it a bug or a feature?::
+* Can I contribute to @pkg{vegan}?::
 @end menu
 
 @node What is @pkg{vegan}?, What is R?, Introduction, Introduction
@@ -121,37 +118,30 @@ official part of the @uref{http://www.gnu.org/, @acronym{GNU}} project
 Both R and latest release version of @pkg{vegan} can be obtained through
 @uref{http://cran.r-project.org,,CRAN}. Unstable development version of
 @pkg{vegan} can be obtained through
- at uref{http://r-forge.r-project.org/projects/vegan/,,R-Forge}.
-
-
+ at uref{https://github.com/vegandevs/vegan,,GitHub}. Formerly @pkg{vegan}
+was developed in 
+ at uref{http://r-forge.r-project.org/projects/vegan/,,R-Forge},
+but after moving to @uref{https://github.com/vegandevs/vegan,,GitHub}
+the R-Forge repository may be out of date.
 
 @node What R packages @pkg{vegan} depends on?, What other packages are available for ecologists?, How to obtain @pkg{vegan} and R?, Introduction
 @section What R packages @pkg{vegan} depends on?
 
 @pkg{Vegan} depends on the @pkg{permute} package which will provide
-advanced and flexible permutation routines for vegan (but currently only
-a small part of functions use @pkg{permute}). The @pkg{permute} package
-is developed together with @pkg{vegan} in
- at uref{http://vegan.r-forge.r-project.org/,,R-Forge}. 
+advanced and flexible permutation routines for @pkg{vegan}. The
+ at pkg{permute} package is developed together with @pkg{vegan} in
+ at uref{https://github.com/gavinsimpson/permute,,GitHub}. 
 
 Some individual @pkg{vegan} functions depend on packages @pkg{MASS},
 @pkg{mgcv}, @pkg{cluster}, @pkg{lattice} and @pkg{tcltk}.  These all are
 base or recommended R packages that should be available in every R
-installation.  In addition, some @pkg{vegan} functions @code{require}
-non-standard R packages.  @pkg{Vegan} declares these packages only as
-suggested ones, and you can install @pkg{vegan} and use most of its
-functions without these packages.  The non-standard packages needed by
-some @pkg{vegan} functions are:
- at itemize
+installation. @pkg{Vegan} declares these as suggested or imported
+packages, and you can install @pkg{vegan} and use most of its functions
+without these packages.
 
- at item Package @pkg{scatterplot3d}
-is needed by @code{ordiplot3d}
-
- at item Package @pkg{rgl}
-is needed by @code{ordirgl}
-and @code{rgl.isomap}
-
- at end itemize
+ at pkg{Vegan} is accompanied with a supporting package @pkg{vegan3d} for
+three-dimensional and dynamic plotting. The @pkg{vegan3d} package needs
+non-standard packages @pkg{rgl} and @pkg{scatterplot3d}.
 
 @node What other packages are available for ecologists?, What other documentation is available for @pkg{vegan}?, What R packages @pkg{vegan} depends on?, Introduction
 @section What other packages are available for ecologists?
@@ -195,7 +185,7 @@ Web documents outside the package include:
 @itemize
 
 @item
- at uref{http://vegan.r-forge.r-project.org/}: @pkg{vegan} homepage.
+ at uref{https://github.com/vegandevs/vegan}: @pkg{vegan} homepage.
 @item
 @uref{http://cc.oulu.fi/~jarioksa/opetus/metodi/vegantutor.pdf}: @pkg{vegan}
 tutorial.
@@ -237,7 +227,9 @@ and MacOS X).
 
 @uref{http://r-forge.r-project.org/projects/vegan/,,R-Forge} runs daily
 tests on the devel package, and if passed, it builds source package
-together with Windows and MacOS X binaries. You can install those
+together with Windows binaries. However, the R-Forge may be out of date,
+because @pkg{vegan} is mainly developed in
+ at uref{https://github.com/vegandevs/vegan,,GitHub}. You can install R-Forge
 packages within R with command 
 @code{install.packages("vegan", repos="http://r-forge.r-project.org/")}.
 If you use GUI menu entry, you must select or define the R-Forge
@@ -251,16 +243,16 @@ Yes, you can, and @pkg{vegan} binaries are available for Mac through
 need to install extra tools packages available in
 @uref{http://cran.r-project.org/bin/macosx/tools/,,MacOS tools} pages:
 If you use function such as @code{orditkplot} that need @code{Tcl/Tk}
-you may need to install @code{tcltk} package.  If you use @pkg{vegan}
-binaries from other places than from
- at uref{http://cran.r-project.org,,CRAN}, you may also need to install
- at code{gfortran} package.
+you may need to install @code{tcltk} package. No Mac binaries of
+development versions are available in any repository we know. 
 
 @node How to report a bug in @pkg{vegan}?, Is it a bug or a feature?, Can I use @pkg{vegan} in Mac?, Introduction
 @section How to report a bug in @pkg{vegan}?
 
 If you think you have found a bug in @pkg{vegan}, you should report it to
- at pkg{vegan} maintainers or developers.  The bug report should be so detailed
+ at pkg{vegan} maintainers or developers.  The preferred forum to report
+bugs is @uref{https://github.com/vegandevs/vegan/issues,,GitHub}. The
+bug report should be so detailed 
 that the bug can be replicated and corrected.  Preferably, you should
 send an example that causes a bug.  If it needs a data set that is not
 available in R, you should send a minimal data set as well. You also
@@ -272,10 +264,6 @@ Bug reports are welcome: they are the only way to make @pkg{vegan} non-buggy.
 Please note that you shall not send bug reports to R mailing lists,
 since @pkg{vegan} is not a standard R package.
 
-There also is a bug reporting tool at
- at uref{http://r-forge.r-project.org/projects/vegan/,,R-Forge}, but you
-need to register as a site user to report bugs (this is site policy).
-
 @node Is it a bug or a feature?, Can I contribute to @pkg{vegan}?, How to report a bug in @pkg{vegan}?, Introduction
 @section Is it a bug or a feature?
 
@@ -288,66 +276,48 @@ instance, function @code{vegdist} always calculates quantitative
 indices (when this is possible). If you expect it to calculate a
 binary index, you should use argument @code{binary = TRUE}.
 
- at node Can I contribute to @pkg{vegan}?, Can I have write access to @pkg{vegan} repository?, Is it a bug or a feature?, Introduction
+ at node Can I contribute to @pkg{vegan}?,  , Is it a bug or a feature?, Introduction
 @section Can I contribute to @pkg{vegan}?
 
 @pkg{Vegan} is dependent on user contribution.  All feedback is welcome.  If
-you have problem with @pkg{vegan}, it may be as simple as incomplete
-documentation, and we'll do our best to improve the documents.
+you have problems with @pkg{vegan}, it may be as simple as incomplete
+documentation, and we shall do our best to improve the documents.
 
 Feature requests also are welcome, but they are not necessarily
 fulfilled.  A new feature will be added if it is easy to do and it looks
-useful to me or in general, or if you submit code. 
-
-Contributed code and functions are welcome and more certain to be
-included than mere requests.  However, not all functions will be added,
-but I they must be suitable for @pkg{vegan}.  We also audit the code, and
-typically we edit the code in @pkg{vegan} style for easier maintenance.  All
-included contributions will be credited.
-
- at node Can I have write access to @pkg{vegan} repository?,  , Can I contribute to @pkg{vegan}?, Introduction
- at section Can I have write access to @pkg{vegan} repository?
-
-The @pkg{vegan} development happens mainly in
- at uref{http://r-forge.r-project.org/,,R-Forge} which uses subversion for
-version control.  Subversion is a centralized version control system,
-and only @pkg{vegan} developers can have write access to the central
-repository. However, the @uref{http://r-forge.r-project.org/,,R-Forge}
-is mirrored in
- at uref{https://github.com/jarioksa/vegan.git,,GitHub}. This is a
-distributed version control system and freely accessible for anybody. We
-suggest you develop your own ideas in
- at uref{https://github.com/jarioksa/vegan.git,,GitHub} and send a pull
-request to us for incorporating your changes in @pkg{vegan} releases.
+useful, or if you submit code. 
+
+If you can write code yourself, the best forum to contribute to vegan is
+ at uref{https://github.com/vegandevs/vegan,,GitHub}. 
 
 @node Ordination, Other analysis methods , Introduction, Top
 @chapter Ordination
 
 @menu
-* I have only numeric and positive data but @pkg{vegan} still complains::  
-* Can I analyse binary or cover class data?::  
-* Why dissimilarities in @pkg{vegan} differ from other sources?::  
-* Why NMDS stress is sometimes 0.1 and sometimes 10?::  
-* I get zero stress but no convergent solutions in @code{metaMDS}::  
-* Zero dissimilarities in isoMDS::  
-* I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores::  
-* Where can I find numerical scores of ordination axes?::  
-* How the RDA results are scaled?::  
-* cca fails with ``data.frame expected'' or ``"site.env" missing''::  
-* Ordination fails with ``Error in La.svd''::  
-* Variance explained by ordination axes::  
-* Can I have random effects in constrained ordination or in @code{adonis}?::  
-* Is it possible to have passive points in ordination?::  
-* Class variables and dummies::  
-* How are environmental arrows scaled?::  
-* I want to use Helmert or sum contrasts::  
-* What are aliased variables and how to see them?::  
-* Plotting aliased variables::  
-* Constrained permutations in @pkg{vegan}::  
-* How to use different plotting symbols in ordination graphics?::  
-* How to avoid cluttered ordination graphs?::  
-* Can I flip an axis in ordination diagram?::  
-* Can I zoom into an ordination plot?::  
+* I have only numeric and positive data but @pkg{vegan} still complains::
+* Can I analyse binary or cover class data?::
+* Why dissimilarities in @pkg{vegan} differ from other sources?::
+* Why NMDS stress is sometimes 0.1 and sometimes 10?::
+* I get zero stress but no convergent solutions in @code{metaMDS}::
+* Zero dissimilarities in isoMDS::
+* I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores::
+* Where can I find numerical scores of ordination axes?::
+* How the RDA results are scaled?::
+* cca fails with ``data.frame expected'' or ``"site.env" missing''::
+* Ordination fails with ``Error in La.svd''::
+* Variance explained by ordination axes::
+* Can I have random effects in constrained ordination or in @code{adonis}?::
+* Is it possible to have passive points in ordination?::
+* Class variables and dummies::
+* How are environmental arrows scaled?::
+* I want to use Helmert or sum contrasts::
+* What are aliased variables and how to see them?::
+* Plotting aliased variables::
+* Restricted permutations in @pkg{vegan}::
+* How to use different plotting symbols in ordination graphics?::
+* How to avoid cluttered ordination graphs?::
+* Can I flip an axis in ordination diagram?::
+* Can I zoom into an ordination plot?::
 @end menu
 
 @node  I have only numeric and positive data but @pkg{vegan} still complains, Can I analyse binary or cover class data?, Ordination, Ordination
@@ -427,18 +397,17 @@ with @code{monoMDS} and in principal coordinates analysis
 
 Function @code{metaMDS} uses function @code{monoMDS} as its default
 method for @acronym{NMDS}, and this function can handle zero
-dissimilarities. The alternative function @code{isoMDS} was the only
-choice before @pkg{vegan} 2.0-0, and it cannot handle zero dissimilarities. If
-you want to use @code{isoMDS}, you can use argument @code{zerodist =
-"add"} in @code{metaMDS} to handle zero dissimilarities.  With this
-argument, zero dissimilarities are replaced with a small above zero
-value, and they can be handled in @code{isoMDS}.  This is a kluge, and
-some people do not like this. A more principal solution is to remove
-duplicate sites using R command @code{unique}.  However, after some
-standardizations or with some dissimilarity indices, originally
-non-unique sites can have zero dissimilarity, and you have to resort to
-the kluge (or work harder with your data). Usually it is better to use
- at code{monoMDS}.
+dissimilarities. Alternative function @code{isoMDS} cannot handle zero
+dissimilarities. If you want to use @code{isoMDS}, you can use argument
+ at code{zerodist = "add"} in @code{metaMDS} to handle zero
+dissimilarities.  With this argument, zero dissimilarities are replaced
+with a small positive value, and they can be handled in @code{isoMDS}.
+This is a kluge, and some people do not like this. A more principled
+solution is to remove duplicate sites using R command @code{unique}.
+However, after some standardizations or with some dissimilarity indices,
+originally non-unique sites can have zero dissimilarity, and you have to
+resort to the kluge (or work harder with your data). Usually it is
+better to use @code{monoMDS}.
 
 @node I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores, Where can I find numerical scores of ordination axes?, Zero dissimilarities in isoMDS, Ordination
 @section I have heard that you cannot fit environmental vectors or surfaces to NMDS results which only have rank-order scores
@@ -596,11 +565,10 @@ In this way, the first terms can serve in a similar role as
 random effects, although they are fitted in the same way as all other
 terms, and strictly speaking they are fixed terms.
 
-The permutation tests can usually have a @code{strata} argument which
-restricts the permutations within levels of a factor given in the
-argument. This can be used to restrict the permutations within levels of
-factor regarded as a random term.  More structured permutations are
-available with the @pkg{permute} package.
+All permutation tests in @pkg{vegan} are based on the @pkg{permute}
+package that allows constructing various restricted permutation
+schemes. For instance, you can set levels of @code{plots} or
+ at code{blocks} for a factor regarded as a random term.
 
 A major reason why real random effects models are impossible in most
 @pkg{vegan} functions is that their tests are based on the permutation
@@ -701,7 +669,7 @@ redundant levels of factors or whole variables.
 variables.  If you only want to see the names of aliased variables or
 levels in solution @code{sol}, use @code{alias(sol, names.only=TRUE)}.
 
- at node Plotting aliased variables, Constrained permutations in @pkg{vegan}, What are aliased variables and how to see them?, Ordination
+ at node Plotting aliased variables, Restricted permutations in @pkg{vegan}, What are aliased variables and how to see them?, Ordination
 @section Plotting aliased variables
 
 You can fit vectors or class centroids for aliased variables using
@@ -709,22 +677,17 @@ You can fit vectors or class centroids for aliased variables using
 fitting, and the fitted vectors are identical to the vectors in
 correspondence analysis.
 
- at node Constrained permutations in @pkg{vegan}, How to use different plotting symbols in ordination graphics?, Plotting aliased variables, Ordination
- at section Constrained permutations in @pkg{vegan}
-
-You can constrain your permutations within @code{strata} or levels of
-factors. You can use stratified permutations in all @pkg{vegan}
-functions that use permutation, such as @code{adonis}, @code{anosim},
- at code{anova.cca}, @code{mantel}, @code{mrpp}, @code{envfit} and
- at code{protest}.
+ at node Restricted permutations in @pkg{vegan}, How to use different plotting symbols in ordination graphics?, Plotting aliased variables, Ordination
+ at section Restricted permutations in @pkg{vegan}
 
- at pkg{Vegan} will move to use @pkg{permute} package in all its
-permutation tests, but currently this package is only used in
- at code{permutest.betadisper}. The @pkg{permute} package will allow
-restricted permutation designs for time series, line transects, spatial
-grids and blocking factors.
+ at pkg{Vegan} uses @pkg{permute} package in all its permutation tests.
+The @pkg{permute} package will allow restricted permutation designs for
+time series, line transects, spatial grids and blocking factors. The
+construction of restricted permutation schemes is explained in the
+manual page @code{permutations} in @pkg{vegan} and in the documentation
+of the @pkg{permute} package.
 
- at node How to use different plotting symbols in ordination graphics?, How to avoid cluttered ordination graphs?, Constrained permutations in @pkg{vegan}, Ordination
+ at node How to use different plotting symbols in ordination graphics?, How to avoid cluttered ordination graphs?, Restricted permutations in @pkg{vegan}, Ordination
 @section How to use different plotting symbols in ordination graphics?
 
 The default ordination @code{plot} function is intended for fast
@@ -817,12 +780,12 @@ formats, or you can export the graph object back to R and use
 @chapter Other analysis methods
 
 @menu
-* Is there TWINSPAN?::          
-* Why strata do not influence adonis results?::  
-* How is deviance calculated?::  
+* Is there TWINSPAN?::
+* Why restricted permutation does not influence adonis results?::
+* How is deviance calculated?::
 @end menu
 
- at node Is there TWINSPAN?, Why strata do not influence adonis results?, Other analysis methods , Other analysis methods
+ at node Is there TWINSPAN?, Why restricted permutation does not influence adonis results?, Other analysis methods , Other analysis methods
 @section Is there TWINSPAN?
 
 No.  It may be possible to port @acronym{TWINSPAN} to @pkg{vegan}, but it is
@@ -830,16 +793,15 @@ not among the @pkg{vegan} top priorities.  If anybody wants to try porting, I
 will be happy to help.  @acronym{TWINSPAN} has a very permissive
 license, and it would be completely legal to port the function into R.
 
- at node  Why strata do not influence adonis results?, How is deviance calculated?, Is there TWINSPAN?, Other analysis methods
+ at node  Why restricted permutation does not influence adonis results?, How is deviance calculated?, Is there TWINSPAN?, Other analysis methods
 @comment  node-name,  next,  previous,  up
- at section Why strata do not influence adonis results?
-Permutation happens only within @code{strata} and this influences the
-permutation distribution of the statistics and probably the significance
-levels, but @code{strata} do not influence the calculation of the
-statistics.
+ at section Why restricted permutation does not influence adonis results?
+The permutation scheme influences the permutation distribution of the
+statistics and probably the significance levels, but does not influence
+the calculation of the statistics.
 
 
- at node How is deviance calculated?,  , Why strata do not influence adonis results?, Other analysis methods
+ at node How is deviance calculated?,  , Why restricted permutation does not influence adonis results?, Other analysis methods
 @section How is deviance calculated?
 
 Some @pkg{vegan} functions, such as @code{radfit} use base R facility of
diff --git a/vignettes/NEWS.html b/vignettes/NEWS.html
index 4694118..b61ebb8 100644
--- a/vignettes/NEWS.html
+++ b/vignettes/NEWS.html
@@ -1,13 +1,344 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html><head><title>R: vegan News</title>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-<link rel="stylesheet" type="text/css" href="R.css">
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><title>R: vegan News</title>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<link rel="stylesheet" type="text/css" href="R.css" />
 </head><body>
 
-<table width="100%" summary="page for NEWS"><tr><td>NEWS</td><td align="right">R Documentation</td></tr></table>
+<table width="100%" summary="page for NEWS"><tr><td>NEWS</td><td style="text-align: right;">R Documentation</td></tr></table>
 
 <h2>vegan News</h2>
 
+<h3>Changes in version 2.2-0</h3>
+
+
+
+<h4>GENERAL</h4>
+
+
+<ul>
+<li><p> Several <span class="pkg">vegan</span> functions can now use parallel
+processing for slow and repeating calculations. All these
+functions have argument <code>parallel</code>. The argument can be an
+integer giving the number of parallel processes. In unix-alikes
+(Mac OS, Linux) this will launch <code>"multicore"</code> processing
+and in Windows it will set up <code>"snow"</code> clusters as described
+in the documentation of the <span class="pkg">parallel</span> package. If <code>option</code>
+<code>"mc.cores"</code> is set to an integer > 1, this will be used to
+automatically start parallel processing. Finally, the argument
+can also be a previously set up <code>"snow"</code> cluster which will
+be used both in Windows and in unix-alikes. <span class="pkg">Vegan</span> vignette
+on Design decisions explains the implementation (use
+<code>vegandocs("decission")</code>), and <span class="pkg">parallel</span> package has more
+extensive documentation on parallel processing in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>.
+</p>
+<p>The following functions use parallel processing in analysing
+permutation statistics: <code>adonis</code>, <code>anosim</code>,
+<code>anova.cca</code> (and <code>permutest.cca</code>), <code>mantel</code> (and
+<code>mantel.partial</code>), <code>mrpp</code>, <code>ordiareatest</code>,
+<code>permutest.betadisper</code> and <code>simper</code>. In addition,
+<code>bioenv</code> can compare several candidate sets of models in
+parallel, <code>metaMDS</code> can launch several random starts in
+parallel, and <code>oecosimu</code> can evaluate test statistics for
+several null models in parallel.
+</p>
+</li>
+<li><p> All permutation tests are based on the <span class="pkg">permute</span> package
+which offers strong tools for restricted permutation. All these
+functions have argument <code>permutations</code>. The default usage of
+simple non-restricted permutations is achieved by giving a single
+integer number. Restricted permutations can be defined using the
+<code>how</code> function of the <span class="pkg">permute</span> package. Finally, the
+argument can be a permutation matrix where rows define
+permutations. It is possible to use external or user constructed
+permutations.
+</p>
+<p>See <code>help(permutations)</code> for a brief introduction on
+permutations in <span class="pkg">vegan</span>, and <span class="pkg">permute</span> package for the
+full documentation. The vignette of the <span class="pkg">permute</span> package can
+be read from <span class="pkg">vegan</span> with command
+<code>vegandocs("permutations")</code>.
+</p>
+<p>The following functions use the <span class="pkg">permute</span> package:
+<code>CCorA</code>, <code>adonis</code>, <code>anosim</code>, <code>anova.cca</code> (plus
+associated <code>permutest.cca</code>, <code>add1.cca</code>,
+<code>drop1.cca</code>, <code>ordistep</code>, <code>ordiR2step</code>),
+<code>envfit</code> (plus associated <code>factorfit</code> and
+<code>vectorfit</code>), <code>mantel</code> (and <code>mantel.partial</code>),
+<code>mrpp</code>, <code>mso</code>, <code>ordiareatest</code>,
+<code>permutest.betadisper</code>, <code>protest</code> and <code>simper</code>.
+</p>
+</li>
+<li><p> Community null model generation has been completely
+redesigned and rewritten. The communities are constructed with
+new <code>nullmodel</code> function and defined in a low level
+<code>commsim</code> function. The actual null models are generated
+with a <code>simulate</code> function that builds an array of null
+models. The new null models include a wide array of quantitative
+models in addition to the old binary models, and users can plug
+in their own generating functions. The basic tool invoking and
+analysing null models is <code>oecosimu</code>. The null models are
+often used only for the analysis of nestedness, but the
+implementation in <code>oecosimu</code> allows analysing any
+statistic, and null models are better seen as an alternative to
+permutation tests.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>INSTALLATION</h4>
+
+
+<ul>
+<li> <p><span class="pkg">vegan</span> package dependencies and namespace imports
+were adapted to changes in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>, and no longer trigger warnings and
+notes in package tests.
+</p>
+</li>
+<li><p> Three-dimensional ordination graphics using
+<span class="pkg">scatterplot3d</span> for static plots and <span class="pkg">rgl</span> for dynamic
+plots were removed from <span class="pkg">vegan</span> and moved to a companion
+package <span class="pkg">vegan3d</span>. The package is available in CRAN.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>NEW FUNCTIONS</h4>
+
+
+<ul>
+<li><p> Function <code>dispweight</code> implements dispersion weighting
+of Clarke et al. (<em>Marine Ecology Progress Series</em>, 320,
+11–27).  In addition, we implemented a new method for
+generalized dispersion weighting <code>gdispweight</code>. Both
+methods downweight species that are significantly
+over-dispersed.
+</p>
+</li>
+<li><p> New <code>hclust</code> support functions <code>reorder</code>,
+<code>rev</code> and <code>scores</code>. Functions <code>reorder</code> and
+<code>rev</code> are similar to these functions for <code>dendrogram</code>
+objects in base <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>. However, <code>reorder</code> can use (and defaults
+to) weighted mean. In weighted mean the node average is always the
+mean of member leaves, whereas the <code>dendrogram</code> uses always
+unweighted means of joined branches.
+</p>
+</li>
+<li><p> Function <code>ordiareatest</code> supplements <code>ordihull</code> and
+<code>ordiellipse</code> and provides a randomization test for the
+one-sided alternative hypothesis that convex hulls or ellipses in
+two-dimensional ordination space have smaller areas than with
+randomized groups.
+</p>
+</li>
+<li><p> Function <code>permustats</code> extracts and inspects permutation
+results with support functions <code>summary</code>, <code>density</code>,
+<code>densityplot</code>, <code>qqnorm</code> and <code>qqmath</code>. The
+<code>density</code> and <code>qqnorm</code> are standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> tools that only
+work with one statistic, and <code>densityplot</code> and <code>qqmath</code>
+are <span class="pkg">lattice</span> graphics that work with univariate and
+multivariate statistics. The results of following functions can be
+extracted: <code>anosim</code>, <code>adonis</code>, <code>mantel</code> (and
+<code>mantel.partial</code>), <code>mrpp</code>, <code>oecosimu</code>,
+<code>permutest.cca</code> (but not the corresponding <code>anova</code>
+methods), <code>permutest.betadisper</code>, and <code>protest</code>.
+</p>
+</li>
+<li> <p><code>stressplot</code> functions display the ordination distances
+at given number of dimensions against original distances.  The
+method functions are similar to <code>stressplot</code> for
+<code>metaMDS</code>, and always use the inherent distances of each
+ordination method. The functions are available for the results
+<code>capscale</code>, <code>cca</code>, <code>princomp</code>, <code>prcomp</code>,
+<code>rda</code>, and <code>wcmdscale</code>.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>BUG FIXES</h4>
+
+
+<ul>
+<li> <p><code>cascadeKM</code> of only one group will be <code>NA</code> instead
+of a random value. 
+</p>
+</li>
+<li> <p><code>ordiellipse</code> can handle points exactly on a line,
+including only two points (with a warning).
+</p>
+</li>
+<li><p> plotting <code>radfit</code> results for several species failed if
+any of the communities had no species or had only one species.
+</p>
+</li>
+<li> <p><code>RsquareAdj</code> for <code>capscale</code> with negative
+eigenvalues will now report <code>NA</code> instead of using biased
+method of <code>rda</code> results.
+</p>
+</li>
+<li> <p><code>simper</code> failed when a group had only a single member.
+</p>
+</li></ul>
+
+ 
+
+
+<h4>NEW FEATURES</h4>
+
+
+<ul>
+<li> <p><code>anova.cca</code> functions were re-written to use the
+<span class="pkg">permute</span> package. Old results may not be exactly
+reproduced, and models with missing data may fail in several
+cases. There is a new option of analysing a sequence of models
+against each other.
+</p>
+</li>
+<li> <p><code>simulate</code> functions for <code>cca</code> and <code>rda</code>
+can return several simulations in a <code>nullmodel</code> compatible
+object. The functions can produce simulations with correlated
+errors (also for <code>capscale</code>) in parametric simulation with
+Gaussian error.
+</p>
+</li>
+<li> <p><code>bioenv</code> can use Manhattan, Gower and Mahalanobis
+distances in addition to the default Euclidean. New helper
+function <code>bioenvdist</code> can extract the dissimilarities
+applied in best model or any other model.
+</p>
+</li>
+<li> <p><code>metaMDS(..., trace = 2)</code> will show convergence
+information with the default <code>monoMDS</code> engine.
+</p>
+</li>
+<li><p> Function <code>MDSrotate</code> can rotate a <i>k</i>-dimensional
+ordination to <i>k-1</i> variables. When these variables are
+correlated (like usually is the case), the vectors can also be
+correlated to previously rotated dimensions, but will be
+uncorrelated to all later ones.
+</p>
+</li>
+<li> <p><span class="pkg">vegan</span> 2.0-10 changed the weighted <code>nestednodf</code>
+so that weighted analysis of binary data was equivalent to
+binary analysis. However, this broke the equivalence to the
+original method. Now the function has an argument <code>wbinary</code>
+to select the method of analysis. The problem was reported and a
+fix submitted by Vanderlei Debastiani (Universidade Federal do
+Rio Grande do Sul, Brasil).
+</p>
+</li>
+<li> <p><code>ordiellipse</code>, <code>ordihull</code> and <code>ordispider</code>
+can handle missing values in <code>groups</code>.
+</p>
+</li>
+<li> <p><code>ordispider</code> can now use spatial medians instead of
+means. 
+</p>
+</li>
+<li> <p><code>rankindex</code> can use Manhattan, Gower and Mahalanobis
+distance in addition to the default Euclidean.
+</p>
+</li>
+<li><p> User can set colours and line types in function
+<code>rarecurve</code> for plotting rarefaction curves.
+</p>
+</li>
+<li> <p><code>spantree</code> gained a support function <code>as.hclust</code>
+to change the minimum spanning tree into an <code>hclust</code> tree.
+</p>
+</li>
+<li> <p><code>fitspecaccum</code> can do weighted analysis. Gained
+<code>lines</code> method.
+</p>
+</li>
+<li><p> Functions for extrapolated number of species or for the size
+of species pool using Chao method were modified following Chiu et
+al., <em>Biometrics</em> 70, 671–682 (2014).
+</p>
+<p>Incidence based <code>specpool</code> can now use (and defaults to)
+small sample correction with number of sites as the sample
+size. Function uses basic Chao extrapolation based on the ratio of
+singletons and doubletons, but switches now to bias corrected Chao
+extrapolation if there are no doubletons (species found
+twice). The variance formula for bias corrected Chao was derived
+following the supporting
+<a href="http://onlinelibrary.wiley.com/doi/10.1111/biom.12200/suppinfo">online material</a>
+and differs slightly from Chiu et al. (2014).
+</p>
+<p>The <code>poolaccum</code> function was changed similarly, but the small
+sample correction is used always.
+</p>
+<p>The abundance based <code>estimateR</code> uses bias corrected Chao
+extrapolation, but earlier it estimated its variance with classic
+Chao model. Now we use the widespread
+<a href="http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB">approximate
+equation</a> for variance.
+</p>
+<p>With these changes these functions are more similar to
+<a href="http://viceroy.eeb.uconn.edu/EstimateS/EstimateSPages/EstSUsersGuide/EstimateSUsersGuide.htm#AppendixB">EstimateS</a>.
+</p>
+</li>
+<li> <p><code>tabasco</code> uses now <code>reorder.hclust</code> for
+<code>hclust</code> object for better ordering than previously when it
+cast trees to <code>dendrogram</code> objects.
+</p>
+</li>
+<li> <p><code>treedive</code> and <code>treedist</code> default now to
+<code>match.force = TRUE</code> and can be silenced with
+<code>verbose = FALSE</code>.
+</p>
+</li>
+<li> <p><code>vegdist</code> gained Mahalanobis distance.
+</p>
+</li>
+<li><p> Nomenclature updated in plant community data with the help
+of <span class="pkg">Taxonstand</span> and <span class="pkg">taxize</span> packages. The taxonomy of
+the <code>dune</code> data was adapted to the same sources and APG
+III.  <code>varespec</code> and <code>dune</code> use 8-character names (4
+from genus + 4 from species epithet). New data set on
+phylogenetic distances for <code>dune</code> was extracted from Zanne
+et al. (<em>Nature</em> 506, 89–92; 2014).
+</p>
+</li>
+<li><p> User configurable plots for <code>rarecurve</code>.
+</p>
+</li></ul>
+ 
+ 
+
+
+<h4>DEPRECATED AND DEFUNCT</h4>
+
+
+<ul>
+<li> <p><code>strata</code> are deprecated in permutations. It is still
+accepted but will be phased out in next releases. Use <code>how</code>
+of <span class="pkg">permute</span> package.
+</p>
+</li>
+<li> <p><code>cca</code>, <code>rda</code> and <code>capscale</code> do not return
+scores scaled by eigenvalues: use <code>scores</code> function to
+extract scaled results.
+</p>
+</li>
+<li> <p><code>commsimulator</code> is deprecated. Replace
+<code>commsimulator(x, method)</code> with
+<code>simulate(nullmodel(x, method))</code>.
+</p>
+</li>
+<li> <p><code>density</code> and <code>densityplot</code> for permutation
+results are deprecated: use <code>permustats</code> with its
+<code>density</code> and <code>densityplot</code> method.
+</p>
+</li></ul>
+ 
+ 
+
+
 <h3>Changes in version 2.0-10</h3>
 
 
@@ -102,11 +433,11 @@ using the sampling effort as weights.
 
 <ul>
 <li><p> This version is released due to changes in programming
-interface and testing procedures in <font face="Courier New,Courier" color="#666666"><b>R</b></font> 3.0.2. If you are using an
-older version of <font face="Courier New,Courier" color="#666666"><b>R</b></font>, there is no need to upgrade <span class="pkg">vegan</span>. There
+interface and testing procedures in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 3.0.2. If you are using an
+older version of <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>, there is no need to upgrade <span class="pkg">vegan</span>. There
 are no new features nor bug fixes. The only user-visible changes
 are in documentation and in output messages and formatting. Because
-of <font face="Courier New,Courier" color="#666666"><b>R</b></font> changes, this version is dependent on <font face="Courier New,Courier" color="#666666"><b>R</b></font> version 2.14.0
+of <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> changes, this version is dependent on <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> version 2.14.0
 or newer and on <span class="pkg">lattice</span> package.
 </p>
 </li></ul>
@@ -122,7 +453,7 @@ or newer and on <span class="pkg">lattice</span> package.
 
 <ul>
 <li><p> This is a maintenance release that fixes some issues
-raised by changed in <font face="Courier New,Courier" color="#666666"><b>R</b></font> toolset for processing vignettes.  In
+raised by changed in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> toolset for processing vignettes.  In
 the same we also fix some typographic issues in the vignettes.
 </p>
 </li></ul>
@@ -168,7 +499,7 @@ be used to replot the saved result.
 
 <ul>
 <li> <p><code>tabasco()</code> is a new function for graphical display
-of community data matrix.  Technically it is an interface to <font face="Courier New,Courier" color="#666666"><b>R</b></font>
+of community data matrix.  Technically it is an interface to <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>
 <code>heatmap</code>, but its use is closer to <span class="pkg">vegan</span> function
 <code>vegemite</code>. The function can reorder the community data
 matrix similarly as <code>vegemite</code>, for instance, by ordination
@@ -357,7 +688,7 @@ statistic is evaluated within the function.
 <code>plot</code> etc. of the results. These methods are only used if
 the full <code>wcmdscale</code> result is returned with, e.g., argument
 <code>eig = TRUE</code>. The default is still to return only a matrix of
-scores similarly as the standard <font face="Courier New,Courier" color="#666666"><b>R</b></font> function <code>cmdscale()</code>,
+scores similarly as the standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> function <code>cmdscale()</code>,
 and in that case the new methods are not used.
 </p>
 </li></ul>
@@ -456,13 +787,13 @@ in the data.  All functions now have methods <code>AIC</code>,
 version of LaTeX (TeXLive 2012).
 </p>
 </li>
-<li> <p><font face="Courier New,Courier" color="#666666"><b>R</b></font> versions later than 2.15-1 (including development
+<li> <p><span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> versions later than 2.15-1 (including development
 version) report warnings and errors when installing and checking
 <span class="pkg">vegan</span>, and you must upgrade <span class="pkg">vegan</span> to this version.
 The warnings concern functions <code>cIndexKM</code> and
 <code>betadisper</code>, and the error occurs in <code>betadisper</code>.
 These errors and warnings were triggered by internal changes in
-<font face="Courier New,Courier" color="#666666"><b>R</b></font>.
+<span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>.
 </p>
 </li></ul>
 
@@ -593,8 +924,8 @@ argument can be used only with one set of points.
 <li><p> Added new nestedness functions <code>nestedbetasor</code> and
 <code>nestedbetajac</code> that implement multiple-site dissimilarity
 indices and their decomposition into turnover and nestedness
-components following Baselga (<EM>Global Ecology and
-Biogeography</EM> 19, 134–143; 2010).
+components following Baselga (<em>Global Ecology and
+Biogeography</em> 19, 134–143; 2010).
 </p>
 </li>
 <li><p> Added function <code>rarecurve</code> to draw rarefaction curves
@@ -604,8 +935,8 @@ for each curve.
 </p>
 </li>
 <li><p> Added function <code>simper</code> that implements
-“similarity percentages” of Clarke (<EM>Australian
-Journal of Ecology</EM> 18, 117–143; 1993).  The method compares
+“similarity percentages” of Clarke (<em>Australian
+Journal of Ecology</em> 18, 117–143; 1993).  The method compares
 two or more groups and decomposes the average between-group
 Bray-Curtis dissimilarity index to contributions by individual
 species.  The code was developed in 
@@ -677,8 +1008,8 @@ effect of constraining term to adjusted <i>R-squared</i>.
 </p>
 </li>
 <li><p> Added Cao dissimilarity (CYd) as a new dissimilarity
-method in <code>vegdist</code> following Cao et al., <EM>Water
-Envir Res</EM> 69, 95–106 (1997). The index should be good for
+method in <code>vegdist</code> following Cao et al., <em>Water
+Envir Res</em> 69, 95–106 (1997). The index should be good for
 data with high beta diversity and variable sampling
 intensity. Thanks to consultation to Yong Cao (Univ Illinois,
 USA).
@@ -757,7 +1088,7 @@ with no <code>groups</code> they are the LC scores.
 <ul>
 <li> <p><code>clamtest</code>: new function to classify species as
 generalists and specialists in two distinct habitats (CLAM test of
-Chazdon et al., <EM>Ecology</EM> 92, 1332–1343; 2011).  The test is
+Chazdon et al., <em>Ecology</em> 92, 1332–1343; 2011).  The test is
 based on multinomial distribution of individuals in two habitat
 types or sampling units, and it is applicable only to count data
 with no over-dispersion.
@@ -777,7 +1108,7 @@ frequencies.  <span class="pkg">Vegan</span> has Raup-Crick index as a choice in
 <code>vegdist</code>, but that uses equal sampling probabilities for
 species and analytic equations. The new <code>raupcrick</code>
 function uses simulation with <code>oecosimu</code>. The function
-follows Chase et al. (2011) <EM>Ecosphere</EM> 2:art24
+follows Chase et al. (2011) <em>Ecosphere</em> 2:art24
 [<a href="http://www.esajournals.org/doi/abs/10.1890/ES10-00117.1">doi:10.1890/ES10-00117.1</a>],
 and was developed with the consultation of Brian Inouye.
 </p>
@@ -810,12 +1141,12 @@ submatrix if all cells were filled.
 <code>update</code>d because of a ‘<span class="file">NAMESPACE</span>’ issue.
 </p>
 </li>
-<li> <p><font face="Courier New,Courier" color="#666666"><b>R</b></font> 2.14.0 changed so that it does not accept using
+<li> <p><span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 2.14.0 changed so that it does not accept using
 <code>sd()</code> function for matrices (which was the behaviour at
-least since <font face="Courier New,Courier" color="#666666"><b>R</b></font> 1.0-0), and several <span class="pkg">vegan</span> functions were
+least since <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 1.0-0), and several <span class="pkg">vegan</span> functions were
 changed to adapt to this change (<code>rda</code>, <code>capscale</code>,
 <code>simulate</code> methods for <code>rda</code>, <code>cca</code> and
-<code>capscale</code>). The change in <font face="Courier New,Courier" color="#666666"><b>R</b></font> 2.14.0 does not influence the
+<code>capscale</code>). The change in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> 2.14.0 does not influence the
 results but you probably wish to upgrade <span class="pkg">vegan</span> to avoid
 annoying warnings.
 </p>
@@ -849,11 +1180,11 @@ early in tries, and the results are equally good in most cases.
 <li><p> Peter Minchin joins the <span class="pkg">vegan</span> team.
 </p>
 </li>
-<li> <p><span class="pkg">vegan</span> implements standard <font face="Courier New,Courier" color="#666666"><b>R</b></font> ‘<span class="file">NAMESPACE</span>’. In
+<li> <p><span class="pkg">vegan</span> implements standard <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> ‘<span class="file">NAMESPACE</span>’. In
 general, <code>S3</code> methods are not exported which means that you
 cannot directly use or see contents of functions like
 <code>cca.default</code>, <code>plot.cca</code> or <code>anova.ccabyterm</code>. To
-use these functions you should rely on <font face="Courier New,Courier" color="#666666"><b>R</b></font> delegation and simply
+use these functions you should rely on <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span> delegation and simply
 use <code>cca</code> and for its result objects use <code>plot</code> and
 <code>anova</code> without suffix <code>.cca</code>. To see the contents of
 the function you can use <code>:::</code>, such as
@@ -900,7 +1231,7 @@ an environmental vector.
 <li> <p><code>eventstar</code> finds the minimum of the evenness profile
 on the Tsallis entropy, and uses this to find the corresponding
 values of diversity, evenness and numbers equivalent following
-Mendes et al. (<EM>Ecography</EM> 31, 450-456; 2008). The code was
+Mendes et al. (<em>Ecography</em> 31, 450-456; 2008). The code was
 contributed by Eduardo Ribeira Cunha and Heloisa Beatriz Antoniazi
 Evangelista and adapted to <span class="pkg">vegan</span> by Peter Solymos.
 </p>
@@ -909,9 +1240,9 @@ Evangelista and adapted to <span class="pkg">vegan</span> by Peter Solymos.
 the species accumulation results from <code>specaccum</code>. The
 function can use new self-starting species accumulation models
 in <span class="pkg">vegan</span> or other self-starting non-linear regression
-models in <font face="Courier New,Courier" color="#666666"><b>R</b></font>. The function can fit Arrhenius, Gleason, Gitay,
+models in <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>. The function can fit Arrhenius, Gleason, Gitay,
 Lomolino (in <span class="pkg">vegan</span>), asymptotic, Gompertz,
-Michaelis-Menten, logistic and Weibull (in base <font face="Courier New,Courier" color="#666666"><b>R</b></font>) models. The
+Michaelis-Menten, logistic and Weibull (in base <span style="font-family: Courier New, Courier; color: #666666;"><b>R</b></span>) models. The
 function has <code>plot</code> and <code>predict</code> methods.
 </p>
 </li>
@@ -920,7 +1251,7 @@ function has <code>plot</code> and <code>predict</code> methods.
 <code>SSlomolino</code>. These can be used with <code>fitspecaccum</code> or
 directly in non-linear regression with <code>nls</code>. These functions
 were implemented because they were found good for species-area
-models by Dengler (<EM>J. Biogeogr.</EM> 36, 728-744; 2009).
+models by Dengler (<em>J. Biogeogr.</em> 36, 728-744; 2009).
 </p>
 </li></ul>
 
diff --git a/vignettes/decision-vegan.Rnw b/vignettes/decision-vegan.Rnw
index cc42763..6e00ca8 100644
--- a/vignettes/decision-vegan.Rnw
+++ b/vignettes/decision-vegan.Rnw
@@ -7,7 +7,7 @@
 \author{Jari Oksanen}
 \title{Design decisions and implementation details in vegan}
 
-\date{\footnotesize{$ $Id: decision-vegan.Rnw 2616 2013-09-11 08:34:17Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
 \Sexpr{packageDescription("vegan", field="Version")}
 in \Sexpr{R.version.string} on \today}}
@@ -35,6 +35,197 @@ another document.
 
 \tableofcontents
 
+\section{Parallel processing}
+
+Several \pkg{vegan} functions can perform parallel processing using
+the standard \R{} package \pkg{parallel}. 
+The \pkg{parallel} package in \R{} implements
+the functionality of earlier contributed packages \pkg{multicore} and
+\pkg{snow}.  The \pkg{multicore} functionality forks the analysis to
+multiple cores, and \pkg{snow} functionality sets up a socket cluster
+of workers.  The \pkg{multicore} functionality only works in unix-like
+systems (such as MacOS and Linux), but \pkg{snow} functionality works
+in all operating systems.  \pkg{Vegan} can use either method, but
+defaults to \pkg{multicore} functionality when this is available,
+because its forked clusters are usually faster.  This chapter
+describes both the user interface and internal implementation for the
+developers.
+
+\subsection{User interface}
+\label{sec:parallel:ui}
+
+The functions that are capable of parallel processing have argument
+\code{parallel}.  The normal default is \code{parallel = 1} which
+means that no parallel processing is performed.  It is possible to set
+parallel processing as the default in \pkg{vegan} (see
+\S\,\ref{sec:parallel:default}). 
+
+For parallel processing, the \code{parallel} argument can be either
+
+\begin{enumerate}
+\item An integer in which case the given number of parallel processes
+  will be launched (value $1$ launches non-parallel processing). In
+  unix-like systems (\emph{e.g.}, MacOS, Linux) these will be forked
+  \code{multicore} processes. In Windows socket clusters will be set up,
+  initialized and closed.
+\item A previously created socket cluster. This saves time as the
+  cluster is not set up and closed in the function.  If the argument is a
+  socket cluster, it will also be used in unix-like systems. Setting
+  up a socket cluster is discussed in \S\,\ref{sec:parallel:socket}.
+\end{enumerate}
+
+\subsubsection{Using parallel processing as default}
+\label{sec:parallel:default}
+
+If the user sets option \code{mc.cores}, its value will be used as the
+default value of the \code{parallel} argument in \pkg{vegan}
+functions.  The following command will set up parallel processing to
+all subsequent \pkg{vegan} commands:
+<<eval=false>>=
+options(mc.cores = 2)
+@ 
+
+The \code{mc.cores} option is defined in the \pkg{parallel} package,
+but it is usually unset in which case \pkg{vegan} will default to
+non-parallel computation.  The \code{mc.cores} option can be set by
+the environmental variable \code{MC_CORES} when the \pkg{parallel}
+package is loaded.
+
+\R{} allows\footnote{Since \R{} version 2.15.0.}
+setting up a default socket cluster (\code{setDefaultCluster}), but
+this will not be used in \pkg{vegan}. 
+
+\subsubsection{Setting up socket clusters}
+\label{sec:parallel:socket}
+
+If socket clusters are used (and they are the only alternative in
+Windows), it is often wise to set up a cluster before calling
+parallelized code and give the pre-defined cluster as the value of
+the \code{parallel} argument in \pkg{vegan}.  If you want to use
+socket clusters in unix-like systems (MacOS, Linux), this can be only
+done with pre-defined clusters.
+
+If socket cluster is not set up in Windows, \pkg{vegan} will create and
+close the cluster within the function body. This involves following commands:
+\begin{Schunk}
+\begin{Soutput}
+clus <- makeCluster(4)
+## perform parallel processing
+stopCluster(clus)
+\end{Soutput}
+\end{Schunk}
+The first command sets up the cluster, in this case with four
+cores, and the second command stops the cluster.
+
+Most parallelized \pkg{vegan} functions work similarly in socket and
+fork clusters, but in \code{oecosimu} the parallel processing is used
+to evaluate user-defined functions, and their arguments and data must
+be made known to the socket cluster.  For example, if you want to run
+in parallel the \code{meandist} function of the \code{oecosimu}
+example with a pre-defined socket cluster, you must use:
+<<eval=false>>=
+## start up and define meandist()
+library(vegan)
+data(sipoo)
+meandist <- 
+    function(x) mean(vegdist(x, "bray"))
+library(parallel)
+clus <- makeCluster(4)
+clusterEvalQ(clus, library(vegan))
+mbc1 <- oecosimu(dune, meandist, "r2dtable", 
+                 parallel = clus)
+stopCluster(clus)
+@ 
+Socket clusters are used for parallel processing in Windows, but you
+do not need to pre-define the socket cluster in \code{oecosimu} if you
+only need \pkg{vegan} commands.  However, if you need some other
+contributed packages, you must pre-define the socket cluster also in
+Windows with appropriate \code{clusterEvalQ} calls.
+
+If you pre-set the cluster, you can also use \pkg{snow} style socket
+clusters in unix-like systems.
+
+\subsubsection{Random number generation}
+
+\pkg{Vegan} does not use parallel processing in random number
+generation, and you can set the seed for the standard random number
+generator. Setting the seed for the parallelized generator (L'Ecuyer)
+has no effect in \pkg{vegan}.
+
+\subsubsection{Does it pay off?}
+
+Parallelized processing has a considerable overhead, and the analysis
+is faster only if the non-parallel code is really slow (takes several
+seconds in wall clock time). The overhead is particularly large in
+socket clusters (in Windows). Creating a socket cluster and evaluating
+\code{library(vegan)} with \code{clusterEvalQ} can take two seconds or
+longer, and only pays off if the non-parallel analysis takes ten
+seconds or longer. Using pre-defined clusters will reduce the
+overhead. Fork clusters (in unix-like operating systems) have a
+smaller overhead and can be faster, but they also have an overhead.
+
+Each parallel process needs memory, and for a large number of
+processes you need much memory.  If the memory is exhausted, the
+parallel processes can stall and  take much longer than
+non-parallel processes (minutes instead of seconds).
+
+If the analysis is fast, and function runs in, say, less than five
+seconds, parallel processing is rarely useful.  Parallel processing is
+useful only in slow analyses: large number of replications or
+simulations, slow evaluation of each simulation. The danger of memory
+exhaustion must always be remembered.
+
+The benefits and potential problems of parallel processing depend on
+your particular system: it is best to rely on your own experience. 
+
+\subsection{Internals for developers}
+
+The implementation of the parallel processing should accord with the
+description of the user interface above (\S\,\ref{sec:parallel:ui}).
+Function \code{oecosimu} can be used as a reference implementation,
+and similar interpretation and order of interpretation of arguments
+should be followed.  All future implementations should be consistent
+and all must be changed if the call heuristic changes.
+
+The value of the \code{parallel} argument can be \code{NULL}, a
+positive integer or a socket cluster.  Integer $1$ means that no
+parallel processing is performed.  The ``normal'' default is
+\code{NULL} which in  the ``normal'' case is interpreted as $1$.  Here
+``normal'' means that \R{} is run with default settings without
+setting \code{mc.cores} or environmental variable \code{MC_CORES}.  
+
+Function \code{oecosimu} interprets the \code{parallel} arguments in
+the following way:
+\begin{enumerate} 
+\item \code{NULL}: The function is called with argument \code{parallel
+    = getOption("mc.cores")}. The option \code{mc.cores} is normally
+  unset and then the default is \code{parallel = NULL}.  
+\item Integer: An integer value is taken as the number of created
+  parallel processes.  In unix-like systems this is the number of
+  forked multicore processes, and in Windows this is the number of
+  workers in socket clusters.  In Windows, the socket cluster is
+  created, and if needed \code{library(vegan)} is evaluated in the
+  cluster (this is not necessary if the function only uses internal
+  functions), and the cluster is stopped after parallel processing.
+\item Socket cluster: If a socket cluster is given, it will be used in
+  all operating systems, and  the cluster is not stopped
+  within the function.
+\end{enumerate}
+
+This gives the following precedence order for parallel processing
+(highest to lowest):
+\begin{enumerate}
+  \item Explicitly given argument value of \code{parallel} will always
+    be used.
+  \item If \code{mc.cores} is set, it will be used. In Windows this
+    means creating and stopping socket clusters. Please note
+    that the \code{mc.cores} is only set from the environmental
+    variable \code{MC_CORES} when you load the \pkg{parallel} package,
+    and it is always unset before first
+    \code{require(parallel)}.
+ \item The fall back behaviour is no parallel processing. 
+\end{enumerate}
+
 \section{Nestedness and Null models}
 
 Some published indices of nestedness and null models of communities
@@ -237,8 +428,8 @@ weighted averaging scores have somewhat wider dispersion.
     in the functions \code{prcomp} and \code{princomp}, and the
     one used in the \pkg{vegan} function \code{rda} 
     and the proprietary software \proglang{Canoco}
-    scores in terms of orthonormal species ($u_{ik}$) and site scores
-    ($v_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
+    scores in terms of orthonormal species ($v_{ik}$) and site scores
+    ($u_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
     species standard deviations ($s_j$). In \code{rda},
     $\mathrm{const} = \sqrt[4]{(n-1) \sum \lambda_k}$.  Corresponding
     negative scaling in \pkg{vegan}
diff --git a/vignettes/decision-vegan.tex b/vignettes/decision-vegan.tex
index e860de2..b46fb64 100644
--- a/vignettes/decision-vegan.tex
+++ b/vignettes/decision-vegan.tex
@@ -7,10 +7,10 @@
 \author{Jari Oksanen}
 \title{Design decisions and implementation details in vegan}
 
-\date{\footnotesize{$ $Id: decision-vegan.Rnw 2616 2013-09-11 08:34:17Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
-2.0-10
-in R Under development (unstable) (2013-12-11 r64449) on \today}}
+2.2-0
+in R Under development (unstable) (2014-11-16 r66991) on \today}}
 
 %% need no \usepackage{Sweave}
 \begin{document}
@@ -28,6 +28,201 @@ another document.
 
 \tableofcontents
 
+\section{Parallel processing}
+
+Several \pkg{vegan} functions can perform parallel processing using
+the standard \R{} package \pkg{parallel}. 
+The \pkg{parallel} package in \R{} implements
+the functionality of earlier contributed packages \pkg{multicore} and
+\pkg{snow}.  The \pkg{multicore} functionality forks the analysis to
+multiple cores, and \pkg{snow} functionality sets up a socket cluster
+of workers.  The \pkg{multicore} functionality only works in unix-like
+systems (such as MacOS and Linux), but \pkg{snow} functionality works
+in all operating systems.  \pkg{Vegan} can use either method, but
+defaults to \pkg{multicore} functionality when this is available,
+because its forked clusters are usually faster.  This chapter
+describes both the user interface and internal implementation for the
+developers.
+
+\subsection{User interface}
+\label{sec:parallel:ui}
+
+The functions that are capable of parallel processing have argument
+\code{parallel}.  The normal default is \code{parallel = 1} which
+means that no parallel processing is performed.  It is possible to set
+parallel processing as the default in \pkg{vegan} (see
+\S\,\ref{sec:parallel:default}). 
+
+For parallel processing, the \code{parallel} argument can be either
+
+\begin{enumerate}
+\item An integer in which case the given number of parallel processes
+  will be launched (value $1$ launches non-parallel processing). In
+  unix-like systems (\emph{e.g.}, MacOS, Linux) these will be forked
+  \code{multicore} processes. In Windows socket clusters will be set up,
+  initialized and closed.
+\item A previously created socket cluster. This saves time as the
+  cluster is not set up and closed in the function.  If the argument is a
+  socket cluster, it will also be used in unix-like systems. Setting
+  up a socket cluster is discussed in \S\,\ref{sec:parallel:socket}.
+\end{enumerate}
+
+\subsubsection{Using parallel processing as default}
+\label{sec:parallel:default}
+
+If the user sets option \code{mc.cores}, its value will be used as the
+default value of the \code{parallel} argument in \pkg{vegan}
+functions.  The following command will set up parallel processing to
+all subsequent \pkg{vegan} commands:
+\begin{Schunk}
+\begin{Sinput}
+> options(mc.cores = 2)
+\end{Sinput}
+\end{Schunk}
+
+The \code{mc.cores} option is defined in the \pkg{parallel} package,
+but it is usually unset in which case \pkg{vegan} will default to
+non-parallel computation.  The \code{mc.cores} option can be set by
+the environmental variable \code{MC_CORES} when the \pkg{parallel}
+package is loaded.
+
+\R{} allows\footnote{Since \R{} version 2.15.0.}
+setting up a default socket cluster (\code{setDefaultCluster}), but
+this will not be used in \pkg{vegan}. 
+
+\subsubsection{Setting up socket clusters}
+\label{sec:parallel:socket}
+
+If socket clusters are used (and they are the only alternative in
+Windows), it is often wise to set up a cluster before calling
+parallelized code and give the pre-defined cluster as the value of
+the \code{parallel} argument in \pkg{vegan}.  If you want to use
+socket clusters in unix-like systems (MacOS, Linux), this can be only
+done with pre-defined clusters.
+
+If socket cluster is not set up in Windows, \pkg{vegan} will create and
+close the cluster within the function body. This involves following commands:
+\begin{Schunk}
+\begin{Soutput}
+clus <- makeCluster(4)
+## perform parallel processing
+stopCluster(clus)
+\end{Soutput}
+\end{Schunk}
+The first command sets up the cluster, in this case with four
+cores, and the second command stops the cluster.
+
+Most parallelized \pkg{vegan} functions work similarly in socket and
+fork clusters, but in \code{oecosimu} the parallel processing is used
+to evaluate user-defined functions, and their arguments and data must
+be made known to the socket cluster.  For example, if you want to run
+in parallel the \code{meandist} function of the \code{oecosimu}
+example with a pre-defined socket cluster, you must use:
+\begin{Schunk}
+\begin{Sinput}
+> ## start up and define meandist()
+> library(vegan)
+> data(sipoo)
+> meandist <- 
+      function(x) mean(vegdist(x, "bray"))
+> library(parallel)
+> clus <- makeCluster(4)
+> clusterEvalQ(clus, library(vegan))
+> mbc1 <- oecosimu(dune, meandist, "r2dtable", 
+                   parallel = clus)
+> stopCluster(clus)
+\end{Sinput}
+\end{Schunk}
+Socket clusters are used for parallel processing in Windows, but you
+do not need to pre-define the socket cluster in \code{oecosimu} if you
+only need \pkg{vegan} commands.  However, if you need some other
+contributed packages, you must pre-define the socket cluster also in
+Windows with appropriate \code{clusterEvalQ} calls.
+
+If you pre-set the cluster, you can also use \pkg{snow} style socket
+clusters in unix-like systems.
+
+\subsubsection{Random number generation}
+
+\pkg{Vegan} does not use parallel processing in random number
+generation, and you can set the seed for the standard random number
+generator. Setting the seed for the parallelized generator (L'Ecuyer)
+has no effect in \pkg{vegan}.
+
+\subsubsection{Does it pay off?}
+
+Parallelized processing has a considerable overhead, and the analysis
+is faster only if the non-parallel code is really slow (takes several
+seconds in wall clock time). The overhead is particularly large in
+socket clusters (in Windows). Creating a socket cluster and evaluating
+\code{library(vegan)} with \code{clusterEvalQ} can take two seconds or
+longer, and only pays off if the non-parallel analysis takes ten
+seconds or longer. Using pre-defined clusters will reduce the
+overhead. Fork clusters (in unix-like operating systems) have a
+smaller overhead and can be faster, but they also have an overhead.
+
+Each parallel process needs memory, and for a large number of
+processes you need much memory.  If the memory is exhausted, the
+parallel processes can stall and  take much longer than
+non-parallel processes (minutes instead of seconds).
+
+If the analysis is fast, and function runs in, say, less than five
+seconds, parallel processing is rarely useful.  Parallel processing is
+useful only in slow analyses: large number of replications or
+simulations, slow evaluation of each simulation. The danger of memory
+exhaustion must always be remembered.
+
+The benefits and potential problems of parallel processing depend on
+your particular system: it is best to rely on your own experience. 
+
+\subsection{Internals for developers}
+
+The implementation of the parallel processing should accord with the
+description of the user interface above (\S\,\ref{sec:parallel:ui}).
+Function \code{oecosimu} can be used as a reference implementation,
+and similar interpretation and order of interpretation of arguments
+should be followed.  All future implementations should be consistent
+and all must be changed if the call heuristic changes.
+
+The value of the \code{parallel} argument can be \code{NULL}, a
+positive integer or a socket cluster.  Integer $1$ means that no
+parallel processing is performed.  The ``normal'' default is
+\code{NULL} which in  the ``normal'' case is interpreted as $1$.  Here
+``normal'' means that \R{} is run with default settings without
+setting \code{mc.cores} or environmental variable \code{MC_CORES}.  
+
+Function \code{oecosimu} interprets the \code{parallel} arguments in
+the following way:
+\begin{enumerate} 
+\item \code{NULL}: The function is called with argument \code{parallel
+    = getOption("mc.cores")}. The option \code{mc.cores} is normally
+  unset and then the default is \code{parallel = NULL}.  
+\item Integer: An integer value is taken as the number of created
+  parallel processes.  In unix-like systems this is the number of
+  forked multicore processes, and in Windows this is the number of
+  workers in socket clusters.  In Windows, the socket cluster is
+  created, and if needed \code{library(vegan)} is evaluated in the
+  cluster (this is not necessary if the function only uses internal
+  functions), and the cluster is stopped after parallel processing.
+\item Socket cluster: If a socket cluster is given, it will be used in
+  all operating systems, and  the cluster is not stopped
+  within the function.
+\end{enumerate}
+
+This gives the following precedence order for parallel processing
+(highest to lowest):
+\begin{enumerate}
+  \item Explicitly given argument value of \code{parallel} will always
+    be used.
+  \item If \code{mc.cores} is set, it will be used. In Windows this
+    means creating and stopping socket clusters. Please note
+    that the \code{mc.cores} is only set from the environmental
+    variable \code{MC_CORES} when you load the \pkg{parallel} package,
+    and it is always unset before first
+    \code{require(parallel)}.
+ \item The fall back behaviour is no parallel processing. 
+\end{enumerate}
+
 \section{Nestedness and Null models}
 
 Some published indices of nestedness and null models of communities
@@ -40,7 +235,7 @@ The matrix temperature is intuitively simple
 (Fig. \ref{fig:nestedtemp}), but the the exact calculations were not
 explained in the original publication \cite{AtmarPat93}.
 \begin{figure}
-\includegraphics{decision-vegan-002}
+\includegraphics{decision-vegan-004}
 \label{fig:nestedtemp}
 \caption{Matrix temperature for \emph{Falco subbuteo} on Sibbo
   Svartholmen (dot). The curve is the fill line, and in a cold
@@ -218,8 +413,8 @@ weighted averaging scores have somewhat wider dispersion.
     in the functions \code{prcomp} and \code{princomp}, and the
     one used in the \pkg{vegan} function \code{rda} 
     and the proprietary software \proglang{Canoco}
-    scores in terms of orthonormal species ($u_{ik}$) and site scores
-    ($v_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
+    scores in terms of orthonormal species ($v_{ik}$) and site scores
+    ($u_{jk}$), eigenvalues ($\lambda_k$), number of sites  ($n$) and
     species standard deviations ($s_j$). In \code{rda},
     $\mathrm{const} = \sqrt[4]{(n-1) \sum \lambda_k}$.  Corresponding
     negative scaling in \pkg{vegan}
@@ -413,7 +608,7 @@ default. So we must specifically ask for LC scores
 \end{Sinput}
 \end{Schunk}
 \begin{figure}
-\includegraphics{decision-vegan-005}
+\includegraphics{decision-vegan-007}
 \caption{LC scores in CCA of the original data.}
 \label{fig:ccalc}
 \end{figure}
@@ -428,7 +623,7 @@ shuffles the indices.
 \end{Sinput}
 \end{Schunk}
 \begin{figure}
-\includegraphics{decision-vegan-007}
+\includegraphics{decision-vegan-009}
 \caption{LC scores of shuffled species data.}
 \label{fig:ccashuff}
 \end{figure}
@@ -442,7 +637,7 @@ similar the site scores indeed are (Fig. \ref{fig:ccaproc}).
 \end{Sinput}
 \end{Schunk}
 \begin{figure}
-\includegraphics{decision-vegan-009}
+\includegraphics{decision-vegan-011}
 \caption{Procrustes rotation of LC scores from CCA of original and shuffled data.}
 \label{fig:ccaproc}
 \end{figure}
@@ -457,7 +652,7 @@ Redundancy Analysis (RDA) instead of CCA
 \end{Sinput}
 \end{Schunk}
 \begin{figure}
-\includegraphics{decision-vegan-011}
+\includegraphics{decision-vegan-013}
 \caption{Procrustes rotation of LC scores in RDA of the original and shuffled data.}
 \label{fig:rdaproc}
 \end{figure}
@@ -492,10 +687,10 @@ Eigenvalues for constrained axes:
 0.3608 0.1152 
 
 Eigenvalues for unconstrained axes:
-    CA1     CA2     CA3     CA4     CA5     CA6 
-0.37476 0.24036 0.19696 0.17818 0.15209 0.11840 
-    CA7     CA8 
-0.08364 0.07567 
+   CA1    CA2    CA3    CA4    CA5    CA6    CA7 
+0.3748 0.2404 0.1970 0.1782 0.1521 0.1184 0.0836 
+   CA8 
+0.0757 
 (Showed only 8 of all 21 unconstrained eigenvalues)
 \end{Soutput}
 \begin{Sinput}
@@ -507,26 +702,26 @@ Call: cca(formula = varespec[i, ] ~ Al + K, data
 
               Inertia Proportion Rank
 Total          2.0832     1.0000     
-Constrained    0.1932     0.0927    2
-Unconstrained  1.8900     0.9073   21
+Constrained    0.2023     0.0971    2
+Unconstrained  1.8809     0.9029   21
 Inertia is mean squared contingency coefficient 
 
 Eigenvalues for constrained axes:
-  CCA1   CCA2 
-0.1298 0.0634 
+   CCA1    CCA2 
+0.17450 0.02779 
 
 Eigenvalues for unconstrained axes:
-    CA1     CA2     CA3     CA4     CA5     CA6 
-0.52408 0.31643 0.21958 0.17766 0.17696 0.11951 
-    CA7     CA8 
-0.08447 0.07063 
+   CA1    CA2    CA3    CA4    CA5    CA6    CA7 
+0.5192 0.3131 0.1957 0.1802 0.1562 0.1203 0.0894 
+   CA8 
+0.0799 
 (Showed only 8 of all 21 unconstrained eigenvalues)
 \end{Soutput}
 \end{Schunk}
 Similarly their WA scores will be (probably) very different
 (Fig. \ref{fig:ccawa}).
 \begin{figure}
-\includegraphics{decision-vegan-013}
+\includegraphics{decision-vegan-015}
 \caption{Procrustes rotation of WA scores of CCA with the original and
   shuffled data.}
 \label{fig:ccawa}
@@ -547,11 +742,11 @@ remain within numerical accuracy:
 > max(residuals(proc))
 \end{Sinput}
 \begin{Soutput}
-[1] 2.67932e-14
+[1] 2.948264e-14
 \end{Soutput}
 \end{Schunk}
 In \code{cca} the difference would be somewhat larger than now
-observed 2.6793e-14 because site
+observed 2.9483e-14 because site
 weights used for environmental variables are shuffled with the species
 data.
 
@@ -570,7 +765,7 @@ following example uses the classical dune meadow data \cite{Jongman87}:
 When the results are plotted using LC scores, sample plots fall only
 in four alternative positions (Fig. \ref{fig:factorlc}).
 \begin{figure}
-\includegraphics{decision-vegan-016}
+\includegraphics{decision-vegan-018}
 \caption{LC scores of the dune meadow data using only one factor as a
   constraint.}
 \label{fig:factorlc}
@@ -595,7 +790,7 @@ score with the corresponding LC (Fig.  \ref{fig:walcspider}).
 \end{Sinput}
 \end{Schunk}
 \begin{figure}
-\includegraphics{decision-vegan-018}
+\includegraphics{decision-vegan-020}
 \caption{A ``spider plot'' connecting WA scores to corresponding LC
   scores. The shorter the web segments, the better the ordination.}
 \label{fig:walcspider}
diff --git a/vignettes/diversity-vegan.Rnw b/vignettes/diversity-vegan.Rnw
index ff84ad6..903a141 100644
--- a/vignettes/diversity-vegan.Rnw
+++ b/vignettes/diversity-vegan.Rnw
@@ -9,7 +9,7 @@
 
 \title{Vegan: ecological diversity} \author{Jari Oksanen} 
 
-\date{\footnotesize{$ $Id: diversity-vegan.Rnw 2807 2013-12-05 11:50:52Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan \Sexpr{packageDescription("vegan", field="Version")}
   in \Sexpr{R.version.string} on \today}}
 
@@ -573,9 +573,12 @@ species is related to the number of rare species, or species seen only
 once or twice.
 
 Function \code{specpool} implements the following models to estimate
-the pool size $S_p$ \citep{SmithVanBelle84, Chao87}:
+the pool size $S_p$ \citep{SmithVanBelle84, Chao87, ChiuEtal14}:
 \begin{align}
-S_p &= S_o + \frac{f_1^2}{2 f_2} & \text{Chao}\\
+\label{eq:chao-basic}
+S_p &= S_o + \frac{f_1^2}{2 f_2} \frac{N-1}{N} & \text{Chao}\\
+\label{eq:chao-bc}
+S_p &= S_o + \frac{f_1 (f_1 -1)}{2 (f_2+1)}  \frac{N-1}{N} & \text{Chao bias-corrected}\\
 S_p &= S_o + f_1 \frac{N-1}{N}  & \text{1st order Jackknife}\\
 S_p & = S_o + f_1 \frac{2N-3}{N} \nonumber \\ & + f_2 \frac{(N-2)^2}{N(N-1)}
 & \text{2nd order Jackknife}\\
@@ -587,12 +590,36 @@ and $p_i$ are proportions of species.  The idea in jackknife seems to
 be that we missed about as many species as we saw only once, and the
 idea in bootstrap that if we repeat sampling (with replacement) from
 the same data, we miss as many species as we missed originally.
+\citet{ChiuEtal14} introduced the small-sample correction term
+$\frac{N-1}{N}$, but it was not originally used \citep{Chao87}.
+
+The variance of the estimator of the basic Chao estimate is \citep{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-basic}
+s^2 = f_2 \left(A^2 \frac{G^4}{4} + A^2 G^3 + A \frac{G^2}{2} \right),\\
+\text{where}\; A = \frac{N-1}{N}\;\text{and}\; G = \frac{f_1}{f_2} 
+\end{multline}
+The variance of bias-corrected Chao estimate can be approximated by
+replacing the terms of eq.~\ref{eq:var-chao-basic} with the
+corresponding terms in eq.~\ref{eq:chao-bc}:
+\begin{multline}
+\label{eq:var-chao-bc}
+s^2 = A \frac{f_1(f_1-1)}{2(f_2+1)} + A^2 \frac{f_1(2 f_1+1)^2}{(f_2+1)^2}\\
+ + A^2 \frac{f_1^2 f_2 (f_1 -1)^2}{4 (f_2 + 1)^4}
+\end{multline}
+If we apply the bias-correction in the special case where there are no
+doubletons ($f_2 = 0$), the variance is 
+\citep[who omit small-sample correction in some terms]{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-bc0}
+s^2 = \frac{1}{4} A^2 f_1 (2f_1 -1)^2 + \frac{1}{2} A f_1 (f_1-1) - \frac{1}{4}A^2 \frac{f_1^4}{S_p}
+\end{multline}
+Function \code{specpool} uses eq.~\ref{eq:chao-basic} and estimates
+its variance with eq.~\ref{eq:var-chao-basic} when $f_2 > 0$. When
+$f_2 = 0$, \code{specpool} applies eq.~\ref{eq:chao-bc} which reduces
+to $\frac{N-1}{N} \frac{1}{2} f_1 (f_1 - 1)$, and its variance
+estimator eq.~\ref{eq:var-chao-bc0}.
 
-The variance the estimator of \citet{Chao87} is:
-\begin{equation}
-s^2 = f_2 \left(\frac{G^4}{4} + G^3 + \frac{G^2}{2} \right), \,
-\text{where}\quad G = \frac{f_1}{f_2}
-\end{equation}
 The variance of the first-order jackknife is based on the number of
 ``singletons'' $r$ (species occurring only once in the data) in sample
 plots \citep{SmithVanBelle84}:
@@ -604,8 +631,9 @@ Variance of the second-order jackknife is not evaluated in
 For the variance of bootstrap estimator, it is practical to define a
 new variable $q_i = (1-p_i)^N$ for each species \citep{SmithVanBelle84}:
 \begin{multline}
-s^2 = \sum_{i=1}^{S_o} q_i (1-q_i) + 2 \sum \sum Z_p ,\\ \text{where}\; Z_p = \dots
+s^2 = \sum_{i=1}^{S_o} q_i (1-q_i)  \\ +2 \sum_{i \neq j}^{S_o} \left[(Z_{ij}/N)^N - q_i q_j \right]
 \end{multline}
+where $Z_{ij}$ is the number of sites where both species are absent.
 
 The extrapolated richness values for the whole BCI data are:
 <<>>=
@@ -629,9 +657,12 @@ species with low frequencies.  Function \code{estimateR} implements
 two of these methods:
 <<>>=
 estimateR(BCI[k,])
-@
-Chao's method is similar as above, but uses another, ``unbiased''
-equation. \textsc{ace} is based on rare species also:
+@ 
+Chao's method is similar as the bias-corrected model
+eq.~\ref{eq:chao-bc} with its variance estimator
+eq.~\ref{eq:var-chao-bc}, but it uses counts of individuals instead of
+incidences, and does not use small sample correction.  \textsc{ace} is
+based on rare species also:
 \begin{equation}
 \begin{split}
 S_p &= S_\mathrm{abund} + \frac{S_\mathrm{rare}}{C_\mathrm{ACE}} +
diff --git a/vignettes/diversity-vegan.tex b/vignettes/diversity-vegan.tex
index cb3a38a..6569bd3 100644
--- a/vignettes/diversity-vegan.tex
+++ b/vignettes/diversity-vegan.tex
@@ -9,9 +9,9 @@
 
 \title{Vegan: ecological diversity} \author{Jari Oksanen} 
 
-\date{\footnotesize{$ $Id: diversity-vegan.Rnw 2807 2013-12-05 11:50:52Z jarioksa $ $
-  processed with vegan 2.0-10
-  in R Under development (unstable) (2013-12-11 r64449) on \today}}
+\date{\footnotesize{
+  processed with vegan 2.2-0
+  in R Under development (unstable) (2014-11-16 r66991) on \today}}
 
 %% need no \usepackage{Sweave}
 \begin{document}
@@ -344,14 +344,14 @@ log-series for a randomly selected plot is (Fig. \ref{fig:fisher}):
 \end{Sinput}
 \begin{Soutput}
 Fisher log series model
-No. of species: 92 
-Fisher alpha:   35.12348 
+No. of species: 86 
+Fisher alpha:   33.31374 
 \end{Soutput}
 \end{Schunk}
 \begin{figure}
 \includegraphics{diversity-vegan-017}
 \caption{Fisher's log-series fitted to one randomly selected site
-  (34).}
+  (43).}
 \label{fig:fisher}
 \end{figure}
 We already saw $\alpha$ as a diversity index.
@@ -375,7 +375,7 @@ octave, and the same for all species at the octave limits occurring 2,
 the lower octave.  Function \code{prestondistr} directly maximizes
 truncated log-normal likelihood without binning data, and it is the
 recommended alternative.  Log-normal models usually fit poorly to the
-BCI data, but here our random plot (number 34):
+BCI data, but here our random plot (number 43):
 \begin{Schunk}
 \begin{Sinput}
 > prestondistr(BCI[k,])
@@ -383,18 +383,18 @@ BCI data, but here our random plot (number 34):
 \begin{Soutput}
 Preston lognormal model
 Method: maximized likelihood to log2 abundances 
-No. of species: 92 
+No. of species: 86 
 
-      mode      width         S0 
- 0.9808822  1.7328484 24.2476646 
+     mode     width        S0 
+ 0.722374  1.867705 22.353328 
 
 Frequencies by Octave
-                0        1        2        3        4
-Observed 17.00000 24.50000 22.50000 16.00000 6.500000
-Fitted   20.65821 24.24619 20.39683 12.29845 5.315036
+                0        1       2        3        4
+Observed 17.50000 27.00000 16.5000 11.50000 9.000000
+Fitted   20.74239 22.10773 17.6901 10.62715 4.792958
                 5         6
-Observed 3.500000 2.0000000
-Fitted   1.646382 0.3655304
+Observed 2.500000 2.0000000
+Fitted   1.622897 0.4125523
 \end{Soutput}
 \end{Schunk}
 
@@ -433,26 +433,26 @@ set gives (Fig. \ref{fig:rad}):
 \end{Sinput}
 \begin{Soutput}
 RAD models, family poisson 
-No. of species 92, total abundance 447
-
-           par1      par2     par3    Deviance AIC    
-Null                                   96.957  363.040
-Preemption  0.049501                   94.601  362.684
-Lognormal   0.87031   1.2147           23.247  293.330
-Zipf        0.15445  -0.88735          20.796  290.879
-Mandelbrot  0.52179  -1.2176   2.4672   6.227  278.310
-           BIC    
-Null       363.040
-Preemption 365.205
-Lognormal  298.373
-Zipf       295.922
-Mandelbrot 285.875
+No. of species 86, total abundance 407
+
+           par1      par2     par3    Deviance
+Null                                   92.4312
+Preemption  0.054324                   82.4328
+Lognormal   0.82087   1.2361           20.2361
+Zipf        0.1632   -0.90476          20.0974
+Mandelbrot  0.57359  -1.2499   2.4591   9.5133
+           AIC      BIC     
+Null       336.9752 336.9752
+Preemption 328.9768 331.4312
+Lognormal  268.7801 273.6888
+Zipf       268.6414 273.5501
+Mandelbrot 260.0574 267.4204
 \end{Soutput}
 \end{Schunk}
 \begin{figure}
 \includegraphics{diversity-vegan-020}
 \caption{Ranked abundance distribution models for a random plot
-  (no. 34).  The best model has the lowest \textsc{aic}.}
+  (no. 43).  The best model has the lowest \textsc{aic}.}
 \label{fig:rad}
 \end{figure}
 
@@ -701,9 +701,12 @@ species is related to the number of rare species, or species seen only
 once or twice.
 
 Function \code{specpool} implements the following models to estimate
-the pool size $S_p$ \citep{SmithVanBelle84, Chao87}:
+the pool size $S_p$ \citep{SmithVanBelle84, Chao87, ChiuEtal14}:
 \begin{align}
-S_p &= S_o + \frac{f_1^2}{2 f_2} & \text{Chao}\\
+\label{eq:chao-basic}
+S_p &= S_o + \frac{f_1^2}{2 f_2} \frac{N-1}{N} & \text{Chao}\\
+\label{eq:chao-bc}
+S_p &= S_o + \frac{f_1 (f_1 -1)}{2 (f_2+1)}  \frac{N-1}{N} & \text{Chao bias-corrected}\\
 S_p &= S_o + f_1 \frac{N-1}{N}  & \text{1st order Jackknife}\\
 S_p & = S_o + f_1 \frac{2N-3}{N} \nonumber \\ & + f_2 \frac{(N-2)^2}{N(N-1)}
 & \text{2nd order Jackknife}\\
@@ -715,12 +718,36 @@ and $p_i$ are proportions of species.  The idea in jackknife seems to
 be that we missed about as many species as we saw only once, and the
 idea in bootstrap that if we repeat sampling (with replacement) from
 the same data, we miss as many species as we missed originally.
+\citet{ChiuEtal14} introduced the small-sample correction term
+$\frac{N-1}{N}$, but it was not originally used \citep{Chao87}.
+
+The variance of the estimator of the basic Chao estimate is \citep{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-basic}
+s^2 = f_2 \left(A^2 \frac{G^4}{4} + A^2 G^3 + A \frac{G^2}{2} \right),\\
+\text{where}\; A = \frac{N-1}{N}\;\text{and}\; G = \frac{f_1}{f_2} 
+\end{multline}
+The variance of bias-corrected Chao estimate can be approximated by
+replacing the terms of eq.~\ref{eq:var-chao-basic} with the
+corresponding terms in eq.~\ref{eq:chao-bc}:
+\begin{multline}
+\label{eq:var-chao-bc}
+s^2 = A \frac{f_1(f_1-1)}{2(f_2+1)} + A^2 \frac{f_1(2 f_1+1)^2}{(f_2+1)^2}\\
+ + A^2 \frac{f_1^2 f_2 (f_1 -1)^2}{4 (f_2 + 1)^4}
+\end{multline}
+If we apply the bias-correction in the special case where there are no
+doubletons ($f_2 = 0$), the variance is 
+\citep[who omit small-sample correction in some terms]{ChiuEtal14}:
+\begin{multline}
+\label{eq:var-chao-bc0}
+s^2 = \frac{1}{4} A^2 f_1 (2f_1 -1)^2 + \frac{1}{2} A f_1 (f_1-1) - \frac{1}{4}A^2 \frac{f_1^4}{S_p}
+\end{multline}
+Function \code{specpool} uses eq.~\ref{eq:chao-basic} and estimates
+its variance with eq.~\ref{eq:var-chao-basic} when $f_2 > 0$. When
+$f_2 = 0$, \code{specpool} applies eq.~\ref{eq:chao-bc} which reduces
+to $\frac{N-1}{N} \frac{1}{2} f_1 (f_1 - 1)$, and its variance
+estimator eq.~\ref{eq:var-chao-bc0}.
 
-The variance the estimator of \citet{Chao87} is:
-\begin{equation}
-s^2 = f_2 \left(\frac{G^4}{4} + G^3 + \frac{G^2}{2} \right), \,
-\text{where}\quad G = \frac{f_1}{f_2}
-\end{equation}
 The variance of the first-order jackknife is based on the number of
 ``singletons'' $r$ (species occurring only once in the data) in sample
 plots \citep{SmithVanBelle84}:
@@ -732,8 +759,9 @@ Variance of the second-order jackknife is not evaluated in
 For the variance of bootstrap estimator, it is practical to define a
 new variable $q_i = (1-p_i)^N$ for each species \citep{SmithVanBelle84}:
 \begin{multline}
-s^2 = \sum_{i=1}^{S_o} q_i (1-q_i) + 2 \sum \sum Z_p ,\\ \text{where}\; Z_p = \dots
+s^2 = \sum_{i=1}^{S_o} q_i (1-q_i)  \\ +2 \sum_{i \neq j}^{S_o} \left[(Z_{ij}/N)^N - q_i q_j \right]
 \end{multline}
+where $Z_{ij}$ is the number of sites where both species are absent.
 
 The extrapolated richness values for the whole BCI data are:
 \begin{Schunk}
@@ -741,8 +769,8 @@ The extrapolated richness values for the whole BCI data are:
 > specpool(BCI)
 \end{Sinput}
 \begin{Soutput}
-    Species     chao  chao.se  jack1 jack1.se    jack2
-All     225 236.6053 6.659395 245.58 5.650522 247.8722
+    Species     chao chao.se  jack1 jack1.se    jack2
+All     225 236.3732 6.54361 245.58 5.650522 247.8722
         boot  boot.se  n
 All 235.6862 3.468888 50
 \end{Soutput}
@@ -756,10 +784,10 @@ the plots (but this is rarely true):
 > specpool(BCI[s,])
 \end{Sinput}
 \begin{Soutput}
-    Species     chao  chao.se jack1 jack1.se    jack2
-All     207 229.3214 11.73157   231 6.499231 241.6567
+    Species     chao  chao.se  jack1 jack1.se    jack2
+All     206 229.1771 12.00792 230.96 7.394701 242.5367
         boot  boot.se  n
-All 218.3863 3.760674 25
+All 217.8287 4.374874 25
 \end{Soutput}
 \end{Schunk}
 
@@ -776,16 +804,19 @@ two of these methods:
 > estimateR(BCI[k,])
 \end{Sinput}
 \begin{Soutput}
-                 34
-S.obs     92.000000
-S.chao1  127.062500
-se.chao1  17.669342
-S.ACE    124.460040
-se.ACE     5.531529
+                 43
+S.obs     86.000000
+S.chao1  115.750000
+se.chao1  13.264138
+S.ACE    132.940236
+se.ACE     6.432043
 \end{Soutput}
 \end{Schunk}
-Chao's method is similar as above, but uses another, ``unbiased''
-equation. \textsc{ace} is based on rare species also:
+Chao's method is similar as the bias-corrected model
+eq.~\ref{eq:chao-bc} with its variance estimator
+eq.~\ref{eq:var-chao-bc}, but it uses counts of individuals instead of
+incidences, and does not use small sample correction.  \textsc{ace} is
+based on rare species also:
 \begin{equation}
 \begin{split}
 S_p &= S_\mathrm{abund} + \frac{S_\mathrm{rare}}{C_\mathrm{ACE}} +
@@ -824,14 +855,14 @@ can try:
 \end{Sinput}
 \begin{Soutput}
 Extrapolated     Observed       Veiled 
-   105.32232     92.00000     13.32232 
+    104.6503      86.0000      18.6503 
 \end{Soutput}
 \begin{Sinput}
 > veiledspec(BCI[k,])
 \end{Sinput}
 \begin{Soutput}
 Extrapolated     Observed       Veiled 
-   111.38235     92.00000     19.38235 
+   118.52239     86.00000     32.52239 
 \end{Soutput}
 \end{Schunk}
 
diff --git a/vignettes/intro-vegan.Rnw b/vignettes/intro-vegan.Rnw
index 8a253ce..1be1c38 100644
--- a/vignettes/intro-vegan.Rnw
+++ b/vignettes/intro-vegan.Rnw
@@ -6,7 +6,7 @@
 \title{Vegan: an introduction to ordination} 
 \author{Jari Oksanen}
 
-\date{\footnotesize{$ $Id: intro-vegan.Rnw 2597 2013-08-28 08:56:55Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
 \Sexpr{packageDescription("vegan", field="Version")}
 in \Sexpr{R.version.string} on \today}}
@@ -246,7 +246,7 @@ Function \code{envfit} can be called with a \code{formula}
 interface, and it optionally can assess the ``significance'' of the
 variables using permutation tests:
 <<>>=
-ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=1000)
+ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999)
 ord.fit
 @
 The result can be drawn directly or added to an ordination diagram
@@ -330,33 +330,21 @@ to give its name in full, because \proglang{R} automatically chooses the
 correct \code{anova} variant for the result of constrained
 ordination.
 
-The \code{anova.cca} function tries to be clever and lazy: it
-automatically stops if the observed permutation significance probably
-differs from the targeted critical value ($0.05$ as default), but it
-will continue long in uncertain cases.  You must set \code{step} and
-\code{perm.max} to same values to override this behaviour.
-
 It is also possible to analyse terms separately:
 <<>>=
-anova(ord, by="term", permu=200)
+anova(ord, by="term", permutations=199)
 @
-In this case, the function is unable to automatically select the
-number of iterations. This test is sequential: the terms are analysed
+This test is sequential: the terms are analysed
 in the order they happen to be in the model. You can also analyse
 significances of marginal effects (``Type III effects''):
 <<>>=
-anova(ord, by="mar")
+anova(ord, by="mar", permutations=199)
 @
 
 Moreover, it is possible to analyse significance of each axis:
 <<a>>=
-anova(ord, by="axis", perm=500)
+anova(ord, by="axis", permutations=499)
 @
-Now the automatic selection works, but typically some of your axes
-will be very close to the critical value, and it may be useful to set
-a lower \code{perm.max} than the default $10000$ (typically you use
-higher limits than in these examples: we used lower limits to save
-time when this document is automatically generated with this package).
 
 \subsection{Conditioned or partial ordination}
 
@@ -370,13 +358,17 @@ This partials out the effect of \code{Moisture} before analysing the
 effects of \code{A1} and \code{Management}.  This also influences
 the significances of the terms:
 <<>>=
-anova(ord, by="term", perm=500)
+anova(ord, by="term", permutations=499)
 @
 If we had a designed experiment, we may wish to restrict the
 permutations so that the observations only are permuted within levels
-of \code{strata}:
+of \code{Moisture}. Restricted permutation is based on the powerful
+\pkg{permute} package. Function \code{how()} can be used to define
+permutation schemes. In the following, we set the levels with
+\code{plots} argument:
 <<>>=
-anova(ord, by="term", perm=500, strata=Moisture)
+how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture))
+anova(ord, by="term", permutations = how)
 @
 
 %%%%%%%%%%%%%%%%%%%
diff --git a/vignettes/intro-vegan.tex b/vignettes/intro-vegan.tex
index ad70f6d..13c5c70 100644
--- a/vignettes/intro-vegan.tex
+++ b/vignettes/intro-vegan.tex
@@ -6,10 +6,10 @@
 \title{Vegan: an introduction to ordination} 
 \author{Jari Oksanen}
 
-\date{\footnotesize{$ $Id: intro-vegan.Rnw 2597 2013-08-28 08:56:55Z jarioksa $ $
+\date{\footnotesize{
   processed with vegan
-2.0-10
-in R Under development (unstable) (2013-12-11 r64449) on \today}}
+2.2-0
+in R Under development (unstable) (2014-11-16 r66991) on \today}}
 
 %% need no \usepackage{Sweave}
 \begin{document}
@@ -81,9 +81,9 @@ Detrended correspondence analysis with 26 segments.
 Rescaling of axes with 4 iterations.
 
                   DCA1   DCA2    DCA3    DCA4
-Eigenvalues     0.5117 0.3036 0.12125 0.14266
+Eigenvalues     0.5117 0.3036 0.12125 0.14267
 Decorana values 0.5360 0.2869 0.08136 0.04814
-Axis lengths    3.7004 3.1166 1.30057 1.47883
+Axis lengths    3.7004 3.1166 1.30055 1.47888
 \end{Soutput}
 \end{Schunk}
 The display of results is very brief: only eigenvalues and used
@@ -113,12 +113,13 @@ species scores to the configuration as weighted averages (function
 \end{Sinput}
 \begin{Soutput}
 Run 0 stress 0.1192678 
-Run 1 stress 0.1183186 
+Run 1 stress 0.1886532 
+Run 2 stress 0.1183186 
 ... New best solution
-... procrustes: rmse 0.02026951  max resid 0.06495418 
-Run 2 stress 0.1886532 
-Run 3 stress 0.1183186 
-... procrustes: rmse 1.293513e-05  max resid 4.605534e-05 
+... procrustes: rmse 0.02026887  max resid 0.06495011 
+Run 3 stress 0.1192678 
+Run 4 stress 0.1183186 
+... procrustes: rmse 6.471967e-05  max resid 0.000210315 
 *** Solution reached
 \end{Soutput}
 \begin{Sinput}
@@ -136,7 +137,7 @@ Distance: bray
 Dimensions: 2 
 Stress:     0.1183186 
 Stress type 1, weak ties
-Two convergent solutions found after 3 tries
+Two convergent solutions found after 4 tries
 Scaling: centring, PC rotation, halfchange scaling 
 Species: expanded scores based on ‘dune’ 
 \end{Soutput}
@@ -288,33 +289,35 @@ interface, and it optionally can assess the ``significance'' of the
 variables using permutation tests:
 \begin{Schunk}
 \begin{Sinput}
-> ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=1000)
+> ord.fit <- envfit(ord ~ A1 + Management, data=dune.env, perm=999)
 > ord.fit
 \end{Sinput}
 \begin{Soutput}
 ***VECTORS
 
-     NMDS1   NMDS2     r2  Pr(>r)  
-A1 0.96474 0.26320 0.3649 0.02298 *
+     NMDS1   NMDS2     r2 Pr(>r)  
+A1 0.96473 0.26325 0.3649  0.017 *
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
-P values based on 1000 permutations.
+Permutation: free
+Number of permutations: 999
 
 ***FACTORS:
 
 Centroids:
                NMDS1   NMDS2
-ManagementBF -0.4534 -0.0102
-ManagementHF -0.2636 -0.1282
-ManagementNM  0.2958  0.5790
+ManagementBF -0.4534 -0.0103
+ManagementHF -0.2635 -0.1282
+ManagementNM  0.2957  0.5790
 ManagementSF  0.1506 -0.4670
 
 Goodness of fit:
-               r2   Pr(>r)   
-Management 0.4134 0.004995 **
+               r2 Pr(>r)   
+Management 0.4134  0.009 **
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
-P values based on 1000 permutations.
+Permutation: free
+Number of permutations: 999
 \end{Soutput}
 \end{Schunk}
 The result can be drawn directly or added to an ordination diagram
@@ -339,12 +342,12 @@ Link function: identity
 
 Formula:
 y ~ s(x1, x2, k = 10, bs = "tp", fx = FALSE)
-<environment: 0x3c181e8>
+<environment: 0x6e6fa00>
 
 Estimated degrees of freedom:
 1.59  total = 2.59 
 
-REML score: 41.58727
+REML score: 41.58727     
 \end{Soutput}
 \end{Schunk}
 \begin{figure}
@@ -382,14 +385,14 @@ Unconstrained  1.3355     0.6314   15
 Inertia is mean squared contingency coefficient 
 
 Eigenvalues for constrained axes:
-   CCA1    CCA2    CCA3    CCA4 
-0.31875 0.23718 0.13217 0.09168 
+  CCA1   CCA2   CCA3   CCA4 
+0.3187 0.2372 0.1322 0.0917 
 
 Eigenvalues for unconstrained axes:
-     CA1      CA2      CA3      CA4      CA5      CA6      CA7      CA8 
-0.362024 0.202884 0.152661 0.134549 0.110957 0.079982 0.076698 0.055267 
-     CA9     CA10     CA11     CA12     CA13     CA14     CA15 
-0.044361 0.041528 0.031699 0.017786 0.011642 0.008736 0.004711 
+   CA1    CA2    CA3    CA4    CA5    CA6    CA7    CA8    CA9   CA10 
+0.3620 0.2029 0.1527 0.1345 0.1110 0.0800 0.0767 0.0553 0.0444 0.0415 
+  CA11   CA12   CA13   CA14   CA15 
+0.0317 0.0178 0.0116 0.0087 0.0047 
 \end{Soutput}
 \end{Schunk}
 The results can be plotted with (Fig. \ref{fig:cca}):
@@ -431,10 +434,10 @@ Inertia is mean squared contingency coefficient
 Some constraints were aliased because they were collinear (redundant)
 
 Eigenvalues for constrained axes:
-   CCA1    CCA2    CCA3    CCA4    CCA5    CCA6    CCA7    CCA8    CCA9 
-0.46713 0.34102 0.17606 0.15317 0.09528 0.07027 0.05887 0.04993 0.03183 
-  CCA10   CCA11   CCA12 
-0.02596 0.02282 0.01082 
+  CCA1   CCA2   CCA3   CCA4   CCA5   CCA6   CCA7   CCA8   CCA9  CCA10 
+0.4671 0.3410 0.1761 0.1532 0.0953 0.0703 0.0589 0.0499 0.0318 0.0260 
+ CCA11  CCA12 
+0.0228 0.0108 
 
 Eigenvalues for unconstrained axes:
     CA1     CA2     CA3     CA4     CA5     CA6     CA7 
@@ -454,11 +457,13 @@ simultaneously:
 \end{Sinput}
 \begin{Soutput}
 Permutation test for cca under reduced model
+Permutation: free
+Number of permutations: 999
 
 Model: cca(formula = dune ~ A1 + Management, data = dune.env)
-         Df  Chisq      F N.Perm Pr(>F)   
-Model     4 0.7798 2.1896    199  0.005 **
-Residual 15 1.3355                        
+         Df ChiSquare      F Pr(>F)    
+Model     4   0.77978 2.1896  0.001 ***
+Residual 15   1.33549                  
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
@@ -468,47 +473,44 @@ to give its name in full, because \proglang{R} automatically chooses the
 correct \code{anova} variant for the result of constrained
 ordination.
 
-The \code{anova.cca} function tries to be clever and lazy: it
-automatically stops if the observed permutation significance probably
-differs from the targeted critical value ($0.05$ as default), but it
-will continue long in uncertain cases.  You must set \code{step} and
-\code{perm.max} to same values to override this behaviour.
-
 It is also possible to analyse terms separately:
 \begin{Schunk}
 \begin{Sinput}
-> anova(ord, by="term", permu=200)
+> anova(ord, by="term", permutations=199)
 \end{Sinput}
 \begin{Soutput}
 Permutation test for cca under reduced model
 Terms added sequentially (first to last)
+Permutation: free
+Number of permutations: 199
 
 Model: cca(formula = dune ~ A1 + Management, data = dune.env)
-           Df  Chisq      F N.Perm Pr(>F)   
-A1          1 0.2248 2.5245    199  0.015 * 
-Management  3 0.5550 2.0780    199  0.005 **
-Residual   15 1.3355                        
+           Df ChiSquare      F Pr(>F)   
+A1          1   0.22476 2.5245  0.025 * 
+Management  3   0.55502 2.0780  0.010 **
+Residual   15   1.33549                 
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
 \end{Schunk}
-In this case, the function is unable to automatically select the
-number of iterations. This test is sequential: the terms are analysed
+This test is sequential: the terms are analysed
 in the order they happen to be in the model. You can also analyse
 significances of marginal effects (``Type III effects''):
 \begin{Schunk}
 \begin{Sinput}
-> anova(ord, by="mar")
+> anova(ord, by="mar", permutations=199)
 \end{Sinput}
 \begin{Soutput}
 Permutation test for cca under reduced model
 Marginal effects of terms
+Permutation: free
+Number of permutations: 199
 
 Model: cca(formula = dune ~ A1 + Management, data = dune.env)
-           Df  Chisq      F N.Perm  Pr(>F)   
-A1          1 0.1759 1.9761    699 0.02857 * 
-Management  3 0.5550 2.0780    199 0.00500 **
-Residual   15 1.3355                         
+           Df ChiSquare      F Pr(>F)   
+A1          1   0.17594 1.9761   0.06 . 
+Management  3   0.55502 2.0780   0.01 **
+Residual   15   1.33549                 
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
@@ -517,25 +519,25 @@ Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’
 Moreover, it is possible to analyse significance of each axis:
 \begin{Schunk}
 \begin{Sinput}
-> anova(ord, by="axis", perm=500)
+> anova(ord, by="axis", permutations=499)
 \end{Sinput}
 \begin{Soutput}
+Permutation test for cca under reduced model
+Marginal tests for axes
+Permutation: free
+Number of permutations: 499
+
 Model: cca(formula = dune ~ A1 + Management, data = dune.env)
-         Df  Chisq      F N.Perm  Pr(>F)   
-CCA1      1 0.3187 3.5801    199 0.00500 **
-CCA2      1 0.2372 2.6640    299 0.01667 * 
-CCA3      1 0.1322 1.4845    199 0.11500   
-CCA4      1 0.0917 1.0297     99 0.33000   
-Residual 15 1.3355                         
+         Df ChiSquare      F Pr(>F)   
+CCA1      1   0.31875 3.5801  0.002 **
+CCA2      1   0.23718 2.6640  0.004 **
+CCA3      1   0.13217 1.4845  0.102   
+CCA4      1   0.09168 1.0297  0.390   
+Residual 15   1.33549                 
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
 \end{Schunk}
-Now the automatic selection works, but typically some of your axes
-will be very close to the critical value, and it may be useful to set
-a lower \code{perm.max} than the default $10000$ (typically you use
-higher limits than in these examples: we used lower limits to save
-time when this document is automatically generated with this package).
 
 \subsection{Conditioned or partial ordination}
 
@@ -562,10 +564,10 @@ Eigenvalues for constrained axes:
 0.24932 0.12090 0.08160 0.05904 
 
 Eigenvalues for unconstrained axes:
-     CA1      CA2      CA3      CA4      CA5      CA6      CA7      CA8 
-0.306366 0.131911 0.115157 0.109469 0.077242 0.075754 0.048714 0.037582 
-     CA9     CA10     CA11     CA12 
-0.031058 0.021024 0.012542 0.009277 
+    CA1     CA2     CA3     CA4     CA5     CA6     CA7     CA8     CA9 
+0.30637 0.13191 0.11516 0.10947 0.07724 0.07575 0.04871 0.03758 0.03106 
+   CA10    CA11    CA12 
+0.02102 0.01254 0.00928 
 \end{Soutput}
 \end{Schunk}
 This partials out the effect of \code{Moisture} before analysing the
@@ -573,38 +575,46 @@ effects of \code{A1} and \code{Management}.  This also influences
 the significances of the terms:
 \begin{Schunk}
 \begin{Sinput}
-> anova(ord, by="term", perm=500)
+> anova(ord, by="term", permutations=499)
 \end{Sinput}
 \begin{Soutput}
 Permutation test for cca under reduced model
 Terms added sequentially (first to last)
+Permutation: free
+Number of permutations: 499
 
 Model: cca(formula = dune ~ A1 + Management + Condition(Moisture), data = dune.env)
-           Df  Chisq      F N.Perm Pr(>F)   
-A1          1 0.1154 1.4190     99   0.15   
-Management  3 0.3954 1.6205     99   0.01 **
-Residual   12 0.9761                        
+           Df ChiSquare      F Pr(>F)  
+A1          1   0.11543 1.4190  0.122  
+Management  3   0.39543 1.6205  0.012 *
+Residual   12   0.97610                
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
 \end{Schunk}
 If we had a designed experiment, we may wish to restrict the
 permutations so that the observations only are permuted within levels
-of \code{strata}:
+of \code{Moisture}. Restricted permutation is based on the powerful
+\pkg{permute} package. Function \code{how()} can be used to define
+permutation schemes. In the following, we set the levels with
+\code{plots} argument:
 \begin{Schunk}
 \begin{Sinput}
-> anova(ord, by="term", perm=500, strata=Moisture)
+> how <- how(nperm=499, plots = Plots(strata=dune.env$Moisture))
+> anova(ord, by="term", permutations = how)
 \end{Sinput}
 \begin{Soutput}
 Permutation test for cca under reduced model
 Terms added sequentially (first to last)
-Permutations stratified within 'Moisture'
+Plots: dune.env$Moisture, plot permutation: none
+Permutation: free
+Number of permutations: 499
 
 Model: cca(formula = dune ~ A1 + Management + Condition(Moisture), data = dune.env)
-           Df  Chisq      F N.Perm Pr(>F)   
-A1          1 0.1154 1.4190     99   0.30   
-Management  3 0.3954 1.6205     99   0.01 **
-Residual   12 0.9761                        
+           Df ChiSquare      F Pr(>F)   
+A1          1   0.11543 1.4190  0.242   
+Management  3   0.39543 1.6205  0.002 **
+Residual   12   0.97610                 
 ---
 Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
 \end{Soutput}
diff --git a/vignettes/vegan.bib b/vignettes/vegan.bib
index 64065dd..4e82c52 100644
--- a/vignettes/vegan.bib
+++ b/vignettes/vegan.bib
@@ -1,4 +1,14 @@
 
+@Article{ChiuEtal14,
+  author =	 {C. H. Chiu and Y. T. Wang and B. A. Walther and
+                  A. Chao},
+  title =	 {An improved nonparametric lower bound of species
+                  richness via a modified {G}ood-{T}uring frequency
+                  formula},
+  journal =	 {Biometrics},
+  year =	 2014,
+  volume =	 70,
+  pages =	 {671--682}}
 
 @Article{DeCaceresLegendre08,
   author =	 {M. {D}e~{C}{\'a}ceres and P. Legendre},

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-vegan.git



More information about the debian-med-commit mailing list