[med-svn] [r-cran-bbmle] 05/11: New upstream version 1.0.18

Andreas Tille tille at debian.org
Fri Sep 29 18:15:34 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-bbmle.

commit d8b4da0634e8253e9c57236fa093b617df3bb53c
Author: Andreas Tille <tille at debian.org>
Date:   Fri Sep 29 20:11:16 2017 +0200

    New upstream version 1.0.18
---
 DESCRIPTION                             |   20 +
 MD5                                     |  114 +++
 NAMESPACE                               |   32 +
 NEWS                                    |  203 ++++
 R/IC.R                                  |  238 +++++
 R/confint.R                             |  155 +++
 R/dists.R                               |   79 ++
 R/mle.R                                 |  784 +++++++++++++++
 R/mle2-class.R                          |   44 +
 R/mle2-methods.R                        |  171 ++++
 R/predict.R                             |  114 +++
 R/profile.R                             |  410 ++++++++
 R/slice.R                               |  388 ++++++++
 R/update.R                              |   43 +
 TODO                                    |  136 +++
 build/vignette.rds                      |  Bin 0 -> 285 bytes
 debian/README.test                      |    8 -
 debian/changelog                        |   22 -
 debian/compat                           |    1 -
 debian/control                          |   26 -
 debian/copyright                        |   27 -
 debian/docs                             |    3 -
 debian/rules                            |   12 -
 debian/source/format                    |    1 -
 debian/tests/control                    |    3 -
 debian/tests/run-unit-test              |   61 --
 debian/watch                            |    2 -
 inst/NEWS.Rd                            |  494 +++++++++
 inst/doc/mle2.R                         |  196 ++++
 inst/doc/mle2.Rnw                       |  813 +++++++++++++++
 inst/doc/mle2.pdf                       |  Bin 0 -> 249971 bytes
 inst/doc/quasi.R                        |   72 ++
 inst/doc/quasi.Rnw                      |  191 ++++
 inst/doc/quasi.pdf                      |  Bin 0 -> 134912 bytes
 inst/unitTests/Makefile                 |   15 +
 inst/unitTests/boundstest.R             |   16 +
 inst/vignetteData/orob1.rda             |  Bin 0 -> 280 bytes
 man/BIC-methods.Rd                      |   92 ++
 man/ICtab.Rd                            |   80 ++
 man/as.data.frame.profile.mle2.Rd       |   50 +
 man/call.to.char.Rd                     |   27 +
 man/get.mnames.Rd                       |   17 +
 man/mle-class.Rd                        |   91 ++
 man/mle2.Rd                             |  235 +++++
 man/mle2.options.Rd                     |   32 +
 man/namedrop.Rd                         |   31 +
 man/parnames.Rd                         |   43 +
 man/predict-methods.Rd                  |   74 ++
 man/profile-methods.Rd                  |   54 +
 man/profile.mle-class.Rd                |  153 +++
 man/relist.Rd                           |   26 +
 man/sbinom.Rd                           |   58 ++
 man/slice.Rd                            |  112 +++
 man/slice.mle-class.Rd                  |   31 +
 man/strwrapx.Rd                         |   62 ++
 man/summary.mle-class.Rd                |   34 +
 tests/BIC.R                             |    8 +
 tests/BIC.Rout.save                     |   38 +
 tests/ICtab.R                           |   11 +
 tests/ICtab.Rout.save                   |   38 +
 tests/Makefile                          |   18 +
 tests/RUnit-tests.R                     |    7 +
 tests/binomtest1.R                      |   42 +
 tests/binomtest1.Rout                   |  114 +++
 tests/binomtest1.Rout.save              |   76 ++
 tests/controleval.R                     |   30 +
 tests/controleval.Rout.save             |  104 ++
 tests/doRUnit.R                         |   63 ++
 tests/eval.R                            |   66 ++
 tests/eval.Rout.save                    |  114 +++
 tests/formulatest.R                     |  311 ++++++
 tests/formulatest.Rout.save             |  393 ++++++++
 tests/glmcomp.R                         |   25 +
 tests/glmcomp.Rout.save                 |   49 +
 tests/gradient_vecpar_profile.R         |   43 +
 tests/gradient_vecpar_profile.Rout.save |   67 ++
 tests/grtest1.R                         |    9 +
 tests/grtest1.Rout.save                 |   60 ++
 tests/makesavefiles                     |    1 +
 tests/methods.R                         |   24 +
 tests/methods.Rout.save                 |   90 ++
 tests/mkout                             |    1 +
 tests/mortanal.R                        |   67 ++
 tests/mortanal.Rout.save                |  143 +++
 tests/optimize.R                        |   27 +
 tests/optimize.Rout.save                |   67 ++
 tests/optimizers.R                      |   24 +
 tests/optimizers.Rout.save              |   62 ++
 tests/optimx.R                          |   22 +
 tests/optimx.Rout.save                  |   55 +
 tests/order.R                           |   23 +
 tests/order.Rout.save                   |   58 ++
 tests/parscale.R                        |   78 ++
 tests/parscale.Rout                     |  174 ++++
 tests/parscale.Rout.save                |  130 +++
 tests/predict.R                         |   32 +
 tests/predict.Rout.save                 |   57 ++
 tests/profbound.R                       |   23 +
 tests/profbound.Rout.save               |   50 +
 tests/richards.R                        |   92 ++
 tests/richards.Rout.save                |  139 +++
 tests/startvals.R                       |   30 +
 tests/startvals.Rout.save               |   58 ++
 tests/startvals2.R                      |  193 ++++
 tests/startvals2.Rout.save              |  231 +++++
 tests/test-relist1.R                    |   17 +
 tests/test-relist1.Rout.save            |   41 +
 tests/testbounds.R                      |   14 +
 tests/testbounds.Rout                   |   39 +
 tests/testbounds.Rout.save              |   46 +
 tests/testderiv.R                       |   59 ++
 tests/testderiv.Rout.save               |  107 ++
 tests/testenv.R                         |   16 +
 tests/testenv.Rout.save                 |   48 +
 tests/testparpred.R                     |   28 +
 tests/testparpred.Rout.save             |   53 +
 tests/tmptest.R                         |   10 +
 tests/tmptest.Rout.save                 |   45 +
 tests/update.R                          |   17 +
 tests/update.Rout.save                  |   72 ++
 vignettes/cc-attrib-nc.png              |  Bin 0 -> 5145 bytes
 vignettes/chicago.bst                   | 1654 +++++++++++++++++++++++++++++++
 vignettes/clean                         |    1 +
 vignettes/mle2.Rnw                      |  813 +++++++++++++++
 vignettes/mle2.bib                      |   19 +
 vignettes/quasi.Rnw                     |  191 ++++
 126 files changed, 12957 insertions(+), 166 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..7befc11
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,20 @@
+Package: bbmle
+Title: Tools for General Maximum Likelihood Estimation
+Version: 1.0.18
+Author: Ben Bolker <bolker at mcmaster.ca>, R Development Core Team
+Maintainer: Ben Bolker <bolker at mcmaster.ca>
+Depends: R (>= 3.0.0), stats4
+Imports: stats, numDeriv, lattice, MASS, methods
+Suggests: emdbook, rms, ggplot2, RUnit, MuMIn, AICcmodavg, Hmisc,
+        optimx (>= 2013.8.6), knitr, testthat
+VignetteBuilder: knitr
+BuildVignettes: yes
+Description: Methods and functions for fitting maximum likelihood models in R.
+    This package modifies and extends the 'mle' classes in the 'stats4' package.
+License: GPL
+Collate: 'mle2-class.R' 'mle2-methods.R' 'mle.R' 'confint.R'
+        'predict.R' 'profile.R' 'update.R' 'dists.R' 'IC.R' 'slice.R'
+NeedsCompilation: no
+Packaged: 2016-02-11 15:22:35 UTC; bolker
+Repository: CRAN
+Date/Publication: 2016-02-11 16:57:55
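
As context for the new sources below: bbmle's central entry point is mle2(),
which fits a negative log-likelihood (or a model formula) and returns an
object supporting coef(), confint(), profile(), and the IC tables added in
R/IC.R. A minimal sketch with made-up data (illustrative only, not part of
the upstream sources):

    library(bbmle)
    set.seed(101)
    d <- data.frame(x = rpois(100, lambda = 5))
    ## fit the Poisson mean via the formula interface
    fit <- mle2(x ~ dpois(lambda = lam), start = list(lam = 4), data = d)
    coef(fit)     ## should be near 5
    confint(fit)  ## profile-likelihood 95% interval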
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..7beb224
--- /dev/null
+++ b/MD5
@@ -0,0 +1,114 @@
+3cb95ce6744cb07ceefd92cde7c7e544 *DESCRIPTION
+43132b4d01f40ed3509e8d74e5f0a00a *NAMESPACE
+254b14447eca543cd4b48657a57d1dad *NEWS
+1fb14e5d13c8fe35972e7184ecc3b04f *R/IC.R
+2625b509110e94cceed414dc1ca9101d *R/confint.R
+0a63c5da03a0e73ae29f6c1c4f2fd5cd *R/dists.R
+ac81141b449f31d6d2929bf2c4f559e8 *R/mle.R
+e695b7c949576d0927c0c2bf5a8e203c *R/mle2-class.R
+e6ca14e61d3632f6f91ec7a84d41f5f4 *R/mle2-methods.R
+2d3da6aaa0a07bd64c320a718d87d31e *R/predict.R
+be29fa8693c23b1a08ce4ddbe16e7307 *R/profile.R
+2c39b590a0c236bff22bb00252544082 *R/slice.R
+28b4b4a714c1beebcc7516b888e1b641 *R/update.R
+3d7347481a3e76bb6d57990dd789c3a4 *TODO
+d373722780980723c518f40de4333748 *build/vignette.rds
+31551754937d4aba61a3096e17c64978 *inst/NEWS.Rd
+53afe4d9bcf215a652c07cacdd316976 *inst/doc/mle2.R
+cd0d040f1d726e0d550dd694b84ce196 *inst/doc/mle2.Rnw
+261bb4cec6fec1afd0b51c388f04629d *inst/doc/mle2.pdf
+e4073ae8723f00fe0f3b728db2b31a16 *inst/doc/quasi.R
+c5afe01abc61f4b419917852593b9e4b *inst/doc/quasi.Rnw
+605fa1a94e9b61f0e8ac05ae8d94122a *inst/doc/quasi.pdf
+334f0de6ed55dc79f59addf091097353 *inst/unitTests/Makefile
+bf9cb0badb64c11e22d1b7d15c060a73 *inst/unitTests/boundstest.R
+a399ce19c47219ea3474978d2f4ecac6 *inst/vignetteData/orob1.rda
+fad7f0284df8a44372565c480f8e4dfb *man/BIC-methods.Rd
+7a309d55019db340dc2b1fa5e662ab32 *man/ICtab.Rd
+2973cf3eb548b5ab9cd3c3eec676ddff *man/as.data.frame.profile.mle2.Rd
+b6ce8a230403e4049aeb543dcdf7f889 *man/call.to.char.Rd
+8f4ce3f14c61679b0583aada2d2c6493 *man/get.mnames.Rd
+a6b15977ffaf14454629d969f979e9c4 *man/mle-class.Rd
+e3c46635b577a2f0245a30f5be310f01 *man/mle2.Rd
+c8bdc07658fc20e685b36d709d1ced51 *man/mle2.options.Rd
+5402485d416bd59d04bbf4c4ea34c999 *man/namedrop.Rd
+7a0bc1dbcb08bc40ee81b7870967c1ef *man/parnames.Rd
+efce19f271b87e19638cb0683f7f6bd8 *man/predict-methods.Rd
+382f3a6f1bf091225064e1f293b72774 *man/profile-methods.Rd
+33e8d338881d137e634c09eb64baecc1 *man/profile.mle-class.Rd
+ea4640bf21b60e594d437304c3910e85 *man/relist.Rd
+56db6345ce930b55ae4addbfa6afc6e3 *man/sbinom.Rd
+eba67df829390e8cd96db70c52ed6fdd *man/slice.Rd
+61aeb7bd6d5999f71fac365524a4b818 *man/slice.mle-class.Rd
+bc2aec35cda556cb0977380afebd4ca9 *man/strwrapx.Rd
+1c94867c2e5c5b7239f62290d254da0a *man/summary.mle-class.Rd
+677bab474659dbf8e1f16061a32e5f03 *tests/BIC.R
+c5f6c880e3fc121e0d6f16193014469c *tests/BIC.Rout.save
+a7a3544c028cf6e1c858559f15053914 *tests/ICtab.R
+42ab8c78d826e7b82a8eedaaabfcc7b0 *tests/ICtab.Rout.save
+3ab6379e35c758d75dbe2dd204e4766d *tests/Makefile
+7e791632cd72a0dab72b6b1059b85273 *tests/RUnit-tests.R
+202d16aa2bf77be5df020bda2240703e *tests/binomtest1.R
+138465684c603e66d87035baabc03f65 *tests/binomtest1.Rout
+de4898499c070e21485ddaed01e73c09 *tests/binomtest1.Rout.save
+055f3f858af92dac796c775fdb6cffe5 *tests/controleval.R
+54d3a16476aff59b8947a9b218733be5 *tests/controleval.Rout.save
+614b9446be434240549669abbf8cd88e *tests/doRUnit.R
+4421d42f41892221c6604df13514fab4 *tests/eval.R
+3d411aa0bc3cdad597b17fde4b539732 *tests/eval.Rout.save
+98e85875b0f557344a830f8957c600f1 *tests/formulatest.R
+6dbd75bdf12de303e957d0ae2d643e04 *tests/formulatest.Rout.save
+aa886a9c7ab1b518abd247d7d20e1ef6 *tests/glmcomp.R
+631d9de06283af92a3d567576f994553 *tests/glmcomp.Rout.save
+c5eaa3b836964c3c3a3588b13730e098 *tests/gradient_vecpar_profile.R
+332514b495edf939128e0b68c79b59c8 *tests/gradient_vecpar_profile.Rout.save
+8cb5d68e069f52992e480a7066e054ee *tests/grtest1.R
+097d2e49e2448a0bf7cb8566e39cc93d *tests/grtest1.Rout.save
+763a796aaa2bfa27693b4a8cb57783e2 *tests/makesavefiles
+6cf3e83f5491806bf7f8a75faafe2695 *tests/methods.R
+bd137b0505a83b54357345cdceb59dcb *tests/methods.Rout.save
+96ca4f4b592712ec3510bc4028a51bbe *tests/mkout
+5620dedeca0fe6b27ac488f28aef88b3 *tests/mortanal.R
+0e6681e8b20a7f49b1d47e84c2930590 *tests/mortanal.Rout.save
+4e53341cdd5f4fad2b42e54d61f1ccab *tests/optimize.R
+103b501ae4106a7da4917889d8019f38 *tests/optimize.Rout.save
+5e63a0d8e88b78f5bf84228b62f051fc *tests/optimizers.R
+b0cb07cae3015f7e56eef6708a47236e *tests/optimizers.Rout.save
+6c7b4ba28cecd89bb7f829573a2aeec8 *tests/optimx.R
+194b7c7d97d6f090cc2c58a6bb360297 *tests/optimx.Rout.save
+05f0d13ee00153918cf5d7bbe5acb61c *tests/order.R
+4bd3539efe7bdd3e2a6fc045f653b1a4 *tests/order.Rout.save
+21cf9832b13ec31b5e67e6763f80d5da *tests/parscale.R
+35577e9f38298835e572fd224048a607 *tests/parscale.Rout
+30b0b9c51cec72ecde06be963c9d3b6f *tests/parscale.Rout.save
+adf07c6ff92b4ae6f8ece745a93b1522 *tests/predict.R
+df6f12096d996324b2d19467b9905892 *tests/predict.Rout.save
+68edb941f246a47564617d7aea9647bd *tests/profbound.R
+ee5f86f38e1dfc8a69958e5d5b07df08 *tests/profbound.Rout.save
+3c624de2efa1f848c87d71e5e7cb5641 *tests/richards.R
+f25a35605243aa846d871f43ad5159af *tests/richards.Rout.save
+c703480c59bde85cdd3c51bd59d83975 *tests/startvals.R
+876a9cad0e580eda029eeb6e7d5168dd *tests/startvals.Rout.save
+71d7ebe63a25d910873f75c0a7dfa3a0 *tests/startvals2.R
+2ee0ef656d972b559e69ec3c53e384f6 *tests/startvals2.Rout.save
+75cd2bbf2e5255c1c3eac7ccfa5765a3 *tests/test-relist1.R
+c118f8284b641d973e449de5afd584f9 *tests/test-relist1.Rout.save
+1dda6925aa3654d83943ddda6412d714 *tests/testbounds.R
+9a4d9c64de4b0d973bbf93715fa3e3f7 *tests/testbounds.Rout
+375be792dbfd82d6d56aeb19006488af *tests/testbounds.Rout.save
+ba254da51e09a22e84f803832382fc11 *tests/testderiv.R
+318b6a073389d6638ba88b2892421af9 *tests/testderiv.Rout.save
+0244d9b234c38b94b099365398dad281 *tests/testenv.R
+a16a851cc68fabac462594986166e36e *tests/testenv.Rout.save
+6a8dd303587eaf35a465b2e062264b50 *tests/testparpred.R
+01059ad5c653ce771ecbd81d4946026f *tests/testparpred.Rout.save
+4a76e0b4daec5dc81b0378e7bdb67826 *tests/tmptest.R
+dd885bf956855f37df24d0dbe37ba7bd *tests/tmptest.Rout.save
+2d49b0803524b896e48d6879d18f8190 *tests/update.R
+53661890c555a4f7e5c21accbe775fed *tests/update.Rout.save
+0a27805bbe6b6d67ef37f760dc991917 *vignettes/cc-attrib-nc.png
+cd2df3f6f14e5d0af434d1aa53b7a0ed *vignettes/chicago.bst
+bc870713cfebc6c5b5fa52731fd3162b *vignettes/clean
+cd0d040f1d726e0d550dd694b84ce196 *vignettes/mle2.Rnw
+ae21998f0dafa40e30841d4abc02ceed *vignettes/mle2.bib
+c5afe01abc61f4b419917852593b9e4b *vignettes/quasi.Rnw
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100755
index 0000000..b2b57e8
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,32 @@
+export(mle2,call.to.char,namedrop,parnames,"parnames<-",relist2)
+export(sbinom,snorm,sbeta,snbinom,spois,sbetabinom)
+export(ICtab,AICtab,BICtab,AICctab)
+export(stdEr,vcov)
+export(slice,sliceOld,slice1D,slice2D)
+exportClasses(mle2)
+exportMethods(AIC, AICc, qAICc, qAIC, 
+              profile, coef, confint, logLik,
+              update, vcov, anova, deviance, residuals,
+              simulate, predict, formula, plot, stdEr, summary)
+importClassesFrom(stats4,mle)
+importFrom(stats4,coef,confint,logLik,BIC,summary,profile,vcov,AIC, update, plot)
+importFrom(stats,
+           anova,deviance,residuals,
+           simulate,predict,formula)
+importFrom(methods,setMethod)
+importFrom(lattice,xyplot,splom,diag.panel.splom,panel.abline,panel.number,panel.points,panel.xyplot)  ## for slice methods
+importFrom(numDeriv,hessian,grad)
+importFrom("grDevices", "dev.interactive")
+importFrom("graphics", "abline", "lines", "par", "points", "text")
+importFrom("methods", "new")
+importFrom("stats", "approx", "approxfun", "as.formula", "constrOptim",
+             "deriv", "model.matrix", "na.omit", "nlm", "nlminb",
+             "optimize", "pchisq", "pnorm", "printCoefmat", "qbeta",
+             "qbinom", "qchisq", "qnbinom", "qnorm", "qpois", "setNames",
+             "spline", "uniroot", "update.formula")
+S3method(as.data.frame,profile.mle2)
+S3method(print,ICtab)
+S3method(slice,mle2)
+S3method(plot,slice)
+S3method(xyplot,slice)
+S3method(splom,slice)
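
The parnames(), "parnames<-", and vecpar machinery exported above lets an
objective function take a single named parameter vector instead of one
argument per parameter. A sketch with invented data and function names:

    library(bbmle)
    y <- rnorm(50)
    nll <- function(p) {
        -sum(dnorm(y, mean = p["mu"], sd = exp(p["logsd"]), log = TRUE))
    }
    parnames(nll) <- c("mu", "logsd")
    fit <- mle2(nll, start = c(mu = 0, logsd = 0),
                data = list(y = y), vecpar = TRUE)
    stdEr(fit)  ## exported standard-error accessor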
diff --git a/NEWS b/NEWS
new file mode 100755
index 0000000..4e9eba4
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,203 @@
+0.9.8
+ * gradient functions work better with fixed parameters, hence with profiling
+ * profile plot reverts to linear for non-monotonic profile
+ * added warning in confint for non-monotonic profile; revert
+    from spline+linear to linear approximation in this case
+ * documentation improvements
+ * optimx improvements
+ * require data= argument when using formula interface
+ * turn off hessian computation in profile
+ * allow use of MASS::ginv
+
+0.9.7
+ * bug fix in calc_mle2_function for no-intercept models
+  (thanks to Colin Kremer)
+ * fixed optimx, added 'user' option
+
+0.9.6
+ * changed hessian calculation to use numDeriv code (causes tiny
+changes to hessian results that could matter in edge cases)
+  too lazy to provide a backward compatibility mode ...
+ * documented optimizer= choices in ?mle2
+
+0.9.5.1
+ * fixed bug in AICc (David Harris)
+
+
+0.9.5 
+
+  too many changes!
+ * added NAMESPACE, various fixes to go with that
+ * beginnings of an RUnit testing framework
+ * tweaked vignette
+ * added prof.lower, prof.upper to profile()
+ * added "optimize" to list of allowed optimizers, some
+   bug fixes
+
+0.9.4.1
+
+ * tweaked par() resetting in profile plots
+
+0.9.4
+
+ * more qAICc fixing
+
+0.9.3 (version bump from previous) 18/09/2009
+
+* tweaked handling of bounds: profile now succeeds
+  on some 1D problems where it didn't before
+
+15/08/2009
+
+* added deviance, residuals methods
+* added newparams argument to predict, simulate;
+   newdata argument to simulate
+* added vignette (stub)
+* added explicit params argument, to help sort out
+  full parameter specifications when parameters is
+  non-NULL
+
+0.9.2 10/08/2009
+* fixed predict() for case with parameters
+* added snorm
+* changed ICtab defaults to weight=TRUE, base=FALSE, sort=TRUE
+
+0.9.1 
+  added simulate method (formula interface only)
+  fix AICctab bug
+  remove spurious cat/print in profile
+  fix qAIC bug
+
+0.9.0 26/08/2008
+*  fix Tom Hobbs bug: named lower/upper/parscale/ndeps
+get rearranged properly, otherwise rearrange in order
+of "start" and issue a warning
+* documentation tweak for S4 as.data.frame
+* added sbeta to list of known distributions
+* removed nlme requirement & auto-loading
+
+0.8.9 04/08/2008
+* version bump, submit to CRAN
+* added predict method
+
+0.8.8  10/07/2008
+* added flexibility for profile plotting (main, x labels etc.);
+    added examples
+* added an instance of "namedrop" to fix naming problem
+* added tol.newmin to slice etc.
+* added check for numeric return from profile within confint
+* fixed bugs in profile plotting when profile is restricted
+   to a subset of variables
+* added tests for par() to reset to original on exit
+* improved profile documentation
+* replicate std.err if specified in profile
+* add as.data.frame 
+* tweak tol.newmin (better fit found during profile) code
+
+0.8.7  12/05/2008
+* version bump, moved to R-forge.
+* reordered NEWS file (most recent first)
+     
+0.8.6.1 22/03/2008:
+   tweaked stop-on-better-fit code
+   fixed (?) qAIC(c) methods
+
+   26/03/2008:
+   tweak/fix to ICtab documentation (thanks to Tom Hobbs)
+
+0.8.6 added qAIC(c) methods (not working yet!)
+
+0.8.5.1 oops. Fixed infelicity (bug?) in new 
+  environment manipulation
+
+0.8.5 tweaked environment/data assignment to preserve
+    original minuslogl environment better
+
+0.8.4 changed  plot.profile.mle2 options (added onepage etc.,
+   made plot.confstr=TRUE by default)
+
+0.8.3 added warning about too-short lower/upper 
+      added documentation
+
+0.8.2 fixed bug in AICctab
+      cosmetic change to printing -- save call.orig
+      moved ChangeLog to NEWS
+
+0.8.1 fixed (?) environment bug
+      tried to use built-in relist, but failed: renamed relist
+	  to "relist2" (try again later)
+      documented get.mnames (auxiliary function for ICtabs)
+      started to add gr (gradient) capability -- NOT TESTED
+
+0.8  changed ICtab to allow either ICtab(x,y,z) or ICtab(list(x,y,z))
+	  (L <- list(...); if is.list(L[[1]]) && length(L)==1)
+
+0.7.7 fix bug in profiling: all optim() methods EXCEPT L-BFGS-B
+     return the value of the objective function if given a function
+     with no arguments/zero-length starting parameter vector
+     (this is the situation with "profiling" a 1-D function).
+     L-BFGS-B gives funky answers.  added a check for this case.
+     (may need to check behavior for alternate optimizers (nlm etc))
+    [this behavior triggered a "found better fit" error when profiling
+     1D functions with L-BFGS-B]
+
+     changed behavior when finding better fit during profiling
+     to return new parameters
+
+
+0.7.6 tweak vignette
+    fixed second major AICc bug (was fixed in mle2 method,
+	 but not in logLik method)
+
+0.7.5 change "ll" to "LL" in examples for clarity
+    tweaked anova reporting of models (wrap instead of truncating)
+    added (undocumented) show.points option to profile plot
+	to display actual locations of profile evaluation
+    tweaked profile to behave better when profiling variables
+	with constraints (upper, lower)
+    moved vignette to inst/doc where it belongs
+    ICtab hack to protect against package:aod definition of AIC(logLik)
+    added submit stub
+    tweaked slice.mle2-class docs for consistency
+    fiddled with vignette
+    preliminary code to allow non-monotonic profiles
+    preliminary add nlm to list of optimizers (untested)
+    add aod, Hmisc, emdbook to VignetteDepends and Suggests:
+
+0.7 better df extraction in ICtab
+    minor bug fix for AICc (allows AICc of nls objects)
+    handle models with -1 in formula better:
+	starting values set "all equal"
+    made ANOVA formula line-length accessible
+    added skip.hessian and trace arguments to mle2
+    messed around with BIC definition -- attempt at consistency with nlme
+    added rudimentary support for nlminb, constrOptim
+    nlme now required for fdHess (which is required for
+	nlminb since it doesn't compute a finite-diff
+	Hessian)
+
+0.6 add experimental formula interface
+    change all names from mle to mle2 to avoid confusion/conflicts
+       with stats4 version of mle
+    change internal structure of data evaluation
+    worked on vignette
+    added optimizer slot (stub)
+
+0.5 fix AICc bug!  (was deviance+2*k*(k+1)/(n-k-1), not AIC+2*k*(k+1)/(n-k-1))
+
+0.4 change AIC to AICc for corrections
+    add AICtab for weights, delta, sort ... options
+
+    expose error messages occurring within profile()
+    uniroot tries harder to find a valid endpoint
+    truncate terms in anova.mle at 80 characters
+
+0.3: enhanced anova method, works with print.anova
+tweaked namedrop() code -- ??
+
+0.2: added parnames, parnames<-
+minor fix to allow "profiles" of 1-parameter models
+  (skip fdHess call)
+minor change to print method for mle results
+tweaking "vecpar" (to allow parameter vectors in objective function)
+removed fdHess/nlme dependency
diff --git a/R/IC.R b/R/IC.R
new file mode 100755
index 0000000..b226833
--- /dev/null
+++ b/R/IC.R
@@ -0,0 +1,238 @@
+ICtab <- function(...,type=c("AIC","BIC","AICc","qAIC","qAICc"),
+                  weights=FALSE,delta=TRUE,base=FALSE,
+                  logLik=FALSE,
+                  sort=TRUE,nobs=NULL,dispersion=1,mnames,k=2) {
+    ## TO DO: allow inclusion of log-likelihood (or negative log-likelihood?)
+    ## base or delta? or both?  Should deltas include delta-df as well?
+    L <- list(...)
+    if (is.list(L[[1]]) && length(L)==1) L <- L[[1]]
+    type <- match.arg(type)
+    if (dispersion !=1) {
+        if (type=="BIC") stop("cannot specify dispersion with BIC")
+        if (substr(type,1,1)!="q") {
+            type = paste("q",type,sep="")
+            warning("dispersion!=1, type changed to ",type)
+        }
+    }
+    if (type=="AICc" || type=="BIC" || type=="qAICc") {
+        if (is.null(nobs)) {
+            ## if(is.null(attr(L[[1]],"nobs")))
+            ## stop("must specify number of observations if corr=TRUE")
+            ## nobs <- sapply(L,attr,"nobs")
+            nobs <- sapply(L,nobs)
+            if (length(unique(nobs))>1)
+                stop("nobs different: must have identical data for all objects")
+            nobs <- nobs[1]
+        }
+    }
+    ICs <- switch(type,
+                  AIC=sapply(L,AIC),
+                  BIC=sapply(L,BIC),
+                  AICc=sapply(L,AICc,nobs=nobs),
+                  qAIC=sapply(L,qAIC,dispersion=dispersion),
+                  qAICc=sapply(L,qAICc,nobs=nobs,dispersion=dispersion))
+    logLiks <- sapply(L,function(x) c(logLik(x)))
+    ## hack: protect against aod method
+    if (is.matrix(ICs)) ICs <- ICs["AIC",]  
+    getdf <- function(x) {
+        if (!is.null(df <- attr(x,"df"))) return(df)
+        else if (!is.null(df <- attr(logLik(x),"df"))) return(df)
+    }
+    dIC <- ICs-min(ICs)
+    dlogLiks <- logLiks-min(logLiks)
+    df <- sapply(L,getdf)
+    tab <- data.frame(df=df)
+    if (delta) {
+        dName <- paste0("d",type)
+        tab <- cbind(setNames(data.frame(dIC),dName),tab)
+        if (logLik) {
+            tab <- cbind(data.frame(dLogLik=dlogLiks),tab)
+        }
+    }
+    if (base) {
+        tab <- cbind(setNames(data.frame(ICs),type),tab)
+        if (logLik) {
+            tab <- cbind(data.frame(logLik=logLiks),tab)
+        }
+    }
+    if (!delta && !base) stop("either 'base' or 'delta' must be TRUE")
+    if (weights) {
+        wts <- exp(-dIC/2)/sum(exp(-dIC/2))
+        tab <- data.frame(tab,weight=wts)
+    }
+    if (missing(mnames)) {
+        Call <- match.call()
+        if (!is.null(names(Call))) {
+            xargs <- which(names(Call) %in% names(formals())[-1])
+        } else xargs <- numeric(0)
+        mnames <- as.character(Call)[c(-1,-xargs)]
+    }
+    row.names(tab) <- mnames
+    if (sort) {
+        tab <- tab[order(ICs),]
+    }
+    class(tab) <- "ICtab"
+    tab
+}
+
+print.ICtab <- function(x,...,min.weight=0.001) {
+    chtab <- format(do.call("cbind",lapply(x,round,1)))
+    rownames(chtab) <- attr(x,"row.names")
+    chtab[,"df"] <- as.character(x$df)
+    if (!is.null(x$weight))
+        chtab[,"weight"] <- format.pval(x$weight,eps=min.weight,
+                                        digits=2)
+    print(chtab,quote=FALSE)
+}
+
+AICtab <- function(...,mnames) {
+    ## fancy footwork to preserve model names
+    if (missing(mnames)) mnames <- get.mnames(match.call())
+    ICtab(...,mnames=mnames,type="AIC")
+}
+BICtab <- function(...,mnames) {
+    if (missing(mnames)) mnames <- get.mnames(match.call())
+    ICtab(...,mnames=mnames,type="BIC")
+}
+
+AICctab <- function(...,mnames) {
+    if (missing(mnames)) mnames <- get.mnames(match.call())
+    ICtab(...,mnames=mnames,type="AICc")
+}
+
+setGeneric("AICc", function(object, ..., nobs=NULL, k=2) standardGeneric("AICc"))
+
+setMethod("AICc", "mle2",
+          function (object, ..., nobs, k)  {
+              L <- list(...)
+              if (length(L)) {
+                  L <- c(list(object),L)
+                  if (is.null(nobs)) {
+                      nobs <- sapply(L,nobs)
+                  }
+                  if (length(unique(nobs))>1)
+                      stop("nobs different: must have identical data for all objects")
+                  logLiks <- sapply(L, logLik)
+                  df <- sapply(L,attr,"df")
+                  val <- -2*logLiks+k*df*(df+1)/(nobs-df-1)
+                  data.frame(AICc=val,df=df)
+              } else {
+                  df <- attr(object,"df")
+                  c(-2*logLik(object)+k*df+k*df*(df+1)/(nobs-df-1))
+              }
+          })
+
+setMethod("AICc", signature(object="logLik"),
+          function(object, ..., nobs=NULL, k){
+              if (missing(nobs)) {
+                  if (is.null(attr(object,"nobs")))
+                      stop("number of observations not specified")
+                  nobs <- attr(object,"nobs")
+              }
+              df <- attr(object,"df")
+              ## FIXME: should second "2" also be k?
+              -2 * c(object) + k*df+2*df*(df+1)/(nobs-df-1)
+          })
+
+setMethod("AICc", signature(object="ANY"),
+          function(object, ..., nobs=NULL, k){
+              AICc(object=logLik(object, ...), nobs=nobs, k=k)
+          })
+
+setMethod("AIC", "mle2",
+          function (object, ..., k = 2) {
+              L <- list(...)
+              if (length(L)) {
+                  L <- c(list(object),L)
+                  if (!all(sapply(L,class)=="mle2")) stop("all objects in list must be class mle2")
+                  logLiks <- lapply(L, logLik)
+                  AICs <- sapply(logLiks,AIC,k=k)
+                  df <- sapply(L,attr,"df")
+                  data.frame(AIC=AICs,df=df)
+              } else AIC(logLik(object), k = k)
+          })
+
+### quasi- methods
+
+setGeneric("qAICc", function(object, ..., nobs=NULL, dispersion, k=2)
+           standardGeneric("qAICc"))
+
+setMethod("qAICc", signature(object="ANY"),
+          function(object, ..., nobs=NULL, dispersion, k=2){
+              qAICc(object=logLik(object), nobs=nobs, dispersion=dispersion, k=k)
+          })
+
+setMethod("qAICc", "mle2",
+          function (object, ..., nobs, dispersion, k)  {
+              L <- list(...)
+              if (length(L)) {
+                  L <- c(list(object),L)
+                  if (missing(nobs)) {
+                      nobs <- sapply(L,nobs)
+                  }
+                  if (missing(dispersion) && is.null(attr(object,"dispersion")))
+                      stop("must specify (over)dispersion coefficient")
+                  if (length(unique(nobs))>1)
+                      stop("nobs different: must have identical data for all objects")
+                  nobs <- nobs[1]
+                  logLiks <- sapply(L, logLik)/dispersion
+                  df <- sapply(L,attr,"df")+1 ## add one for scale parameter
+                  val <- -2*logLiks+k*df*(df+1)/(nobs-df-1) ## -2* factor as in the single-object branch below
+                  data.frame(AICc=val,df=df)
+              } else {
+                  df <- attr(object,"df")
+                  c(-2*logLik(object)/dispersion+2*df+2*df*(df+1)/(nobs-df-1))
+              }
+          })
+
+setMethod("qAICc", signature(object="logLik"),
+          function(object, ..., nobs, dispersion, k){
+              if (missing(nobs)) {
+                  if (is.null(attr(object,"nobs")))
+                      stop("number of observations not specified")
+                  nobs <- attr(object,"nobs")
+              }
+              if (missing(dispersion)) {
+                  if (is.null(attr(object,"dispersion")))
+                      stop("dispersion not specified")
+                  dispersion <- attr(object,"dispersion")
+              }
+              df <- attr(object,"df")+1 ## add one for scale parameter
+              -2 * c(object)/dispersion + k*df+2*df*(df+1)/(nobs-df-1)
+          })
+
+setGeneric("qAIC", function(object, ..., dispersion, k=2)
+           standardGeneric("qAIC"))
+
+setMethod("qAIC", signature(object="ANY"),
+          function(object, ..., dispersion, k=2){
+              qAIC(object=logLik(object), dispersion=dispersion, k=k)
+          })
+
+setMethod("qAIC", signature(object="logLik"),
+          function(object, ..., dispersion, k){
+              if (missing(dispersion)) {
+                  if (is.null(attr(object,"dispersion")))
+                      stop("dispersion not specified")
+                  dispersion <- attr(object,"dispersion")
+              }
+              df <- attr(object,"df")
+              -2 * c(object)/dispersion + k*df
+          })
+
+setMethod("qAIC", "mle2",
+          function (object, ..., dispersion, k=2) {
+              L <- list(...)
+              if (length(L)) {
+                  L <- c(list(object),L)
+                  if (!all(sapply(L,class)=="mle2"))
+                      stop("all objects in list must be class mle2")
+                  logLiks <- lapply(L, logLik)
+                  AICs <- sapply(logLiks,qAIC, k=k, dispersion=dispersion)
+                  df <- sapply(L,attr,"df")
+                  data.frame(AIC=AICs,df=df)
+              } else {
+                  qAIC(logLik(object), k=k, dispersion=dispersion)
+              }
+          })
+
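
Typical use of the table functions defined above, sketched with hypothetical
fits (fit.p, fit.nb, and d are invented for illustration):

    d <- data.frame(x = rnbinom(100, mu = 5, size = 2))
    fit.p  <- mle2(x ~ dpois(lambda = lam), start = list(lam = 5), data = d)
    fit.nb <- mle2(x ~ dnbinom(mu = mu, size = k),
                   start = list(mu = 5, k = 1), data = d)
    AICtab(fit.p, fit.nb, weights = TRUE)   ## df, dAIC, Akaike weights, sorted
    AICctab(fit.p, fit.nb, nobs = nrow(d))  ## finite-size correction needs nobs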
diff --git a/R/confint.R b/R/confint.R
new file mode 100755
index 0000000..2966c66
--- /dev/null
+++ b/R/confint.R
@@ -0,0 +1,155 @@
+setMethod("confint", "profile.mle2",
+function (object, parm, level = 0.95, trace=FALSE, ...)
+{
+  Pnames <- names(object@profile)
+  if (missing(parm)) parm <- Pnames
+  if (is.character(parm)) parm <- match(parm,Pnames)
+  if (any(is.na(parm))) stop("parameters not found in profile")
+  ## Calculate confidence intervals based on likelihood
+  ## profiles
+  a <- (1 - level)/2
+  a <- c(a, 1 - a)
+  pct <- paste(round(100 * a, 1), "%")
+  ci <- array(NA, dim = c(length(parm), 2),
+              dimnames = list(Pnames[parm], pct))
+  cutoff <- qnorm(a)
+  for (pm in parm) {
+    pro <- object@profile[[Pnames[pm]]]
+    pv <- pro[,"par.vals"]
+    if (is.matrix(pv)) pv <- pv[,Pnames[pm]]
+    if (any(diff(pro[,1])<0)) {
+      warning(paste("non-monotonic profile (",
+                    Pnames[pm],"): reverting from spline to linear approximation ",
+              "(consider running 'profile' with manually reduced std.err)", sep=""))
+      tt <- approx(pro[,1],pv,xout=cutoff)$y
+    } else {
+      sp <- spline(x = pv, y = pro[, 1])
+      if (any(diff(sp$y)<0)) {
+        warning(paste("non-monotonic spline fit to profile (",
+                      Pnames[pm],"): reverting from spline to linear approximation",sep=""))
+        tt <- approx(pro[,1],pv,xout=cutoff)$y
+      } else {
+        tt <- try(approx(sp$y, sp$x, xout = cutoff)$y,silent=TRUE)
+        if (inherits(tt,"try-error")) tt <- rep(NA,2)
+      }
+    }
+    ci[Pnames[pm], ] <- tt
+  }
+  drop(ci)
+})
+
+setMethod("confint", "mle2",
+function (object, parm, level = 0.95, method,
+          trace=FALSE,quietly=!interactive(),
+          tol.newmin=0.001,...)
+{
+  if (missing(method)) method <- mle2.options("confint")
+  ## changed coef() calls to object@coef -- really *don't* want fullcoef!
+  Pnames <- names(object@coef)
+  if (missing(parm))
+    parm <- seq(along=Pnames)
+  if (is.character(parm)) parm <- match(parm,Pnames)
+  if (any(is.na(parm))) stop("parameters not found in model coefficients")
+  if (method=="spline") {
+    if (!quietly) message("Profiling...\n")
+    newpars_found <- FALSE
+    prof = try(profile(object,which=parm,tol.newmin=tol.newmin,...))
+    if (inherits(prof,"try-error")) stop(paste("Problem with profiling:",prof))
+    if (inherits(prof,"mle2")) newpars_found <- TRUE
+    if (newpars_found) {
+        ## profiling found a better fit
+        message("returning better fit\n")
+        return(prof)
+    }
+    return(confint(prof, parm, level, ...))
+  } else {
+    B0 <- object@coef
+    pnames <- names(B0)
+    if (missing(parm))
+      parm <- seq(along=pnames)
+    if (is.character(parm))
+      parm <- match(parm, pnames, nomatch = 0)
+    a <- (1 - level)/2
+    a <- c(a, 1 - a)
+    pct <- paste(round(100 * a, 1), "%")
+    ci <- array(NA, dim = c(length(parm), 2),
+                dimnames = list(pnames[parm], pct))
+    std.err <- summary(object)@coef[, "Std. Error"]
+    if (method=="uniroot") {
+      chisqcutoff <- qchisq(level,1)
+      call <- object@call
+      if (!isTRUE(call$vecpar))
+        call$start <- as.list(B0) ## added
+      upper <- rep(unlist(eval.parent(call$upper)),length.out=length(pnames))
+      lower <- rep(unlist(eval.parent(call$lower)),length.out=length(pnames))
+      for (pm in parm) {
+        critfun <- function(bi)
+          {
+            fix <- list(bi)
+            names(fix) <- pnames[pm]
+            call$fixed <- c(fix,eval(call$fixed))
+            if (!is.null(upper) && length(upper)>1) call$upper <- upper[-pm]
+            if (!is.null(lower) && length(lower)>1) call$lower <- lower[-pm]
+            pfit <- try(eval(call), silent=TRUE)
+            if(inherits(pfit, "try-error")) {
+              warning(paste("Error encountered in profile (uniroot):",pfit))
+              return(NA)
+            }
+            else {
+              zz <- 2*pfit@min - 2*(-logLik(object))
+              if (zz > -tol.newmin)
+                zz <- max(zz, 0)
+              else
+                stop(sprintf("profiling has found a better solution (old deviance=%.2f, new deviance=%.2f), so original fit had not converged",2*(-logLik(object)),2*pfit@min))
+              z <- zz - chisqcutoff
+            }
+            if (trace) cat(bi, z, "\n")
+            z
+          }
+        stepfun <- function(step) {
+          B0[pm] + sgn * step * std.err[pm]
+        }
+        invstepfun <- function(out) {
+          (out - B0[pm])/(sgn * std.err[pm])
+        }
+        sgnvec=c(-1,1)
+        for (i in 1:2) {
+          sgn <- sgnvec[i]
+          bnd <- if (sgn<0) {
+            if (is.null(lower)) -Inf else lower[pm]
+          } else {
+            if (is.null(upper)) Inf else upper[pm]
+          }
+          c0 <- critfun(B0[pm])
+          bi <- ctry <- pmin(5,invstepfun(bnd))
+          cdel <- -0.25
+          c5 <- NA
+          while (is.na(c5) && ctry>0 ) {
+            c5 <- critfun(stepfun(ctry))
+            if (is.na(c5)) {
+              if (trace) cat("encountered NA, reducing ctry to",ctry+cdel,"\n")
+              ctry <- ctry+cdel
+            }
+          }
+          if (trace) cat(c0,c5,"\n")
+          if (is.na(c0*c5) || c0*c5>0) {
+            warning(paste("can't find confidence limits in",
+                          c("negative","positive")[i],"direction"))
+            curci <- NA
+            ## FIXME: could try harder!
+          } else {
+            curci <- uniroot(critfun,c(stepfun(0),stepfun(ctry)))$root
+          }
+          ci[pnames[pm],i] <- curci
+        }
+      }
+    } else if (method=="quad") {
+      for (pm in parm) {
+        ci[pnames[pm],] <- qnorm(a,B0[pm],std.err[pm])
+      }
+    } else stop("unknown method")
+    return(drop(ci))
+  }
+})
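
The three interval methods implemented above can be compared on any fitted
model; a sketch assuming 'fit' is an mle2 object such as the Poisson fit
sketched earlier:

    pr <- profile(fit)
    confint(pr)                       ## spline interpolation of the profile
    confint(fit, method = "uniroot")  ## root-finding on the LRT cutoff
    confint(fit, method = "quad")     ## quadratic (Wald) approximation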
diff --git a/R/dists.R b/R/dists.R
new file mode 100755
index 0000000..56ddaf6
--- /dev/null
+++ b/R/dists.R
@@ -0,0 +1,79 @@
+
+snorm <- function(mean,sd) {
+  list(title="Normal",
+       mean=mean,
+       median=mean,
+       mode=mean,
+       variance=sd^2,
+       sd=sd)
+}
+
+sbinom <- function(size,prob) {
+  list(title="Binomial",
+       prob=prob,size=size,
+       mean=prob*size,
+       median=qbinom(0.5,size,prob),
+       mode=NA,
+       variance=size*prob*(1-prob),
+       sd=sqrt(size*prob*(1-prob)),
+       formula="x*log(prob)+(size-x)*log(1-prob)")
+}
+
+sbeta <- function(shape1,shape2) {
+  list(title="Beta",
+       shape1=shape1,shape2=shape2,
+       mean=shape1/(shape1+shape2),
+       median=qbeta(0.5,shape1,shape2),
+       mode=NA,
+       variance=shape1*shape2/((shape1+shape2)^2*(shape1+shape2+1)),
+       sd=sqrt(shape1*shape2/((shape1+shape2)^2*(shape1+shape2+1))))
+}
+
+snbinom <- function(size,prob,mu) {
+    if (missing(mu) && !missing(prob)) {
+        mupar <- FALSE
+        mu = NA ## FIXME
+        warning("STUB in snbinom: calc. mu as a function of prob")
+    }
+    if (!missing(mu) && missing(prob)) {
+        mupar <- TRUE
+        prob = size/(size+mu)
+    }
+    v <- if (mupar) mu+mu^2/size else size*(1-prob)/prob^2
+    list(title="Negative binomial",
+         prob=prob,mu=mu,size=size,
+         mean=if (mupar) mu else size*(1-prob)/prob,
+         median= if (mupar) qnbinom(0.5,mu=mu,size) else qnbinom(0.5,prob=prob,size),
+         mode=NA,
+         variance=v,
+         sd=sqrt(v))
+}
+
+spois <- function(lambda) {
+  list(title="Poisson",
+       lambda=lambda,
+       mean=lambda,
+       median=qpois(0.5,lambda),
+       mode=NA,
+       variance=lambda,
+       sd=sqrt(lambda))      
+}
+
+sbetabinom <- function(size,prob,theta) {
+  list(title="Beta-binomial",
+       prob=prob,size=size,theta=theta,
+       mean=prob*size,
+       median=NA, ## qbetabinom(0.5,size,prob),
+       mode=NA,
+       variance=size*prob*(1-prob)/theta,
+       sd=sqrt(size*prob*(1-prob)/theta))
+}
+
+sgamma <- function(shape,rate=1,scale=1/rate) {
+    if (missing(rate)) rate <- 1/scale
+    list(title="Gamma",
+         mean=shape/rate,sd=sqrt(shape)/rate,
+         median=NA,
+         mode=NA,
+         variance=shape/rate^2)
+}
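
Each s* helper returns a list of summary statistics for a distribution at
the given parameter values, e.g.:

    str(snbinom(size = 2, mu = 5))
    ## mean = mu = 5; variance = mu + mu^2/size = 17.5 (mu parameterization)
    sbinom(size = 10, prob = 0.3)$variance  ## 10 * 0.3 * 0.7 = 2.1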
diff --git a/R/mle.R b/R/mle.R
new file mode 100755
index 0000000..320d607
--- /dev/null
+++ b/R/mle.R
@@ -0,0 +1,784 @@
+## require(methods,quietly=TRUE)  ## for independence from stats4
+## require(numDeriv,quietly=TRUE) ## for hessian()
+
+call.to.char <- function(x) {
+    ## utility function
+    x <- as.list(x)
+    if (length(x)>1) x <- x[c(2,1,3)]
+    paste(sapply(x,as.character),collapse="")
+}
+
+## FIXME: problem with bounds and formulae!
+calc_mle2_function <- function(formula,
+                               parameters,
+                               links,
+                               start,
+                               parnames,
+                               use.deriv=FALSE,
+                               data=NULL,
+                               trace=FALSE) {
+  ## resid=FALSE
+  ##  stub: what was I going to use this for ???
+  ##  returning residuals rather than mle (e.g. for minpack.nls??)
+  RHS <- formula[[3]]
+  ddistn <- as.character(RHS[[1]])
+  if (ddistn=="dnorm" && !("sd" %in% names(RHS))) {
+      warning("using dnorm() with sd implicitly set to 1 is rarely sensible")
+  }
+  if (ddistn=="dnbinom" && !("mu" %in% names(RHS))) {
+  }
+  ## need to check on variable order:
+  ## should it go according to function/formula,
+  ##   not start?
+  if (!is.list(data)) stop("must specify data argument",
+                           " (as a list or data frame)",
+                           " when using formula argument")
+  vecstart <- (is.numeric(start))
+  if (vecstart) start <- as.list(start) ## expand to a list
+  if (missing(parnames) || is.null(parnames)) {
+    parnames <- as.list(names(start))
+    names(parnames) <- names(start)
+  }
+  ## hack
+  if (!missing(parameters)) {
+    ## linear model specified for some parameters
+    vars <- as.character(sapply(parameters,"[[",2))
+    if (length(parameters)>1) {
+      models <-  sapply(parameters,function(z) call.to.char(z[[3]]))
+    } else {
+      models <- as.character(parameters)
+    }
+    models <- gsub(" ","",models)
+    parameters <- parameters[models!="1"]
+    npars <- length(parameters)
+    if (npars==0) { ## no non-constant parameters
+      parameters <- mmats <- vpos <- NULL
+    } else {
+      ## BUG IN HERE SOMEWHERE, FIXME: SENSITIVE TO ORDER OF 'start'
+      mmats <- list()
+      vpos <- list()
+      pnames0 <- parnames
+      names(parnames) <- parnames
+      for (i in seq(along=parameters)) {
+        vname <- vars[i]      ## name of variable
+        p <- parameters[[i]]  ## formula for variable
+        p[[2]] <- NULL
+        mmat <- model.matrix(p,data=data)     
+        pnames <- paste(vname,colnames(mmat),sep=".")
+        parnames[[vname]] <- pnames ## insert into parameter names
+        vpos0 <- which(pnames0==vname)
+        vposvals <- cumsum(sapply(parnames,length))
+        ## fill out start vectors with zeros or replicates as appropriate
+        if (length(start[[vname]])==1) {
+            if (length(grep("-1",models[i]))>0) {
+                start[[vname]] <- rep(start[[vname]],length(pnames))
+            } else {
+                start[[vname]] <- c(start[[vname]],rep(0,length(pnames)-1))
+            }
+        }
+        ## fix: what if parameters are already correctly specified?
+        startpos <- if (vpos0==1) 1 else vposvals[vpos0-1]+1
+        vpos[[vname]] <- startpos:vposvals[vpos0]
+        mmats[[vname]] <- mmat
+      }
+    }
+  } else parameters <- vars <- mmats <- vpos <- NULL
+  if (!missing(links)) {
+    stop("parameter link functions not yet implemented")
+    for (i in length(links)) {
+    }
+  }
+  parnames <- unlist(parnames)
+  start <- as.list(unlist(start)) ## collapse/re-expand (WHY?)
+  names(start) <- parnames
+  arglist <- as.list(RHS[-1]) ## delete function name
+  arglist$parameters <- NULL
+  arglist1 <- c(list(x=formula[[2]]),arglist,list(log=TRUE))
+  arglist1  ## codetools check kluge
+  fn <- function() {
+      ## is there a better way to do this?
+      ## need to look for parameters etc.
+      pars <- unlist(as.list(match.call())[-1])
+      if (!is.null(parameters)) {
+          for (.i in seq(along=parameters)) {
+              assign(vars[.i],mmats[[.i]] %*% pars[vpos[[.i]]])
+          }
+      }
+    ## if (is.null(data) || !is.list(data))
+    ## stop("data argument must be specified when using formula interface")
+    ## BUG/FIXME: data evaluates to 'FALSE' at this point -- regardless of whether
+    ## it has been specified
+    ## FIXME: how to make this eval() less fragile???
+    ## sys.frame(sys.nframe()) specifies the number of the *current* frame
+    ## ... envir=data,enclos=parent.frame()
+      ## this actually works OK: fails enigmatically if we
+      ## 
+    arglist2 <- lapply(arglist1,eval,envir=data,
+                       enclos=sys.frame(sys.nframe()))
+    if (use.deriv) {
+      stop("use.deriv is not yet implemented")
+      ## browser()
+      ## minor hack -- should store information otherwise -- could have
+      ##  different numbers of arguments for different distributions?
+      LLform <- get(gsub("^d","s",as.character(RHS[[1]])))(NA,NA)$formula
+      avals <- as.list(formula[[3]][-1])
+      for (i in seq_along(avals))
+        LLform <- gsub(names(avals)[i],avals[[i]],LLform)
+      r <- eval(deriv(parse(text=LLform),parnames),envir=c(arglist2,data))
+    } else {
+      r <- -sum(do.call(ddistn,arglist2))
+    }
+    ## doesn't work yet -- need to eval arglist in the right env ...
+    ## if (debugfn) cat(unlist(arglist),r,"\n")
+    if (trace) cat(pars,r,"\n")
+    r
+  }
+  npars <- length(parnames)
+  flist <-  vector("list",npars)
+  names(flist) <- parnames
+  ## add additional parnames?
+  ## browser()
+  ## flist <- c(flist,setdiff(names(arglist),c("x","log",... ?))
+  formals(fn) <- flist
+  if (vecstart) start <- unlist(start)
+  list(fn=fn,start=start,parameters=parameters,
+       fdata=list(vars=vars,mmats=mmats,vpos=vpos,
+       arglist1=arglist1,ddistn=ddistn,parameters=parameters),
+       parnames=parnames)
+}
+
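
calc_mle2_function() above is what expands per-parameter model formulas into
model-matrix terms; a sketch of the interface it supports, with invented
data 'dd' (illustrative, not upstream code):

    dd <- data.frame(x = runif(50))
    dd$y <- rpois(50, exp(1 + 2 * dd$x))
    fit <- mle2(y ~ dpois(lambda = exp(loglam)),
                parameters = list(loglam ~ x),  ## linear sub-model for loglam
                start = list(loglam = 0), data = dd)
    coef(fit)  ## named loglam.(Intercept), loglam.x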
+## need logic that will identify correctly when
+## we need to pass parameters as a vector
+mle2 <- function(minuslogl,
+                 start,  ## =formals(minuslogl),
+                 method,
+                 optimizer,
+                 fixed=NULL,
+                 data=NULL,
+                 subset=NULL,
+                 default.start=TRUE, 
+                 eval.only = FALSE,
+                 vecpar = FALSE,
+                 parameters=NULL,
+                 parnames=NULL,
+                 skip.hessian=FALSE,
+                 hessian.opts=NULL,
+                 use.ginv=TRUE,
+                 trace=FALSE,
+                 browse_obj=FALSE,
+                 gr,
+                 optimfun,
+                 ...) {
+
+  if (missing(method)) method <- mle2.options("optim.method")
+  if (missing(optimizer)) optimizer <- mle2.options("optimizer")
+  L <- list(...)
+  if (optimizer=="optimize" && (is.null(L$lower) || is.null(L$upper)))
+      stop("lower and upper bounds must be specified when using 'optimize'")
+  if (inherits(minuslogl,"formula")) {
+    pf <- function(f) {if (is.null(f))
+                         {  ""
+                          } else {
+                            paste(f[2],"~",
+                                  gsub(" ","",as.character(f[3])),sep="")
+                          }
+                     }
+    if (missing(parameters)) {
+      formula <- pf(minuslogl)
+    } else {
+      formula <- paste(pf(minuslogl),
+                       paste(sapply(parameters,pf),collapse=", "),sep=": ")
+    }
+    tmp <- calc_mle2_function(minuslogl,parameters,
+                              start=start,
+                              parnames=parnames,
+                              data=data,trace=trace)
+    minuslogl <- tmp$fn
+    start <- tmp$start
+    fdata <- tmp$fdata
+    parameters <- tmp$parameters
+  } else {
+    formula <- ""
+    fdata <- NULL
+  }
+  call <- match.call()
+  call.orig <- call
+  ## ?? still not sure this is the best thing to do, but:
+  ##   evaluate all elements of call
+  ##    to make sure it will still function in new environments ...
+  ## call[-1] <- lapply(call[-1],eval.parent)
+  ## call[-1] <- lapply(call[-1],eval,envir=parent.frame(),enclos=parent.frame(2))
+  ## FAILS if embedded in a funny environment (e.g. called from lapply)
+  ##  why do we need this in the first place?
+  ## FIXME: change update(), profile() to re-fit model properly
+  ##  rather than evaluating call(), or generally find a less-fragile
+  ##  way to do this.  Reverting to original form for now.
+  call$data <- eval.parent(call$data)
+  call$upper <- eval.parent(call$upper)
+  call$lower <- eval.parent(call$lower)
+  call$gr <- eval.parent(call$gr)
+  ## FIX based on request from Mark Clements
+  ## call$control$parscale <- eval.parent(call$control$parscale)
+  ## call$control$ndeps <- eval.parent(call$control$ndeps)
+  ## call$control$maxit <- eval.parent(call$control$maxit)
+  call$control <- eval.parent(call$control)
+  if(!missing(start))
+    if (!is.list(start)) {
+      if (is.null(names(start)) || !is.vector(start))
+        stop("'start' must be a named vector or named list")
+      ## do we want this or not???
+      vecpar <- call$vecpar <- TRUE  ## given a vector start: set vecpar=TRUE
+      start <- as.list(start)
+    }
+  ## also check parnames(minuslogl)?
+  if (missing(start) && default.start) start <- formals(minuslogl)
+  if (!is.null(fixed) && !is.list(fixed)) {
+    if (is.null(names(fixed)) || !is.vector(fixed))
+      stop("'fixed' must be a named vector or named list")
+    fixed <- as.list(fixed)
+  }
+  if (!is.null(data) && !is.list(data)) ##  && !is.environment(data)) 
+    stop("'data' must be a list")
+  nfix <- names(unlist(namedrop(fixed)))
+  if (!is.null(parnames(minuslogl))) {
+    nfull <- parnames(minuslogl)
+    fullcoef <- vector("list",length(nfull))
+    names(fullcoef) <- nfull
+  } else {
+    fullcoef <- formals(minuslogl)
+    nfull <- names(fullcoef)
+  }
+  if(any(! nfix %in% nfull))
+    stop("some named arguments in 'fixed' are not arguments to the specified log-likelihood function")
+  if (length(nfix)>0) start[nfix] <- NULL
+  fullcoef[nfix] <- fixed
+  ## switched namedrop() from outside to inside sapply ?
+  nstart <- names(unlist(sapply(namedrop(start),eval.parent)))
+  fullcoef[! nfull %in% nfix & ! nfull %in% nstart ] <- NULL  ## delete unnecessary names
+  nfull <- names(fullcoef)
+  lc <- length(call$lower)
+  lu <- length(call$upper)
+  npnfix <- sum(!nfull %in% nfix)
+  if (!npnfix==0 && (lu>npnfix || lc>npnfix )) {
+    warning("length mismatch between lower/upper ",
+            "and number of non-fixed parameters: ",
+            "# lower=",lc,", # upper=",lu,", # non-fixed=",npnfix)
+  }
+  template <- lapply(start, eval.parent)  ## preserve list structure!
+  if (vecpar) template <- unlist(template)
+  start <- sapply(namedrop(start), eval.parent) # expressions are allowed; added namedrop
+  nstart <- names(unlist(namedrop(start)))
+  ## named <- length(names(fullcoef))
+  oo <- match(nstart, names(fullcoef))
+  if (any(is.na(oo)))
+    stop("some named arguments in 'start' are not arguments to the specified log-likelihood function")
+  ## if (named)
+  start <- start[order(oo)]
+  ## rearrange lower/upper to same order as "start"
+  ## FIXME: use names to rearrange if present
+  fix_order <- function(c1,name,default=NULL) {
+      if (!is.null(c1)) {
+          if (length(unique(c1))>1) {  ## not all the same
+            if (is.null(names(c1)) && length(unique(c1))>1) {
+              warning(name," not named: rearranging to match 'start'")
+              oo2 <- oo
+            } else oo2 <- match(names(unlist(namedrop(c1))),names(fullcoef))
+            c1 <- c1[order(oo2)]
+          }
+        } else c1 <- default
+      c1
+  }
+  call$lower <- fix_order(call$lower,"lower bounds",-Inf)
+  call$upper <- fix_order(call$upper,"upper bounds",Inf)
+  call$control$parscale <- fix_order(call$control$parscale,"parscale")
+  call$control$ndeps <- fix_order(call$control$ndeps,"ndeps")
+  if (is.null(call$control)) call$control <- list()
+  ## attach(data,warn.conflicts=FALSE)
+  ## on.exit(detach(data))
+  denv <- local(environment(),c(as.list(data),fdata,list(mleenvset=TRUE)))
+  ## denv <- local(new.env(),c(as.list(data),fdata,list(mleenvset=TRUE)))
+  argnames.in.data <- names(data)[names(data) %in% names(formals(minuslogl))]
+  args.in.data <- lapply(argnames.in.data,get,env=denv)
+  names(args.in.data) <- argnames.in.data
+  args.in.data  ## codetools kluge
+  objectivefunction <- function(p){
+      if (browse_obj) browser()
+      l <- relist2(p,template) ## redo list structure
+      ## if (named)
+      names(p) <- nstart[order(oo)] ## make sure to reorder
+      ## ??? useless, comes after l is constructed ???
+      l[nfix] <- fixed
+      ##    cat("p\n"); print(p)
+      ## cat("l\n"); print(l)
+      ##    cat("data\n"); print(data)
+      if (vecpar) {
+          ## if (named)
+          l <- namedrop(l[nfull])
+          l <- unlist(l)
+          args <- list(l)
+          args <- c(list(l),args.in.data)
+      } else { args <- c(l,args.in.data)
+           }
+      ## eval in environment of minuslogl???
+      ## doesn't help, environment(minuslogl) is empty by this time
+      ## cat("e3:",length(ls(envir=environment(minuslogl))),"\n")
+      ## hack to remove unwanted names ...
+      do.call("minuslogl",namedrop(args))
+  } ## end of objective function
+  objectivefunctiongr <-
+    if (missing(gr)) NULL else
+        function(p) {
+          if (browse_obj) browser()
+          l <- relist2(p,template) ## redo list structure
+          names(p) <- nstart[order(oo)] ## make sure to reorder
+          l[nfix] <- fixed
+          if (vecpar) {
+            l <- namedrop(l[nfull])
+            l <- unlist(l)
+            args <- list(l)
+            args <- c(list(l),args.in.data)
+          } else { args <- c(l,args.in.data)
+                 }
+          v <- do.call("gr",args)
+          if (is.null(names(v))) {
+              if (length(v)==length(l) && !is.null(tt <- names(l))) {
+                  ## try to set names from template
+                  vnames <- tt
+              } else if (length(v)==length(p) && !is.null(tt <- names(p))) {
+                  ## try to set names from params
+                  vnames <- tt
+              } else if (!is.null(tt <- parnames(minuslogl))) {
+                  ## names were set as an attribute of the function
+                  vnames <- tt
+              } else vnames <- names(formals(minuslogl))
+              if (length(vnames)!=length(v))
+                  stop("name/length mismatch in gradient function")
+              names(v) <- vnames
+          }
+          v[!names(v) %in% nfix] ## from Eric Weese
+        } ## end of gradient function
+  ## FIXME: try to do this by assignment into appropriate
+  ##    environments rather than replacing them ...
+  ## only set env if environment has not been previously set!
+  if (!("mleenvset" %in% ls(envir=environment(minuslogl)))) {
+      newenv <- new.env(hash=TRUE,parent=environment(minuslogl))
+      d <- as.list(denv)
+      mapply(assign,names(d),d,
+             MoreArgs=list(envir=newenv))
+      environment(minuslogl) <- newenv
+      if (!missing(gr)) {
+          newenvgr <- new.env(hash=TRUE,parent=environment(minuslogl))
+          mapply(assign,names(d),d,
+                 MoreArgs=list(envir=newenvgr))
+      }
+  }
+  if (length(start)==0 || eval.only) {
+    if (length(start)==0) start <- numeric(0)
+    optimizer <- "none"
+    skip.hessian <- TRUE
+    oout <- list(par=start, value=objectivefunction(start),
+                 hessian = matrix(NA,nrow=length(start),ncol=length(start)))
+  } else {
+    oout <- switch(optimizer,
+                   optim = {
+                       arglist <- list(...)
+                       arglist$lower <- arglist$upper <-
+                         arglist$control <- NULL
+                       do.call("optim",
+                               c(list(par=start,
+                                      fn=objectivefunction,
+                                      method=method,
+                                      hessian=FALSE,
+                                      gr=objectivefunctiongr,
+                                      control=call$control,
+                                      lower=call$lower,
+                                      upper=call$upper),
+                                 arglist))
+                   },
+                   optimx = {
+                     ## don't ask, will get us into
+                     ##   dependency hell
+                     ## require("optimx")
+                     arglist <- list(...)
+                     arglist$lower <- arglist$upper <-
+                       arglist$control <- NULL
+                     do.call("optimx",
+                             c(list(par=start,
+                                    fn=objectivefunction,
+                                    method=method,
+                                    hessian=FALSE,
+                                    gr=objectivefunctiongr,
+                                    control=call$control,
+                                    lower=call$lower,
+                                    upper=call$upper),
+                               arglist))
+                   },
+                   nlm = nlm(f=objectivefunction, p=start, hessian=FALSE, ...),
+                     ##!skip.hessian,
+                   ## <http://netlib.bell-labs.com/cm/cs/cstr/153.pdf>
+                   nlminb = nlminb(start=start,
+                     objective=objectivefunction, hessian=NULL, ...),
+                   constrOptim = constrOptim(theta=start,
+                     f=objectivefunction, method=method, ...),
+                   optimize=,
+                   optimise= optimize(f=objectivefunction,
+                     interval=c(call$lower,call$upper), ...),
+                   user = {
+                     arglist <- list(...)
+                     arglist$lower <- arglist$upper <-
+                       arglist$control <- NULL
+                     do.call(optimfun,
+                             c(list(par=start,
+                                    fn=objectivefunction,
+                                    method=method,
+                                    hessian=FALSE,
+                                    gr=objectivefunctiongr,
+                                    control=call$control,
+                                    lower=call$lower,
+                                    upper=call$upper),
+                               arglist))
+                   },
+                   stop("unknown optimizer (choices are 'optim', 'nlm', 'nlminb', 'constrOptim', 'user', and 'optimi[sz]e')")
+                 )
+  }
+  optimval <- switch(optimizer,
+                     optim= , constrOptim=, optimx=, user=, none="value",
+                     nlm="minimum",
+                     optimize=, optimise=, nlminb="objective")
+  if (optimizer=="optimx") {
+    fvals <- oout[["value"]]
+    conv <- oout[["convcode"]]
+    ## best <- if (!any(conv==0)) {
+    best <- which.min(fvals)
+    ##    } else {
+    ## fvals <- fvals[conv==0]
+    ## which.min(fvals)
+    ## }
+    oout <- list(par=as.numeric(unlist(oout[best,1:attr(oout,"npar")])),
+                 value=fvals[best],
+                 convergence=conv[best],
+                 method.used=attr(oout,"details")[,"method"][[best]])
+    ## FIXME: should do profiles only with best method for MLE?
+  }
+  if (optimizer=="nlm") {
+    oout$par <- oout$estimate
+    oout$convergence <- oout$code
+  }
+  if (optimizer %in% c("optimise","optimize")) {
+    oout$par <- oout$minimum
+    oout$convergence <- 0 ## can't detect non-convergence
+  }
+  if (optimizer %in% c("nlminb","optimise","optimize") ||
+      ## optimizer (bobyqa?) may have stripped names -- try to restore them!
+      is.null(names(oout$par))) {
+    names(oout$par) <- names(start)
+  }
+
+  ## FIXME: worry about boundary violations?
+  ## (if we're on the boundary then the Hessian may not be useful anyway)
+  ##
+  if (length(oout$par)==0) skip.hessian <- TRUE
+  if (!skip.hessian) {
+    if ((!is.null(call$upper) || !is.null(call$lower)) &&
+        (any(oout$par==call$upper) || any(oout$par==call$lower)))
+      warning("some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable")
+  }
+  namatrix <- matrix(NA,nrow=length(start),ncol=length(start))
+  if (!skip.hessian) {
+    psc <- call$control$parscale
+    if (is.null(psc)) {
+      oout$hessian <- try(hessian(objectivefunction,oout$par,method.args=hessian.opts))
+    } else {
+      tmpf <- function(x) {
+        objectivefunction(x*psc)
+      }
+      oout$hessian <- try(hessian(tmpf,oout$par/psc,method.args=hessian.opts))/outer(psc,psc)
+    }
+  }
+  if (skip.hessian || inherits(oout$hessian,"try-error"))
+      oout$hessian <- namatrix
+  coef <- oout$par
+  nc <- names(coef)
+  if (skip.hessian) {
+    tvcov <- matrix(NA,length(coef),length(coef))
+  } else {
+    if (length(coef)) {
+      if (use.ginv) {
+        tmphess <- try(MASS::ginv(oout$hessian),silent=TRUE)
+      } else {
+        tmphess <- try(solve(oout$hessian),silent=TRUE)
+      }
+      if (inherits(tmphess,"try-error")) {
+        tvcov <- matrix(NA,length(coef),length(coef))
+        warning("couldn't invert Hessian")
+      } else tvcov <- tmphess
+    } else {
+      tvcov <- matrix(numeric(0),0,0)
+    }
+  }
+  dimnames(tvcov) <- list(nc,nc)
+  min <-  oout[[optimval]]
+  ##  if (named)
+  fullcoef[nstart[order(oo)]] <- coef
+  ## else fullcoef <- coef
+  ## compute termination info
+  ## FIXME: should we worry about parscale here??
+  if (length(coef)) {
+    gradvec <- if (!missing(gr)) {
+        objectivefunctiongr(coef)
+    } else {
+        if (inherits(tt <- try(grad(objectivefunction,coef),silent=TRUE),
+                     "try-error")) NA else tt
+    }
+    oout$maxgrad <-  max(abs(gradvec))
+    if (!skip.hessian) {
+        if (inherits(ev <- try(eigen(oout$hessian)$values,silent=TRUE),
+                     "try-error")) ev <- NA
+        oout$eratio <- min(ev)/max(ev)
+    }
+  }
+  if (!is.null(conv <- oout$conv) &&
+      ((optimizer=="nlm" && conv>2) ||
+       (optimizer!="nlm" && conv!=0))) {
+      ## warn of convergence failure
+      if (is.null(oout$message)) {
+          cmsg <- "unknown convergence failure: refer to optimizer documentation"
+          if (optimizer=="optim") {
+              if (conv==1) cmsg <- "iteration limit 'maxit' reached"
+              if (conv==10) cmsg <- "degenerate Nelder-Mead simplex"
+          } else if (optimizer=="nlm") {
+              if (conv==3) cmsg <- "last global step failed to locate a point lower than 'estimate': see ?nlm"
+              if (conv==4) cmsg <- "iteration limit exceeded"
+              if (conv==5) cmsg <- "maximum step size 'stepmax' exceeded five consecutive times: see ?nlm"
+          }
+      } else cmsg <- oout$message
+      warning(paste0("convergence failure: code=",conv," (",cmsg,")"))
+  }
+  m <- new("mle2", call=call, call.orig=call.orig, coef=coef, fullcoef=unlist(fullcoef), vcov=tvcov,
+           min=min, details=oout, minuslogl=minuslogl, method=method,
+           optimizer=optimizer,data=as.list(data),formula=formula)
+  attr(m,"df") = length(m at coef)
+  if (!missing(data)) attr(m,"nobs") = length(data[[1]])
+  ## to work with BIC as well
+  m
+}
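+
+## usage sketch (editor's example; the data mirror the nlminb test near
+## the end of this file):
+if (FALSE) {
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+fit <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)), start=list(ymax=25,xhalf=3.06))
+summary(fit)
+}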
+
+
+get.mnames <- function(Call) {
+    xargs <- which(names(Call) %in% names(formals(ICtab))[-1])
+    mnames <- as.character(Call)[c(-1,-xargs)]
+    if (length(mnames)==1) {
+        g <- get(mnames)
+        if (is.list(g) && length(g)>1) {
+            if (is.null(names(g))) mnames <- paste("model",1:length(g),sep="")
+            else mnames <- names(g)
+            if (any(duplicated(mnames))) stop("model names must be distinct")
+        }
+    }
+    mnames
+}
+  
+
+mle2.options <- function(...) {
+  single <- FALSE
+  args <- list(...)
+  setvals <- !is.null(names(args))
+  if (!length(args)) args <- names(.Mle2.options)
+  if (all(unlist(lapply(args, is.character))))
+    args <- as.list(unlist(args))
+  if (length(args) == 1) {
+    if (is.list(args[[1]]) || is.null(args[[1]]))
+      args <- args[[1]]
+    else if (!setvals)
+      single <- TRUE
+  }
+  if (setvals) {
+    .Mle2.options[names(args)] <<- args
+    value <- .Mle2.options[names(args)]
+  } else value <- .Mle2.options[unlist(args)]
+  if (single) value <- value[[1]]
+  if (setvals) invisible(value) else value
+}
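+
+## usage sketch:
+if (FALSE) {
+mle2.options()                            ## list all current option values
+mle2.options("optim.method")              ## query a single option
+mle2.options(optim.method="Nelder-Mead")  ## set; new value returned invisibly
+}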
+
+
+.Mle2.options = list(optim.method="BFGS",confint = "spline",optimizer="optim")
+
+
+## .onLoad <- function(lib, pkg) require(methods)
+
+
+## (not yet) replaced by relist?
+## reconstruct list structure:
+##   v is a vector, l is the original list
+##      to use as a template
+relist2 <- function(v,l) {
+  if (is.list(v)) v <- unlist(v)
+  if (!all(sapply(l,mode)=="numeric")) {
+    stop("can't relist non-numeric values")
+  }
+  lens <- sapply(l,length)
+  if (all(lens==1))
+    return(as.list(v))
+  l2 <- split(v,rep(1:length(l),lens))
+  names(l2) <- names(l)
+  l3 <- mapply(function(x,y) {
+    if (!is.null(dim(y))) {
+      z <- array(x,dim(y)); dimnames(z) <- dimnames(y); z
+    } else {
+      z <- x; names(z) <- names(y); z
+    }
+  },l2,l,SIMPLIFY=FALSE)
+  names(l3) <- names(l)
+  l3
+}
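+
+## e.g. (editor's illustration):
+##   relist2(1:5, list(a=0, b=c(x=0,y=0), m=matrix(0,1,2)))
+##   ## -> list(a=1, b=c(x=2,y=3), m=matrix(4:5,1,2))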
+
+namedrop <- function(x) {
+  if (!is.list(x)) return(x)
+  for (i in seq(along=x)) {
+    ## cat(i,length(x),"\n")
+    n <- names(x[[i]])
+    lx <- length(x[[i]])
+    if (!is.null(n)) {
+      if (lx==1) {
+        names(x[[i]]) <- NULL
+      } else if (length(unique(n))<lx) {
+        names(x[[i]]) <- 1:lx
+      }
+    } ## !is.null(names(x[[i]]))
+  } ## loop over elements
+  x
+}
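+
+## e.g. (editor's illustration):
+##   namedrop(list(a=c(x=1), b=c(p=1,q=2), d=c(u=3,u=4)))
+##   drops the name on the length-1 'a', keeps the unique names on 'b',
+##   and replaces the duplicated names on 'd' with 1:2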
+
+"parnames<-" <- function(obj,value) {
+attr(obj,"parnames") <- value
+  obj
+}
+
+parnames <- function(obj) {
+attr(obj,"parnames")
+}
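+
+## usage sketch: attach parameter names to a vector-argument objective
+## function (assumes a data vector y in scope):
+if (FALSE) {
+nll <- function(p) { -sum(dnorm(y, mean=p[1], sd=exp(p[2]), log=TRUE)) }
+parnames(nll) <- c("mu","logsd")
+parnames(nll)
+}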
+
+
+### TEST OF NLMINB
+if (FALSE) {
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+mle2(y~dpois(lambda=ymax/(1+x/xhalf)),start=list(ymax=25,xhalf=3.06),
+     optimizer="nlminb",fixed=list(ymax=38.76),lower=c(0,0),trace=TRUE)
+
+f = calc_mle2_function(y~dpois(lambda=ymax/(1+x/xhalf)),
+  start=list(ymax=25,xhalf=3.06))
+f2 = function(xhalf) {
+  -sum(dpois(y,38.76/(1+x/xhalf),log=TRUE))
+}
+optim(f2,par=3.06,method="BFGS")
+## optim(f2,par=3.06,method="L-BFGS-B",lower=0) ## error
+nlminb(objective=f2,start=3.06)
+nlminb(objective=f2,start=3.06,lower=0)
+nlminb(objective=f2,start=3.06,lower=1e-8)
+}
+
+strwrapx <- function(x, width = 0.9 * getOption("width"),
+                     indent = 0, exdent = 0,
+                     prefix = "", simplify = TRUE,
+                     parsplit = "\n[ \t\n]*\n", wordsplit = "[ \t\n]")
+{
+    if (!is.character(x)) 
+      x <- as.character(x)
+    indentString <- paste(rep.int(" ", indent), collapse = "")
+    exdentString <- paste(rep.int(" ", exdent), collapse = "")
+    y <- list()
+    ## split at "+" locations
+    plussplit = function(w) {
+      lapply(w,
+             function(z) {
+               plusloc = which(strsplit(z,"")[[1]]=="+")
+               plussplit =
+                 apply(cbind(c(1,plusloc+1),
+                             c(plusloc,nchar(z,type="width"))),
+                       1,
+                       function(b) substr(z,b[1],b[2]))
+               plussplit
+             })}
+    ## ugh!
+    z <- lapply(strsplit(x, parsplit),
+                function(z) { lapply(strsplit(z,wordsplit),
+                                   function(x) unlist(plussplit(x)))
+                })
+    ## print(str(lapply(strsplit(x,parsplit),strsplit,wordsplit)))
+    ## print(str(z))
+    for (i in seq_along(z)) {
+        yi <- character(0)
+        for (j in seq_along(z[[i]])) {
+            words <- z[[i]][[j]]
+            nc <- nchar(words, type = "w")
+            if (any(is.na(nc))) {
+                nc0 <- nchar(words)
+                nc[is.na(nc)] <- nc0[is.na(nc)]
+            }
+            if (any(nc == 0)) {
+                zLenInd <- which(nc == 0)
+                zLenInd <- zLenInd[!(zLenInd %in% (grep("\\.$", 
+                  words) + 1))]
+                if (length(zLenInd) > 0) {
+                  words <- words[-zLenInd]
+                  nc <- nc[-zLenInd]
+                }
+            }
+            if (length(words) == 0) {
+                yi <- c(yi, "", prefix)
+                next
+            }
+            currentIndex <- 0
+            lowerBlockIndex <- 1
+            upperBlockIndex <- integer(0)
+            lens <- cumsum(nc + 1)
+            first <- TRUE
+            maxLength <- width - nchar(prefix, type = "w") - 
+                indent
+            while (length(lens) > 0) {
+                k <- max(sum(lens <= maxLength), 1)
+                if (first) {
+                  first <- FALSE
+                  maxLength <- maxLength + indent - exdent
+                }
+                currentIndex <- currentIndex + k
+                if (nc[currentIndex] == 0) 
+                  upperBlockIndex <- c(upperBlockIndex, currentIndex - 
+                    1)
+                else upperBlockIndex <- c(upperBlockIndex, currentIndex)
+                if (length(lens) > k) {
+                  if (nc[currentIndex + 1] == 0) {
+                    currentIndex <- currentIndex + 1
+                    k <- k + 1
+                  }
+                  lowerBlockIndex <- c(lowerBlockIndex, currentIndex + 
+                    1)
+                }
+                if (length(lens) > k) 
+                  lens <- lens[-(1:k)] - lens[k]
+                else lens <- NULL
+            }
+            nBlocks <- length(upperBlockIndex)
+            s <- paste(prefix, c(indentString, rep.int(exdentString, 
+                nBlocks - 1)), sep = "")
+            for (k in (1:nBlocks)) {
+              s[k] <- paste(s[k],
+                            paste(words[lowerBlockIndex[k]:upperBlockIndex[k]], 
+                                  collapse = " "), sep = "")
+            }
+            s = gsub("\\+ ","+",s)  ## kluge
+            yi <- c(yi, s, prefix)
+        }
+        y <- if (length(yi)) 
+            c(y, list(yi[-length(yi)]))
+        else c(y, "")
+    }
+    if (simplify) 
+        y <- unlist(y)
+    y
+  }
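+
+## e.g. (editor's illustration): wrap a long model description, breaking
+## preferentially after "+" so wrapped formula terms stay readable:
+## strwrapx("Model 1: fit1, y~a+b*x+c*x^2+d*x^3", width=25, exdent=4)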
+
+
+
diff --git a/R/mle2-class.R b/R/mle2-class.R
new file mode 100755
index 0000000..266cfad
--- /dev/null
+++ b/R/mle2-class.R
@@ -0,0 +1,44 @@
+## must go before setAs to avoid warnings
+setClass("mle2", representation(call = "language",
+                                call.orig = "language",
+                                coef = "numeric",
+                                fullcoef = "numeric",
+                                vcov = "matrix",
+                                min = "numeric",
+                                details = "list",
+                                minuslogl = "function",
+                                method = "character",
+                                data="list",
+                                formula="character",
+                                optimizer="character"))
+
+setAs("mle","mle2", function(from,to) {
+  new("mle2",
+      call=from@call,
+      call.orig=from@call,
+      coef=from@coef,
+      fullcoef=from@fullcoef,
+      vcov=from@vcov,
+      min=from@min,
+      details=from@details,
+      minuslogl=from@minuslogl,
+      method=from@method,
+      data=list(),
+      formula="",
+      optimizer="optim")
+})
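+
+## usage sketch: upgrade a stats4::mle fit so the mle2 methods apply
+## ('fit0' is a hypothetical stats4::mle object):
+## as(fit0, "mle2")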
+                
+
+setClass("summary.mle2", representation(call = "language",
+                               coef = "matrix",
+                               m2logL = "numeric"))
+
+setClass("profile.mle2", representation(profile="list",
+                                       summary="summary.mle2"))
+
+
+setClass("slice.mle2", representation(profile="list",
+                                       summary="summary.mle2"))
+
+setIs("profile.mle2", "slice.mle2")
+
diff --git a/R/mle2-methods.R b/R/mle2-methods.R
new file mode 100755
index 0000000..2f4c285
--- /dev/null
+++ b/R/mle2-methods.R
@@ -0,0 +1,171 @@
+## setGeneric("formula", function(x, env = parent.frame(), ...) {
+##           standardGeneric("formula")})
+## don't know why behaviour of anova() and formula() are different?
+## (used setGeneric for anova() without trouble, caused problems here)
+## trying to avoid "creating a new generic" message on install?
+
+setMethod("formula", "mle2",
+          function(x, env = parent.frame(), ...) {
+            as.formula(x@formula)
+          })
+
+## stdEr <- function(x, ...) {
+##   UseMethod("stdEr")
+##  }
+
+setGeneric("stdEr", function(x, ...) { standardGeneric("stdEr")})
+
+setMethod("stdEr","mle2",
+          function(x, ...) {
+            sqrt(diag(x@vcov)) ## why doesn't vcov(x) work here???
+          })
+
+## should this be object at fullcoef or object at coef??? or should
+## it have an additional argument --- is that possible?
+setMethod("coef", "mle2", function(object,exclude.fixed=FALSE) {
+    if (!exclude.fixed) object@fullcoef else object@coef
+})
+## fullcoef <- function(object) object@fullcoef  ## this should be a method
+setMethod("coef", "summary.mle2", function(object) { object@coef })
+## hmmm.  Work on this. 'hessian' conflicts with numDeriv definition. Override?
+## setMethod("Hessian", sig="mle2", function(object) { object@details$hessian })
+
+setMethod("show", "mle2", function(object){
+    cat("\nCall:\n")
+    print(object@call.orig)
+    cat("\nCoefficients:\n")
+    print(coef(object))
+    cat("\nLog-likelihood: ")
+    cat(round(as.numeric(logLik(object)),2),"\n")
+    if (object@optimizer=="optimx" && length(object@method)>1) {
+      cat("Best method:",object@details$method.used,"\n")
+    }
+    if (object@details$convergence>0)
+      cat("\nWarning: optimization did not converge (code ",
+          object@details$convergence,": ",object@details$message,")\n",sep="")
+  })
+
+setMethod("show", "summary.mle2", function(object){
+    cat("Maximum likelihood estimation\n\nCall:\n")
+    print(object@call)
+    cat("\nCoefficients:\n")
+    printCoefmat(coef(object))
+    cat("\n-2 log L:", object@m2logL, "\n")
+})
+
+setMethod("show", "profile.mle2", function(object){
+    cat("Likelihood profile:\n\n")
+    print(object@profile)
+  })
+
+setMethod("summary", "mle2", function(object, waldtest=TRUE, ...){
+    cmat <- cbind(Estimate = object@coef,
+                  `Std. Error` = sqrt(diag(object@vcov)))
+    zval <- cmat[,"Estimate"]/cmat[,"Std. Error"]
+    pval <- 2*pnorm(-abs(zval))
+    coefmat <- cbind(cmat,"z value"=zval,"Pr(z)"=pval)
+    m2logL <- 2*object@min
+    new("summary.mle2", call=object@call.orig, coef=coefmat, m2logL= m2logL)
+})
+
+
+
+setMethod("logLik", "mle2",
+function (object, ...)
+{
+    if(length(list(...)))
+        warning("extra arguments discarded")
+    val <- -object@min
+    attr(val, "df") <- length(object@coef)
+    attr(val, "nobs") <- attr(object,"nobs")
+    class(val) <- "logLik"
+    val
+  })
+
+setGeneric("deviance", function(object, ...) standardGeneric("deviance"))
+setMethod("deviance", "mle2",
+function (object, ...)
+{
+  -2*logLik(object)
+})
+
+setMethod("vcov", "mle2", function (object, ...) { object at vcov } )
+
+
+setGeneric("anova", function(object, ...) standardGeneric("anova"))
+setMethod("anova","mle2",
+          function(object,...,width=getOption("width"), exdent=10) {
+            mlist <- c(list(object),list(...))
+            ## get names from previous call
+            mnames <- sapply(sys.call(sys.parent())[-1],deparse)
+            ltab <- as.matrix(do.call("rbind",
+                                      lapply(mlist,
+                                             function(x) {
+                                               c("Tot Df"=length(x at coef),
+                                                 Deviance=-2*logLik(x))
+                                             })))
+            terms=sapply(mlist,
+              function(obj) {
+                if (is.null(obj@formula) || obj@formula=="") {
+                  mfun <- obj@call$minuslogl
+                  mfun <- paste("[",if (is.name(mfun)) {
+                    as.character(mfun)
+                  } else { "..." },
+                                "]",sep="")
+                  paste(mfun,": ",paste(names(obj@coef),
+                                        collapse="+"),sep="")
+                } else {
+                  as.character(obj@formula)
+                }
+              })
+            mterms <- paste("Model ",
+                            1:length(mnames),": ",mnames,", ",terms,sep="")
+            mterms <- strwrapx(mterms,width=width,exdent=exdent,
+                               wordsplit="[ \n\t]")
+  ## trunc.term <- function(s,len) {
+  ##     ## cat("***",nchar(s),length(grep("\\+",s)),"\n",sep=" ")    
+  ##     if ((nchar(s)<len) || (length(grep("\\+",s))==0)) return(s)
+  ##     ## cat("abc\n")
+  ##     lens <- cumsum(sapply(strsplit(s,"\\+")[[1]],nchar)+1)
+  ##     paste(substr(s,1,max(lens[lens<len])-1),"+...",sep="")
+  ##   }
+  ## WRAP here
+  heading <- paste("Likelihood Ratio Tests",
+                   paste(mterms,
+                         collapse="\n"),
+                   sep="\n")
+  ltab <- cbind(ltab,Chisq=abs(c(NA,diff(ltab[,"Deviance"]))),
+                Df=abs(c(NA,diff(ltab[,"Tot Df"]))))
+  ltab <- cbind(ltab,"Pr(>Chisq)"=c(NA,pchisq(ltab[,"Chisq"][-1],
+                       ltab[,"Df"][-1],lower.tail=FALSE)))
+  rownames(ltab) <- 1:nrow(ltab)
+  attr(ltab,"heading") <- heading
+  class(ltab) <- "anova"
+  ltab
+})
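+
+## usage sketch (hypothetical nested fits 'fit0' and 'fit1'):
+## anova(fit0, fit1)
+## prints a likelihood ratio test table: one row per model, with Chisq
+## and Df computed from successive differences in deviance and total df,
+## and Pr(>Chisq) from pchisq()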
+
+## translate from profile to data frame, as either
+## S3 or S4 method
+as.data.frame.profile.mle2 <- function(x, row.names = NULL,
+                                       optional = FALSE, ...) {
+    m1 <- mapply(function(vals,parname) {
+        ## need to unpack the vals data frame so that
+        ## parameter names show up properly
+        do.call("data.frame",
+                c(list(param=rep(parname,nrow(vals))),
+                  as.list(vals),focal=list(vals$par.vals[,parname])))
+            },
+                 x@profile,
+                 as.list(names(x@profile)),
+                 SIMPLIFY=FALSE)
+    m2 <- do.call("rbind",m1)
+    m2
+}
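+
+## usage sketch: as.data.frame(profile(fit)) (hypothetical fit) returns
+## one row per profile point, with 'param' (which parameter was profiled),
+## 'z' (signed square root of the deviance difference), the par.vals
+## columns, and 'focal' (the value of the profiled parameter itself)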
+
+setAs("profile.mle2","data.frame",
+      function(from) {
+          as.data.frame.profile.mle2(from)
+          })
+
+
+BIC.mle2 <- stats4:::BIC
diff --git a/R/predict.R b/R/predict.R
new file mode 100755
index 0000000..1c46a47
--- /dev/null
+++ b/R/predict.R
@@ -0,0 +1,114 @@
+setGeneric("simulate", function(object, nsim=1, seed=NULL, ...) standardGeneric("simulate"))
+setMethod("simulate", "mle2",
+          function(object, nsim=1, seed, newdata=NULL,
+                   newparams=NULL, ...) {
+            if (!is.null(seed)) set.seed(seed)
+            if (!is.null(newparams)) {
+              object@fullcoef <- newparams
+            }
+            g <- gfun(object,newdata=newdata, nsim=nsim,op="simulate")
+            if (nsim>1) {
+              g <- matrix(g,ncol=nsim)
+            }
+            g
+          })
+
+setGeneric("predict", function(object, ...) standardGeneric("predict"))
+setMethod("predict", "mle2",
+          function(object,newdata=NULL,
+                   location="mean",newparams=NULL, ...) {
+            if (!is.null(newparams)) {
+              object@fullcoef <- newparams
+            }
+            gfun(object,newdata=newdata,location=location,op="predict")
+          })
+
+setGeneric("residuals", function(object, ...) standardGeneric("residuals"))
+setMethod("residuals", "mle2",
+          function(object,
+                   type=c("pearson","response"),
+                   location="mean",
+                   ...) {
+            type <- match.arg(type)
+            location <- match.arg(location)
+            pred <- predict(object,location=location)
+            ## not sure this will work ...
+            obs <- with(object@data,
+                        get(gsub("~.+","",object@formula)))
+            res <- obs-pred
+            if (type=="response") return(res)
+            vars <- predict(object,location="variance")
+            return(res/sqrt(vars))
+          })
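+
+## usage sketch (hypothetical formula-based fit 'fit'):
+## residuals(fit)                   ## Pearson: (obs-mean)/sqrt(variance)
+## residuals(fit, type="response")  ## raw: obs - predicted mean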
+
+## general-purpose function for simulation and
+##  prediction (the hard part is evaluating the parameters etc.)
+##
+gfun <- function(object,newdata=NULL,location=c("mean","median","variance"),
+                 nsim,
+                 op=c("predict","simulate")) {
+  ## notes: should operate on formula
+  ## pull out call$formula (not character)
+  location <- match.arg(location)
+  if (class(try(form <- as.formula(object at call$minuslogl)))!="formula")
+    stop("can only use predict() if formula specified")
+  LHS <- form[[3]]
+  ddist = as.character(LHS[[1]])
+  spref <- switch(op,predict="s",simulate="r")
+  sdist = gsub("^d",spref,ddist)
+  arglist = as.list(LHS)[-1]
+  if (!exists(sdist) || !is.function(get(sdist)))
+    stop("function ",sdist," does not exist")
+  ## evaluate parameters
+  ## evaluate sdist [ newdata > coef > data ]
+##   if (is.null(object at data)) {
+##     comb <- newdata
+##   } else {
+##     nmatch <- match(names(newdata),names(object@data))
+##     comb <- object@data
+##     comb[na.omit(nmatch)] <- newdata[!is.na(nmatch)]
+##     comb <- c(comb,newdata[is.na(nmatch)])
+##   }
+##   comb <- c(newdata,object@data)
+##   comb <- comb[!duplicated(names(comb))]
+##   comb <- comb[sapply(comb,length)>0]
+##   rvar <- strsplit(object@formula,"~")[[1]][1]
+##   comb <- comb[!names(comb)==rvar] ## remove response variable
+  parameters <- eval(object@call$parameters)
+  if (!is.null(parameters)) {
+    vars <- as.character(sapply(parameters,"[[",2))
+    models <-  sapply(parameters,function(z) call.to.char(z[[3]]))
+    parameters <- parameters[models!="1"]
+    npars <- length(parameters)
+    if (npars==0) { ## no non-constant parameters
+      parameters <- mmats <- vpos <- NULL
+    } else {
+      mmats <- list()
+      vpos <- list()
+      for (i in seq(along=parameters)) {
+        vname <- vars[i]
+        p <- parameters[[i]]
+        p[[2]] <- NULL
+        mmat <- with(c(newdata,object@data),
+                     model.matrix(p,data=environment()))
+        ## c(as.list(newdata),as.list(object@data)))
+        pnames <- paste(vname,colnames(mmat),sep=".")
+        assign(vname,mmat %*% coef(object)[pnames])
+      }
+    }
+  }
+  arglist1 <- lapply(arglist,eval,envir=c(newdata,object@data,
+                                    as.list(coef(object))),
+                     enclos=sys.frame(sys.nframe()))
+  ## HACK: need a way to figure out how many data points there
+  ##  are, in the *absence* of an explicit data argument
+  ## then replicate constant values to the full length
+  if (op=="simulate") {
+    if (length(object@data)==0)
+      stop("need explicit data argument for simulation")
+    ndata <- max(sapply(c(newdata,object@data),length)) ## ???
+    arglist1 <- c(arglist1,list(n=ndata*nsim))
+  }
+  vals <- with(as.list(coef(object)),do.call(sdist,arglist1))
+  if (op=="predict") return(vals[[location]]) else return(vals)
+}
diff --git a/R/profile.R b/R/profile.R
new file mode 100755
index 0000000..4dba45d
--- /dev/null
+++ b/R/profile.R
@@ -0,0 +1,410 @@
+## FIXME: abstract to general-purpose code?  (i.e. replace 'fitted' by
+#    objective function, parameter vector, optimizer, method, control settings,
+##   min val, standard error/Hessian, ...
+##
+## allow starting values to be set by "mle" (always use mle), "prevfit"
+##  (default?), and "extrap" (linear extrapolation from previous two fits)
+## 
+
+setMethod("profile", "mle2",
+          function (fitted, which = 1:p, maxsteps = 100,
+                    alpha = 0.01, zmax = sqrt(qchisq(1 - alpha/2, p)),
+                    del = zmax/5, trace = FALSE, skiperrs=TRUE,
+                    std.err, tol.newmin = 0.001, debug=FALSE,
+                    prof.lower, prof.upper, skip.hessian=TRUE,
+                    try_harder=FALSE, ...) {
+              ## fitted: mle2 object
+              ## which: which parameters to profile (numeric or char)
+              ## maxsteps: steps to take looking for zmax
+              ## alpha: max alpha level
+              ## zmax: max log-likelihood difference to search to
+              ## del: stepsize
+              ## trace:
+              ## skiperrs:
+            if (fitted at optimizer=="optimx") {
+              fitted at call$method <- fitted at details$method.used
+            }
+            if (fitted at optimizer=="constrOptim")
+              stop("profiling not yet working for constrOptim -- sorry")
+            Pnames <- names(fitted at coef)
+            p <- length(Pnames)
+            if (is.character(which)) which <- match(which,Pnames)
+            if (any(is.na(which)))
+              stop("parameters not found in model coefficients")
+            ## global flag for better fit found inside profile fit
+            newpars_found <- FALSE
+            if (debug) cat("i","bi","B0[i]","sgn","step","del","std.err[i]","\n")
+            onestep <- function(step,bi) {
+                if (missing(bi)) {
+                    bi <- B0[i] + sgn * step * del * std.err[i]
+                    if (debug) cat(i,bi,B0[i],sgn,step,del,std.err[i],"\n")
+                } else if (debug) cat(bi,"\n")
+                fix <- list(bi)
+                names(fix) <- p.i
+                if (is.null(call$fixed)) call$fixed <- fix
+                else call$fixed <- c(eval(call$fixed),fix)
+                if (skiperrs) {
+                    pfit <- try(eval.parent(call, 2L), silent=TRUE)
+                } else {
+                    pfit <- eval.parent(call, 2L)
+                }
+                ok <- ! inherits(pfit,"try-error")
+                if (debug && ok) cat(coef(pfit),-logLik(pfit),"\n")
+                if(skiperrs && !ok) {
+                    warning(paste("Error encountered in profile:",pfit))
+                    return(NA)
+                }
+                else {
+                    ## pfit is current (profile) fit,
+                    ##   fitted is original fit
+                    ## pfit@min _should_ be > fitted@min
+                    ## thus zz below should be >0
+                    zz <- 2*(pfit@min - fitted@min)
+                    ri <- pv0
+                    ri[, names(pfit@coef)] <- pfit@coef
+                    ri[, p.i] <- bi
+                    ##cat(2*pfit@min,2*fitted@min,zz,
+                    ##   tol.newmin,zz<(-tol.newmin),"\n")
+                    if (!is.na(zz) && zz<0) {
+                        if (zz > (-tol.newmin)) {
+                          z <- 0
+                          ## HACK for non-monotonic profiles? z <- -sgn*sqrt(abs(zz))
+                        } else {
+                          ## cat() instead of warning(); FIXME use message() instead???
+                          message("Profiling has found a better solution,",
+                                "so original fit had not converged:\n")
+                          message(sprintf("(new deviance=%1.4g, old deviance=%1.4g, diff=%1.4g)",
+                                        2*pfit at min,2*fitted at min,2*(pfit at min-fitted@min)),"\n")
+                          message("Returning better fit ...\n")
+                          ## need to return parameters all the way up
+                          ##   to top level
+                          newpars_found <<- TRUE
+                          ## return(pfit@fullcoef)
+                          return(pfit) ## return full fit
+                        }
+                    } else {
+                      z <- sgn * sqrt(zz)
+                    }
+                    pvi <<- rbind(pvi, ri)
+                    zi <<- c(zi, z) ## nb GLOBAL set
+                }
+                if (trace) cat(bi, z, "\n")
+                z
+            } ## end onestep
+            ## Profile the likelihood around its maximum
+            ## Based on profile.glm in MASS
+            summ <- summary(fitted)
+            if (missing(std.err)) {
+                std.err <- summ at coef[, "Std. Error"]
+            } else {
+                n <- length(summ at coef)
+                if (length(std.err)<n)
+                  std.err <- rep(std.err,length.out=length(summ at coef))
+                if (any(is.na(std.err)))
+                  std.err[is.na(std.err)] <- summ at coef[is.na(std.err)]
+            }
+            if (any(is.na(std.err))) {
+              std.err[is.na(std.err)] <- sqrt(1/diag(fitted at details$hessian))[is.na(std.err)]
+              if (any(is.na(std.err))) {  ## still bad
+                stop("Hessian is ill-behaved or missing, ",
+                     "can't find an initial estimate of std. error ",
+                     "(consider specifying std.err in profile call)")
+              }
+              ## warn anyway ...
+              warning("Non-positive-definite Hessian, ",
+                      "attempting initial std err estimate from diagonals")
+            }
+            Pnames <- names(B0 <- fitted@coef)
+            pv0 <- t(as.matrix(B0))
+            p <- length(Pnames)
+            prof <- vector("list", length = length(which))
+            names(prof) <- Pnames[which]
+            call <- fitted@call
+            call$skip.hessian <- skip.hessian ## BMB: experimental
+            call$minuslogl <- fitted@minuslogl
+            ndeps <- eval.parent(call$control$ndeps)
+            parscale <- eval.parent(call$control$parscale)
+            nc <- length(fitted@coef)
+            xf <- function(x) rep(x,length.out=nc) ## expand to length
+            upper <- xf(unlist(eval.parent(call$upper)))
+            lower <- xf(unlist(eval.parent(call$lower)))
+            if (all(upper==Inf & lower==-Inf)) {
+                lower <- upper <- NULL
+                ## kluge: lower/upper may have been set to +/- Inf
+                ##   in previous rounds, 
+                ##  but we don't want them in that case
+            }
+            if (!missing(prof.lower)) prof.lower <- xf(prof.lower)
+            if (!missing(prof.upper)) prof.upper <- xf(prof.upper)
+            ## cat("upper\n")
+            ## print(upper)
+            stop_msg <- list()
+            for (i in which) {
+              zi <- 0
+              pvi <- pv0
+              p.i <- Pnames[i]
+              wfun <- function(txt) paste(txt," (",p.i,")",sep="")
+              ## omit values from control vectors:
+              ##   is this necessary/correct?
+               if (!is.null(ndeps)) call$control$ndeps <- ndeps[-i]
+               if (!is.null(parscale)) call$control$parscale <- parscale[-i]
+               if (!is.null(upper) && length(upper)>1) call$upper <- upper[-i]
+               if (!is.null(lower) && length(lower)>1) call$lower <- lower[-i]
+              stop_msg[[i]] <- list(down="",up="")
+              for (sgn in c(-1, 1)) {
+                dir_ind <- (sgn+1)/2+1 ## (-1,1) -> (1,2)
+                if (trace) {
+                    cat("\nParameter:", p.i, c("down", "up")[dir_ind], "\n")
+                    cat("par val","sqrt(dev diff)\n")
+                }
+                step <- 0
+                z <- 0
+                ## This logic was a bit frail in some cases with
+                ## high parameter curvature. We should probably at least
+                ## do something about cases where the mle2 call fails
+                ## because the parameter gets stepped outside the domain.
+                ## (We now have.)
+                call$start <- as.list(B0)
+                lastz <- 0
+                valf <- function(b) {
+                  (!is.null(b) && length(b)>1) ||
+                  (length(b)==1 && i==1 && is.finite(b))
+                }
+                lbound <- if (!missing(prof.lower)) {
+                  prof.lower[i]
+                } else if (valf(lower))
+                  { lower[i]
+                  } else -Inf
+                ubound <- if (!missing(prof.upper)) prof.upper[i] else if (valf(upper)) upper[i] else Inf
+                stop_bound <- stop_na <- stop_cutoff <- stop_flat <- FALSE
+                while ((step <- step + 1) < maxsteps &&
+                       ## added is.na() test for try_harder case
+                       ## FIXME: add unit test!
+                       (is.na(z) || abs(z) < zmax)) {
+                  curval <- B0[i] + sgn * step * del * std.err[i]
+                  if ((sgn==-1 & curval<lbound) ||
+                      (sgn==1 && curval>ubound)) {
+                    stop_bound <- TRUE;
+                    stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("hit bound"))
+                    break
+                  }
+                  z <- onestep(step)
+                  ## stop on flat spot, unless try_harder
+                  if (step>1 && (identical(oldcurval,curval) || identical(oldz,z))) {
+                    stop_flat <- TRUE
+                    stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("hit flat spot"),
+                                                      sep=";")
+                    if (!try_harder) break
+                  }
+                  oldcurval <- curval
+                  oldz <- z
+                  if (newpars_found) return(z)
+                  if(is.na(z)) {
+                    stop_na <- TRUE
+                    stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("hit NA"),sep=";")
+                    if (!try_harder) break
+                  }
+                  lastz <- z
+                  if (newpars_found) return(z)
+                }
+                stop_cutoff <- (!is.na(z) && abs(z)>=zmax)
+                stop_maxstep <- (step==maxsteps)
+                if (stop_maxstep) stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("max steps"),sep=";")
+                if (debug) {
+                  if (stop_na) message(wfun("encountered NA"),"\n")
+                  if (stop_cutoff) message(wfun("above cutoff"),"\n")
+                }
+                if (stop_flat) {
+                  warning(wfun("stepsize effectively zero/flat profile"))
+                } else {
+                  if (stop_maxstep) warning(wfun("hit maximum number of steps"))
+                  if(!stop_cutoff) {
+                    if (debug) cat(wfun("haven't got to zmax yet, trying harder"),"\n")
+                    stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("past cutoff"),sep=";")
+                    ## now let's try a bit harder if we came up short
+                    for(dstep in c(0.2, 0.4, 0.6, 0.8, 0.9)) {
+                      curval <- B0[i] + sgn * (step-1+dstep) * del * std.err[i]
+                      if ((sgn==-1 & curval<lbound) ||
+                          (sgn==1 && curval>ubound)) break
+                      z <- onestep(step - 1 + dstep)
+                      if (newpars_found) return(z)
+                      if(is.na(z) || abs(z) > zmax) break
+                      lastz <- z
+                      if (newpars_found) return(z)
+                    }
+                    if (!stop_cutoff && stop_bound) {
+                      if (debug) cat(wfun("bounded and didn't make it, try at boundary"),"\n")
+                      ## bounded and didn't make it, try at boundary
+                      if (sgn==-1 && B0[i]>lbound) z <- onestep(bi=lbound)
+                      if (sgn==1  && B0[i]<ubound) z <- onestep(bi=ubound)
+                      if (newpars_found) return(z)
+                    }
+                  } else if (length(zi) < 5) { # try smaller steps
+                    if (debug) cat(wfun("try smaller steps"),"\n")
+                    stop_msg[[i]][[dir_ind]] <- paste(stop_msg[[i]][[dir_ind]],wfun("took more steps"),sep=";")
+                    mxstep <- step - 1
+                    step <- 0.5
+                    while ((step <- step + 1) < mxstep) {
+                      z <- onestep(step)
+                    }
+                  } ## smaller steps
+                } ## !zero stepsize
+              } ## step in both directions
+              si <- order(pvi[, i])
+              prof[[p.i]] <- data.frame(z = zi[si])
+              prof[[p.i]]$par.vals <- pvi[si,, drop=FALSE]
+            } ## for i in which
+            newprof <- new("profile.mle2", profile = prof, summary = summ)
+            attr(newprof,"stop_msg") <- stop_msg
+            newprof
+          })
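+
+## usage sketch (hypothetical mle2 fit 'fit'):
+if (FALSE) {
+pp <- profile(fit)     ## profile all parameters
+plot(pp)               ## one panel per profiled parameter
+as.data.frame(pp)      ## flatten for further plotting/inspection
+}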
+
+
+setMethod("plot", signature(x="profile.mle2", y="missing"),
+          function (x, levels, which=1:p, conf = c(99, 95, 90, 80, 50)/100,
+          plot.confstr = TRUE, confstr = NULL, absVal = TRUE, add = FALSE,
+          col.minval="green", lty.minval=2,
+          col.conf="magenta", lty.conf=2,
+          col.prof="blue", lty.prof=1,
+          xlabs=nm, ylab="z",
+          onepage=TRUE,
+          ask=((prod(par("mfcol")) < length(which)) && dev.interactive() &&
+               !onepage),
+          show.points=FALSE,
+          main, xlim, ylim, ...)
+{
+    ## Plot profiled likelihood
+    ## Based on profile.nls (package stats)
+    obj <- x@profile
+    nm <- names(obj)
+    p <- length(nm)
+    ## need to save these for i>1 below
+    no.xlim <- missing(xlim)
+    no.ylim <- missing(ylim)    
+    if (is.character(which)) which <- match(which,nm)
+    ask_orig <- par(ask=ask)
+    op <- ask_orig  ## par() already returns the old settings as a list
+    if (onepage) {
+        nplots <- length(which)
+        ## Q: should we reset par(mfrow), or par(mfg), anyway?
+        if (prod(par("mfcol")) < nplots) {
+            rows <- ceiling(round(sqrt(nplots)))
+            columns <- ceiling(nplots/rows)
+            mfrow_orig <- par(mfrow=c(rows,columns))
+            op <- c(op,mfrow_orig)
+          }
+      }
+    on.exit(par(op))
+    confstr <- NULL
+    if (missing(levels)) {
+        levels <- sqrt(qchisq(pmax(0, pmin(1, conf)), 1))
+        confstr <- paste(format(100 * conf), "%", sep = "")
+    }
+    if (any(levels <= 0)) {
+        levels <- levels[levels > 0]
+        warning("levels truncated to positive values only")
+    }
+    if (is.null(confstr)) {
+        confstr <- paste(format(100 * pchisq(levels^2, 1)), "%", sep = "")
+    }
+    mlev <- max(levels) * 1.05
+    ##    opar <- par(mar = c(5, 4, 1, 1) + 0.1)
+    if (!missing(xlabs) && length(which)<length(nm)) {
+      xl2 = nm
+      xl2[which] <- xlabs
+      xlabs <- xl2
+    }
+    if (missing(main)) 
+      main <- paste("Likelihood profile:",nm)
+    main <- rep(main,length=length(nm))
+    for (i in seq(along = nm)[which]) {
+        ## <FIXME> This does not need to be monotonic
+        ## cat("**",i,obj[[i]]$par.vals[,i],obj[[i]]$z,"\n")
+        ## FIXME: reconcile this with confint!
+        yvals <- obj[[i]]$par.vals[,nm[i],drop=FALSE]
+        avals <- data.frame(x=unname(yvals), y=obj[[i]]$z)
+        if (!all(diff(obj[[i]]$z)>0)) {
+          warning("non-monotonic profile: reverting to linear interpolation.  Consider setting std.err manually")
+          predback <- approxfun(obj[[i]]$z,yvals)
+        } else {
+          sp <- splines::interpSpline(yvals, obj[[i]]$z,
+                                      na.action=na.omit)
+          avals <- rbind(avals,as.data.frame(predict(sp)))
+          avals <- avals[order(avals$x),]
+          bsp <- try(splines::backSpline(sp),silent=TRUE)
+          bsp.OK <- (class(bsp)[1]!="try-error")
+          if (bsp.OK) {
+            predback <- function(y) { predict(bsp,y)$y }
+          } else { ## backspline failed
+            warning("backspline failed: using uniroot(), confidence limits may be unreliable")
+            ## what do we do?
+            ## attempt to use uniroot
+            predback <- function(y) {
+              pfun0 <- function(z1) {
+                t1 <- try(uniroot(function(z) {
+                  predict(sp,z)$y-z1
+                }, range(obj[[i]]$par.vals[,nm[i]])),silent=TRUE)
+                if (class(t1)[1]=="try-error") NA else t1$root
+                }
+              sapply(y,pfun0)
+            }
+          }
+        }
+        ## </FIXME>
+        if (no.xlim) xlim <- predback(c(-mlev, mlev))
+        xvals <- obj[[i]]$par.vals[,nm[i]]
+        if (is.na(xlim[1]))
+          xlim[1] <- min(xvals)
+        if (is.na(xlim[2]))
+          xlim[2] <- max(xvals)
+        if (absVal) {
+            if (!add) {
+                if (no.ylim) ylim <- c(0,mlev)
+                plot(abs(obj[[i]]$z) ~ xvals, 
+                     xlab = xlabs[i],
+                     ylab = if (missing(ylab)) expression(abs(z)) else ylab,
+                     xlim = xlim, ylim = ylim,
+                     type = "n", main=main[i], ...)
+            }
+            avals$y <- abs(avals$y)
+            lines(avals, col = col.prof, lty=lty.prof)
+            if (show.points) points(yvals,abs(obj[[i]]$z))
+        } else { ## not absVal
+            if (!add) {
+                if (no.ylim) ylim <- c(-mlev,mlev)
+                plot(obj[[i]]$z ~ xvals,  xlab = xlabs[i],
+                     ylim = ylim, xlim = xlim,
+                     ylab = if (missing(ylab)) expression(z) else ylab,
+                     type = "n", main=main[i], ...)
+            }
+            lines(avals, col = col.prof, lty=lty.prof)
+            if (show.points) points(yvals,obj[[i]]$z)
+        }
+        x0 <- predback(0)
+        abline(v = x0, h=0, col = col.minval, lty = lty.minval)
+        for (j in 1:length(levels)) {
+            lev <- levels[j]
+            confstr.lev <- confstr[j]
+            ## Note: predict may return NA if we didn't profile
+            ## far enough in either direction. That's OK for the
+            ## "h" part of the plot, but the horizontal line made
+            ## with "l" disappears.
+            pred <- predback(c(-lev, lev))
+            ## horizontal
+            if (absVal) levs=rep(lev,2) else levs=c(-lev,lev)
+            lines(pred, levs, type = "h", col = col.conf, lty = 2)
+            ## vertical
+            pred <- ifelse(is.na(pred), xlim, pred)
+            if (absVal) {
+                lines(pred, rep(lev, 2), type = "l", col = col.conf, lty = lty.conf)
+            } else {
+                lines(c(x0,pred[2]), rep(lev, 2), type = "l", col = col.conf, lty = lty.conf)
+                lines(c(pred[1],x0), rep(-lev, 2), type = "l", col = col.conf, lty = lty.conf)
+            }
+            if (plot.confstr) {
+                text(labels=confstr.lev,x=x0,y=lev,col=col.conf)
+            }
+        } ## loop over levels
+    } ## loop over variables
+    ## par(opar)
+  })
diff --git a/R/slice.R b/R/slice.R
new file mode 100755
index 0000000..6157260
--- /dev/null
+++ b/R/slice.R
@@ -0,0 +1,388 @@
+## TO DO: roll back into bbmle?
+## allow multiple 'transects'?
+## (i.e. if two sets of parameters are given ...)
+## * robustification
+##  print method
+## allow manual override of ranges
+## allow log-scaling?
+## preserve order of parameters in 1D plots
+
+## substitute values of parameters into full parameter vector
+mkpar <- function(params,p,i) {
+  params[i] <- p
+  params
+}
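+
+## e.g. mkpar(c(a=1,b=2,c=3), 5, 2) -> c(a=1, b=5, c=3)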
+
+## get reasonable range for slice
+## document what is done here
+## more robust approaches;
+## try not to assume anything about signs of parameters
+## inherit bounds from fitted value
+get_trange <- function(pars,  ## baseline parameter values
+                       i,     ## focal parameter
+                       fun,   ## objective function
+                       lower=-Inf, ## lower bound 
+                       upper=Inf,  ## upper bound
+                       cutoff=10,  ## increase above min z-value
+                       maxit=200,  ## max number of iterations
+                       steptype=c("mult","addprop"),
+                       step=0.1) {
+    ## step possibilities: multiplicative
+    ## additive (absolute scale)     [not yet implemented]
+    steptype <- match.arg(steptype)
+    v <- v0 <- fun(pars)
+    lowval <- pars[i]
+    it <- 1
+    if (steptype=="addprop") step <- step*pars[i]
+    while (it<maxit && lowval>lower && v<(v0+cutoff)) {
+        lowval <- switch(steptype,
+                         addprop=lowval-step,
+                         mult=lowval*(1-step))
+        v <- fun(mkpar(pars,lowval,i))
+        it <- it+1
+    }
+    lowdev <- v
+    lowit <- it
+    upval <- pars[i]
+    it <- 1
+    v <- v0 <- fun(pars)
+    if (upval==0) upval <- 1e-4
+    while (it<maxit && upval<upper && v<(v0+cutoff)) {
+        upval <- switch(steptype,
+                        addprop=upval+step,
+                        mult=upval*(1+step))
+        v <- fun(mkpar(pars,upval,i))
+        ## cat(it,upper,v,"\n")
+        it <- it+1
+    }
+    updev <- v
+    upit <- it
+    c(low_val=lowval,up_val=upval,low_dev=lowdev,up_dev=updev,
+      low_it=lowit,up_it=upit)
+  }
+
+get_all_trange <- function(params,fun,lower,upper,cutoff=10,...) {
+  arglist <- c(list(pars=params,fun=fun,cutoff=cutoff),list(...))
+  tranges <- t(mapply(FUN=get_trange,
+                      seq(length(params)),
+                      lower,
+                      upper,
+                      MoreArgs=arglist,
+                      SIMPLIFY=TRUE))
+  rownames(tranges) <- names(params)
+  tranges
+}
+
+## generic function (S3)
+slice <- function (x, dim=1, ...)  {
+    UseMethod("slice")
+}
+
+slice.mle2 <- function(x,  ...) {
+    ff <- x@minuslogl
+    ## vectorize objective function: return minus log L
+    ## (switching to deviance screws things up???)
+    ff2 <- function(p) {
+        do.call(ff,as.list(p))
+    }
+    slice0(coef(x),ff2, ...)
+}
+
+slice0 <- function(params,fun,dim=1,params2=NULL,...) {
+  if (dim==1) {
+    if (is.null(params2)) {
+      slice1D(params,fun,...)
+    } else {
+      slicetrans(params,params2,fun,...)
+    }
+  } else {
+    if (!is.null(params2)) stop("can't do transect in 2D")
+    slice2D(params,fun,...)
+  }
+}
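+
+## usage sketch (hypothetical mle2 fit 'fit'):
+if (FALSE) {
+s1 <- slice(fit)           ## 1D slices through the MLE
+plot(s1)                   ## lattice xyplot, one panel per parameter
+s2 <- slice(fit, dim=2)    ## pairwise 2D slice surfaces
+plot(s2)                   ## lattice splom of the surfaces
+}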
+
+
+plot.slice <- function(x,...) {
+  switch(x$dim,xyplot(x,...),splom(x,...))
+}
+
+slicetrans <- function(params, params2, fun, extend=0.1, nt=401,
+                       lower=-Inf, upper=Inf) {
+  ## make sure 0/1 are included in parameter vector ...
+  np <- length(params)
+  extend <- rep(extend,length.out=2)
+  lower <- rep(lower,length.out=np)
+  upper <- rep(upper,length.out=np)
+  slicep <- sort(unique(c(0,1,seq(-extend[1],1+extend[2], length=nt))))
+  slicepars <- t(sapply(slicep, function(x) (1 - x) * params + x * params2))
+  OK <- apply(slicepars,1,function(x) all(x>=lower & x<=upper))
+  if (any(!OK)) {
+    warning("some parameter sets outside of bounds were removed")
+    slicep <- slicep[OK]
+    slicepars <- slicepars[OK,]
+  }
+  v <- apply(slicepars, 1, fun)
+  slices <- list(data.frame(var1="trans",x=slicep,z=v))
+  r <- list(slices=slices,params=params,params2=params2,dim=1)
+  class(r) <- "slice"
+  r
+}
+
+  
+slice1D <- function(params,fun,nt=101,
+                    lower=-Inf,
+                    upper=Inf,
+                    verbose=TRUE,
+                    tranges=NULL,
+                    ...) {
+  npv <- length(params)
+  if (is.null(pn <- names(params))) pn <- seq(npv)
+  if (is.null(tranges)) {
+      tranges <- get_all_trange(params,fun,
+                                rep(lower,length.out=npv),
+                                rep(upper,length.out=npv),
+                                ...)
+  }
+  slices <- vector("list",npv)
+  for (i in 1:npv) {
+      tvec <- seq(tranges[i,1],tranges[i,2],length=nt)
+      if (verbose) cat(pn[i],"\n")
+      vtmp <- sapply(tvec,
+                     function(t) {
+                         fun(mkpar(params,t,i))})
+      slices[[i]] <- data.frame(var1=pn[i],x=tvec,z=vtmp)
+  }
+  r <- list(slices=slices,ranges=tranges,params=params,dim=1)
+  class(r) <- "slice"
+  r
+}
+
+## OLD slice method
+## should probably roll this in as an option to profile
+## include attribute, warning? draw differently (leave off
+## conf. limit lines)
+## slice <- function(fitted, ...) UseMethod("slice")
+
+## 1D slicing implemented as in profile
+sliceOld <- function (fitted, which = 1:p, maxsteps = 100,
+                       alpha = 0.01, zmax = sqrt(qchisq(1 - alpha/2, p)),
+                       del = zmax/5, trace = FALSE,
+                       tol.newmin=0.001, ...)
+{
+    onestep <- function(step)
+    {
+        bi <- B0[i] + sgn * step * del * std.err[i]
+        fix <- list(bi)
+        names(fix) <- p.i
+        call$fixed <- c(fix,eval(call$fixed))
+        call$eval.only = TRUE
+        pfit <- try(eval(call), silent=TRUE) ##
+        if(inherits(pfit, "try-error")) return(NA)
+        else {
+            zz <- 2*(pfit@min - fitted@min)
+            ri <- pv0
+            ri[, names(pfit@coef)] <- pfit@coef
+            ri[, p.i] <- bi
+            if (zz > -tol.newmin)
+                zz <- max(zz, 0)
+            else stop("profiling has found a better solution, so original fit had not converged")
+            z <- sgn * sqrt(zz)
+            pvi <<- rbind(pvi, ri)
+            zi <<- c(zi, z) ## NB global set!
+        }
+        if (trace) cat(bi, z, "\n")
+        z
+      }
+    ## Profile the likelihood around its maximum
+    ## Based on profile.glm in MASS
+    summ <- summary(fitted)
+    std.err <- summ at coef[, "Std. Error"]
+    Pnames <- names(B0 <- fitted at coef)
+    pv0 <- t(as.matrix(B0))
+    p <- length(Pnames)
+    prof <- vector("list", length = length(which))
+    names(prof) <- Pnames[which]
+    call <- fitted@call
+    call$minuslogl <- fitted@minuslogl
+    for (i in which) {
+        zi <- 0
+        pvi <- pv0
+        p.i <- Pnames[i]
+        for (sgn in c(-1, 1)) {
+          if (trace)
+            cat("\nParameter:", p.i, c("down", "up")[(sgn + 1)/2 + 1], "\n")
+          step <- 0
+          z <- 0
+          ## This logic was a bit frail in some cases with
+          ## high parameter curvature. We should probably at least
+          ## do something about cases where the mle2 call fails
+          ## because the parameter gets stepped outside the domain.
+          ## (We now have.)
+          call$start <- as.list(B0)
+          lastz <- 0
+          while ((step <- step + 1) < maxsteps && abs(z) < zmax) {
+            z <- onestep(step)
+            if(is.na(z)) break
+            lastz <- z
+          }
+          if(abs(lastz) < zmax) {
+            ## now let's try a bit harder if we came up short
+            for(dstep in c(0.2, 0.4, 0.6, 0.8, 0.9)) {
+              z <- onestep(step - 1 + dstep)
+              if(is.na(z) || abs(z) > zmax) break
+            }
+          } else if(length(zi) < 5) { # try smaller steps
+            mxstep <- step - 1
+            step <- 0.5
+            while ((step <- step + 1) < mxstep) onestep(step)
+          }
+        }
+        si <- order(pvi[, i])
+        prof[[p.i]] <- data.frame(z = zi[si])
+        prof[[p.i]]$par.vals <- pvi[si,, drop=FALSE]
+    }
+    list(profile = prof, summary = summ)
+}
+
+
+
+## * is it possible to set up the 2D vectors so they include
+##   the baseline value? maybe not easily ...
+slice2D <- function(params,
+                    fun,
+                    nt=31,
+                    lower=-Inf,
+                    upper=Inf,
+                    cutoff=10,
+                    verbose=TRUE,
+                    tranges=NULL,
+                    ...) {
+  npv <- length(params)
+  if (is.null(pn <- names(params))) pn <- seq(npv)
+  if (is.null(tranges)) {
+      tranges <- get_all_trange(params,fun,
+                                rep(lower,length.out=npv),
+                                rep(upper,length.out=npv),
+                                cutoff=cutoff,
+                                ...)
+  }
+  slices <- list()
+  for (i in 1:(npv-1)) {
+    slices[[i]] <- vector("list",npv)
+    for (j in (i+1):npv) {
+      if (verbose) cat("param",i,j,"\n")
+      t1vec <- seq(tranges[i,1],tranges[i,2],length=nt)
+      t2vec <- seq(tranges[j,1],tranges[j,2],length=nt)
+      mtmp <- matrix(nrow=nt,ncol=nt)
+      for (t1 in seq_along(t1vec)) {
+        for (t2 in seq_along(t2vec)) {
+          mtmp[t1,t2] <- fun(mkpar(params,c(t1vec[t1],t2vec[t2]),
+                                   c(i,j)))
+        }
+      }
+      slices[[i]][[j]] <- data.frame(var1=pn[i],var2=pn[j],
+                                     expand.grid(x=t1vec,y=t2vec),
+                                     z=c(mtmp))
+    }
+  }
+  r <- list(slices=slices,ranges=tranges,params=params,dim=2)
+  class(r) <- "slice"
+  r
+}
+
+## flatten slice:
+##  do.call(rbind,lapply(slices,do.call,what=rbind))
+
+slices_apply <- function(s,FUN,...) {
+  for (i in seq_along(s)) {
+    for (j in seq_along(s[[i]])) {
+      if (!is.null(s[[i]][[j]])) {
+        s[[i]][[j]] <- FUN(s[[i]][[j]],...)
+      }
+    }
+  }
+  s
+}
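+
+## e.g. (sketch): rescale every sub-data frame in place, as splom.slice()
+## below does with its internal scale.z helper:
+##   s$slices <- slices_apply(s$slices,
+##                            function(d) { d$z <- d$z - min(d$z); d })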
+
+xyplot.slice <- function(x,data,type="l",scale.min=TRUE,...) {
+  allslice <- do.call(rbind,x$slices)
+  ## correct ordering
+  allslice$var1 <- factor(allslice$var1,
+                          levels=unique(as.character(allslice$var1)))
+  if (scale.min) allslice$z <- allslice$z-min(allslice$z)
+  pfun <- function(x1,y1,...) {
+      panel.xyplot(x1,y1,...)
+      if (is.null(x$params2)) {
+          ## regular 1D slice
+          panel.abline(v=x$params[panel.number()],col="gray")
+      } else {
+          ## 'transect' slice
+          panel.abline(v=c(0,1),col="gray")
+          panel.abline(h=y1[x1 %in% c(0,1)],col="gray")
+      }
+  }
+  xyplot(z~x|var1,data=allslice,type=type,
+         scales=list(x=list(relation="free")),
+         panel=pfun,...)
+}
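+
+## Hypothetical usage ('s1' is a placeholder for a 1D "slice" object):
+## lattice's xyplot() generic dispatches on the "slice" class, so
+##   xyplot(s1, scale.min=TRUE)
+## should draw one free-x panel per parameter, with the baseline
+## parameter values marked in gray.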
+
+splom.slice <- function(x,
+                        data,
+                        scale.min=TRUE,
+                        at=NULL,
+                        which.x=NULL,
+                        which.y=NULL,
+                        dstep=4,
+                        contour=FALSE,...) {
+  if (x$dim==1) stop("can't do splom on 1D slice object")
+  smat <- t(x$ranges[,1:2])
+  if (scale.min) {
+    ## FIXME: something more elegant to flatten slice list?
+    all.z <- unlist(sapply(x$slices,
+                    function(x) {
+                      sapply(x,
+                             function(x)
+                             if (is.null(x)) NULL else x[["z"]])
+                    }))
+    min.z <- min(all.z[is.finite(all.z)])
+    ## round up to next multiple of 'dstep'
+    max.z <- dstep * ((max(all.z[is.finite(all.z)])-
+                       min.z) %/% dstep + 1)
+    if (missing(at)) {
+      at <- seq(0,max.z,by=dstep)
+    }
+    scale.z <- function(X) {
+        X$z <- X$z-min.z
+        X
+    }
+    x$slices <- slices_apply(x$slices,scale.z)
+  }
+  up0 <- function(x1, y, groups, subscripts, i, j, ...) {
+      ## browser()
+    sl <- x$slices[[j]][[i]]
+    with(sl,panel.levelplot(x=x,y=y,z=z,contour=contour,
+                            at=if (!is.null(at)) at else pretty(z),
+                            subscripts=seq(nrow(sl))))
+    panel.points(x$params[j],x$params[i],pch=16)
+    mm <- matrix(sl$z,nrow=length(unique(sl$x)))
+    ## FIXME: more robust ...
+    wmin <- which(mm==min(mm),arr.ind=TRUE)
+    xmin <- unique(sl$x)[wmin[1]]
+    ymin <- unique(sl$y)[wmin[2]]
+    panel.points(xmin,ymin,pch=1)
+  }
+  lp0 <- function(...) {
+  }
+  ## FIXME: use ?draw.colorkey to add a legend ...
+  ## FIXME: make diagonal panel text smaller ???
+  splom(smat,lower.panel=lp0,diag.panel=diag.panel.splom,
+        upper.panel=up0,...)
+}
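+
+## Hypothetical usage: with a 2D slice object 's2' from slice2D() above,
+##   splom(s2, contour=TRUE, dstep=2)
+## draws a levelplot panel for each parameter pair above the diagonal;
+## the filled point marks the fitted value and the open point marks the
+## per-panel minimum.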
+
+  
+## generic profiling code???
+##  either need (1) optimizers with 'masks' or (2)
diff --git a/R/update.R b/R/update.R
new file mode 100755
index 0000000..55d36e6
--- /dev/null
+++ b/R/update.R
@@ -0,0 +1,43 @@
+## setGeneric("update", function(object, formula.,  ..., evaluate=TRUE)
+##           standardGeneric("update"))
+
+
+## FIXME: compare these two
+## setMethod("update", "mle2",
+## function (object, ..., evaluate = TRUE)
+## {
+##   call <- object@call
+##   extras <- match.call(expand.dots = FALSE)$...
+##   if (length(extras) > 0) {
+##     existing <- !is.na(match(names(extras), names(call)))
+##     for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
+##     if (any(!existing)) {
+##       call <- c(as.list(call), extras[!existing])
+##       call <- as.call(call)
+##     }
+##   }
+##   if (evaluate) eval(call, parent.frame()) else call
+## })
+
+ 
+
+## update.default, modified with $ turned to @ as appropriate
+setMethod("update", "mle2",
+          function (object, formula., evaluate = TRUE, ...) 
+          {
+            call <- object@call
+            extras <- match.call(expand.dots = FALSE)$...
+            if (!missing(formula.)) 
+              call$minuslogl <- update.formula(formula(object), formula.)
+            if (length(extras)) {
+              existing <- !is.na(match(names(extras), names(call)))
+              for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
+              if (any(!existing)) {
+                call <- c(as.list(call), extras[!existing])
+                call <- as.call(call)
+              }
+            }
+            if (evaluate) 
+              eval(call, parent.frame())
+            else call
+          })
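+
+## Usage sketch (cf. the vignette, where update() is used to swap in new
+## 'parameters' and 'start' arguments on a fitted model):
+##   m2f <- update(m0f,
+##                 parameters=list(prob~dilution,theta~1),
+##                 start=list(prob=0.5,theta=78.424))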
diff --git a/TODO b/TODO
new file mode 100755
index 0000000..cfd637a
--- /dev/null
+++ b/TODO
@@ -0,0 +1,136 @@
+BUGS/ISSUES:
+
+* change the way 'better fit found' is handled in profiles.
+  with try_harder, push through anyway ... otherwise stop
+  WITH AN ERROR and give the user a utility function for how to proceed?
+  *Don't* want to return an object with a different structure --
+  maybe attributes?
+
+* is there a way to (optionally?) save the environment of the
+   call so that fits could be profiled after being removed from
+   their original environments? (maybe fixed?)
+
+* consider "data-absent" flag for big data sets?
+
+* remove "quad" method, replace with confint.default [NO]
+
+* move calcslice from emdbook and make it more robust/general
+  (different signatures: mle2 fits, numeric vectors)
+
+
+* prettier multi-parameter profiles a la Bates/lme4
+* ggplot2 profiles?
+
+* fix confint plot to use linear interpolation when non-monotonic (done)
+* pass parameters through from confint to profile (done?)
+* nobs() methods
+* filter use.ginv properly
+* fix gradient with profile
+
+* work on vignette: ask MM/PD/BDR about mismatch between confint (approx) and profile plot (backspline) ... ?
+
+* DISABLE boundary warning when profiling ...
+* try to do something about rescaling when hessian is problematic?
+* add ginv for problematic hessians? (done)
+* implement 'subset' argument
+* check problems with optimizer="optimize" in tests/parscale.R
+* allow ICtab etc. to recognize optim(), optimize() fits
+  (ASSUMING that the function was a NLL)
+* add optimizer() as alternative optimizer [DONE]
+* fix par mfrow resetting glitch on plotting profile
+* prettier profile plots (with lattice)?  xyplot for profiles?
+* make sure numeric deriv modifications are working
+
+* incorporate optimx (done)?  minpack.lm?
+
+* proper initialization of intercept-less parameter()
+    entries
+
+* plot methods/fortify, a la ggplot2?
+
+* add deviance() method [need S3-to-S4 conversion]
+
+* make sure subset arg is really working!
+
+* spurious error on mismatched parameter names
+
+* spurious warnings in 1-parameter conf int: [FIXED]
+
+   library(bbmle)
+   m1 <- mle2(10~dbinom(prob=p,size=15),start=list(p=0.67))
+   c1 <- confint(m1)
+
+* do one-parameter profiles with optimize??
+
+* use numDeriv library hessian() function instead of nlme::fdHess?
+(or use nlme::fdHess to avoid loading whole package?)
+  [DONE]
+
+* turn off Hessian calculation for profile fits??
+  [maybe DONE by virtue of previous fix]
+
+* should print warning immediately if convergence fails
+
+* some weird stuff with returned fit from found-better-fit profile --
+treating profiled value as fixed ...
+
+* replace approx() in confint() with backspline?
+  general solution for non-monotonic profiles?
+
+BUG:   order of parameters matters for L-BFGS-B
+  (fixed)
+
+  adjusting parameter vectors for lower, upper, parscale, ...
+    when some params are fixed ...
+
+  sort out names BS -- when can we safely remove names?
+
+
+TO DO:
+
+model-averaging?
+
+more documentation -- especially S4 methods!
+   especially:
+     profile
+     plot profile
+     confint
+
+catch/interpret more error messages?
+  (try to filter last.warning?)
+
+add DIC to IC tabs?
+lmer?
+
+WISHLIST:
+start as FUNCTION (i.e., self-start)
+analytic derivatives
+relist
+subset
+plot.predict
+drop1, add1, etc.
+link functions ("identity","log", "logit", etc.)
+delta method standard error calcs
+transformations on LHS of formula (i.e. use
+  link rather than inverse-link function?  only
+  possible if link is known and invertible:
+
+   inverse 
+   log
+   logit  (qlogis)
+   probit (qnorm)
+
+  etc.
+
+clean up/argue about data handling: closures etc. etc. etc...
+
+document argument handling:
+
+  start must be a named vector or a named list
+       [OR?? inherit from parnames(minuslogl)?]
+  if start is not a list (i.e. a numeric vector)
+  set vecpar TRUE
+  convert start to a list
+
+  if missing and default.start is TRUE use formals(minuslogl) 
+
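+  a sketch of that start-handling logic (illustrative only, not the
+  actual mle2 source; default.start is the existing argument name):
+
+     if (missing(start) && default.start) start <- formals(minuslogl)
+     if (!is.list(start)) {   ## named numeric vector
+       vecpar <- TRUE
+       start <- as.list(start)
+     }
+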
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..c9587b4
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/debian/README.test b/debian/README.test
deleted file mode 100644
index 55a9142..0000000
--- a/debian/README.test
+++ /dev/null
@@ -1,8 +0,0 @@
-Notes on how this package can be tested.
-────────────────────────────────────────
-
-To run the unit tests provided by the package you can do
-
-   sh  run-unit-test
-
-in this directory.
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 2357bee..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,22 +0,0 @@
-r-cran-bbmle (1.0.18-3) UNRELEASED; urgency=medium
-
-  * Remove temporary test dir
-
- -- Andreas Tille <tille at debian.org>  Sun, 15 Jan 2017 22:18:27 +0100
-
-r-cran-bbmle (1.0.18-2) unstable; urgency=medium
-
-  * Add missing autopkgtest dependency: r-cran-testthat
-  * Convert to dh-r
-  * Canonical homepage for CRAN
-  * d/watch: version=4
-  * debhelper 10
-  * d/watch: version=4
-
- -- Andreas Tille <tille at debian.org>  Sun, 15 Jan 2017 22:04:47 +0100
-
-r-cran-bbmle (1.0.18-1) unstable; urgency=low
-
-  * Initial release (closes: #829567)
-
- -- Andreas Tille <tille at debian.org>  Mon, 04 Jul 2016 12:29:16 +0200
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 7c0f4b3..0000000
--- a/debian/control
+++ /dev/null
@@ -1,26 +0,0 @@
-Source: r-cran-bbmle
-Maintainer: Debian Med Packaging Team <debian-med-packaging at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 10),
-               dh-r,
-               r-base-dev,
-               r-cran-numderiv,
-               r-cran-lattice,
-               r-cran-mass
-Standards-Version: 3.9.8
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-med/trunk/packages/R/r-cran-bbmle/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-med/trunk/packages/R/r-cran-bbmle/trunk/
-Homepage: https://cran.r-project.org/package=bbmle
-
-Package: r-cran-bbmle
-Architecture: all
-Depends: ${misc:Depends},
-         ${R:Depends},
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: GNU R tools for general maximum likelihood estimation
- This GNU R package provides methods and functions for fitting maximum
- likelihood models in R. This package modifies and extends the 'mle'
- classes in the 'stats4' package.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index b4a8e38..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,27 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: bbmle
-Upstream-Contact: Ben Bolker <bolker at mcmaster.ca>
-Source: https://cran.r-project.org/package=bbmle
-
-Files: *
-Copyright: 2010-2016 Ben Bolker <bolker at mcmaster.ca>
-                     R Development Core Team
-License: GPL-2+
-
-Files: debian/*
-Copyright: 2016-2017 Andreas Tille <tille at debian.org>
-License: GPL-2+
-
-License: GPL-2+
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
- .
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
- .
- On Debian systems, the complete text of the GNU General Public
- License can be found in `/usr/share/common-licenses/GPL-2'.
diff --git a/debian/docs b/debian/docs
deleted file mode 100644
index 3adf0d6..0000000
--- a/debian/docs
+++ /dev/null
@@ -1,3 +0,0 @@
-debian/README.test
-debian/tests/run-unit-test
-tests
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 64f61aa..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/make -f
-
-%:
-	dh $@ --buildsystem R
-
-override_dh_install:
-	dh_install
-	find debian -name "*.R" -exec chmod -x \{\} \;
-	find debian -iname "*.rda" -exec chmod -x \{\} \;
-	find debian -iname "*.Rnw" -exec chmod -x \{\} \;
-	find debian -iname "*.Rd" -exec chmod -x \{\} \;
-	find debian -name "Makefile" -exec chmod -x \{\} \;
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/tests/control b/debian/tests/control
deleted file mode 100644
index bb53548..0000000
--- a/debian/tests/control
+++ /dev/null
@@ -1,3 +0,0 @@
-Tests: run-unit-test
-Depends: @, r-cran-runit, r-cran-testthat
-Restrictions: allow-stderr
diff --git a/debian/tests/run-unit-test b/debian/tests/run-unit-test
deleted file mode 100644
index 511d4cd..0000000
--- a/debian/tests/run-unit-test
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh -e
-
-oname=bbmle
-pkg=r-cran-`echo $oname | tr '[A-Z]' '[a-z]'`
-
-# The saved result files do contain some differences in metadata and we also
-# need to ignore version differences of R
-filter() {
-    grep -v -e '^R version' \
-        -e '^Copyright (C)' \
-        -e '^R : Copyright 20' \
-        -e '^R Under development (unstable)' \
-        -e '^Platform:' \
-        -e '^ISBN 3-900051-07-0' \
-        -e '^$' \
-        -e 'Natural language support but running in an English locale' \
-        -e '^Loading required package: stats4' \
-       $1 | \
-    sed -e '/^> *proc\.time()$/,$d' \
-        -e "/^Signif. codes:/s/[‘’]/'/g"
-}
-
-if [ "$ADTTMP" = "" ] ; then
-  ADTTMP=`mktemp -d /tmp/${pkg}-test.XXXXXX`
-  trap "rm -rf $ADTTMP" 0 INT QUIT ABRT PIPE TERM
-fi
-cd $ADTTMP
-if [ 0 -eq 1 ] ; then # try to use RUnit - but this does not do anything :-(
-  mkdir inst
-  cd inst
-  cp -a /usr/lib/R/site-library/${oname}/unitTests .
-  cd ..
-fi
-cp -a /usr/share/doc/${pkg}/tests $ADTTMP
-find . -name "*.gz" -exec gunzip \{\} \;
-cd tests
-if [ 0 -eq 1 ] ; then # try to use RUnit - but this does not do anything :-(
-  # This does not test anything - no idea why
-  RCMDCHECK=FALSE R --vanilla --slave < doRUnit.R
-fi
-
-for htest in `ls *.R | sed 's/\.R$//'` ; do
-   if [ "${htest}" = "doRUnit" -o "${htest}" = "RUnit-tests" ] ; then
-      # This is no separate test but should trigger all test (but fails to do so for whatever reason
-      continue
-   fi
-   if [ "${htest}" = "optimx" ] ; then
-      # Needs https://cran.r-project.org/web/packages/optimx/ which is not packaged yet
-      continue
-   fi
-   LC_ALL=C R --no-save < ${htest}.R 2>&1 | tee > ${htest}.Rout
-   filter ${htest}.Rout.save > ${htest}.Rout.save_
-   filter ${htest}.Rout > ${htest}.Rout_
-   diff -u --ignore-space-change ${htest}.Rout.save_ ${htest}.Rout_
-   if [ ! $? ] ; then
-     echo "Test ${htest} failed"
-     exit 1
-   else
-     echo "Test ${htest} passed"
-   fi
-done
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index d6dd599..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=4
-http://cran.r-project.org/src/contrib/bbmle_([-\d.]*)\.tar\.gz
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
new file mode 100755
index 0000000..f3826a7
--- /dev/null
+++ b/inst/NEWS.Rd
@@ -0,0 +1,494 @@
+\newcommand{\PR}{\Sexpr[results=rd]{tools:::Rd_expr_PR(#1)}}
+
+\name{NEWS}
+\title{bbmle News}
+\encoding{UTF-8}
+
+\section{Changes in version 1.0.18 (2016-02-11)}{
+  \itemize{
+    \item update slice functionality; allow for explicit ranges
+    \item CRAN updates (import from base packages)
+  }
+}
+\section{Changes in version 1.0.17 (2014-01-01)}{
+  \itemize{
+    \item new warning if ~dnorm is called with sd implicitly ==1
+    \item some internal tweaking to slice functionality
+    \item updated to allow for \code{MuMIn} v 1.10.0 changes
+  }
+}
+
+\section{Changes in version 1.0.16 (2014-01-01)}{
+  \itemize{
+    \item fix \code{mnames} behaviour
+    \item slight vignette cleanup
+  }
+}
+\section{Changes in version 1.0.15 (2013-11-20)}{
+  \itemize{
+    \item add \code{logLik} option to IC tables, more use of \code{nobs} methods
+    \item minor improvements to \code{slice} functionality
+  }
+}
+
+\section{Changes in version 1.0.14 (2013-08-24)}{
+  \itemize{
+    \item more CRAN tweaks
+  }
+}
+\section{Changes in version 1.0.13 (2013-08-22)}{
+  \itemize{
+    \item .Rbuildignore tweaks for CRAN
+  }
+}
+
+\section{Changes in version 1.0.12 (2013-08-22)}{
+  \itemize{
+    \item vignette tweaks
+    \item add Depends: R>=3.0.0
+  }
+}
+\section{Changes in version 1.0.11 (2013-08-19)}{
+  \itemize{
+    \item add .Rbuildignore for CRAN tests
+  }
+}
+\section{Changes in version 1.0.10 (2013-08-18)}{
+  \itemize{
+    \item adapt to updated optimx
+    \item tweaks for CRAN compliance
+  }
+}
+\section{Changes in version 1.0.9 (2013-06-23)}{
+  \itemize{
+    \item switch from aod to aods3 in vignettes
+    \item switch vignette to knitr
+   }
+}
+
+\section{Changes in version 1.0.8 (2013-04-23)}{
+  \itemize{
+     \item tweaks to \code{print.ICtab()}
+   }
+}
+
+\section{Changes in version 1.0.7 }{
+  \itemize{
+     \item warn on convergence failure
+   }
+}
+
+\section{Changes in version 1.0.6 }{
+  \itemize{
+     \item fixed (fairly obscure) bug in assigning data environment to
+  gradient function; replace 'gr' in call with appropriately evaluated version
+   }
+}
+
+\section{Changes in version 1.0.5.3 (2012-09-05)}{
+  \itemize{
+     \item changed some cat() to message()
+   }
+}
+
+\section{Changes in version 1.0.5.2 (2012-07-29)}{
+  \itemize{
+     \item remove BIC definitions (now unnecessary/should be adequately
+     defined in core R)
+     \item add explicit Depends: on stats4
+     \item note that development optimx (2012.05.24+) now fails on
+     profiling when reduced model is 1-dimensional (i.e. for a
+     2-parameter model)
+   }
+}
+
+\section{Changes in version 1.0.5.1 (2012-07-17)}{
+  \itemize{
+     \item remove spurious .RData file; version bump
+   }
+}
+
+\section{Changes in version 1.0.5 (2012-05-15)}{
+  \itemize{
+    \item wrapped eigen() call to prevent failure of eigenvalue ratio
+    summary when hessian is bad
+    \item fix bug: forgot to export summary method for mle2
+    \item add \code{exclude.fixed} argument to \code{coef} method
+    \item fix bug: single lower/upper parameter, or
+    prof.lower/prof.upper parameter, not interpreted properly in
+    profile/confint
+    \item add and document \code{slice} methods: change from old
+    (profile-like) behavior, old version is available as \code{sliceOld}
+    \item DESCRIPTION/NAMESPACE fixes, move most Depends: to Imports:
+    instead (except methods package)
+  }
+}
+
+\section{Changes in version 1.0.4.2 (2012-02-25)}{
+  \itemize{
+    \item fix bug in gradient/vecpar/profiling interaction (Daniel
+    Kaschek)
+    \item improve (and document) uniroot method for confint: now
+    respects box constraints
+    \item fix issue where bobyqa (optimx) strips parameter names from result
+  }
+}
+\section{Changes in version 1.0.4.1 (2012-01-27)}{
+  \itemize{
+    \item remove a bit of installed junk; vignette fix
+  }
+}
+\section{Changes in version 1.0.4 (2012-01-02)}{
+  \itemize{
+    \item fix imports for latest version of R-devel
+  }
+}
+\section{Changes in version 1.0.3}{
+  \itemize{
+    \item modified starting value code and documentation slightly:
+    now allows per-parameter lists.  Updated docs.
+    \item Fixed bug that would screw things up if 'i' were used
+    as a parameter.
+    \item Update vignette to conform to MuMIn changes
+  }
+}
+
+\section{Changes in version 1.0.2 (2011-09-07)}{
+  \itemize{
+    \item fixed buglet in detection of no-intercept models when setting
+    starting values
+  }
+}
+\section{Changes in version 1.0.1 (2011-08-04)}{
+  \itemize{
+    \item Turned off some warnings when skip.hessian=TRUE (e.g. in profile)
+    \item Calculate max grad and Hessian eigenval ratio, add to "details"
+    \item (Should add accessor methods)
+    \item Fixed NAMESPACE to export methods etc etc properly; removed
+    universal exportPattern directive
+  }
+}
+
+\section{Changes in version 1.0.0 (2011-06-17)}{
+  \itemize{
+    \item mainly just a version bump for CRAN
+    \item added tag for variables in profile/confint warnings
+    \item lots more technical information in vignette about profiling algorithm
+}}
+
+\section{Changes in version 0.9.9 (2011-05-14)}{
+  \itemize{
+    \item changed NEWS to NEWS.Rd
+    \item fixed bug for "optimize" -- profile etc. now respect bounds
+    \item eliminated warning about bounds names if all identical
+    \item add "try_harder" flag to profiling (ignore flat spots, NAs ...)
+}}
+  
+\section{Changes in version 0.9.8}{
+  \itemize{
+    \item gradient functions work better with fixed parameters, hence with profiling
+    \item profile plot reverts to linear for non-monotonic profile
+    \item added warning in confint for non-monotonic profile, or for non-monotonic spline fit to monotonic profile; revert from spline+linear to linear approximation in this case
+    \item various documentation improvements
+    \item optimx improvements
+    \item require data= argument when using formula interface
+    \item turn off hessian computation in profile
+    \item allow use of MASS::ginv
+}}
+
+\section{Changes in version 0.9.7}{
+  \itemize{
+    \item bug fix in calc_mle2_function for no-intercept models
+    (thanks to Colin Kremer)
+    \item fixed optimx, added 'user' option
+}}
+
+\section{Changes in version 0.9.6}{
+  \itemize{
+    \item changed hessian calculation to use numDeriv code (causes tiny changes to hessian results that could matter in edge cases). Too lazy to provide a backward compatibility mode ...
+    \item documented optimizer= choices in ?mle2
+}}
+
+\section{Changes in version 0.9.5.1}{
+  \itemize{
+    \item fixed bug in AICc (David Harris)
+}}
+
+\section{Changes in version 0.9.5}{
+  \itemize{
+    \item added NAMESPACE, various fixes to go with that
+    \item beginnings of an RUnit testing framework
+    \item tweaked vignette
+    \item added prof.lower, prof.upper to profile()
+    \item added "optimize" to list of allowed optimizers, some bug fixes
+}}
+
+\section{Changes in version 0.9.4.1}{
+  \itemize{
+    \item tweaked par() resetting in profile plots
+}}
+
+\section{Changes in version 0.9.4}{
+  \itemize{
+    \item more qAICc fixing
+}}
+
+\section{Changes in version 0.9.3 (2009-09-18)}{
+  \itemize{
+    \item tweaked handling of bounds: profile now succeeds
+    on some 1D problems where it didn't before
+    \item added deviance, residuals methods
+    \item added newparams argument to predict, simulate; newdata argument to simulate
+    \item added vignette (stub)
+    \item added explicit params argument, to help sort out full parameter specifications when parameters is non-NULL
+}}
+
+\section{Changes in version 0.9.2 (2009-08-10)}{
+  \itemize{
+    \item fixed predict() for case with parameters
+    \item added snorm
+    \item changed ICtab defaults to weight=TRUE, base=FALSE, sort=TRUE
+}}
+
+\section{Changes in version 0.9.1}{
+  \itemize{
+\item added simulate method (formula interface only)
+\item fix AICctab bug
+\item remove spurious cat/print in profile
+\item fix qAIC bug
+}}
+
+\section{Changes in version 0.9.0 (2008-08-26)}{
+  \itemize{
+  \item fix Tom Hobbs bug: named lower/upper/parscale/ndeps
+  get rearranged properly, otherwise rearrange in order
+  of "start" and issue a warning
+  \item documentation tweak for S4 as.data.frame
+  \item added sbeta to list of known distributions
+  \item removed nlme requirement & auto-loading
+ }}
+
+ \section{Changes in version 0.8.9 (2008-08-04)}{
+   \itemize{
+     \item version bump, submit to CRAN
+     \item added predict method
+ }}
+
+ \section{Changes in version 0.8.8 (2008-07-10)}{
+   \itemize{
+\item added flexibility for profile plotting (main, x labels etc.);
+    added examples
+\item added an instance of "namedrop" to fix naming problem
+\item added tol.newmin to slice etc.
+\item added check for numeric return from profile within confint
+\item fixed bugs in profile plotting when profile is restricted
+   to a subset of variables
+\item added tests for par() to reset to original on exit
+\item improved profile documentation
+\item replicate std.err if specified in profile
+\item add as.data.frame 
+\item tweak tol.newmin (better fit found during profile) code
+}}
+
+\section{Changes in version 0.8.7 (2008-05-12)}{
+  \itemize{
+    \item version bump, moved to R-forge.
+    \item reordered NEWS file (most recent first)
+}}
+\section{Changes in version 0.8.6.1 (2008-03-22)}{
+  \itemize{
+    \item tweaked stop-on-better-fit code
+    \item fixed (?) qAIC(c) methods
+}}
+
+\section{Changes in version 0.8.6 (2008-03-26)}{
+  \itemize{
+    \item tweak/fix to ICtab documentation (thanks to Tom Hobbs)
+    \item added qAIC(c) methods (not working yet!)
+}}
+
+\section{Changes in version 0.8.5.1}{
+\itemize{
+  \item oops. Fixed infelicity (bug?) in new  environment manipulation
+  }}
+
+\section{Changes in version 0.8.5}{
+  \itemize{
+    \item tweaked environment/data assignment to preserve
+    original minuslogl environment better
+}}
+
+\section{Changes in version 0.8.4}{
+  \itemize{
+    \item changed  plot.profile.mle2 options (added onepage etc.,
+    made plot.confstr=TRUE by default)
+}}
+
+\section{Changes in version 0.8.3}{
+  \itemize{
+    \item added warning about too-short lower/upper 
+    \item added documentation
+    }}
+
+\section{Changes in version 0.8.2}{
+  \itemize{
+\item   fixed bug in AICctab
+\item cosmetic change to printing -- save call.orig
+\item moved ChangeLog to NEWS
+}}
+
+\section{Changes in version 0.8.1}{
+  \itemize{
+    \item fixed (?) environment bug
+\item tried to use built-in relist, but failed: renamed relist
+	  to "relist2" (try again later)
+\item documented get.mnames (auxiliary function for ICtabs)
+\item started to add gr (gradient) capability -- NOT TESTED
+}}
+\section{Changes in version 0.8}{
+  \itemize{
+    \item changed ICtab to allow either ICtab(x,y,z) or ICtab(list(x,y,z))
+    (L <- list(...); if is.list(L[[1]]) && length(L)==1)
+}}
+
+\section{Changes in version 0.7.7}{
+  \itemize{
+    \item fix bug in profiling: all optim() methods EXCEPT L-BFGS-B.
+    return the value of the objective function if given a function
+    with no arguments/zero-length starting parameter vector
+    (this is the situation with "profiling" a 1-D function).
+    L-BFGS-B gives funky answers.  added a check for this case.
+    (may need to check behavior for alternate optimizers (nlm etc))
+    [this behavior triggered a "found better fit" error when profiling
+     1D functions with L-BFGS-B]
+
+     \item changed behavior when finding better fit during profiling
+     to return new parameters
+ }}
+
+
+\section{Changes in version 0.7.6}{
+  \itemize{
+    \item tweak vignette
+    \item fixed second major AICc bug (was fixed in mle2 method,
+    but not in logLik method)
+}}
+     
+\section{Changes in version 0.7.5}{
+  \itemize{
+    \item change "ll" to "LL" in examples for clarity
+    \item tweaked anova reporting of models (wrap instead of truncating)
+    \item added (undocumented) show.points option to profile plot
+    to display actual locations of profile evaluation
+\item tweaked profile to behave better when profiling variables
+	with constraints (upper, lower)
+\item moved vignette to inst/doc where it belongs
+\item ICtab hack to protect against package:aod definition of AIC(logLik)
+\item added submit stub
+\item tweaked slice.mle2-class docs for consistency
+\item fiddled with vignette
+\item preliminary code to allow non-monotonic profiles
+\item preliminary add nlm to list of optimizers (untested)
+\item add aod, Hmisc, emdbook to VignetteDepends and Suggests:
+}}
+
+\section{Changes in version 0.7}{
+  \itemize{
+    \item better df extraction in ICtab
+\item minor bug fix for AICc (allows AICc of nls objects)
+\item handle models with -1 in formula better:
+	starting values set "all equal"
+\item made ANOVA formula line-length accessible
+\item added skip.hessian and trace arguments to mle2
+\item messed around with BIC definition -- attempt at consistency with nlme
+\item added rudimentary support for nlminb, constrOptim
+\item nlme now required for fdHess (which is required for
+	nlminb since it doesn't compute a finite-diff
+	Hessian)
+    }}
+    
+    \section{Changes in version 0.6}{
+      \itemize{
+	\item add experimental formula interface
+\item change all names from mle to mle2 to avoid confusion/conflicts
+\item with stats4 version of mle
+\item change internal structure of data evaluation
+\item worked on vignette
+\item added optimizer slot (stub)
+}}
+
+\section{Changes in version 0.5}{
+  \itemize{
+  \item fix AICc bug!  (was deviance+2*k*(k+1)/(n-k-1), not
+  AIC+2*k*(k+1)/(n-k-1))
+  }}
+
+  \section{Changes in version 0.4}{
+      \itemize{
+    \item change AIC to AICc for corrections
+    \item add AICtab for weights, delta, sort ... options
+\item expose error messages occurring within profile()
+\item uniroot tries harder to find a valid endpoint
+\item truncate terms in anova.mle at 80 characters
+}}
+
+\section{Changes in version 0.3}{
+  \itemize{
+    \item enhanced anova method, works with print.anova
+    \item tweaked namedrop() code -- ??
+}}
+
+\section{Changes in version 0.2}{
+  \itemize{
+    \item added parnames, parnames<-
+    \item minor fix to allow "profiles" of 1-parameter models
+    (skip fdHess call)
+    \item minor change to print method for mle results
+    \item tweaking "vecpar" (to allow parameter vectors in objective function)
+    \item removed fdHess/nlme dependency
+  }
+}
diff --git a/inst/doc/mle2.R b/inst/doc/mle2.R
new file mode 100644
index 0000000..25192e3
--- /dev/null
+++ b/inst/doc/mle2.R
@@ -0,0 +1,196 @@
+## ----knitropts,echo=FALSE,message=FALSE----------------------------------
+if (require("knitr")) opts_chunk$set(fig.width=5,fig.height=5,tidy=FALSE,warning=FALSE,error=TRUE)
+
+## ----setup,results="hide",echo=FALSE,message=FALSE-----------------------
+library(Hmisc)
+
+## ----emdbook,message=FALSE-----------------------------------------------
+library(emdbook)
+
+## ----bbsim---------------------------------------------------------------
+set.seed(1001)
+x1 <- rbetabinom(n=1000,prob=0.1,size=50,theta=10)
+
+## ----bbmle,message=FALSE-------------------------------------------------
+library("bbmle")
+
+## ----likfun1-------------------------------------------------------------
+mtmp <- function(prob,size,theta) {
+  -sum(dbetabinom(x1,prob,size,theta,log=TRUE))
+}
+
+## ----fit1,warning=FALSE--------------------------------------------------
+(m0 <- mle2(mtmp,start=list(prob=0.2,theta=9),data=list(size=50)))
+
+## ----sum1----------------------------------------------------------------
+summary(m0)
+
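+## ----prof1,cache=TRUE,warning=FALSE---------------------------------------
+## cached chunk from the vignette source (mle2.Rnw); defines the p0 used
+## by confint() below
+p0 <- profile(m0)
+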
+## ----confint1,warning=FALSE----------------------------------------------
+confint(p0)
+confint(m0,method="quad")
+confint(m0,method="uniroot")
+
+## ----profplot1,fig.height=5,fig.width=10,out.width="\\textwidth"---------
+par(mfrow=c(1,2))
+plot(p0,plot.confstr=TRUE)
+
+## ----fit2,warning=FALSE--------------------------------------------------
+m0f <- mle2(x1~dbetabinom(prob,size=50,theta),
+            start=list(prob=0.2,theta=9),data=data.frame(x1))
+
+## ----fit2f---------------------------------------------------------------
+m0cf <- mle2(x1~dbetabinom(prob=plogis(lprob),size=50,theta=exp(ltheta)),
+            start=list(lprob=0,ltheta=2),data=data.frame(x1))
+confint(m0cf,method="uniroot")
+confint(m0cf,method="spline")
+
+## ----orobdata------------------------------------------------------------
+load(system.file("vignetteData","orob1.rda",package="bbmle"))
+summary(orob1)
+
+## ----aodlikfun-----------------------------------------------------------
+ML1 <- function(prob1,prob2,prob3,theta,x) {
+  prob <- c(prob1,prob2,prob3)[as.numeric(x$dilution)]
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+
+## ----crowdertab,echo=FALSE,results="asis"--------------------------------
+crowder.results <- matrix(c(0.132,0.871,0.839,78.424,0.027,0.028,0.032,-34.991,
+                            rep(NA,7),-34.829,
+                            rep(NA,7),-56.258),
+                          dimnames=list(c("prop diffs","full model","homog model"),
+                            c("prob1","prob2","prob3","theta","sd.prob1","sd.prob2","sd.prob3","NLL")),
+                          byrow=TRUE,nrow=3)
+latex(crowder.results,file="",table.env=FALSE,title="model")
+
+## ----eval=FALSE----------------------------------------------------------
+#  ## would prefer ~dilution-1, but problems with starting values ...
+#  (m1B <- mle2(m~dbetabinom(prob,size=n,theta),
+#               param=list(prob~dilution),
+#               start=list(prob=0.5,theta=1),
+#      data=orob1))
+
+## ----suppWarn,echo=FALSE-------------------------------------------------
+opts_chunk$set(warning=FALSE)
+
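+## ----aodfit1,cache=TRUE,warning=FALSE-------------------------------------
+## cached chunks from the vignette source (mle2.Rnw); they define the
+## m1, m2, and p2 objects used below
+(m1 <- mle2(ML1,start=list(prob1=0.5,prob2=0.5,prob3=0.5,theta=1),
+            data=list(x=orob1)))
+
+## ----aodfit2,cache=TRUE----------------------------------------------------
+(m2 <- mle2(ML1,start=as.list(coef(m1)),
+          control=list(parscale=coef(m1)),
+          data=list(x=orob1)))
+
+## ----aodprof2,cache=TRUE---------------------------------------------------
+p2 <- profile(m2,prof.upper=c(Inf,Inf,Inf,theta=2000))
+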
+## ----aodstderr-----------------------------------------------------------
+round(stdEr(m2),3)
+
+## ----aodvar--------------------------------------------------------------
+sqrt(1/(1+coef(m2)["theta"]))
+
+## ----deltavar------------------------------------------------------------
+sqrt(deltavar(sqrt(1/(1+theta)),meanval=coef(m2)["theta"],
+         vars="theta",Sigma=vcov(m2)[4,4]))
+
+## ----sigma3--------------------------------------------------------------
+m2b <- mle2(m~dbetabinom(prob,size=n,theta=1/sigma^2-1),
+            data=orob1,
+            parameters=list(prob~dilution,sigma~1),
+            start=list(prob=0.5,sigma=0.1))
+## ignore warnings (we haven't bothered to bound sigma<1)
+round(stdEr(m2b)["sigma"],3)
+p2b <- profile(m2b,prof.lower=c(-Inf,-Inf,-Inf,0))
+
+## ----compquad------------------------------------------------------------
+r1 <- rbind(confint(p2)["theta",],
+            confint(m2,method="quad")["theta",])
+rownames(r1) <- c("spline","quad")
+r1
+
+## ----profplottheta-------------------------------------------------------
+plot(p2,which="theta",plot.confstr=TRUE)
+
+## ----profplotsigma-------------------------------------------------------
+plot(p2b,which="sigma",plot.confstr=TRUE,
+     show.points=TRUE)
+
+## ----homogmodel----------------------------------------------------------
+ml0 <- function(prob,theta,x) {
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+m0 <- mle2(ml0,start=list(prob=0.5,theta=100),
+          data=list(x=orob1))
+
+## ----logLikcomp----------------------------------------------------------
+logLik(m0)
+
+## ----formulafit----------------------------------------------------------
+m0f <- mle2(m~dbetabinom(prob,size=n,theta),
+            parameters=list(prob~1,theta~1),
+            data=orob1,
+            start=list(prob=0.5,theta=100))
+m2f <- update(m0f,
+              parameters=list(prob~dilution,theta~1),
+              start=list(prob=0.5,theta=78.424))
+m3f <- update(m0f,
+              parameters=list(prob~dilution,theta~dilution),
+              start=list(prob=0.5,theta=78.424))
+
+## ----anovafit------------------------------------------------------------
+anova(m0f,m2f,m3f)
+
+## ----ICtabfit------------------------------------------------------------
+AICtab(m0f,m2f,m3f,weights=TRUE,delta=TRUE,sort=TRUE)
+BICtab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+AICctab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+
+## ----reWarn,echo=FALSE---------------------------------------------------
+opts_chunk$set(warning=FALSE)
+
+## ----frogsetup-----------------------------------------------------------
+frogdat <- data.frame(
+  size=rep(c(9,12,21,25,37),each=3),
+  killed=c(0,2,1,3,4,5,rep(0,4),1,rep(0,4)))
+frogdat$initial <- rep(10,nrow(frogdat))
+
+## ----getgg---------------------------------------------------------------
+library(ggplot2)
+
+## ----gg1-----------------------------------------------------------------
+gg1 <- ggplot(frogdat,aes(x=size,y=killed))+geom_point()+
+      stat_sum(aes(size=factor(..n..)))+
+      labs(size="#")+scale_x_continuous(limits=c(0,40))
+
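+## ----frogfit1,cache=TRUE,warning=FALSE-------------------------------------
+## cached chunks from the vignette source (mle2.Rnw); they define the
+## pdat1 and pdat2 objects used below
+m3 <- mle2(killed~dbinom(prob=c*(size/d)^g*exp(1-size/d),
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat <- data.frame(size=1:40,initial=rep(10,40))
+pdat1 <- data.frame(pdat,killed=predict(m3,newdata=pdat))
+
+## ----frogfit2,cache=TRUE,warning=FALSE-------------------------------------
+m4 <- mle2(killed~dbinom(prob=c*((size/d)*exp(1-size/d))^g,
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat2 <- data.frame(pdat,killed=predict(m4,newdata=pdat))
+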
+## ----gg1plot-------------------------------------------------------------
+gg1 + geom_line(data=pdat1,colour="red")+
+      geom_line(data=pdat2,colour="blue")
+
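+## ----frogfit2anal,cache=TRUE,warning=FALSE---------------------------------
+## cached chunk from the vignette source (mle2.Rnw); defines prof4
+coef(m4)
+prof4 <- profile(m4)
+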
+## ----basegraphprofplot---------------------------------------------------
+plot(prof4)
+
+## ----latticeprof,fig.height=5,fig.width=10,out.width="\\textwidth"-------
+prof4_df <- as.data.frame(prof4)
+library(lattice)
+xyplot(abs(z)~focal|param,data=prof4_df,
+      subset=abs(z)<3,
+       type="b",
+       xlab="",
+       ylab=expression(paste(abs(z),
+           " (square root of ",Delta," deviance)")),
+       scale=list(x=list(relation="free")),
+             layout=c(3,1))
+
+## ----ggplotprof,fig.height=5,fig.width=10--------------------------------
+ss <- subset(prof4_df,abs(z)<3)
+ggplot(ss,
+       aes(x=focal,y=abs(z)))+geom_line()+
+      geom_point()+
+      facet_grid(.~param,scale="free_x")
+
+## ----oldargs,eval=FALSE--------------------------------------------------
+#  function (x, levels, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+#            absVal = TRUE, ...) {}
+
+## ----newargs,eval=FALSE--------------------------------------------------
+#  function (x, levels, which=1:p, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+#            plot.confstr = FALSE, confstr = NULL, absVal = TRUE, add = FALSE,
+#            col.minval="green", lty.minval=2,
+#            col.conf="magenta", lty.conf=2,
+#            col.prof="blue", lty.prof=1,
+#            xlabs=nm, ylab="score",
+#            show.points=FALSE,
+#            main, xlim, ylim, ...) {}
+
diff --git a/inst/doc/mle2.Rnw b/inst/doc/mle2.Rnw
new file mode 100755
index 0000000..25e6994
--- /dev/null
+++ b/inst/doc/mle2.Rnw
@@ -0,0 +1,813 @@
+\documentclass{article}
+%\VignetteIndexEntry{Examples for enhanced mle code}
+%\VignettePackage{bbmle}
+%\VignetteDepends{Hmisc}
+%\VignetteDepends{emdbook}
+%\VignetteDepends{ggplot2}
+%\VignetteDepends{lattice}
+%\VignetteEngine{knitr::knitr}
+\usepackage[utf8]{inputenc} % for UTF-8/single quotes from sQuote()
+\usepackage[english]{babel} % for texi2dvi ~ bug
+\usepackage{graphicx}
+\usepackage{natbib}
+\usepackage{array}
+\usepackage{color}
+\usepackage[colorlinks=true,urlcolor=blue,bookmarks=true]{hyperref}
+\usepackage{url}
+\author{Ben Bolker}
+\title{Maximum likelihood estimation and analysis
+  with the \code{bbmle} package}
+\newcommand{\code}[1]{{\tt #1}}
+\newcommand{\bbnote}[1]{\color{red} {\em #1} \color{black}}
+\date{\today}
+\begin{document}
+\bibliographystyle{chicago}
+%\bibliographystyle{plain}
+\maketitle
+\tableofcontents
+
+<<knitropts,echo=FALSE,message=FALSE>>=
+if (require("knitr")) opts_chunk$set(fig.width=5,fig.height=5,tidy=FALSE,warning=FALSE,error=TRUE)
+@ 
+<<setup,results="hide",echo=FALSE,message=FALSE>>=
+library(Hmisc)
+@ 
+
+The \code{bbmle} package, designed to simplify
+maximum likelihood estimation and analysis in R,
+extends and modifies the \code{mle} function and class
+in the \code{stats4} package that comes with R by default.
+\code{mle} is in turn a wrapper around the \code{optim}
+function in base R.
+The maximum-likelihood-estimation function and class
+in \code{bbmle} are both called \code{mle2}, to avoid
+confusion and conflict with the original functions in
+the \code{stats4} package.  The major differences between
+\code{mle} and \code{mle2} are:
+\begin{itemize}
+\item \code{mle2} is more robust, with additional warnings (e.g.
+  if the Hessian can't be computed by finite differences,
+  \code{mle2} returns a fit with a missing Hessian rather
+  than stopping with an error)
+\item \code{mle2} uses a \code{data} argument to allow different
+  data to be passed to the negative log-likelihood function
+\item \code{mle2} has a formula interface like that
+ of (e.g.) \code{gls} in the \code{nlme} package.
+ For relatively simple models the formula for the
+ maximum likelihood can be written in-line, rather than
+ defining a negative log-likelihood function.  The formula
+ interface also simplifies fitting models with
+ categorical variables.  Models fitted using the formula interface
+ also have applicable \code{predict} and \code{simulate} methods.
+\item \code{bbmle} defines \code{anova}, \code{AIC}, \code{AICc}, 
+  and \code{BIC} methods for
+  \code{mle2} objects, as well as
+  \code{AICtab}, \code{BICtab}, \code{AICctab}
+  functions for producing summary tables of information criteria for a 
+  set of models.
+\end{itemize}
+
+Other packages with similar functionality (extending
+GLMs in various ways) are
+\begin{itemize}
+\item on CRAN: \code{aods3} (overdispersed models such as beta-binomial);
+  \code{vgam} (a wide range of models);
+  \code{betareg} (beta regression);
+  \code{pscl} (zero-inflated, hurdle models);
+  \code{maxLik} (another general-purpose maximizer, with
+  a different selection of optimizers)
+\item In Jim Lindsey's code repository
+  (\url{http://popgen.unimaas.nl/~jlindsey/rcode.html}):
+  \code{gnlr} and \code{gnlr3}
+\end{itemize}
+  
+\section{Example: \emph{Orobanche}/overdispersed binomial}
+
+This example will use the classic data set on
+\emph{Orobanche} germination from \cite{Crowder1978}
+(you can also use
+\code{glm(...,family="quasibinomial")} or
+the \code{aods3} package to analyze these data).
+
+\subsection{Test basic fit to simulated beta-binomial data}
+
+First, generate a single beta-binomially distributed
+set of points as a simple test.
+
+Load the \code{emdbook} package
+to get functions for the beta-binomial distribution (random-deviate 
+function \code{rbetabinom} --- these functions are also available
+in Jim Lindsey's \code{rmutil} package).
+<<emdbook,message=FALSE>>=
+library(emdbook)
+@  
+
+Generate random deviates from a random beta-binomial:
+<<bbsim>>=
+set.seed(1001)
+x1 <- rbetabinom(n=1000,prob=0.1,size=50,theta=10)
+@ 
+
+Load the package:
+<<bbmle,message=FALSE>>=
+library("bbmle")
+@ 
+
+Construct a simple negative log-likelihood function:
+<<likfun1>>=
+mtmp <- function(prob,size,theta) {
+  -sum(dbetabinom(x1,prob,size,theta,log=TRUE))
+}
+@ 
+
+Fit the model --- use \code{data} to pass the \code{size}
+parameter (since it wasn't hard-coded in the \code{mtmp}
+function):
+<<fit1,warning=FALSE>>=
+(m0 <- mle2(mtmp,start=list(prob=0.2,theta=9),data=list(size=50)))
+@
+(here and below, I'm suppressing lots of warnings about {\tt NaNs produced})
+
+The \code{summary} method for \code{mle2} objects
+shows the parameters; approximate standard
+errors (based on quadratic approximation to the curvature at
+the maximum likelihood estimate); and a test
+of the parameter difference from zero based on
+this standard error and on an assumption 
+that the likelihood surface is quadratic
+(or equivalently that the sampling distribution
+of the estimated parameters is normal).
+
+<<sum1>>=
+summary(m0)
+@ 
+
+Construct the likelihood profile (you can
+apply \code{confint} directly to \code{m0},
+but if you're going to work with the likelihood
+profile [e.g. plotting, or looking for confidence
+intervals at several different $\alpha$ values]
+then it is more efficient to compute the profile
+once):
+
+<<prof1,cache=TRUE,warning=FALSE>>=
+p0 <- profile(m0)
+@ 
+
+Compare the confidence interval estimates based on
+inverting a spline fit to the profile (the default);
+based on the quadratic approximation at the
+maximum likelihood estimate; and based on
+root-finding to find the exact point where the
+profile crosses the critical level.
+
+<<confint1,warning=FALSE>>=
+confint(p0)
+confint(m0,method="quad")
+confint(m0,method="uniroot")
+@ 
+
+All three types of confidence limits are similar.
+
+Plot the profiles:
+<<profplot1,fig.height=5,fig.width=10,out.width="\\textwidth">>=
+par(mfrow=c(1,2))
+plot(p0,plot.confstr=TRUE)
+@ 
+
+By default, the plot method for 
+likelihood profiles displays the square root of
+the deviance difference
+(twice the difference in negative
+log-likelihood from the best fit), so it will
+be {\sf V}-shaped
+for cases where the quadratic approximation works well
+(as in this case).
+(For a better visual estimate of whether the profile
+is quadratic, use the \code{absVal=FALSE} option to the \code{plot}
+method.)
+
+You can also request confidence intervals
+calculated using \code{uniroot}, which may be more exact when
+the profile is not smooth enough to be modeled accurately
+by a spline.  However, this method is
+also more sensitive to numeric problems.
+
+Instead of defining an
+explicit function for \code{minuslogl}, 
+we can also use the formula interface.
+The formula interface assumes that
+the density function given (1) has \code{x} as
+its first argument (if the distribution is multivariate,
+then \code{x} should be a matrix of observations) and
+(2) has a \code{log} argument that will return
+the log-probability or log-probability density
+if \code{log=TRUE}.  Some of the extended functionality
+(prediction etc.) depends on the existence of 
+an \code{s}- variant function for the distribution
+that returns (at least) the mean and median as
+a function of the parameters
+(currently defined: \code{snorm}, \code{sbinom},
+\code{sbeta}, \code{snbinom}, \code{spois}).
+<<fit2,warning=FALSE>>=
+m0f <- mle2(x1~dbetabinom(prob,size=50,theta),
+            start=list(prob=0.2,theta=9),data=data.frame(x1))
+@ 
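+
+For reference, here is a minimal sketch of the kind of \code{s}-variant
+function described above (hypothetical; \code{smydist} is a placeholder,
+not part of \code{bbmle}, and is patterned loosely on \code{snorm}):
+<<svariant,eval=FALSE>>=
+## return summary statistics (at least the mean and median) as a
+## function of the distribution's parameters:
+smydist <- function(mu, sd) {
+  list(title="My distribution",
+       mean=mu, median=mu, sd=sd)
+}
+@ 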
+Note that you must specify the data via the \code{data}
+argument when using the formula interface. This may be
+slightly more unwieldy than just pulling the data from your
+workspace when you are doing simple things, but in the long
+run it makes tasks like predicting new responses much simpler.
+
+It's convenient to use the formula interface
+to try out likelihood estimation on the
+transformed parameters:
+<<fit2f>>=
+m0cf <- mle2(x1~dbetabinom(prob=plogis(lprob),size=50,theta=exp(ltheta)),
+            start=list(lprob=0,ltheta=2),data=data.frame(x1))
+confint(m0cf,method="uniroot")
+confint(m0cf,method="spline")
+@ 
+
+In this case the answers from \code{uniroot}
+and \code{spline} (default) methods barely
+differ.
+
+\subsection{Real data (\emph{Orobanche}, \cite{Crowder1978})}
+Data are copied from the \code{aods3} package
+(but a copy is saved with the package to avoid depending on the
+ \code{aods3} package):
+<<orobdata>>=
+load(system.file("vignetteData","orob1.rda",package="bbmle"))
+summary(orob1)
+@ 
+
+Now construct a negative log-likelihood
+function that differentiates among groups:
+<<aodlikfun>>=
+ML1 <- function(prob1,prob2,prob3,theta,x) {
+  prob <- c(prob1,prob2,prob3)[as.numeric(x$dilution)]
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+@ 
+
+Results from \cite{Crowder1978}:
+<<crowdertab,echo=FALSE,results="asis">>=
+crowder.results <- matrix(c(0.132,0.871,0.839,78.424,0.027,0.028,0.032,-34.991,
+                            rep(NA,7),-34.829,
+                            rep(NA,7),-56.258),
+                          dimnames=list(c("prop diffs","full model","homog model"),
+                            c("prob1","prob2","prob3","theta","sd.prob1","sd.prob2","sd.prob3","NLL")),
+                          byrow=TRUE,nrow=3)
+latex(crowder.results,file="",table.env=FALSE,title="model")
+@
+
+<<aodfit1,cache=TRUE,warning=FALSE>>=
+(m1 <- mle2(ML1,start=list(prob1=0.5,prob2=0.5,prob3=0.5,theta=1),
+            data=list(x=orob1)))
+@ 
+
+Or:
+<<eval=FALSE>>=
+## would prefer ~dilution-1, but problems with starting values ...
+(m1B <- mle2(m~dbetabinom(prob,size=n,theta),
+             param=list(prob~dilution),
+             start=list(prob=0.5,theta=1),
+    data=orob1))
+@ 
+The result warns us that the optimization has not
+converged; we also don't match
+Crowder's results for $\theta$ exactly.
+We can fix both of these problems by setting \code{parscale} appropriately.
+
+Since we don't bound $\theta$ (or below, $\sigma$) we get  a fair number
+of warnings with this and the next few fitting and profiling attempts.
+We will ignore these for now, since the final results reached are reasonable
+(and match or nearly match Crowder's values); the appropriate, careful thing
+to do would be either to fit on a transformed scale where all real-valued
+parameter values were legal, or to use \code{method="L-BFGS-B"} (or \code{method="bobyqa"} 
+with the \code{optimx} package) to bound the parameters appropriately.
+You can also use \code{suppressWarnings()} if you're sure you don't
+need to know about any warnings (beware: this will suppress \emph{all}
+warnings, those you weren't expecting as well as those you were \ldots)
+
+<<suppWarn,echo=FALSE>>=
+opts_chunk$set(warning=FALSE)
+@ 
+<<aodfit2,cache=TRUE>>=
+(m2 <- mle2(ML1,start=as.list(coef(m1)),
+          control=list(parscale=coef(m1)),
+          data=list(x=orob1)))
+@ 
+
+Calculate likelihood profile (restrict the upper limit
+of $\theta$, simply because it will make the picture
+below a little bit nicer):
+<<aodprof2,cache=TRUE>>=
+p2 <- profile(m2,prof.upper=c(Inf,Inf,Inf,theta=2000))
+@ 
+
+Get the curvature-based parameter standard
+deviations (which Crowder used
+rather than computing likelihood profiles):
+<<aodstderr>>=
+round(stdEr(m2),3)
+@ 
+We are slightly off Crowder's numbers --- rounding
+error?
+
+Crowder also defines a variance (overdispersion) parameter
+$\sigma^2=1/(1+\theta)$.
+<<aodvar>>=
+sqrt(1/(1+coef(m2)["theta"]))
+@ 
+
+Using the delta method (via the \code{deltavar}
+function in the \code{emdbook} package)
+to approximate the standard deviation of
+$\sigma$:
+<<deltavar>>=
+sqrt(deltavar(sqrt(1/(1+theta)),meanval=coef(m2)["theta"],
+         vars="theta",Sigma=vcov(m2)[4,4]))
+@ 
+
+Another way to fit in terms of $\sigma$ rather than $\theta$
+is to compute $\theta=1/\sigma^2-1$ on the fly in a
+formula:
+
+<<sigma3>>=
+m2b <- mle2(m~dbetabinom(prob,size=n,theta=1/sigma^2-1),
+            data=orob1,
+            parameters=list(prob~dilution,sigma~1),
+            start=list(prob=0.5,sigma=0.1))
+## ignore warnings (we haven't bothered to bound sigma<1)
+round(stdEr(m2b)["sigma"],3)
+p2b <- profile(m2b,prof.lower=c(-Inf,-Inf,-Inf,0))
+@ 
+
+As might be expected since the standard deviation
+of $\sigma$ is large, the quadratic approximation is
+poor:
+
+<<compquad>>=
+r1 <- rbind(confint(p2)["theta",],
+            confint(m2,method="quad")["theta",])
+rownames(r1) <- c("spline","quad")
+r1
+@ 
+
+Plot the profile:
+<<profplottheta>>=
+plot(p2,which="theta",plot.confstr=TRUE)
+@ 
+
+What does the profile for $\sigma$ look like?
+<<profplotsigma>>=
+plot(p2b,which="sigma",plot.confstr=TRUE,
+     show.points=TRUE)
+@ 
+
+Now fit a homogeneous model:
+<<homogmodel>>=
+ml0 <- function(prob,theta,x) {
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+m0 <- mle2(ml0,start=list(prob=0.5,theta=100),
+          data=list(x=orob1))
+@ 
+
+The log-likelihood matches Crowder's result:
+<<logLikcomp>>=
+logLik(m0)
+@ 
+
+It's easier to 
+use the formula interface
+to specify all three of the models
+fitted by Crowder (homogeneous, probabilities differing
+by group, probabilities and overdispersion differing
+by group):
+
+<<formulafit>>=
+m0f <- mle2(m~dbetabinom(prob,size=n,theta),
+            parameters=list(prob~1,theta~1),
+            data=orob1,
+            start=list(prob=0.5,theta=100))
+m2f <- update(m0f,
+              parameters=list(prob~dilution,theta~1),
+              start=list(prob=0.5,theta=78.424))
+m3f <- update(m0f,
+              parameters=list(prob~dilution,theta~dilution),
+              start=list(prob=0.5,theta=78.424))
+@ 
+
+\code{anova} runs a likelihood ratio test on nested
+models:
+<<anovafit>>=
+anova(m0f,m2f,m3f)
+@ 
+
+The various \code{ICtab} commands produce tables of
+information criteria, optionally sorted and
+with model weights.
+<<ICtabfit>>=
+AICtab(m0f,m2f,m3f,weights=TRUE,delta=TRUE,sort=TRUE)
+BICtab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+AICctab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+@ 
+<<reWarn,echo=FALSE>>=
+opts_chunk$set(warning=FALSE)
+@ 
+
+\section{Example: reed frog size predation}
+
+Data from an experiment by Vonesh \citep{VoneshBolker2005}:
+<<frogsetup>>=
+frogdat <- data.frame(
+  size=rep(c(9,12,21,25,37),each=3),
+  killed=c(0,2,1,3,4,5,rep(0,4),1,rep(0,4)))
+frogdat$initial <- rep(10,nrow(frogdat))
+@ 
+
+<<getgg>>=
+library(ggplot2)
+@ 
+
+<<gg1>>=
+gg1 <- ggplot(frogdat,aes(x=size,y=killed))+geom_point()+
+      stat_sum(aes(size=factor(..n..)))+
+      labs(size="#")+scale_x_continuous(limits=c(0,40))
+@ 
+
+<<frogfit1,cache=TRUE,warning=FALSE>>=
+m3 <- mle2(killed~dbinom(prob=c*(size/d)^g*exp(1-size/d),
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat <- data.frame(size=1:40,initial=rep(10,40))
+pdat1 <- data.frame(pdat,killed=predict(m3,newdata=pdat))
+@
+
+<<frogfit2,cache=TRUE,warning=FALSE>>=
+m4 <- mle2(killed~dbinom(prob=c*((size/d)*exp(1-size/d))^g,
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat2 <- data.frame(pdat,killed=predict(m4,newdata=pdat))
+@ 
+
+<<gg1plot>>=
+gg1 + geom_line(data=pdat1,colour="red")+
+      geom_line(data=pdat2,colour="blue")
+@ 
+
+<<frogfit2anal,cache=TRUE,warning=FALSE>>=
+coef(m4)
+prof4 <- profile(m4)
+@ 
+
+Three different ways to draw the profile:
+
+(1) Built-in method (base graphics):
+<<basegraphprofplot>>=
+plot(prof4)
+@ 
+
+(2) Using \code{xyplot} from the \code{lattice} package:
+\setkeys{Gin}{width=\textwidth}
+<<latticeprof,fig.height=5,fig.width=10,out.width="\\textwidth">>=
+prof4_df <- as.data.frame(prof4)
+library(lattice)
+xyplot(abs(z)~focal|param,data=prof4_df,
+      subset=abs(z)<3,
+       type="b",
+       xlab="",
+       ylab=expression(paste(abs(z),
+           " (square root of ",Delta," deviance)")),
+       scale=list(x=list(relation="free")),
+             layout=c(3,1))
+@ 
+
+(3) Using \code{ggplot} from the \code{ggplot2} package:
+<<ggplotprof,fig.height=5,fig.width=10>>=
+ss <- subset(prof4_df,abs(z)<3)
+ggplot(ss,
+       aes(x=focal,y=abs(z)))+geom_line()+
+      geom_point()+
+      facet_grid(.~param,scale="free_x")
+@ 
+
+\section*{Additions/enhancements/differences from \code{stats4::mle}}
+\begin{itemize}
+\item{\code{anova} method}
+\item{warnings on convergence failure}
+\item{more robust to non-positive-definite Hessian;
+  can also specify \code{skip.hessian} to skip Hessian
+  computation when it is problematic}
+\item{when profiling fails because better value is
+    found, report new values}
+\item{can take named vectors as well as lists as
+    starting parameter vectors}
+\item{added \code{AICc}, \code{BIC} definitions,
+    \code{ICtab} functions}
+\item{added \code{"uniroot"} and \code{"quad"}
+    options to \code{confint}}
+\item{more options for colors and line types etc etc.
+The old arguments are:
+<<oldargs,eval=FALSE>>=
+function (x, levels, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+          absVal = TRUE, ...) {}
+@ 
+The new one is:
+<<newargs,eval=FALSE>>=
+function (x, levels, which=1:p, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+          plot.confstr = FALSE, confstr = NULL, absVal = TRUE, add = FALSE,
+          col.minval="green", lty.minval=2,
+          col.conf="magenta", lty.conf=2,
+          col.prof="blue", lty.prof=1,
+          xlabs=nm, ylab="score",
+          show.points=FALSE,
+          main, xlim, ylim, ...) {}
+@ 
+\code{which} selects (by character vector or numbers)
+which parameters to plot; \code{nseg} does nothing
+(even in the old version); \code{plot.confstr} turns on
+the labels for the confidence levels; \code{confstr} gives
+the labels; \code{add} specifies whether to add the
+profile to an existing plot; the \code{col} and \code{lty}
+options specify the colors and line types for the
+horizontal and vertical lines marking the minimum
+and confidence values and for the profile curve; \code{xlabs}
+gives a vector of x-axis labels; \code{ylab} gives the y-axis label;
+\code{show.points} specifies whether to show the raw points
+computed.
+}
+\item{\code{mle2.options()}}
+\item{\code{data} argument}
+\item{handling of names in argument lists}
+\item{can use alternative optimizers (\code{nlminb}, \code{nlm}, \code{constrOptim}, \code{optimx},
+    \code{optimize})}
+\item{uses code from \code{numDeriv} package to compute Hessians rather than
+    built-in optimizer code}
+\item{by default, uses \code{MASS::ginv} (generalized inverse) rather than \code{solve} to invert
+    Hessian (more robust to positive-semidefinite Hessians \ldots)}
+\item{can use \code{vecpar=TRUE} (and \code{parnames()}) to use objective functions with parameters
+    specified as vectors, for compatibility with \code{optim} etc. (see the sketch after this list)}
+\end{itemize}
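+
+For example, a minimal sketch of the last item, adapted from the
+\code{parnames} example elsewhere in the package (not run):
+<<vecparsketch,eval=FALSE>>=
+x <- 1:5
+y <- rbinom(5, prob=x/(1+x), size=10)
+## objective function takes a single parameter vector, optim-style
+mfun <- function(p) {
+    a <- p[1]; b <- p[2]
+    -sum(dbinom(y, prob=a*x/(b+x), size=10, log=TRUE))
+}
+parnames(mfun) <- c("a","b")
+mle2(minuslogl=mfun, start=c(a=1,b=1), vecpar=TRUE, method="Nelder-Mead")
+@ 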
+
+\section{Newer stuff}
+
+\textbf{To do:}
+\begin{itemize}
+\item{use \code{predict}, \code{simulate} etc.
+    to demonstrate different parametric bootstrap approaches
+    to confidence and prediction intervals
+    \begin{itemize}
+    \item use \code{predict} to get means and standard
+      deviations, use delta method?
+    \item use \code{vcov}, assuming quadratic profiles,
+      with \code{predict(\ldots,newparams=\ldots)}
+    \item prediction intervals assuming no parameter uncertainty
+      with \code{simulate}
+    \item both together \ldots
+    \end{itemize}
+  }
+\end{itemize}
+
+
+\section{Technical details}
+
+\subsection{Profiling and confidence intervals}
+
+This section describes the algorithm for constructing profiles
+and confidence intervals, which is not otherwise documented anywhere
+except in the code.  An asterisk (*) indicates changes from the version
+in \code{stats4:::mle}.
+
+\subsubsection{Estimating standard error}
+
+In order to construct the profile for a particular parameter, one
+needs an initial estimate of the scale over which to vary that
+parameter.  The estimated standard error of the parameter based
+on the estimated curvature of the likelihood surface at the MLE
+is a good guess.
+\begin{itemize}
+\item if \code{std.err} is missing, extract the 
+  standard error from the summary coefficient table (ultimately computed from 
+  \code{sqrt(diag(inverse Hessian))} of the fit)
+\item * a user-set value of \code{std.err} overrides this behavior 
+  unless the value is specified as \code{NA} (in which
+  case the estimate from the previous step is used)
+\item * if the standard error value is still \code{NA} (i.e. the
+  user did not specify it and the value estimated from the Hessian
+  is missing or \code{NA}) use \code{sqrt(1/diag(hessian))}. This
+  represents a (fairly feeble) attempt to come up with a plausible number
+  when the Hessian is not positive definite but still has positive diagonal
+  entries (see the sketch after this list)
+\item if all else fails, stop and * print an error message that encourages
+  the user to specify the values with \code{std.err}
+\end{itemize}
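+
+A condensed sketch of this fallback logic (\code{hess} here is a
+hypothetical Hessian matrix, not an object provided by the package):
+<<sefallback,eval=FALSE>>=
+se <- sqrt(diag(MASS::ginv(hess)))  ## standard errors from inverse Hessian
+if (any(!is.finite(se))) {
+    se <- sqrt(1/diag(hess))        ## the (fairly feeble) fallback
+}
+@ 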
+  
+There may be further tricks that would help guess the appropriate scale:
+for example, one could guess on the basis of a comparison between the
+parameter values and negative log-likelihoods at the starting and ending points
+of the fits.  On the other hand, (a) this would take some effort and
+still be subject to failure for sufficiently pathological fits and (b) there
+is some value to forcing the user to take explicit, manual steps to remedy
+such problems, as they may be signs of poorly defined or buggy log-likelihood
+functions.
+
+\subsubsection{Profiling}
+  
+Profiling is done on the basis of a constructed function that minimizes
+the negative log-likelihood for a fixed value of the focal parameter and
+returns the signed square-root of the deviance difference from the 
+minimum (denoted by $z$).  At the MLE $z=0$ by definition; it should never
+be $<0$ unless something has gone wrong with the original fit. The LRT significance
+cutoffs for $z$ are equal to the usual two-tailed normal distribution cutoffs
+(e.g. $\pm \approx 1.96$ for 95\% confidence regions).
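+
+For example, the 95\% two-tailed normal cutoff and the square root of the
+one-tailed $\chi^2_1$ deviance cutoff agree:
+<<cutoffs,eval=FALSE>>=
+qnorm(0.975)          ## two-tailed normal cutoff, approximately 1.96
+sqrt(qchisq(0.95, 1)) ## identical, via the chi-squared connection
+@ 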
+  
+In each direction (decreasing and increasing from the MLE for the focal parameter):
+\begin{itemize}
+\item fix the focal parameter
+\item adjust control parameters etc. accordingly (e.g. remove the
+  entry for the focal parameter so that the remaining control
+  parameters match the non-fixed parameters)
+\item{controls on the profiling (which can be set manually, but for which there
+    is not much guidance in the documentation):
+    \begin{itemize}
+    \item \code{zmax} Maximum $z$ to aim for. (Default: \code{sqrt(qchisq(1-alpha/2, p))})
+      The default maximum $\alpha$ (type~I error) is 0.01.
+      \bbnote{I don't understand this
+        criterion. It seems to expand the size of the univariate profile
+        to match a cutoff for the multivariate confidence region of
+        the model.  The $\chi^2$ cutoff for deviance to get the $(1-\alpha)$ 
+        multivariate confidence region (i.e.,
+        on all $p$ of the parameters) would be \code{qchisq(1-alpha,p)} --- %
+        representing a one-tailed test on the deviance.  Taking the square root
+        makes sense, since we are working with the square root of the deviance,
+        but I don't understand (1) why we are expanding the region to allow
+        for the multivariate confidence region (since we are computing univariate
+        profiles) [you could at least argue that this is conservative, making
+        the region a little bigger than it needs to be]; (2) why we are
+        using $1-\alpha/2$ rather than $1-\alpha$.
+      }
+      For comparison, \code{MASS::profile.glm} (written by Bates and Venables in
+      1996, ported to R by BDR in 1998) uses \code{zmax}=\code{sqrt(qchisq(1-alpha,1))}
+      \bbnote{(this makes more sense to me \ldots)}.
+      On the other hand, the profiling code in \code{lme4a} (the \code{profile}
+      method for \code{merMod}, in \code{profile.R}) uses
+      \code{qchisq(1-alphamax, nptot)} \ldots
+    \item \code{del} Step size (scaled by standard error) (Default: \code{zmax}/5.)
+      Presumably (?) copied from \code{MASS::profile.glm}, 
+      which says (in \code{?profile.glm}):
+      ``[d]efault value chosen to allow profiling at about 10 parameter
+      values.''
+    \item \code{maxsteps} Maximum number of profiling steps to try in each direction. (Default: 100)
+    \end{itemize}
+  }
+\item While \verb+step<maxsteps+ and \verb+abs(z) < zmax+, set the value of the focal
+  parameter to its MLE + \code{sgn*step*del*std.err}, where \code{sgn} represents
+  the direction, \code{step} is the current (integer) step, and \code{del} and
+  \code{std.err} are the step-size scaling factor and standard error estimate
+  discussed above (i.e. take steps of size \code{del*std.err} in the appropriate direction);
+  then evaluate $z$ (see the sketch after this list)
+\item{Stop the profiling: 
+    \begin{itemize}
+    \item if $z$ doesn't change from the previous step (\verb+stop_flat+) %
+      --- unless \verb+try_harder+ is \code{TRUE}
+    \item * stop if $z$ is less than \code{tol.newmin} (default: 0.001) units 
+      \emph{better} than the MLE fit, i.e. $z<-\mbox{\code{tol.newmin}}$
+      (if $-\mbox{\code{tol.newmin}}<z<0$, set $z$ to zero) (\verb+newpars_found+)
+    \item if $z$ is \code{NA} (\verb+stop_na+)  --- unless \verb+try_harder+ is \code{TRUE}
+    \item if $z$ is beyond \code{zmax} (i.e., we have reached our goal: \verb+stop_cutoff+)
+    \item if \code{step==maxsteps}
+    \item if the focal parameter has hit its upper/lower bound (\verb+stop_bound+)
+    \end{itemize}
+  }
+\item if we have hit the maximum number of steps but not reached the cutoff
+  (\verb+stop_maxstep+ but not \verb+stop_cutoff+), ``try a bit harder'':
+  go \emph{almost} one more \code{del*std.err} unit out (in intervals
+  of 0.2, 0.4, 0.6, 0.8, 0.9) (\bbnote{also seems reasonable but don't
+    know where it comes from})
+\item * if we violated the boundary but did not reach the cutoff
+  (\verb+!stop_cutoff && stop_bound+), evaluate $z$ at the boundary
+\item if we got to the cutoff in $<5$ steps, try smaller steps:
+  start at \code{step=0.5} and proceed to
+  \code{mxstep-0.5} in unit increments
+  (rather than the original scale which went from 0 to \code{mxstep}).
+  (\bbnote{
+    Again, it seems reasonable, but I don't know what the original justification
+    was \ldots})
+\end{itemize}
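+
+A minimal sketch of the stepping rule referenced above (all names are
+hypothetical stand-ins; the real implementation lives in the profiling
+code):
+<<stepsketch,eval=FALSE>>=
+step <- 0
+while (step < maxsteps && abs(z) < zmax) {
+    step <- step + 1
+    ## fix the focal parameter this far from its MLE ...
+    pfix <- mle.est + sgn*step*del*std.err
+    ## ... re-optimize the other parameters, and compute the signed
+    ## square root of the deviance difference
+    z <- zfun(pfix)
+}
+@ 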
+
+\subsubsection{Confidence intervals}
+
+We are looking for the values where $z$ (signed square root deviance
+difference) is equal to the usual two-tailed normal distribution cutoffs
+for a specified $\alpha$ level, e.g. $z=\pm 1.96$ for 95\% confidence
+intervals (this is equivalent to a one-tailed test on the deviance
+difference with the cutoff value for $\chi^2_1$).
+
+\begin{description}
+\item[Spline method]{(default)
+    \begin{itemize}
+    \item If necessary (i.e. if applied to a fitted object and not 
+      to an existing profile), construct the profile
+    \item * If the profile of the signed square root is non-monotonic,
+      warn the user and revert to linear approximation on the profiled points
+      to find the cutoffs.
+    \item Otherwise, build an interpolation spline of $z$ (signed square root deviance
+      difference) based on profiled points (the default is $n=3 \times L$
+      where $L$ is the length of the original vector). Then
+      use linear approximation on the $y$ ($z$) and $x$ (focal
+      parameter value) of the spline
+      to find the cutoffs (\bbnote{Why construct a spline and then interpolate linearly?  Why
+        not use \code{backSpline} as in the profile plotting code?})
+      \end{itemize}
+    }
+  \item[Quad method]{Use a quadratic approximation based
+      on the estimated curvature (this is almost identical to
+      using \code{confint.default}, and perhaps obsolete/could
+      be replaced by a pointer to \code{confint.default} \ldots)
+    }
+  \item[Uniroot]{
+      For each direction (up and down):
+      \begin{itemize}
+      \item start by stepping 5 $\sigma$ away from the
+        MLE, or to the box constraint on the parameter,
+        whichever is closer (\bbnote{this standard error is based on the
+          curvature; I should allow it, or the intervals themselves,
+          to be overridden via a \code{std.err} or \code{interval}
+          parameter})
+      \item compute the difference between the deviance and
+        the desired deviance cutoff at this point;
+        if it is \code{NA}, reduce the distance in steps
+        of 0.25 $\sigma$ until it is no longer \code{NA},
+        or until the distance has been reduced to zero
+      \item if the product of the deviance differences at the MLE
+        and at the point you stopped at is \code{NA} or positive
+        (indicating that you didn't find a root-crossing in the
+        range $[0,5\sigma]$), quit.
+      \item otherwise, apply \code{uniroot} across this interval
+      \end{itemize}
+      
+      \code{method="uniroot"} should give the most accurate results, 
+      especially when the profile is wonky (it won't care about non-smooth
+      profiles), but it will be the slowest --- and different confidence
+      levels will have to be computed individually, whereas multiple
+      confidence levels can be computed quickly from a single computed
+      profile.  A cruder approach would be to use profiling but decrease
+      \code{std.err} a lot so that the profile points were very closely
+      spaced.
+    }
+  \end{description}
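+
+For example, applying the methods described above to the reed frog fit
+\code{m4} and its stored profile \code{prof4}:
+<<confintcompare,eval=FALSE>>=
+confint(prof4)                 ## spline method, from the stored profile
+confint(m4, method="quad")     ## quadratic approximation
+confint(m4, method="uniroot")  ## root-finding
+@ 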
+
+\subsubsection{Profile plotting}
+
+Plot the signed (or unsigned) square root deviance difference, and ($1-\alpha$) confidence regions/critical
+values designated by \code{conf} (default: $\{0.99,0.95,0.9,0.8,0.5\}$).
+
+\begin{itemize}
+\item * If the (signed) profile is non-monotonic, simply plot  
+  computed points with \code{type="l"} (i.e., with the default linear interpolation)
+\item Construct the interpolation spline (using \code{splines:::interpSpline}
+  rather than \code{spline} as in the confidence interval method; \bbnote{why this
+    difference?})
+\item attempt to construct the inverse of the interpolation spline (using \code{backSpline})
+\item * if this fails warn the user (assume this was due to non-monotonicity)
+  and try to use \code{uniroot} and \code{predict} to find cutoff values
+\item otherwise, use the inverse spline to find cutoff values
+\end{itemize}
+\bbnote{Why is there machinery in the plotting code to find confidence intervals?
+  Shouldn't this call \code{confint}, for consistency/fewer points of failure?}
+
+\section*{Bugs, wishes, to do}
+\begin{itemize}
+\item \textbf{WISH}: further methods and arguments: \code{subset},
+  \code{predict}, \code{resid}, \code{sim}?
+\item \textbf{WISH}: extend ICtab to allow DIC as well?
+\item minor \textbf{WISH}: 
+  better methods for extracting \code{nobs} information
+  when possible (e.g. with formula interface)
+\item \textbf{WISH}: better documentation, especially for S4 methods
+\item \textbf{WISH}: variable-length (and shaped) chunks in argument list -- cleaner division
+  between linear model specs/list of arguments/vector equivalent
+\item \textbf{WISH}: limited automatic differentiation
+  (add capability for common distributions)
+\item \textbf{WISH}: store \code{objectivefunction}
+  and \code{objectivefunctiongr} (vectorized objective/gradient
+  functions) in the \code{mle2} object (will break backward compatibility!!);
+  add accessors for these and for \code{minuslogl}
+\item \textbf{WISH}: document use of the objective function in \code{MCMCpack}
+  to do \emph{post hoc} MCMC sampling (or write my own Metropolis-Hastings
+  sampler \ldots)
+\item \textbf{WISH}: polish profile plotting, with lattice or ggplot2
+  methods
+\item \textbf{WISH}: add in/document/demonstrate ``slice'' capabilities
+\item \textbf{WISH}: refactor profiling to use stored objective functions
+  rather than re-calling \code{mle2} with \code{fixed} values mucked around
+  with in the calls???  Strip out and make generic for vectorized objective function?
+  (\code{profileModel} package only works for glm-like objects, with a linear predictor)
+\end{itemize}
+
+\bibliography{mle2}
+\end{document}
diff --git a/inst/doc/mle2.pdf b/inst/doc/mle2.pdf
new file mode 100644
index 0000000..c01fde0
Binary files /dev/null and b/inst/doc/mle2.pdf differ
diff --git a/inst/doc/quasi.R b/inst/doc/quasi.R
new file mode 100644
index 0000000..5a3ec2f
--- /dev/null
+++ b/inst/doc/quasi.R
@@ -0,0 +1,72 @@
+## ----opts,echo=FALSE-----------------------------------------------------
+if (require("knitr")) opts_chunk$set(tidy=FALSE)
+
+## ----dfun----------------------------------------------------------------
+dfun <- function(object) {
+  with(object,sum((weights * residuals^2)[weights > 0])/df.residual)
+}
+
+## ----dobdata-------------------------------------------------------------
+## Dobson (1990) Page 93: Randomized Controlled Trial :
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3,1,9)
+treatment <- gl(3,3)
+
+## ----fitdob--------------------------------------------------------------
+glmOT.D93 <- glm(counts ~ outcome + treatment, family=poisson)
+glmO.D93  <- update(glmOT.D93, . ~ . - treatment)
+glmT.D93  <- update(glmOT.D93, . ~ . - outcome)
+glmX.D93  <- update(glmT.D93, . ~ . - treatment)
+glmQOT.D93 <- update(glmOT.D93, family=quasipoisson)
+glmQO.D93 <- update(glmO.D93, family=quasipoisson)
+glmQT.D93 <- update(glmT.D93, family=quasipoisson)
+glmQX.D93 <- update(glmX.D93, family=quasipoisson)
+
+## ----dobll---------------------------------------------------------------
+(sum(dpois(counts,
+          lambda=exp(predict(glmOT.D93)),log=TRUE))) ## by hand
+(logLik(glmOT.D93))  ## from Poisson fit
+
+## ----dobll2--------------------------------------------------------------
+(-2*(logLik(glmT.D93)-logLik(glmOT.D93)))  ## Poisson fit
+(deviance(glmT.D93)-deviance(glmOT.D93))   ## Poisson fit
+(deviance(glmQT.D93)-deviance(glmQOT.D93)) ## quasi-fit
+
+## ----dobdisp-------------------------------------------------------------
+(dfun(glmOT.D93))
+(sum(residuals(glmOT.D93,"pearson")^2)/glmOT.D93$df.residual)
+(summary(glmOT.D93)$dispersion)
+(summary(glmQOT.D93)$dispersion)
+
+## ----bbmle---------------------------------------------------------------
+library(bbmle)
+(qAIC(glmOT.D93,dispersion=dfun(glmOT.D93)))
+(qAICc(glmOT.D93,dispersion=dfun(glmOT.D93),nobs=length(counts)))
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),type="qAIC")
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),
+      nobs=length(counts),type="qAICc")
+detach("package:bbmle")
+
+## ----AICcmodavg----------------------------------------------------------
+library(AICcmodavg)
+aictab(list(glmOT.D93,glmT.D93,glmO.D93,glmX.D93), 
+       modnames=c("OT","T","O","X"),
+       c.hat=dfun(glmOT.D93))
+detach("package:AICcmodavg")
+
+## ----MuMin---------------------------------------------------------------
+library(MuMIn); packageVersion("MuMIn")
+## from ?QAIC
+x.quasipoisson <- function(...) {
+    res <- quasipoisson(...)
+    res$aic <- poisson(...)$aic
+    res
+}
+glmQOT2.D93 <- update(glmOT.D93,family="x.quasipoisson",
+                      na.action=na.fail)
+(gg <-  dredge(glmQOT2.D93,rank="QAIC", chat=dfun(glmOT.D93)))
+(ggc <- dredge(glmQOT2.D93,rank="QAICc",chat=dfun(glmOT.D93)))
+detach("package:MuMIn")
+
diff --git a/inst/doc/quasi.Rnw b/inst/doc/quasi.Rnw
new file mode 100755
index 0000000..3ce84e8
--- /dev/null
+++ b/inst/doc/quasi.Rnw
@@ -0,0 +1,191 @@
+\documentclass{article}
+%\VignettePackage{mle2}
+%\VignetteIndexEntry{quasi: notes on quasi-likelihood/qAIC analysis in R}
+%\VignetteDepends{MuMIn,AICcmodavg}
+%\VignetteEngine{knitr::knitr}
+
+\usepackage{graphicx}
+\usepackage{url}
+\newcommand{\code}[1]{{\tt #1}}
+\title{Dealing with \code{quasi-}  models in R}
+\date{\today}
+\author{Ben Bolker}
+\begin{document}
+\maketitle
+
+\includegraphics[width=2.64cm,height=0.93cm]{cc-attrib-nc.png}
+\begin{minipage}[b]{3in}
+{\tiny Licensed under the Creative Commons 
+  attribution-noncommercial license
+(\url{http://creativecommons.org/licenses/by-nc/3.0/}).
+Please share \& remix noncommercially,
+mentioning its origin.}
+\end{minipage}
+
+<<opts,echo=FALSE>>=
+if (require("knitr")) opts_chunk$set(tidy=FALSE)
+@ 
+Computing ``quasi-AIC'' (QAIC) in R is a minor
+pain, because the R Core team (or at least the people who wrote \code{glm},
+\code{glmmPQL}, etc.)
+are purists and don't believe that quasi- models should report a likelihood.
+As far as I know, there are three R packages that compute/handle
+QAIC: \code{bbmle}, \code{AICcmodavg} (both on CRAN) and \code{MuMIn}
+(formerly known as \code{dRedging}, on r-forge).
+
+The basic problem is that quasi- model fits with \code{glm} return
+an \code{NA} for the log-likelihood, while the dispersion parameter
+($\hat c$, $\phi$, whatever you want to call it)
+is only reported for quasi- models.
+Various ways to get around this are:
+\begin{itemize}
+  \item{fit the model twice, once with a regular
+      likelihood model (\code{family=binomial}, \code{poisson}, etc.)
+      and once with the \code{quasi-} variant --- extract
+      the log-likelihood from the former and the dispersion parameter
+      from the latter}
+    \item{only fit the regular model; extract
+      the overdispersion parameter manually
+      with
+<<dfun>>=
+dfun <- function(object) {
+  with(object,sum((weights * residuals^2)[weights > 0])/df.residual)
+}
+@ 
+}
+\item{use the fact that quasi- fits still contain a deviance,
+    even if they set the log-likelihood to \code{NA}.  The deviance
+    is twice the negative log-likelihood (it's offset by some constant
+    which I haven't figured out yet, but it should still work
+    fine for model comparisons)}
+\end{itemize}
+
+The whole problem is worse for \code{MASS::glmmPQL}, where (1) the
+authors have gone to greater efforts to make sure that the (quasi-)deviance
+is no longer preserved anywhere in the fitted model, and (2) they
+may have done it for good reason --- it is not clear whether the
+number that would get left in the 'deviance' slot at the end of
+\code{glmmPQL}'s alternating \code{lme} and \code{glm} fits is
+even meaningful to the extent that regular QAICs are.  (For 
+discussion of a similar situation, see the \code{WARNING}
+section of \code{?gamm} in the \code{mgcv} package.)
+
+Example: use the values from one of the examples
+in \code{?glm}:
+
+<<dobdata>>=
+## Dobson (1990) Page 93: Randomized Controlled Trial :
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3,1,9)
+treatment <- gl(3,3)
+@ 
+
+Fit Poisson and quasi-Poisson models with all combinations
+of predictors:
+
+<<fitdob>>=
+glmOT.D93 <- glm(counts ~ outcome + treatment, family=poisson)
+glmO.D93  <- update(glmOT.D93, . ~ . - treatment)
+glmT.D93  <- update(glmOT.D93, . ~ . - outcome)
+glmX.D93  <- update(glmT.D93, . ~ . - treatment)
+glmQOT.D93 <- update(glmOT.D93, family=quasipoisson)
+glmQO.D93 <- update(glmO.D93, family=quasipoisson)
+glmQT.D93 <- update(glmT.D93, family=quasipoisson)
+glmQX.D93 <- update(glmX.D93, family=quasipoisson)
+@ 
+
+
+Extract log-likelihoods:
+<<dobll>>=
+(sum(dpois(counts,
+          lambda=exp(predict(glmOT.D93)),log=TRUE))) ## by hand
+(logLik(glmOT.D93))  ## from Poisson fit
+@ 
+
+The deviance (\code{deviance(glmOT.D93)}=\Sexpr{round(deviance(glmOT.D93),3)})
+is not the same as $-2L$ (\code{-2*logLik(glmOT.D93)}=\Sexpr{round(-2*c(logLik(glmOT.D93)),3)}),
+but the calculated differences in deviance are consistent,
+and are also extractable from the quasi- fit even though
+the log-likelihood is \code{NA}:
+<<dobll2>>=
+(-2*(logLik(glmT.D93)-logLik(glmOT.D93)))  ## Poisson fit
+(deviance(glmT.D93)-deviance(glmOT.D93))   ## Poisson fit
+(deviance(glmQT.D93)-deviance(glmQOT.D93)) ## quasi-fit
+@ 
+
+
+Compare hand-computed dispersion (in two ways)
+with the dispersion computed by \code{summary.glm()}
+on a quasi- fit:
+
+<<dobdisp>>=
+(dfun(glmOT.D93))
+(sum(residuals(glmOT.D93,"pearson")^2)/glmOT.D93$df.residual)
+(summary(glmOT.D93)$dispersion)
+(summary(glmQOT.D93)$dispersion)
+@ 
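+
+Putting these pieces together, QAIC can also be computed by hand (a
+sketch; note the extra parameter counted for the estimated dispersion,
+as discussed in the Notes at the end):
+<<qaichand,eval=FALSE>>=
+K <- attr(logLik(glmOT.D93), "df") + 1         ## +1 for c-hat
+-2*c(logLik(glmOT.D93))/dfun(glmOT.D93) + 2*K  ## QAIC
+@ 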
+
+
+\section*{Examples}
+
+\subsection*{\code{bbmle} package (Ben Bolker), CRAN/R-forge}
+
+<<bbmle>>=
+library(bbmle)
+(qAIC(glmOT.D93,dispersion=dfun(glmOT.D93)))
+(qAICc(glmOT.D93,dispersion=dfun(glmOT.D93),nobs=length(counts)))
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),type="qAIC")
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),
+      nobs=length(counts),type="qAICc")
+detach("package:bbmle")
+@ 
+
+\subsection*{\code{AICcmodavg} package (Marc Mazerolle), CRAN}
+
+<<AICcmodavg>>=
+library(AICcmodavg)
+aictab(list(glmOT.D93,glmT.D93,glmO.D93,glmX.D93), 
+       modnames=c("OT","T","O","X"),
+       c.hat=dfun(glmOT.D93))
+detach("package:AICcmodavg")
+@ 
+
+\subsection*{\code{MuMIn} package (Kamil Barto{\'n})}
+
+<<MuMin>>=
+library(MuMIn); packageVersion("MuMIn")
+## from ?QAIC
+x.quasipoisson <- function(...) {
+    res <- quasipoisson(...)
+    res$aic <- poisson(...)$aic
+    res
+}
+glmQOT2.D93 <- update(glmOT.D93,family="x.quasipoisson",
+                      na.action=na.fail)
+(gg <-  dredge(glmQOT2.D93,rank="QAIC", chat=dfun(glmOT.D93)))
+(ggc <- dredge(glmQOT2.D93,rank="QAICc",chat=dfun(glmOT.D93)))
+detach("package:MuMIn")
+@ 
+
+Notes: \code{ICtab} only gives delta-IC, with a limited number of
+decimal places (on purpose, but how do you change these defaults if you
+want to?).  You need to add 1 to the number of parameters
+to account for the scale parameter.  When computing corrected IC you need
+to get the absolute number of parameters right, not just the relative
+number \ldots Not sure which classes of models each of these
+packages will handle (lm, glm, (n)lme, lme4, mle2 \ldots).  Remember
+that you need to use the overdispersion parameter from the most complex
+model.  \code{glmmPQL} fits need to be hacked somewhat more severely
+(they do not contain a deviance element, and \code{logLik} has been
+NA'd out).
+
+\begin{tabular}{l|ccccccc}
+  package & \code{lm} & \code{glm} & \code{(n)lme} & \code{multinom} & \code{polr} & \code{lme4} & \code{mle2} \\
+  \hline
+  \code{AICcmodavg} & y & y & y & y & y & ? & ? \\
+  \code{MuMIn}      & ? & ? & ? & ? & ? & ? & ? \\
+  \code{mle2}       & ? & ? & ? & ? & ? & ? & ?
+\end{tabular}
+  
+\end{document}
diff --git a/inst/doc/quasi.pdf b/inst/doc/quasi.pdf
new file mode 100644
index 0000000..211de0a
Binary files /dev/null and b/inst/doc/quasi.pdf differ
diff --git a/inst/unitTests/Makefile b/inst/unitTests/Makefile
new file mode 100755
index 0000000..8d13225
--- /dev/null
+++ b/inst/unitTests/Makefile
@@ -0,0 +1,15 @@
+TOP=../..
+PKG=${shell cd ${TOP};pwd}
+SUITE=doRUnit.R
+R=R
+
+all: inst test
+
+inst: # Install package
+	cd ${TOP}/..;\
+	${R} CMD INSTALL ${PKG}
+
+test: # Run unit tests
+	export RCMDCHECK=FALSE;\
+	cd ${TOP}/tests;\
+	${R} --vanilla --slave < ${SUITE}
diff --git a/inst/unitTests/boundstest.R b/inst/unitTests/boundstest.R
new file mode 100755
index 0000000..ef1684e
--- /dev/null
+++ b/inst/unitTests/boundstest.R
@@ -0,0 +1,16 @@
+## logic for removing/modifying bounds:
+## (1) unbounded opt. will have limits of -Inf/Inf
+##   [or missing()]
+## (2) bounded opt
+## fix length mismatch errors!
+k <- 19
+N <- 20
+
+uniboundtest <- function() {
+  m1 <- mle2(k~dbinom(size=N,prob=p),
+             start=list(p=0.5))
+  m1b <- mle2(k~dbinom(size=N,prob=p),
+             start=list(p=0.5),method="L-BFGS-B",upper=0.999)
+  p1 <- profile(m1)
+  p1b <- profile(m1b)
+}
diff --git a/inst/vignetteData/orob1.rda b/inst/vignetteData/orob1.rda
new file mode 100755
index 0000000..4420ee1
Binary files /dev/null and b/inst/vignetteData/orob1.rda differ
diff --git a/man/BIC-methods.Rd b/man/BIC-methods.Rd
new file mode 100755
index 0000000..4586199
--- /dev/null
+++ b/man/BIC-methods.Rd
@@ -0,0 +1,92 @@
+\name{BIC-methods}
+\docType{methods}
+%\alias{BIC}
+\alias{BIC-methods}
+\alias{AIC-methods}
+\alias{AICc-methods}
+\alias{logLik-methods}
+\alias{AICc}
+\alias{AIC,mle2-method}
+\alias{AICc,mle2-method}
+\alias{AICc,logLik-method}
+\alias{AICc,ANY-method}
+\alias{AICc,ANY,mle2,logLik-method}
+\alias{qAICc}
+\alias{qAICc-methods}
+\alias{qAICc,ANY-method}
+\alias{qAICc,mle2-method}
+\alias{qAICc,logLik-method}
+\alias{qAIC}
+\alias{qAIC-methods}
+\alias{qAIC,ANY-method}
+\alias{qAIC,mle2-method}
+\alias{qAIC,logLik-method}
+%\alias{BIC,logLik-method}
+%\alias{BIC,ANY-method}
+%\alias{BIC,mle2-method}
+%\alias{BIC,ANY,mle2,logLik-method}
+\alias{qAIC,ANY,mle2,logLik-method}
+\alias{qAICc,ANY,mle2,logLik-method}
+\alias{logLik,mle2-method}
+\alias{anova,mle2-method}
+\title{Log likelihoods and model selection for mle2 objects}
+\description{
+  Various functions for likelihood-based and information-theoretic
+  model selection of likelihood models
+}
+\section{Methods}{
+\describe{
+    \item{logLik}{\code{signature(object = "mle2")}: Extract maximized
+      log-likelihood.}
+    \item{AIC}{\code{signature(object = "mle2")}: Calculate
+      Akaike Information Criterion}
+    \item{AICc}{\code{signature(object = "mle2")}: Calculate
+      small-sample corrected Akaike Information Criterion}
+    %\item{BIC}{\code{signature(object = "mle2")}: Calculate
+    %Bayesian (Schwarz) Information Criterion}
+    %\item{BIC}{\code{signature(object = "logLik")}: Calculate
+    %Bayesian (Schwarz) Information Criterion}
+    %\item{BIC}{\code{signature(object = "ANY")}: Calculate
+    %Bayesian (Schwarz) Information Criterion}
+    \item{anova}{\code{signature(object="mle2")}: Likelihood Ratio Test
+      comparison of different models}
+  }
+}
+\usage{
+%\S4method{BIC}{ANY,mle2,logLik}(object,...)
+\S4method{AICc}{ANY,mle2,logLik}(object,...,nobs,k=2)
+\S4method{qAIC}{ANY,mle2,logLik}(object,...,k=2)
+\S4method{qAICc}{ANY,mle2,logLik}(object,...,nobs,k=2)
+}
+\arguments{
+  \item{object}{A \code{logLik} or \code{mle2} object}
+  \item{...}{An optional list of additional \code{logLik}
+    or \code{mle2} objects (fitted to the same data set).}
+  \item{nobs}{Number of observations (sometimes 
+    obtainable as an attribute of
+    the fit or of the log-likelihood)}
+  \item{k}{penalty parameter (nearly always left at its default value of 2)}
+}
+\details{
+  Further arguments to \code{BIC} can be specified
+  in the \code{...} list: \code{delta} (logical)
+  specifies whether to include a column for delta-BIC
+  in the output.
+}
+\value{
+  A table of the BIC values, degrees of freedom,
+  and possibly delta-BIC values relative to the
+  minimum-BIC model
+}
+\note{This is implemented in an ugly way and could
+  probably be improved!}
+\examples{
+  d <- data.frame(x=0:10,y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+  (fit <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+      start=list(ymax=25,xhalf=3),data=d))
+  (fit2 <- mle2(y~dpois(lambda=(x+1)*slope),
+      start=list(slope=1),data=d))
+  BIC(fit)
+  BIC(fit,fit2)
+  }
+\keyword{methods}
diff --git a/man/ICtab.Rd b/man/ICtab.Rd
new file mode 100755
index 0000000..4b21bcd
--- /dev/null
+++ b/man/ICtab.Rd
@@ -0,0 +1,80 @@
+\name{ICtab}
+\alias{ICtab}
+\alias{AICtab}
+\alias{BICtab}
+\alias{AICctab}
+\alias{print.ICtab}
+\title{Compute table of information criteria and auxiliary info}
+\description{
+  Computes information criteria for a series of models, optionally
+  giving information about weights, differences between ICs, etc.
+}
+\usage{
+ICtab(\dots, type=c("AIC","BIC","AICc","qAIC","qAICc"),
+      weights = FALSE, delta = TRUE, base = FALSE,
+      logLik = FALSE, sort = TRUE,
+      nobs = NULL, dispersion = 1, mnames, k = 2)
+AICtab(\dots,mnames)
+BICtab(\dots,mnames)
+AICctab(\dots,mnames)
+\method{print}{ICtab}(x,\dots,min.weight)
+}
+\arguments{
+  \item{\dots}{a list of (logLik or?) mle objects; in the case of
+    \code{AICtab} etc., could also include other arguments to \code{ICtab}}
+  \item{type}{specify information criterion to use}
+  \item{base}{(logical) include base IC (and log-likelihood) values?}
+  \item{weights}{(logical) compute IC weights?}
+  \item{logLik}{(logical) include log-likelihoods in the table?}
+  \item{delta}{(logical) compute differences among ICs (and log-likelihoods)?}
+  \item{sort}{(logical) sort ICs in increasing order?}
+  \item{nobs}{(integer) number of observations: required for
+    \code{type="BIC"} or \code{type="AICc"} unless objects have
+    a \code{\link{nobs}} method}
+  \item{dispersion}{overdispersion estimate, for computing qAIC:
+    required for \code{type="qAIC"} or \code{type="qAICc"} unless
+    objects have a \code{"dispersion"} attribute}
+  \item{mnames}{names for table rows: defaults to names of objects passed}
+  \item{k}{penalty term (largely unused: left at default of 2)}
+  \item{x}{an ICtab object}
+  \item{min.weight}{minimum weight for exact reporting (smaller values
+    will be reported as "<[min.weight]")}
+}
+\value{
+  A data frame containing:
+  \item{IC}{information criterion}
+  \item{df}{degrees of freedom/number of parameters}
+  \item{dIC}{difference in IC from minimum-IC model}
+  \item{weights}{exp(-dIC/2)/sum(exp(-dIC/2))}
+}
+\note{(1) The print method uses sensible defaults; all ICs are rounded
+  to the nearest 0.1, and IC weights are printed using
+  \code{\link{format.pval}} to print an inequality for
+  values <0.001. (2) The computation of degrees of freedom/number of
+  parameters (e.g., whether
+  variance parameters are included in the total) varies enormously
+  between packages.  As long as the df computations
+  for a given set of models are consistent, differences
+  don't matter, but one needs to be careful with log likelihoods
+  and models taken from different packages.  If necessary
+  one can change the degrees of freedom manually by
+  saying \code{attr(obj,"df") <- df.new}, where \code{df.new}
+  is the desired number of parameters.
+  (3) Defaults have changed to \code{sort=TRUE}, \code{base=FALSE},
+  \code{delta=TRUE}, to match my conviction that it rarely makes
+  sense to report the overall values of information criteria.}
+\references{Burnham, K. P. and Anderson, D. R. (2002)
+  \emph{Model Selection and Multimodel Inference: A Practical
+    Information-Theoretic Approach}. 2nd edition. Springer.}
+\author{Ben Bolker}
+\examples{
+  set.seed(101)
+  d <- data.frame(x=1:20,y=rpois(20,lambda=2))
+  m0 <- glm(y~1,data=d)
+  m1 <- update(m0,.~x)
+  m2 <- update(m0,.~poly(x,2))
+  AICtab(m0,m1,m2,mnames=LETTERS[1:3])
+  AICtab(m0,m1,m2,base=TRUE,logLik=TRUE)
+  AICtab(m0,m1,m2,logLik=TRUE)
+  AICctab(m0,m1,m2,weights=TRUE)
+  print(AICctab(m0,m1,m2,weights=TRUE),min.weight=0.1)
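+  ## from the Note: override the number of parameters by hand,
+  ##  e.g. to count an estimated scale parameter
+  m2X <- m2
+  attr(m2X,"df") <- attr(logLik(m2),"df") + 1
+  AICtab(m0,m1,m2X)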
+}
+\keyword{misc}
diff --git a/man/as.data.frame.profile.mle2.Rd b/man/as.data.frame.profile.mle2.Rd
new file mode 100755
index 0000000..f8edd02
--- /dev/null
+++ b/man/as.data.frame.profile.mle2.Rd
@@ -0,0 +1,50 @@
+\name{as.data.frame.profile.mle2}
+\alias{as.data.frame.profile.mle2}
+\alias{coerce,profile.mle2-method}
+\alias{coerce,profile.mle2,data.frame-method}
+\title{convert profile to data frame}
+\description{
+  converts a profile of a fitted mle2 object
+  to a data frame
+}
+\usage{
+\S3method{as.data.frame}{profile.mle2}(x, row.names=NULL,
+optional=FALSE, \dots)
+}
+\arguments{
+  \item{x}{a profile object}
+  \item{row.names}{row names (unused)}
+  \item{optional}{unused}
+  \item{\dots}{unused}
+}
+\value{
+  a data frame with columns
+  \item{param}{name of parameter being profiled}
+  \item{z}{signed square root of the deviance difference from the
+    minimum}
+  \item{parameter values}{one column per parameter, named \code{par.vals.<parname>}}
+  \item{focal}{value of focal parameter: redundant, but included for
+    plotting convenience}
+}
+\examples{
+  ## use as.data.frame and lattice to plot profiles
+  x <- 0:10
+  y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+  library(bbmle)
+  LL <- function(ymax=15, xhalf=6)
+  -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
+  ## uses default parameters of LL
+  fit1 <- mle2(LL)
+  p1 <- profile(fit1)
+  d1 = as.data.frame(p1)
+  library(lattice)
+  xyplot(abs(z)~focal|param,data=d1,
+    subset=abs(z)<3,
+    type="b",
+    xlab="",
+    ylab=expression(paste(abs(z),
+    " (square root of ",Delta," deviance)")),
+    scale=list(x=list(relation="free")))
+}
+\author{Ben Bolker}
+\keyword{misc}
diff --git a/man/call.to.char.Rd b/man/call.to.char.Rd
new file mode 100755
index 0000000..9c890a8
--- /dev/null
+++ b/man/call.to.char.Rd
@@ -0,0 +1,27 @@
+\name{call.to.char}
+\alias{call.to.char}
+\title{Convert calls to character}
+\description{
+  Utility function (hack) to convert calls such
+  as y~x to their character equivalent
+}
+\usage{
+call.to.char(x)
+}
+\arguments{
+  \item{x}{a formula (call)}
+}
+\details{
+  It would be nice if \code{as.character(y~x)}
+  gave "y~x", but it doesn't, so this hack achieves
+  the same goal
+}
+\value{
+  a character vector of length 1
+}
+\author{Ben Bolker}
+\examples{
+as.character(y~x)
+call.to.char(y~x)
+}
+\keyword{misc}
diff --git a/man/get.mnames.Rd b/man/get.mnames.Rd
new file mode 100755
index 0000000..c22fb1f
--- /dev/null
+++ b/man/get.mnames.Rd
@@ -0,0 +1,17 @@
+\name{get.mnames}
+\alias{get.mnames}
+\title{extract model names}
+\description{
+  given a list of models, extract the names (or "model n")
+}
+\usage{
+get.mnames(Call)
+}
+\arguments{
+  \item{Call}{a function call (usually a list of models)}
+}
+\value{
+  a vector of model names
+}
+\author{Ben Bolker}
+\keyword{misc}
diff --git a/man/mle-class.Rd b/man/mle-class.Rd
new file mode 100755
index 0000000..5683006
--- /dev/null
+++ b/man/mle-class.Rd
@@ -0,0 +1,91 @@
+\name{mle2-class}
+\docType{class}
+\alias{mle2-class}
+\alias{coef,mle2-method}
+\alias{show,mle2-method}
+\alias{slice,mle2-method}
+\alias{summary,mle2-method}
+\alias{update,mle2-method}
+\alias{vcov,mle2-method}
+\alias{deviance,mle2-method}
+\alias{coerce,mle,mle2-method}
+\alias{formula,mle2-method}
+\alias{stdEr}
+\alias{stdEr,mle2-method}
+\title{Class "mle2". Result of Maximum Likelihood Estimation.}
+\description{This class encapsulates results of a generic maximum
+  likelihood procedure.}
+\section{Objects from the Class}{
+  Objects can be created by calls of the form \code{new("mle2", \dots)}, but
+  most often as the result of a call to \code{\link{mle2}}.
+}
+\section{Slots}{
+  \describe{
+    \item{\code{call}:}{(language) The call to \code{\link{mle2}}.}
+    \item{\code{call.orig}:}{(language) The call to \code{\link{mle2}},
+      saved in its original form (i.e. without data arguments
+      evaluated).}
+    \item{\code{coef}:}{(numeric) Vector of estimated parameters.}
+    \item{\code{data}:}{(data frame or list) Data with which to evaluate the negative log-likelihood function}
+    \item{\code{fullcoef}:}{(numeric) Fixed and estimated parameters.}
+    \item{\code{vcov}:}{(numeric matrix) Approximate variance-covariance
+      matrix, based on the second derivative matrix at the MLE.}
+    \item{\code{min}:}{(numeric) Minimum value of objective function =
+      minimum negative log-likelihood.}
+    \item{\code{details}:}{(list) Return value from \code{\link{optim}}.}
+    \item{\code{minuslogl}:}{(function) The negative log-likelihood
+      function.}
+    \item{\code{optimizer}:}{(character) The optimizing function used.}
+    \item{\code{method}:}{(character) The optimization method used.}
+    \item{\code{formula}:}{(character) If a formula was specified, a
+      character vector giving the formula and parameter specifications.}
+}
+}
+\section{Methods}{
+  \describe{
+    \item{coef}{\code{signature(object = "mle2")}: Extract coefficients.
+      If \code{exclude.fixed=TRUE} (it is \code{FALSE} by default),
+      only the non-fixed parameter values are returned.}
+    \item{confint}{\code{signature(object = "mle2")}: Confidence
+      intervals from likelihood profiles, or quadratic approximations,
+      or root-finding.}
+    \item{show}{\code{signature(object = "mle2")}: Display object
+      briefly.}
+    \item{show}{\code{signature(object = "summary.mle2")}: Display object briefly.}
+    \item{summary}{\code{signature(object = "mle2")}: Generate object summary.}
+    \item{update}{\code{signature(object = "mle2")}:  Update fit.}
+    \item{vcov}{\code{signature(object = "mle2")}: Extract
+      variance-covariance matrix.}
+    \item{formula}{\code{signature(object="mle2")}: Extract formula}
+    \item{plot}{\code{signature(x = "profile.mle2", y = "missing")}: Plot
+      profile. }
+  }
+}
+\details{
+  When the parameters in the original fit are constrained using
+  \code{lower} or \code{upper}, or when \code{prof.lower} or
+  \code{prof.upper} are set, and the confidence intervals lie
+  outside the constraint region, \code{confint} will return \code{NA}.
+  This may be too conservative -- in some cases, the appropriate
+  answer would be to set the confidence limit to the lower/upper
+  bound as appropriate -- but it is the most general answer.
+
+  (If you have a strong opinion about the need for a new
+  option to \code{confint} that sets the bounds to the limits
+  automatically, please contact the package maintainer.)
+  
+}
+\examples{
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+lowerbound <- c(a=2,b=-0.2)
+d <- data.frame(x,y)
+fit1 <- mle2(y~dpois(lambda=exp(a+b*x)),start=list(a=0,b=2),data=d,
+method="L-BFGS-B",lower=c(a=2,b=-0.2))
+(cc <- confint(fit1,quietly=TRUE))
+## to set the lower bounds to the limit
+na_lower <- is.na(cc[,1])
+cc[na_lower,1] <- lowerbound[na_lower]
+cc
+}
+\keyword{classes}
diff --git a/man/mle2.Rd b/man/mle2.Rd
new file mode 100755
index 0000000..a714fdc
--- /dev/null
+++ b/man/mle2.Rd
@@ -0,0 +1,235 @@
+\name{mle2}
+\alias{mle2}
+\alias{mle}
+\alias{calc_mle2_function}
+\title{Maximum Likelihood Estimation}
+\description{
+  Estimate parameters by the method of maximum likelihood.
+}
+\usage{
+mle2(minuslogl, start, method, optimizer,
+    fixed = NULL, data = NULL,
+    subset = NULL,
+    default.start = TRUE, eval.only = FALSE, vecpar = FALSE,
+    parameters = NULL,
+    parnames = NULL,
+    skip.hessian = FALSE,
+    hessian.opts = NULL,
+    use.ginv = TRUE,
+    trace = FALSE,
+    browse_obj = FALSE,
+    gr,
+    optimfun, \dots)
+calc_mle2_function(formula, parameters, links, start,
+    parnames, use.deriv = FALSE, data = NULL, trace = FALSE)
+}
+\arguments{
+  \item{minuslogl}{Function to calculate negative log-likelihood,
+  or a formula}
+  \item{start}{Named list. Initial values for optimizer}
+  \item{method}{Optimization method to use. See \code{\link{optim}}.}
+  \item{optimizer}{Optimization function to use. Currently available
+    choices are "optim" (the default), "nlm", "nlminb", "constrOptim",
+    "optimx", and "optimize". If "optimx" is used, (1) the \code{optimx}
+    package must be explicitly loaded with \code{\link{load}} or
+    \code{\link{require}}(\emph{Warning:} Options other than the
+    default may be poorly tested, use with caution.)
+  }
+  \item{fixed}{Named list.  Parameter values to keep fixed during
+    optimization.}
+  \item{data}{list of data to pass to negative log-likelihood function: must
+    be specified if \code{minuslogl} is specified as a formula}
+  \item{subset}{logical vector for subsetting data (STUB)}
+  \item{default.start}{Logical: allow default values of \code{minuslogl}
+    as starting values?}
+  \item{eval.only}{Logical: return value of \code{minuslogl(start)}
+    rather than optimizing}
+  \item{vecpar}{Logical: is first argument a vector of all parameters?
+    (For compatibility with \code{\link{optim}}.)
+  If \code{vecpar} is \code{TRUE}, then you should use
+  \code{\link{parnames}} to define the parameter names for the
+  negative log-likelihood function.}
+  \item{parameters}{List of linear models for parameters.
+    \emph{MUST BE SPECIFIED IN THE SAME ORDER as the start vector
+      (this is a bug/restriction that I hope to fix soon, but in
+      the meantime beware)}}
+  \item{links}{(unimplemented) specify transformations of parameters}
+  \item{parnames}{List (or vector?) of parameter names}
+  \item{gr}{gradient function}
+  \item{\dots}{Further arguments to pass to optimizer}
+  \item{formula}{a formula for the likelihood (see Details)}
+  \item{trace}{Logical: print parameter values tested?}
+  \item{browse_obj}{Logical: drop into browser() within the objective function?}
+  \item{skip.hessian}{Bypass Hessian calculation?}
+  \item{hessian.opts}{Options for Hessian calculation, passed through to
+    the \code{\link[numDeriv]{hessian}} function}
+  \item{use.ginv}{Use generalized inverse (\code{\link[MASS]{ginv}}) to
+    compute approximate variance-covariance}
+  \item{optimfun}{user-supplied optimization function. Must take exactly
+    the same arguments and return exactly the same structure as \code{\link{optim}}.}
+  \item{use.deriv}{(experimental, not yet implemented): construct symbolic
+     derivatives based on formula?}
+ }
+ \section{Warning}{Do not use a higher-level variable named \code{.i} in
+   \code{parameters} -- this is reserved for internal use.
+ }
+\details{
+  The \code{\link{optim}} optimizer is used to find the minimum of the
+  negative log-likelihood.  An approximate covariance matrix for the
+  parameters is obtained by inverting the Hessian matrix at the optimum.
+
+  The \code{minuslogl} argument can also specify a formula,
+  rather than an objective function, of the
+  form \code{x~ddistn(param1,...,paramn)}.  In this case
+  \code{ddistn} is taken to be a probability or density
+  function, which must have (literally) \code{x} as its
+  first argument (although this argument may be interpreted as
+  a matrix of multivariate responses) and which must have
+  a \code{log} argument that can be used to request the
+  log-probability or log-probability-density.
+  If a formula is specified, then \code{parameters} can contain
+  a list of linear models for the parameters.
+
+  If a formula is given and non-trivial linear models are given
+  in \code{parameters} for some of the variables, then
+  model matrices will be generated using \code{model.matrix}.
+  \code{start} can be given:
+  \itemize{
+    \item as a list containing lists, with each list corresponding
+    to the starting values for a particular parameter;
+    \item just for the higher-level parameters, in which case
+    all of the additional parameters generated by \code{model.matrix}
+    will be given starting values of zero (unless a no-intercept
+    formula with \code{-1} is specified, in which case all the
+    starting values for that parameter will be set equal)
+    \item [to be implemented!] as an exhaustive (flat) list
+    of starting values (in the order given by \code{model.matrix})
+  }
+  
+  The \code{trace} argument applies only when a formula is specified.
+  If you specify a function, you can build in your own \code{print()}
+  or \code{cat()} statement to trace its progress.  (You can also
+  specify a value for \code{trace} as part of a \code{control}
+  list for \code{optim()}: see \code{\link{optim}}.)
+
+  The \code{skip.hessian} argument is useful if the function is
+  crashing with a "non-finite finite difference value" error when trying
+  to evaluate the Hessian, but will preclude many subsequent
+  confidence interval calculations.  (You will know the Hessian
+  is failing if you use \code{method="Nelder-Mead"} and still
+  get a finite-difference error.)
+  
+  If convergence fails, see the manual page of the
+  relevant optimizer (\code{\link{optim}} by default,
+  but possibly \code{\link{nlm}}, \code{\link{nlminb}},
+  \code{\link[optimx]{optimx}}, or \code{\link{constrOptim}}
+  if you have set the value of \code{optimizer})
+  for the meanings of the error codes/messages.
+}
+\value{
+  An object of class \code{"mle2"}.
+}
+\note{
+  Note that the \code{minuslogl} function should
+  return the negative log-likelihood, -log L (not
+  the log-likelihood, log L, nor the deviance, -2 log L). It
+  is the user's responsibility
+  to ensure that the likelihood is correct, and that
+  asymptotic likelihood inference is valid (e.g.
+  that there are "enough" data and that the
+  estimated parameter values do not lie on the
+  boundary of the feasible parameter space).
+
+  If \code{lower}, \code{upper}, \code{control$parscale},
+  or \code{control$ndeps} are specified for \code{optim}
+  fits, they must be named vectors.
+
+  The requirement that \code{data} be specified when using
+  the formula interface is relatively new: it saves many
+  headaches on the programming side when evaluating the
+  likelihood function later on (e.g. for profiling or
+  constructing predictions).  Since \code{data.frame} uses
+  the names of its arguments as column names by default, it
+  is probably the easiest way to package objects that are
+  lying around in the global workspace for use in \code{mle2}
+  (provided they are all of the same length).
+
+  When \code{optimizer} is set to "optimx" and multiple
+  optimization methods are used (i.e. the \code{methods}
+  argument has more than one element, or \code{all.methods=TRUE}
+  is set in the control options), the best (minimum
+  negative log-likelihood) solution will be saved,
+  regardless of reported convergence status
+  (and future operations such as profiling on the fit
+  will only use the method that found the best result).
+  
+}
+\seealso{
+  \code{\link{mle2-class}}
+}
+\examples{
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+
+## in general it is best practice to use the `data' argument,
+##  but variables can also be drawn from the global environment
+LL <- function(ymax=15, xhalf=6)
+    -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
+## uses default parameters of LL
+(fit <- mle2(LL))
+fit1F <- mle2(LL, fixed=list(xhalf=6))
+coef(fit1F)
+coef(fit1F,exclude.fixed=TRUE)
+
+(fit0 <- mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d))
+anova(fit0,fit)
+summary(fit)
+logLik(fit)
+vcov(fit)
+p1 <- profile(fit)
+plot(p1, absVal=FALSE)
+confint(fit)
+
+## use bounded optimization
+## the lower bounds are really > 0, but we use >=0 to stress-test
+## profiling; note lower must be named
+(fit1 <- mle2(LL, method="L-BFGS-B", lower=c(ymax=0, xhalf=0)))
+p1 <- profile(fit1)
+
+plot(p1, absVal=FALSE)
+## a better parameterization:
+LL2 <- function(lymax=log(15), lxhalf=log(6))
+    -sum(stats::dpois(y, lambda=exp(lymax)/(1+x/exp(lxhalf)), log=TRUE))
+(fit2 <- mle2(LL2))
+plot(profile(fit2), absVal=FALSE)
+exp(confint(fit2))
+vcov(fit2)
+cov2cor(vcov(fit2))
+
+mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
+   start=list(lymax=0,lhalf=0),
+   data=d,
+   parameters=list(lymax~1,lhalf~1))
+
+## try bounded optimization with nlminb and constrOptim
+(fit1B <- mle2(LL, optimizer="nlminb", lower=c(lymax=1e-7, lhalf=1e-7)))
+p1B <- profile(fit1B)
+confint(p1B)
+(fit1C <- mle2(LL, optimizer="constrOptim", ui = c(lymax=1,lhalf=1), ci=2,
+   method="Nelder-Mead"))
+
+set.seed(1001)
+lymax <- c(0,2)
+lhalf <- 0
+x <- sort(runif(200))
+g <- factor(sample(c("a","b"),200,replace=TRUE))
+y <- rnbinom(200,mu=exp(lymax[g])/(1+x/exp(lhalf)),size=2)
+d2 <- data.frame(x,g,y)
+
+fit3 <- mle2(y~dnbinom(mu=exp(lymax)/(1+x/exp(lhalf)),size=exp(logk)),
+    parameters=list(lymax~g),data=d2,
+    start=list(lymax=0,lhalf=0,logk=0))
+}
+\keyword{models}
+
diff --git a/man/mle2.options.Rd b/man/mle2.options.Rd
new file mode 100755
index 0000000..86b91db
--- /dev/null
+++ b/man/mle2.options.Rd
@@ -0,0 +1,32 @@
+\name{mle2.options}
+\alias{mle2.options}
+\title{Options for maximum likelihood estimation}
+\description{
+  Query or set MLE parameters
+}
+\usage{
+mle2.options(...)
+}
+\arguments{
+  \item{\dots}{names of arguments to query,
+    or a list of values to set}
+}
+\details{
+  \describe{
+    \item{optim.method}{name of the optimization method (see
+      \code{\link{optim}} for choices)}
+    \item{confint}{name of the confidence-interval method: choices
+      are "spline", "uniroot", and "hessian", corresponding to
+      spline inversion, an attempt to find the best answer via uniroot,
+      and the information-matrix approximation}
+    \item{optimizer}{optimization function to use by default
+      (choices: "optim", "nlm", "nlminb", "constrOptim")}
+  }
+}
+\value{
+  Values of queried parameters, or (invisibly) the full list of parameters
+}
+\seealso{
+  \code{\link{mle2-class}}
+}
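+\examples{
+## query the current default optimization method
+## (query-by-name interface, analogous to options())
+mle2.options("optim.method")
+}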
+\keyword{models}
diff --git a/man/namedrop.Rd b/man/namedrop.Rd
new file mode 100755
index 0000000..528919a
--- /dev/null
+++ b/man/namedrop.Rd
@@ -0,0 +1,31 @@
+\name{namedrop}
+\alias{namedrop}
+\title{drop unneeded names from list elements}
+\description{
+  goes through a list (containing a combination
+  of single- and multiple-element vectors) and
+  removes redundant names that will make trouble
+  for mle
+}
+\usage{
+namedrop(x)
+}
+\arguments{
+  \item{x}{a list of named or unnamed, typically numeric, vectors}
+}
+\details{
+  examines each element of \code{x}.  If the element has length
+  one and is a named vector, the name is removed; if \code{length(x)}
+  is greater than 1, but all the names are the same, the vector
+  is renamed 
+}
+\value{
+  the original list, with names removed/added
+}
+\author{Ben Bolker}
+\examples{
+x = list(a=c(a=1),b=c(d=1,d=2),c=c(a=1,b=2,c=3))
+names(unlist(namedrop(x)))
+}
+\keyword{misc}
diff --git a/man/parnames.Rd b/man/parnames.Rd
new file mode 100755
index 0000000..a8a11b5
--- /dev/null
+++ b/man/parnames.Rd
@@ -0,0 +1,43 @@
+\name{parnames}
+\alias{parnames}
+\alias{parnames<-}
+\title{get and set parameter names}
+\description{
+  Gets and sets the "parnames" attribute on a
+  negative log-likelihood function
+}
+\usage{
+parnames(obj)
+parnames(obj) <- value
+}
+\arguments{
+  \item{obj}{a negative log-likelihood function}
+  \item{value}{a character vector of parameter names}
+}
+\details{
+  The \code{parnames} attribute is used by \code{mle2()}
+  when the negative log-likelihood function takes a parameter
+  vector, rather than a list of parameters; this allows
+  users to use the same objective function for \code{optim()}
+  and \code{mle2()}
+}
+\value{
+  Returns the \code{parnames} attribute (a character vector of
+  parameter names) or sets it.
+}
+\author{Ben Bolker}
+\examples{
+x <- 1:5
+set.seed(1001)
+y <- rbinom(5,prob=x/(1+x),size=10)
+mfun <- function(p) {
+  a <- p[1]
+  b <- p[2]
+  -sum(dbinom(y,prob=a*x/(b+x),size=10,log=TRUE))
+}
+optim(fn=mfun,par=c(1,1))
+parnames(mfun) <- c("a","b")
+mle2(minuslogl=mfun,start=c(a=1,b=1),method="Nelder-Mead")
+}
+\keyword{misc}
+
diff --git a/man/predict-methods.Rd b/man/predict-methods.Rd
new file mode 100755
index 0000000..ee67489
--- /dev/null
+++ b/man/predict-methods.Rd
@@ -0,0 +1,74 @@
+\name{predict-methods}
+\docType{methods}
+\alias{gfun}
+\alias{predict-methods}
+\alias{predict,mle2-method}
+\alias{residuals,mle2-method}
+\alias{simulate,mle2-method}
+\title{Predicted values from an mle2 fit}
+\description{
+  Given an \code{mle2} fit and an optional list
+  of new data, return predictions (more generally,
+  summary statistics of the predicted distribution)
+}
+\section{Methods}{
+\describe{
+  \item{x = "mle2"}{an \code{mle2} fit}
+}}
+\usage{
+    \S4method{predict}{mle2}(object, newdata=NULL,
+                         location="mean", newparams=NULL, \dots)
+    \S4method{simulate}{mle2}(object, nsim,
+                         seed, newdata=NULL, newparams=NULL, \dots)
+    \S4method{residuals}{mle2}(object,type=c("pearson","response"),
+                   location="mean",\dots)
+}
+\arguments{
+  \item{object}{an mle2 object}
+  \item{newdata}{optional list of new data}
+  \item{newparams}{optional vector of new parameters}
+  \item{location}{name of the summary statistic to return}
+  \item{nsim}{number of simulations}
+  \item{seed}{random number seed}
+  \item{type}{residuals type}
+  \item{\dots}{additional arguments (for generic compatibility)}
+}
+\note{For some models (e.g. constant models), \code{predict} may
+  return a single value rather than a vector of the appropriate length.}
+\examples{
+set.seed(1002)
+lymax <- c(0,2)
+lhalf <- 0
+x <- runif(200)
+g <- factor(rep(c("a","b"),each=100))
+y <- rnbinom(200,mu=exp(lymax[g])/(1+x/exp(lhalf)),size=2)
+dat <- data.frame(y,g,x)
+
+fit3 <- mle2(y~dnbinom(mu=exp(lymax)/(1+x/exp(lhalf)),size=exp(logk)),
+    parameters=list(lymax~g),
+    start=list(lymax=0,lhalf=0,logk=0),
+data=dat)
+
+plot(y~x,col=g)
+## true curves
+curve(exp(0)/(1+x/exp(0)),add=TRUE)
+curve(exp(2)/(1+x/exp(0)),col=2,add=TRUE)
+## model predictions
+xvec = seq(0,1,length=100)
+lines(xvec,predict(fit3,newdata=list(g=factor(rep("a",100),levels=c("a","b")),
+                                x = xvec)),col=1,lty=2)
+lines(xvec,predict(fit3,newdata=list(g=factor(rep("b",100),levels=c("a","b")),
+                                x = xvec)),col=2,lty=2)
+
+
+## comparing automatic and manual predictions
+p1 = predict(fit3)
+p2A = with(as.list(coef(fit3)),
+           exp(`lymax.(Intercept)`)/(1+x[1:100]/exp(lhalf)))
+p2B = with(as.list(coef(fit3)),
+           exp(`lymax.(Intercept)`+lymax.gb)/(1+x[101:200]/exp(lhalf)))
+all(p1==c(p2A,p2B))
+##
+simulate(fit3)
+}
+\keyword{methods}
diff --git a/man/profile-methods.Rd b/man/profile-methods.Rd
new file mode 100755
index 0000000..42c78e1
--- /dev/null
+++ b/man/profile-methods.Rd
@@ -0,0 +1,54 @@
+\name{profile-methods}
+\docType{methods}
+\alias{profile-methods}
+\alias{profile,mle2-method}
+\alias{profile.mle2}
+\title{Likelihood profiles }
+\description{
+  Compute likelihood profiles for a fitted model
+}
+
+\usage{
+\S4method{profile}{mle2}(fitted, which = 1:p, maxsteps = 100,
+                    alpha = 0.01, zmax = sqrt(qchisq(1 - alpha/2, p)),
+                    del = zmax/5, trace = FALSE, skiperrs=TRUE,
+                    std.err, 
+                    tol.newmin = 0.001, debug=FALSE,
+                    prof.lower, prof.upper,
+                    skip.hessian = TRUE, try_harder=FALSE, \dots)
+}
+\arguments{
+  \item{fitted}{A fitted maximum likelihood model of class
+\dQuote{mle2}}
+  \item{which}{a numeric or character vector describing which parameters
+to profile (default is to profile all parameters)}
+  \item{maxsteps}{maximum number of steps to take looking for an upper
+value of the negative log-likelihood}
+  \item{alpha}{maximum (two-sided) likelihood ratio test confidence
+    level to find}
+  \item{zmax}{maximum value of signed square root of deviance difference
+    to find (default value corresponds to a
+  2-tailed chi-squared test at level alpha)}
+  \item{del}{step size for profiling}
+  \item{trace}{(logical) produce tracing output?}
+  \item{skiperrs}{(logical) ignore errors produced during profiling?}
+  \item{std.err}{Optional numeric vector of standard errors, for cases
+    when the Hessian is badly behaved.  Will be replicated if necessary,
+    and NA values will be replaced by the corresponding values from
+    the fit summary}
+  \item{tol.newmin}{tolerance for diagnosing whether a new minimum,
+    below the minimum deviance estimated in the initial fit, has been found}
+  \item{debug}{(logical) debugging output?}
+  \item{prof.lower}{optional vector of lower bounds for profiles}
+  \item{prof.upper}{optional vector of upper bounds for profiles}
+  \item{skip.hessian}{skip hessian (defunct?)}
+  \item{try_harder}{(logical) ignore \code{NA} and flat spots in the
+profile, try to continue anyway?}
+  \item{\dots}{additional arguments (not used)}
+}
+\details{
+  See the vignette (\code{vignette("mle2",package="bbmle")})
+  for more technical details of how profiling is done.
+ }
+ \seealso{\code{\link{profile.mle-class}}}
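+\examples{
+## a minimal profiling sketch (added here; reuses the toy data that
+##  appears elsewhere in these man pages)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+fit <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+   start=list(ymax=25,xhalf=3),data=d)
+p <- suppressWarnings(profile(fit))
+confint(p)
+}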
+\keyword{methods}
diff --git a/man/profile.mle-class.Rd b/man/profile.mle-class.Rd
new file mode 100755
index 0000000..9477f5c
--- /dev/null
+++ b/man/profile.mle-class.Rd
@@ -0,0 +1,153 @@
+\name{profile.mle2-class}
+\docType{class}
+\alias{profile.mle2-class}
+\alias{confint,profile.mle2-method}
+\alias{confint,mle2-method}
+\alias{confint.mle2}  %% bogus but good ref link
+\alias{plot,profile.mle2-method}
+\alias{plot,profile.mle2,missing-method}
+\alias{show,profile.mle2-method}
+\alias{plot.profile.mle2}
+\title{Methods for likelihood profiles}
+\description{Definition of the mle2 likelihood profile class,
+  and applicable methods}
+\section{Objects from the Class}{
+  Objects can be created by calls of the form \code{new("profile.mle2",
+    ...)}, but most often by invoking \code{profile} on an "mle2" object.
+}
+\section{Slots}{
+  \describe{
+    \item{\code{profile}:}{Object of class \code{"list"}. List of
+      profiles, one for each requested parameter. Each profile is a data
+      frame with the first column called \code{z} being the signed square
+      root of the deviance, and the others being the
+      parameters with names prefixed by \code{par.vals.}}
+    \item{\code{summary}:}{Object of class \code{"summary.mle2"}. Summary
+      of object being profiled.}
+  }
+}
+\section{Methods}{
+  \describe{
+    \item{confint}{\code{signature(object = "profile.mle2")}: Use profile
+      to generate approximate confidence intervals for parameters.}
+    \item{plot}{\code{signature(x = "profile.mle2", y = "missing")}: Plot
+      profiles for each parameter.}
+    \item{summary}{\code{signature(x = "profile.mle2")}: Summarize
+      the object being profiled.}
+    \item{show}{\code{signature(object = "profile.mle2")}: Show object.}
+  }
+}
+\usage{
+\S4method{plot}{profile.mle2}(x,
+   levels, which=1:p, conf = c(99, 95, 90, 80, 50)/100,
+   plot.confstr = TRUE,
+   confstr = NULL, absVal = TRUE, add = FALSE,
+   col.minval="green", lty.minval=2,
+   col.conf="magenta", lty.conf=2,
+   col.prof="blue", lty.prof=1,
+   xlabs=nm, ylab="z",
+   onepage=TRUE,
+   ask=((prod(par("mfcol")) < length(which)) && dev.interactive() &&
+               !onepage),
+   show.points=FALSE,
+   main, xlim, ylim, \dots)
+\S4method{confint}{mle2}(object, parm, level = 0.95, method,
+          trace=FALSE,quietly=!interactive(),
+          tol.newmin=0.001,\dots)
+\S4method{confint}{profile.mle2}(object, parm, level = 0.95, trace=FALSE, \dots)
+}
+\arguments{
+  \item{x}{An object of class \code{profile.mle2}}
+  \item{object}{An object of class \code{mle2} or \code{profile.mle2}
+    (as appropriate)}
+  \item{levels}{levels at which to plot likelihood cutoffs (set by conf
+    by default)}
+  \item{level}{level at which to compute confidence interval}
+  \item{which}{(numeric or character) which parameter profiles to plot}
+  \item{parm}{(numeric or character) which parameter(s) to find
+    confidence intervals for}
+  \item{method}{(character) "spline", "uniroot", or "quad", for
+    spline-extrapolation-based (default), root-finding, or quadratic
+    confidence intervals.  By default it uses the value of
+    \code{mle2.options("confint")} -- the factory setting is "spline".}
+  \item{trace}{trace progress of confidence interval calculation when using
+    \sQuote{uniroot} method?}
+  \item{conf}{(1-alpha) levels at which to plot likelihood
+    cutoffs/confidence intervals}
+  \item{quietly}{(logical) suppress \dQuote{Profiling ...} message
+    when computing profile to get confidence interval?}
+  \item{tol.newmin}{see \code{\link{profile-methods}}}
+  \item{plot.confstr}{(logical) plot labels showing confidence levels?}
+  \item{confstr}{(character) labels for confidence levels (by default, constructed from conf levels)}
+  \item{absVal}{(logical) plot absolute values of signed square
+    root deviance difference ("V" plot rather than straight-line
+    plot)?}
+  \item{add}{(logical) add profile to existing graph?}
+  \item{col.minval}{color for minimum line}
+  \item{lty.minval}{line type for minimum line}
+  \item{col.conf}{color for confidence intervals}
+  \item{lty.conf}{line type for confidence intervals}
+  \item{col.prof}{color for profile}
+  \item{lty.prof}{line type for profile}
+  \item{xlabs}{x labels}
+  \item{ylab}{y label}
+  \item{onepage}{(logical) plot all profiles on one page,
+    adjusting par(mfcol) as necessary?}
+  \item{ask}{(logical) pause for user input between plots?}
+  \item{show.points}{(logical) show computed profile points
+    as well as interpolated spline?}
+  \item{main}{(character) main title}
+  \item{xlim}{x limits}
+  \item{ylim}{y limits}
+  \item{\dots}{other arguments}
+}
+\seealso{
+  \code{\link{mle2}}, \code{\link{mle2-class}}, \code{\link{summary.mle2-class}} 
+}
+\details{
+  The default confidence interval calculation computes a likelihood
+  profile and uses the points therein, or uses the computed points in
+  an existing \code{profile.mle2} object, to construct an interpolation
+  spline (which by default has three times as many points as were in
+  the original set of profile points).  It then uses linear
+  interpolation between these interpolated points (!)
+}
+\examples{
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+## we have a choice here: (1) don't impose boundaries on the parameters,
+##  put up with warning messages about NaN values: 
+fit1 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+     start=list(ymax=1,xhalf=1),
+     data=d)
+p1 <- suppressWarnings(profile(fit1))
+plot(p1,main=c("first","second"),
+     xlab=c(~y[max],~x[1/2]),ylab="Signed square root deviance",
+     show.points=TRUE)
+suppressWarnings(confint(fit1)) ## recomputes profile
+confint(p1)  ## operates on existing profile
+suppressWarnings(confint(fit1,method="uniroot"))
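+## (added sketch) the quadratic approximation listed in the
+##  'method' argument above:
+confint(fit1,method="quad")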
+## alternatively, we can use box constraints to keep ourselves
+##  to positive parameter values ...
+fit2 <- update(fit1,method="L-BFGS-B",lower=c(ymax=0.001,xhalf=0.001))
+p2 <- profile(fit2)
+plot(p2,show.points=TRUE)
+## but the fit for ymax is just bad enough that the spline gets wonky
+confint(p2)  ## now we get a warning
+confint(fit2,method="uniroot")
+## bobyqa is a better-behaved bounded optimizer ...
+##  BUT recent (development, 2012.5.24) versions of
+##    optimx no longer allow single-parameter fits!
+\dontrun{
+if (require(optimx)) {
+  fit3 <- update(fit1,
+      optimizer="optimx",
+      method="bobyqa",lower=c(ymax=0.001,xhalf=0.001))
+   p3 <- profile(fit3)
+   plot(p3,show.points=TRUE)
+  confint(p3)
+}
+}
+}
+\keyword{classes}
diff --git a/man/relist.Rd b/man/relist.Rd
new file mode 100755
index 0000000..f498f0d
--- /dev/null
+++ b/man/relist.Rd
@@ -0,0 +1,26 @@
+\name{relist2}
+\alias{relist2}
+\title{reconstruct the structure of a list}
+\description{
+  reshapes a vector according to a list template
+}
+\usage{
+relist2(v, l)
+}
+\arguments{
+  \item{v}{vector, probably numeric, of values to reshape}
+  \item{l}{template list giving structure}
+}
+\details{
+  attempts to coerce \code{v} into a list with the same
+  structure and names as \code{l}
+}
+\value{
+  a list with values corresponding to v and structure corresponding to l
+}
+\author{Ben Bolker}
+\examples{
+  l = list(b=1,c=2:5,d=matrix(1:4,nrow=2))
+  relist2(1:9,l)
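+  ## (added) check that names and dimensions of the template are
+  ##  preserved:
+  str(relist2(1:9,l))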
+}
+\keyword{misc}
diff --git a/man/sbinom.Rd b/man/sbinom.Rd
new file mode 100755
index 0000000..52d06c0
--- /dev/null
+++ b/man/sbinom.Rd
@@ -0,0 +1,58 @@
+\name{sbinom}
+\alias{sbinom}
+\alias{spois}
+\alias{snbinom}
+\alias{snorm}
+\alias{sbeta}
+\alias{sbetabinom}
+\title{Abstract definitions of distributions}
+\description{
+  Functions returning values for summary statistics
+  (mean, median, etc.) of distributions
+}
+\usage{
+sbeta(shape1, shape2)
+sbetabinom(size, prob, theta)
+sbinom(size, prob)
+snbinom(size, prob, mu)
+snorm(mean, sd)
+spois(lambda)
+}
+\arguments{
+  \item{prob}{probability as defined for \code{\link{dbinom}},
+    \code{\link{dnbinom}}, or beta-binomial distribution
+    (\code{dbetabinom} in the \code{emdbook} package)}
+  \item{size}{size parameter as defined for
+    \code{\link{dbinom}} or \code{dbetabinom}
+    in the \code{emdbook} package, or size/overdispersion parameter
+    as in \code{\link{dnbinom}}}
+  \item{mean}{mean parameter as defined for \code{\link{dnorm}}}
+  \item{mu}{mean parameter as defined for \code{\link{dnbinom}}}
+  \item{sd}{standard deviation parameter as defined for \code{\link{dnorm}}}
+  \item{shape1}{shape parameter for \code{\link{dbeta}}}
+  \item{shape2}{shape parameter for \code{\link{dbeta}}}
+  \item{lambda}{rate parameter as defined for \code{\link{dpois}}}
+  \item{theta}{overdispersion parameter for beta-binomial
+    (see \code{dbetabinom} in the \code{emdbook} package)}
+}
+\value{
+  \item{title}{name of the distribution}
+  \item{[parameters]}{input parameters for the distribution}
+  \item{mean}{theoretical mean of the distribution}
+  \item{median}{theoretical median of the distribution}
+  \item{mode}{theoretical mode of the distribution}
+  \item{variance}{theoretical variance of the distribution}
+  \item{sd}{theoretical standard deviation of the distribution}
+}
+\author{Ben Bolker}
+\seealso{\code{\link{dbinom}}, \code{\link{dpois}}, \code{\link{dnorm}}, \code{\link{dnbinom}}}
+\examples{
+  sbinom(prob=0.2,size=10)
+  snbinom(mu=2,size=1.2)
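+  ## (added sketch) inspect the full set of summary statistics
+  ##  returned:
+  str(snorm(mean=0,sd=1))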
+}
+\note{these definitions are tentative, subject to change
+as I figure this out better.  Perhaps construct functions
+that return functions? Strip down results? Do more
+automatically?}
+
+\keyword{misc}
diff --git a/man/slice.Rd b/man/slice.Rd
new file mode 100755
index 0000000..1420c95
--- /dev/null
+++ b/man/slice.Rd
@@ -0,0 +1,112 @@
+\name{slice}
+\alias{slice}
+\alias{sliceOld}
+\alias{slicetrans}
+\alias{slice1D}
+\alias{slice2D}
+\title{Calculate likelihood "slices"}
+\description{
+  Computes cross-section(s) of a multi-dimensional likelihood surface
+}
+\usage{
+slice(x, dim=1, ...)
+sliceOld(fitted, which = 1:p, maxsteps = 100,
+                       alpha = 0.01, zmax = sqrt(qchisq(1 - alpha/2, p)),
+                       del = zmax/5, trace = FALSE,
+                       tol.newmin=0.001, \dots)
+slice1D(params,fun,nt=101,lower=-Inf,
+                    upper=Inf,verbose=TRUE, tranges=NULL,\dots)
+slice2D(params,fun,nt=31,lower=-Inf,
+                    upper=Inf,
+                    cutoff=10,verbose=TRUE,
+                    tranges=NULL, \dots)
+slicetrans(params, params2, fun, extend=0.1, nt=401,
+                       lower=-Inf, upper=Inf)
+}
+\arguments{
+  \item{x}{a fitted model object of some sort}
+  \item{dim}{dimensionality of slices (1 or 2)}
+  \item{params}{a named vector of baseline parameter values}
+  \item{params2}{a vector of parameter values}
+  \item{fun}{an objective function}
+  \item{nt}{(integer) number of slice-steps to take}
+  \item{lower}{lower bound(s) (stub?)}
+  \item{upper}{upper bound(s) (stub?)}
+  \item{cutoff}{maximum increase in objective function to allow
+    when computing ranges}
+  \item{extend}{(numeric) fraction by which to extend range beyond specified points}
+  \item{verbose}{print verbose output?}
+  \item{fitted}{A fitted maximum likelihood model of class
+    \dQuote{mle2}}
+  \item{which}{a numeric or character vector describing which parameters
+    to profile (default is to profile all parameters)}
+  \item{maxsteps}{maximum number of steps to take looking for an upper
+    value of the negative log-likelihood}
+  \item{alpha}{maximum (two-sided) likelihood ratio test confidence
+    level to find}
+  \item{zmax}{maximum value of signed square root of deviance difference
+    to find (default value corresponds to a
+    2-tailed chi-squared test at level alpha)}
+  \item{del}{step size for profiling}
+  \item{trace}{(logical) produce tracing output?}
+  \item{tol.newmin}{tolerance for diagnosing whether a new minimum,
+    below the minimum deviance estimated in the initial fit, has been found}
+  \item{tranges}{a two-column matrix giving lower and upper bounds for
+    each parameter}
+  \item{\dots}{additional arguments (not used)}
+}
+\value{
+  An object of class \code{slice} with
+  \describe{
+    \item{slices}{a list of individual parameter (or parameter-pair)
+    slices, each of which is a data frame with elements
+    \describe{
+      \item{var1}{name of the first variable}
+      \item{var2}{(for 2D slices) name of the second variable}
+      \item{x}{parameter values}
+      \item{y}{(for 2D slices) parameter values}
+      \item{z}{slice values}
+    }}
+    \item{ranges}{a list (?) of the ranges for each parameter}
+    \item{params}{vector of baseline parameter values}
+    \item{dim}{1 or 2}
+  }
+  \code{sliceOld} returns instead a list with elements \code{profile}
+  and \code{summary} (see \code{\link{profile.mle2}})
+}
+\details{
+  Slices provide a lighter-weight way to explore likelihood surfaces
+  than profiles, since they vary a single parameter rather than
+  optimizing over all but one or two parameters.
+
+\describe{
+\item{slice}{is a generic method}
+\item{slice1D}{creates one-dimensional slices, by default of all
+parameters of a model}
+\item{slice2D}{creates two-dimensional slices, by default of all pairs
+of parameters in a model}
+\item{slicetrans}{creates a slice along a transect between two specified
+points in parameter space (see \code{calcslice} in the \code{emdbook}
+package)}
+}
+}
+\author{Ben Bolker}
+\seealso{\code{\link{profile}}}
+\examples{
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+fit1 <- mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
+   start=list(lymax=0,lhalf=0),
+   data=d)
+s1 <- slice(fit1,verbose=FALSE)
+s2 <- slice(fit1,dim=2,verbose=FALSE)
+require(lattice)
+plot(s1)
+plot(s2)
+## 'transect' slice, from best-fit values to another point
+st <- slice(fit1,params2=c(5,0.5))
+plot(st)
+}
+\keyword{misc}
diff --git a/man/slice.mle-class.Rd b/man/slice.mle-class.Rd
new file mode 100755
index 0000000..fa5497c
--- /dev/null
+++ b/man/slice.mle-class.Rd
@@ -0,0 +1,31 @@
+\name{slice.mle2-class}
+\docType{class}
+\alias{slice.mle2-class}
+\title{likelihood-surface slices}
+\description{evaluations of log-likelihood along transects in parameter space}
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{new("slice.mle2", ...)}.
+The objects are similar to likelihood profiles, but don't involve
+any optimization with respect to the other parameters.
+}
+\section{Slots}{
+  \describe{
+    \item{\code{profile}:}{Object of class \code{"list"}. List of
+      slices, one for each requested parameter. Each slice is a data
+      frame with the first column called \code{z} being the signed square
+      root of the -2 log likelihood ratio, and the others being the
+      parameters with names prefixed by \code{par.vals.}}
+    \item{\code{summary}:}{Object of class \code{"summary.mle2"}. Summary
+      of object being profiled.}
+  }
+}
+\section{Methods}{
+  \describe{
+    \item{plot}{\code{signature(x = "profile.mle2", y = "missing")}: Plot
+      profiles for each parameter.}
+  }
+}
+\seealso{
+  \code{\link{profile.mle2-class}}
+}
+\keyword{classes}
diff --git a/man/strwrapx.Rd b/man/strwrapx.Rd
new file mode 100755
index 0000000..1ab1551
--- /dev/null
+++ b/man/strwrapx.Rd
@@ -0,0 +1,62 @@
+\name{strwrapx}
+\alias{strwrapx}
+\title{Wrap strings at white space and + symbols}
+\description{
+  Extended (hacked) version of strwrap: wraps a string at whitespace
+  and plus symbols
+}
+\usage{
+strwrapx(x, width = 0.9 * getOption("width"), indent = 0,
+exdent = 0, prefix = "", simplify = TRUE,
+parsplit = "\n[ \t\n]*\n", wordsplit = "[ \t\n]")
+}
+\arguments{
+  \item{x}{a character vector, or an object which can be converted to a
+    character vector by \code{\link{as.character}}.}
+  \item{width}{a positive integer giving the target column for wrapping
+    lines in the output.}
+  \item{indent}{a non-negative integer giving the indentation of the
+    first line in a paragraph.}
+  \item{exdent}{a non-negative integer specifying the indentation of
+    subsequent lines in paragraphs.}
+  \item{prefix}{a character string to be used as prefix for each line.}
+  \item{simplify}{a logical.  If \code{TRUE}, the result is a single
+    character vector of line text; otherwise, it is a list of the same
+    length as \code{x} the elements of which are character vectors of
+    line text obtained from the corresponding element of \code{x}.
+    (Hence, the result in the former case is obtained by unlisting that
+    of the latter.)}
+  \item{parsplit}{Regular expression describing how to split paragraphs}
+  \item{wordsplit}{Regular expression describing how to split words}
+}
+\details{
+  Whitespace in the input is destroyed.  Double spaces after periods
+  (thought as representing sentence ends) are preserved.  Currently,
+  possible sentence ends at line breaks are not considered specially.
+  
+  Indentation is relative to the number of characters in the prefix
+  string.
+}
+\examples{
+## Read in file 'THANKS'.
+x <- paste(readLines(file.path(R.home("doc"), "THANKS")), collapse = "\n")
+## Split into paragraphs and remove the first three
+x <- unlist(strsplit(x, "\n[ \t\n]*\n"))[-(1:3)]
+## Join the rest
+x <- paste(x, collapse = "\n\n")
+## Now for some fun:
+writeLines(strwrap(x, width = 60))
+writeLines(strwrap(x, width = 60, indent = 5))
+writeLines(strwrap(x, width = 60, exdent = 5))
+writeLines(strwrap(x, prefix = "THANKS> "))
+
+## Note that messages are wrapped AT the target column indicated by
+## 'width' (and not beyond it).
+## From an R-devel posting by J. Hosking <jh910 at juno.com>.
+x <- paste(sapply(sample(10, 100, rep=TRUE),
+           function(x) substring("aaaaaaaaaa", 1, x)), collapse = " ")
+sapply(10:40,
+       function(m)
+       c(target = m, actual = max(nchar(strwrap(x, m)))))
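+
+## (added sketch) unlike strwrap, strwrapx will also break at '+'
+##  signs, e.g. in long model formulas:
+writeLines(strwrapx(paste(letters, collapse = "+"), width = 20))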
+}
+\keyword{character}
diff --git a/man/summary.mle-class.Rd b/man/summary.mle-class.Rd
new file mode 100755
index 0000000..ebfadf4
--- /dev/null
+++ b/man/summary.mle-class.Rd
@@ -0,0 +1,34 @@
+\name{summary.mle2-class}
+\docType{class}
+\alias{summary.mle2-class}
+\alias{coef,summary.mle2-method}
+\alias{show,summary.mle2-method}
+\title{Class "summary.mle2", summary of "mle2" objects}
+\description{Summary of an "mle2" object}
+\section{Objects from the Class}{
+Objects can be created by calls of the form \code{new("summary.mle2",
+  ...)}, but most often by invoking \code{summary} on an "mle2" object.
+They contain values meant for printing by \code{show}.
+}
+\section{Slots}{
+  \describe{
+    \item{\code{call}:}{Object of class \code{"language"} The call that
+      generated the "mle2" object.}
+    \item{\code{coef}:}{Object of class \code{"matrix"}. Estimated
+      coefficients and standard errors }
+    \item{\code{m2logL}:}{Object of class \code{"numeric"}. Minus twice
+      the log likelihood.}
+  }
+}
+\section{Methods}{
+  \describe{
+    \item{show}{\code{signature(object = "summary.mle2")}: Pretty-prints
+      \code{object}  }
+    \item{coef}{\code{signature(object = "summary.mle2")}: Extracts the
+    contents of the \code{coef} slot}
+  }
+}
+\seealso{
+  \code{\link{summary}}, \code{\link{mle2}}, \code{\link{mle2-class}} 
+}
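+\examples{
+## a small added sketch: summary() on an mle2 fit
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+fit <- mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d)
+s <- summary(fit)
+coef(s)  ## coefficient matrix: estimates and standard errors
+}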
+\keyword{classes}
diff --git a/tests/BIC.R b/tests/BIC.R
new file mode 100755
index 0000000..aea5553
--- /dev/null
+++ b/tests/BIC.R
@@ -0,0 +1,8 @@
+require(bbmle)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+fit <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)), start=list(ymax=25,xhalf=3),data=d)
+fit2 <- mle2(y~dpois(lambda=(x+1)*slope), start=list(slope=1),data=d)
+BIC(fit)
+BIC(fit,fit2)
diff --git a/tests/BIC.Rout.save b/tests/BIC.Rout.save
new file mode 100755
index 0000000..0b55d46
--- /dev/null
+++ b/tests/BIC.Rout.save
@@ -0,0 +1,38 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> require(bbmle)
+Loading required package: bbmle
+Loading required package: stats4
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> fit <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)), start=list(ymax=25,xhalf=3),data=d)
+> fit2 <- mle2(y~dpois(lambda=(x+1)*slope), start=list(slope=1),data=d)
+> BIC(fit)
+[1] 62.0039
+> BIC(fit,fit2)
+     df      BIC
+fit   2  62.0039
+fit2  1 228.2046
+> 
+> proc.time()
+   user  system elapsed 
+  0.716   1.076   1.659 
diff --git a/tests/ICtab.R b/tests/ICtab.R
new file mode 100755
index 0000000..3e74bf9
--- /dev/null
+++ b/tests/ICtab.R
@@ -0,0 +1,11 @@
+library(bbmle)
+
+set.seed(101)
+z = rpois(100,lambda=5)
+
+m1 = mle2(z~dpois(lambda=L),start=list(L=4),data=data.frame(z))
+
+ICtab(m1,type="qAICc",dispersion=1.2,nobs=100)
+
+m2 = glm(z~1,family=poisson)
+qAICc(m2,nobs=100,dispersion=2)
diff --git a/tests/ICtab.Rout.save b/tests/ICtab.Rout.save
new file mode 100755
index 0000000..61e3f7d
--- /dev/null
+++ b/tests/ICtab.Rout.save
@@ -0,0 +1,38 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> 
+> set.seed(101)
+> z = rpois(100,lambda=5)
+> 
+> m1 = mle2(z~dpois(lambda=L),start=list(L=4),data=data.frame(z))
+> 
+> ICtab(m1,type="qAICc",dispersion=1.2,nobs=100)
+   dqAICc df
+m1 0      1 
+> 
+> m2 = glm(z~1,family=poisson)
+> qAICc(m2,nobs=100,dispersion=2)
+[1] 226.1823
+> 
+> proc.time()
+   user  system elapsed 
+  0.704   1.028   1.583 
diff --git a/tests/Makefile b/tests/Makefile
new file mode 100755
index 0000000..af3f3eb
--- /dev/null
+++ b/tests/Makefile
@@ -0,0 +1,18 @@
+TOP=../..
+PKG=${shell cd ${TOP};pwd}
+SUITE=doRUnit.R
+R=R
+
+all: inst test
+
+inst: # Install package
+	cd ${TOP}/..;\
+	${R} CMD INSTALL ${PKG}
+
+test: # Run unit tests
+	export RCMDCHECK=FALSE;\
+	cd ${TOP}/tests;\
+	${R} --vanilla --slave < ${SUITE}
+
+savefiles:
+	./makesavefiles
diff --git a/tests/RUnit-tests.R b/tests/RUnit-tests.R
new file mode 100755
index 0000000..b6834e2
--- /dev/null
+++ b/tests/RUnit-tests.R
@@ -0,0 +1,7 @@
+require(RUnit)
+## TODO -- find solution to run these tests on R-forge
+
+##testsuite <- defineTestSuite("phylobase", dirs="/home/francois/Work/R-dev/phylobase/branches/fm-branch/RUnit-tests",
+##                             testFileRegexp="^test", testFuncRegexp="^test")
+##testRslt <- runTestSuite(testsuite)
+##printTextProtocol(testRslt)
diff --git a/tests/binomtest1.R b/tests/binomtest1.R
new file mode 100755
index 0000000..3465950
--- /dev/null
+++ b/tests/binomtest1.R
@@ -0,0 +1,42 @@
+library(bbmle)
+
+funcresp <-
+structure(list(Initial = as.integer(c(5, 5, 10, 10, 15, 15, 20, 
+20, 30, 30, 50, 50, 75, 75, 100, 100)), Killed = as.integer(c(1, 
+2, 5, 6, 10, 9, 7, 10, 11, 15, 5, 21, 32, 18, 25, 35))), .Names = c("Initial", 
+"Killed"), class = "data.frame", row.names = c("1", "2", "3", 
+"4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", 
+"16"))
+
+binomNLL2 = function(p) {
+  a = p[1]
+  h = p[2]
+  ##  cat(a,h,"\n")
+  p = a/(1+a*h*N)
+  -sum(dbinom(k,prob=p,size=N,log=TRUE))
+}
+
+N=0; k=0
+parnames(binomNLL2) = c("a","h")
+m2a = mle2(binomNLL2,start=c(a=0.5,h=0.0125),
+      data=with(funcresp,list(N=Initial,k=Killed)))
+p1a = profile(m2a)
+c2a = print(confint(p1a),digits=3)
+
+binomNLL2b = function(p,N,k) {
+  a = p[1]
+  h = p[2]
+  ##  cat(a,h,"\n")
+  p = a/(1+a*h*N)
+  -sum(dbinom(k,prob=p,size=N,log=TRUE))
+}
+parnames(binomNLL2b) = c("a","h")
+m2b = mle2(binomNLL2b,start=c(a=0.5,h=0.0125),
+  data=with(funcresp,list(N=Initial,k=Killed)))
+c2b = confint(m2b)
+
+N=funcresp$Initial; k=funcresp$Killed
+m2c = mle2(binomNLL2,start=c(a=0.5,h=0.0125))
+c2c = confint(m2c)
+print(c2c,digits=3)
+
diff --git a/tests/binomtest1.Rout b/tests/binomtest1.Rout
new file mode 100755
index 0000000..82f6688
--- /dev/null
+++ b/tests/binomtest1.Rout
@@ -0,0 +1,114 @@
+
+R version 2.11.1 (2010-05-31)
+Copyright (C) 2010 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> 
+> library(bbmle)
+Loading required package: stats4
+Loading required package: numDeriv
+> 
+> funcresp <-
++ structure(list(Initial = as.integer(c(5, 5, 10, 10, 15, 15, 20, 
++ 20, 30, 30, 50, 50, 75, 75, 100, 100)), Killed = as.integer(c(1, 
++ 2, 5, 6, 10, 9, 7, 10, 11, 15, 5, 21, 32, 18, 25, 35))), .Names = c("Initial", 
++ "Killed"), class = "data.frame", row.names = c("1", "2", "3", 
++ "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", 
++ "16"))
+> 
+> attach(funcresp)
+> 
+> binomNLL2 = function(p) {
++   a = p[1]
++   h = p[2]
++   ##  cat(a,h,"\n")
++   p = a/(1+a*h*N)
++   -sum(dbinom(k,prob=p,size=N,log=TRUE))
++ }
+> 
+> N=0; k=0
+> parnames(binomNLL2) = c("a","h")
+> m2a = mle2(binomNLL2,start=c(a=0.5,h=0.0125),
++   data=list(N=Initial,k=Killed))
+> p1a = profile(m2a); p1a
+There were 50 or more warnings (use warnings() to see the first 50)
+Likelihood profile:
+
+$a
+            z   par.vals.a   par.vals.h
+1  -4.2047344  0.294898645 -0.002923466
+2  -3.1552066  0.341179554  0.002586064
+3  -2.2351038  0.387460464  0.007009828
+4  -1.4145435  0.433741374  0.010694613
+5  -0.6726261  0.480022283  0.013859302
+6   0.0000000  0.526303193  0.016643616
+7   0.6321738  0.572584102  0.019113307
+8   1.2156051  0.618865012  0.021399150
+9   1.7630606  0.665145921  0.023494921
+10  2.2804928  0.711426831  0.025475099
+11  2.7729144  0.757707740  0.027355948
+12  3.2447726  0.803988650  0.029170757
+13  3.7001523  0.850269559  0.030945274
+
+$h
+            z    par.vals.a    par.vals.h
+1  -3.7637543  0.3268572493 -0.0024273676
+2  -3.1748327  0.3542640536  0.0007511297
+3  -2.5644438  0.3843760379  0.0039296269
+4  -1.9359396  0.4170494900  0.0071081242
+5  -1.2938745  0.4519556085  0.0102866214
+6  -0.6437592  0.4886001613  0.0134651187
+7   0.0000000  0.5263031926  0.0166436159
+8   0.6563173  0.5646512092  0.0198221132
+9   1.2951023  0.6028512247  0.0230006104
+10  1.9201220  0.6405127788  0.0261791077
+11  2.5281012  0.6773052997  0.0293576049
+12  3.1168240  0.7130175259  0.0325361022
+13  3.6849884  0.7475421634  0.0357145994
+
+> c2a = confint(p1a); c2a
+        2.5 %     97.5 %
+a 0.402495803 0.68249529
+h 0.006987227 0.02638541
+> 
+> binomNLL2b = function(p,N,k) {
++   a = p[1]
++   h = p[2]
++   ##  cat(a,h,"\n")
++   p = a/(1+a*h*N)
++   -sum(dbinom(k,prob=p,size=N,log=TRUE))
++ }
+> parnames(binomNLL2b) = c("a","h")
+> m2b = mle2(binomNLL2b,start=c(a=0.5,h=0.0125),
++   data=list(N=Initial,k=Killed))
+> c2b = confint(m2b)
+There were 50 or more warnings (use warnings() to see the first 50)
+> 
+> N=Initial; k=Killed
+> m2c = mle2(binomNLL2,start=c(a=0.5,h=0.0125))
+> c2c = confint(m2c); c2c
+There were 50 or more warnings (use warnings() to see the first 50)
+        2.5 %     97.5 %
+a 0.402495803 0.68249529
+h 0.006987227 0.02638541
+> 
+> detach(funcresp)
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  4.572   0.048   4.676 
diff --git a/tests/binomtest1.Rout.save b/tests/binomtest1.Rout.save
new file mode 100755
index 0000000..89e6d53
--- /dev/null
+++ b/tests/binomtest1.Rout.save
@@ -0,0 +1,76 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> 
+> funcresp <-
++ structure(list(Initial = as.integer(c(5, 5, 10, 10, 15, 15, 20, 
++ 20, 30, 30, 50, 50, 75, 75, 100, 100)), Killed = as.integer(c(1, 
++ 2, 5, 6, 10, 9, 7, 10, 11, 15, 5, 21, 32, 18, 25, 35))), .Names = c("Initial", 
++ "Killed"), class = "data.frame", row.names = c("1", "2", "3", 
++ "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", 
++ "16"))
+> 
+> binomNLL2 = function(p) {
++   a = p[1]
++   h = p[2]
++   ##  cat(a,h,"\n")
++   p = a/(1+a*h*N)
++   -sum(dbinom(k,prob=p,size=N,log=TRUE))
++ }
+> 
+> N=0; k=0
+> parnames(binomNLL2) = c("a","h")
+> m2a = mle2(binomNLL2,start=c(a=0.5,h=0.0125),
++       data=with(funcresp,list(N=Initial,k=Killed)))
+> p1a = profile(m2a)
+There were 50 or more warnings (use warnings() to see the first 50)
+> c2a = print(confint(p1a),digits=3)
+    2.5 % 97.5 %
+a 0.40250 0.6825
+h 0.00699 0.0264
+> 
+> binomNLL2b = function(p,N,k) {
++   a = p[1]
++   h = p[2]
++   ##  cat(a,h,"\n")
++   p = a/(1+a*h*N)
++   -sum(dbinom(k,prob=p,size=N,log=TRUE))
++ }
+> parnames(binomNLL2b) = c("a","h")
+> m2b = mle2(binomNLL2b,start=c(a=0.5,h=0.0125),
++   data=with(funcresp,list(N=Initial,k=Killed)))
+> c2b = confint(m2b)
+There were 50 or more warnings (use warnings() to see the first 50)
+> 
+> N=funcresp$Initial; k=funcresp$Killed
+> m2c = mle2(binomNLL2,start=c(a=0.5,h=0.0125))
+> c2c = confint(m2c)
+There were 50 or more warnings (use warnings() to see the first 50)
+> print(c2c,digits=3)
+    2.5 % 97.5 %
+a 0.40250 0.6825
+h 0.00699 0.0264
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  2.332   0.972   3.180 
diff --git a/tests/controleval.R b/tests/controleval.R
new file mode 100755
index 0000000..d632e7d
--- /dev/null
+++ b/tests/controleval.R
@@ -0,0 +1,30 @@
+require(bbmle)
+mle2a <- function(...)
+  mle2(...)
+
+mle2b <- function(...)
+  mle2a(...)
+
+## some data
+d <- data.frame(x=0:10,y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+ym <- mean(d$y)
+
+## some fits
+
+(fit0 <- mle2(y~dpois(lambda=ymean),start=list(ymean=ym),data=d)) # okay
+predict(fit0)
+(fit0.2 <- mle2(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
+                control=list(parscale=2))) # okay
+predict(fit0.2)
+(fit1 <- mle2a(y~dpois(lambda=ymean),start=list(ymean=ym),data=d)) # okay
+(fit1.2 <- mle2a(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
+                 control=list(parscale=2))) # FAILS
+(fit1.3 <- mle2b(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
+                 control=list(parscale=2))) # FAILS
+
+### NOT WORKING:
+if (FALSE) {
+  predict(fit1)
+  predict(fit1.2)
+  predict(fit1.3)
+}
diff --git a/tests/controleval.Rout.save b/tests/controleval.Rout.save
new file mode 100755
index 0000000..42071e3
--- /dev/null
+++ b/tests/controleval.Rout.save
@@ -0,0 +1,104 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> require(bbmle)
+Loading required package: bbmle
+> mle2a <- function(...)
++   mle2(...)
+> 
+> mle2b <- function(...)
++   mle2a(...)
+> 
+> ## some data
+> d <- data.frame(x=0:10,y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+> ym <- mean(d$y)
+> 
+> ## some fits
+> 
+> (fit0 <- mle2(y~dpois(lambda=ymean),start=list(ymean=ym),data=d)) # okay
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = ym), 
+    data = d)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> predict(fit0)
+[1] 11.54545
+> (fit0.2 <- mle2(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
++                 control=list(parscale=2))) # okay
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = ym), 
+    data = d, control = list(parscale = 2))
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> predict(fit0.2)
+[1] 11.54545
+> (fit1 <- mle2a(y~dpois(lambda=ymean),start=list(ymean=ym),data=d)) # okay
+
+Call:
+mle2(minuslogl = ..1, start = ..2, data = ..3)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> (fit1.2 <- mle2a(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
++                  control=list(parscale=2))) # FAILS
+
+Call:
+mle2(minuslogl = ..1, start = ..2, data = ..3, control = ..4)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> (fit1.3 <- mle2b(y~dpois(lambda=ymean),start=list(ymean=ym),data=d,
++                  control=list(parscale=2))) # FAILS
+
+Call:
+mle2(minuslogl = ..1, start = ..2, data = ..3, control = ..4)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> 
+> ### NOT WORKING:
+> if (FALSE) {
++   predict(fit1)
++   predict(fit1.2)
++   predict(fit1.3)
++ }
+> 
+> proc.time()
+   user  system elapsed 
+  0.736   1.076   1.638 
diff --git a/tests/doRUnit.R b/tests/doRUnit.R
new file mode 100755
index 0000000..e80bc1f
--- /dev/null
+++ b/tests/doRUnit.R
@@ -0,0 +1,63 @@
+## RUnit script obtained from:
+##   http://wiki.r-project.org/rwiki/doku.php?id=developers:runit
+##  copied from phylobase package
+
+## unit tests will not be done if RUnit is not available
+if(require("RUnit", quietly=TRUE)) {
+ 
+  ## --- Setup ---
+ 
+  pkg <- "bbmle"
+  if(Sys.getenv("RCMDCHECK") == "FALSE") {
+    ## Path to unit tests for standalone running under Makefile (not R CMD check)
+    ## PKG/tests/../inst/unitTests
+    path <- file.path(getwd(), "..", "inst", "unitTests")
+  } else {
+    ## Path to unit tests for R CMD check
+    ## PKG.Rcheck/tests/../PKG/unitTests
+    path <- system.file(package=pkg, "unitTests")
+  }
+  cat("\nRunning unit tests\n")
+  print(list(pkg=pkg, getwd=getwd(), pathToUnitTests=path))
+ 
+  library(package=pkg, character.only=TRUE)
+ 
+  ## If desired, load the name space to allow testing of private functions
+  ## if (is.element(pkg, loadedNamespaces()))
+  ##     attach(loadNamespace(pkg), name=paste("namespace", pkg, sep=":"), pos=3)
+  ##
+  ## or simply call PKG:::myPrivateFunction() in tests
+ 
+  ## --- Testing ---
+ 
+  ## Define tests
+  testSuite <- defineTestSuite(name=paste(pkg, "unit testing"),
+                                          dirs=path)
+  ## Run
+  tests <- runTestSuite(testSuite)
+ 
+  ## Default report name
+  pathReport <- file.path(path, "report")
+ 
+  ## Report to stdout and text files
+  cat("------------------- UNIT TEST SUMMARY ---------------------\n\n")
+  printTextProtocol(tests, showDetails=FALSE)
+  printTextProtocol(tests, showDetails=FALSE,
+                    fileName=paste(pathReport, "Summary.txt", sep=""))
+  printTextProtocol(tests, showDetails=TRUE,
+                    fileName=paste(pathReport, ".txt", sep=""))
+ 
+  ## Report to HTML file
+  printHTMLProtocol(tests, fileName=paste(pathReport, ".html", sep=""))
+ 
+  ## Return stop() to cause R CMD check stop in case of
+  ##  - failures i.e. FALSE to unit tests or
+  ##  - errors i.e. R errors
+  tmp <- getErrors(tests)
+  if(tmp$nFail > 0 | tmp$nErr > 0) {
+    stop(paste("\n\nunit testing failed (#test failures: ", tmp$nFail,
+               ", #R errors: ",  tmp$nErr, ")\n\n", sep=""))
+  }
+} else {
+  warning("cannot run unit tests -- package RUnit is not available")
+}
diff --git a/tests/eval.R b/tests/eval.R
new file mode 100755
index 0000000..3d8a0fe
--- /dev/null
+++ b/tests/eval.R
@@ -0,0 +1,66 @@
+## I am experiencing difficulties with one of my modeling functions (bbmle::mle2)
+## which, like other modeling functions in R, uses match.call() to
+## retrieve and save the original function call for future use.
+## I'll describe the problem for bbmle and then show that I can
+## provoke a similar problem with lm().
+
+## ============
+## PART I: mle2()
+
+  library(bbmle)
+
+  x <- 0:10
+  y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+  d <- data.frame(x,y)
+
+## The key is to call the modeling function from within another
+## function which passes additional arguments via ... 
+
+  ff <- function(d,...) {
+    mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d,...)
+  }
+
+  ff(d)
+  try(ff(d,control=list(maxit=1000)))
+
+##   Error in call$control$parscale : 
+##      object of type 'symbol' is not subsettable
+
+## This happens when I try:
+
+##  call$control$parscale <- eval.parent(call$control$parscale)
+
+## in 'normal' circumstances call$control and call$control$parscale
+## are either NULL or well-specified ...
+
+## Debugging mle2 shows that the results of match.call() are
+
+##   mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = mean(y)), 
+##       data = d, control = ..1)
+
+## ============
+## PART II: lm()
+
+## I can find a similar issue with lm(), although admittedly
+## I have to work a bit harder/do something a little bit more
+## obscure.
+
+  L1 <-  lm(y~1,data=d,tol=1e-6)
+  L1$call
+
+  ff2 <- function(d,...) {
+    lm(y~1,data=d,...)
+  }
+
+  tt <- 1e-6
+  L2 <- ff2(d,tol=tt)
+  L2$call
+
+  try(update(L2,.~.+x))
+
+## Error in lm.fit(x, y, offset = offset, singular.ok = singular.ok, ...) : 
+##   ..1 used in an incorrect context, no ... to look in
+
+ ## similar issue in curve3d().  How does curve() work?
+
+
diff --git a/tests/eval.Rout.save b/tests/eval.Rout.save
new file mode 100755
index 0000000..3be40ba
--- /dev/null
+++ b/tests/eval.Rout.save
@@ -0,0 +1,114 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> ## I am experiencing difficulties with one of my modeling functions (bbmle::mle2)
+> ## which, like other modeling functions in R, uses match.call() to
+> ## retrieve and save the original function call for future use.
+> ## I'll describe the problem for bbmle and then show that I can
+> ## provoke a similar problem with lm().
+> 
+> ## ============
+> ## PART I: mle2()
+> 
+>   library(bbmle)
+> 
+>   x <- 0:10
+>   y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+>   d <- data.frame(x,y)
+> 
+> ## The key is to call the modeling function from within another
+> ## function which passes additional arguments via ... 
+> 
+>   ff <- function(d,...) {
++     mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d,...)
++   }
+> 
+>   ff(d)
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = mean(y)), 
+    data = d)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+>   try(ff(d,control=list(maxit=1000)))
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = mean(y)), 
+    data = d, control = ..1)
+
+Coefficients:
+   ymean 
+11.54545 
+
+Log-likelihood: -42.73 
+> 
+> ##   Error in call$control$parscale : 
+> ##      object of type 'symbol' is not subsettable
+> 
+> ## This happens when I try:
+> 
+> ##  call$control$parscale <- eval.parent(call$control$parscale)
+> 
+> ## in 'normal' circumstances call$control and call$control$parscale
+> ## are either NULL or well-specified ...
+> 
+> ## Debugging mle2 shows that the results of match.call() are
+> 
+> ##   mle2(minuslogl = y ~ dpois(lambda = ymean), start = list(ymean = mean(y)), 
+> ##       data = d, control = ..1)
+> 
+> ## ============
+> ## PART II: lm()
+> 
+> ## I can find a similar issue with lm(), although admittedly
+> ## I have to work a bit harder/do something a little bit more
+> ## obscure.
+> 
+>   L1 <-  lm(y~1,data=d,tol=1e-6)
+>   L1$call
+lm(formula = y ~ 1, data = d, tol = 1e-06)
+> 
+>   ff2 <- function(d,...) {
++     lm(y~1,data=d,...)
++   }
+> 
+>   tt <- 1e-6
+>   L2 <- ff2(d,tol=tt)
+>   L2$call
+lm(formula = y ~ 1, data = d, tol = ..1)
+> 
+>   try(update(L2,.~.+x))
+Error in lm.fit(x, y, offset = offset, singular.ok = singular.ok, ...) : 
+  ..1 used in an incorrect context, no ... to look in
+> 
+> ## Error in lm.fit(x, y, offset = offset, singular.ok = singular.ok, ...) : 
+> ##   ..1 used in an incorrect context, no ... to look in
+> 
+>  ## similar issue in curve3d().  How does curve() work?
+> 
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  0.728   1.020   1.595 
diff --git a/tests/formulatest.R b/tests/formulatest.R
new file mode 100755
index 0000000..df10f18
--- /dev/null
+++ b/tests/formulatest.R
@@ -0,0 +1,311 @@
+library(bbmle)
+
+set.seed(1001)
+
+## test 1
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+suppressWarnings(m1 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+           parameters=list(ymax~1,xhalf~1),
+           start=list(ymax=1,xhalf=1),data=d))
+
+suppressWarnings(p1 <- profile(m1))
+
+suppressWarnings(m2 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+           start=list(ymax=1,xhalf=1),data=d))
+
+## should be able to omit parameters (?) or
+## have them taken from 
+## test 2:
+
+ReedfrogSizepred <-
+structure(list(TBL = as.integer(c(9, 9, 9, 12, 12, 12, 21, 21, 
+21, 25, 25, 25, 37, 37, 37)), Kill = as.integer(c(0, 2, 1, 3, 
+4, 5, 0, 0, 0, 0, 1, 0, 0, 0, 0))), .Names = c("TBL", "Kill"), class = "data.frame", row.names = c("1", 
+"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", 
+"14", "15"))
+
+VBlogist <- function(x,sizep1,sizep2,sizep3) {
+  exp(sizep1*(sizep3-x))/(1+exp(sizep2*sizep1*(sizep3-x)))
+}
+startp <- list(sizep1=0,sizep2=1,sizep3=12)
+mle2(Kill~dbinom(prob=VBlogist(TBL,sizep1,sizep2,sizep3),size=10),
+     start=startp,
+     method="Nelder-Mead",
+     data=ReedfrogSizepred)
+             
+## test 3:
+f <- factor(rep(1:2,each=20))
+xhalf <- c(5,10)
+ymax <- 10
+  x <- rep(0:19,2)
+  y <- rpois(40,ymax/(1+x/xhalf[f]))
+d <- data.frame(x,y)
+##  plot(x,y,col=as.numeric(f))
+
+  m3 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+             parameters=list(xhalf~f),
+             start=list(ymax=1,xhalf=1),data=d)
+
+  m4 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+             parameters=list(ymax~f,xhalf~f),
+             start=list(ymax=1,xhalf=1),data=d)
+
+  suppressWarnings(m5 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+             parameters=list(ymax~f),
+             start=list(ymax=1,xhalf=1),data=d))
+
+  anova(m2,m3,m4)
+  anova(m2,m5,m4)
+  AICtab(m2,m3,m4,m5)
+
+GobySurvival <-
+structure(list(exper = as.integer(c(1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
+5, 5, 5)), year = as.integer(c(2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
+2000, 2000, 2000, 2000, 2000, 2000, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
+2001, 2001, 2001, 2001, 2001, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
+2002)), site = structure(as.integer(c(2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+1, 1, 1, 1)), .Label = c("backreef", "patchreef"), class = "factor"), 
+    head = structure(as.integer(c(15, 15, 15, 15, 15, 15, 15, 
+    15, 15, 15, 15, 4, 4, 4, 19, 19, 24, 24, 24, 24, 24, 24, 
+    6, 6, 6, 6, 6, 6, 6, 6, 9, 9, 9, 10, 10, 10, 10, 10, 10, 
+    10, 10, 10, 10, 10, 13, 13, 13, 13, 3, 3, 3, 3, 3, 3, 3, 
+    3, 2, 2, 2, 2, 5, 5, 5, 5, 12, 12, 12, 12, 7, 7, 7, 11, 11, 
+    11, 11, 11, 11, 11, 11, 11, 14, 14, 14, 23, 23, 23, 23, 23, 
+    23, 23, 23, 23, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 8, 20, 
+    20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 1, 1, 1, 1, 1, 1, 
+    1, 1, 1, 1, 17, 17, 17, 17, 17, 17, 17, 16, 16, 16, 16, 16, 
+    16, 16, 16, 18, 18, 18, 26, 26, 26, 55, 55, 55, 57, 57, 41, 
+    41, 41, 45, 45, 47, 47, 48, 48, 58, 58, 34, 34, 34, 34, 35, 
+    35, 35, 35, 50, 50, 50, 32, 32, 32, 25, 25, 25, 25, 25, 33, 
+    33, 33, 28, 28, 31, 31, 31, 36, 36, 36, 44, 44, 44, 44, 29, 
+    29, 29, 27, 27, 27, 40, 40, 40, 46, 46, 46, 46, 46, 39, 39, 
+    39, 39, 30, 30, 30, 30, 30, 51, 51, 51, 51, 51, 51, 56, 56, 
+    56, 56, 56, 56, 52, 52, 52, 52, 52, 52, 55, 55, 55, 53, 53, 
+    53, 57, 57, 57, 57, 57, 57, 35, 35, 35, 35, 35, 35, 33, 33, 
+    33, 33, 33, 33, 29, 29, 29, 45, 45, 45, 45, 45, 45, 38, 38, 
+    38, 38, 38, 38, 27, 27, 27, 27, 27, 27, 59, 59, 59, 59, 59, 
+    59, 54, 54, 54, 54, 54, 54, 39, 39, 39, 39, 39, 39, 42, 42, 
+    42, 41, 41, 41, 41, 41, 41, 49, 49, 49, 46, 46, 46, 46, 46, 
+    46, 47, 47, 47, 47, 47, 47, 37, 37, 37, 43, 43, 43, 43, 43, 
+    43, 40, 40, 40, 40, 40, 40, 48, 48, 48, 48, 48, 48, 51, 51, 
+    51, 45, 45, 45, 41, 41, 41, 47, 47, 47, 37, 37, 37, 49, 49, 
+    49, 34, 34, 34, 25, 25, 25)), .Label = c("p1", "p10", "p11", 
+    "p12", "p13", "p14", "p15", "p16", "p17", "p18", "p19", "p2", 
+    "p20", "p21", "p3", "p4", "p42", "p5", "p51", "p6", "p7", 
+    "p70", "p8", "p9", "r10", "r11", "r13", "r14", "r15", "r17", 
+    "r18", "r19", "r2", "r20", "r21", "r22", "r23", "r24", "r25", 
+    "r26", "r27", "r28", "r29", "r3", "r30", "r33", "r34", "r35", 
+    "r36", "r37", "r41", "r45", "r47", "r48", "r5", "r6", "r7", 
+    "r8", "r9"), class = "factor"), density = as.integer(c(11, 
+    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 3, 3, 2, 2, 6, 
+    6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 8, 3, 3, 3, 11, 11, 11, 
+    11, 11, 11, 11, 11, 11, 11, 11, 4, 4, 4, 4, 8, 8, 8, 8, 8, 
+    8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 9, 
+    9, 9, 9, 9, 9, 9, 9, 9, 3, 3, 3, 9, 9, 9, 9, 9, 9, 9, 9, 
+    9, 3, 3, 3, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 
+    8, 8, 3, 3, 3, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 7, 
+    7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 3, 3, 3, 3, 3, 
+    3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 
+    4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 3, 3, 
+    3, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 
+    3, 3, 3, 5, 5, 5, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5, 5, 11, 11, 
+    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 6, 6, 6, 6, 6, 6, 
+    3, 3, 3, 3, 3, 3, 11, 11, 11, 11, 11, 11, 6, 6, 6, 6, 6, 
+    6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 6, 6, 6, 6, 6, 6, 11, 
+    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
+    11, 11, 6, 6, 6, 6, 6, 6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 
+    11, 11, 11, 11, 11, 11, 3, 3, 3, 6, 6, 6, 6, 6, 6, 11, 11, 
+    11, 11, 11, 11, 3, 3, 3, 11, 11, 11, 11, 11, 11, 6, 6, 6, 
+    6, 6, 6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, 3, 3, 
+    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)), qual = as.integer(c(1, 
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+    2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 
+    4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
+    5, 5, 5, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 
+    9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
+    10, 10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 
+    14, 14, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 
+    2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 
+    6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 
+    9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 12, 12, 12, 12, 18, 18, 
+    18, 18, 18, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+    2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
+    3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 
+    5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 
+    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 
+    10, 10, 10, 10, 10, 12, 12, 12, 12, 12, 12, 16, 16, 16, 16, 
+    16, 16, 2, 2, 2, 5, 5, 5, 8, 8, 8, 9, 9, 9, 10, 10, 10, 9, 
+    9, 9, 4, 4, 4, 3, 3, 3)), d1 = as.integer(c(1, 1, 1, 1, 1, 
+    1, 1, 1, 1, 1, 4, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+    1, 1, 1, 1, 8, 8, 4, 8, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
+    1, 1, 1, 11, 11, 1, 1, 1, 4, 4, 11, 11, 11, 4, 8, 11, 11, 
+    1, 1, 1, 11, 1, 1, 8, 11, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 
+    11, 11, 1, 8, 11, 4, 8, 8, 8, 11, 11, 11, 11, 11, 1, 1, 8, 
+    1, 1, 1, 1, 1, 1, 1, 4, 8, 1, 1, 1, 1, 1, 1, 4, 11, 1, 1, 
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 11, 11, 1, 1, 1, 1, 1, 1, 8, 1, 
+    1, 1, 1, 1, 8, 11, 11, 1, 4, 11, 1, 1, 3, 1, 1, 1, 1, 1, 
+    1, 1, 4, 2, 12, 2, 12, 3, 12, 2, 12, 1, 1, 1, 1, 1, 1, 1, 
+    12, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 4, 1, 1, 12, 1, 1, 1, 1, 
+    4, 1, 1, 12, 1, 1, 3, 8, 1, 2, 12, 1, 1, 1, 1, 1, 8, 1, 1, 
+    3, 3, 12, 1, 1, 2, 12, 1, 2, 4, 8, 8, 1, 2, 3, 1, 1, 1, 1, 
+    1, 1, 1, 3, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 12, 1, 1, 
+    8, 1, 2, 10, 1, 1, 12, 1, 1, 3, 1, 1, 1, 1, 2, 2, 1, 4, 6, 
+    3, 3, 4, 1, 4, 12, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 
+    1, 1, 1, 3, 6, 1, 1, 1, 1, 1, 1, 1, 1, 12, 1, 1, 12, 3, 6, 
+    10, 1, 1, 12, 1, 1, 8, 1, 2, 12, 1, 1, 1, 1, 1, 1, 1, 1, 
+    1, 1, 1, 12, 2, 2, 12, 1, 12, 12, 4, 4, 4, 1, 1, 2, 1, 1, 
+    1, 1, 1, 8, 1, 1, 2, 1, 1, 4, 1, 1, 12, 1, 1, 12, 1, 3, 12, 
+    2, 4, 12, 2, 10, 12, 1, 1, 8, 1, 1, 8)), d2 = as.integer(c(4, 
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 4, 4, 70, 4, 4, 4, 4, 4, 4, 
+    4, 4, 4, 4, 4, 4, 4, 4, 11, 11, 8, 11, 70, 4, 4, 4, 4, 4, 
+    4, 4, 4, 4, 4, 4, 4, 4, 70, 70, 4, 4, 4, 8, 8, 70, 70, 70, 
+    8, 11, 70, 70, 4, 4, 4, 70, 4, 4, 11, 70, 4, 4, 70, 4, 4, 
+    4, 4, 4, 4, 4, 70, 70, 4, 11, 70, 8, 11, 11, 11, 70, 70, 
+    70, 70, 70, 4, 4, 11, 4, 4, 4, 4, 4, 4, 4, 8, 11, 4, 4, 4, 
+    4, 4, 4, 8, 70, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 70, 70, 
+    4, 4, 4, 4, 4, 4, 11, 4, 4, 4, 4, 4, 11, 70, 70, 4, 8, 70, 
+    2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 8, 3, 70, 3, 70, 4, 70, 3, 
+    70, 2, 2, 2, 2, 2, 2, 2, 70, 2, 2, 2, 2, 2, 8, 2, 2, 2, 3, 
+    8, 2, 2, 70, 2, 2, 2, 2, 8, 2, 2, 70, 2, 2, 4, 12, 2, 3, 
+    70, 2, 2, 2, 2, 2, 12, 2, 2, 4, 4, 70, 2, 2, 3, 70, 2, 3, 
+    8, 12, 12, 2, 3, 4, 2, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 4, 2, 
+    2, 2, 2, 2, 2, 2, 2, 70, 2, 2, 10, 2, 3, 12, 2, 2, 70, 2, 
+    2, 4, 2, 2, 2, 2, 3, 3, 2, 6, 8, 4, 4, 6, 2, 6, 70, 2, 2, 
+    4, 2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 2, 2, 2, 
+    2, 2, 2, 2, 2, 70, 2, 2, 70, 4, 8, 12, 2, 2, 70, 2, 2, 10, 
+    2, 3, 70, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 70, 3, 3, 70, 
+    2, 70, 70, 6, 6, 6, 2, 2, 3, 2, 2, 2, 2, 2, 10, 2, 2, 3, 
+    2, 2, 6, 2, 2, 70, 2, 2, 70, 2, 4, 70, 3, 6, 70, 3, 12, 70, 
+    2, 2, 10, 2, 2, 10))), .Names = c("exper", "year", "site", 
+"head", "density", "qual", "d1", "d2"), class = "data.frame", row.names = c("1", 
+"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", 
+"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", 
+"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", 
+"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", 
+"47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", 
+"58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", 
+"69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", 
+"80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", 
+"91", "92", "93", "94", "95", "96", "97", "98", "99", "100", 
+"101", "102", "103", "104", "105", "106", "107", "108", "109", 
+"110", "111", "112", "113", "114", "115", "116", "117", "118", 
+"119", "120", "121", "122", "123", "124", "125", "126", "127", 
+"128", "129", "130", "131", "132", "133", "134", "135", "136", 
+"137", "138", "139", "140", "141", "142", "143", "144", "145", 
+"146", "147", "148", "149", "150", "151", "152", "153", "154", 
+"155", "156", "157", "158", "159", "160", "161", "162", "163", 
+"164", "165", "166", "167", "168", "169", "170", "171", "172", 
+"173", "174", "175", "176", "177", "178", "179", "180", "181", 
+"182", "183", "184", "185", "186", "187", "188", "189", "190", 
+"191", "192", "193", "194", "195", "196", "197", "198", "199", 
+"200", "201", "202", "203", "204", "205", "206", "207", "208", 
+"209", "210", "211", "212", "213", "214", "215", "216", "217", 
+"218", "219", "220", "221", "222", "223", "224", "225", "226", 
+"227", "228", "229", "230", "231", "232", "233", "234", "235", 
+"236", "237", "238", "239", "240", "241", "242", "243", "244", 
+"245", "246", "247", "248", "249", "250", "251", "252", "253", 
+"254", "255", "256", "257", "258", "259", "260", "261", "262", 
+"263", "264", "265", "266", "267", "268", "269", "270", "271", 
+"272", "273", "274", "275", "276", "277", "278", "279", "280", 
+"281", "282", "283", "284", "285", "286", "287", "288", "289", 
+"290", "291", "292", "293", "294", "295", "296", "297", "298", 
+"299", "300", "301", "302", "303", "304", "305", "306", "307", 
+"308", "309", "310", "311", "312", "313", "314", "315", "316", 
+"317", "318", "319", "320", "321", "322", "323", "324", "325", 
+"326", "327", "328", "329", "330", "331", "332", "333", "334", 
+"335", "336", "337", "338", "339", "340", "341", "342", "343", 
+"344", "345", "346", "347", "348", "349", "350", "351", "352", 
+"353", "354", "355", "356", "357", "358", "359", "360", "361", 
+"362", "363", "364", "365", "366", "367", "368", "369"))
+
+dicweib <- function(x,shape,scale,log=FALSE) {
+  if (is.matrix(x)) {
+    day1 <- x[,1]
+    day2 <- x[,2]
+  } else {
+    day1 <- x[1]
+    day2 <- x[2]
+  }
+  v <- log(pweibull(day2,shape,scale)-pweibull(day1,shape,scale))
+  if (log) v else exp(v)
+}
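+## dicweib() is an interval-censored Weibull density: the probability that
+## an event time falls in (day1, day2] is pweibull(day2)-pweibull(day1);
+## with day2=Inf (right-censoring, recoded from d2==70 below) this reduces
+## to the survival function at day1.  A quick sanity check (not run):
+##   dicweib(cbind(0, Inf), 1, 1)  ## total probability: 1
+##   dicweib(cbind(2, Inf), 1, 1)  ## exp(-2), survival past t=2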
+
+GS2 <- transform(GobySurvival,
+                          day1 = d1-1,
+                          day2 = ifelse(d2==70,Inf,d2-1),
+                          fexper=factor(exper))
+totmeansurv <- with(GS2,mean((d1+d2)/2))
+
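+## the formula interface can attach covariates to individual distribution
+## parameters: scale~fexper+qual*density expands the Weibull log-scale into
+## an intercept, contrasts for experiments 2-5, quality and density main
+## effects, and their interaction (cf. the coefficients in
+## formulatest.Rout.save)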
+mle2(cbind(day1,day2)~dicweib(exp(shape),exp(scale)),
+     parameters=list(scale~fexper+qual*density),
+     start=list(scale=log(totmeansurv),shape=0),data=GS2)
diff --git a/tests/formulatest.Rout.save b/tests/formulatest.Rout.save
new file mode 100755
index 0000000..f77475c
--- /dev/null
+++ b/tests/formulatest.Rout.save
@@ -0,0 +1,393 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> 
+> set.seed(1001)
+> 
+> ## test 1
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> suppressWarnings(m1 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++            parameters=list(ymax~1,xhalf~1),
++            start=list(ymax=1,xhalf=1),data=d))
+> 
+> suppressWarnings(p1 <- profile(m1))
+> 
+> suppressWarnings(m2 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++            start=list(ymax=1,xhalf=1),data=d))
+> 
+> ## should be able to omit parameters (?) or
+> ## have them taken from 
+> ## test 2:
+> 
+> ReedfrogSizepred <-
++ structure(list(TBL = as.integer(c(9, 9, 9, 12, 12, 12, 21, 21, 
++ 21, 25, 25, 25, 37, 37, 37)), Kill = as.integer(c(0, 2, 1, 3, 
++ 4, 5, 0, 0, 0, 0, 1, 0, 0, 0, 0))), .Names = c("TBL", "Kill"), class = "data.frame", row.names = c("1", 
++ "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", 
++ "14", "15"))
+> 
+> VBlogist <- function(x,sizep1,sizep2,sizep3) {
++   exp(sizep1*(sizep3-x))/(1+exp(sizep2*sizep1*(sizep3-x)))
++ }
+> startp <- list(sizep1=0,sizep2=1,sizep3=12)
+> mle2(Kill~dbinom(prob=VBlogist(TBL,sizep1,sizep2,sizep3),size=10),
++      start=startp,
++      method="Nelder-Mead",
++      data=ReedfrogSizepred)
+
+Call:
+mle2(minuslogl = Kill ~ dbinom(prob = VBlogist(TBL, sizep1, sizep2, 
+    sizep3), size = 10), start = startp, method = "Nelder-Mead", 
+    data = ReedfrogSizepred)
+
+Coefficients:
+    sizep1     sizep2     sizep3 
+-0.5944408  1.6799300 12.9078275 
+
+Log-likelihood: -12.15 
+>              
+> ## test 3:
+> f <- factor(rep(1:2,each=20))
+> xhalf <- c(5,10)
+> ymax <- 10
+>   x <- rep(0:19,2)
+>   y <- rpois(40,ymax/(1+x/xhalf[f]))
+> d <- data.frame(x,y)
+> ##  plot(x,y,col=as.numeric(f))
+> 
+>   m3 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++              parameters=list(xhalf~f),
++              start=list(ymax=1,xhalf=1),data=d)
+> 
+>   m4 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++              parameters=list(ymax~f,xhalf~f),
++              start=list(ymax=1,xhalf=1),data=d)
+Warning messages:
+1: In dpois(x = c(16L, 8L, 6L, 6L, 8L, 0L, 2L, 3L, 5L, 3L, 1L, 5L,  :
+  NaNs produced
+2: In dpois(x = c(16L, 8L, 6L, 6L, 8L, 0L, 2L, 3L, 5L, 3L, 1L, 5L,  :
+  NaNs produced
+> 
+>   suppressWarnings(m5 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++              parameters=list(ymax~f),
++              start=list(ymax=1,xhalf=1),data=d))
+> 
+>   anova(m2,m3,m4)
+Likelihood Ratio Tests
+Model 1: m2, y~dpois(lambda=ymax/(1+x/xhalf))
+Model 2: m3, y~dpois(lambda=ymax/(1+x/xhalf)): xhalf~f
+Model 3: m4, y~dpois(lambda=ymax/(1+x/xhalf)): ymax~f, xhalf~f
+  Tot Df Deviance    Chisq Df Pr(>Chisq)    
+1      2   57.208                           
+2      3  173.004 115.7960  1     <2e-16 ***
+3      4  172.415   0.5894  1     0.4427    
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 
+>   anova(m2,m5,m4)
+Likelihood Ratio Tests
+Model 1: m2, y~dpois(lambda=ymax/(1+x/xhalf))
+Model 2: m5, y~dpois(lambda=ymax/(1+x/xhalf)): ymax~f
+Model 3: m4, y~dpois(lambda=ymax/(1+x/xhalf)): ymax~f, xhalf~f
+  Tot Df Deviance    Chisq Df Pr(>Chisq)    
+1      2   57.208                           
+2      3  177.101 119.8930  1     <2e-16 ***
+3      4  172.415   4.6864  1     0.0304 *  
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 
+>   AICtab(m2,m3,m4,m5)
+   dAIC  df
+m2   0.0 2 
+m3 117.8 3 
+m4 119.2 4 
+m5 121.9 3 
+> 
+> GobySurvival <-
++ structure(list(exper = as.integer(c(1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
++ 5, 5, 5)), year = as.integer(c(2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 
++ 2000, 2000, 2000, 2000, 2000, 2000, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 2001, 
++ 2001, 2001, 2001, 2001, 2001, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 2002, 
++ 2002)), site = structure(as.integer(c(2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++ 1, 1, 1, 1)), .Label = c("backreef", "patchreef"), class = "factor"), 
++     head = structure(as.integer(c(15, 15, 15, 15, 15, 15, 15, 
++     15, 15, 15, 15, 4, 4, 4, 19, 19, 24, 24, 24, 24, 24, 24, 
++     6, 6, 6, 6, 6, 6, 6, 6, 9, 9, 9, 10, 10, 10, 10, 10, 10, 
++     10, 10, 10, 10, 10, 13, 13, 13, 13, 3, 3, 3, 3, 3, 3, 3, 
++     3, 2, 2, 2, 2, 5, 5, 5, 5, 12, 12, 12, 12, 7, 7, 7, 11, 11, 
++     11, 11, 11, 11, 11, 11, 11, 14, 14, 14, 23, 23, 23, 23, 23, 
++     23, 23, 23, 23, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 8, 20, 
++     20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 1, 1, 1, 1, 1, 1, 
++     1, 1, 1, 1, 17, 17, 17, 17, 17, 17, 17, 16, 16, 16, 16, 16, 
++     16, 16, 16, 18, 18, 18, 26, 26, 26, 55, 55, 55, 57, 57, 41, 
++     41, 41, 45, 45, 47, 47, 48, 48, 58, 58, 34, 34, 34, 34, 35, 
++     35, 35, 35, 50, 50, 50, 32, 32, 32, 25, 25, 25, 25, 25, 33, 
++     33, 33, 28, 28, 31, 31, 31, 36, 36, 36, 44, 44, 44, 44, 29, 
++     29, 29, 27, 27, 27, 40, 40, 40, 46, 46, 46, 46, 46, 39, 39, 
++     39, 39, 30, 30, 30, 30, 30, 51, 51, 51, 51, 51, 51, 56, 56, 
++     56, 56, 56, 56, 52, 52, 52, 52, 52, 52, 55, 55, 55, 53, 53, 
++     53, 57, 57, 57, 57, 57, 57, 35, 35, 35, 35, 35, 35, 33, 33, 
++     33, 33, 33, 33, 29, 29, 29, 45, 45, 45, 45, 45, 45, 38, 38, 
++     38, 38, 38, 38, 27, 27, 27, 27, 27, 27, 59, 59, 59, 59, 59, 
++     59, 54, 54, 54, 54, 54, 54, 39, 39, 39, 39, 39, 39, 42, 42, 
++     42, 41, 41, 41, 41, 41, 41, 49, 49, 49, 46, 46, 46, 46, 46, 
++     46, 47, 47, 47, 47, 47, 47, 37, 37, 37, 43, 43, 43, 43, 43, 
++     43, 40, 40, 40, 40, 40, 40, 48, 48, 48, 48, 48, 48, 51, 51, 
++     51, 45, 45, 45, 41, 41, 41, 47, 47, 47, 37, 37, 37, 49, 49, 
++     49, 34, 34, 34, 25, 25, 25)), .Label = c("p1", "p10", "p11", 
++     "p12", "p13", "p14", "p15", "p16", "p17", "p18", "p19", "p2", 
++     "p20", "p21", "p3", "p4", "p42", "p5", "p51", "p6", "p7", 
++     "p70", "p8", "p9", "r10", "r11", "r13", "r14", "r15", "r17", 
++     "r18", "r19", "r2", "r20", "r21", "r22", "r23", "r24", "r25", 
++     "r26", "r27", "r28", "r29", "r3", "r30", "r33", "r34", "r35", 
++     "r36", "r37", "r41", "r45", "r47", "r48", "r5", "r6", "r7", 
++     "r8", "r9"), class = "factor"), density = as.integer(c(11, 
++     11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 3, 3, 3, 2, 2, 6, 
++     6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 8, 3, 3, 3, 11, 11, 11, 
++     11, 11, 11, 11, 11, 11, 11, 11, 4, 4, 4, 4, 8, 8, 8, 8, 8, 
++     8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 9, 
++     9, 9, 9, 9, 9, 9, 9, 9, 3, 3, 3, 9, 9, 9, 9, 9, 9, 9, 9, 
++     9, 3, 3, 3, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 
++     8, 8, 3, 3, 3, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 7, 
++     7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 3, 3, 3, 3, 3, 
++     3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 
++     4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 3, 3, 
++     3, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 
++     3, 3, 3, 5, 5, 5, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5, 5, 11, 11, 
++     11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 6, 6, 6, 6, 6, 6, 
++     3, 3, 3, 3, 3, 3, 11, 11, 11, 11, 11, 11, 6, 6, 6, 6, 6, 
++     6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 6, 6, 6, 6, 6, 6, 11, 
++     11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
++     11, 11, 6, 6, 6, 6, 6, 6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 
++     11, 11, 11, 11, 11, 11, 3, 3, 3, 6, 6, 6, 6, 6, 6, 11, 11, 
++     11, 11, 11, 11, 3, 3, 3, 11, 11, 11, 11, 11, 11, 6, 6, 6, 
++     6, 6, 6, 11, 11, 11, 11, 11, 11, 3, 3, 3, 3, 3, 3, 3, 3, 
++     3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)), qual = as.integer(c(1, 
++     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 
++     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++     2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 
++     4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
++     5, 5, 5, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 
++     9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
++     10, 10, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 
++     14, 14, 14, 14, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 
++     2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 
++     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 
++     6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 
++     9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 12, 12, 12, 12, 18, 18, 
++     18, 18, 18, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++     2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
++     3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 
++     5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 
++     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 
++     9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 
++     10, 10, 10, 10, 10, 12, 12, 12, 12, 12, 12, 16, 16, 16, 16, 
++     16, 16, 2, 2, 2, 5, 5, 5, 8, 8, 8, 9, 9, 9, 10, 10, 10, 9, 
++     9, 9, 4, 4, 4, 3, 3, 3)), d1 = as.integer(c(1, 1, 1, 1, 1, 
++     1, 1, 1, 1, 1, 4, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++     1, 1, 1, 1, 8, 8, 4, 8, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
++     1, 1, 1, 11, 11, 1, 1, 1, 4, 4, 11, 11, 11, 4, 8, 11, 11, 
++     1, 1, 1, 11, 1, 1, 8, 11, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 
++     11, 11, 1, 8, 11, 4, 8, 8, 8, 11, 11, 11, 11, 11, 1, 1, 8, 
++     1, 1, 1, 1, 1, 1, 1, 4, 8, 1, 1, 1, 1, 1, 1, 4, 11, 1, 1, 
++     1, 1, 1, 1, 1, 1, 1, 1, 1, 11, 11, 1, 1, 1, 1, 1, 1, 8, 1, 
++     1, 1, 1, 1, 8, 11, 11, 1, 4, 11, 1, 1, 3, 1, 1, 1, 1, 1, 
++     1, 1, 4, 2, 12, 2, 12, 3, 12, 2, 12, 1, 1, 1, 1, 1, 1, 1, 
++     12, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 4, 1, 1, 12, 1, 1, 1, 1, 
++     4, 1, 1, 12, 1, 1, 3, 8, 1, 2, 12, 1, 1, 1, 1, 1, 8, 1, 1, 
++     3, 3, 12, 1, 1, 2, 12, 1, 2, 4, 8, 8, 1, 2, 3, 1, 1, 1, 1, 
++     1, 1, 1, 3, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 12, 1, 1, 
++     8, 1, 2, 10, 1, 1, 12, 1, 1, 3, 1, 1, 1, 1, 2, 2, 1, 4, 6, 
++     3, 3, 4, 1, 4, 12, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 
++     1, 1, 1, 3, 6, 1, 1, 1, 1, 1, 1, 1, 1, 12, 1, 1, 12, 3, 6, 
++     10, 1, 1, 12, 1, 1, 8, 1, 2, 12, 1, 1, 1, 1, 1, 1, 1, 1, 
++     1, 1, 1, 12, 2, 2, 12, 1, 12, 12, 4, 4, 4, 1, 1, 2, 1, 1, 
++     1, 1, 1, 8, 1, 1, 2, 1, 1, 4, 1, 1, 12, 1, 1, 12, 1, 3, 12, 
++     2, 4, 12, 2, 10, 12, 1, 1, 8, 1, 1, 8)), d2 = as.integer(c(4, 
++     4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 4, 4, 70, 4, 4, 4, 4, 4, 4, 
++     4, 4, 4, 4, 4, 4, 4, 4, 11, 11, 8, 11, 70, 4, 4, 4, 4, 4, 
++     4, 4, 4, 4, 4, 4, 4, 4, 70, 70, 4, 4, 4, 8, 8, 70, 70, 70, 
++     8, 11, 70, 70, 4, 4, 4, 70, 4, 4, 11, 70, 4, 4, 70, 4, 4, 
++     4, 4, 4, 4, 4, 70, 70, 4, 11, 70, 8, 11, 11, 11, 70, 70, 
++     70, 70, 70, 4, 4, 11, 4, 4, 4, 4, 4, 4, 4, 8, 11, 4, 4, 4, 
++     4, 4, 4, 8, 70, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 70, 70, 
++     4, 4, 4, 4, 4, 4, 11, 4, 4, 4, 4, 4, 11, 70, 70, 4, 8, 70, 
++     2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 8, 3, 70, 3, 70, 4, 70, 3, 
++     70, 2, 2, 2, 2, 2, 2, 2, 70, 2, 2, 2, 2, 2, 8, 2, 2, 2, 3, 
++     8, 2, 2, 70, 2, 2, 2, 2, 8, 2, 2, 70, 2, 2, 4, 12, 2, 3, 
++     70, 2, 2, 2, 2, 2, 12, 2, 2, 4, 4, 70, 2, 2, 3, 70, 2, 3, 
++     8, 12, 12, 2, 3, 4, 2, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 4, 2, 
++     2, 2, 2, 2, 2, 2, 2, 70, 2, 2, 10, 2, 3, 12, 2, 2, 70, 2, 
++     2, 4, 2, 2, 2, 2, 3, 3, 2, 6, 8, 4, 4, 6, 2, 6, 70, 2, 2, 
++     4, 2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 8, 2, 2, 2, 
++     2, 2, 2, 2, 2, 70, 2, 2, 70, 4, 8, 12, 2, 2, 70, 2, 2, 10, 
++     2, 3, 70, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 70, 3, 3, 70, 
++     2, 70, 70, 6, 6, 6, 2, 2, 3, 2, 2, 2, 2, 2, 10, 2, 2, 3, 
++     2, 2, 6, 2, 2, 70, 2, 2, 70, 2, 4, 70, 3, 6, 70, 3, 12, 70, 
++     2, 2, 10, 2, 2, 10))), .Names = c("exper", "year", "site", 
++ "head", "density", "qual", "d1", "d2"), class = "data.frame", row.names = c("1", 
++ "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", 
++ "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", 
++ "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", 
++ "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", 
++ "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", 
++ "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", 
++ "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", 
++ "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", 
++ "91", "92", "93", "94", "95", "96", "97", "98", "99", "100", 
++ "101", "102", "103", "104", "105", "106", "107", "108", "109", 
++ "110", "111", "112", "113", "114", "115", "116", "117", "118", 
++ "119", "120", "121", "122", "123", "124", "125", "126", "127", 
++ "128", "129", "130", "131", "132", "133", "134", "135", "136", 
++ "137", "138", "139", "140", "141", "142", "143", "144", "145", 
++ "146", "147", "148", "149", "150", "151", "152", "153", "154", 
++ "155", "156", "157", "158", "159", "160", "161", "162", "163", 
++ "164", "165", "166", "167", "168", "169", "170", "171", "172", 
++ "173", "174", "175", "176", "177", "178", "179", "180", "181", 
++ "182", "183", "184", "185", "186", "187", "188", "189", "190", 
++ "191", "192", "193", "194", "195", "196", "197", "198", "199", 
++ "200", "201", "202", "203", "204", "205", "206", "207", "208", 
++ "209", "210", "211", "212", "213", "214", "215", "216", "217", 
++ "218", "219", "220", "221", "222", "223", "224", "225", "226", 
++ "227", "228", "229", "230", "231", "232", "233", "234", "235", 
++ "236", "237", "238", "239", "240", "241", "242", "243", "244", 
++ "245", "246", "247", "248", "249", "250", "251", "252", "253", 
++ "254", "255", "256", "257", "258", "259", "260", "261", "262", 
++ "263", "264", "265", "266", "267", "268", "269", "270", "271", 
++ "272", "273", "274", "275", "276", "277", "278", "279", "280", 
++ "281", "282", "283", "284", "285", "286", "287", "288", "289", 
++ "290", "291", "292", "293", "294", "295", "296", "297", "298", 
++ "299", "300", "301", "302", "303", "304", "305", "306", "307", 
++ "308", "309", "310", "311", "312", "313", "314", "315", "316", 
++ "317", "318", "319", "320", "321", "322", "323", "324", "325", 
++ "326", "327", "328", "329", "330", "331", "332", "333", "334", 
++ "335", "336", "337", "338", "339", "340", "341", "342", "343", 
++ "344", "345", "346", "347", "348", "349", "350", "351", "352", 
++ "353", "354", "355", "356", "357", "358", "359", "360", "361", 
++ "362", "363", "364", "365", "366", "367", "368", "369"))
+> 
+> dicweib <- function(x,shape,scale,log=FALSE) {
++   if (is.matrix(x)) {
++     day1 <- x[,1]
++     day2 <- x[,2]
++   } else {
++     day1 <- x[1]
++     day2 <- x[2]
++   }
++   v <- log(pweibull(day2,shape,scale)-pweibull(day1,shape,scale))
++   if (log) v else exp(v)
++ }
+> 
+> GS2 <- transform(GobySurvival,
++                           day1 = d1-1,
++                           day2 = ifelse(d2==70,Inf,d2-1),
++                           fexper=factor(exper))
+> totmeansurv <- with(GS2,mean((d1+d2)/2))
+> 
+> mle2(cbind(day1,day2)~dicweib(exp(shape),exp(scale)),
++      parameters=list(scale~fexper+qual*density),
++      start=list(scale=log(totmeansurv),shape=0),data=GS2)
+
+Call:
+mle2(minuslogl = cbind(day1, day2) ~ dicweib(exp(shape), exp(scale)), 
+    start = list(scale = log(totmeansurv), shape = 0), data = GS2, 
+    parameters = list(scale ~ fexper + qual * density))
+
+Coefficients:
+ scale.(Intercept)      scale.fexper2      scale.fexper3      scale.fexper4 
+       1.950601011       -1.070739935       -0.767760213       -0.131513595 
+     scale.fexper5         scale.qual      scale.density scale.qual:density 
+       0.004852567       -0.013727672       -0.219867981        0.012638159 
+             shape 
+      -1.001618792 
+
+Log-likelihood: -443.06 
+There were 14 warnings (use warnings() to see them)
+> 
+> proc.time()
+   user  system elapsed 
+  2.800   1.028   3.756 
diff --git a/tests/glmcomp.R b/tests/glmcomp.R
new file mode 100755
index 0000000..7efceae
--- /dev/null
+++ b/tests/glmcomp.R
@@ -0,0 +1,25 @@
+library(bbmle)
+library(testthat)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+LL <- function(ymax=15, xhalf=6)
+    -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
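+## (LL is a hand-coded negative log-likelihood for the hyperbolic Poisson
+## model; it is defined for reference only and not fitted in this file)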
+mfit0 <- mle2(y~dpois(lambda=exp(interc)),
+              start=list(interc=log(mean(y))),data=d)
+
+mfit1 <- mle2(y~dpois(lambda=exp(loglambda)),
+              start=list(loglambda=log(mean(y))),data=d)
+              
+gfit0 <- glm(y~1,family=poisson)
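+## mfit0 and gfit0 are the same intercept-only Poisson model with a log
+## link (and mfit1 is just mfit0 with the parameter renamed), so the
+## coefficient, log-likelihood, and fitted value should agree: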
+expect_equal(unname(coef(mfit0)),unname(coef(gfit0)))
+expect_equal(logLik(mfit0),logLik(gfit0))
+expect_equal(predict(mfit0),  ## only one value for now
+             unique(predict(gfit0,type="response")))
+
+## FIXME: residuals are backwards
+expect_equal(residuals(mfit0,type="response"),unname(residuals(gfit0,type="response")))
+## FIXME: residuals are backwards
+expect_equal(residuals(mfit0,type="pearson"),unname(residuals(gfit0,type="pearson")))
+
+
diff --git a/tests/glmcomp.Rout.save b/tests/glmcomp.Rout.save
new file mode 100755
index 0000000..34d7c9b
--- /dev/null
+++ b/tests/glmcomp.Rout.save
@@ -0,0 +1,49 @@
+
+R Under development (unstable) (2014-05-14 r65609) -- "Unsuffered Consequences"
+Copyright (C) 2014 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> library(testthat)
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> LL <- function(ymax=15, xhalf=6)
++     -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
+> mfit0 <- mle2(y~dpois(lambda=exp(interc)),
++               start=list(interc=log(mean(y))),data=d)
+> 
+> mfit1 <- mle2(y~dpois(lambda=exp(loglambda)),
++               start=list(loglambda=log(mean(y))),data=d)
+>               
+> gfit0 <- glm(y~1,family=poisson)
+> expect_equal(unname(coef(mfit0)),unname(coef(gfit0)))
+> expect_equal(logLik(mfit0),logLik(gfit0))
+> expect_equal(predict(mfit0),  ## only one value for now
++              unique(predict(gfit0,type="response")))
+> 
+> ## FIXME: residuals are backwards
+> expect_equal(residuals(mfit0,type="response"),unname(residuals(gfit0,type="response")))
+> ## FIXME: residuals are backwards
+> expect_equal(residuals(mfit0,type="pearson"),unname(residuals(gfit0,type="pearson")))
+> 
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  0.816   1.912   2.711 
diff --git a/tests/gradient_vecpar_profile.R b/tests/gradient_vecpar_profile.R
new file mode 100755
index 0000000..09b438a
--- /dev/null
+++ b/tests/gradient_vecpar_profile.R
@@ -0,0 +1,43 @@
+library(bbmle)
+
+## Simulate data
+
+set.seed(1)
+x <- 1:5
+y <- 2*x+1
+noise <- rnorm(5, 0, 0.1)
+mydata <- data.frame(x = x, y=y+noise)
+
+## Model definition
+
+model <- function(a, b) with(mydata, a*x+b)
+
+## Negative log-likelihood
+
+nll <- function(par) with(mydata, {
+  a <- par[1]
+  b <- par[2]
+  sum(0.5*((y-model(a,b))/0.1)^2)
+  
+})
+
+gr <- function(par) with(mydata, {
+  a <- par[1]
+  b <- par[2]
+  dnllda <- -sum(((y-model(a,b))/0.1)*x/0.1)
+  dnlldb <- -sum(((y-model(a,b))/0.1)*1/0.1)
+  return(c(dnllda, dnlldb))
+  
+})
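+## a sanity check on the analytic gradient, assuming the numDeriv package
+## is available (not run):
+##   numDeriv::grad(nll, c(a=1, b=2))  ## should match gr(c(a=1, b=2))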
+
+## optimization
+
+parnames(nll) <- c("a", "b")
+parnames(gr) <- c("a", "b")
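+## parnames() marks nll and gr as taking a single parameter vector (rather
+## than separate named arguments), which is what lets mle2 fit them below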
+
+fit <- mle2(nll, c(a = 1, b=2), gr=gr)
+
+myprof <- profile(fit)
+
+fit <- mle2(nll, c(a = 1, b=2), gr=gr, skip.hessian=TRUE)
+myprof2 <- profile(fit,std.err=c(0.1,0.1))
diff --git a/tests/gradient_vecpar_profile.Rout.save b/tests/gradient_vecpar_profile.Rout.save
new file mode 100755
index 0000000..0cdb2ee
--- /dev/null
+++ b/tests/gradient_vecpar_profile.Rout.save
@@ -0,0 +1,67 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> 
+> ## Simulate data
+> 
+> set.seed(1)
+> x <- 1:5
+> y <- 2*x+1
+> noise <- rnorm(5, 0, 0.1)
+> mydata <- data.frame(x = x, y=y+noise)
+> 
+> ## Model definition
+> 
+> model <- function(a, b) with(mydata, a*x+b)
+> 
+> ## Negative log-likelihood
+> 
+> nll <- function(par) with(mydata, {
++   a <- par[1]
++   b <- par[2]
++   sum(0.5*((y-model(a,b))/0.1)^2)
++   
++ })
+> 
+> gr <- function(par) with(mydata, {
++   a <- par[1]
++   b <- par[2]
++   dnllda <- -sum(((y-model(a,b))/0.1)*x/0.1)
++   dnlldb <- -sum(((y-model(a,b))/0.1)*1/0.1)
++   return(c(dnllda, dnlldb))
++   
++ })
+> 
+> ## optimization
+> 
+> parnames(nll) <- c("a", "b")
+> parnames(gr) <- c("a", "b")
+> 
+> fit <- mle2(nll, c(a = 1, b=2), gr=gr)
+> 
+> myprof <- profile(fit)
+> 
+> fit <- mle2(nll, c(a = 1, b=2), gr=gr, skip.hessian=TRUE)
+> myprof2 <- profile(fit,std.err=c(0.1,0.1))
+> 
+> proc.time()
+   user  system elapsed 
+  1.028   1.072   1.970 
diff --git a/tests/grtest1.R b/tests/grtest1.R
new file mode 100755
index 0000000..7f8835b
--- /dev/null
+++ b/tests/grtest1.R
@@ -0,0 +1,9 @@
+## from Eric Weese
+library(bbmle)
+f <- function(x=2,a=1) x^2 - a
+f.g <- function(x,a) 2*x
+f.g2 <- function(x,a) c(2*x,0)
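+## f.g returns the gradient for the free parameter only, while f.g2 also
+## carries a (dummy) slot for the fixed parameter a; both forms should be
+## accepted by mle2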
+options(digits=3)
+mle2(f,fixed=list(a=1))
+mle2(f,gr=f.g,fixed=list(a=1))
+mle2(f,gr=f.g2,fixed=list(a=1))
diff --git a/tests/grtest1.Rout.save b/tests/grtest1.Rout.save
new file mode 100644
index 0000000..0dfc1e1
--- /dev/null
+++ b/tests/grtest1.Rout.save
@@ -0,0 +1,60 @@
+
+R Under development (unstable) (2016-02-09 r70138) -- "Unsuffered Consequences"
+Copyright (C) 2016 The R Foundation for Statistical Computing
+Platform: x86_64-pc-linux-gnu (64-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> ## from Eric Weese
+> library(bbmle)
+Loading required package: stats4
+> f <- function(x=2,a=1) x^2 - a
+> f.g <- function(x,a) 2*x
+> f.g2 <- function(x,a) c(2*x,0)
+> options(digits=3)
+> mle2(f,fixed=list(a=1))
+
+Call:
+mle2(minuslogl = f, fixed = list(a = 1))
+
+Coefficients:
+       x        a 
+1.09e-13 1.00e+00 
+
+Log-likelihood: 1 
+> mle2(f,gr=f.g,fixed=list(a=1))
+
+Call:
+mle2(minuslogl = f, fixed = list(a = 1), gr = f.g)
+
+Coefficients:
+x a 
+0 1 
+
+Log-likelihood: 1 
+> mle2(f,gr=f.g2,fixed=list(a=1))
+
+Call:
+mle2(minuslogl = f, fixed = list(a = 1), gr = f.g2)
+
+Coefficients:
+x a 
+0 1 
+
+Log-likelihood: 1 
+> 
+> proc.time()
+   user  system elapsed 
+  0.436   0.044   0.488 
diff --git a/tests/makesavefiles b/tests/makesavefiles
new file mode 100755
index 0000000..37850b8
--- /dev/null
+++ b/tests/makesavefiles
@@ -0,0 +1 @@
+for i in `echo *.R | sed -e "s/\.R//g"`; do R CMD BATCH --vanilla $i.R; mv $i.Rout $i.Rout.save; done
diff --git a/tests/methods.R b/tests/methods.R
new file mode 100755
index 0000000..677ba31
--- /dev/null
+++ b/tests/methods.R
@@ -0,0 +1,24 @@
+library(bbmle)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+LL <- function(ymax=15, xhalf=6)
+    -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
+options(digits=3)
+mfit0 <- mle2(y~dpois(lambda=exp(interc)),
+              start=list(interc=log(mean(y))),data=d)
+mfit1 <- mle2(y~dpois(lambda=exp(loglambda)),
+              start=list(loglambda=log(mean(y))),data=d)
+
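+## exercise the standard accessor and summary methods on the fitted objects: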
+coef(mfit0)
+residuals(mfit0)
+AIC(mfit0)
+BIC(mfit0)
+vcov(mfit0)
+## fitted(mfit0)  ## fails, looks for default value
+predict(mfit0)  ## FIXME: doesn't expand properly (need implicit lambda~1 formula??)
+set.seed(1001)
+simulate(mfit0)
+anova(mfit0,mfit1)
+summary(mfit0)
+summary(mfit1)
diff --git a/tests/methods.Rout.save b/tests/methods.Rout.save
new file mode 100755
index 0000000..9023fa8
--- /dev/null
+++ b/tests/methods.Rout.save
@@ -0,0 +1,90 @@
+
+R Under development (unstable) (2014-05-14 r65609) -- "Unsuffered Consequences"
+Copyright (C) 2014 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> LL <- function(ymax=15, xhalf=6)
++     -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
+> options(digits=3)
+> mfit0 <- mle2(y~dpois(lambda=exp(interc)),
++               start=list(interc=log(mean(y))),data=d)
+> mfit1 <- mle2(y~dpois(lambda=exp(loglambda)),
++               start=list(loglambda=log(mean(y))),data=d)
+> 
+> coef(mfit0)
+interc 
+  2.45 
+> residuals(mfit0)
+ [1]  4.254  1.605  0.428  0.134  2.488 -1.926 -0.749 -1.043 -1.926 -2.221
+[11] -1.043
+> AIC(mfit0)
+[1] 87.5
+> BIC(mfit0)
+[1] 87.9
+> vcov(mfit0)
+        interc
+interc 0.00787
+> ## fitted(mfit0)  ## fails, looks for default value
+> predict(mfit0)  ## FIXME: doesn't expand properly (need implicit lambda~1 formula??)
+[1] 11.5
+> set.seed(1001)
+> simulate(mfit0)
+ [1] 18 10 10 15  7  9 10 14 10 13 15
+> anova(mfit0,mfit1)
+Likelihood Ratio Tests
+Model 1: mfit0, y~dpois(lambda=exp(interc))
+Model 2: mfit1, y~dpois(lambda=exp(loglambda))
+  Tot Df Deviance Chisq Df Pr(>Chisq)
+1      1     85.5                    
+2      1     85.5     0  0          1
+> summary(mfit0)
+Maximum likelihood estimation
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = exp(interc)), start = list(interc = log(mean(y))), 
+    data = d)
+
+Coefficients:
+       Estimate Std. Error z value  Pr(z)    
+interc   2.4463     0.0887    27.6 <2e-16 ***
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+
+-2 log L: 85.5 
+> summary(mfit1)
+Maximum likelihood estimation
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = exp(loglambda)), start = list(loglambda = log(mean(y))), 
+    data = d)
+
+Coefficients:
+          Estimate Std. Error z value  Pr(z)    
+loglambda   2.4463     0.0887    27.6 <2e-16 ***
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+
+-2 log L: 85.5 
+> 
+> proc.time()
+   user  system elapsed 
+  0.672   1.400   2.330 
diff --git a/tests/mkout b/tests/mkout
new file mode 100755
index 0000000..0b4f616
--- /dev/null
+++ b/tests/mkout
@@ -0,0 +1 @@
+R CMD BATCH --vanilla $1.R; mv $1.Rout $1.Rout.save
diff --git a/tests/mortanal.R b/tests/mortanal.R
new file mode 100755
index 0000000..64bc11e
--- /dev/null
+++ b/tests/mortanal.R
@@ -0,0 +1,67 @@
+library(bbmle)
+
+## goby data in dump format
+
+x <- structure(list(indiv = structure(as.integer(c(20, 77, 79, 21, 
+33, 40, 11, 28, 43, 85, 56, 49, 29, 37, 57, 36, 66, 65, 19, 69, 
+47, 60, 23, 25, 39, 84, 12, 5, 76, 55, 32, 10, 75, 4, 78, 80, 
+86, 48, 54, 22, 18, 61, 41, 74, 68, 14, 53, 45, 30, 17, 62, 3, 
+7, 50, 34, 82, 8, 70, 38, 52, 2, 63, 81, 15, 44, 58, 13, 26, 
+73, 83, 59, 42, 72, 67, 35, 16, 1, 46, 27, 64, 51, 24, 71, 6, 
+9, 31)), .Label = c("f10al1", "f10al2", "f10al3", "f10r1", "f10r2", 
+"f11al1", "f11al2", "f11al3", "f11al4", "f11r1", "f11r2", "f11r3", 
+"f12al1", "f12al2", "f12al3", "f12al4", "f12al5", "f12r1", "f12r2", 
+"f12r3", "f12r4", "f12r5", "f12r6", "f13al1", "f13r1", "f14al1", 
+"f14al2", "f14r1", "f14r2", "f15al1", "f15al2", "f15r1", "f15r2", 
+"f18al1", "f18al2", "f18r1", "f18r2", "f19al1", "f19r1", "f19r2", 
+"f1al1", "f1al2", "f1r1", "f20al1", "f20al2", "f20al3", "f20r1", 
+"f20r2", "f20r3", "f2al1", "f2al2", "f2al3", "f2al4", "f2r1", 
+"f2r2", "f2r3", "f2r4", "f3al1", "f3al2", "f3r1", "f3r2", "f4al1", 
+"f5al1", "f5al2", "f5r1", "f5r2", "f6al1", "f6al2", "f6r1", "f7al1", 
+"f7al2", "f7al3", "f7al4", "f7al5", "f7r1", "f7r2", "f7r3", "f7r4", 
+"f7r5", "f7r6", "f9al1", "f9al2", "f9al4", "f9r1", "f9r2", "f9r3"
+), class = "factor"), group = structure(as.integer(c(5, 5, 5, 
+5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
+4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 
+3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
+2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), .Label = c("AL", 
+"AL-Rat5th", "AL-RatOv", "R", "R-ALat5th"), class = "factor"), 
+    lifespan = as.integer(c(391, 370, 346, 341, 334, 320, 319, 
+    317, 314, 307, 295, 260, 30, 10, 397, 380, 364, 355, 352, 
+    341, 340, 339, 336, 320, 314, 312, 308, 302, 296, 290, 284, 
+    267, 263, 263, 255, 253, 242, 222, 220, 181, 64, 36, 192, 
+    192, 189, 186, 183, 181, 180, 176, 173, 171, 170, 169, 166, 
+    11, 247, 235, 234, 233, 232, 224, 221, 220, 215, 210, 210, 
+    204, 202, 17, 13, 301, 300, 296, 281, 271, 253, 250, 241, 
+    239, 232, 221, 220, 214, 33, 30))), .Names = c("indiv", "group", 
+"lifespan"), class = "data.frame", row.names = c("1", "2", "3", 
+"4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", 
+"16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", 
+"27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", 
+"38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", 
+"49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", 
+"60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", 
+"71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", 
+"82", "83", "84", "85", "86"))
+
+mlife <- log(mean(x$lifespan))
+Bm0w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
+             start=list(llambda=mlife,alpha=1),
+             data=x)
+Bm1w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
+           start=list(llambda=mlife,alpha=1),
+           parameters=list(llambda~group),
+           data=x)
+Bm2w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
+             start=list(llambda=mlife,alpha=1),
+             parameters=list(llambda~group,alpha~group),
+             data=x)           
+Bm3w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
+           start=list(llambda=mlife,alpha=3),
+           parameters=list(alpha~group),
+           data=x)
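+## the four fits form nested chains: Bm0w (common scale and shape) sits
+## inside Bm1w (scale varies by group) and Bm3w (shape varies by group),
+## and both of those sit inside Bm2w (both vary); the anova() calls below
+## walk these chains as likelihood-ratio tests, and AICctab() compares all
+## four with the small-sample AIC correction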
+anova(Bm0w,Bm1w)
+anova(Bm0w,Bm1w,Bm2w)
+anova(Bm0w,Bm3w,Bm2w)
+AICctab(Bm0w,Bm1w,Bm2w,Bm3w,sort=TRUE,nobs=nrow(x),delta=TRUE)
+
diff --git a/tests/mortanal.Rout.save b/tests/mortanal.Rout.save
new file mode 100755
index 0000000..bb6d6d1
--- /dev/null
+++ b/tests/mortanal.Rout.save
@@ -0,0 +1,143 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> 
+> ## goby data in dump format
+> 
+> x <- structure(list(indiv = structure(as.integer(c(20, 77, 79, 21, 
++ 33, 40, 11, 28, 43, 85, 56, 49, 29, 37, 57, 36, 66, 65, 19, 69, 
++ 47, 60, 23, 25, 39, 84, 12, 5, 76, 55, 32, 10, 75, 4, 78, 80, 
++ 86, 48, 54, 22, 18, 61, 41, 74, 68, 14, 53, 45, 30, 17, 62, 3, 
++ 7, 50, 34, 82, 8, 70, 38, 52, 2, 63, 81, 15, 44, 58, 13, 26, 
++ 73, 83, 59, 42, 72, 67, 35, 16, 1, 46, 27, 64, 51, 24, 71, 6, 
++ 9, 31)), .Label = c("f10al1", "f10al2", "f10al3", "f10r1", "f10r2", 
++ "f11al1", "f11al2", "f11al3", "f11al4", "f11r1", "f11r2", "f11r3", 
++ "f12al1", "f12al2", "f12al3", "f12al4", "f12al5", "f12r1", "f12r2", 
++ "f12r3", "f12r4", "f12r5", "f12r6", "f13al1", "f13r1", "f14al1", 
++ "f14al2", "f14r1", "f14r2", "f15al1", "f15al2", "f15r1", "f15r2", 
++ "f18al1", "f18al2", "f18r1", "f18r2", "f19al1", "f19r1", "f19r2", 
++ "f1al1", "f1al2", "f1r1", "f20al1", "f20al2", "f20al3", "f20r1", 
++ "f20r2", "f20r3", "f2al1", "f2al2", "f2al3", "f2al4", "f2r1", 
++ "f2r2", "f2r3", "f2r4", "f3al1", "f3al2", "f3r1", "f3r2", "f4al1", 
++ "f5al1", "f5al2", "f5r1", "f5r2", "f6al1", "f6al2", "f6r1", "f7al1", 
++ "f7al2", "f7al3", "f7al4", "f7al5", "f7r1", "f7r2", "f7r3", "f7r4", 
++ "f7r5", "f7r6", "f9al1", "f9al2", "f9al4", "f9r1", "f9r2", "f9r3"
++ ), class = "factor"), group = structure(as.integer(c(5, 5, 5, 
++ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
++ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 
++ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
++ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), .Label = c("AL", 
++ "AL-Rat5th", "AL-RatOv", "R", "R-ALat5th"), class = "factor"), 
++     lifespan = as.integer(c(391, 370, 346, 341, 334, 320, 319, 
++     317, 314, 307, 295, 260, 30, 10, 397, 380, 364, 355, 352, 
++     341, 340, 339, 336, 320, 314, 312, 308, 302, 296, 290, 284, 
++     267, 263, 263, 255, 253, 242, 222, 220, 181, 64, 36, 192, 
++     192, 189, 186, 183, 181, 180, 176, 173, 171, 170, 169, 166, 
++     11, 247, 235, 234, 233, 232, 224, 221, 220, 215, 210, 210, 
++     204, 202, 17, 13, 301, 300, 296, 281, 271, 253, 250, 241, 
++     239, 232, 221, 220, 214, 33, 30))), .Names = c("indiv", "group", 
++ "lifespan"), class = "data.frame", row.names = c("1", "2", "3", 
++ "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", 
++ "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", 
++ "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", 
++ "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", 
++ "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", 
++ "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", 
++ "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", 
++ "82", "83", "84", "85", "86"))
+> 
+> mlife <- log(mean(x$lifespan))
+> Bm0w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
++              start=list(llambda=mlife,alpha=1),
++              data=x)
+> Bm1w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
++            start=list(llambda=mlife,alpha=1),
++            parameters=list(llambda~group),
++            data=x)
+Warning message:
+In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+> Bm2w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
++              start=list(llambda=mlife,alpha=1),
++              parameters=list(llambda~group,alpha~group),
++              data=x)           
+Warning messages:
+1: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+2: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+> Bm3w <- mle2(lifespan~dweibull(scale=exp(llambda),shape=alpha),
++            start=list(llambda=mlife,alpha=3),
++            parameters=list(alpha~group),
++            data=x)
+Warning messages:
+1: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+2: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+3: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+4: In dweibull(x = c(391L, 370L, 346L, 341L, 334L, 320L, 319L, 317L,  :
+  NaNs produced
+> anova(Bm0w,Bm1w)
+Likelihood Ratio Tests
+Model 1: Bm0w, lifespan~dweibull(scale=exp(llambda),shape=alpha)
+Model 2: Bm1w, lifespan~dweibull(scale=exp(llambda),shape=alpha): llambda~group
+  Tot Df Deviance  Chisq Df Pr(>Chisq)    
+1      2   1043.5                         
+2      6   1015.5 27.945  4   1.28e-05 ***
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 
+> anova(Bm0w,Bm1w,Bm2w)
+Likelihood Ratio Tests
+Model 1: Bm0w, lifespan~dweibull(scale=exp(llambda),shape=alpha)
+Model 2: Bm1w, lifespan~dweibull(scale=exp(llambda),shape=alpha): llambda~group
+Model 3: Bm2w, lifespan~dweibull(scale=exp(llambda),shape=alpha):
+          llambda~group, alpha~group
+  Tot Df Deviance  Chisq Df Pr(>Chisq)    
+1      2   1043.5                         
+2      6   1015.5 27.945  4   1.28e-05 ***
+3     10   1008.8  6.736  4     0.1505    
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 
+> anova(Bm0w,Bm3w,Bm2w)
+Likelihood Ratio Tests
+Model 1: Bm0w, lifespan~dweibull(scale=exp(llambda),shape=alpha)
+Model 2: Bm3w, lifespan~dweibull(scale=exp(llambda),shape=alpha): alpha~group
+Model 3: Bm2w, lifespan~dweibull(scale=exp(llambda),shape=alpha):
+          llambda~group, alpha~group
+  Tot Df Deviance   Chisq Df Pr(>Chisq)    
+1      2   1043.5                          
+2      6   1038.5  4.9434  4     0.2932    
+3     10   1008.8 29.7377  4  5.535e-06 ***
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 
+> AICctab(Bm0w,Bm1w,Bm2w,Bm3w,sort=TRUE,nobs=nrow(x),delta=TRUE)
+     dAICc df
+Bm1w  0.0  6 
+Bm2w  3.1  10
+Bm0w 19.0  2 
+Bm3w 23.0  6 
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  1.600   1.164   2.632 
diff --git a/tests/optimize.R b/tests/optimize.R
new file mode 100755
index 0000000..7159828
--- /dev/null
+++ b/tests/optimize.R
@@ -0,0 +1,27 @@
+## try to reconstruct error reported by Hofert Jan Marius
+## (simpler version)
+
+Lfun <- function(x) {
+  (x-5)^2
+}
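+## Lfun's unconstrained minimum is at x=5, below the lower bound imposed
+## next, so the fits should sit on the boundary and the lower profile
+## confidence limit comes back NA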
+
+
+
+library(bbmle)
+
+lb <- 6
+## first try with L-BFGS-B and bounds
+m1 <- mle2(Lfun,start=list(x=7),lower=6,method="L-BFGS-B")
+coef(m1)
+p1 <- profile(m1)
+plot(p1)
+(c1 <- confint(m1,quietly=TRUE))
+## all  OK
+
+m2 <- mle2(Lfun,start=list(x=7),optimizer="optimize",
+           lower=lb,upper=10)
+coef(m2)
+p2 <- profile(m2)
+(c2 <- confint(m2))
+(c2 <- confint(m2))
+plot(p2,show.points=TRUE)
diff --git a/tests/optimize.Rout.save b/tests/optimize.Rout.save
new file mode 100755
index 0000000..331df38
--- /dev/null
+++ b/tests/optimize.Rout.save
@@ -0,0 +1,67 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> ## try to reconstruct error reported by Hofert Jan Marius
+> ## (simpler version)
+> 
+> Lfun <- function(x) {
++   (x-5)^2
++ }
+> 
+> 
+> 
+> library(bbmle)
+> 
+> lb <- 6
+> ## first try with L-BFGS-B and bounds
+> m1 <- mle2(Lfun,start=list(x=7),lower=6,method="L-BFGS-B")
+Warning message:
+In mle2(Lfun, start = list(x = 7), lower = 6, method = "L-BFGS-B") :
+  some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
+> coef(m1)
+x 
+6 
+> p1 <- profile(m1)
+> plot(p1)
+> (c1 <- confint(m1,quietly=TRUE))
+   2.5 %   97.5 % 
+      NA 6.702747 
+> ## all  OK
+> 
+> m2 <- mle2(Lfun,start=list(x=7),optimizer="optimize",
++            lower=lb,upper=10)
+> coef(m2)
+      x 
+6.00006 
+> p2 <- profile(m2)
+> (c2 <- confint(m2))
+   2.5 %   97.5 % 
+      NA 6.668954 
+> (c2 <- confint(m2))
+   2.5 %   97.5 % 
+      NA 6.668954 
+> plot(p2,show.points=TRUE)
+Warning message:
+In .local(x, ...) :
+  non-monotonic profile: reverting to linear interpolation.  Consider setting std.err manually
+> 
+> proc.time()
+   user  system elapsed 
+  0.960   1.064   1.875 
diff --git a/tests/optimizers.R b/tests/optimizers.R
new file mode 100755
index 0000000..b95c7d7
--- /dev/null
+++ b/tests/optimizers.R
@@ -0,0 +1,24 @@
+library(bbmle)
+old_opts <- options(digits=3)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+suppressWarnings(fits <- lapply(c("optim","nlm","nlminb"),
+               mle2,
+               minuslogl=y~dpois(lambda=ymax/(1+x/xhalf)),
+               start=list(ymax=15,xhalf=6),data=d,
+               method="Nelder-Mead")) ## 'method' is ignored by nlm()/nlminb()
+
+sapply(fits,coef)
+sapply(fits,logLik)
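+## all three optimizers should agree to the printed precision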
+
+(fit2 <-  mle2(y~dpois(lambda=25/(1+x/xhalf)),
+              start=list(xhalf=5),data=d,
+              lower=2,upper=8,
+              optimizer="optimize"))
+
+## gives an error referring to 'interval' rather than 'upper'/'lower'
+## (fit2 <-  mle2(y~dpois(lambda=25/(1+x/xhalf)),
+##              start=list(xhalf=5),
+##              optimizer="optimize"))
+options(old_opts)
diff --git a/tests/optimizers.Rout.save b/tests/optimizers.Rout.save
new file mode 100755
index 0000000..04ddc0d
--- /dev/null
+++ b/tests/optimizers.Rout.save
@@ -0,0 +1,62 @@
+
+R Under development (unstable) (2013-10-24 r64106) -- "Unsuffered Consequences"
+Copyright (C) 2013 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> old_opts <- options(digits=3)
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> suppressWarnings(fits <- lapply(c("optim","nlm","nlminb"),
++                mle2,
++                minuslogl=y~dpois(lambda=ymax/(1+x/xhalf)),
++                start=list(ymax=15,xhalf=6),data=d,
++                method="Nelder-Mead")) ## 'method' is ignored by nlm()/nlminb()
+> 
+> sapply(fits,coef)
+       [,1]  [,2]  [,3]
+ymax  25.00 25.00 25.00
+xhalf  3.06  3.06  3.06
+> sapply(fits,logLik)
+[1] -28.6 -28.6 -28.6
+> 
+> (fit2 <-  mle2(y~dpois(lambda=25/(1+x/xhalf)),
++               start=list(xhalf=5),data=d,
++               lower=2,upper=8,
++               optimizer="optimize"))
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = 25/(1 + x/xhalf)), start = list(xhalf = 5), 
+    optimizer = "optimize", data = d, lower = 2, upper = 8)
+
+Coefficients:
+xhalf 
+ 3.06 
+
+Log-likelihood: -28.6 
+> 
+> ## gives an error referring to 'interval' rather than 'upper'/'lower'
+> ## (fit2 <-  mle2(y~dpois(lambda=25/(1+x/xhalf)),
+> ##              start=list(xhalf=5),
+> ##              optimizer="optimize"))
+> options(old_opts)
+> 
+> proc.time()
+   user  system elapsed 
+  0.788   1.212   2.049 
diff --git a/tests/optimx.R b/tests/optimx.R
new file mode 100755
index 0000000..fa59078
--- /dev/null
+++ b/tests/optimx.R
@@ -0,0 +1,22 @@
+library(bbmle)
+old_opt <- options(digits=3)
+require(optimx)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+
+## breaks, don't try this
+## optimx(fn=Lfn,par=c(15,6),method="Rvmmin")
+
+suppressWarnings(m1 <- mle2(minuslogl=y~dpois(lambda=ymax/(1+x/xhalf)),
+     start=list(ymax=15,xhalf=6),data=d,
+     optimizer="optimx",
+           method=c("BFGS","Nelder-Mead","CG")))
+
+## FIXME!! this fails (though without a visible error, because errors are
+## caught during profiling): optimx 2012.05.24 now restricts npar to >1 ...
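+## (profiling fixes one parameter and re-optimizes over the rest, so with
+## two parameters the inner problem has a single free parameter)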
+
+suppressWarnings(head(as.data.frame(profile(m1))))
+detach("package:optimx")
+options(old_opt)
diff --git a/tests/optimx.Rout.save b/tests/optimx.Rout.save
new file mode 100755
index 0000000..750876b
--- /dev/null
+++ b/tests/optimx.Rout.save
@@ -0,0 +1,55 @@
+
+R Under development (unstable) (2013-08-18 r63609) -- "Unsuffered Consequences"
+Copyright (C) 2013 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> old_opt <- options(digits=3)
+> require(optimx)
+Loading required package: optimx
+Loading required package: numDeriv
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> 
+> ## breaks, don't try this
+> ## optimx(fn=Lfn,par=c(15,6),method="Rvmmin")
+> 
+> suppressWarnings(m1 <- mle2(minuslogl=y~dpois(lambda=ymax/(1+x/xhalf)),
++      start=list(ymax=15,xhalf=6),data=d,
++      optimizer="optimx",
++      method=c("BFGS","Nelder-Mead","CG")))
+> 
+> ## FIXME!! fails (though without an error, because profiling
+> ##  catches errors internally) now that npar is restricted
+> ## to >1 in optimx 2012.05.24 ...
+> 
+> suppressWarnings(head(as.data.frame(profile(m1))))
+       param      z par.vals.ymax par.vals.xhalf focal
+ymax.1  ymax -3.377          14.0           9.99  14.0
+ymax.2  ymax -2.341          16.7           6.54  16.7
+ymax.3  ymax -1.458          19.5           4.79  19.5
+ymax.4  ymax -0.687          22.2           3.75  22.2
+ymax.5  ymax  0.000          25.0           3.06  25.0
+ymax.6  ymax  0.620          27.7           2.57  27.7
+> detach("package:optimx")
+> options(old_opt)
+> 
+> proc.time()
+   user  system elapsed 
+  7.848   4.400  12.412 
diff --git a/tests/order.R b/tests/order.R
new file mode 100755
index 0000000..db21541
--- /dev/null
+++ b/tests/order.R
@@ -0,0 +1,23 @@
+set.seed(1001)
+x <- runif(10)
+y <- 1000+x+rnorm(10,sd=0.1)
+d <- data.frame(x,y)
+
+library(bbmle)
+## warning
+m1 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=900,b=1,s=log(0.1)),
+  control=list(parscale=c(1000,1,0.1)),data=d)
+
+m2 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=900,b=1,s=log(0.1)),
+  control=list(parscale=c(b=1,a=1000,s=0.1)),data=d)
+
+m3 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
+  method="L-BFGS-B",lower=c(a=1100,b=2,s=-Inf),data=d)
+
+## warning
+m4 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(b=1,a=1200,s=log(0.1)),
+  method="L-BFGS-B",lower=c(2,1100,0.1),data=d)
+
+c1 = coef(m3)[c("a","b","s")]
+c2 = coef(m4)[c("a","b","s")]
+if (!all(abs(c1-c2)<1e-7)) stop("mismatch")
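
In brief, what this test pins down: mle2 matches auxiliary vectors
(parscale, lower) to 'start' by name, and assumes unnamed vectors
follow the order of 'start' (hence the fix_order warnings in the saved
output below). A sketch with the same data, not part of the suite:

    library(bbmle)
    set.seed(1001)
    x <- runif(10)
    y <- 1000 + x + rnorm(10, sd = 0.1)
    d <- data.frame(x, y)
    ## named parscale: order is irrelevant, silently rearranged
    mA <- mle2(y ~ dnorm(a + b*x, sd = exp(s)),
               start = list(a = 900, b = 1, s = log(0.1)),
               control = list(parscale = c(s = 0.1, b = 1, a = 1000)),
               data = d)
    ## an unnamed parscale would be taken in 'start' order, with a warning
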
diff --git a/tests/order.Rout.save b/tests/order.Rout.save
new file mode 100755
index 0000000..d22c780
--- /dev/null
+++ b/tests/order.Rout.save
@@ -0,0 +1,58 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> set.seed(1001)
+> x <- runif(10)
+> y <- 1000+x+rnorm(10,sd=0.1)
+> d <- data.frame(x,y)
+> 
+> library(bbmle)
+> ## warning
+> m1 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=900,b=1,s=log(0.1)),
++   control=list(parscale=c(1000,1,0.1)),data=d)
+Warning message:
+In fix_order(call$control$parscale, "parscale") :
+  parscale not named: rearranging to match 'start'
+> 
+> m2 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=900,b=1,s=log(0.1)),
++   control=list(parscale=c(b=1,a=1000,s=0.1)),data=d)
+> 
+> m3 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
++   method="L-BFGS-B",lower=c(a=1100,b=2,s=-Inf),data=d)
+Warning message:
+In mle2(y ~ dnorm(a + b * x, sd = exp(s)), start = list(a = 1, b = 1,  :
+  some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
+> 
+> ## warning
+> m4 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(b=1,a=1200,s=log(0.1)),
++   method="L-BFGS-B",lower=c(2,1100,0.1),data=d)
+Warning messages:
+1: In fix_order(call$lower, "lower bounds", -Inf) :
+  lower bounds not named: rearranging to match 'start'
+2: In mle2(y ~ dnorm(a + b * x, sd = exp(s)), start = list(b = 1, a = 1200,  :
+  some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
+> 
+> c1 = coef(m3)[c("a","b","s")]
+> c2 = coef(m4)[c("a","b","s")]
+> if (!all(abs(c1-c2)<1e-7)) stop("mismatch")
+> 
+> proc.time()
+   user  system elapsed 
+  1.012   1.024   1.896 
diff --git a/tests/parscale.R b/tests/parscale.R
new file mode 100755
index 0000000..ed6b2a7
--- /dev/null
+++ b/tests/parscale.R
@@ -0,0 +1,78 @@
+library(bbmle)
+old_opt <- options(digits=3)
+tracelevel <- 0
+
+## source("~/lib/R/pkgs/bbmle/pkg/R/mle.R
+
+set.seed(1002)
+X <- rexp(1000, rate = 0.0001)
+f <- function(X, rate) {
+  if (tracelevel>0 && rate<0) cat("rate<0: ",rate,"\n")
+  -sum(dexp(X, rate = rate, log = TRUE))
+}
+if (FALSE) {
+  ## L-BFGS-B violates bounds, and gets stuck at lower bound
+  m <- mle2(minuslogl = f,
+            data = list(X = X),
+            start = list(rate = 0.01),
+            method = "L-BFGS-B",
+            control = list(trace = tracelevel, 
+                           parscale = 1e-4),
+            lower = c(rate = 1e-9))
+
+  profile(m, std.err=0.0001) ## finds new optimum
+
+  fsc <- function(X, rate) {
+    -sum(dexp(X, rate = rate*1e-4, log = TRUE))
+  }
+  msc <- mle2(minuslogl = fsc,
+            data = list(X = X),
+            start = list(rate = 100),
+            method = "L-BFGS-B",
+            control = list(trace = tracelevel),
+            lower = c(rate = 1e-5))
+
+  ## does it work if we scale by hand?
+  ##   no, identical problem
+}
+
+## works fine with a better starting point
+m <- mle2(minuslogl = f,
+          data = list(X = X),
+          start = list(rate = 0.001),
+          method = "L-BFGS-B",
+          control = list(trace = tracelevel,
+                         parscale=1e-4),
+          lower = c(rate = 1e-9))
+vcov(m)
+confint(m)
+
+
+## works OK despite warnings about 1-dimensional opt. with N-M
+(m0 <- mle2(minuslogl = f,
+          data = list(X = X),
+          start = list(rate = 0.01),
+          method = "Nelder-Mead",
+          control = list(trace = tracelevel, parscale = 1e-4)))
+vcov(m0)
+
+confint(m0)
+confint(m0,method="quad")
+## very similar (good quadratic surface, not surprising)
+
+m1 <- mle2(minuslogl = f,
+          data = list(X = X),
+          start = list(rate = 0.01),
+          method = "BFGS",
+          control = list(trace = tracelevel, parscale = 1e-4))
+
+
+## gets stuck? will have to investigate ...
+m2 <- mle2(minuslogl = f,
+           data = list(X = X),
+           start = list(rate = 0.01),
+           optimizer = "optimize",
+           lower=1e-9,upper=0.1)
+
+vcov(m2)
+options(old_opt)
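
The "scale by hand" experiment above keeps a hard positivity bound; a
log-scale reparameterization (shown only as a sketch here, not used by
the test) removes both the scaling problem and the bound at once:

    library(bbmle)
    set.seed(1002)
    X <- rexp(1000, rate = 0.0001)
    ## optimize over log(rate): O(1) scale, and rate > 0 by construction
    flog <- function(X, lrate) -sum(dexp(X, rate = exp(lrate), log = TRUE))
    mlog <- mle2(minuslogl = flog, data = list(X = X),
                 start = list(lrate = log(0.01)))
    exp(coef(mlog))  ## back on the natural scale, near 1e-4
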
diff --git a/tests/parscale.Rout b/tests/parscale.Rout
new file mode 100755
index 0000000..e1e8576
--- /dev/null
+++ b/tests/parscale.Rout
@@ -0,0 +1,174 @@
+
+R version 2.13.0 alpha (2011-03-18 r54865)
+Copyright (C) 2011 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+[Previously saved workspace restored]
+
+> library(bbmle)
+Loading required package: stats4
+Loading required package: numDeriv
+Loading required package: lattice
+Loading required package: MASS
+> 
+> ## source("~/lib/R/pkgs/bbmle/pkg/R/mle.R
+> 
+> set.seed(1002)
+> X <- rexp(1000, rate = 0.0001)
+> f <- function(X, rate) {
++   if (rate<0) cat("rate<0: ",rate,"\n")
++   -sum(dexp(X, rate = rate, log = TRUE))
++ }
+> if (FALSE) {
++   ## L-BFGS-B violates bounds, and gets stuck at lower bound
++   m <- mle2(minuslogl = f,
++             data = list(X = X),
++             start = list(rate = 0.01),
++             method = "L-BFGS-B",
++             control = list(trace = 1, parscale = 1e-4),
++             lower = list(rate = 1e-9))
++ 
++   profile(m, std.err=0.0001) ## finds new optimum
++ 
++   fsc <- function(X, rate) {
++     -sum(dexp(X, rate = rate*1e-4, log = TRUE))
++   }
++   msc <- mle2(minuslogl = fsc,
++             data = list(X = X),
++             start = list(rate = 100),
++             method = "L-BFGS-B",
++             control = list(trace = 1),
++             lower = list(rate = 1e-5))
++ 
++   ## does it work if we scale by hand?
++   ##   no, identical problem
++ }
+> 
+> ## works fine with a better starting point
+> m <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.001),
++           method = "L-BFGS-B",
++           control = list(trace = 1,parscale=1e-4),
++           lower = list(rate = 1e-9))
+iter    0 value 12490.509482
+final  value 10188.014396 
+converged
+> vcov(m)
+             rate
+rate 1.045669e-11
+> confint(m)
+       2.5 %       97.5 % 
+9.605011e-05 1.087274e-04 
+> 
+> 
+> ## works OK despite warnings about 1-dimensional opt. with N-M
+> (m0 <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.01),
++           method = "Nelder-Mead",
++           control = list(trace = 1, parscale = 1e-4)))
+  Nelder-Mead direct search function minimizer
+function value for initial parameters = 102397.310635
+  Scaled convergence tolerance is 0.00152584
+Stepsize computed as 10.000000
+BUILD              2 112081.214500 102397.310635
+EXTENSION          4 102397.310635 83062.026096
+EXTENSION          6 83062.026096 44638.317097
+HI-REDUCTION       8 63791.280079 44638.317097
+REFLECTION        10 44638.317097 25773.036188
+HI-REDUCTION      12 35146.785125 25773.036188
+REFLECTION        14 25773.036188 16686.969324
+HI-REDUCTION      16 21171.111238 16686.969324
+REFLECTION        18 16686.969324 12490.509482
+HI-REDUCTION      20 14529.847885 12490.509482
+REFLECTION        22 12490.509482 10738.853151
+HI-REDUCTION      24 11555.789799 10738.853151
+REFLECTION        26 10738.853151 10209.598576
+HI-REDUCTION      28 10415.334346 10209.598576
+LO-REDUCTION      30 10209.598576 10191.680210
+HI-REDUCTION      32 10191.680210 10190.329749
+HI-REDUCTION      34 10190.329749 10188.037612
+HI-REDUCTION      36 10188.497339 10188.037612
+HI-REDUCTION      38 10188.089444 10188.037612
+HI-REDUCTION      40 10188.037612 10188.018175
+HI-REDUCTION      42 10188.018175 10188.016446
+HI-REDUCTION      44 10188.016446 10188.014462
+Exiting from Nelder Mead minimizer
+    46 function evaluations used
+
+Call:
+mle2(minuslogl = f, start = list(rate = 0.01), method = "Nelder-Mead", 
+    data = list(X = X), control = list(trace = 1, parscale = 1e-04))
+
+Coefficients:
+        rate 
+0.0001022949 
+
+Log-likelihood: -10188.01 
+Warning message:
+In optim(par = 0.01, fn = function (p)  :
+  one-diml optimization by Nelder-Mead is unreliable: use optimize
+> vcov(m0)
+             rate
+rate 1.046414e-11
+> 
+> confint(m0)
+       2.5 %       97.5 % 
+9.604965e-05 1.087271e-04 
+> confint(m0,method="quad")
+       2.5 %       97.5 % 
+9.595477e-05 1.086351e-04 
+> ## very similar (good quadratic surface, not surprising)
+> 
+> m1 <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.01),
++           method = "BFGS",
++           control = list(trace = 1, parscale = 1e-4))
+initial  value 102397.310635 
+rate<0:  -0.08679214 
+rate<0:  -0.009358428 
+rate<0:  -0.5831727 
+rate<0:  -0.1117319 
+rate<0:  -0.01744372 
+rate<0:  -0.07719334 
+rate<0:  -0.01430754 
+rate<0:  -0.001730383 
+rate<0:  -0.08426903 
+rate<0:  -0.01622577 
+rate<0:  -0.002617114 
+final  value 10188.014408 
+converged
+There were 11 warnings (use warnings() to see them)
+> 
+> 
+> ## gets stuck? will have to investigate ...
+> m2 <- mle2(minuslogl = f,
++            data = list(X = X),
++            start = list(rate = 0.01),
++            optimizer = "optimize",
++            lower=1e-9,upper=0.1)
+> 
+> vcov(m2)
+             rate
+rate 1.407176e-11
+> 
+> proc.time()
+   user  system elapsed 
+  0.856   0.196   1.065 
diff --git a/tests/parscale.Rout.save b/tests/parscale.Rout.save
new file mode 100755
index 0000000..34f97c6
--- /dev/null
+++ b/tests/parscale.Rout.save
@@ -0,0 +1,130 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> old_opt <- options(digits=3)
+> tracelevel <- 0
+> 
+> ## source("~/lib/R/pkgs/bbmle/pkg/R/mle.R
+> 
+> set.seed(1002)
+> X <- rexp(1000, rate = 0.0001)
+> f <- function(X, rate) {
++   if (tracelevel>0 && rate<0) cat("rate<0: ",rate,"\n")
++   -sum(dexp(X, rate = rate, log = TRUE))
++ }
+> if (FALSE) {
++   ## L-BFGS-B violates bounds, and gets stuck at lower bound
++   m <- mle2(minuslogl = f,
++             data = list(X = X),
++             start = list(rate = 0.01),
++             method = "L-BFGS-B",
++             control = list(trace = tracelevel, 
++                            parscale = 1e-4),
++             lower = c(rate = 1e-9))
++ 
++   profile(m, std.err=0.0001) ## finds new optimum
++ 
++   fsc <- function(X, rate) {
++     -sum(dexp(X, rate = rate*1e-4, log = TRUE))
++   }
++   msc <- mle2(minuslogl = fsc,
++             data = list(X = X),
++             start = list(rate = 100),
++             method = "L-BFGS-B",
++             control = list(trace = tracelevel),
++             lower = c(rate = 1e-5))
++ 
++   ## does it work if we scale by hand?
++   ##   no, identical problem
++ }
+> 
+> ## works fine with a better starting point
+> m <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.001),
++           method = "L-BFGS-B",
++           control = list(trace = tracelevel,
++                          parscale=1e-4),
++           lower = c(rate = 1e-9))
+> vcov(m)
+         rate
+rate 1.05e-11
+> confint(m)
+   2.5 %   97.5 % 
+9.61e-05 1.09e-04 
+> 
+> 
+> ## works OK despite warnings about 1-dimensional opt. with N-M
+> (m0 <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.01),
++           method = "Nelder-Mead",
++           control = list(trace = tracelevel, parscale = 1e-4)))
+
+Call:
+mle2(minuslogl = f, start = list(rate = 0.01), method = "Nelder-Mead", 
+    data = list(X = X), control = list(trace = tracelevel, parscale = 1e-04))
+
+Coefficients:
+    rate 
+0.000102 
+
+Log-likelihood: -10188 
+Warning message:
+In optim(par = 0.01, fn = function (p)  :
+  one-dimensional optimization by Nelder-Mead is unreliable:
+use "Brent" or optimize() directly
+> vcov(m0)
+         rate
+rate 1.05e-11
+> 
+> confint(m0)
+   2.5 %   97.5 % 
+0.000096 0.000109 
+> confint(m0,method="quad")
+   2.5 %   97.5 % 
+0.000096 0.000109 
+> ## very similar (good quadratic surface, not surprising)
+> 
+> m1 <- mle2(minuslogl = f,
++           data = list(X = X),
++           start = list(rate = 0.01),
++           method = "BFGS",
++           control = list(trace = tracelevel, parscale = 1e-4))
+There were 11 warnings (use warnings() to see them)
+> 
+> 
+> ## gets stuck? will have to investigate ...
+> m2 <- mle2(minuslogl = f,
++            data = list(X = X),
++            start = list(rate = 0.01),
++            optimizer = "optimize",
++            lower=1e-9,upper=0.1)
+> 
+> vcov(m2)
+         rate
+rate 1.41e-11
+> options(old_opt)
+> 
+> proc.time()
+   user  system elapsed 
+  0.732   1.348   1.980 
diff --git a/tests/predict.R b/tests/predict.R
new file mode 100755
index 0000000..581205e
--- /dev/null
+++ b/tests/predict.R
@@ -0,0 +1,32 @@
+library(bbmle)
+set.seed(1002)
+lymax <- c(0,2)
+lhalf <- 0
+x <- runif(200)
+g <- factor(rep(c("a","b"),each=100))
+y <- rnbinom(200,mu=exp(lymax[g])/(1+x/exp(lhalf)),size=2)
+d <- data.frame(x,g,y)
+
+fit3 <- mle2(y~dnbinom(mu=exp(lymax)/(1+x/exp(lhalf)),size=exp(logk)),
+    parameters=list(lymax~g),
+    start=list(lymax=0,lhalf=0,logk=0),data=d)
+
+plot(y~x,col=g)
+## true curves
+curve(exp(0)/(1+x/exp(0)),add=TRUE)
+curve(exp(2)/(1+x/exp(0)),col=2,add=TRUE)
+xvec = seq(0,1,length=100)
+lines(xvec,predict(fit3,newdata=list(g=factor(rep("a",100),levels=c("a","b")),
+                                x = xvec)),col=1,lty=2)
+lines(xvec,predict(fit3,newdata=list(g=factor(rep("b",100),levels=c("a","b")),
+                                x = xvec)),col=2,lty=2)
+
+p1 = predict(fit3)
+## manual prediction
+p2A =
+with(as.list(coef(fit3)),exp(`lymax.(Intercept)`)/(1+x[1:100]/exp(lhalf)))
+p2B = with(as.list(coef(fit3)),exp(`lymax.(Intercept)`+lymax.gb)/(1+x[101:200]/exp(lhalf)))
+p2 = c(p2A,p2B)
+all(p1==p2)
+
+
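One detail worth noting in the predict() calls above: factors in
'newdata' must carry the full original level set, or the model matrix
built for lymax ~ g loses its gb column. A sketch reusing fit3 and
xvec from the test:

    nd <- list(g = factor(rep("a", 100), levels = c("a", "b")), x = xvec)
    ## factor(rep("a", 100)) alone would have level "a" only, which no
    ## longer matches coefficients lymax.(Intercept) and lymax.gb
    p_ok <- predict(fit3, newdata = nd)
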
diff --git a/tests/predict.Rout.save b/tests/predict.Rout.save
new file mode 100755
index 0000000..13a5e5c
--- /dev/null
+++ b/tests/predict.Rout.save
@@ -0,0 +1,57 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> set.seed(1002)
+> lymax <- c(0,2)
+> lhalf <- 0
+> x <- runif(200)
+> g <- factor(rep(c("a","b"),each=100))
+> y <- rnbinom(200,mu=exp(lymax[g])/(1+x/exp(lhalf)),size=2)
+> d <- data.frame(x,g,y)
+> 
+> fit3 <- mle2(y~dnbinom(mu=exp(lymax)/(1+x/exp(lhalf)),size=exp(logk)),
++     parameters=list(lymax~g),
++     start=list(lymax=0,lhalf=0,logk=0),data=d)
+> 
+> plot(y~x,col=g)
+> ## true curves
+> curve(exp(0)/(1+x/exp(0)),add=TRUE)
+> curve(exp(2)/(1+x/exp(0)),col=2,add=TRUE)
+> xvec = seq(0,1,length=100)
+> lines(xvec,predict(fit3,newdata=list(g=factor(rep("a",100),levels=c("a","b")),
++                                 x = xvec)),col=1,lty=2)
+> lines(xvec,predict(fit3,newdata=list(g=factor(rep("b",100),levels=c("a","b")),
++                                 x = xvec)),col=2,lty=2)
+> 
+> p1 = predict(fit3)
+> ## manual prediction
+> p2A =
++ with(as.list(coef(fit3)),exp(`lymax.(Intercept)`)/(1+x[1:100]/exp(lhalf)))
+> p2B = with(as.list(coef(fit3)),exp(`lymax.(Intercept)`+lymax.gb)/(1+x[101:200]/exp(lhalf)))
+> p2 = c(p2A,p2B)
+> all(p1==p2)
+[1] TRUE
+> 
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  1.004   1.108   1.982 
diff --git a/tests/profbound.R b/tests/profbound.R
new file mode 100755
index 0000000..057817e
--- /dev/null
+++ b/tests/profbound.R
@@ -0,0 +1,23 @@
+library(bbmle)
+old_opt <- options(digits=3)
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+
+fit0 <- mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d,
+             method="L-BFGS-B",lower=10)
+
+stopifnot(is.na(confint(fit0)[1]))
+
+fit1 <- mle2(y~dpois(lambda=exp(a+b*x)),start=list(a=0,b=2),data=d,
+             method="L-BFGS-B",lower=-0.2)
+
+suppressWarnings(confint(fit1))
+
+fit2 <- mle2(y~dpois(lambda=exp(a+b*x)),start=list(a=0,b=2),data=d,
+             method="L-BFGS-B")
+
+pp <- profile(fit2,prof.lower=-0.2)
+stopifnot(min(subset(as.data.frame(pp),param=="b")$par.vals.b)==-0.2)
+## note that b does go below -0.2 when profiling a ...
+options(old_opt)
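
The logic these assertions encode: when the fitted value sits on an
L-BFGS-B bound, the profile cannot reach the critical level on that
side, so confint() reports NA there; prof.lower clamps only the focal
parameter during profiling. A sketch against the fits above:

    ci <- suppressWarnings(confint(fit1))
    ci["b", ]  ## lower limit NA: b is pinned at the bound of -0.2
    pp <- profile(fit2, prof.lower = -0.2)
    min(subset(as.data.frame(pp), param == "b")$par.vals.b)  ## -0.2
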
diff --git a/tests/profbound.Rout.save b/tests/profbound.Rout.save
new file mode 100755
index 0000000..ceb11dc
--- /dev/null
+++ b/tests/profbound.Rout.save
@@ -0,0 +1,50 @@
+
+R Under development (unstable) (2013-08-18 r63609) -- "Unsuffered Consequences"
+Copyright (C) 2013 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> old_opt <- options(digits=3)
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> 
+> fit0 <- mle2(y~dpois(lambda=ymean),start=list(ymean=mean(y)),data=d,
++              method="L-BFGS-B",lower=10)
+> 
+> stopifnot(is.na(confint(fit0)[1]))
+> 
+> fit1 <- mle2(y~dpois(lambda=exp(a+b*x)),start=list(a=0,b=2),data=d,
++              method="L-BFGS-B",lower=-0.2)
+> 
+> suppressWarnings(confint(fit1))
+  2.5 %  97.5 %
+a  2.81  3.3579
+b    NA -0.0944
+> 
+> fit2 <- mle2(y~dpois(lambda=exp(a+b*x)),start=list(a=0,b=2),data=d,
++              method="L-BFGS-B")
+> 
+> pp <- profile(fit2,prof.lower=-0.2)
+> stopifnot(min(subset(as.data.frame(pp),param=="b")$par.vals.b)==-0.2)
+> ## note that b does go below -0.2 when profiling a ...
+> options(old_opt)
+> 
+> proc.time()
+   user  system elapsed 
+  1.084   0.820   3.943 
diff --git a/tests/richards.R b/tests/richards.R
new file mode 100755
index 0000000..3155a1e
--- /dev/null
+++ b/tests/richards.R
@@ -0,0 +1,92 @@
+## implement the Richards-incidence model (= "revised superlogistic")
+##  with analytic gradients
+
+## from Junling's code:
+model_richardson <- function(times, theta, N)
+{	
+	x0 = theta[1]
+	lambda = theta[2]
+	K = theta[3] * N
+	alpha = theta[4]
+	return(K/(1+((K/x0)^alpha-1)*exp(-lambda*alpha*times))^(1/alpha))
+}
+
+## equivalent model, in terms of sigma and as a symbolic expression
+Rcum <- expression((sigma*N)/(1+(((sigma*N)/x0)^alpha-1)*exp(-lambda*alpha*times))^(1/alpha))
+
+pnames <- c("x0","lambda","sigma","alpha")
+
+## function to compute gradient (and value), derived by R
+Rderiv <- deriv(Rcum,pnames, function.arg=c(pnames,"N","times"))
+
+## equivalent (using Rcum): return incidence (incid=TRUE) or cumulative incidence (incid=FALSE)
+calc_mean <- function(p,times,N,incid=TRUE) {
+  ## this is more 'magic' than I would like it to be ...
+  ##  have to create an environment and populate it with the contents of p (and N and times),
+  ##  then evaluate the expression in this environment
+  pp <- c(as.list(p),list(times=times,N=N))
+  ##  e0 <- new.env()
+  ## mapply(assign,names(pp),pp,MoreArgs=list(envir=e0))
+  cumvals <- eval(Rcum,envir=pp)
+  if (incid) diff(cumvals) else cumvals
+}
+
+## Poisson likelihood function
+likfun <- function(p,dat,times,N,incid=TRUE) {
+  -sum(dpois(dat,calc_mean(p,times,N,incid=incid),log=TRUE))
+}
+
+## deriv of P(x,lambda) = -sum(dpois(x,lambda,log=TRUE)) wrt lambda == sum(1 - x/lambda) (cf. (1-dat/lambda) below)
+## deriv of P(x,lambda) wrt p = dP/d(lambda) * d(lambda)/dp
+
+## compute gradient vector
+gradlikfun <- function(p,dat,times,N,incid=TRUE) {
+  gcall <- do.call(Rderiv,c(as.list(p),list(times=times,N=N))) ## values + gradient matrix
+  lambda <- gcall
+  attr(lambda,"gradient") <- NULL
+  if (incid) lambda <- diff(lambda)
+  gmat <- attr(gcall,"gradient") ## extract gradient
+  if (incid) gmat <- apply(gmat,2,diff)  ## differences
+  totderiv <- sweep(gmat,MARGIN=1,(1-dat/lambda),"*") ## apply chain rule (multiply columns of gmat by dP/dlambda)
+  colSums(totderiv)  ## deriv of summed likelihood = sum of derivs of likelihood
+}
+
+N <- 1000
+p0 <- c(x0=0.1,lambda=1,sigma=0.5,alpha=0.5)
+t0 <- 1:10
+
+## deterministic versions of data  (cumulative and incidence)
+dcdat <- model_richardson(t0,p0,N)
+ddat <- diff(dcdat)
+
+plot(t0,dcdat)
+plot(t0[-1],ddat)
+
+set.seed(1001)
+ddat <- rpois(length(ddat),ddat)
+
+likfun(p0,ddat,t0,N)
+gradlikfun(p0,ddat,t0,N)
+
+library(numDeriv)
+grad(likfun,p0,dat=ddat,times=t0,N=N)  ## finite differences
+## matches!
+
+library(bbmle)
+parnames(likfun) <- names(p0)
+
+m1 <- mle2(likfun,start=p0,gr=gradlikfun,data=list(times=t0,N=N,dat=ddat),
+           vecpar=TRUE)
+
+plot(t0[-1],ddat)
+lines(t0[-1],calc_mean(coef(m1),times=t0,N=N))
+
+pp1 <- profile(m1,which="lambda")
+
+m0 <- mle2(likfun,start=p0,data=list(times=t0,N=N,dat=ddat),
+           vecpar=TRUE)
+
+pp0 <- profile(m0,which="lambda")
+par(mfrow=c(1,2))
+plot(pp1,show.points=TRUE)
+plot(pp0,show.points=TRUE)
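
The deriv() pattern above, reduced to a toy expression for
orientation: deriv() returns a function whose value carries a
"gradient" attribute; gradlikfun() differences that matrix and applies
the chain rule to get the gradient of the Poisson negative
log-likelihood. A sketch (toy expression invented for illustration):

    e <- deriv(expression(a * exp(-b * t)), c("a", "b"),
               function.arg = c("a", "b", "t"))
    out <- e(a = 2, b = 0.5, t = 1:3)
    attr(out, "gradient")  ## length(t) x 2 matrix of d/da, d/db
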
diff --git a/tests/richards.Rout.save b/tests/richards.Rout.save
new file mode 100755
index 0000000..a2c1502
--- /dev/null
+++ b/tests/richards.Rout.save
@@ -0,0 +1,139 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> ## implement the Richards-incidence model (= "revised superlogistic")
+> ##  with analytic gradients
+> 
+> ## from Junling's code:
+> model_richardson <- function(times, theta, N)
++ {	
++ 	x0 = theta[1]
++ 	lambda = theta[2]
++ 	K = theta[3] * N
++ 	alpha = theta[4]
++ 	return(K/(1+((K/x0)^alpha-1)*exp(-lambda*alpha*times))^(1/alpha))
++ }
+> 
+> ## equivalent model, in terms of sigma and as a symbolic expression
+> Rcum <- expression((sigma*N)/(1+(((sigma*N)/x0)^alpha-1)*exp(-lambda*alpha*times))^(1/alpha))
+> 
+> pnames <- c("x0","lambda","sigma","alpha")
+> 
+> ## function to compute gradient (and value), derived by R
+> Rderiv <- deriv(Rcum,pnames, function.arg=c(pnames,"N","times"))
+> 
+> ## equivalent (using Rcum): return incidence (incid=TRUE) or cumulative incidence (incid=FALSE)
+> calc_mean <- function(p,times,N,incid=TRUE) {
++   ## this is more 'magic' than I would like it to be ...
++   ##  have to create an environment and populate it with the contents of p (and N and times),
++   ##  then evaluate the expression in this environment
++   pp <- c(as.list(p),list(times=times,N=N))
++   ##  e0 <- new.env()
++   ## mapply(assign,names(pp),pp,MoreArgs=list(envir=e0))
++   cumvals <- eval(Rcum,envir=pp)
++   if (incid) diff(cumvals) else cumvals
++ }
+> 
+> ## Poisson likelihood function
+> likfun <- function(p,dat,times,N,incid=TRUE) {
++   -sum(dpois(dat,calc_mean(p,times,N,incid=incid),log=TRUE))
++ }
+> 
+> ## deriv of P(x,lambda) = -sum(dpois(x,lambda,log=TRUE)) wrt lambda == sum(1 - x/lambda) (cf. (1-dat/lambda) below)
+> ## deriv of P(x,lambda) wrt p = dP/d(lambda) * d(lambda)/dp
+> 
+> ## compute gradient vector
+> gradlikfun <- function(p,dat,times,N,incid=TRUE) {
++   gcall <- do.call(Rderiv,c(as.list(p),list(times=times,N=N))) ## values + gradient matrix
++   lambda <- gcall
++   attr(lambda,"gradient") <- NULL
++   if (incid) lambda <- diff(lambda)
++   gmat <- attr(gcall,"gradient") ## extract gradient
++   if (incid) gmat <- apply(gmat,2,diff)  ## differences
++   totderiv <- sweep(gmat,MARGIN=1,(1-dat/lambda),"*") ## apply chain rule (multiply columns of gmat by dP/dlambda)
++   colSums(totderiv)  ## deriv of summed likelihood = sum of derivs of likelihood
++ }
+> 
+> N <- 1000
+> p0 <- c(x0=0.1,lambda=1,sigma=0.5,alpha=0.5)
+> t0 <- 1:10
+> 
+> ## deterministic versions of data  (cumulative and incidence)
+> dcdat <- model_richardson(t0,p0,N)
+> ddat <- diff(dcdat)
+> 
+> plot(t0,dcdat)
+> plot(t0[-1],ddat)
+> 
+> set.seed(1001)
+> ddat <- rpois(length(ddat),ddat)
+> 
+> likfun(p0,ddat,t0,N)
+[1] 22.3544
+> gradlikfun(p0,ddat,t0,N)
+      x0   lambda    sigma    alpha 
+15.42028 30.95135 19.33690 30.04404 
+> 
+> library(numDeriv)
+> grad(likfun,p0,dat=ddat,times=t0,N=N)  ## finite differences
+[1] 15.42028 30.95135 19.33690 30.04404
+> ## matches!
+> 
+> library(bbmle)
+> parnames(likfun) <- names(p0)
+> 
+> m1 <- mle2(likfun,start=p0,gr=gradlikfun,data=list(times=t0,N=N,dat=ddat),
++            vecpar=TRUE)
+Warning messages:
+1: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+2: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+3: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+> 
+> plot(t0[-1],ddat)
+> lines(t0[-1],calc_mean(coef(m1),times=t0,N=N))
+> 
+> pp1 <- profile(m1,which="lambda")
+There were 50 or more warnings (use warnings() to see the first 50)
+> 
+> m0 <- mle2(likfun,start=p0,data=list(times=t0,N=N,dat=ddat),
++            vecpar=TRUE)
+Warning messages:
+1: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+2: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+3: In dpois(dat, calc_mean(p, times, N, incid = incid), log = TRUE) :
+  NaNs produced
+> 
+> pp0 <- profile(m0,which="lambda")
+There were 50 or more warnings (use warnings() to see the first 50)
+> par(mfrow=c(1,2))
+> plot(pp1,show.points=TRUE)
+> plot(pp0,show.points=TRUE)
+Warning message:
+In .local(x, ...) :
+  non-monotonic profile: reverting to linear interpolation.  Consider setting std.err manually
+> 
+> proc.time()
+   user  system elapsed 
+  8.528   1.180   9.627 
diff --git a/tests/startvals.R b/tests/startvals.R
new file mode 100755
index 0000000..50dccfb
--- /dev/null
+++ b/tests/startvals.R
@@ -0,0 +1,30 @@
+library(bbmle)
+
+## copied from emdbook
+dbetabinom <- function (x, prob, size, theta, shape1, shape2, log = FALSE) 
+{
+    if (missing(prob) && !missing(shape1) && !missing(shape2)) {
+        prob = shape1/(shape1 + shape2)
+        theta = shape1 + shape2
+    }
+    v <- lchoose(size, x) - lbeta(theta * (1 - prob), theta * 
+        prob) + lbeta(size - x + theta * (1 - prob), x + theta * 
+        prob)
+    if (log) 
+        v
+    else exp(v)
+}
+
+ss <- data.frame(taken=c(0,1,2,5),available=c(5,5,5,5),
+                 dist=rep(1,4))
+
+SP.bb=mle2(taken~dbetabinom(prob,theta,size=available),
+  start=list(prob=0.5,theta=1),data=ss)
+SP.bb.dist=mle2(taken~dbetabinom(prob,size=available,theta),
+  parameters=list(prob~dist-1,theta~dist-1),
+  start=as.list(coef(SP.bb)),data=ss)
+
+SP.bb.dist2=mle2(taken~dbetabinom(prob,size=available,theta),
+  parameters=list(prob~dist - 1,theta~dist - 1),
+  start=as.list(coef(SP.bb)),data=ss)
+
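For reference, the parameter conversion at the top of dbetabinom()
means these two calls agree (prob = shape1/(shape1+shape2) and
theta = shape1+shape2 -- so shape1=6, shape2=4 maps to prob=0.6,
theta=10). A sketch, not run by the suite:

    dbetabinom(2, prob = 0.6, theta = 10, size = 5)
    dbetabinom(2, shape1 = 6, shape2 = 4, size = 5)  ## same value
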
diff --git a/tests/startvals.Rout.save b/tests/startvals.Rout.save
new file mode 100755
index 0000000..39c1d6d
--- /dev/null
+++ b/tests/startvals.Rout.save
@@ -0,0 +1,58 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> 
+> ## copied from emdbook
+> dbetabinom <- function (x, prob, size, theta, shape1, shape2, log = FALSE) 
++ {
++     if (missing(prob) && !missing(shape1) && !missing(shape2)) {
++         prob = shape1/(shape1 + shape2)
++         theta = shape1 + shape2
++     }
++     v <- lchoose(size, x) - lbeta(theta * (1 - prob), theta * 
++         prob) + lbeta(size - x + theta * (1 - prob), x + theta * 
++         prob)
++     if (log) 
++         v
++     else exp(v)
++ }
+> 
+> ss <- data.frame(taken=c(0,1,2,5),available=c(5,5,5,5),
++                  dist=rep(1,4))
+> 
+> SP.bb=mle2(taken~dbetabinom(prob,theta,size=available),
++   start=list(prob=0.5,theta=1),data=ss)
+Warning messages:
+1: In lbeta(theta * (1 - prob), theta * prob) : NaNs produced
+2: In lbeta(size - x + theta * (1 - prob), x + theta * prob) :
+  NaNs produced
+> SP.bb.dist=mle2(taken~dbetabinom(prob,size=available,theta),
++   parameters=list(prob~dist-1,theta~dist-1),
++   start=as.list(coef(SP.bb)),data=ss)
+> 
+> SP.bb.dist2=mle2(taken~dbetabinom(prob,size=available,theta),
++   parameters=list(prob~dist - 1,theta~dist - 1),
++   start=as.list(coef(SP.bb)),data=ss)
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  0.808   1.072   1.743 
diff --git a/tests/startvals2.R b/tests/startvals2.R
new file mode 100755
index 0000000..e84ad3a
--- /dev/null
+++ b/tests/startvals2.R
@@ -0,0 +1,193 @@
+library(bbmle)
+
+## fir data from emdbook package ...
+firdata <- structure(list(TOTCONES = c(19, 42, 40, 68, 5, 0, 21, 114, 37, 
+92, 84, 102, 98, 63, 9, 31, 35, 216, 27, 297, 36, 127, 23, 46, 
+27, 66, 11, 20, 141, 3, 22, 39, 96, 206.5, 40, 231, 63.5, 202, 
+54, 32, 107.5, 142.5, 82, 65, 153, 123, 131, 43, 98, 37, 34, 
+10, 65, 35, 50, 19, 73, 33, 61, 9, 146, 0, 44, 42, 0, 61, 17, 
+53, 27, 0, 74, 36, 28, 56, 46, 0, 15, 26, 46, 15, 105, 0, 62, 
+24, 25, 41, 138, 77, 227.7, 28, 45, 57, 109, 28, 17, 91, 69, 
+87, 10, 65, 50, 27, 30, 86, 119, 22, 8, 54, 104, 14, 16, 5, 53, 
+40, 32, 114, 39, 37, 111, 226, 156, 42, 86, 94, 54, 1, 14, 44, 
+108, 116.5, 14, 73, 3, 16, 87, 61, 48, 0, 17, 5, 88, 11, 133, 
+121, 166, 171, 63, 23, 4, 51, 10, 14, 78, 47, 31, 42, 24, 42, 
+55, 19, 63, 127, 9, 74, 120, 85, 51, 19, 131, 7, 23, 7, 9, 23, 
+55, 48, 13, 2, 9, 3, 4, 16, 1, 88, 8, 27, 16, 184, 14, 22, 25, 
+52, 2, 134, 81, 85, 3, 56, 17, 8, 10, 6, 69, 58, 1, 22, 3, 11, 
+22, 2, 37, 8, 15, 61, 6, 18, 9, 109, 54, 4, 11, 30, 0, 0, 3, 
+0, 16, 22, 9, 56, 17, 64, 38, 59, 37, 22, 41, 1, 22, 16, 17, 
+4), DBH = c(9.4, 10.6, 7.7, 10.6, 8.7, 10.1, 8.1, 11.6, 10.1, 
+13.3, 10, 13.4, 9.7, 7.4, 8.7, 8.6, 7.9, 14.2, 9.5, 15.9, 6, 
+10.6, 7.3, 10.3, 8.4, 10.2, 13.8, 9.4, 8.1, 9.6, 7.3, 7.4, 10.3, 
+13.4, 9.2, 13.9, 10.9, 17.4, 10.2, 8.2, 11.3, 16.1, 12.3, 8.3, 
+12.4, 12.5, 11.3, 7.8, 11.6, 10, 7, 5.7, 7.7, 8.9, 8.5, 8.5, 
+10.7, 10.2, 10.8, 9, 9.4, 7.6, 10.6, 10, 8, 7.4, 9.1, 6.7, 9.7, 
+6.8, 8.6, 9.1, 6.3, 6.7, 10.9, 9.5, 9.9, 6.8, 9.8, 7.7, 12.1, 
+8.2, 10, 9.6, 9.2, 8.2, 11.3, 11.6, 15.7, 9.1, 8.9, 8.7, 11, 
+6.6, 7.1, 9, 12.4, 12.1, 7.5, 9, 8, 10.9, 9.2, 10.1, 12.1, 7, 
+6.8, 8.6, 11.6, 6.6, 6.7, 6.8, 8.5, 7.8, 7.9, 9.8, 6.2, 6.7, 
+15.4, 9.2, 12.9, 6.7, 9.6, 8.4, 8, 8.7, 6.7, 9.2, 9.5, 8, 5.5, 
+8.5, 5.7, 5.6, 8, 6.5, 9.6, 6.1, 7.9, 5.9, 11, 8.2, 12.8, 12.8, 
+12.5, 13.7, 11.8, 6.3, 6.3, 8.2, 6.2, 6.7, 9.8, 9.4, 6.7, 6, 
+4.9, 9.6, 7.5, 8.4, 7.4, 9.9, 7.4, 9.5, 13.9, 6.9, 9.4, 7.4, 
+12.8, 5.8, 7.2, 5.6, 6.9, 11.3, 9.6, 6.8, 6.9, 6.6, 4.8, 4.4, 
+4.8, 8.5, 7, 8.7, 6.6, 8.6, 5.3, 10.4, 6.4, 5.4, 8.2, 5.5, 6.2, 
+14.7, 10.5, 14.4, 5.8, 6.1, 6.2, 6.2, 7.2, 6, 10.6, 8.7, 7.5, 
+7.3, 5.2, 6.9, 6.6, 6.7, 5.2, 6.9, 7.5, 9, 5.9, 6.5, 6.6, 9.8, 
+4.7, 4.2, 4.8, 6.7, 6.5, 6.7, 5.9, 5.4, 6.9, 6.5, 6, 12, 7.5, 
+6.4, 7.3, 7.3, 6.4, 7, 5.9, 9.1, 6.7, 4, 6.5, 4.7), WAVE_NON = structure(c(1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
+1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
+2L), .Label = c("n", "w"), class = "factor"), logcones = c(2.99573227355399, 
+3.76120011569356, 3.71357206670431, 4.23410650459726, 1.79175946922805, 
+0, 3.09104245335832, 4.74493212836325, 3.63758615972639, 4.53259949315326, 
+4.44265125649032, 4.63472898822964, 4.59511985013459, 4.15888308335967, 
+2.30258509299405, 3.46573590279973, 3.58351893845611, 5.37989735354046, 
+3.3322045101752, 5.6970934865054, 3.61091791264422, 4.85203026391962, 
+3.17805383034795, 3.85014760171006, 3.3322045101752, 4.20469261939097, 
+2.484906649788, 3.04452243772342, 4.95582705760126, 1.38629436111989, 
+3.13549421592915, 3.68887945411394, 4.57471097850338, 5.33513133967075, 
+3.71357206670431, 5.44673737166631, 4.16666522380173, 5.31320597904179, 
+4.00733318523247, 3.49650756146648, 4.68675017298051, 4.96633503519968, 
+4.4188406077966, 4.18965474202643, 5.03695260241363, 4.82028156560504, 
+4.88280192258637, 3.78418963391826, 4.59511985013459, 3.63758615972639, 
+3.55534806148941, 2.39789527279837, 4.18965474202643, 3.58351893845611, 
+3.93182563272433, 2.99573227355399, 4.30406509320417, 3.52636052461616, 
+4.12713438504509, 2.30258509299405, 4.99043258677874, 0, 3.80666248977032, 
+3.76120011569356, 0, 4.12713438504509, 2.89037175789616, 3.98898404656427, 
+3.3322045101752, 0, 4.31748811353631, 3.61091791264422, 3.36729582998647, 
+4.04305126783455, 3.85014760171006, 0, 2.77258872223978, 3.29583686600433, 
+3.85014760171006, 2.77258872223978, 4.66343909411207, 0, 4.14313472639153, 
+3.2188758248682, 3.25809653802148, 3.73766961828337, 4.93447393313069, 
+4.35670882668959, 5.43241110102874, 3.36729582998647, 3.8286413964891, 
+4.06044301054642, 4.70048036579242, 3.36729582998647, 2.89037175789616, 
+4.52178857704904, 4.24849524204936, 4.47733681447821, 2.39789527279837, 
+4.18965474202643, 3.93182563272433, 3.3322045101752, 3.43398720448515, 
+4.46590811865458, 4.78749174278205, 3.13549421592915, 2.19722457733622, 
+4.00733318523247, 4.65396035015752, 2.70805020110221, 2.83321334405622, 
+1.79175946922805, 3.98898404656427, 3.71357206670431, 3.49650756146648, 
+4.74493212836325, 3.68887945411394, 3.63758615972639, 4.71849887129509, 
+5.4249500174814, 5.05624580534831, 3.76120011569356, 4.46590811865458, 
+4.55387689160054, 4.00733318523247, 0.693147180559945, 2.70805020110221, 
+3.80666248977032, 4.69134788222914, 4.76643833358421, 2.70805020110221, 
+4.30406509320417, 1.38629436111989, 2.83321334405622, 4.47733681447821, 
+4.12713438504509, 3.89182029811063, 0, 2.89037175789616, 1.79175946922805, 
+4.48863636973214, 2.484906649788, 4.89783979995091, 4.80402104473326, 
+5.11799381241676, 5.14749447681345, 4.15888308335967, 3.17805383034795, 
+1.6094379124341, 3.95124371858143, 2.39789527279837, 2.70805020110221, 
+4.36944785246702, 3.87120101090789, 3.46573590279973, 3.76120011569356, 
+3.2188758248682, 3.76120011569356, 4.02535169073515, 2.99573227355399, 
+4.15888308335967, 4.85203026391962, 2.30258509299405, 4.31748811353631, 
+4.79579054559674, 4.45434729625351, 3.95124371858143, 2.99573227355399, 
+4.88280192258637, 2.07944154167984, 3.17805383034795, 2.07944154167984, 
+2.30258509299405, 3.17805383034795, 4.02535169073515, 3.89182029811063, 
+2.63905732961526, 1.09861228866811, 2.30258509299405, 1.38629436111989, 
+1.6094379124341, 2.83321334405622, 0.693147180559945, 4.48863636973214, 
+2.19722457733622, 3.3322045101752, 2.83321334405622, 5.22035582507832, 
+2.70805020110221, 3.13549421592915, 3.25809653802148, 3.97029191355212, 
+1.09861228866811, 4.90527477843843, 4.40671924726425, 4.45434729625351, 
+1.38629436111989, 4.04305126783455, 2.89037175789616, 2.19722457733622, 
+2.39789527279837, 1.94591014905531, 4.24849524204936, 4.07753744390572, 
+0.693147180559945, 3.13549421592915, 1.38629436111989, 2.484906649788, 
+3.13549421592915, 1.09861228866811, 3.63758615972639, 2.19722457733622, 
+2.77258872223978, 4.12713438504509, 1.94591014905531, 2.94443897916644, 
+2.30258509299405, 4.70048036579242, 4.00733318523247, 1.6094379124341, 
+2.484906649788, 3.43398720448515, 0, 0, 1.38629436111989, 0, 
+2.83321334405622, 3.13549421592915, 2.30258509299405, 4.04305126783455, 
+2.89037175789616, 4.17438726989564, 3.66356164612965, 4.0943445622221, 
+3.63758615972639, 3.13549421592915, 3.73766961828337, 0.693147180559945, 
+3.13549421592915, 2.83321334405622, 2.89037175789616, 1.6094379124341
+)), .Names = c("TOTCONES", "DBH", "WAVE_NON", "logcones"), row.names = c(1L, 
+2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 
+16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 
+29L, 30L, 31L, 32L, 33L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 
+43L, 44L, 45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 
+56L, 58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 
+70L, 71L, 72L, 73L, 74L, 75L, 76L, 78L, 79L, 80L, 81L, 82L, 83L, 
+84L, 85L, 86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 
+97L, 98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L, 
+108L, 109L, 110L, 111L, 112L, 113L, 118L, 119L, 120L, 121L, 122L, 
+123L, 124L, 126L, 127L, 128L, 129L, 130L, 131L, 132L, 133L, 134L, 
+135L, 136L, 137L, 138L, 139L, 140L, 142L, 144L, 145L, 146L, 147L, 
+148L, 149L, 150L, 151L, 154L, 155L, 157L, 159L, 160L, 168L, 169L, 
+170L, 171L, 172L, 173L, 174L, 175L, 176L, 177L, 178L, 179L, 180L, 
+181L, 184L, 185L, 186L, 187L, 189L, 190L, 193L, 198L, 247L, 272L, 
+273L, 275L, 276L, 277L, 278L, 280L, 281L, 282L, 283L, 284L, 285L, 
+286L, 287L, 288L, 289L, 290L, 291L, 292L, 293L, 294L, 295L, 296L, 
+297L, 298L, 299L, 300L, 301L, 303L, 304L, 305L, 306L, 307L, 308L, 
+309L, 310L, 311L, 313L, 314L, 315L, 316L, 319L, 320L, 321L, 322L, 
+323L, 325L, 326L, 327L, 330L, 331L, 332L, 337L, 338L, 339L, 340L, 
+341L, 342L, 343L, 344L, 345L, 346L, 347L, 348L, 349L, 350L, 351L, 
+352L, 353L, 357L, 358L, 360L, 366L), na.action = structure(c(34L, 
+57L, 77L, 114L, 115L, 116L, 117L, 125L, 141L, 143L, 152L, 153L, 
+156L, 158L, 161L, 162L, 163L, 164L, 165L, 166L, 167L, 182L, 183L, 
+188L, 191L, 192L, 194L, 195L, 196L, 197L, 199L, 200L, 201L, 202L, 
+203L, 204L, 205L, 206L, 207L, 208L, 209L, 210L, 211L, 212L, 213L, 
+214L, 215L, 216L, 217L, 218L, 219L, 220L, 221L, 222L, 223L, 224L, 
+225L, 226L, 227L, 228L, 229L, 230L, 231L, 232L, 233L, 234L, 235L, 
+236L, 237L, 238L, 239L, 240L, 241L, 242L, 243L, 244L, 245L, 246L, 
+248L, 249L, 250L, 251L, 252L, 253L, 254L, 255L, 256L, 257L, 258L, 
+259L, 260L, 261L, 262L, 263L, 264L, 265L, 266L, 267L, 268L, 269L, 
+270L, 271L, 274L, 279L, 302L, 312L, 317L, 318L, 324L, 328L, 329L, 
+333L, 334L, 335L, 336L, 354L, 355L, 356L, 359L, 361L, 362L, 363L, 
+364L, 365L, 367L, 368L, 369L, 370L, 371L), .Names = c("34", "57", 
+"77", "114", "115", "116", "117", "125", "141", "143", "152", 
+"153", "156", "158", "161", "162", "163", "164", "165", "166", 
+"167", "182", "183", "188", "191", "192", "194", "195", "196", 
+"197", "199", "200", "201", "202", "203", "204", "205", "206", 
+"207", "208", "209", "210", "211", "212", "213", "214", "215", 
+"216", "217", "218", "219", "220", "221", "222", "223", "224", 
+"225", "226", "227", "228", "229", "230", "231", "232", "233", 
+"234", "235", "236", "237", "238", "239", "240", "241", "242", 
+"243", "244", "245", "246", "248", "249", "250", "251", "252", 
+"253", "254", "255", "256", "257", "258", "259", "260", "261", 
+"262", "263", "264", "265", "266", "267", "268", "269", "270", 
+"271", "274", "279", "302", "312", "317", "318", "324", "328", 
+"329", "333", "334", "335", "336", "354", "355", "356", "359", 
+"361", "362", "363", "364", "365", "367", "368", "369", "370", 
+"371"), class = "omit"), class = "data.frame")
+
+
+m1 <- mle2(logcones ~ dnorm(i + slope*log(DBH), sd),
+           parameters= list(i ~ WAVE_NON-1, slope ~ WAVE_NON-1),
+           data = firdata,
+           start = list(i=c(-2,-2),slope=c(2.5,2.5),sd=1))
+
+ancovafun = function(i1,i2,slope1,slope2,sigma) {
+  int = c(i1,i2)[WAVE_NON]
+  slope = c(slope1,slope2)[WAVE_NON]
+  Y.pred =  int+ slope*log(DBH)
+  r <- -sum(dnorm(logcones,mean=Y.pred,sd=sigma,log=TRUE))
+  ## cat(i1,i2,slope1,slope2,sigma,r,"\n")
+  r
+}
+m2 <- mle2(ancovafun,start=list(i1=-2,i2=-2,slope1=2.5,slope2=2.5,sigma=1),
+  data=firdata)
+
+
+m3 <- mle2(logcones ~ dnorm(mu, sd),
+     parameters= list(mu ~ WAVE_NON*log(DBH)),
+     data = firdata,
+     start = list(mu=1,sd=1))
+
+stopifnot(all.equal(AIC(m1),AIC(m2)), all.equal(AIC(m2),AIC(m3))) ## pairwise: a 3rd positional arg would be 'tolerance'
+
+## m4 <- mle2(logcones ~ dnorm(i + slope*log(DBH), sd),
+##           parameters= list(i ~ WAVE_NON-1, slope ~ WAVE_NON-1),
+##           data = firdata,
+##           start = c(-2,-2,2.5,2.5,sd=1))
+
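What the AIC check compares, in brief: three parameterizations of the
same ANCOVA -- per-group intercepts and slopes via the formula
interface (m1), a hand-coded negative log-likelihood (m2), and the
same surface in treatment contrasts (m3); equal AICs imply identical
fits. To inspect the mapping after running the test:

    coef(m1)  ## group-wise intercepts and slopes
    coef(m3)  ## contrast form: intercept, group offset, slope, interaction
    c(AIC(m1), AIC(m2), AIC(m3))  ## all equal
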
diff --git a/tests/startvals2.Rout.save b/tests/startvals2.Rout.save
new file mode 100755
index 0000000..ce0849f
--- /dev/null
+++ b/tests/startvals2.Rout.save
@@ -0,0 +1,231 @@
+
+R Under development (unstable) (2014-05-14 r65609) -- "Unsuffered Consequences"
+Copyright (C) 2014 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> 
+> ## fir data from emdbook package ...
+> firdata <- structure(list(TOTCONES = c(19, 42, 40, 68, 5, 0, 21, 114, 37, 
++ 92, 84, 102, 98, 63, 9, 31, 35, 216, 27, 297, 36, 127, 23, 46, 
++ 27, 66, 11, 20, 141, 3, 22, 39, 96, 206.5, 40, 231, 63.5, 202, 
++ 54, 32, 107.5, 142.5, 82, 65, 153, 123, 131, 43, 98, 37, 34, 
++ 10, 65, 35, 50, 19, 73, 33, 61, 9, 146, 0, 44, 42, 0, 61, 17, 
++ 53, 27, 0, 74, 36, 28, 56, 46, 0, 15, 26, 46, 15, 105, 0, 62, 
++ 24, 25, 41, 138, 77, 227.7, 28, 45, 57, 109, 28, 17, 91, 69, 
++ 87, 10, 65, 50, 27, 30, 86, 119, 22, 8, 54, 104, 14, 16, 5, 53, 
++ 40, 32, 114, 39, 37, 111, 226, 156, 42, 86, 94, 54, 1, 14, 44, 
++ 108, 116.5, 14, 73, 3, 16, 87, 61, 48, 0, 17, 5, 88, 11, 133, 
++ 121, 166, 171, 63, 23, 4, 51, 10, 14, 78, 47, 31, 42, 24, 42, 
++ 55, 19, 63, 127, 9, 74, 120, 85, 51, 19, 131, 7, 23, 7, 9, 23, 
++ 55, 48, 13, 2, 9, 3, 4, 16, 1, 88, 8, 27, 16, 184, 14, 22, 25, 
++ 52, 2, 134, 81, 85, 3, 56, 17, 8, 10, 6, 69, 58, 1, 22, 3, 11, 
++ 22, 2, 37, 8, 15, 61, 6, 18, 9, 109, 54, 4, 11, 30, 0, 0, 3, 
++ 0, 16, 22, 9, 56, 17, 64, 38, 59, 37, 22, 41, 1, 22, 16, 17, 
++ 4), DBH = c(9.4, 10.6, 7.7, 10.6, 8.7, 10.1, 8.1, 11.6, 10.1, 
++ 13.3, 10, 13.4, 9.7, 7.4, 8.7, 8.6, 7.9, 14.2, 9.5, 15.9, 6, 
++ 10.6, 7.3, 10.3, 8.4, 10.2, 13.8, 9.4, 8.1, 9.6, 7.3, 7.4, 10.3, 
++ 13.4, 9.2, 13.9, 10.9, 17.4, 10.2, 8.2, 11.3, 16.1, 12.3, 8.3, 
++ 12.4, 12.5, 11.3, 7.8, 11.6, 10, 7, 5.7, 7.7, 8.9, 8.5, 8.5, 
++ 10.7, 10.2, 10.8, 9, 9.4, 7.6, 10.6, 10, 8, 7.4, 9.1, 6.7, 9.7, 
++ 6.8, 8.6, 9.1, 6.3, 6.7, 10.9, 9.5, 9.9, 6.8, 9.8, 7.7, 12.1, 
++ 8.2, 10, 9.6, 9.2, 8.2, 11.3, 11.6, 15.7, 9.1, 8.9, 8.7, 11, 
++ 6.6, 7.1, 9, 12.4, 12.1, 7.5, 9, 8, 10.9, 9.2, 10.1, 12.1, 7, 
++ 6.8, 8.6, 11.6, 6.6, 6.7, 6.8, 8.5, 7.8, 7.9, 9.8, 6.2, 6.7, 
++ 15.4, 9.2, 12.9, 6.7, 9.6, 8.4, 8, 8.7, 6.7, 9.2, 9.5, 8, 5.5, 
++ 8.5, 5.7, 5.6, 8, 6.5, 9.6, 6.1, 7.9, 5.9, 11, 8.2, 12.8, 12.8, 
++ 12.5, 13.7, 11.8, 6.3, 6.3, 8.2, 6.2, 6.7, 9.8, 9.4, 6.7, 6, 
++ 4.9, 9.6, 7.5, 8.4, 7.4, 9.9, 7.4, 9.5, 13.9, 6.9, 9.4, 7.4, 
++ 12.8, 5.8, 7.2, 5.6, 6.9, 11.3, 9.6, 6.8, 6.9, 6.6, 4.8, 4.4, 
++ 4.8, 8.5, 7, 8.7, 6.6, 8.6, 5.3, 10.4, 6.4, 5.4, 8.2, 5.5, 6.2, 
++ 14.7, 10.5, 14.4, 5.8, 6.1, 6.2, 6.2, 7.2, 6, 10.6, 8.7, 7.5, 
++ 7.3, 5.2, 6.9, 6.6, 6.7, 5.2, 6.9, 7.5, 9, 5.9, 6.5, 6.6, 9.8, 
++ 4.7, 4.2, 4.8, 6.7, 6.5, 6.7, 5.9, 5.4, 6.9, 6.5, 6, 12, 7.5, 
++ 6.4, 7.3, 7.3, 6.4, 7, 5.9, 9.1, 6.7, 4, 6.5, 4.7), WAVE_NON = structure(c(1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
++ 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 
++ 2L), .Label = c("n", "w"), class = "factor"), logcones = c(2.99573227355399, 
++ 3.76120011569356, 3.71357206670431, 4.23410650459726, 1.79175946922805, 
++ 0, 3.09104245335832, 4.74493212836325, 3.63758615972639, 4.53259949315326, 
++ 4.44265125649032, 4.63472898822964, 4.59511985013459, 4.15888308335967, 
++ 2.30258509299405, 3.46573590279973, 3.58351893845611, 5.37989735354046, 
++ 3.3322045101752, 5.6970934865054, 3.61091791264422, 4.85203026391962, 
++ 3.17805383034795, 3.85014760171006, 3.3322045101752, 4.20469261939097, 
++ 2.484906649788, 3.04452243772342, 4.95582705760126, 1.38629436111989, 
++ 3.13549421592915, 3.68887945411394, 4.57471097850338, 5.33513133967075, 
++ 3.71357206670431, 5.44673737166631, 4.16666522380173, 5.31320597904179, 
++ 4.00733318523247, 3.49650756146648, 4.68675017298051, 4.96633503519968, 
++ 4.4188406077966, 4.18965474202643, 5.03695260241363, 4.82028156560504, 
++ 4.88280192258637, 3.78418963391826, 4.59511985013459, 3.63758615972639, 
++ 3.55534806148941, 2.39789527279837, 4.18965474202643, 3.58351893845611, 
++ 3.93182563272433, 2.99573227355399, 4.30406509320417, 3.52636052461616, 
++ 4.12713438504509, 2.30258509299405, 4.99043258677874, 0, 3.80666248977032, 
++ 3.76120011569356, 0, 4.12713438504509, 2.89037175789616, 3.98898404656427, 
++ 3.3322045101752, 0, 4.31748811353631, 3.61091791264422, 3.36729582998647, 
++ 4.04305126783455, 3.85014760171006, 0, 2.77258872223978, 3.29583686600433, 
++ 3.85014760171006, 2.77258872223978, 4.66343909411207, 0, 4.14313472639153, 
++ 3.2188758248682, 3.25809653802148, 3.73766961828337, 4.93447393313069, 
++ 4.35670882668959, 5.43241110102874, 3.36729582998647, 3.8286413964891, 
++ 4.06044301054642, 4.70048036579242, 3.36729582998647, 2.89037175789616, 
++ 4.52178857704904, 4.24849524204936, 4.47733681447821, 2.39789527279837, 
++ 4.18965474202643, 3.93182563272433, 3.3322045101752, 3.43398720448515, 
++ 4.46590811865458, 4.78749174278205, 3.13549421592915, 2.19722457733622, 
++ 4.00733318523247, 4.65396035015752, 2.70805020110221, 2.83321334405622, 
++ 1.79175946922805, 3.98898404656427, 3.71357206670431, 3.49650756146648, 
++ 4.74493212836325, 3.68887945411394, 3.63758615972639, 4.71849887129509, 
++ 5.4249500174814, 5.05624580534831, 3.76120011569356, 4.46590811865458, 
++ 4.55387689160054, 4.00733318523247, 0.693147180559945, 2.70805020110221, 
++ 3.80666248977032, 4.69134788222914, 4.76643833358421, 2.70805020110221, 
++ 4.30406509320417, 1.38629436111989, 2.83321334405622, 4.47733681447821, 
++ 4.12713438504509, 3.89182029811063, 0, 2.89037175789616, 1.79175946922805, 
++ 4.48863636973214, 2.484906649788, 4.89783979995091, 4.80402104473326, 
++ 5.11799381241676, 5.14749447681345, 4.15888308335967, 3.17805383034795, 
++ 1.6094379124341, 3.95124371858143, 2.39789527279837, 2.70805020110221, 
++ 4.36944785246702, 3.87120101090789, 3.46573590279973, 3.76120011569356, 
++ 3.2188758248682, 3.76120011569356, 4.02535169073515, 2.99573227355399, 
++ 4.15888308335967, 4.85203026391962, 2.30258509299405, 4.31748811353631, 
++ 4.79579054559674, 4.45434729625351, 3.95124371858143, 2.99573227355399, 
++ 4.88280192258637, 2.07944154167984, 3.17805383034795, 2.07944154167984, 
++ 2.30258509299405, 3.17805383034795, 4.02535169073515, 3.89182029811063, 
++ 2.63905732961526, 1.09861228866811, 2.30258509299405, 1.38629436111989, 
++ 1.6094379124341, 2.83321334405622, 0.693147180559945, 4.48863636973214, 
++ 2.19722457733622, 3.3322045101752, 2.83321334405622, 5.22035582507832, 
++ 2.70805020110221, 3.13549421592915, 3.25809653802148, 3.97029191355212, 
++ 1.09861228866811, 4.90527477843843, 4.40671924726425, 4.45434729625351, 
++ 1.38629436111989, 4.04305126783455, 2.89037175789616, 2.19722457733622, 
++ 2.39789527279837, 1.94591014905531, 4.24849524204936, 4.07753744390572, 
++ 0.693147180559945, 3.13549421592915, 1.38629436111989, 2.484906649788, 
++ 3.13549421592915, 1.09861228866811, 3.63758615972639, 2.19722457733622, 
++ 2.77258872223978, 4.12713438504509, 1.94591014905531, 2.94443897916644, 
++ 2.30258509299405, 4.70048036579242, 4.00733318523247, 1.6094379124341, 
++ 2.484906649788, 3.43398720448515, 0, 0, 1.38629436111989, 0, 
++ 2.83321334405622, 3.13549421592915, 2.30258509299405, 4.04305126783455, 
++ 2.89037175789616, 4.17438726989564, 3.66356164612965, 4.0943445622221, 
++ 3.63758615972639, 3.13549421592915, 3.73766961828337, 0.693147180559945, 
++ 3.13549421592915, 2.83321334405622, 2.89037175789616, 1.6094379124341
++ )), .Names = c("TOTCONES", "DBH", "WAVE_NON", "logcones"), row.names = c(1L, 
++ 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 
++ 16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 
++ 29L, 30L, 31L, 32L, 33L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 
++ 43L, 44L, 45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 
++ 56L, 58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 
++ 70L, 71L, 72L, 73L, 74L, 75L, 76L, 78L, 79L, 80L, 81L, 82L, 83L, 
++ 84L, 85L, 86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 
++ 97L, 98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L, 
++ 108L, 109L, 110L, 111L, 112L, 113L, 118L, 119L, 120L, 121L, 122L, 
++ 123L, 124L, 126L, 127L, 128L, 129L, 130L, 131L, 132L, 133L, 134L, 
++ 135L, 136L, 137L, 138L, 139L, 140L, 142L, 144L, 145L, 146L, 147L, 
++ 148L, 149L, 150L, 151L, 154L, 155L, 157L, 159L, 160L, 168L, 169L, 
++ 170L, 171L, 172L, 173L, 174L, 175L, 176L, 177L, 178L, 179L, 180L, 
++ 181L, 184L, 185L, 186L, 187L, 189L, 190L, 193L, 198L, 247L, 272L, 
++ 273L, 275L, 276L, 277L, 278L, 280L, 281L, 282L, 283L, 284L, 285L, 
++ 286L, 287L, 288L, 289L, 290L, 291L, 292L, 293L, 294L, 295L, 296L, 
++ 297L, 298L, 299L, 300L, 301L, 303L, 304L, 305L, 306L, 307L, 308L, 
++ 309L, 310L, 311L, 313L, 314L, 315L, 316L, 319L, 320L, 321L, 322L, 
++ 323L, 325L, 326L, 327L, 330L, 331L, 332L, 337L, 338L, 339L, 340L, 
++ 341L, 342L, 343L, 344L, 345L, 346L, 347L, 348L, 349L, 350L, 351L, 
++ 352L, 353L, 357L, 358L, 360L, 366L), na.action = structure(c(34L, 
++ 57L, 77L, 114L, 115L, 116L, 117L, 125L, 141L, 143L, 152L, 153L, 
++ 156L, 158L, 161L, 162L, 163L, 164L, 165L, 166L, 167L, 182L, 183L, 
++ 188L, 191L, 192L, 194L, 195L, 196L, 197L, 199L, 200L, 201L, 202L, 
++ 203L, 204L, 205L, 206L, 207L, 208L, 209L, 210L, 211L, 212L, 213L, 
++ 214L, 215L, 216L, 217L, 218L, 219L, 220L, 221L, 222L, 223L, 224L, 
++ 225L, 226L, 227L, 228L, 229L, 230L, 231L, 232L, 233L, 234L, 235L, 
++ 236L, 237L, 238L, 239L, 240L, 241L, 242L, 243L, 244L, 245L, 246L, 
++ 248L, 249L, 250L, 251L, 252L, 253L, 254L, 255L, 256L, 257L, 258L, 
++ 259L, 260L, 261L, 262L, 263L, 264L, 265L, 266L, 267L, 268L, 269L, 
++ 270L, 271L, 274L, 279L, 302L, 312L, 317L, 318L, 324L, 328L, 329L, 
++ 333L, 334L, 335L, 336L, 354L, 355L, 356L, 359L, 361L, 362L, 363L, 
++ 364L, 365L, 367L, 368L, 369L, 370L, 371L), .Names = c("34", "57", 
++ "77", "114", "115", "116", "117", "125", "141", "143", "152", 
++ "153", "156", "158", "161", "162", "163", "164", "165", "166", 
++ "167", "182", "183", "188", "191", "192", "194", "195", "196", 
++ "197", "199", "200", "201", "202", "203", "204", "205", "206", 
++ "207", "208", "209", "210", "211", "212", "213", "214", "215", 
++ "216", "217", "218", "219", "220", "221", "222", "223", "224", 
++ "225", "226", "227", "228", "229", "230", "231", "232", "233", 
++ "234", "235", "236", "237", "238", "239", "240", "241", "242", 
++ "243", "244", "245", "246", "248", "249", "250", "251", "252", 
++ "253", "254", "255", "256", "257", "258", "259", "260", "261", 
++ "262", "263", "264", "265", "266", "267", "268", "269", "270", 
++ "271", "274", "279", "302", "312", "317", "318", "324", "328", 
++ "329", "333", "334", "335", "336", "354", "355", "356", "359", 
++ "361", "362", "363", "364", "365", "367", "368", "369", "370", 
++ "371"), class = "omit"), class = "data.frame")
+> 
+> 
+> m1 <- mle2(logcones ~ dnorm(i + slope*log(DBH), sd),
++            parameters= list(i ~ WAVE_NON-1, slope ~ WAVE_NON-1),
++            data = firdata,
++            start = list(i=c(-2,-2),slope=c(2.5,2.5),sd=1))
+Warning message:
+In calc_mle2_function(minuslogl, parameters, start = start, parnames = parnames,  :
+  using dnorm() with sd implicitly set to 1 is rarely sensible
+> 
+> ancovafun = function(i1,i2,slope1,slope2,sigma) {
++   int = c(i1,i2)[WAVE_NON]
++   slope = c(slope1,slope2)[WAVE_NON]
++   Y.pred =  int+ slope*log(DBH)
++   r <- -sum(dnorm(logcones,mean=Y.pred,sd=sigma,log=TRUE))
++   ## cat(i1,i2,slope1,slope2,sigma,r,"\n")
++   r
++ }
+> m2 <- mle2(ancovafun,start=list(i1=-2,i2=-2,slope1=2.5,slope2=2.5,sigma=1),
++   data=firdata)
+> 
+> 
+> m3 <- mle2(logcones ~ dnorm(mu, sd),
++      parameters= list(mu ~ WAVE_NON*log(DBH)),
++      data = firdata,
++      start = list(mu=1,sd=1))
+Warning messages:
+1: In calc_mle2_function(minuslogl, parameters, start = start, parnames = parnames,  :
+  using dnorm() with sd implicitly set to 1 is rarely sensible
+2: In dnorm(x = c(2.99573227355399, 3.76120011569356, 3.71357206670431,  :
+  NaNs produced
+3: In dnorm(x = c(2.99573227355399, 3.76120011569356, 3.71357206670431,  :
+  NaNs produced
+4: In dnorm(x = c(2.99573227355399, 3.76120011569356, 3.71357206670431,  :
+  NaNs produced
+5: In dnorm(x = c(2.99573227355399, 3.76120011569356, 3.71357206670431,  :
+  NaNs produced
+> 
+> stopifnot(all.equal(AIC(m1),AIC(m2),AIC(m3)))
+> 
+> ## m4 <- mle2(logcones ~ dnorm(i + slope*log(DBH), sd),
+> ##           parameters= list(i ~ WAVE_NON-1, slope ~ WAVE_NON-1),
+> ##           data = firdata,
+> ##           start = c(-2,-2,2.5,2.5,sd=1))
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  1.112   1.364   2.438 
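
A note on the warning above: bbmle flags formulas where it cannot see an
explicitly named sd argument to dnorm(), since leaving sd at its default
of 1 is rarely what is wanted.  A minimal sketch of the same fit with the
arguments named (an assumption based on the warning text; this variant is
not part of the test file):

## hypothetical variant of m1 with dnorm() arguments named explicitly
m1b <- mle2(logcones ~ dnorm(mean = i + slope*log(DBH), sd = sd),
            parameters = list(i ~ WAVE_NON - 1, slope ~ WAVE_NON - 1),
            data = firdata,
            start = list(i = c(-2, -2), slope = c(2.5, 2.5), sd = 1))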
diff --git a/tests/test-relist1.R b/tests/test-relist1.R
new file mode 100755
index 0000000..29f5321
--- /dev/null
+++ b/tests/test-relist1.R
@@ -0,0 +1,17 @@
+library(bbmle)
+set.seed(1001)
+f <- factor(rep(1:3,each=50))
+kvals <- c(1,2,5)
+muvals <- c(10,2,5)
+y <- rnbinom(length(f),size=kvals[f],mu=muvals[f])
+plot(y)
+
+NLL <- function(p) {
+  kvals <- p[1:3]
+  muvals <- p[4:6]
+  -sum(dnbinom(y,size=kvals[f],mu=muvals[f],log=TRUE))
+}
+parnames(NLL) <- c("k1","k2","k3","mu1","mu2","mu3")
+svec <- c(kvals,muvals)
+names(svec) <- parnames(NLL)
+m1 <- mle2(NLL,start=svec,vecpar=TRUE)
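
The vecpar=TRUE idiom above lets the negative log-likelihood take a single
parameter vector, with parnames() supplying the names that mle2 needs to
relist it.  A minimal sketch of unpacking the fitted vector afterwards
(the grouping by position mirrors NLL and is this sketch's assumption,
not part of the test):

est    <- coef(m1)   # named vector k1, k2, k3, mu1, mu2, mu3
k.hat  <- est[1:3]   # size parameters, one per factor level
mu.hat <- est[4:6]   # mean parameters, one per factor level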
diff --git a/tests/test-relist1.Rout.save b/tests/test-relist1.Rout.save
new file mode 100755
index 0000000..ec80e59
--- /dev/null
+++ b/tests/test-relist1.Rout.save
@@ -0,0 +1,41 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> set.seed(1001)
+> f <- factor(rep(1:3,each=50))
+> kvals <- c(1,2,5)
+> muvals <- c(10,2,5)
+> y <- rnbinom(length(f),size=kvals[f],mu=muvals[f])
+> plot(y)
+> 
+> NLL <- function(p) {
++   kvals <- p[1:3]
++   muvals <- p[4:6]
++   -sum(dnbinom(y,size=kvals[f],mu=muvals[f],log=TRUE))
++ }
+> parnames(NLL) <- c("k1","k2","k3","mu1","mu2","mu3")
+> svec <- c(kvals,muvals)
+> names(svec) <- parnames(NLL)
+> m1 <- mle2(NLL,start=svec,vecpar=TRUE)
+> 
+> proc.time()
+   user  system elapsed 
+  0.988   1.116   1.990 
diff --git a/tests/testbounds.R b/tests/testbounds.R
new file mode 100755
index 0000000..57e110b
--- /dev/null
+++ b/tests/testbounds.R
@@ -0,0 +1,14 @@
+x <- runif(10)
+y <- 1+x+rnorm(10,sd=0.1)
+d <- data.frame(x,y)
+
+library(bbmle)
+m1 <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),data=d)
+
+m2 <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
+  method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf),data=d)
+
+m2F <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
+  method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf),
+  fixed=list(a=1),data=d)
+
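
The m2F call above is what produces the length-mismatch warning recorded in
the saved output below: with a fixed at 1, only two parameters remain free,
yet lower still names three.  A minimal sketch of a bounds vector matched to
the non-fixed parameters only (hypothetical, not part of the test):

m2F2 <- mle2(y ~ dnorm(a + b*x, sd = exp(s)),
             start = list(a = 1, b = 1, s = log(0.1)),
             method = "L-BFGS-B",
             lower = c(b = 0, s = -Inf),  # bounds for the free parameters only
             fixed = list(a = 1), data = d)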
diff --git a/tests/testbounds.Rout b/tests/testbounds.Rout
new file mode 100755
index 0000000..d484a83
--- /dev/null
+++ b/tests/testbounds.Rout
@@ -0,0 +1,39 @@
+
+R version 2.8.1 (2008-12-22)
+Copyright (C) 2008 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> x <- runif(10)
+> y <- 1+x+rnorm(10,sd=0.1)
+> 
+> library(bbmle)
+> m1 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)))
+> 
+> m2 = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
++   method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf))
+> 
+> m2F = mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
++   method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf),
++   fixed=list(a=1))
+Warning message:
+In mle2(y ~ dnorm(a + b * x, sd = exp(s)), start = list(a = 1, b = 1,  :
+  length mismatch between lower/upper and number of non-fixed parameters
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  1.008   0.024   1.044 
diff --git a/tests/testbounds.Rout.save b/tests/testbounds.Rout.save
new file mode 100755
index 0000000..a78043b
--- /dev/null
+++ b/tests/testbounds.Rout.save
@@ -0,0 +1,46 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> x <- runif(10)
+> y <- 1+x+rnorm(10,sd=0.1)
+> d <- data.frame(x,y)
+> 
+> library(bbmle)
+Loading required package: stats4
+> m1 <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),data=d)
+> 
+> m2 <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
++   method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf),data=d)
+> 
+> m2F <- mle2(y~dnorm(a+b*x,sd=exp(s)),start=list(a=1,b=1,s=log(0.1)),
++   method="L-BFGS-B",lower=c(a=0,b=0,s=-Inf),
++   fixed=list(a=1),data=d)
+Warning messages:
+1: In mle2(y ~ dnorm(a + b * x, sd = exp(s)), start = list(a = 1, b = 1,  :
+  length mismatch between lower/upper and number of non-fixed parameters: # lower=3, # upper=0, # non-fixed=2
+2: In oout$par == call$lower :
+  longer object length is not a multiple of shorter object length
+3: In mle2(y ~ dnorm(a + b * x, sd = exp(s)), start = list(a = 1, b = 1,  :
+  some parameters are on the boundary: variance-covariance calculations based on Hessian may be unreliable
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  0.820   1.052   1.857 
diff --git a/tests/testderiv.R b/tests/testderiv.R
new file mode 100755
index 0000000..8959fd9
--- /dev/null
+++ b/tests/testderiv.R
@@ -0,0 +1,59 @@
+library(bbmle)
+old_opt <- options(digits=3)
+## source("../R/dists.R")
+## source("../R/mle.R")
+
+## an attempt to sketch out by hand
+##  how one would derive an analytic
+##  gradient function for a formula-specified
+##  likelihood and use it ...
+
+## chain rule should be:
+
+## deriv(probability distribution)/[prob params] *
+##    deriv([prob params])/[model params] *
+##   {OPTIONAL} deriv([model params])/[linear model params]
+
+set.seed(1001)
+x <- rbinom(50,size=10,prob=0.4)
+suppressWarnings(mle2(x~dbinom(prob=p,size=10),start=list(p=0.3),data=data.frame(x)))
+
+## step 1: construct gradient function for simplest example
+f <- sbinom(prob=0.1,size=1)$formula
+
+d1 <- deriv(parse(text=f),"prob",function.arg=TRUE)
+
+## step 2: chain rule step #1
+mle2(x~dbinom(prob=plogis(logitp),size=10),start=list(logitp=-1),
+     data=data.frame(x))
+
+f <- sbinom(prob=NA,size=NA)$formula
+
+## note: plogis is not in derivatives table!!
+##  will need to extend by text substitution ...
+gsub("plogis(\\([^)]+\\))",
+     "(1+exp(\\1))^(-1)",
+     "plogis(logitprob)")
+
+f2 <- gsub("plogis(\\([^)]+\\))",
+     "(1+exp(\\1))^(-1)","plogis(logitp)")
+
+## start with a single parameter (ignore 'size')
+fun1 <- deriv(parse(text=f),c("prob"),function.arg=TRUE)
+fun2 <- deriv(parse(text=f2),"logitp", function.arg=TRUE)
+
+size <- 10
+a1 <- attr(fun2(logitp=0),"gradient")
+a2 <- attr(fun1(prob=plogis(0)),"gradient")
+
+## compute gradient by variable and sum
+colSums(apply(a1,2,"*",a2))
+## rep(a1,length(x))*a2
+
+
+## eventually we will want to do something tricky to
+## 'memoise' results because optim() requires the
+## objective function and gradient to be computed
+## *separately*.  Not worth worrying about this in the
+## first pass!
+options(old_opt)
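
Since the whole point of the sketch above is an analytic chain-rule gradient,
a finite-difference cross-check is a natural companion.  A minimal sketch,
reusing fun2 from the script above and assuming the numDeriv package is
available (it is not used by the test):

## hypothetical check: analytic vs. numerical gradient of the plogis term
library(numDeriv)
g_analytic <- attr(fun2(logitp = 0), "gradient")[1, ]
g_numeric  <- grad(function(lp) as.numeric(fun2(logitp = lp)), 0)
all.equal(unname(g_analytic), g_numeric)   # both should be -0.25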
diff --git a/tests/testderiv.Rout.save b/tests/testderiv.Rout.save
new file mode 100755
index 0000000..cb98b4a
--- /dev/null
+++ b/tests/testderiv.Rout.save
@@ -0,0 +1,107 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> old_opt <- options(digits=3)
+> ## source("../R/dists.R")
+> ## source("../R/mle.R")
+> 
+> ## an attempt to sketch out by hand
+> ##  how one would derive an analytic
+> ##  gradient function for a formula-specified
+> ##  likelihood and use it ...
+> 
+> ## chain rule should be:
+> 
+> ## deriv(probability distribution)/[prob params] *
+> ##    deriv([prob params])/[model params] *
+> ##   {OPTIONAL} deriv([model params])/[linear model params]
+> 
+> set.seed(1001)
+> x <- rbinom(50,size=10,prob=0.4)
+> suppressWarnings(mle2(x~dbinom(prob=p,size=10),start=list(p=0.3),data=data.frame(x)))
+
+Call:
+mle2(minuslogl = x ~ dbinom(prob = p, size = 10), start = list(p = 0.3), 
+    data = data.frame(x))
+
+Coefficients:
+    p 
+0.396 
+
+Log-likelihood: -97.2 
+> 
+> ## step 1: construct gradient function for simplest example
+> f <- sbinom(prob=0.1,size=1)$formula
+> 
+> d1 <- deriv(parse(text=f),"prob",function.arg=TRUE)
+> 
+> ## step 2: chain rule step #1
+> mle2(x~dbinom(prob=plogis(logitp),size=10),start=list(logitp=-1),
++      data=data.frame(x))
+
+Call:
+mle2(minuslogl = x ~ dbinom(prob = plogis(logitp), size = 10), 
+    start = list(logitp = -1), data = data.frame(x))
+
+Coefficients:
+logitp 
+-0.422 
+
+Log-likelihood: -97.2 
+> 
+> f <- sbinom(prob=NA,size=NA)$formula
+> 
+> ## note: plogis is not in derivatives table!!
+> ##  will need to extend by text substitution ...
+> gsub("plogis(\\([^)]+\\))",
++      "(1+exp(\\1))^(-1)",
++      "plogis(logitprob)")
+[1] "(1+exp((logitprob)))^(-1)"
+> 
+> f2 <- gsub("plogis(\\([^)]+\\))",
++      "(1+exp(\\1))^(-1)","plogis(logitp)")
+> 
+> ## start with a single parameter (ignore 'size')
+> fun1 <- deriv(parse(text=f),c("prob"),function.arg=TRUE)
+> fun2 <- deriv(parse(text=f2),"logitp", function.arg=TRUE)
+> 
+> size <- 10
+> a1 <- attr(fun2(logitp=0),"gradient")
+> a2 <- attr(fun1(prob=plogis(0)),"gradient")
+> 
+> ## compute gradient by variable and sum
+> colSums(apply(a1,2,"*",a2))
+logitp 
+    52 
+> ## rep(a1,length(x))*a2
+> 
+> 
+> ## eventually we will want to do something tricky to
+> ## 'memoise' results because optim() requires the
+> ## objective function and gradient to be computed
+> ## *separately*.  Not worth worrying about this in the
+> ## first pass!
+> options(old_opt)
+> 
+> proc.time()
+   user  system elapsed 
+  0.640   1.272   1.801 
diff --git a/tests/testenv.R b/tests/testenv.R
new file mode 100755
index 0000000..bc8abe9
--- /dev/null
+++ b/tests/testenv.R
@@ -0,0 +1,16 @@
+library(bbmle)
+f <- function() {
+  maxit <- 1000
+  d <- data.frame(x=0:10,
+                  y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+  mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
+   start=list(lymax=0,lhalf=0),
+   data=d,
+     control=list(maxit=maxit),
+   parameters=list(lymax~1,lhalf~1))
+}
+
+m1 <- f()
+p <- profile(m1)
+## FIXME: check results (need to save in an environment-friendly way!)
+print(head(as.data.frame(p)),digits=3)
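
The point of wrapping the fit in f() is that profile() must later
re-evaluate the call, finding maxit and d in the function's environment
rather than in the global workspace.  Once the profile exists, the standard
follow-up is a profile-based confidence interval; a minimal sketch (not
part of the saved test output):

confint(p)               # profile (likelihood-ratio) intervals, 95% default
confint(p, level = 0.9)  # or any other level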
diff --git a/tests/testenv.Rout.save b/tests/testenv.Rout.save
new file mode 100755
index 0000000..f0f9f83
--- /dev/null
+++ b/tests/testenv.Rout.save
@@ -0,0 +1,48 @@
+
+R Under development (unstable) (2012-12-14 r61321) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> f <- function() {
++   maxit <- 1000
++   d <- data.frame(x=0:10,
++                   y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
++   mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
++    start=list(lymax=0,lhalf=0),
++    data=d,
++      control=list(maxit=maxit),
++    parameters=list(lymax~1,lhalf~1))
++ }
+> 
+> m1 <- f()
+> p <- profile(m1)
+> ## FIXME: check results (need to save in an environment-friendly way!)
+> print(head(as.data.frame(p)),digits=3)
+        param      z par.vals.lymax par.vals.lhalf focal
+lymax.1 lymax -5.469           2.56          27.21  2.56
+lymax.2 lymax -3.204           2.67           2.22  2.67
+lymax.3 lymax -2.569           2.78           1.96  2.78
+lymax.4 lymax -1.931           2.89           1.73  2.89
+lymax.5 lymax -1.292           3.00           1.51  3.00
+lymax.6 lymax -0.648           3.11           1.31  3.11
+> 
+> proc.time()
+   user  system elapsed 
+  0.920   1.088   1.862 
diff --git a/tests/testparpred.R b/tests/testparpred.R
new file mode 100755
index 0000000..8833cfe
--- /dev/null
+++ b/tests/testparpred.R
@@ -0,0 +1,28 @@
+## set up a data frame for prediction
+
+set.seed(1001)
+f = factor(rep(letters[1:4],each=20))
+x = runif(80)
+u = rnorm(4)
+y = rnorm(80,mean=2+x*(3+u[f]),sd=0.1)
+dat = data.frame(f,x,y)
+
+## fit a model ... could easily do by lm() but want to
+##   demonstrate the problem
+
+library(bbmle)
+m1 = mle2(y~dnorm(a+b*x,sd=exp(logs)),parameters=list(b~f),data=dat,
+  start=list(a=0,b=2,logs=-3))
+
+## data frame for prediction
+pp0 = expand.grid(x=seq(0,1,length=11),
+  f=levels(dat$f))
+
+## combine frame and model data: have to keep the model data
+##  around, because it contains other information needed for
+##  prediction.
+
+nrow(predict(m1,pp0))
+
+
+
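
As the comment above notes, the fitted model keeps its original data because
prediction needs information from it (here, the factor levels of f).  A
minimal sketch of attaching the predictions to the prediction frame
(hypothetical; the test itself only checks the row count):

pred <- predict(m1, pp0)   # one fitted mean per row of pp0
head(cbind(pp0, pred))     # 44 rows: 11 x-values for each of 4 levels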
diff --git a/tests/testparpred.Rout.save b/tests/testparpred.Rout.save
new file mode 100755
index 0000000..87136bb
--- /dev/null
+++ b/tests/testparpred.Rout.save
@@ -0,0 +1,53 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> ## set up a data frame for prediction
+> 
+> set.seed(1001)
+> f = factor(rep(letters[1:4],each=20))
+> x = runif(80)
+> u = rnorm(4)
+> y = rnorm(80,mean=2+x*(3+u[f]),sd=0.1)
+> dat = data.frame(f,x,y)
+> 
+> ## fit a model ... could easily do by lm() but want to
+> ##   demonstrate the problem
+> 
+> library(bbmle)
+> m1 = mle2(y~dnorm(a+b*x,sd=exp(logs)),parameters=list(b~f),data=dat,
++   start=list(a=0,b=2,logs=-3))
+> 
+> ## data frame for prediction
+> pp0 = expand.grid(x=seq(0,1,length=11),
++   f=levels(dat$f))
+> 
+> ## combine frame and model data: have to keep the model data
+> ##  around, because it contains other information needed for
+> ##  prediction.
+> 
+> nrow(predict(m1,pp0))
+[1] 44
+> 
+> 
+> 
+> 
+> proc.time()
+   user  system elapsed 
+  1.112   1.036   2.007 
diff --git a/tests/tmptest.R b/tests/tmptest.R
new file mode 100755
index 0000000..3f83c81
--- /dev/null
+++ b/tests/tmptest.R
@@ -0,0 +1,10 @@
+library(bbmle)
+d <- data.frame(x=0:10,
+                y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+
+maxit <- 1000
+mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
+   start=list(lymax=0,lhalf=0),
+   data=d,
+     control=list(maxit=maxit),
+   parameters=list(lymax~1,lhalf~1))
diff --git a/tests/tmptest.Rout.save b/tests/tmptest.Rout.save
new file mode 100755
index 0000000..e8ebb88
--- /dev/null
+++ b/tests/tmptest.Rout.save
@@ -0,0 +1,45 @@
+
+R Under development (unstable) (2012-07-27 r60013) -- "Unsuffered Consequences"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+> d <- data.frame(x=0:10,
++                 y=c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8))
+> 
+> maxit <- 1000
+> mle2(y~dpois(lambda=exp(lymax)/(1+x/exp(lhalf))),
++    start=list(lymax=0,lhalf=0),
++    data=d,
++      control=list(maxit=maxit),
++    parameters=list(lymax~1,lhalf~1))
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = exp(lymax)/(1 + x/exp(lhalf))), 
+    start = list(lymax = 0, lhalf = 0), data = d, parameters = list(lymax ~ 
+        1, lhalf ~ 1), control = list(maxit = maxit))
+
+Coefficients:
+   lymax    lhalf 
+3.218853 1.117035 
+
+Log-likelihood: -28.6 
+> 
+> proc.time()
+   user  system elapsed 
+  0.708   1.004   1.572 
diff --git a/tests/update.R b/tests/update.R
new file mode 100755
index 0000000..61ca641
--- /dev/null
+++ b/tests/update.R
@@ -0,0 +1,17 @@
+library(bbmle)
+
+x <- 0:10
+y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+d <- data.frame(x,y)
+oldopts <- options(warn=-1,digits=3)  ## ignore warnings
+m1 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
+           start=list(ymax=1,xhalf=1),data=d)
+m1
+y2 <- c(26, 17, 10, 15, 20, 5, 9, 8, 5, 4, 8)
+d2 <- data.frame(x,y=y2)
+
+m2 <- update(m1,data=d2)
+m2
+m3 <- update(m1,.~dpois(lambda=c),start=list(c=5))
+m3
+options(oldopts)
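
update() is exercised two ways above: refitting the same formula on new data
(m2), and swapping in a new model formula with .~, which keeps the original
response on the left-hand side (m3).  For comparing the resulting fits,
bbmle's information-criterion tables are the usual companion; a minimal
sketch (not part of the test):

AICtab(m1, m3, weights = TRUE)  # hyperbolic vs. constant-mean Poisson model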
diff --git a/tests/update.Rout.save b/tests/update.Rout.save
new file mode 100755
index 0000000..4531e6c
--- /dev/null
+++ b/tests/update.Rout.save
@@ -0,0 +1,72 @@
+
+R Under development (unstable) (2013-08-18 r63609) -- "Unsuffered Consequences"
+Copyright (C) 2013 The R Foundation for Statistical Computing
+Platform: i686-pc-linux-gnu (32-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+  Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> library(bbmle)
+Loading required package: stats4
+> 
+> x <- 0:10
+> y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
+> d <- data.frame(x,y)
+> oldopts <- options(warn=-1,digits=3)  ## ignore warnings
+> m1 <- mle2(y~dpois(lambda=ymax/(1+x/xhalf)),
++            start=list(ymax=1,xhalf=1),data=d)
+> m1
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymax/(1 + x/xhalf)), start = list(ymax = 1, 
+    xhalf = 1), data = d)
+
+Coefficients:
+ ymax xhalf 
+24.99  3.06 
+
+Log-likelihood: -28.6 
+> y2 <- c(26, 17, 10, 15, 20, 5, 9, 8, 5, 4, 8)
+> d2 <- data.frame(x,y=y2)
+> 
+> m2 <- update(m1,data=d2)
+> m2
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = ymax/(1 + x/xhalf)), start = list(ymax = 1, 
+    xhalf = 1), data = ..1, lower = -Inf, upper = Inf, control = list())
+
+Coefficients:
+ ymax xhalf 
+24.63  3.16 
+
+Log-likelihood: -29.6 
+> m3 <- update(m1,.~dpois(lambda=c),start=list(c=5))
+> m3
+
+Call:
+mle2(minuslogl = y ~ dpois(lambda = c), start = ..2, data = list(
+    x = 0:10, y = c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)), lower = -Inf, 
+    upper = Inf, control = list())
+
+Coefficients:
+   c 
+11.5 
+
+Log-likelihood: -42.7 
+> options(oldopts)
+> 
+> proc.time()
+   user  system elapsed 
+  0.576   0.832   2.785 
diff --git a/vignettes/cc-attrib-nc.png b/vignettes/cc-attrib-nc.png
new file mode 100755
index 0000000..5f98214
Binary files /dev/null and b/vignettes/cc-attrib-nc.png differ
diff --git a/vignettes/chicago.bst b/vignettes/chicago.bst
new file mode 100755
index 0000000..511f3aa
--- /dev/null
+++ b/vignettes/chicago.bst
@@ -0,0 +1,1654 @@
+%%% ====================================================================
+%%%  @BibTeX-style-file{
+%%%     author          = "Glenn Paulley",
+%%%     version         = "4",
+%%%     date            = "28 August 1992",
+%%%     time            = "10:23:39 199",
+%%%     filename        = "chicago.bst",
+%%%     address         = "Data Structuring Group
+%%%                        Department of Computer Science
+%%%                        University of Waterloo
+%%%                        Waterloo, Ontario, Canada
+%%%                        N2L 3G1",
+%%%     telephone       = "(519) 885-1211",
+%%%     FAX             = "(519) 885-1208",
+%%%     checksum        = "26323 1654 5143 37417",
+%%%     email           = "gnpaulle at bluebox.uwaterloo.ca",
+%%%     codetable       = "ISO/ASCII",
+%%%     keywords        = "",
+%%%     supported       = "yes",
+%%%     abstract        = "A BibTeX bibliography style that follows the
+%%%                        `B' reference style of the 13th Edition of
+%%%                        the Chicago Manual of Style. A detailed
+%%%                        feature list is given below.",
+%%%     docstring       = "The checksum field above contains a CRC-16
+%%%                        checksum as the first value, followed by the
+%%%                        equivalent of the standard UNIX wc (word
+%%%                        count) utility output of lines, words, and
+%%%                        characters.  This is produced by Robert
+%%%                        Solovay's checksum utility.",
+%%%  }
+%%% ====================================================================
+%
+% "Chicago" BibTeX style, chicago.bst
+% ===================================
+%
+% BibTeX `chicago' style file for BibTeX version 0.99c, LaTeX version 2.09
+% Place it in a file called chicago.bst in the BibTeX search path.
+% You need to include chicago.sty as a \documentstyle option.
+% (Placing it in the same directory as the LaTeX document should also work.)
+% This "chicago" style is based on newapa.bst (American Psych. Assoc.)
+% found at ymir.claremont.edu.
+%
+%   Citation format: (author-last-name year)
+%             (author-last-name and author-last-name year)
+%             (author-last-name, author-last-name, and author-last-name year)
+%             (author-last-name et al. year)
+%             (author-last-name)
+%             author-last-name (year)
+%             (author-last-name and author-last-name)
+%             (author-last-name et al.)
+%             (year) or (year,year)
+%             year or year,year
+%
+%   Reference list ordering: alphabetical by author or whatever passes
+%    for author in the absence of one.
+%
+% This BibTeX style has support for abbreviated author lists and for
+%    year-only citations.  This is done by having the citations
+%    actually look like
+%
+%    \citeauthoryear{full-author-info}{abbrev-author-info}{year}
+%
+% The LaTeX style has to have the following (or similar)
+%
+%     \let\@internalcite\cite
+%     \def\fullcite{\def\citeauthoryear##1##2##3{##1, ##3}\@internalcite}
+%     \def\fullciteA{\def\citeauthoryear##1##2##3{##1}\@internalcite}
+%     \def\shortcite{\def\citeauthoryear##1##2##3{##2, ##3}\@internalcite}
+%     \def\shortciteA{\def\citeauthoryear##1##2##3{##2}\@internalcite}
+%     \def\citeyear{\def\citeauthoryear##1##2##3{##3}\@internalcite}
+%
+% These TeX macro definitions are found in chicago.sty. Additional
+% commands to manipulate different components of a citation can be defined
+% so that, for example, you can list author's names without parentheses
+% if using a citation as a noun or object in a sentence.
+%
+% This file was originally copied from newapa.bst at ymir.claremont.edu.
+%
+% Features of chicago.bst:
+% =======================
+%
+% - full names used in citations, but abbreviated citations are available
+%   (see above)
+% - if an entry has a "month", then the month and year are also printed
+%   as part of that bibitem.
+% - all conjunctions use "and" instead of "\&"
+% - major modification from Chicago Manual of Style (13th ed.) is that
+%   only the first author in a reference appears last name first-
+%   additional authors appear as J. Q. Public.
+% - pages are listed as "pp. xx-xx" in all entry types except
+%   article entries.
+% - book, inbook, and manual use "location: publisher" (or organization)
+%   for address and publisher. All other types list publishers separately.
+% - "pp." are used to identify page numbers for all entry types except
+%   articles.
+% - organization is used as a citation label if neither author nor editor
+%   is present (for manuals).
+% - "et al." is used for long author and editor lists, or when "others"
+%   is used.
+%
+% Modifications and bug fixes from newapa.bst:
+% ===========================================
+%
+%   - added month, year to bib entries if month is present
+%   - fixed bug with In proceedings, added necessary comma after title
+%   - all conjunctions changed to "and" from "\&"
+%   - fixed bug with author labels in my.full.label: "et al." now is
+%        generated when "others" is an author name
+%   - major modification from Chicago Manual of Style (13th ed.) is that
+%     only the first author in a reference appears last name first-
+%     additional authors appear as J. Q. Public.
+%   - pages are listed as "pp. xx-xx" in all entry types except
+%     article entries. Unnecessary (IMHO) "()" around page numbers
+%     were removed, and page numbers now don't end with a period.
+%   - created chicago.sty for use with this bibstyle (required).
+%   - fixed bugs in FUNCTION {format.vol.num.pages} for missing volume,
+%     number, and/or pages. Renamed to format.jour.vol.
+%   - fixed bug in formatting booktitles: an additional period is an error
+%     if the book has a volume.
+%   - fixed bug: editors usually given redundant period before next clause
+%     (format.editors.dot) removed.
+%   - added label support for organizations, if both author and editor
+%     are missing (from alpha.bst). If organization is too long, then
+%     the key field is used for abbreviated citations.
+%   - In proceedings or books of several volumes, no comma was written
+%     between the "Volume x" and the page numbers (this was intentional
+%     in newapa.bst). Fixed.
+%   - Some journals may not have volumes/numbers, only month/year (e.g.
+%     IEEE Computer). Fixed bug in article style that assumed volume/number
+%     was always present.
+%
+% Original documentation for newapa.sty:
+% =====================================
+%
+% This version was made by modifying the master file made by
+% Oren Patashnik (PATASHNIK at SCORE.STANFORD.EDU), and the 'named' BibTeX
+% style of Peter F. Patel-Schneider.
+%
+% Copyright (C) 1985, all rights reserved.
+% Copying of this file is authorized only if either
+% (1) you make absolutely no changes to your copy, including name, or
+% (2) if you do make changes, you name it something other than 'newapa.bst'.
+% There are undoubtedly bugs in this style.  If you make bug fixes,
+% improvements, etc.  please let me know.  My e-mail address is:
+%    spencer at cgrg.ohio.state.edu or 71160.3141 at compuserve.com
+%
+% This style was made from 'plain.bst', 'named.bst', and 'apalike.bst',
+% with lots of tweaking to make it look like APA style, along with tips
+% from Young Ryu and Brian Reiser's modifications of 'apalike.bst'.
+
+ENTRY
+  { address
+    author
+    booktitle
+    chapter
+    edition
+    editor
+    howpublished
+    institution
+    journal
+    key
+    month
+    note
+    number
+    organization
+    pages
+    publisher
+    school
+    series
+    title
+    type
+    volume
+    year
+  }
+  {}
+  { label.year extra.label sort.year sort.label }
+
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+  #1 'mid.sentence :=
+  #2 'after.sentence :=
+  #3 'after.block :=
+}
+
+STRINGS { s t u }
+
+FUNCTION {output.nonnull}
+{ 's :=
+  output.state mid.sentence =
+    { ", " * write$ }
+    { output.state after.block =
+    { add.period$ write$
+      newline$
+      "\newblock " write$
+    }
+    { output.state before.all =
+        'write$
+        { add.period$ " " * write$ }
+      if$
+    }
+      if$
+      mid.sentence 'output.state :=
+    }
+  if$
+  s
+}
+
+% Use a colon to separate output. Used only for address/publisher
+% combination in book/inbook types, address/institution for manuals,
+% and organization:publisher for proceedings (inproceedings).
+%
+FUNCTION {output.nonnull.colon}
+{ 's :=
+  output.state mid.sentence =
+    { ": " * write$ }
+    { output.state after.block =
+    { add.period$ write$
+      newline$
+      "\newblock " write$
+    }
+    { output.state before.all =
+        'write$
+        { add.period$ " " * write$ }
+      if$
+    }
+      if$
+      mid.sentence 'output.state :=
+    }
+  if$
+  s
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.colon}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull.colon
+  if$
+}
+
+FUNCTION {output.check}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.check.colon}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull.colon
+  if$
+}
+
+FUNCTION {output.year.check}
+{ year empty$
+     { "empty year in " cite$ * warning$ }
+     { write$
+        " (" year * extra.label *
+       month empty$
+          { ")" * }
+          { ", " * month * ")" * }
+       if$
+       mid.sentence 'output.state :=
+     }
+  if$
+}
+
+
+FUNCTION {fin.entry}
+{ add.period$
+  write$
+  newline$
+}
+
+FUNCTION {new.block}
+{ output.state before.all =
+    'skip$
+    { after.block 'output.state := }
+  if$
+}
+
+FUNCTION {new.sentence}
+{ output.state after.block =
+    'skip$
+    { output.state before.all =
+    'skip$
+    { after.sentence 'output.state := }
+      if$
+    }
+  if$
+}
+
+FUNCTION {not}
+{   { #0 }
+    { #1 }
+  if$
+}
+
+FUNCTION {and}
+{   'skip$
+    { pop$ #0 }
+  if$
+}
+
+FUNCTION {or}
+{   { pop$ #1 }
+    'skip$
+  if$
+}
+
+FUNCTION {new.block.checka}
+{ empty$
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.block.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.block
+  if$
+}
+
+FUNCTION {new.sentence.checka}
+{ empty$
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {new.sentence.checkb}
+{ empty$
+  swap$ empty$
+  and
+    'skip$
+    'new.sentence
+  if$
+}
+
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+    { pop$ "" }
+    'skip$
+  if$
+}
+
+%
+% Emphasize the top string on the stack.
+%
+FUNCTION {emphasize}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "{\em " swap$ * "}" * }
+  if$
+}
+
+%
+% Emphasize the top string on the stack, but add a trailing space.
+%
+FUNCTION {emphasize.space}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "{\em " swap$ * "\/}" * }
+  if$
+}
+
+INTEGERS { nameptr namesleft numnames }
+%
+% Format bibliographical entries with the first author last name first,
+% and subsequent authors with initials followed by last name.
+% All names are formatted in this routine.
+%
+FUNCTION {format.names}
+{ 's :=
+  #1 'nameptr :=               % nameptr = 1;
+  s num.names$ 'numnames :=    % numnames = num.name$(s);
+  numnames 'namesleft :=
+    { namesleft #0 > }
+
+    { nameptr #1 =
+        {s nameptr "{vv~}{ll}{, jj}{, f.}" format.name$ 't := }
+        {s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't := }
+      if$
+      nameptr #1 >
+        { namesleft #1 >
+              { ", " * t * }
+              { numnames #2 >
+                  { "," * }
+                  'skip$
+                if$
+                t "others" =
+                    { " et~al." * }
+                    { " and " * t * } % from Chicago Manual of Style
+                  if$
+               }
+               if$
+             }
+            't
+        if$
+        nameptr #1 + 'nameptr :=          % nameptr += 1;
+        namesleft #1 - 'namesleft :=      % namesleft -= 1;
+    }
+  while$
+}
+
+FUNCTION {my.full.label}
+{ 's :=
+  #1 'nameptr :=               % nameptr = 1;
+  s num.names$ 'numnames :=    % numnames = num.name$(s);
+  numnames 'namesleft :=
+    { namesleft #0 > }
+
+    { s nameptr "{vv~}{ll}" format.name$ 't :=  % get the next name
+      nameptr #1 >
+        { namesleft #1 >
+              { ", " * t * }
+              { numnames #2 >
+                  { "," * }
+                  'skip$
+                if$
+                t "others" =
+                    { " et~al." * }
+                    { " and " * t * } % from Chicago Manual of Style
+                  if$
+               }
+               if$
+             }
+            't
+        if$
+        nameptr #1 + 'nameptr :=          % nameptr += 1;
+        namesleft #1 - 'namesleft :=      % namesleft -= 1;
+    }
+  while$
+
+}
+
+FUNCTION {format.names.fml}
+%
+% Format names in "familiar" format, with first initial followed by
+% last name. Like format.names, ALL names are formatted.
+%
+{ 's :=
+  #1 'nameptr :=               % nameptr = 1;
+  s num.names$ 'numnames :=    % numnames = num.name$(s);
+  numnames 'namesleft :=
+    { namesleft #0 > }
+
+    { s nameptr "{f.~}{vv~}{ll}{, jj}" format.name$ 't :=
+
+      nameptr #1 >
+        { namesleft #1 >
+              { ", " * t * }
+               { numnames #2 >
+                    { "," * }
+                    'skip$
+                  if$
+                  t "others" =
+                        { " et~al." * }
+                        { " and " * t * }
+%                       { " \& " * t * }
+                      if$
+                }
+               if$
+             }
+            't
+        if$
+        nameptr #1 + 'nameptr :=          % nameptr += 1;
+        namesleft #1 - 'namesleft :=      % namesleft -= 1;
+    }
+  while$
+}
+
+FUNCTION {format.authors}
+{ author empty$
+    { "" }
+    { author format.names }
+  if$
+}
+
+FUNCTION {format.key}
+{ empty$
+    { key field.or.null }
+    { "" }
+  if$
+}
+
+%
+% Format editor names for use in the "in" types: inbook, incollection,
+% inproceedings: first initial, then last names. When editors are the
+% LABEL for an entry, then format.editor is used which lists editors
+% by last name first.
+%
+FUNCTION {format.editors.fml}
+{ editor empty$
+    { "" }
+    { editor format.names.fml
+      editor num.names$ #1 >
+    { " (Eds.)" * }
+    { " (Ed.)" * }
+      if$
+    }
+  if$
+}
+
+%
+% Format editor names for use in labels, last names first.
+%
+FUNCTION {format.editors}
+{ editor empty$
+    { "" }
+    { editor format.names
+      editor num.names$ #1 >
+    { " (Eds.)" * }
+    { " (Ed.)" * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.title}
+{ title empty$
+    { "" }
+    { title "t" change.case$ }
+  if$
+}
+
+% Note that the APA style requires case changes
+% in article titles. The following does not
+% change cases. If you prefer it, uncomment the
+% following and comment out the above.
+
+%FUNCTION {format.title}
+%{ title empty$
+%    { "" }
+%    { title }
+%  if$
+%}
+
+FUNCTION {n.dashify}
+{ 't :=
+  ""
+    { t empty$ not }
+    { t #1 #1 substring$ "-" =
+    { t #1 #2 substring$ "--" = not
+        { "--" *
+          t #2 global.max$ substring$ 't :=
+        }
+        {   { t #1 #1 substring$ "-" = }
+        { "-" *
+          t #2 global.max$ substring$ 't :=
+        }
+          while$
+        }
+      if$
+    }
+    { t #1 #1 substring$ *
+      t #2 global.max$ substring$ 't :=
+    }
+      if$
+    }
+  while$
+}
+
+FUNCTION {format.btitle}
+{ edition empty$
+  { title emphasize }
+  { title empty$
+    { title emphasize }
+    { volume empty$     % gnp - check for volume, then don't need period
+       { "{\em " title * "\/} (" * edition * " ed.)" * "." * }
+       { "{\em " title * "\/} (" * edition * " ed.)" * }
+      if$
+    }
+    if$
+  }
+  if$
+}
+
+FUNCTION {format.emphasize.booktitle}
+{ edition empty$
+  { booktitle emphasize }
+  { booktitle empty$
+    { booktitle emphasize }
+    { volume empty$    % gnp - extra period an error if book has a volume
+        { "{\em " booktitle * "\/} (" * edition * " ed.)" * "." *}
+        { "{\em " booktitle * "\/} (" * edition * " ed.)" * }
+      if$
+      }
+    if$
+    }
+  if$
+  }
+
+
+FUNCTION {tie.or.space.connect}
+{ duplicate$ text.length$ #3 <
+    { "~" }
+    { " " }
+  if$
+  swap$ * *
+}
+
+FUNCTION {either.or.check}
+{ empty$
+    'pop$
+    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+  if$
+}
+
+FUNCTION {format.bvolume}
+{ volume empty$
+    { "" }
+    { "Volume" volume tie.or.space.connect % gnp - changed to mixed case
+      series empty$
+        'skip$
+        { " of " * series emphasize * }
+      if$
+      "volume and number" number either.or.check
+    }
+  if$
+}
+
+FUNCTION {format.number.series}
+{ volume empty$
+    { number empty$
+    { series field.or.null }
+    { output.state mid.sentence =
+        { "Number" } % gnp - changed to mixed case always
+        { "Number" }
+      if$
+      number tie.or.space.connect
+      series empty$
+        { "there's a number but no series in " cite$ * warning$ }
+        { " in " * series * }
+      if$
+    }
+      if$
+    }
+    { "" }
+  if$
+}
+
+INTEGERS { multiresult }
+
+FUNCTION {multi.page.check}
+{ 't :=
+  #0 'multiresult :=
+    { multiresult not
+      t empty$ not
+      and
+    }
+    { t #1 #1 substring$
+      duplicate$ "-" =
+      swap$ duplicate$ "," =
+      swap$ "+" =
+      or or
+    { #1 'multiresult := }
+    { t #2 global.max$ substring$ 't := }
+      if$
+    }
+  while$
+  multiresult
+}
+
+FUNCTION {format.pages}
+{ pages empty$
+  { "" }
+  { pages multi.page.check
+	{ "pp.\ " pages n.dashify tie.or.space.connect } % gnp - removed ()
+	{ "pp.\ " pages tie.or.space.connect }
+    if$
+  }
+  if$
+}
+
+% By Young (and Spencer)
+% GNP - fixed bugs with missing volume, number, and/or pages
+%
+% Format journal, volume, number, pages for article types.
+%
+FUNCTION {format.jour.vol}
+{ journal empty$
+    { "no journal in " cite$ * warning$
+      "" }
+    { journal emphasize.space }
+    if$
+  number empty$
+    { volume empty$
+       { "no number and no volume in " cite$ * warning$
+         "" * }
+       { "~{\em " * Volume * "}" * }
+      if$
+    }
+    { volume empty$
+      {"no volume for " cite$ * warning$
+       "~(" * number * ")" * }
+      { "~" *
+        volume emphasize.space
+        "(" * number * ")" * * }
+      if$
+    }
+  if$
+  pages empty$
+    {"page numbers missing in " cite$ * warning$
+     "" * } % gnp - place a null string on the stack for output
+    { duplicate$ empty$
+      { pop$ format.pages }
+      { ", " *  pages n.dashify * } % gnp - removed pp. for articles
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+    'format.pages
+    { type empty$
+        { "Chapter" } % gnp - changed to mixed case
+        { type "t" change.case$ }
+      if$
+      chapter tie.or.space.connect
+      pages empty$
+        {"page numbers missing in " cite$ * warning$} % gnp - added check
+        { ", " * format.pages * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.in.ed.booktitle}
+{ booktitle empty$
+  { "" }
+  { editor empty$
+    { "In " format.emphasize.booktitle * }
+    { "In " format.editors.fml * ", " * format.emphasize.booktitle * }
+    if$
+  }
+  if$
+}
+
+FUNCTION {format.thesis.type}
+{ type empty$
+    'skip$
+    { pop$
+      type "t" change.case$
+    }
+  if$
+}
+
+FUNCTION {format.tr.number}
+{ type empty$
+    { "Technical Report" }
+    'type
+  if$
+  number empty$
+    { "t" change.case$ }
+    { number tie.or.space.connect }
+  if$
+}
+
+FUNCTION {format.article.crossref}
+{ "See"
+  "\citeN{" * crossref * "}" *
+}
+
+FUNCTION {format.crossref.editor}
+{ editor #1 "{vv~}{ll}" format.name$
+  editor num.names$ duplicate$
+  #2 >
+    { pop$ " et~al." * }
+    { #2 <
+    'skip$
+    { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+        { " et~al." * }
+        { " and " * editor #2 "{vv~}{ll}" format.name$ * }
+      if$
+    }
+      if$
+    }
+  if$
+}
+
+FUNCTION {format.book.crossref}
+{ volume empty$
+    { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+      "In "
+    }
+    { "Volume" volume tie.or.space.connect % gnp - changed to mixed case
+      " of " *
+    }
+  if$
+  editor empty$
+  editor field.or.null author field.or.null =
+  or
+    { key empty$
+    { series empty$
+        { "need editor, key, or series for " cite$ * " to crossref " *
+          crossref * warning$
+          "" *
+        }
+        { "{\em " * series * "\/}" * }
+      if$
+    }
+    { key * }
+      if$
+    }
+    { format.crossref.editor * }
+  if$
+  " \citeN{" * crossref * "}" *
+}
+
+FUNCTION {format.incoll.inproc.crossref}
+{ "See"
+  " \citeN{" * crossref * "}" *
+}
+
+% format.lab.names:
+%
+% determines "short" names for the abbreviated author information.
+% "Long" labels are created in calc.label, using the routine my.full.label
+% to format author and editor fields.
+%
+% There are 4 cases for labels.   (n=3 in the example)
+% a) one author             Foo
+% b) one to n               Foo, Bar and Baz
+% c) use of "and others"    Foo, Bar et al.
+% d) more than n            Foo et al.
+%
+FUNCTION {format.lab.names}
+{ 's :=
+  s num.names$ 'numnames :=
+  numnames #2 >    % change number to number of others allowed before
+		   % forcing "et al".
+    { s #1 "{vv~}{ll}" format.name$ " et~al." * }
+    {
+      numnames #1 - 'namesleft :=
+      #2 'nameptr :=
+      s #1 "{vv~}{ll}" format.name$
+	{ namesleft #0 > }
+	{ nameptr numnames =
+	    { s nameptr "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+		{ " et~al." * }
+		{ " and " * s nameptr "{vv~}{ll}" format.name$ * }
+	      if$
+	    }
+	    { ", " * s nameptr "{vv~}{ll}" format.name$ * }
+	  if$
+	  nameptr #1 + 'nameptr :=
+	  namesleft #1 - 'namesleft :=
+	}
+      while$
+    }
+  if$
+}
+
+FUNCTION {author.key.label}
+{ author empty$
+    { key empty$
+          { "no key, author in " cite$ * warning$
+            cite$ #1 #3 substring$ }
+         'key
+      if$
+    }
+    { author format.lab.names }
+  if$
+}
+
+FUNCTION {editor.key.label}
+{ editor empty$
+    { key empty$
+          { "no key, editor in " cite$ * warning$
+            cite$ #1 #3 substring$ }
+          'key
+        if$
+     }
+     { editor format.lab.names }
+  if$
+}
+
+FUNCTION {author.key.organization.label}
+%
+% added - gnp. Provide label formatting by organization if author is null.
+%
+{ author empty$
+    { organization empty$
+	{ key empty$
+	    { "no key, author or organization in " cite$ * warning$
+              cite$ #1 #3 substring$ }
+	    'key
+	  if$
+	}
+        { organization }
+      if$
+    }
+    { author format.lab.names }
+  if$
+}
+
+FUNCTION {editor.key.organization.label}
+%
+% added - gnp. Provide label formatting by organization if editor is null.
+%
+{ editor empty$
+    { organization empty$
+	{ key empty$
+	    { "no key, editor or organization in " cite$ * warning$
+              cite$ #1 #3 substring$ }
+	    'key
+	  if$
+	}
+        { organization }
+      if$
+    }
+    { editor format.lab.names }
+  if$
+}
+
+FUNCTION {author.editor.key.label}
+{ author empty$
+    { editor empty$
+          { key empty$
+               { "no key, author, or editor in " cite$ * warning$
+                 cite$ #1 #3 substring$ }
+             'key
+           if$
+         }
+          { editor format.lab.names }
+      if$
+    }
+    { author format.lab.names }
+  if$
+}
+
+FUNCTION {calc.label}
+%
+% Changed - GNP. See also author.organization.sort, editor.organization.sort
+% Form label for BibTeX entry. The classification of which fields are used
+% for which type of entry (book, inbook, etc.) are taken from alpha.bst.
+% The change here from newapa is to also include organization as a
+% citation label if author or editor is missing.
+%
+{ type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.key.label
+    { type$ "proceedings" =
+	'editor.key.organization.label
+	{ type$ "manual" =
+	    'author.key.organization.label
+	    'author.key.label
+	  if$
+	}
+      if$
+    }
+  if$
+
+  author empty$  % generate the full label citation information.
+    { editor empty$
+        { organization empty$
+           { "no author, editor, or organization in " cite$ * warning$
+             "??" }
+           { organization }
+           if$
+        }
+        { editor my.full.label }
+        if$
+    }
+    { author my.full.label }
+  if$
+
+% leave label on the stack, to be popped when required.
+
+  "}{" * swap$ * "}{" *
+%  year field.or.null purify$ #-1 #4 substring$ *
+%
+% save the year for sort processing afterwards (adding a, b, c, etc.)
+%
+  year field.or.null purify$ #-1 #4 substring$
+  'label.year :=
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+
+  "\bibitem[\protect\citeauthoryear{" write$
+  calc.label write$
+  sort.year write$
+  "}]{" write$
+
+  cite$ write$
+  "}" write$
+  newline$
+  ""
+  before.all 'output.state :=
+}
+
+FUNCTION {article}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output          % added
+  output.year.check                 % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  crossref missing$
+    { format.jour.vol output
+    }
+    { format.article.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {book}
+{ output.bibitem
+  author empty$
+    { format.editors
+	  "author and editor" output.check }
+    { format.authors
+	  output.nonnull
+      crossref missing$
+    	{ "author and editor" editor either.or.check }
+    	'skip$
+      if$
+    }
+  if$
+  output.year.check       % added
+  new.block
+  format.btitle
+  "title" output.check
+  crossref missing$
+    { format.bvolume output
+      new.block
+      format.number.series output
+      new.sentence
+      address output
+      publisher "publisher" output.check.colon
+    }
+    { new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {booklet}
+{ output.bibitem
+  format.authors output
+  author format.key output          % added
+  output.year.check                 % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  howpublished output
+  address output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+  author empty$
+    { format.editors
+      "author and editor" output.check
+    }
+    { format.authors output.nonnull
+      crossref missing$
+    { "author and editor" editor either.or.check }
+    'skip$
+      if$
+    }
+  if$
+  output.year.check                 % added
+  new.block
+  format.btitle
+  "title" output.check
+  crossref missing$
+    { format.bvolume output
+      format.chapter.pages
+      "chapter and pages" output.check
+      new.block
+      format.number.series output
+      new.sentence
+      address output
+      publisher
+      "publisher" output.check.colon
+    }
+    { format.chapter.pages "chapter and pages" output.check
+      new.block
+      format.book.crossref output.nonnull
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output       % added
+  output.year.check              % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  crossref missing$
+  { format.in.ed.booktitle
+    "booktitle" output.check
+    format.bvolume output
+    format.number.series output
+    format.chapter.pages output % gnp - was special.output.nonnull
+%                                 left out comma before page numbers
+    new.sentence
+    address output
+    publisher "publisher" output.check.colon
+  }
+  { format.incoll.inproc.crossref
+	output.nonnull
+    format.chapter.pages output
+  }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {inproceedings}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output            % added
+  output.year.check                   % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  crossref missing$
+    { format.in.ed.booktitle
+	  "booktitle" output.check
+      format.bvolume output
+      format.number.series output
+      address output
+      format.pages output
+      new.sentence
+      organization output
+      publisher output.colon
+      }
+    { format.incoll.inproc.crossref output.nonnull
+      format.pages output
+    }
+  if$
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {conference} { inproceedings }
+
+FUNCTION {manual}
+{ output.bibitem
+  author empty$
+    { editor empty$
+      { organization "organization" output.check
+        organization format.key output }  % if all else fails, use key
+      { format.editors "author and editor" output.check }
+      if$
+    }
+    { format.authors output.nonnull }
+    if$
+  output.year.check                 % added
+  new.block
+  format.btitle
+  "title" output.check
+  organization address new.block.checkb
+% Reversed the order of "address" and "organization", added the ":".
+  address output
+  organization "organization" output.check.colon
+%  address output
+%  ":" output
+%  organization output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output          % added
+  output.year.check                 % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  "Master's thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+  format.authors output
+  author format.key output            % added
+  output.year.check                   % added
+  title howpublished new.block.checkb
+  format.title output
+  new.block
+  howpublished output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {phdthesis}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output            % added
+  output.year.check                   % added
+  new.block
+  format.btitle
+  "title" output.check
+  new.block
+  "Ph.\ D. thesis" format.thesis.type output.nonnull
+  school "school" output.check
+  address output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+  editor empty$
+    { organization output
+      organization format.key output }  % gnp - changed from author format.key
+    { format.editors output.nonnull }
+  if$
+% author format.key output             % gnp - removed (should be either
+%                                        editor or organization
+  output.year.check                    % added (newapa)
+  new.block
+  format.btitle
+  "title" output.check
+  format.bvolume output
+  format.number.series output
+  address output
+  new.sentence
+  organization output
+  publisher output.colon
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output             % added
+  output.year.check                    % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  format.tr.number output.nonnull
+  institution
+  "institution" output.check
+  address output
+  new.block
+  note output
+  fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+  format.authors
+  "author" output.check
+  author format.key output              % added
+  output.year.check                      % added
+  new.block
+  format.title
+  "title" output.check
+  new.block
+  note "note" output.check
+  fin.entry
+}
+
+FUNCTION {default.type} { misc }
+
+MACRO {jan} {"January"}
+
+MACRO {feb} {"February"}
+
+MACRO {mar} {"March"}
+
+MACRO {apr} {"April"}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"August"}
+
+MACRO {sep} {"September"}
+
+MACRO {oct} {"October"}
+
+MACRO {nov} {"November"}
+
+MACRO {dec} {"December"}
+
+MACRO {acmcs} {"ACM Computing Surveys"}
+
+MACRO {acta} {"Acta Informatica"}
+
+MACRO {ai} {"Artificial Intelligence"}
+
+MACRO {cacm} {"Communications of the ACM"}
+
+MACRO {ibmjrd} {"IBM Journal of Research and Development"}
+
+MACRO {ibmsj} {"IBM Systems Journal"}
+
+MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
+
+MACRO {ieeetc} {"IEEE Transactions on Computers"}
+
+MACRO {ieeetcad}
+ {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
+
+MACRO {ipl} {"Information Processing Letters"}
+
+MACRO {jacm} {"Journal of the ACM"}
+
+MACRO {jcss} {"Journal of Computer and System Sciences"}
+
+MACRO {scp} {"Science of Computer Programming"}
+
+MACRO {sicomp} {"SIAM Journal on Computing"}
+
+MACRO {tocs} {"ACM Transactions on Computer Systems"}
+
+MACRO {tods} {"ACM Transactions on Database Systems"}
+
+MACRO {tog} {"ACM Transactions on Graphics"}
+
+MACRO {toms} {"ACM Transactions on Mathematical Software"}
+
+MACRO {toois} {"ACM Transactions on Office Information Systems"}
+
+MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
+
+MACRO {tcs} {"Theoretical Computer Science"}
+
+READ
+
+FUNCTION {sortify}
+{ purify$
+  "l" change.case$
+}
+
+INTEGERS { len }
+
+FUNCTION {chop.word}
+{ 's :=
+  'len :=
+  s #1 len substring$ =
+    { s len #1 + global.max$ substring$ }
+    's
+  if$
+}
+
+
+
+FUNCTION {sort.format.names}
+{ 's :=
+  #1 'nameptr :=
+  ""
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { nameptr #1 >
+          { "   " * }
+         'skip$
+      if$
+      s nameptr "{vv{ } }{ll{ }}{  f{ }}{  jj{ }}" format.name$ 't :=
+      nameptr numnames = t "others" = and
+          { " et~al" * }
+          { t sortify * }
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+}
+
+FUNCTION {sort.format.title}
+{ 't :=
+  "A " #2
+    "An " #3
+      "The " #4 t chop.word
+    chop.word
+  chop.word
+  sortify
+  #1 global.max$ substring$
+}
+
+FUNCTION {author.sort}
+{ author empty$
+    { key empty$
+         { "to sort, need author or key in " cite$ * warning$
+           "" }
+         { key sortify }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {editor.sort}
+{ editor empty$
+    { key empty$
+         { "to sort, need editor or key in " cite$ * warning$
+           ""
+         }
+         { key sortify }
+      if$
+    }
+    { editor sort.format.names }
+  if$
+}
+
+FUNCTION {author.editor.sort}
+{ author empty$
+    { "missing author in " cite$ * warning$
+      editor empty$
+         { key empty$
+             { "to sort, need author, editor, or key in " cite$ * warning$
+               ""
+             }
+             { key sortify }
+           if$
+         }
+         { editor sort.format.names }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {author.organization.sort}
+%
+% added - GNP. Stack author or organization for sorting (from alpha.bst).
+% Unlike alpha.bst, we need entire names, not abbreviations
+%
+{ author empty$
+    { organization empty$
+	{ key empty$
+	    { "to sort, need author, organization, or key in " cite$ * warning$
+	      ""
+	    }
+	    { key sortify }
+	  if$
+	}
+	{ organization sortify }
+      if$
+    }
+    { author sort.format.names }
+  if$
+}
+
+FUNCTION {editor.organization.sort}
+%
+% added - GNP. Stack editor or organization for sorting (from alpha.bst).
+% Unlike alpha.bst, we need entire names, not abbreviations
+%
+{ editor empty$
+    { organization empty$
+	{ key empty$
+	    { "to sort, need editor, organization, or key in " cite$ * warning$
+	      ""
+	    }
+	    { key sortify }
+	  if$
+	}
+	{ organization sortify }
+      if$
+    }
+    { editor sort.format.names }
+  if$
+}
+
+FUNCTION {presort}
+%
+% Presort creates the bibentry's label via a call to calc.label, and then
+% sorts the entries based on entry type. Chicago.bst adds support for
+% including organizations as the sort key; the following is stolen from
+% alpha.bst.
+%
+{ calc.label sortify % recalculate bibitem label
+  year field.or.null purify$ #-1 #4 substring$ * % add year
+  "    "
+  *
+  type$ "book" =
+  type$ "inbook" =
+  or
+    'author.editor.sort
+    { type$ "proceedings" =
+	'editor.organization.sort
+	{ type$ "manual" =
+	    'author.organization.sort
+	    'author.sort
+	  if$
+	}
+      if$
+    }
+  if$
+  #1 entry.max$ substring$        % added for newapa
+  'sort.label :=                  % added for newapa
+  sort.label                      % added for newapa
+  *
+  "    "
+  *
+  title field.or.null
+  sort.format.title
+  *
+  #1 entry.max$ substring$
+  'sort.key$ :=
+}
+
+ITERATE {presort}
+
+SORT             % by label, year, author/editor, title
+
+STRINGS { last.label next.extra }
+
+INTEGERS { last.extra.num }
+
+FUNCTION {initialize.extra.label.stuff}
+{ #0 int.to.chr$ 'last.label :=
+  "" 'next.extra :=
+  #0 'last.extra.num :=
+}
+
+FUNCTION {forward.pass}
+%
+% Pass through all entries, comparing current entry to last one.
+% Need to concatenate year to the stack (done by calc.label) to determine
+% if two entries are the same (see presort)
+%
+{ last.label
+  calc.label year field.or.null purify$ #-1 #4 substring$ * % add year
+  #1 entry.max$ substring$ =     % are they equal?
+     { last.extra.num #1 + 'last.extra.num :=
+       last.extra.num int.to.chr$ 'extra.label :=
+     }
+     { "a" chr.to.int$ 'last.extra.num :=
+       "" 'extra.label :=
+       calc.label year field.or.null purify$ #-1 #4 substring$ * % add year
+       #1 entry.max$ substring$ 'last.label := % assign to last.label
+     }
+  if$
+}
+
+FUNCTION {reverse.pass}
+{ next.extra "b" =
+    { "a" 'extra.label := }
+     'skip$
+  if$
+  label.year extra.label * 'sort.year :=
+  extra.label 'next.extra :=
+}
+
+EXECUTE {initialize.extra.label.stuff}
+
+ITERATE {forward.pass}
+
+REVERSE {reverse.pass}
+
+FUNCTION {bib.sort.order}
+{ sort.label
+  "    "
+  *
+  year field.or.null sortify
+  *
+  "    "
+  *
+  title field.or.null
+  sort.format.title
+  *
+  #1 entry.max$ substring$
+  'sort.key$ :=
+}
+
+ITERATE {bib.sort.order}
+
+SORT             % by sort.label, year, title --- giving final bib. order.
+
+FUNCTION {begin.bib}
+
+{ preamble$ empty$
+    'skip$
+    { preamble$ write$ newline$ }
+  if$
+  "\begin{thebibliography}{}" write$ newline$
+}
+
+
+EXECUTE {begin.bib}
+
+EXECUTE {init.state.consts}
+
+ITERATE {call.type$}
+
+FUNCTION {end.bib}
+{ newline$
+  "\end{thebibliography}" write$ newline$
+}
+
+EXECUTE {end.bib}
+
+
diff --git a/vignettes/clean b/vignettes/clean
new file mode 100755
index 0000000..297e593
--- /dev/null
+++ b/vignettes/clean
@@ -0,0 +1 @@
+rm -f mle2.{aux,bbl,blg,log,out,toc,tex} quasi.{aux,bbl,blg,log,out,tex}
diff --git a/vignettes/mle2.Rnw b/vignettes/mle2.Rnw
new file mode 100755
index 0000000..25e6994
--- /dev/null
+++ b/vignettes/mle2.Rnw
@@ -0,0 +1,813 @@
+\documentclass{article}
+%\VignetteIndexEntry{Examples for enhanced mle code}
+%\VignettePackage{bbmle}
+%\VignetteDepends{Hmisc}
+%\VignetteDepends{emdbook}
+%\VignetteDepends{ggplot2}
+%\VignetteDepends{lattice}
+%\VignetteEngine{knitr::knitr}
+\usepackage[utf8]{inputenc} % for UTF-8/single quotes from sQuote()
+\usepackage[english]{babel} % for texi2dvi ~ bug
+\usepackage{graphicx}
+\usepackage{natbib}
+\usepackage{array}
+\usepackage{color}
+\usepackage[colorlinks=true,urlcolor=blue,bookmarks=true]{hyperref}
+\usepackage{url}
+\author{Ben Bolker}
+\title{Maximum likelihood estimation and analysis
+  with the \code{bbmle} package}
+\newcommand{\code}[1]{{\tt #1}}
+\newcommand{\bbnote}[1]{\color{red} {\em #1} \color{black}}
+\date{\today}
+\begin{document}
+\bibliographystyle{chicago}
+%\bibliographystyle{plain}
+\maketitle
+\tableofcontents
+
+<<knitropts,echo=FALSE,message=FALSE>>=
+if (require("knitr")) opts_chunk$set(fig.width=5,fig.height=5,tidy=FALSE,warning=FALSE,error=TRUE)
+@ 
+<<setup,results="hide",echo=FALSE,message=FALSE>>=
+library(Hmisc)
+@ 
+
+The \code{bbmle} package, designed to simplify
+maximum likelihood estimation and analysis in R,
+extends and modifies the \code{mle} function and class
+in the \code{stats4} package that comes with R by default.
+\code{mle} is in turn a wrapper around the \code{optim}
+function in base R.
+The maximum-likelihood-estimation function and class
+in \code{bbmle} are both called \code{mle2}, to avoid
+confusion and conflict with the original functions in
+the \code{stats4} package.  The major differences between
+\code{mle} and \code{mle2} are:
+\begin{itemize}
+\item \code{mle2} is more robust, with additional warnings (e.g.
+  if the Hessian can't be computed by finite differences,
+  \code{mle2} returns a fit with a missing Hessian rather
+  than stopping with an error)
+\item \code{mle2} uses a \code{data} argument to allow different
+  data to be passed to the negative log-likelihood function
+\item \code{mle2} has a formula interface like that
+ of (e.g.) \code{gls} in the \code{nlme} package.
+ For relatively simple models the formula for the
+ maximum likelihood can be written in-line, rather than
+ defining a negative log-likelihood function.  The formula
+ interface also simplifies fitting models with
+ categorical variables.  Models fitted using the formula interface
+ also have applicable \code{predict} and \code{simulate} methods.
+\item \code{bbmle} defines \code{anova}, \code{AIC}, \code{AICc}, 
+  and \code{BIC} methods for
+  \code{mle2} objects, as well as
+  \code{AICtab}, \code{BICtab}, \code{AICctab}
+  functions for producing summary tables of information criteria for a 
+  set of models.
+\end{itemize}
+
+Other packages with similar functionality (extending
+GLMs in various ways) are
+\begin{itemize}
+\item on CRAN: \code{aods3} (overdispersed models such as beta-binomial);
+  \code{VGAM} (a wide range of models);
+  \code{betareg} (beta regression);
+  \code{pscl} (zero-inflated, hurdle models);
+  \code{maxLik} (another general-purpose maximizer, with
+  a different selection of optimizers)
+\item In Jim Lindsey's code repository
+  (\url{http://popgen.unimaas.nl/~jlindsey/rcode.html}):
+  \code{gnlr} and \code{gnlr3}
+\end{itemize}
+  
+\section{Example: \emph{Orobanche}/overdispersed binomial}
+
+This example will use the classic data set on
+\emph{Orobanche} germination from \cite{Crowder1978}
+(you can also use
+\code{glm(...,family="quasibinomial")} or
+the \code{aods3} package to analyze these data).
+
+\subsection{Test basic fit to simulated beta-binomial data}
+
+First, generate a single beta-binomially distributed
+set of points as a simple test.
+
+Load the \code{emdbook} package
+to get functions for the beta-binomial distribution (random-deviate 
+function \code{rbetabinom} --- these functions are also available
+in Jim Lindsey's \code{rmutil} package).
+<<emdbook,message=FALSE>>=
+library(emdbook)
+@  
+
+Generate random deviates from a beta-binomial distribution:
+<<bbsim>>=
+set.seed(1001)
+x1 <- rbetabinom(n=1000,prob=0.1,size=50,theta=10)
+@ 
+
+Load the package:
+<<bbmle,message=FALSE>>=
+library("bbmle")
+@ 
+
+Construct a simple negative log-likelihood function:
+<<likfun1>>=
+mtmp <- function(prob,size,theta) {
+  -sum(dbetabinom(x1,prob,size,theta,log=TRUE))
+}
+@ 
+
+Fit the model --- use \code{data} to pass the \code{size}
+parameter (since it wasn't hard-coded in the \code{mtmp}
+function):
+<<fit1,warning=FALSE>>=
+(m0 <- mle2(mtmp,start=list(prob=0.2,theta=9),data=list(size=50)))
+@
+(here and below, I'm suppressing lots of warnings about {\tt NaNs produced})
+
+The \code{summary} method for \code{mle2} objects
+shows the parameters; approximate standard
+errors (based on quadratic approximation to the curvature at
+the maximum likelihood estimate); and a test
+of the parameter difference from zero based on
+this standard error and on an assumption 
+that the likelihood surface is quadratic
+(or equivalently that the sampling distribution
+of the estimated parameters is normal).
+
+<<sum1>>=
+summary(m0)
+@ 
+
+Construct the likelihood profile (you can
+apply \code{confint} directly to \code{m0},
+but if you're going to work with the likelihood
+profile [e.g. plotting, or looking for confidence
+intervals at several different $\alpha$ values]
+then it is more efficient to compute the profile
+once):
+
+<<prof1,cache=TRUE,warning=FALSE>>=
+p0 <- profile(m0)
+@ 
+
+Compare the confidence interval estimates based on
+inverting a spline fit to the profile (the default);
+based on the quadratic approximation at the
+maximum likelihood estimate; and based on
+root-finding to find the exact point where the
+profile crosses the critical level.
+
+<<confint1,warning=FALSE>>=
+confint(p0)
+confint(m0,method="quad")
+confint(m0,method="uniroot")
+@ 
+
+All three types of confidence limits are similar.
+
+Plot the profiles:
+<<profplot1,fig.height=5,fig.width=10,out.width="\\textwidth">>=
+par(mfrow=c(1,2))
+plot(p0,plot.confstr=TRUE)
+@ 
+
+By default, the plot method for 
+likelihood profiles displays the square root of the
+deviance difference
+(twice the difference in negative
+log-likelihood from the best fit), so it will
+be {\sf V}-shaped
+for cases where the quadratic approximation works well
+(as in this case).
+(For a better visual estimate of whether the profile
+is quadratic, use the \code{absVal=FALSE} option to the \code{plot}
+method.)
+
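+For a quick look at the signed version, a minimal sketch (reusing
+\code{p0} from above; not evaluated here):
+<<profplotsign,eval=FALSE>>=
+plot(p0,absVal=FALSE)
+@ 
+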
+You can also request confidence intervals
+calculated using \code{uniroot}, which may be more exact when
+the profile is not smooth enough to be modeled accurately
+by a spline.  However, this method is
+also more sensitive to numeric problems.
+
+Instead of defining an
+explicit function for \code{minuslogl}, 
+we can also use the formula interface.
+The formula interface assumes that
+the density function given (1) has \code{x} as
+its first argument (if the distribution is multivariate,
+then \code{x} should be a matrix of observations) and
+(2) has a \code{log} argument that will return
+the log-probability or log-probability density
+if \code{log=TRUE}.  Some of the extended functionality
+(prediction etc.) depends on the existence of 
+an \code{s}- variant function for the distribution
+that returns (at least) the mean and median as
+a function of the parameters
+(currently defined: \code{snorm}, \code{sbinom},
+\code{sbeta}, \code{snbinom}, \code{spois}).
+<<fit2,warning=FALSE>>=
+m0f <- mle2(x1~dbetabinom(prob,size=50,theta),
+            start=list(prob=0.2,theta=9),data=data.frame(x1))
+@ 
+Note that you must specify the data via the \code{data}
+argument when using the formula interface. This may be
+slightly more unwieldy than just pulling the data from your
+workspace when you are doing simple things, but in the long
+run it makes tasks like predicting new responses much simpler.
+
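+The formula interface also accepts user-defined density functions that
+follow the conventions above.  A minimal sketch (\code{dmydist} and its
+\code{rate} parameter are invented for illustration; not evaluated
+here):
+<<customdens,eval=FALSE>>=
+## 'x' is the first argument and there is a 'log' argument:
+dmydist <- function(x,rate,log=FALSE) dexp(x,rate,log=log)
+mle2(y~dmydist(rate),start=list(rate=1),
+     data=data.frame(y=rexp(100,rate=2)))
+@ 
+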
+It's convenient to use the formula interface
+to try out likelihood estimation on the
+transformed parameters:
+<<fit2f>>=
+m0cf <- mle2(x1~dbetabinom(prob=plogis(lprob),size=50,theta=exp(ltheta)),
+            start=list(lprob=0,ltheta=2),data=data.frame(x1))
+confint(m0cf,method="uniroot")
+confint(m0cf,method="spline")
+@ 
+
+In this case the answers from \code{uniroot}
+and \code{spline} (default) methods barely
+differ.
+
+\subsection{Real data (\emph{Orobanche}, \cite{Crowder1978})}
+Data are copied from the \code{aods3} package
+(but a copy is saved with the package to avoid depending on the
+ \code{aods3} package):
+<<orobdata>>=
+load(system.file("vignetteData","orob1.rda",package="bbmle"))
+summary(orob1)
+@ 
+
+Now construct a negative log-likelihood
+function that differentiates among groups:
+<<aodlikfun>>=
+ML1 <- function(prob1,prob2,prob3,theta,x) {
+  prob <- c(prob1,prob2,prob3)[as.numeric(x$dilution)]
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+@ 
+
+Results from \cite{Crowder1978}:
+<<crowdertab,echo=FALSE,results="asis">>=
+crowder.results <- matrix(c(0.132,0.871,0.839,78.424,0.027,0.028,0.032,-34.991,
+                            rep(NA,7),-34.829,
+                            rep(NA,7),-56.258),
+                          dimnames=list(c("prop diffs","full model","homog model"),
+                            c("prob1","prob2","prob3","theta","sd.prob1","sd.prob2","sd.prob3","NLL")),
+                          byrow=TRUE,nrow=3)
+latex(crowder.results,file="",table.env=FALSE,title="model")
+@
+
+<<aodfit1,cache=TRUE,warning=FALSE>>=
+(m1 <- mle2(ML1,start=list(prob1=0.5,prob2=0.5,prob3=0.5,theta=1),
+            data=list(x=orob1)))
+@ 
+
+Or:
+<<eval=FALSE>>=
+## would prefer ~dilution-1, but problems with starting values ...
+(m1B <- mle2(m~dbetabinom(prob,size=n,theta),
+             parameters=list(prob~dilution),
+             start=list(prob=0.5,theta=1),
+    data=orob1))
+@ 
+The result warns us that the optimization has not
+converged; we also don't match
+Crowder's results for $\theta$ exactly.
+We can fix both of these problems by setting \code{parscale} appropriately.
+
+Since we don't bound $\theta$ (or, below, $\sigma$), we get a fair number
+of warnings with this and the next few fitting and profiling attempts.
+We will ignore these for now, since the final results reached are reasonable
+(and match or nearly match Crowder's values); the appropriate, careful thing
+to do would be either to fit on a transformed scale where all real-valued
+parameter values were legal, or to use \code{method="L-BFGS-B"} (or \code{method="bobyqa"} 
+with the \code{optimx} package) to bound the parameters appropriately.
+You can also use \code{suppressWarnings()} if you're sure you don't
+need to know about any warnings (beware: this will suppress \emph{all}
+warnings, those you weren't expecting as well as those you were \ldots)
+
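+A hedged sketch of the bounded alternative (the specific bounds here
+are invented for illustration; not evaluated):
+<<boundedfit,eval=FALSE>>=
+m2box <- mle2(ML1,start=as.list(coef(m1)),
+              method="L-BFGS-B",
+              lower=c(prob1=1e-4,prob2=1e-4,prob3=1e-4,theta=1e-4),
+              upper=c(prob1=1-1e-4,prob2=1-1e-4,prob3=1-1e-4,theta=Inf),
+              data=list(x=orob1))
+@ 
+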
+<<suppWarn,echo=FALSE>>=
+opts_chunk$set(warning=FALSE)
+@ 
+<<aodfit2,cache=TRUE>>=
+(m2 <- mle2(ML1,start=as.list(coef(m1)),
+          control=list(parscale=coef(m1)),
+          data=list(x=orob1)))
+@ 
+
+Calculate likelihood profile (restrict the upper limit
+of $\theta$, simply because it will make the picture
+below a little bit nicer):
+<<aodprof2,cache=TRUE>>=
+p2 <- profile(m2,prof.upper=c(Inf,Inf,Inf,theta=2000))
+@ 
+
+Get the curvature-based parameter standard
+deviations (which Crowder used
+rather than computing likelihood profiles):
+<<aodstderr>>=
+round(stdEr(m2),3)
+@ 
+We are slightly off Crowder's numbers --- rounding
+error?
+
+Crowder also defines a variance (overdispersion) parameter
+$\sigma^2=1/(1+\theta)$.
+<<aodvar>>=
+sqrt(1/(1+coef(m2)["theta"]))
+@ 
+
+Using the delta method (via the \code{deltavar}
+function in the \code{emdbook} package)
+to approximate the standard deviation of
+$\sigma$:
+<<deltavar>>=
+sqrt(deltavar(sqrt(1/(1+theta)),meanval=coef(m2)["theta"],
+         vars="theta",Sigma=vcov(m2)[4,4]))
+@ 
+
+Another way to fit in terms of $\sigma$ rather than $\theta$
+is to compute $\theta=1/\sigma^2-1$ on the fly in a
+formula:
+
+<<sigma3>>=
+m2b <- mle2(m~dbetabinom(prob,size=n,theta=1/sigma^2-1),
+            data=orob1,
+            parameters=list(prob~dilution,sigma~1),
+            start=list(prob=0.5,sigma=0.1))
+## ignore warnings (we haven't bothered to bound sigma<1)
+round(stdEr(m2b)["sigma"],3)
+p2b <- profile(m2b,prof.lower=c(-Inf,-Inf,-Inf,0))
+@ 
+
+As might be expected since the standard deviation
+of $\sigma$ is large, the quadratic approximation is
+poor:
+
+<<compquad>>=
+r1 <- rbind(confint(p2)["theta",],
+            confint(m2,method="quad")["theta",])
+rownames(r1) <- c("spline","quad")
+r1
+@ 
+
+Plot the profile:
+<<profplottheta>>=
+plot(p2,which="theta",plot.confstr=TRUE)
+@ 
+
+What does the profile for $\sigma$ look like?
+<<profplotsigma>>=
+plot(p2b,which="sigma",plot.confstr=TRUE,
+     show.points=TRUE)
+@ 
+
+Now fit a homogeneous model:
+<<homogmodel>>=
+ml0 <- function(prob,theta,x) {
+  size <- x$n
+  -sum(dbetabinom(x$m,prob,size,theta,log=TRUE))
+}
+m0 <- mle2(ml0,start=list(prob=0.5,theta=100),
+          data=list(x=orob1))
+@ 
+
+The log-likelihood matches Crowder's result:
+<<logLikcomp>>=
+logLik(m0)
+@ 
+
+It's easier to 
+use the formula interface
+to specify all three of the models
+fitted by Crowder (homogeneous, probabilities differing
+by group, probabilities and overdispersion differing
+by group):
+
+<<formulafit>>=
+m0f <- mle2(m~dbetabinom(prob,size=n,theta),
+            parameters=list(prob~1,theta~1),
+            data=orob1,
+            start=list(prob=0.5,theta=100))
+m2f <- update(m0f,
+              parameters=list(prob~dilution,theta~1),
+              start=list(prob=0.5,theta=78.424))
+m3f <- update(m0f,
+              parameters=list(prob~dilution,theta~dilution),
+              start=list(prob=0.5,theta=78.424))
+@ 
+
+\code{anova} runs a likelihood ratio test on nested
+models:
+<<anovafit>>=
+anova(m0f,m2f,m3f)
+@ 
+
+The various \code{ICtab} commands produce tables of
+information criteria, optionally sorted and
+with model weights.
+<<ICtabfit>>=
+AICtab(m0f,m2f,m3f,weights=TRUE,delta=TRUE,sort=TRUE)
+BICtab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+AICctab(m0f,m2f,m3f,delta=TRUE,nobs=nrow(orob1),sort=TRUE,weights=TRUE)
+@ 
+<<reWarn,echo=FALSE>>=
+opts_chunk$set(warning=TRUE) ## restore warnings suppressed above
+@ 
+
+\section{Example: reed frog size predation}
+
+Data from an experiment by Vonesh \citep{VoneshBolker2005}:
+<<frogsetup>>=
+frogdat <- data.frame(
+  size=rep(c(9,12,21,25,37),each=3),
+  killed=c(0,2,1,3,4,5,rep(0,4),1,rep(0,4)))
+frogdat$initial <- rep(10,nrow(frogdat))
+@ 
+
+<<getgg>>=
+library(ggplot2)
+@ 
+
+<<gg1>>=
+gg1 <- ggplot(frogdat,aes(x=size,y=killed))+geom_point()+
+      stat_sum(aes(size=factor(..n..)))+
+      labs(size="#")+scale_x_continuous(limits=c(0,40))
+@ 
+
+<<frogfit1,cache=TRUE,warning=FALSE>>=
+m3 <- mle2(killed~dbinom(prob=c*(size/d)^g*exp(1-size/d),
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat <- data.frame(size=1:40,initial=rep(10,40))
+pdat1 <- data.frame(pdat,killed=predict(m3,newdata=pdat))
+@
+
+<<frogfit2,cache=TRUE,warning=FALSE>>=
+m4 <- mle2(killed~dbinom(prob=c*((size/d)*exp(1-size/d))^g,
+  size=initial),data=frogdat,start=list(c=0.5,d=5,g=1))
+pdat2 <- data.frame(pdat,killed=predict(m4,newdata=pdat))
+@ 
+
+<<gg1plot>>=
+gg1 + geom_line(data=pdat1,colour="red")+
+      geom_line(data=pdat2,colour="blue")
+@ 
+
+<<frogfit2anal,cache=TRUE,warning=FALSE>>=
+coef(m4)
+prof4 <- profile(m4)
+@ 
+
+Three different ways to draw the profile:
+
+(1) Built-in method (base graphics):
+<<basegraphprofplot>>=
+plot(prof4)
+@ 
+
+(2) Using \code{xyplot} from the \code{lattice} package:
+\setkeys{Gin}{width=\textwidth}
+<<latticeprof,fig.height=5,fig.width=10,out.width="\\textwidth">>=
+prof4_df <- as.data.frame(prof4)
+library(lattice)
+xyplot(abs(z)~focal|param,data=prof4_df,
+       subset=abs(z)<3,
+       type="b",
+       xlab="",
+       ylab=expression(paste(abs(z),
+           " (square root of ",Delta," deviance)")),
+       scales=list(x=list(relation="free")),
+       layout=c(3,1))
+@ 
+
+(3) Using \code{ggplot} from the \code{ggplot2} package:
+<<ggplotprof,fig.height=5,fig.width=10>>=
+ss <- subset(prof4_df,abs(z)<3)
+ggplot(ss,
+       aes(x=focal,y=abs(z)))+geom_line()+
+      geom_point()+
+      facet_grid(.~param,scale="free_x")
+@ 
+
+\section*{Additions/enhancements/differences from \code{stats4::mle}}
+\begin{itemize}
+\item{\code{anova} method}
+\item{warnings on convergence failure}
+\item{more robust to non-positive-definite Hessian;
+  can also specify \code{skip.hessian} to skip Hessian
+  computation when it is problematic}
+\item{when profiling fails because better value is
+    found, report new values}
+\item{can take named vectors as well as lists as
+    starting parameter vectors}
+\item{added \code{AICc}, \code{BIC} definitions,
+    \code{ICtab} functions}
+\item{added \code{"uniroot"} and \code{"quad"}
+    options to \code{confint}}
+\item{more options for colors, line types, etc.
+The old arguments of the profile \code{plot} method are:
+<<oldargs,eval=FALSE>>=
+function (x, levels, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+          absVal = TRUE, ...) {}
+@ 
+The new one is:
+<<newargs,eval=FALSE>>=
+function (x, levels, which=1:p, conf = c(99, 95, 90, 80, 50)/100, nseg = 50,
+          plot.confstr = FALSE, confstr = NULL, absVal = TRUE, add = FALSE,
+          col.minval="green", lty.minval=2,
+          col.conf="magenta", lty.conf=2,
+          col.prof="blue", lty.prof=1,
+          xlabs=nm, ylab="score",
+          show.points=FALSE,
+          main, xlim, ylim, ...) {}
+@ 
+\code{which} selects (by character vector or numbers)
+which parameters to plot; \code{nseg} does nothing
+(even in the old version); \code{plot.confstr} turns on
+the labels for the confidence levels; \code{confstr} gives
+the labels; \code{add} specifies whether to add the
+profile to an existing plot; \code{col} and \code{lty}
+options specify the colors and line types for
+horizontal and vertical lines marking the minimum
+and confidence vals and the profile curve; \code{xlabs}
+gives a vector of x labels; \code{ylab} gives the y label;
+\code{show.points} specifies whether to show the raw points
+computed.
+}
+\item{\code{mle2.options()}}
+\item{\code{data} argument}
+\item{handling of names in argument lists}
+\item{can use alternative optimizers (\code{nlminb}, \code{nlm}, \code{constrOptim}, \code{optimx},
+    \code{optimize})}
+\item{uses code from \code{numDeriv} package to compute Hessians rather than
+    built-in optimizer code}
+\item{by default, uses \code{MASS::ginv} (generalized inverse) rather than \code{solve} to invert
+    Hessian (more robust to positive-semidefinite Hessians \ldots)}
+\item{can use \code{vecpar=TRUE} (and \code{parnames()}) to use objective functions with parameters
+    specified as vectors (for compatibility with \code{optim} etc.)}
+\end{itemize}
+
+\section{Newer stuff}
+
+\textbf{To do:}
+\begin{itemize}
+\item{use \code{predict}, \code{simulate} etc.
+    to demonstrate different parametric bootstrap approaches
+    to confidence and prediction intervals
+    \begin{itemize}
+    \item use \code{predict} to get means and standard
+      deviations, use delta method?
+    \item use \code{vcov}, assuming quadratic profiles,
+      with \code{predict(\ldots,newparams=\ldots)}
+    \item prediction intervals assuming no parameter uncertainty
+      with \code{simulate}
+    \item both together \ldots
+    \end{itemize}
+  }
+\end{itemize}
+
+
+\section{Technical details}
+
+\subsection{Profiling and confidence intervals}
+
+This section describes the algorithm for constructing profiles
+and confidence intervals, which is not otherwise documented anywhere
+except in the code.  * indicates changes from the version
+in \code{stats4::mle}.
+
+\subsubsection{Estimating standard error}
+
+In order to construct the profile for a particular parameter, one
+needs an initial estimate of the scale over which to vary that
+parameter.  The estimated standard error of the parameter based
+on the estimated curvature of the likelihood surface at the MLE
+is a good guess; a schematic sketch of this logic follows the list.
+\begin{itemize}
+\item if \code{std.err} is missing, extract the 
+  standard error from the summary coefficient table (ultimately computed from 
+  \code{sqrt(diag(inverse Hessian))} of the fit)
+\item * a user-set value of \code{std.err} overrides this behavior 
+  unless the value is specified as \code{NA} (in which
+  case the estimate from the previous step is used)
+\item * if the standard error value is still \code{NA} (i.e. the
+  user did not specify it and the value estimated from the Hessian
+  is missing or \code{NA}) use \code{sqrt(1/diag(hessian))}. This
+  represents a (fairly feeble) attempt to come up with a plausible number
+  when the Hessian is not positive definite but still has positive diagonal
+  entries
+\item if all else fails, stop and * print an error message that encourages
+  the user to specify the values with \code{std.err}
+\end{itemize}
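+
+A schematic sketch of this fallback logic (an illustration only, with a
+hypothetical helper name; this is \emph{not} the actual internal code):
+<<stderrsketch,eval=FALSE>>=
+guess.std.err <- function(hess, std.err=NA) {
+    if (!all(is.na(std.err))) return(std.err)  ## user-set value wins
+    ## curvature-based estimate from the inverse Hessian
+    se <- try(suppressWarnings(sqrt(diag(solve(hess)))), silent=TRUE)
+    if (inherits(se, "try-error")) se <- rep(NA_real_, nrow(hess))
+    bad <- !is.finite(se)
+    ## (fairly feeble) fallback for non-positive-definite Hessians
+    se[bad] <- suppressWarnings(sqrt(1/diag(hess)))[bad]
+    if (any(!is.finite(se)))
+        stop("can't guess a profiling scale; please specify 'std.err'")
+    se
+}
+@ 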
+  
+There may be further tricks that would help guess the appropriate scale:
+for example, one could guess on the basis of a comparison between the
+parameter values and negative log-likelihoods at the starting and ending points
+of the fits.  On the other hand, (a) this would take some effort and
+still be subject to failure for sufficiently pathological fits and (b) there
+is some value to forcing the user to take explicit, manual steps to remedy
+such problems, as they may be signs of poorly defined or buggy log-likelihood
+functions.
+
+\subsubsection{Profiling}
+  
+Profiling is done on the basis of a constructed function that minimizes
+the negative log-likelihood for a fixed value of the focal parameter and
+returns the signed square-root of the deviance difference from the 
+minimum (denoted by $z$).  At the MLE $z=0$ by definition; it should never
+be $<0$ unless something has gone wrong with the original fit. The LRT significance
+cutoffs for $z$ are equal to the usual two-tailed normal distribution cutoffs
+(e.g. $\pm \approx 1.96$ for 95\% confidence regions).
+  
+In each direction (decreasing and increasing from the MLE for the focal parameter):
+\begin{itemize}
+\item fix the focal parameter
+\item adjust control parameters etc. accordingly (e.g. remove the
+  entry for the focal parameter so that the remaining control
+  parameters match the non-fixed parameters)
+\item{controls on the profiling (which can be set manually, but for which there
+    is not much guidance in the documentation):
+    \begin{itemize}
+    \item \code{zmax} Maximum $z$ to aim for. (Default: \code{sqrt(qchisq(1-alpha/2, p))})
+      The default maximum $\alpha$ (type~I error) is 0.01
+      (see the numerical example after this list).
+      \bbnote{I don't understand this
+        criterion. It seems to expand the size of the univariate profile
+        to match a cutoff for the multivariate confidence region of
+        the model.  The $\chi^2$ cutoff for deviance to get the $(1-\alpha)$ 
+        multivariate confidence region (i.e.,
+        on all $p$ of the parameters) would be \code{qchisq(1-alpha,p)} --- %
+        representing a one-tailed test on the deviance.  Taking the square root
+        makes sense, since we are working with the square root of the deviance,
+        but I don't understand (1) why we are expanding the region to allow
+        for the multivariate confidence region (since we are computing univariate
+        profiles) [you could at least argue that this is conservative, making
+        the region a little bigger than it needs to be]; (2) why we are
+        using $1-\alpha/2$ rather than $1-\alpha$.
+      }
+      For comparison, \code{MASS::profile.glm} (written by Bates and Venables in
+      1996, ported to R by BDR in 1998) uses \code{zmax}=\code{sqrt(qchisq(1-alpha,1))}
+      \bbnote{(this makes more sense to me \ldots)}.
+      On the other hand, the profiling code in \code{lme4a} (the \code{profile}
+      method for \code{merMod}, in \code{profile.R}) uses
+      \code{qchisq(1-alphamax, nptot)} \ldots
+    \item \code{del} Step size (scaled by standard error) (Default: \code{zmax}/5.)
+      Presumably (?) copied from \code{MASS::profile.glm}, 
+      which says (in \code{?profile.glm}):
+      ``[d]efault value chosen to allow profiling at about 10 parameter
+      values.''
+    \item \code{maxsteps} Maximum number of profiling steps to try in each direction. (Default: 100)
+    \end{itemize}
+  }
+\item While \verb+step<maxsteps+ and \verb+abs(z) < zmax+, set the value of the focal
+  parameter to its MLE + \code{sgn*step*del*std.err} where \code{sgn} represents
+  the direction, \code{step} is the current (integer) step, and \code{del} and 
+  \code{std.err} are the step size scaling factor and standard error estimate
+  discussed above (i.e. take steps of size (\code{del*std.err}) in the appropriate direction);
+  evaluate $z$
+\item{Stop the profiling: 
+    \begin{itemize}
+    \item if $z$ doesn't change from the previous step (\verb+stop_flat+) %
+      --- unless \verb+try_harder+ is \code{TRUE}
+    \item * stop if $z$ is less than \code{tol.newmin} (default: 0.001) units 
+      \emph{better} than the MLE fit, i.e. $z<-\mbox{\code{tol.newmin}}$
+      (if $-\mbox{\code{tol.newmin}}<z<0$, set $z$ to zero) (\verb+newpars_found+)
+    \item if $z$ is \code{NA} (\verb+stop_na+)  --- unless \verb+try_harder+ is \code{TRUE}
+    \item if $z$ is beyond \code{zmax} (i.e., we have reached our goal: \verb+stop_cutoff+)
+    \item if \code{step==maxsteps}
+    \item if the focal parameter has hit its upper/lower bound (\verb+stop_bound+)
+    \end{itemize}
+  }
+\item if we have hit the maximum number of steps but not reached the cutoff
+  (\verb+stop_maxstep+ but not \verb+stop_cutoff+), ``try a bit harder'':
+  go \emph{almost} one more \code{del*std.err} unit out (in intervals
+  of 0.2, 0.4, 0.6, 0.8, 0.9) (\bbnote{also seems reasonable but don't
+    know where it comes from})
+\item * if we violated the boundary but did not reach the cutoff
+  (\verb+!stop_cutoff && stop_bound+), evaluate $z$ at the boundary
+\item if we got to the cutoff in $<5$ steps, try smaller steps:
+  start at \code{step=0.5} and proceed to
+  \code{mxstep-0.5} in unit increments
+  (rather than the original scale which went from 0 to \code{mxstep}).
+  (\bbnote{
+    Again, it seems reasonable, but I don't know what the original justification
+    was \ldots})
+\end{itemize}
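+
+For concreteness, the default \code{zmax} for a model with $p=4$
+parameters (as for \code{m2} above), compared with the
+\code{MASS::profile.glm} convention:
+<<zmaxdefault>>=
+c(bbmle=sqrt(qchisq(1-0.01/2,4)), MASS=sqrt(qchisq(1-0.01,1)))
+@ 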
+
+\subsubsection{Confidence intervals}
+
+We are looking for the values where $z$ (signed square root deviance
+difference) is equal to the usual two-tailed normal distribution cutoffs
+for a specified $\alpha$ level, e.g. $z=\pm 1.96$ for 95\% confidence
+intervals (this is equivalent to a one-tailed test on the deviance
+difference with the cutoff value for $\chi^2_1$).
+
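+A quick numerical check of this equivalence:
+<<zcutoffcheck>>=
+c(qnorm(1-0.05/2), sqrt(qchisq(0.95,1)))  ## both approximately 1.96
+@ 
+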
+\begin{description}
+\item[Spline method]{(default)
+    \begin{itemize}
+    \item If necessary (i.e. if applied to a fitted object and not 
+      to an existing profile), construct the profile
+    \item * If the profile of the signed square root is non-monotonic,
+      warn the user and revert to linear approximation on the profiled points
+      to find the cutoffs.
+    \item Otherwise, build an interpolation spline of $z$ (signed square root deviance
+      difference) based on profiled points (the default is $n=3 \times L$
+      where $L$ is the length of the original vector). Then
+      use linear approximation on the $y$ ($z$) and $x$ (focal
+      parameter value) of the spline
+      to find the cutoffs (\bbnote{Why construct a spline and then interpolate linearly?  Why
+        not use \code{backSpline} as in the profile plotting code?})
+      \end{itemize}
+    }
+  \item[Quad method]{Use a quadratic approximation based
+      on the estimated curvature (this is almost identical to
+      using \code{confint.default}, and perhaps obsolete/could
+      be replaced by a pointer to \code{confint.default} \ldots)
+    }
+  \item[Uniroot]{
+      For each direction (up and down):
+      \begin{itemize}
+      \item start by stepping 5 $\sigma$ away from the
+        MLE, or to the box constraint on the parameter,
+        whichever is closer (\bbnote{this standard error is based on the
+          curvature; I should allow it, or the intervals themselves,
+          to be overridden via a \code{std.err} or \code{interval}
+          parameter})
+      \item compute the difference between the deviance and
+        the desired deviance cutoff at this point;
+        if it is \code{NA}, reduce the distance in steps
+        of 0.25 $\sigma$ until it is not, until you reduce
+        the distance to zero
+      \item if the product of the deviance differences at the MLE
+        and at the point you stopped at is \code{NA} or positive
+        (indicating that you didn't find a root-crossing in the
+        range $[0,5\sigma]$), quit.
+      \item otherwise, apply \code{uniroot} across this interval
+      \end{itemize}
+      
+      \code{method="uniroot"} should give the most accurate results, 
+      especially when the profile is wonky (it won't care about non-smooth
+      profiles), but it will be the slowest --- and different confidence
+      levels will have to be computed individually, whereas multiple
+      confidence levels can be computed quickly from a single computed
+      profile.  A cruder approach would be to use profiling but decrease
+      \code{std.err} a lot so that the profile points were very closely
+      spaced.
+    }
+  \end{description}
+
+\subsubsection{Profile plotting}
+
+Plot the signed (or unsigned) square root deviance difference, and ($1-\alpha$) confidence regions/critical
+values designated by \code{conf} (default: $\{0.99,0.95,0.9,0.8,0.5\}$).
+
+\begin{itemize}
+\item * If the (signed) profile is non-monotonic, simply plot  
+  computed points with \code{type="l"} (i.e., with the default linear interpolation)
+\item Construct the interpolation spline (using \code{splines:::interpSpline}
+  rather than \code{spline} as in the confidence interval method)
+  (\bbnote{why this difference?})
+\item attempt to construct the inverse of the interpolation spline (using
+  \code{backSpline}; see the sketch below)
+\item * if this fails warn the user (assume this was due to non-monotonicity)
+  and try to use \code{uniroot} and \code{predict} to find cutoff values
+\item otherwise, use the inverse spline to find cutoff values
+\end{itemize}
+\bbnote{Why is there machinery in the plotting code to find confidence intervals?
+  Shouldn't this call \code{confint}, for consistency/fewer points of failure?}
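+
+A self-contained sketch of the inversion step (with invented,
+monotonic profile values standing in for a real profile):
+<<backsplinesketch,eval=FALSE>>=
+library(splines)
+x <- seq(0.05,0.20,length.out=10)     ## hypothetical focal values
+z <- (x-0.12)/0.02                    ## hypothetical signed sqrt(deviance)
+bsp <- backSpline(interpSpline(x,z))  ## inverse spline: z -> focal value
+predict(bsp,c(-1.96,1.96))$y          ## approximate 95% cutoffs
+@ 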
+
+\section*{Bugs, wishes, to do}
+\begin{itemize}
+\item \textbf{WISH}: further methods and arguments: \code{subset},
+  \code{predict}, \code{resid}; \code{sim}?
+\item \textbf{WISH}: extend ICtab to allow DIC as well?
+\item minor \textbf{WISH}: 
+  better methods for extracting \code{nobs} information
+  when possible (e.g. with formula interface)
+\item \textbf{WISH}: better documentation, especially for S4 methods
+\item \textbf{WISH}: variable-length (and shaped) chunks in argument list -- cleaner division
+  between linear model specs/list of arguments/vector equivalent
+\item \textbf{WISH}: limited automatic differentiation
+  (add capability for common distributions)
+\item \textbf{WISH}: store \code{objectivefunction}
+  and \code{objectivefunctiongr} (vectorized objective/gradient
+  functions) in the \code{mle2} object (will break backward compatibility!!);
+  add accessors for these and for \code{minuslogl}
+\item \textbf{WISH}: document use of the objective function in \code{MCMCpack}
+  to do \emph{post hoc} MCMC sampling (or write my own Metropolis-Hastings
+  sampler \ldots)
+\item \textbf{WISH}: polish profile plotting, with lattice or ggplot2
+  methods
+\item \textbf{WISH}: add in/document/demonstrate ``slice'' capabilities
+\item \textbf{WISH}: refactor profiling to use stored objective functions
+  rather than re-calling \code{mle2} with \code{fixed} values mucked around
+  with in the calls???  Strip out and make generic for vectorized objective function?
+  (\code{profileModel} package only works for glm-like objects, with a linear predictor)
+\end{itemize}
+
+\bibliography{mle2}
+\end{document}
diff --git a/vignettes/mle2.bib b/vignettes/mle2.bib
new file mode 100755
index 0000000..1e3aa0c
--- /dev/null
+++ b/vignettes/mle2.bib
@@ -0,0 +1,19 @@
+ at ARTICLE{VoneshBolker2005,
+  author = {James R. Vonesh and Benjamin M. Bolker},
+  title = {Compensatory larval responses shift tradeoffs associated with predator-induced
+	hatching plasticity},
+  journal = {Ecology},
+  year = {2005},
+  volume = {86},
+  pages = {1580-1591},
+  number = {6}
+}
+
+ at ARTICLE{Crowder1978,
+  author = {Crowder, M. J.},
+  title = {Beta-binomial {Anova} for proportions},
+  journal = {Applied Statistics},
+  year = {1978},
+  volume = {27},
+  pages = {34-37}
+}
diff --git a/vignettes/quasi.Rnw b/vignettes/quasi.Rnw
new file mode 100755
index 0000000..3ce84e8
--- /dev/null
+++ b/vignettes/quasi.Rnw
@@ -0,0 +1,191 @@
+\documentclass{article}
+%\VignettePackage{mle2}
+%\VignetteIndexEntry{quasi: notes on quasi-likelihood/qAIC analysis in R}
+%\VignetteDepends{MuMIn,AICcmodavg}
+%\VignetteEngine{knitr::knitr}
+
+\usepackage{graphicx}
+\usepackage{url}
+\newcommand{\code}[1]{{\tt #1}}
+\title{Dealing with \code{quasi-} models in R}
+\date{\today}
+\author{Ben Bolker}
+\begin{document}
+\maketitle
+
+\includegraphics[width=2.64cm,height=0.93cm]{cc-attrib-nc.png}
+\begin{minipage}[b]{3in}
+{\tiny Licensed under the Creative Commons 
+  attribution-noncommercial license
+(\url{http://creativecommons.org/licenses/by-nc/3.0/}).
+Please share \& remix noncommercially,
+mentioning its origin.}
+\end{minipage}
+
+<<opts,echo=FALSE>>=
+if (require("knitr")) opts_chunk$set(tidy=FALSE)
+@ 
+Computing ``quasi-AIC'' (QAIC) in R is a minor
+pain, because the R Core team (or at least the ones who wrote \code{glm},
+\code{glmmPQL}, etc.)
+are purists and don't believe that quasi- models should report a likelihood.
+As far as I know, there are three R packages that compute/handle
+QAIC: \code{bbmle}, \code{AICcmodavg} (both on CRAN) and \code{MuMIn}
+(formerly known as \code{dRedging}, on r-forge).
+
+The basic problem is that quasi- model fits with \code{glm} return
+an \code{NA} for the log-likelihood, while the dispersion parameter
+($\hat c$, $\phi$, whatever you want to call it)
+is only reported for quasi- models.
+Various ways to get around this are:
+\begin{itemize}
+  \item{fit the model twice, once with a regular
+      likelihood model (\code{family=binomial}, \code{poisson}, etc.)
+      and once with the \code{quasi-} variant --- extract
+      the log-likelihood from the former and the dispersion parameter
+      from the latter}
+    \item{only fit the regular model; extract
+      the overdispersion parameter manually
+      with
+<<dfun>>=
+dfun <- function(object) {
+  with(object,sum((weights * residuals^2)[weights > 0])/df.residual)
+}
+@ 
+}
+\item{use the fact that quasi- fits still contain a deviance,
+    even if they set the log-likelihood to \code{NA}.  The deviance
+    is twice the negative log-likelihood (it's offset by some constant
+    which I haven't figured out yet, but it should still work
+    fine for model comparisons)}
+\end{itemize}
+
+The whole problem is worse for \code{MASS::glmmPQL}, where (1) the
+authors have gone to greater efforts to make sure that the (quasi-)deviance
+is no longer preserved anywhere in the fitted model, and (2) they
+may have done it for good reason --- it is not clear whether the
+number that would get left in the 'deviance' slot at the end of
+\code{glmmPQL}'s alternating \code{lme} and \code{glm} fits is
+even meaningful to the extent that regular QAICs are.  (For 
+discussion of a similar situation, see the \code{WARNING}
+section of \code{?gamm} in the \code{mgcv} package.)
+
+Example: use the values from one of the examples
+in \code{?glm}:
+
+<<dobdata>>=
+## Dobson (1990) Page 93: Randomized Controlled Trial :
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3,1,9)
+treatment <- gl(3,3)
+@ 
+
+Fit Poisson and quasi-Poisson models with all combinations
+of predictors:
+
+<<fitdob>>=
+glmOT.D93 <- glm(counts ~ outcome + treatment, family=poisson)
+glmO.D93  <- update(glmOT.D93, . ~ . - treatment)
+glmT.D93  <- update(glmOT.D93, . ~ . - outcome)
+glmX.D93  <- update(glmT.D93, . ~ . - treatment)
+glmQOT.D93 <- update(glmOT.D93, family=quasipoisson)
+glmQO.D93 <- update(glmO.D93, family=quasipoisson)
+glmQT.D93 <- update(glmT.D93, family=quasipoisson)
+glmQX.D93 <- update(glmX.D93, family=quasipoisson)
+@ 
+
+
+Extract log-likelihoods:
+<<dobll>>=
+(sum(dpois(counts,
+          lambda=exp(predict(glmOT.D93)),log=TRUE))) ## by hand
+(logLik(glmOT.D93))  ## from Poisson fit
+@ 
+
+The deviance (\code{deviance(glmOT.D93)}=\Sexpr{round(deviance(glmOT.D93),3)})
+is not the same as $-2L$ (\code{-2*logLik(glmOT.D93)}=\Sexpr{round(-2*c(logLik(glmOT.D93)),3)}),
+but the calculated differences in deviance are consistent,
+and are also extractable from the quasi- fit even though
+the log-likelihood is \code{NA}:
+<<dobll2>>=
+(-2*(logLik(glmT.D93)-logLik(glmOT.D93)))  ## Poisson fit
+(deviance(glmT.D93)-deviance(glmOT.D93))   ## Poisson fit
+(deviance(glmQT.D93)-deviance(glmQOT.D93)) ## quasi-fit
+@ 
+
+
+Compare hand-computed dispersion (in two ways)
+with the dispersion computed by \code{summary.glm()}
+on a quasi- fit:
+
+<<dobdisp>>=
+(dfun(glmOT.D93))
+(sum(residuals(glmOT.D93,"pearson")^2)/glmOT.D93$df.residual)
+(summary(glmOT.D93)$dispersion)
+(summary(glmQOT.D93)$dispersion)
+@ 
+
+
+\section*{Examples}
+
+\subsection*{\code{bbmle} package (Ben Bolker), CRAN/R-forge}
+
+<<bbmle>>=
+library(bbmle)
+(qAIC(glmOT.D93,dispersion=dfun(glmOT.D93)))
+(qAICc(glmOT.D93,dispersion=dfun(glmOT.D93),nobs=length(counts)))
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),type="qAIC")
+ICtab(glmOT.D93,glmT.D93,glmO.D93,glmX.D93,
+      dispersion=dfun(glmOT.D93),
+      nobs=length(counts),type="qAICc")
+detach("package:bbmle")
+@ 
+
+\subsection*{\code{AICcmodavg} package (Marc Mazerolle), CRAN}
+
+<<AICcmodavg>>=
+library(AICcmodavg)
+aictab(list(glmOT.D93,glmT.D93,glmO.D93,glmX.D93), 
+       modnames=c("OT","T","O","X"),
+       c.hat=dfun(glmOT.D93))
+detach("package:AICcmodavg")
+@ 
+
+\subsection*{\code{MuMIn} package (Kamil Barto{\'n})}
+
+<<MuMin>>=
+library(MuMIn); packageVersion("MuMIn")
+## from ?QAIC
+x.quasipoisson <- function(...) {
+    res <- quasipoisson(...)
+    res$aic <- poisson(...)$aic
+    res
+}
+glmQOT2.D93 <- update(glmOT.D93,family="x.quasipoisson",
+                      na.action=na.fail)
+(gg <-  dredge(glmQOT2.D93,rank="QAIC", chat=dfun(glmOT.D93)))
+(ggc <- dredge(glmQOT2.D93,rank="QAICc",chat=dfun(glmOT.D93)))
+detach("package:MuMIn")
+@ 
+
+Notes:
+\begin{itemize}
+\item \code{ICtab} only gives delta-IC, to a limited number of decimal
+  places (on purpose, but how do you change these defaults if you want
+  to?).
+\item You need to add 1 to the number of parameters to account for the
+  scale (dispersion) parameter (see the sketch after the table below).
+\item When computing corrected ICs you need to get the absolute number
+  of parameters right, not just the relative number \ldots
+\item I'm not sure which classes of models each of these packages will
+  handle (\code{lm}, \code{glm}, \code{(n)lme}, \code{lme4},
+  \code{mle2} \ldots).
+\item Remember to use the overdispersion parameter from the most
+  complex model.
+\item \code{glmmPQL} fits need to be hacked somewhat more severely
+  (they contain no deviance element, and \code{logLik} has been
+  \code{NA}'d out).
+\end{itemize}
+
+\begin{tabular}{l|ccccccc}
+  package & \code{lm} & \code{glm} & \code{(n)lme} & \code{multinom} & \code{polr} & \code{lme4} & \code{mle2} \\
+  \hline
+  \code{AICcmodavg} & y & y & y & y & y & ? & ? \\
+  \code{MuMIn}      & ? & ? & ? & ? & ? & ? & ? \\
+  \code{mle2 }      & ? & ? & ? & ? & ? & ? & ?
+\end{tabular}
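+
+Finally, a hedged sketch of the ``add 1'' bookkeeping, computing QAIC
+by hand under the convention $\mathrm{QAIC} = -2L/\hat{c} + 2(k+1)$
+(counting $\hat{c}$ as the extra parameter; conventions differ between
+packages):
+<<qaicbyhand>>=
+chat <- dfun(glmOT.D93)
+k <- attr(logLik(glmOT.D93),"df")
+-2*c(logLik(glmOT.D93))/chat + 2*(k+1)
+@ 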
+  
+\end{document}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-bbmle.git


