[med-svn] [r-cran-solrium] 01/05: New upstream version 1.0.0

Andreas Tille tille at debian.org
Thu Nov 9 13:36:48 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-solrium.

commit 51f39e777d9f582e385290f0975808bed09e433c
Author: Andreas Tille <tille at debian.org>
Date:   Thu Nov 9 14:26:34 2017 +0100

    New upstream version 1.0.0
---
 DESCRIPTION                              |   22 +-
 LICENSE                                  |    2 +-
 MD5                                      |  305 ++++-----
 NAMESPACE                                |   20 +-
 NEWS.md                                  |   45 +-
 R/SolrClient.R                           | 1013 ++++++++++++++++++++++++++++++
 R/add.R                                  |   90 ++-
 R/check_args_helpers.R                   |  102 +++
 R/collection_addreplica.R                |   38 +-
 R/collection_addreplicaprop.R            |   60 +-
 R/collection_addrole.R                   |   23 +-
 R/collection_balanceshardunique.R        |   57 +-
 R/collection_clusterprop.R               |   42 +-
 R/collection_clusterstatus.R             |   34 +-
 R/collection_create.R                    |   78 +--
 R/collection_createalias.R               |   31 +-
 R/collection_createshard.R               |   32 +-
 R/collection_delete.R                    |   27 +-
 R/collection_deletealias.R               |   33 +-
 R/collection_deletereplica.R             |   42 +-
 R/collection_deletereplicaprop.R         |   50 +-
 R/collection_deleteshard.R               |   35 +-
 R/collection_exists.R                    |   31 +-
 R/collection_list.R                      |   23 +-
 R/collection_migrate.R                   |   41 +-
 R/collection_overseerstatus.R            |   26 +-
 R/collection_rebalanceleaders.R          |   52 +-
 R/collection_reload.R                    |   26 +-
 R/collection_removerole.R                |   28 +-
 R/collection_requeststatus.R             |   38 +-
 R/collection_splitshard.R                |   44 +-
 R/collections.R                          |   39 +-
 R/commit.R                               |   48 +-
 R/config_get.R                           |   33 +-
 R/config_overlay.R                       |   29 +-
 R/config_params.R                        |   65 +-
 R/config_set.R                           |   43 +-
 R/connect.R                              |  164 -----
 R/content_types.R                        |   12 +
 R/core_create.R                          |   69 +-
 R/core_exists.R                          |   38 +-
 R/core_mergeindexes.R                    |   34 +-
 R/core_reload.R                          |   25 +-
 R/core_rename.R                          |   45 +-
 R/core_requeststatus.R                   |   22 +-
 R/core_split.R                           |   40 +-
 R/core_status.R                          |   31 +-
 R/core_swap.R                            |   59 +-
 R/core_unload.R                          |   49 +-
 R/delete.R                               |   57 +-
 R/optimize.R                             |   48 +-
 R/parsers.R                              |   60 +-
 R/ping.R                                 |   47 +-
 R/schema.R                               |   47 +-
 R/search_route_keys.R                    |   46 ++
 R/solr_all.r                             |   87 ++-
 R/solr_facet.r                           |  157 +++--
 R/solr_get.R                             |   43 +-
 R/solr_group.r                           |  126 ++--
 R/solr_highlight.r                       |   78 +--
 R/solr_mlt.r                             |   70 +--
 R/solr_search.r                          |  186 +++---
 R/solr_stats.r                           |   70 +--
 R/solrium-package.R                      |   30 +-
 R/update_atomic_json.R                   |   56 ++
 R/update_atomic_xml.R                    |   66 ++
 R/update_csv.R                           |   56 +-
 R/update_json.R                          |   47 +-
 R/update_xml.R                           |   46 +-
 R/zzz.r                                  |  227 ++++---
 README.md                                |  481 ++++++++------
 build/vignette.rds                       |  Bin 291 -> 292 bytes
 inst/doc/local_setup.Rmd                 |    4 +-
 inst/doc/local_setup.html                |    6 +-
 man/SolrClient.Rd                        |  155 +++++
 man/add.Rd                               |   36 +-
 man/collapse_pivot_names.Rd              |    1 -
 man/collectargs.Rd                       |   15 -
 man/collection_addreplica.Rd             |   28 +-
 man/collection_addreplicaprop.Rd         |   42 +-
 man/collection_addrole.Rd                |   13 +-
 man/collection_balanceshardunique.Rd     |   50 +-
 man/collection_clusterprop.Rd            |   32 +-
 man/collection_clusterstatus.Rd          |   29 +-
 man/collection_create.Rd                 |   39 +-
 man/collection_createalias.Rd            |   23 +-
 man/collection_createshard.Rd            |   27 +-
 man/collection_delete.Rd                 |   19 +-
 man/collection_deletealias.Rd            |   23 +-
 man/collection_deletereplica.Rd          |   23 +-
 man/collection_deletereplicaprop.Rd      |   43 +-
 man/collection_deleteshard.Rd            |   25 +-
 man/collection_exists.Rd                 |   17 +-
 man/collection_list.Rd                   |   18 +-
 man/collection_migrate.Rd                |   30 +-
 man/collection_overseerstatus.Rd         |   21 +-
 man/collection_rebalanceleaders.Rd       |   46 +-
 man/collection_reload.Rd                 |   19 +-
 man/collection_removerole.Rd             |   22 +-
 man/collection_requeststatus.Rd          |   35 +-
 man/collection_splitshard.Rd             |   36 +-
 man/collections.Rd                       |   25 +-
 man/commit.Rd                            |   31 +-
 man/config_get.Rd                        |   27 +-
 man/config_overlay.Rd                    |   13 +-
 man/config_params.Rd                     |   40 +-
 man/config_set.Rd                        |   23 +-
 man/core_create.Rd                       |   48 +-
 man/core_exists.Rd                       |   22 +-
 man/core_mergeindexes.Rd                 |   25 +-
 man/core_reload.Rd                       |   18 +-
 man/core_rename.Rd                       |   39 +-
 man/core_requeststatus.Rd                |   14 +-
 man/core_split.Rd                        |   31 +-
 man/core_status.Rd                       |   22 +-
 man/core_swap.Rd                         |   50 +-
 man/core_unload.Rd                       |   29 +-
 man/delete.Rd                            |   31 +-
 man/is-sr.Rd                             |    1 -
 man/makemultiargs.Rd                     |    5 +-
 man/optimize.Rd                          |   48 --
 man/ping.Rd                              |   28 +-
 man/pivot_flatten_tabular.Rd             |    1 -
 man/schema.Rd                            |   44 +-
 man/solr_all.Rd                          |  182 +++---
 man/solr_connect.Rd                      |   58 --
 man/solr_facet.Rd                        |  366 +++++------
 man/solr_get.Rd                          |   33 +-
 man/solr_group.Rd                        |  205 +++---
 man/solr_highlight.Rd                    |  316 ++++------
 man/solr_mlt.Rd                          |  140 ++---
 man/solr_optimize.Rd                     |   50 ++
 man/solr_parse.Rd                        |   21 +-
 man/solr_search.Rd                       |  266 ++++----
 man/solr_stats.Rd                        |   90 +--
 man/solrium-package.Rd                   |   46 +-
 man/update_atomic_json.Rd                |   72 +++
 man/update_atomic_xml.Rd                 |   78 +++
 man/update_csv.Rd                        |   45 +-
 man/update_json.Rd                       |   44 +-
 man/update_xml.Rd                        |   48 +-
 tests/testthat/helper-solrium.R          |   18 +
 tests/testthat/test-add.R                |   42 ++
 tests/testthat/test-client.R             |   46 ++
 tests/testthat/test-collections.R        |   47 ++
 tests/testthat/test-core_create.R        |   15 +-
 tests/testthat/test-delete.R             |   55 ++
 tests/testthat/test-errors.R             |   51 +-
 tests/testthat/test-ping.R               |   57 +-
 tests/testthat/test-schema.R             |   50 +-
 tests/testthat/test-solr_all.R           |  120 ++--
 tests/testthat/test-solr_connect.R       |   50 --
 tests/testthat/test-solr_error.R         |   41 +-
 tests/testthat/test-solr_facet.r         |   91 +--
 tests/testthat/test-solr_get.R           |   43 ++
 tests/testthat/test-solr_goup.R          |   59 ++
 tests/testthat/test-solr_group.r         |   39 --
 tests/testthat/test-solr_highlight.r     |   44 +-
 tests/testthat/test-solr_mlt.r           |   91 ++-
 tests/testthat/test-solr_search.r        |  151 +++--
 tests/testthat/test-solr_settings.R      |   31 -
 tests/testthat/test-solr_stats.r         |   95 +--
 tests/testthat/test-update_atomic_json.R |   41 ++
 tests/testthat/test-update_atomic_xml.R  |   56 ++
 tests/testthat/test-update_csv.R         |   35 ++
 tests/testthat/test-update_json.R        |   33 +
 tests/testthat/test-update_xml.R         |   33 +
 vignettes/local_setup.Rmd                |    4 +-
 168 files changed, 6136 insertions(+), 4320 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 7595704..4c4de0c 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,24 +1,24 @@
 Package: solrium
 Title: General Purpose R Interface to 'Solr'
 Description: Provides a set of functions for querying and parsing data
-    from 'Solr' (<http://lucene.apache.org/solr>) 'endpoints' (local and 
-    remote), including search, 'faceting', 'highlighting', 'stats', and 
-    'more like this'. In addition, some functionality is included for 
+    from 'Solr' (<http://lucene.apache.org/solr>) 'endpoints' (local and
+    remote), including search, 'faceting', 'highlighting', 'stats', and
+    'more like this'. In addition, some functionality is included for
     creating, deleting, and updating documents in a 'Solr' 'database'.
-Version: 0.4.0
+Version: 1.0.0
 Authors at R: person("Scott", "Chamberlain", role = c("aut", "cre"),
     email = "myrmecocystus at gmail.com")
 License: MIT + file LICENSE
 URL: https://github.com/ropensci/solrium
-BugReports: http://www.github.com/ropensci/solrium/issues
+BugReports: https://github.com/ropensci/solrium/issues
 VignetteBuilder: knitr
-Imports: utils, dplyr (>= 0.5.0), plyr (>= 1.8.4), httr (>= 1.2.0),
-        xml2 (>= 1.0.0), jsonlite (>= 1.0), tibble (>= 1.2)
-Suggests: roxygen2 (>= 5.0.1), testthat, knitr, covr
-RoxygenNote: 5.0.1
+Imports: utils, dplyr (>= 0.5.0), plyr (>= 1.8.4), crul (>= 0.4.0),
+        xml2 (>= 1.0.0), jsonlite (>= 1.0), tibble (>= 1.2), R6
+Suggests: roxygen2 (>= 6.0.1), testthat, knitr
+RoxygenNote: 6.0.1
 NeedsCompilation: no
-Packaged: 2016-10-05 20:41:34 UTC; sacmac
+Packaged: 2017-11-02 01:57:53 UTC; sacmac
 Author: Scott Chamberlain [aut, cre]
 Maintainer: Scott Chamberlain <myrmecocystus at gmail.com>
 Repository: CRAN
-Date/Publication: 2016-10-06 00:52:32
+Date/Publication: 2017-11-02 09:42:40 UTC
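
The Imports change above swaps `httr` for `crul` and adds `R6`. Per the NEWS
entries further down, this should only be noticeable when specifying curl
options. A minimal sketch of how curl options now flow through the crul-based
client (the collection name and the `verbose` option are illustrative
assumptions, not taken from this diff):

    library("solrium")
    conn <- SolrClient$new()
    # curl options go in callopts as a named list; solrium hands them to
    # crul::HttpClient rather than to httr as in 0.4.0
    conn$search("gettingstarted",
                params = list(q = "*:*"),
                callopts = list(verbose = TRUE))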
diff --git a/LICENSE b/LICENSE
index d044f7e..37ee2c7 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,2 +1,2 @@
-YEAR: 2016
+YEAR: 2017
 COPYRIGHT HOLDER: Scott Chamberlain
diff --git a/MD5 b/MD5
index 96391d3..8fb5c1f 100644
--- a/MD5
+++ b/MD5
@@ -1,75 +1,80 @@
-1e3407357ace4dffd780087af36a5c6a *DESCRIPTION
-769bdbb0572f2eefda48945aefb690fc *LICENSE
-4b4dd872f4ac3702ae8353a23ca4d7de *NAMESPACE
-16b8215614efd12268d070eb742fcaa4 *NEWS.md
-042ae6c92cb790d73ae73895585ca5df *R/add.R
+a9f6b254fbb76129a4e190c097a5a991 *DESCRIPTION
+c5af52351472a750055a760a8924ce71 *LICENSE
+6147f3bce15f560ba361d3631e113af7 *NAMESPACE
+015fccc23f84fa8879f8480784c10bda *NEWS.md
+519f0da8c1e307e8c3ce6efcdecf8ffe *R/SolrClient.R
+51a24bb84d580db5ecd67f434299f211 *R/add.R
+74a70333074fe4456d00b6e2277e530f *R/check_args_helpers.R
 ed9b1328a6361812eb7f44a8dd71928e *R/classes.r
-5f5bc88650e588764e5141debd6c284e *R/collection_addreplica.R
-c14bc915d103fdf9cb03ded344636801 *R/collection_addreplicaprop.R
-8a4cc98f61507c44bb97c987d906c94b *R/collection_addrole.R
-e6f972fe99931651438f5c557be82650 *R/collection_balanceshardunique.R
-3bd66336fa6df09c446bb2fee1701189 *R/collection_clusterprop.R
-e68bf45e955ede48839742dd14168c4f *R/collection_clusterstatus.R
-d734674cd72eeac330c8a5f24b8270df *R/collection_create.R
-df0fc73b8f2a4632d99df219cd1c7b37 *R/collection_createalias.R
-1a1a44d204dd2f951ed41f81c7a8aa26 *R/collection_createshard.R
-34b6e2356b1ca290a2713f63bcb183cd *R/collection_delete.R
-8b4b8bf1500a0171540573297f5b07e4 *R/collection_deletealias.R
-2ffe5f55802ce4042fef3e42f15265e1 *R/collection_deletereplica.R
-04a0045ca626e752b1edb895e7f23eef *R/collection_deletereplicaprop.R
-61bbfca06860882c643c4aab54d0f9a6 *R/collection_deleteshard.R
-3f85403747851e8a57e4c9388feb729d *R/collection_exists.R
-9cf7f14e8ea90fcc766a6db3e7cbef9c *R/collection_list.R
-711e5820f74e4dbf32b51b1d7c3fd31c *R/collection_migrate.R
-0a9a7798bee29c2e02a98f685417d931 *R/collection_overseerstatus.R
-dcb8980942da90ce37b057728c8e7f00 *R/collection_rebalanceleaders.R
-b39f73c57f985efa81802ad53aaf79c6 *R/collection_reload.R
-91bc558e6e006dda14ec3518a928c302 *R/collection_removerole.R
-1f60721671157bf6b58120d2bce68561 *R/collection_requeststatus.R
-7cb83e55408c66aff7c63d5372271b92 *R/collection_splitshard.R
-87a4dfb2c17ca145eccccff511f97ad6 *R/collections.R
-f32d5e0c66238949fc85f75fc8ad8f4f *R/commit.R
-4a7331cf105712ad607cac87d2643dda *R/config_get.R
-59b698fe6d931839a7073ca29def07aa *R/config_overlay.R
-bff441cc3ecc45cae3705165e23c656b *R/config_params.R
-cecf41bf78cf8ade2ee413c7bded44be *R/config_set.R
-b120ccede9c7ccf32235a030c36e7774 *R/connect.R
-aa43dd790dc1cf5abab7089aa0745ef6 *R/core_create.R
-7eccfffac673bb7e133f10321ed3b8ce *R/core_exists.R
-5dfa47a191e16def0199d85ef3d20a53 *R/core_mergeindexes.R
-826e2180e7a88e0cf118a96fd3aadef7 *R/core_reload.R
-592592972b9bcfb63da73625863c73d2 *R/core_rename.R
-3882c8dc2b5b4948b5877f1e95e3264c *R/core_requeststatus.R
-0e6f9033e87ab7f916e40764f8b7378e *R/core_split.R
-a67847b5b53be428c7d90242d41113e4 *R/core_status.R
-d19e8f8c78a5e38d477845682e58f30f *R/core_swap.R
-d68d4430bbed63e103bc68176e65361f *R/core_unload.R
-61d914b7266ffa22ca25887b6a1b5968 *R/delete.R
-8983e299d349397b18629f99a21ae750 *R/optimize.R
-351dc1c8e530ad741325153869cc3131 *R/parsers.R
-209300e7b795ddd8cd5a2df42dcbedda *R/ping.R
-a3927f5509ec56b665f6c6eb12268c0e *R/schema.R
-b66ab779938427e1098b048724bb38b8 *R/solr_all.r
-571feb0c743e31d365e3bd9d7867bcc0 *R/solr_facet.r
-b5d70d74be22f43b32ba98cfc28ff58e *R/solr_get.R
-6369b7581dcb084806c0807d367262f2 *R/solr_group.r
-d4d431ced585c439b02f1948242a7b88 *R/solr_highlight.r
-669d2b52dc43ff033a8a38e8e3ec5b32 *R/solr_mlt.r
-4c12d8c43a9231dc2aef61f1d45d31f7 *R/solr_search.r
-f3ef00ab64fb6ac47fb658b186f30804 *R/solr_stats.r
-85d89423eb166ed3b91c6477bcecfc90 *R/solrium-package.R
-bc3d2fe13e45ad93a212b3e9200d3177 *R/update_csv.R
-28ef96c7d5cb72216d308dd1c8c2de46 *R/update_json.R
-5397084fc6f5abf6c98738bc863d8f57 *R/update_xml.R
-9095b8d2a21574b8955d77d4aa2f4d00 *R/zzz.r
-0e9a61c7a417d1f6092af4feb2ed9a63 *README.md
-4f696e68a3f28548dccebef0953fed29 *build/vignette.rds
+303d3f009cfbc6bec848fc26d0c215fe *R/collection_addreplica.R
+6bcf0d1dc0812110277f4a06eaa0cdde *R/collection_addreplicaprop.R
+2d0fe3418cfc877548477a2b2bef4720 *R/collection_addrole.R
+b6343a9ae1d4f89d985fd260b599e3d6 *R/collection_balanceshardunique.R
+e511c30d0dd791b3b15d7b19919c3e7c *R/collection_clusterprop.R
+2a9603a7140d94e015bf88ba70a9b151 *R/collection_clusterstatus.R
+243f203538dbc33f22ae6169671dc904 *R/collection_create.R
+979f5bf6cec0c3ff17a37eded0ebd047 *R/collection_createalias.R
+5c79097e5351a62d7b7703f5f7c5ae5f *R/collection_createshard.R
+c0661830891b17d6f896b6d08c563f40 *R/collection_delete.R
+c8232f50626418fc2c54d62ee8dd39a9 *R/collection_deletealias.R
+096f1e33dec260db6c2ea453dc2efd45 *R/collection_deletereplica.R
+09f099f4eedac0be1cce30b553892395 *R/collection_deletereplicaprop.R
+b178e05dadfd441b1eadb85046859521 *R/collection_deleteshard.R
+4b77d9b58bc95bb9e7b18a35ce882813 *R/collection_exists.R
+2677b582bca6e52a1028ab9afea12438 *R/collection_list.R
+3cf13e8e338666a59ba7b45b04a8fa07 *R/collection_migrate.R
+f7835251d65615664739fbff93fddf23 *R/collection_overseerstatus.R
+5f575502710a6b6383c825bd4d6b65b0 *R/collection_rebalanceleaders.R
+3e57d061f34f82852586631791c362d0 *R/collection_reload.R
+c52ae0ed1994b261c8f28cb8fd48d087 *R/collection_removerole.R
+1a13ae2d8f65d06d3e8440c89e3af3ec *R/collection_requeststatus.R
+beb1516c83908cad52e3fc8ef9c75d59 *R/collection_splitshard.R
+3e14e4ae28443f1e09ffd3030307f826 *R/collections.R
+4de7d8c434f27a0b386ed81d84cf91a5 *R/commit.R
+d3539d5bc7e120c2ca38429082bd1d23 *R/config_get.R
+8ce85ae2d4374337226e1e761fb847de *R/config_overlay.R
+6f8defad7fa86ee7f0b8e7e65b33c549 *R/config_params.R
+66353ed97c77a7cfce5974f6a6408044 *R/config_set.R
+aa0c2598a5077f0e6c233282cd18db9d *R/content_types.R
+03b47bf590ffa32ad0a56aba5d1726aa *R/core_create.R
+bd94926a81c559b1d23701363fe9eeac *R/core_exists.R
+0386f1af9bbe2d29c8d0520fa79d1b8c *R/core_mergeindexes.R
+10e1de7c866c7a71b2989d30674b250d *R/core_reload.R
+b07588a1e7d854685e1b2d7324bd8e4e *R/core_rename.R
+8e56bd3bf9cc8a0948ce1d6e4ac76216 *R/core_requeststatus.R
+1112fc94a77c16fc969a061f8f2c40c1 *R/core_split.R
+70814361469f34b89ac64ad91b9a6f14 *R/core_status.R
+d1f3339df4fda9c0e48bb7a14479fcf0 *R/core_swap.R
+9a92ba303c8719f45d809782f9cc5aa0 *R/core_unload.R
+c2dfedc7bf6c4336c9a06a62b1e09489 *R/delete.R
+c497e20c95c55bf83ad3ba7d46c40bbf *R/optimize.R
+e7f9105b78fe7090c809ea14cbdb51de *R/parsers.R
+8537d997ad200c9001ccb78fd244ef87 *R/ping.R
+268ae9899726f7c5bd38f93f45c3548b *R/schema.R
+33f009223c3e99005c87d48342d13215 *R/search_route_keys.R
+32b7ca10eddf65ac52d3126382221ccb *R/solr_all.r
+ca0e43329720ff0c0efa2d84446ccc28 *R/solr_facet.r
+c291615b5d5e1f1fde68360cdd253005 *R/solr_get.R
+9a24b9176a7fd56dec21c8dfee4b689a *R/solr_group.r
+acde743d3d4dd4dc428a80b0d7f37bdb *R/solr_highlight.r
+db959de704f4028b485d8cf6396d2b9f *R/solr_mlt.r
+e6c39fd5a5d35731292a8b7a055ccc9c *R/solr_search.r
+10b08a15c4f6625be4e113e66b2d960f *R/solr_stats.r
+0dc4cc285b6f4c55daa434d0a6ea5eb9 *R/solrium-package.R
+5172210f95313185eaec7372a06b0d8e *R/update_atomic_json.R
+324dc3a80d76b017c1b9e5ab4e3c6b89 *R/update_atomic_xml.R
+c4f5c519254c3f369a5b0bd93d868250 *R/update_csv.R
+ae1fe1b092aec27b166e6ae79c8ab891 *R/update_json.R
+ecee6b8324315ee18654c01e2ef62093 *R/update_xml.R
+fef37f4cf43ff7e75715f5a445462cb3 *R/zzz.r
+f4318dab14f7d2342d9337123472f6be *README.md
+73a20101fec67c6f936ae8246947946a *build/vignette.rds
 ae1097c4c79e8dfbf4f5982af5a2bb3f *inst/doc/cores_collections.Rmd
 0f33cd79c266c0543a4a8ec6dca17c91 *inst/doc/cores_collections.html
 24a71da1896d16ecbd9fc4d7476c91d3 *inst/doc/document_management.Rmd
 b077e3a569d0726ca65946c5513a000b *inst/doc/document_management.html
-17b2cf10a4ff9abc151600f8efad7b03 *inst/doc/local_setup.Rmd
-ae251089e247e82ea7c591dc878e7a6a *inst/doc/local_setup.html
+8b3228a467423b9a150c7e9a99bbfeef *inst/doc/local_setup.Rmd
+d43725530d4414406cf2bdabadd53cf3 *inst/doc/local_setup.html
 f4bc6338aebf8ed9212b6f8c5122a1d1 *inst/doc/search.Rmd
 253ad63f64961638a8d40bfdca2c143b *inst/doc/search.html
 cd1cc006debed6e2b4556001fb61b872 *inst/examples/add_delete.json
@@ -88,88 +93,98 @@ f8225c6c4a245757188e297d7259f5bb *inst/examples/updatecommands_add.json
 1d42c66dcbc92c2f8ac0c2a3effabcca *inst/examples/updatecommands_add.xml
 5eab27b9c1f8c6f873c7bb16dd7d24a7 *inst/examples/updatecommands_delete.json
 d268b60d7387fb5dc7c7b640de3e1ea1 *inst/examples/updatecommands_delete.xml
-b442acc0ef5259a14ffe466f4d9b68b4 *man/add.Rd
-09fac0ac81484533d27da59b4d28ae2b *man/collapse_pivot_names.Rd
-f928c15332cddd32a053acf65e2b6154 *man/collectargs.Rd
-50edf4f47dc16efcb0c9803d2ebbc9e5 *man/collection_addreplica.Rd
-3481633c2ae2d271c66c1cd2aa2571f8 *man/collection_addreplicaprop.Rd
-863220e5be3c44f16894a63de0c4bb1f *man/collection_addrole.Rd
-30aedaf0285d9758ed58966959494c6a *man/collection_balanceshardunique.Rd
-d6859c5ea8355dcece7f2b3c16ea0d46 *man/collection_clusterprop.Rd
-3aa70e87fa8d90cebc6c58a56403a576 *man/collection_clusterstatus.Rd
-afb5e5bfb08a6fcbedef9623a124e309 *man/collection_create.Rd
-e2b69db6c36c4037d144c9d4d5a9818c *man/collection_createalias.Rd
-b63107d7916f450176a4ee2eeb76d167 *man/collection_createshard.Rd
-9ea7005f31d7fc335cbf7d0d6ddb471a *man/collection_delete.Rd
-70cf52f10af4ec611c07578350abab5b *man/collection_deletealias.Rd
-1c0d9f2eafe233baad095011c20c2219 *man/collection_deletereplica.Rd
-ebe88b276884ce0ac516fcec5624bf60 *man/collection_deletereplicaprop.Rd
-bd2f73bbd90927d4303772f6566cb9e9 *man/collection_deleteshard.Rd
-80baa3bcc8707b26b4e527e4eccc7f26 *man/collection_exists.Rd
-306789b56163228adf1cbc08112a69dc *man/collection_list.Rd
-72d5aca86ccfa8c3e863f779fa24e69b *man/collection_migrate.Rd
-0762e205891253d7f0babfb94e67c99e *man/collection_overseerstatus.Rd
-b10821e8b317462765483f9ead272f86 *man/collection_rebalanceleaders.Rd
-d01abcd1db2401886eca4329689fd4b6 *man/collection_reload.Rd
-60586db27a665b9c1a733629debbef5a *man/collection_removerole.Rd
-ccadfcae48dbb7bf7dea0b8731c1c09b *man/collection_requeststatus.Rd
-a6d7e3b92741db99023850bb99ad6b8e *man/collection_splitshard.Rd
-c9870202f4f842af2ca41fcdbebedb26 *man/collections.Rd
-b7b539cc2a5d273e19d85937e81c1347 *man/commit.Rd
-eda4a72e94fa01b0d188cb78bd207d5a *man/config_get.Rd
-87feec17216fc41495abd8b558ebb421 *man/config_overlay.Rd
-905af41498a3c2d4720d44a59573038e *man/config_params.Rd
-9ce3c931155ab597bda364dfe212e82d *man/config_set.Rd
-a56c6cfa42b947076bf8d0528ee99ea9 *man/core_create.Rd
-80845e25fe010437ae631d7a249989bc *man/core_exists.Rd
-555aa1958aa10d9b6008b9c6478409e2 *man/core_mergeindexes.Rd
-62a41c43111d53c1e0f24571a3480d8e *man/core_reload.Rd
-f66ffce36ee693570233162582fcdc57 *man/core_rename.Rd
-f47a5bac5e63a03723662b10915fa8a9 *man/core_requeststatus.Rd
-fb0b38c91635d17377af96534cb81463 *man/core_split.Rd
-459a178c90678304f413db250f4fd972 *man/core_status.Rd
-23b44147bc10d678f3b1906fbf016b22 *man/core_swap.Rd
-ee993dffa018053e21064340a42e3d7a *man/core_unload.Rd
-2317e698215663f4d5c3e8b015de7ec5 *man/delete.Rd
-d05d5dbb1295cfa97cd282c5bd165c8a *man/is-sr.Rd
-5fdc32ecdc180365d23aebc29364722b *man/makemultiargs.Rd
-47dc0f9ce0aa48e5202eb59a87e157a0 *man/optimize.Rd
-08de32419aa64bb6cb8f397d66d49807 *man/ping.Rd
-6489a80c5ff1d662c06a9a6363a72d1e *man/pivot_flatten_tabular.Rd
-8b0b6e516777030b673f4d61e097dee3 *man/schema.Rd
-71dd82c1312f20153a0ae925af58fbd5 *man/solr_all.Rd
-b980a9159999acffd61014f07f555d8b *man/solr_connect.Rd
-dd431c67f9c9b4e82e91eee74fb99c7f *man/solr_facet.Rd
-6c3b041a87f552ad383fe1b47e0c9863 *man/solr_get.Rd
-c59c6bb03d8f728b54c04b32d8872bc5 *man/solr_group.Rd
-75b8e242a3fe3c8f6d059ee01db0cdfd *man/solr_highlight.Rd
-67d1e2223cef7255b701fc707a7a6e3f *man/solr_mlt.Rd
-4aa2ff06afacbf86d05eefe409758ecb *man/solr_parse.Rd
-cfce05916ff81f431ba0d5ce50ffb2e4 *man/solr_search.Rd
-008a2d7ffedc2c9865ee2a7a4f32c17a *man/solr_stats.Rd
-885ddddf54c7479a48f21b1c0346c075 *man/solrium-package.Rd
-c80d338cd022acbd23e377f013ee53f1 *man/update_csv.Rd
-76c2d2c6fc7ef2a5ea43c863db93c3d5 *man/update_json.Rd
-4b7fbdb25a7c60eb9785c405fdfdccfb *man/update_xml.Rd
+d9ca6e1c6a421c09252e389aef1f0535 *man/SolrClient.Rd
+363eaa8d0f45441bb35efda979b2dbe9 *man/add.Rd
+be82eb40abcd983510ee1d2af38d1610 *man/collapse_pivot_names.Rd
+78294e9b187a0441566f1ab312bbfdad *man/collection_addreplica.Rd
+707717c713d372a201b0c91a373ab615 *man/collection_addreplicaprop.Rd
+5ff2387d649bf3996c5a90ff5a759d5d *man/collection_addrole.Rd
+be28882bc27c489319bb7fec1f0d4d05 *man/collection_balanceshardunique.Rd
+9edcc747d3eb8b6da0fa81d813d0e938 *man/collection_clusterprop.Rd
+68b07f2f80979d08c9b57a5669965129 *man/collection_clusterstatus.Rd
+5b627557db1fc02904d367a1fbd089a5 *man/collection_create.Rd
+a8d93f334bcdae775162652e5e394fc3 *man/collection_createalias.Rd
+ca482055af0977eed371bb907304f182 *man/collection_createshard.Rd
+5bdea47a19db7a87fd723aa1d85b7444 *man/collection_delete.Rd
+59c5613db0979f87a1362d12b1869aea *man/collection_deletealias.Rd
+1e06bb6219379d1d17cd02145827b821 *man/collection_deletereplica.Rd
+206c7330cb11f725904e287711e18f97 *man/collection_deletereplicaprop.Rd
+52e611201b8c50f9b5fcb1e280161cf2 *man/collection_deleteshard.Rd
+3aa8e3423d20c94f977de83fa2783bd4 *man/collection_exists.Rd
+243a59739867bee00821facf00b8d7eb *man/collection_list.Rd
+c83caf0e99c1f8798f93ae1905c39b00 *man/collection_migrate.Rd
+451eb3efacde1b7c480b3c4766a19ba9 *man/collection_overseerstatus.Rd
+17f3e2f8407c5d6dbf7319d44f9b26a8 *man/collection_rebalanceleaders.Rd
+22abf6ae18bfe3823f3d8d118920acfd *man/collection_reload.Rd
+68a113d06d718e7b3ebcf3e9039cf7fb *man/collection_removerole.Rd
+9698967ae7240248c6907411b1a9dbeb *man/collection_requeststatus.Rd
+eeb299bae2d8218722f6af36ed000475 *man/collection_splitshard.Rd
+d355868cc83860adb6a13b2e65f95a41 *man/collections.Rd
+1316c5571e56c3d0db766a7d3b659e4c *man/commit.Rd
+ec9a08982d63890ec0c5af832ce92c67 *man/config_get.Rd
+58f715f83073846d4a3a915ca045e818 *man/config_overlay.Rd
+20cf3e585698725426680eaaa0436dc4 *man/config_params.Rd
+caf8a6f5497b8eaf7f2b0fd64d9d974e *man/config_set.Rd
+d9ca78fb61bb8f516d03a59bda170b79 *man/core_create.Rd
+18114e49836db916e449e5998f2624d6 *man/core_exists.Rd
+4418747a026107b89471bca47b3ec16e *man/core_mergeindexes.Rd
+270af43560eb3a503971e1107fb6a3b4 *man/core_reload.Rd
+cbd213de89e022e12499415eed338e3e *man/core_rename.Rd
+3fbc439715559065ad0cb0bfca97a907 *man/core_requeststatus.Rd
+6515de785c27318696cb043f6b9069e9 *man/core_split.Rd
+aad7bd951f4314b143a6301c0fd9dc6f *man/core_status.Rd
+d40278a4fb3217e8bfd5188e9aed54e1 *man/core_swap.Rd
+7a7e98ae6bb64723b14eb2a281e18484 *man/core_unload.Rd
+4915619f623906447223fa0c7609ee53 *man/delete.Rd
+c500e495fc2936de7888060c883b1fe5 *man/is-sr.Rd
+773e7575d5753efe8175c2f82ea32b13 *man/makemultiargs.Rd
+2360da5240906b0ffdebc5d9f051b14b *man/ping.Rd
+7add5ac5e331e622f8f75e91a69cb416 *man/pivot_flatten_tabular.Rd
+dc1bf83683ddc1a5f70c21071e307fd9 *man/schema.Rd
+89e3bc155893ab4ba62673eff88b04b1 *man/solr_all.Rd
+f275e18b0bb6f47fd263de673cdb194e *man/solr_facet.Rd
+924b0fa8bd1fdb54238929a2698aceae *man/solr_get.Rd
+fc8069708d471d59f00450f50070d5cc *man/solr_group.Rd
+a2b7b2612581e4fb86d4282be276935d *man/solr_highlight.Rd
+ba53d988a5466a7b6f73578347f2901a *man/solr_mlt.Rd
+4945c47c1995f5ae0fb5c5991871f2f1 *man/solr_optimize.Rd
+078a05655fa73abafca65fb0a53a0b3b *man/solr_parse.Rd
+c68066d685e4faf639ea85d5ca469607 *man/solr_search.Rd
+0ab2d4dbf743b24a4ed77011dc3d1c13 *man/solr_stats.Rd
+9ab36ee2cc21b7583ccafac30b170337 *man/solrium-package.Rd
+d734b617f91fd6750b7a7ac70163054e *man/update_atomic_json.Rd
+c4234a41a385a4a60bc34263ea6ea1f7 *man/update_atomic_xml.Rd
+00e78ae90e489d35652ed16ebc381894 *man/update_csv.Rd
+b4e30bf1582f82c9657a05ad5dd12338 *man/update_json.Rd
+398845a8c0ad2a69737b262b1e7c3481 *man/update_xml.Rd
 b4487f117183b6157cba9b75de6d078a *tests/cloud_mode/test-add.R
 a72186f5ba6d9b13fe5219c2e8024c2e *tests/cloud_mode/test-collections.R
 1baaceeffe758af5c1b0b01e073927e2 *tests/standard_mode/test-core_create.R
 d4549d7babf9d1437a58916e7778aafb *tests/test-all.R
-68fe948d65ab12bcf4358ccd67936bd8 *tests/testthat/test-core_create.R
-8d0a8f385e29f2f3823e75a13a507b19 *tests/testthat/test-errors.R
-0ae0bf3544431d4933adb7d36702f923 *tests/testthat/test-ping.R
-e5b3d2ca168afdffbd68ea2ccc6ecb7d *tests/testthat/test-schema.R
-3cc3c33ba45c3662a5bb19db53875731 *tests/testthat/test-solr_all.R
-a560f95a79ba74a3b8db747541df4e45 *tests/testthat/test-solr_connect.R
-026c5851382faf1967616cb897d5501f *tests/testthat/test-solr_error.R
-2ff1226459959411035392906e7522bf *tests/testthat/test-solr_facet.r
-7d0f2d7545878325d53e6e650d874218 *tests/testthat/test-solr_group.r
-237bd9e4c99c714269a152dfb6cb605b *tests/testthat/test-solr_highlight.r
-a6da0b4abbd193ccad3eee22de160729 *tests/testthat/test-solr_mlt.r
-9e3e5256bcd62c5adca8f3826e9464a2 *tests/testthat/test-solr_search.r
-1c9ca9c79b510d58e9e027e0de36f142 *tests/testthat/test-solr_settings.R
-c9e394804c05152a3526fa7996ebcce1 *tests/testthat/test-solr_stats.r
+7db67b4e4446b2e7eaf8c91cd6c6c8d3 *tests/testthat/helper-solrium.R
+76ad8e374fa9d7ca38c84193602293ba *tests/testthat/test-add.R
+8b65e2f8269bddd921c5079cdef0b96a *tests/testthat/test-client.R
+4f634305850503776164a898d7f58719 *tests/testthat/test-collections.R
+9efa8981e4737d2fdd02dba6320ce4a8 *tests/testthat/test-core_create.R
+b0c377a9f0842f20a1054a1b9f103a62 *tests/testthat/test-delete.R
+5d50d567cc62e141475db642fc9b37ec *tests/testthat/test-errors.R
+04976b640a1ca2b204b34f499be9693a *tests/testthat/test-ping.R
+c042f55fe69bf8cd9cb6e956df89c140 *tests/testthat/test-schema.R
+1054ffe8bd0314e3f227b5eb6bc162c2 *tests/testthat/test-solr_all.R
+b24c6a523123d571d3a24f9d3c198934 *tests/testthat/test-solr_error.R
+4ce0d10effac270c39ea51a432fd72bf *tests/testthat/test-solr_facet.r
+48997aafacc6dddcf4427c0e87c875fe *tests/testthat/test-solr_get.R
+704b1d7e0ad5daf0a484749e55b336d9 *tests/testthat/test-solr_goup.R
+97781e5df6dfdc278b7a2e1a43649d07 *tests/testthat/test-solr_highlight.r
+4c8b786a1690b4620549fc18cb4eed10 *tests/testthat/test-solr_mlt.r
+341f4deb3ead0ca9f504c6372050ac24 *tests/testthat/test-solr_search.r
+2a53b123f557b35b7f3f5ef89140285b *tests/testthat/test-solr_stats.r
+ccd3487af7a54204ddf8275587cd8f8d *tests/testthat/test-update_atomic_json.R
+a273ac80232ad13018015d48922c27a0 *tests/testthat/test-update_atomic_xml.R
+cd5267e708a49578de64d0c589ed14eb *tests/testthat/test-update_csv.R
+e761a4148542b740dad408411d5e66ae *tests/testthat/test-update_json.R
+89f45dd274d1929b4b830ec84b6c320d *tests/testthat/test-update_xml.R
 ae1097c4c79e8dfbf4f5982af5a2bb3f *vignettes/cores_collections.Rmd
 24a71da1896d16ecbd9fc4d7476c91d3 *vignettes/document_management.Rmd
-17b2cf10a4ff9abc151600f8efad7b03 *vignettes/local_setup.Rmd
+8b3228a467423b9a150c7e9a99bbfeef *vignettes/local_setup.Rmd
 f4bc6338aebf8ed9212b6f8c5122a1d1 *vignettes/search.Rmd
diff --git a/NAMESPACE b/NAMESPACE
index 01b0ff1..d413a2d 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -2,7 +2,6 @@
 
 S3method(add,data.frame)
 S3method(add,list)
-S3method(print,solr_connection)
 S3method(solr_parse,default)
 S3method(solr_parse,ping)
 S3method(solr_parse,sr_all)
@@ -13,6 +12,7 @@ S3method(solr_parse,sr_mlt)
 S3method(solr_parse,sr_search)
 S3method(solr_parse,sr_stats)
 S3method(solr_parse,update)
+export(SolrClient)
 export(add)
 export(collection_addreplica)
 export(collection_addreplicaprop)
@@ -59,34 +59,26 @@ export(delete_by_query)
 export(is.sr_facet)
 export(is.sr_high)
 export(is.sr_search)
-export(optimize)
 export(ping)
 export(schema)
 export(solr_all)
-export(solr_connect)
 export(solr_facet)
 export(solr_get)
 export(solr_group)
 export(solr_highlight)
 export(solr_mlt)
+export(solr_optimize)
 export(solr_parse)
 export(solr_search)
-export(solr_settings)
 export(solr_stats)
+export(update_atomic_json)
+export(update_atomic_xml)
 export(update_csv)
 export(update_json)
 export(update_xml)
+importFrom(R6,R6Class)
+importFrom(crul,HttpClient)
 importFrom(dplyr,bind_rows)
-importFrom(httr,GET)
-importFrom(httr,POST)
-importFrom(httr,content)
-importFrom(httr,content_type)
-importFrom(httr,content_type_json)
-importFrom(httr,content_type_xml)
-importFrom(httr,http_condition)
-importFrom(httr,http_status)
-importFrom(httr,stop_for_status)
-importFrom(httr,upload_file)
 importFrom(jsonlite,fromJSON)
 importFrom(plyr,rbind.fill)
 importFrom(tibble,add_column)
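
The NAMESPACE changes above drop `solr_connect`, `solr_settings`, the
`print.solr_connection` method, and the `optimize` export in favor of the new
`SolrClient` class and `solr_optimize`. A minimal migration sketch, assuming a
local Solr at the defaults and that the standalone functions take the
connection object as their first argument (as the NEWS entries below describe):

    library("solrium")

    # 0.4.0 and earlier: one implicit global connection
    # solr_connect("http://localhost:8983")
    # optimize(name = "gettingstarted")

    # 1.0.0: an explicit connection object ...
    conn <- SolrClient$new(host = "127.0.0.1", port = 8983)
    solr_optimize(conn, name = "gettingstarted")
    # ... or call the method on the object itself
    conn$optimize("gettingstarted")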
diff --git a/NEWS.md b/NEWS.md
index bc4cb20..18fa1d4 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,46 @@
+solrium 1.0.0
+=============
+
+This is v1, indicating breaking changes from the previous version!
+
+### NEW FEATURES
+
+* Package has been reworked to allow control over which parameters are sent
+as query parameters and which as body. If only query parameters are given, we
+do a `GET` request, but if any body parameters are given (even if query params
+are also given) we do a `POST` request. This means that all `solr_*` functions
+have more or less the same parameters, and you now pass query parameters to
+`params` and body parameters to `body`. This definitely breaks previous code,
+apologies for that, but the major version bump is a big indicator of the breakage.
+* As part of the overhaul, moved to using an `R6` setup for the Solr connection
+object. The connection object deals with connection details, and you can call
+all methods on the object created. Additionally, you can simply
+pass the connection object to standalone methods. This change means
+you can create connection objects to >1 Solr instance, so you can use many
+Solr instances in one R session. (#100)
+* Gains new functions `update_atomic_json` and `update_atomic_xml` for doing
+atomic updates (#97); thanks @yinghaoh
+* `solr_search` and `solr_all` gain attributes that include `numFound`,
+`start`, and `maxScore` (#94)
+* `solr_search`/`solr_all`/`solr_mlt` gain a new feature where we automatically
+check for and adjust the `rows` parameter for you if you allow us to.
+You can toggle this behavior, and you can set a minimum number of rows
+to be optimized with `minOptimizedRows`. See (#102) (#104) (#105) for
+discussion. Thanks @1havran
+
+### MINOR IMPROVEMENTS
+
+* Replaced `httr` with `crul`. Should only be noticeable with respect
+to specifying curl options (#98)
+* Added more tests (#56)
+* `optimize` renamed to `solr_optimize` (#107)
+* `solr_facet` now fails more informatively when no `facet.*` fields are given (#103)
+
+### BUG FIXES
+
+* Fixed a bug parsing `solr_highlight` results to a data.frame (#109)
+
+
 solrium 0.4.0
 =============
 
@@ -10,7 +53,7 @@ solrium 0.4.0
 * Added examples and tests for a few more public Solr instances (#30)
 * Now using `tibble` to give back compact data.frame's
 * namespace all base package calls
-* Many changes to internal parsers to use `xml2` instead of `XML`, and 
+* Many changes to internal parsers to use `xml2` instead of `XML`, and
 improvements
 
 solrium 0.3.0
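
To make the `params`/`body` split in the NEWS entries above concrete, a short
sketch of the new request behavior (the `gettingstarted` collection and the
atomic-update payload are illustrative assumptions):

    library("solrium")
    conn <- SolrClient$new()  # 127.0.0.1:8983 by default

    # only query parameters -> solrium issues a GET request
    conn$search("gettingstarted", params = list(q = "*:*", rows = 10))

    # any body parameters -> solrium issues a POST request
    conn$search("gettingstarted", body = list(query = "*:*"))

    # new in 1.0.0: atomic updates against existing documents
    # (the document id and field here are hypothetical)
    body <- '[{"id": "1", "title": {"set": "A new title"}}]'
    conn$update_atomic_json(body, name = "gettingstarted")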
diff --git a/R/SolrClient.R b/R/SolrClient.R
new file mode 100644
index 0000000..c6ab8f1
--- /dev/null
+++ b/R/SolrClient.R
@@ -0,0 +1,1013 @@
+#' Solr connection client
+#'
+#' @export
+#' @param host (character) Host URL. Default: 127.0.0.1
+#' @param path (character) URL path.
+#' @param port (character/numeric) Port. Default: 8983
+#' @param scheme (character) http scheme, one of http or https. Default: http
+#' @param proxy List of arguments for a proxy connection, including one or
+#' more of: url, port, username, password, and auth. See
+#' [crul::proxy] for help; it is used to construct the
+#' proxy connection.
+#' @param errors (character) One of `"simple"` or `"complete"`. Simple gives
+#' the http code and error message on an error, while complete gives the http
+#' code, error message, and stack trace, if available.
+#'
+#' @return Various output, see help files for each grouping of methods.
+#'
+#' @details `SolrClient` creates an R6 class object. The object is
+#' not cloneable and is portable, so it can be inherited across packages
+#' without complication.
+#'
+#' `SolrClient` is used to initialize a client that knows about your
+#' Solr instance, with options for setting host, port, http scheme,
+#' and simple vs. complete error reporting.
+#'
+#' @section SolrClient methods:
+#'
+#' Each of these methods also has a matching standalone exported
+#' function that you can use by passing in the connection object made
+#' by calling `SolrClient$new()`. Also, see the docs for each method for
+#' parameter definitions and their default values.
+#'
+#' * `ping(name, wt = 'json', raw = FALSE, ...)`
+#' * `schema(name, what = '', raw = FALSE, ...)`
+#' * `commit(name, expunge_deletes = FALSE, wait_searcher = TRUE,
+#' soft_commit = FALSE, wt = 'json', raw = FALSE, ...)`
+#' * `optimize(name, max_segments = 1, wait_searcher = TRUE,
+#' soft_commit = FALSE, wt = 'json', raw = FALSE, ...)`
+#' * `config_get(name, what = NULL, wt = "json", raw = FALSE, ...)`
+#' * `config_params(name, param = NULL, set = NULL, unset = NULL,
+#' update = NULL, ...)`
+#' * `config_overlay(name, omitHeader = FALSE, ...)`
+#' * `config_set(name, set = NULL, unset = NULL, ...)`
+#' * `collection_exists(name, ...)`
+#' * `collection_list(raw = FALSE, ...)`
+#' * `collection_create(name, numShards = 1, maxShardsPerNode = 1,
+#' createNodeSet = NULL, collection.configName = NULL, replicationFactor = 1,
+#' router.name = NULL, shards = NULL, createNodeSet.shuffle = TRUE,
+#' router.field = NULL, autoAddReplicas = FALSE, async = NULL, raw = FALSE,
+#' callopts=list(), ...)`
+#' * `collection_addreplica(name, shard = NULL, route = NULL, node = NULL,
+#' instanceDir = NULL, dataDir = NULL, async = NULL, raw = FALSE,
+#' callopts=list(), ...)`
+#' * `collection_addreplicaprop(name, shard, replica, property, property.value,
+#' shardUnique = FALSE, raw = FALSE, callopts=list())`
+#' * `collection_addrole(role = "overseer", node, raw = FALSE, ...)`
+#' * `collection_balanceshardunique(name, property, onlyactivenodes = TRUE,
+#' shardUnique = NULL, raw = FALSE, ...)`
+#' * `collection_clusterprop(name, val, raw = FALSE, callopts=list())`
+#' * `collection_clusterstatus(name = NULL, shard = NULL, raw = FALSE, ...)`
+#' * `collection_createalias(alias, collections, raw = FALSE, ...)`
+#' * `collection_createshard(name, shard, createNodeSet = NULL,
+#' raw = FALSE, ...)`
+#' * `collection_delete(name, raw = FALSE, ...)`
+#' * `collection_deletealias(alias, raw = FALSE, ...)`
+#' * `collection_deletereplica(name, shard = NULL, replica = NULL,
+#' onlyIfDown = FALSE, raw = FALSE, callopts=list(), ...)`
+#' * `collection_deletereplicaprop(name, shard, replica, property, raw = FALSE,
+#' callopts=list())`
+#' * `collection_deleteshard(name, shard, raw = FALSE, ...)`
+#' * `collection_migrate(name, target.collection, split.key,
+#' forward.timeout = NULL, async = NULL, raw = FALSE, ...)`
+#' * `collection_overseerstatus(raw = FALSE, ...)`
+#' * `collection_rebalanceleaders(name, maxAtOnce = NULL, maxWaitSeconds = NULL,
+#' raw = FALSE, ...)`
+#' * `collection_reload(name, raw = FALSE, ...)`
+#' * `collection_removerole(role = "overseer", node, raw = FALSE, ...)`
+#' * `collection_requeststatus(requestid, raw = FALSE, ...)`
+#' * `collection_splitshard(name, shard, ranges = NULL, split.key = NULL,
+#' async = NULL, raw = FALSE, ...)`
+#' * `core_status(name = NULL, indexInfo = TRUE, raw = FALSE, callopts=list())`
+#' * `core_exists(name, callopts = list())`
+#' * `core_create(name, instanceDir = NULL, config = NULL, schema = NULL,
+#' dataDir = NULL, configSet = NULL, collection = NULL, shard = NULL,
+#' async=NULL, raw = FALSE, callopts=list(), ...)`
+#' * `core_unload(name, deleteIndex = FALSE, deleteDataDir = FALSE,
+#' deleteInstanceDir = FALSE, async = NULL, raw = FALSE, callopts = list())`
+#' * `core_rename(name, other, async = NULL, raw = FALSE, callopts=list())`
+#' * `core_reload(name, raw = FALSE, callopts=list())`
+#' * `core_swap(name, other, async = NULL, raw = FALSE, callopts=list())`
+#' * `core_mergeindexes(name, indexDir = NULL, srcCore = NULL, async = NULL,
+#' raw = FALSE, callopts = list())`
+#' * `core_requeststatus(requestid, raw = FALSE, callopts = list())`
+#' * `core_split(name, path = NULL, targetCore = NULL, ranges = NULL,
+#' split.key = NULL, async = NULL, raw = FALSE, callopts=list())`
+#' * `search(name = NULL, params = NULL, body = NULL, callopts = list(),
+#' raw = FALSE,  parsetype = 'df', concat = ',', optimizeMaxRows = TRUE,
+#' minOptimizedRows = 50000L, ...)`
+#' * `facet(name = NULL, params = NULL, body = NULL, callopts = list(),
+#' raw = FALSE,  parsetype = 'df', concat = ',', ...)`
+#' * `stats(name = NULL, params = list(q = '*:*', stats.field = NULL,
+#' stats.facet = NULL), body = NULL, callopts=list(), raw = FALSE,
+#' parsetype = 'df', ...)`
+#' * `highlight(name = NULL, params = NULL, body = NULL, callopts=list(),
+#' raw = FALSE, parsetype = 'df', ...)`
+#' * `group(name = NULL, params = NULL, body = NULL, callopts=list(),
+#' raw=FALSE, parsetype='df', concat=',', ...)`
+#' * `mlt(name = NULL, params = NULL, body = NULL, callopts=list(),
+#' raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE,
+#' minOptimizedRows = 50000L, ...)`
+#' * `all(name = NULL, params = NULL, body = NULL, callopts=list(),
+#' raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE,
+#' minOptimizedRows = 50000L, ...)`
+#' * `get(ids, name, fl = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `add(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
+#' boost = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `delete_by_id(ids, name, commit = TRUE, commit_within = NULL,
+#' overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `delete_by_query(query, name, commit = TRUE, commit_within = NULL,
+#' overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `update_json(files, name, commit = TRUE, optimize = FALSE,
+#' max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+#' soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `update_xml(files, name, commit = TRUE, optimize = FALSE, max_segments = 1,
+#' expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE,
+#' prepare_commit = NULL, wt = 'json', raw = FALSE, ...)`
+#' * `update_csv(files, name, separator = ',', header = TRUE, fieldnames = NULL,
+#' skip = NULL, skipLines = 0, trim = FALSE, encapsulator = NULL,
+#' escape = NULL, keepEmpty = FALSE, literal = NULL, map = NULL, split = NULL,
+#' rowid = NULL, rowidOffset = NULL, overwrite = NULL, commit = NULL,
+#' wt = 'json', raw = FALSE, ...)`
+#' * `update_atomic_json(body, name, wt = 'json', raw = FALSE, ...)`
+#' * `update_atomic_xml(body, name, wt = 'json', raw = FALSE, ...)`
+#'
+#' @format NULL
+#' @usage NULL
+#'
+#' @examples \dontrun{
+#' # make a client
+#' (cli <- SolrClient$new())
+#'
+#' # variables
+#' cli$host
+#' cli$port
+#' cli$path
+#' cli$scheme
+#'
+#' # ping
+#' ## ping to make sure it's up
+#' cli$ping("gettingstarted")
+#'
+#' # schema
+#' ## get schema information, including the Solr version
+#' cli$schema("gettingstarted")
+#' cli$schema("gettingstarted", "fields")
+#' cli$schema("gettingstarted", "name")
+#' cli$schema("gettingstarted", "version")$version
+#'
+#' # Search
+#' cli$search("gettingstarted", params = list(q = "*:*"))
+#' cli$search("gettingstarted", body = list(query = "*:*"))
+#'
+#' # set a different host
+#' SolrClient$new(host = 'stuff.com')
+#'
+#' # set a different port
+#' SolrClient$new(port = 3456)
+#'
+#' # set a different http scheme
+#' SolrClient$new(scheme = 'https')
+#'
+#' # set a proxy
+#' SolrClient$new(proxy = list(url = "187.62.207.130:3128"))
+#'
+#' prox <- list(url = "187.62.207.130:3128", user = "foo", pwd = "bar")
+#' cli <- SolrClient$new(proxy = prox)
+#' cli$proxy
+#'
+#' # A remote Solr instance to which you don't have admin access
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
+#' cli$search(params = list(q = "memory"))
+#' }
+SolrClient <- R6::R6Class(
+  "SolrClient",
+  portable = TRUE,
+  cloneable = FALSE,
+  public = list(
+    host = "127.0.0.1",
+    port = 8983,
+    path = NULL,
+    scheme = 'http',
+    proxy = NULL,
+    errors = "simple",
+
+    initialize = function(host, path, port, scheme, proxy, errors) {
+      if (!missing(host)) self$host <- host
+      if (!missing(path)) self$path <- path
+      if (!missing(port)) self$port <- port
+      if (!missing(scheme)) self$scheme <- scheme
+      if (!missing(proxy)) self$proxy <- private$make_proxy(proxy)
+      if (!missing(errors)) self$errors <- private$lint_errors(errors)
+    },
+
+    print = function(...) {
+      cat('<Solr Client>', sep = "\n")
+      cat(paste0('  host: ', self$host), sep = "\n")
+      cat(paste0('  path: ', self$path), sep = "\n")
+      cat(paste0('  port: ', self$port), sep = "\n")
+      cat(paste0('  scheme: ', self$scheme), sep = "\n")
+      cat(paste0('  errors: ', self$errors), sep = "\n")
+      cat("  proxy:", sep = "\n")
+      if (!is.null(self$proxy)) {
+        cat(paste0("    url:  ", self$proxy$proxy), sep = "\n")
+        cat(paste0("    port: ", self$proxy$proxyport))
+      }
+    },
+
+    # Admin methods
+    ping = function(name, wt = 'json', raw = FALSE, ...) {
+      path <- sprintf('solr/%s/admin/ping', name)
+      res <- tryCatch(
+        solr_GET(self$make_url(), path = path, args = list(wt = wt),
+                 callopts = list(...)),
+        error = function(e) e
+      )
+      if (inherits(res, "error")) {
+        return(list(status = "not found"))
+      } else {
+        out <- structure(res, class = "ping", wt = wt)
+        if (raw) return( out )
+        solr_parse(out)
+      }
+    },
+
+    schema = function(name, what = '', raw = FALSE, ...) {
+      res <- solr_GET(self$make_url(), sprintf('solr/%s/schema/%s', name, what),
+                      list(wt = "json"), ...)
+      if (raw) return(res)
+      jsonlite::fromJSON(res)
+    },
+
+    commit = function(name, expunge_deletes = FALSE, wait_searcher = TRUE,
+                      soft_commit = FALSE, wt = 'json', raw = FALSE, ...) {
+
+      obj_proc(self$make_url(), sprintf('solr/%s/update', name),
+               body = list(commit =
+                             list(expungeDeletes = asl(expunge_deletes),
+                                  waitSearcher = asl(wait_searcher),
+                                  softCommit = asl(soft_commit))),
+               args = list(wt = wt),
+               raw = raw,
+               self$proxy, ...)
+    },
+
+    optimize = function(name, max_segments = 1, wait_searcher = TRUE,
+                        soft_commit = FALSE, wt = 'json', raw = FALSE, ...) {
+
+      obj_proc(self$make_url(), sprintf('solr/%s/update', name),
+               body = list(optimize =
+                             list(maxSegments = max_segments,
+                                  waitSearcher = asl(wait_searcher),
+                                  softCommit = asl(soft_commit))),
+               args = list(wt = wt),
+               raw = raw,
+               self$proxy, ...)
+    },
+
+
+
+    config_get = function(name, what = NULL, wt = "json", raw = FALSE, ...) {
+      res <- solr_GET(self$make_url(), sprintf('solr/%s/config', name),
+                      sc(list(wt = wt)), self$proxy, ...)
+      config_parse(res, what, wt, raw)
+    },
+
+    config_params = function(name, param = NULL, set = NULL,
+                              unset = NULL, update = NULL, ...) {
+
+      if (all(vapply(list(set, unset, update), is.null, logical(1)))) {
+        if (is.null(param)) {
+          url <- sprintf('solr/%s/config/params', name)
+        } else {
+          url <- sprintf('solr/%s/config/params/%s', name, param)
+        }
+        res <- solr_GET(self$make_url(), url,
+                        list(wt = "json"), list(...), self$proxy)
+      } else {
+        path <- sprintf('solr/%s/config/params', name)
+        body <- sc(c(name_by(unbox_if(set, TRUE), "set"),
+                     name_by(unbox_if(unset, TRUE), "unset"),
+                     name_by(unbox_if(update, TRUE), "update")))
+        res <- solr_POST_body(self$make_url(), path,
+                              body, list(wt = "json"),
+                              ctype_json(), list(...), self$proxy)
+      }
+      jsonlite::fromJSON(res)
+    },
+
+    config_overlay = function(name, omitHeader = FALSE, ...) {
+      args <- sc(list(wt = "json", omitHeader = asl(omitHeader)))
+      res <- solr_GET(self$make_url(),
+                      sprintf('solr/%s/config/overlay', name), args,
+                      self$proxy, ...)
+      jsonlite::fromJSON(res)
+    },
+
+    config_set = function(name, set = NULL, unset = NULL, ...) {
+      body <- sc(list(`set-property` = unbox_if(set),
+                      `unset-property` = unset))
+      res <- solr_POST_body(self$make_url(),
+                            sprintf('solr/%s/config', name),
+                            body, list(wt = "json"), ctype_json(),
+                            list(...), self$proxy)
+      jsonlite::fromJSON(res)
+    },
+
+
+    # Collection methods
+    collection_exists = function(name, ...) {
+      name %in% suppressMessages(self$collection_list(...))$collections
+    },
+
+    collection_list = function(raw = FALSE, callopts = list()) {
+      private$coll_h(sc(list(action = 'LIST', wt = 'json')), callopts, raw)
+    },
+
+    collection_create = function(name, numShards = 1, maxShardsPerNode = 1,
+      createNodeSet = NULL, collection.configName = NULL, replicationFactor = 1,
+      router.name = NULL, shards = NULL, createNodeSet.shuffle = TRUE,
+      router.field = NULL, autoAddReplicas = FALSE, async = NULL,
+      raw = FALSE, callopts=list()) {
+
+      args <- sc(list(action = 'CREATE', name = name, numShards = numShards,
+                      replicationFactor = replicationFactor,
+                      maxShardsPerNode = maxShardsPerNode, createNodeSet = createNodeSet,
+                      collection.configName = collection.configName,
+                      router.name = router.name, shards = shards,
+                      createNodeSet.shuffle = asl(createNodeSet.shuffle),
+                      router.field = router.field, autoAddReplicas = asl(autoAddReplicas),
+                      async = async, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_addreplica = function(name, shard = NULL, route = NULL,
+      node = NULL, instanceDir = NULL, dataDir = NULL, async = NULL,
+      raw = FALSE, callopts=list()) {
+
+      args <- sc(list(action = 'ADDREPLICA', collection = name, shard = shard,
+                      route = route, node = node, instanceDir = instanceDir,
+                      dataDir = dataDir, async = async, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_addreplicaprop = function(name, shard, replica, property,
+      property.value, shardUnique = FALSE, raw = FALSE, callopts=list()) {
+
+      args <- sc(list(action = 'ADDREPLICAPROP', collection = name,
+                      shard = shard, replica = replica, property = property,
+                      property.value = property.value,
+                      shardUnique = asl(shardUnique), wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_addrole = function(role = "overseer", node, raw = FALSE,
+      callopts = list(), ...) {
+
+      args <- sc(list(action = 'ADDROLE', role = role, node = node,
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_balanceshardunique = function(name, property, onlyactivenodes = TRUE,
+                        shardUnique = NULL, raw = FALSE, callopts = list()) {
+
+      args <- sc(list(action = 'BALANCESHARDUNIQUE', collection = name,
+                      property = property,
+                      onlyactivenodes = asl(onlyactivenodes),
+                      shardUnique = asl(shardUnique),
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_clusterprop = function(name, val, raw = FALSE, callopts=list()) {
+
+      args <- sc(list(action = 'CLUSTERPROP', name = name,
+                      val = if (is.null(val)) "" else val, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_clusterstatus = function(name = NULL, shard = NULL, raw = FALSE,
+                                        callopts = list()) {
+      shard <- check_shard(shard)
+      args <- sc(list(action = 'CLUSTERSTATUS', collection = name,
+                      shard = shard, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_createalias = function(alias, collections, raw = FALSE,
+      callopts = list()) {
+
+      collections <- check_shard(collections)
+      args <- sc(list(action = 'CREATEALIAS', name = alias,
+                      collections = collections, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_createshard = function(name, shard, createNodeSet = NULL,
+                                       raw = FALSE, callopts = list()) {
+
+      args <- sc(list(action = 'CREATESHARD', collection = name, shard = shard,
+                      createNodeSet = createNodeSet, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_delete = function(name, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'DELETE', name = name, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_deletealias = function(alias, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'DELETEALIAS', name = alias, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_deletereplica = function(name, shard = NULL, replica = NULL,
+      onlyIfDown = FALSE, raw = FALSE, callopts=list(), ...) {
+
+      args <- sc(list(action = 'DELETEREPLICA', collection = name,
+                      shard = shard, replica = replica,
+                      onlyIfDown = asl(onlyIfDown), wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_deletereplicaprop = function(name, shard, replica, property,
+                                             raw = FALSE, callopts=list()) {
+      args <- sc(list(action = 'DELETEREPLICAPROP', collection = name,
+                      shard = shard, replica = replica, property = property,
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_deleteshard = function(name, shard, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'DELETESHARD', collection = name, shard = shard,
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_migrate = function(name, target.collection, split.key, forward.timeout = NULL,
+                                   async = NULL, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'MIGRATE', collection = name,
+                      target.collection = target.collection,
+                      split.key = split.key, forward.timeout = forward.timeout,
+                      async = async, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_overseerstatus = function(raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'OVERSEERSTATUS', wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_rebalanceleaders = function(name, maxAtOnce = NULL,
+      maxWaitSeconds = NULL, raw = FALSE, callopts = list()) {
+
+      args <- sc(list(action = 'REBALANCELEADERS', collection = name,
+                      maxAtOnce = maxAtOnce,
+                      maxWaitSeconds = maxWaitSeconds, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_reload = function(name, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'RELOAD', name = name, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_removerole = function(role = "overseer", node, raw = FALSE,
+                                     callopts = list()) {
+
+      args <- sc(list(action = 'REMOVEROLE', role = role, node = node,
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_requeststatus = function(requestid, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'REQUESTSTATUS', requestid = requestid,
+                      wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+    collection_splitshard = function(name, shard, ranges = NULL, split.key = NULL,
+                                      async = NULL, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'SPLITSHARD', collection = name, shard = shard,
+                      ranges = do_ranges(ranges), split.key = split.key,
+                      async = async, wt = 'json'))
+      private$coll_h(args, callopts, raw)
+    },
+
+
+    # Core methods
+    core_status = function(name = NULL, indexInfo = TRUE, raw = FALSE,
+                           callopts=list()) {
+      args <- sc(list(action = 'STATUS', core = name,
+                      indexInfo = asl(indexInfo), wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_exists = function(name, callopts = list()) {
+      tmp <- suppressMessages(self$core_status(name = name, callopts = callopts))
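+      # STATUS reports an empty list for unknown cores, so a non-empty
+      # status entry means the core exists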
+      length(tmp$status[[1]]) > 0
+    },
+
+    core_create = function(name, instanceDir = NULL, config = NULL,
+      schema = NULL, dataDir = NULL, configSet = NULL, collection = NULL,
+      shard = NULL, async=NULL, raw = FALSE, callopts=list(), ...) {
+
+      args <- sc(list(action = 'CREATE', name = name, instanceDir = instanceDir,
+                      config = config, schema = schema, dataDir = dataDir,
+                      configSet = configSet, collection = collection,
+                      shard = shard, async = async, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_unload = function(name, deleteIndex = FALSE, deleteDataDir = FALSE,
+                           deleteInstanceDir = FALSE, async = NULL,
+                           raw = FALSE, callopts = list()) {
+
+      args <- sc(list(action = 'UNLOAD', core = name,
+                      deleteIndex = asl(deleteIndex),
+                      deleteDataDir = asl(deleteDataDir),
+                      deleteInstanceDir = asl(deleteInstanceDir),
+                      async = async, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_rename = function(name, other, async = NULL, raw = FALSE,
+                           callopts=list()) {
+      args <- sc(list(action = 'RENAME', core = name, other = other,
+                      async = async, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_reload = function(name, raw = FALSE, callopts=list()) {
+      args <- sc(list(action = 'RELOAD', core = name, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_swap = function(name, other, async = NULL, raw = FALSE, callopts=list()) {
+      if (is_in_cloud_mode(self)) stop("You are in SolrCloud mode, stopping",
+                                       call. = FALSE)
+      args <- sc(list(action = 'SWAP', core = name, other = other,
+                      async = async, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_mergeindexes = function(name, indexDir = NULL, srcCore = NULL,
+                                 async = NULL, raw = FALSE, callopts = list()) {
+
+      args <- sc(list(action = 'MERGEINDEXES', core = name, indexDir = indexDir,
+                      srcCore = srcCore, async = async, wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_requeststatus = function(requestid, raw = FALSE, callopts = list()) {
+      args <- sc(list(action = 'REQUESTSTATUS', requestid = requestid,
+                      wt = 'json'))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+    core_split = function(name, path = NULL, targetCore = NULL, ranges = NULL,
+                          split.key = NULL, async = NULL, raw = FALSE,
+                          callopts=list()) {
+      args <- sc(list(action = 'SPLIT', core = name, ranges = do_ranges(ranges),
+                      split.key = split.key, async = async, wt = 'json'))
+      args <- c(args, make_args(path), make_args(targetCore))
+      res <- solr_GET(self$make_url(), 'solr/admin/cores', args, callopts,
+                      self$proxy)
+      if (raw) res else jsonlite::fromJSON(res)
+    },
+
+
+    # Search methods
+    search = function(name = NULL, params = NULL, body = NULL, callopts = list(),
+                      raw = FALSE,  parsetype = 'df', concat = ',',
+                      optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params) && length(params) > 0) {
+        params$rows <- private$adjust_rows(params, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(body) && length(body) > 0) {
+        body$rows <- private$adjust_rows(body, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(params)) params <- check_args_search(params, "fq", ...)
+      if (!is.null(body)) body <- check_args_search(body, "fq", ...)
+      if (!is.null(body)) {
+        res <- solr_POST_body(self$make_url(),
+            if (!is.null(name)) url_handle(name) else self$path,
+            body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_search", wt = params$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                 if (!is.null(name)) url_handle(name) else self$path,
+                 params, callopts, self$proxy)
+        out <- structure(res, class = "sr_search", wt = params$wt)
+      }
+      if (raw) {
+        return( out )
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_search"))
+        solr_parse(parsed, parsetype, concat)
+      }
+    },
+
+    facet = function(name = NULL, params = NULL, body = NULL, callopts = list(),
+                     raw = FALSE,  parsetype = 'df', concat = ',', ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params)) params <- check_args_facet(params, keys_facet, ...)
+      if (!is.null(body)) body <- check_args_facet(body, keys_facet, ...)
+
+      if (!is.null(body)) {
+        res <- solr_POST_body(self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_facet", wt = params$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_facet", wt = params$wt)
+      }
+      if (raw) {
+        return( out )
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_facet"))
+        solr_parse(parsed)
+      }
+    },
+
+    stats = function(name = NULL,
+      params = list(q = '*:*', stats.field = NULL, stats.facet = NULL), body = NULL,
+      callopts=list(), raw = FALSE, parsetype = 'df', ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params)) params <- check_args_stats(params, keys_stats, ...)
+      if (!is.null(body)) body <- check_args_stats(body, keys_stats, ...)
+      if (!is.null(body)) {
+        res <- solr_POST_body(self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_stats", wt = params$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_stats", wt = params$wt)
+      }
+      if (raw) {
+        return( out )
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_stats"))
+        solr_parse(parsed, parsetype)
+      }
+    },
+
+    highlight = function(name = NULL, params = NULL, body = NULL,
+                         callopts=list(), raw = FALSE, parsetype = 'df', ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params)) params <- check_args_high(params, keys_high, ...)
+      if (!is.null(body)) body <- check_args_high(body, keys_high, ...)
+      if (!is.null(body)) {
+        res <- solr_POST_body(self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_high", wt = params$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_high", wt = params$wt)
+      }
+      if (raw) {
+        return(out)
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_high"))
+        solr_parse(parsed, parsetype)
+      }
+    },
+
+    group = function(name = NULL, params = NULL, body = NULL,
+                     callopts=list(), raw=FALSE, parsetype='df', concat=',',
+                     ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params)) params <- check_args_group(params, keys_group, ...)
+      if (!is.null(body)) body <- check_args_group(body, keys_group, ...)
+
+      if (!is.null(body)) {
+        res <- solr_POST_body(
+          self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_group", wt = body$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_group", wt = params$wt)
+      }
+      if (raw) {
+        return(out)
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_group"))
+        solr_parse(parsed, parsetype, concat)
+      }
+    },
+
+    mlt = function(name = NULL, params = NULL, body = NULL,
+                   callopts=list(), raw=FALSE, parsetype='df', concat=',',
+                   optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params) && length(params) > 0) {
+        params$rows <- private$adjust_rows(params, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(body) && length(body) > 0) {
+        body$rows <- private$adjust_rows(body, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(params)) params <- check_args_mlt(params, keys_mlt, ...)
+      if (!is.null(body)) body <- check_args_mlt(body, keys_mlt, ...)
+
+      if (!is.null(body)) {
+        res <- solr_POST_body(
+          self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_mlt", wt = body$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_mlt", wt = params$wt)
+      }
+      if (raw) {
+        return( out )
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_mlt"))
+        solr_parse(parsed, parsetype, concat)
+      }
+    },
+
+    all = function(name = NULL, params = NULL, body = NULL,
+                   callopts=list(), raw=FALSE, parsetype='df', concat=',',
+                   optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...) {
+
+      if (is.null(params)) {
+        if (is.null(body)) stop("if 'params' NULL, body must be given")
+      }
+      stopifnot(inherits(params, "list") || is.null(params))
+      stopifnot(inherits(body, "list") || is.null(body))
+      if (!is.null(params) && length(params) > 0) {
+        params$rows <- private$adjust_rows(params, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(body) && length(body) > 0) {
+        body$rows <- private$adjust_rows(body, optimizeMaxRows, minOptimizedRows, name)
+      }
+      if (!is.null(params)) params <- check_args_search(params, keys_all, ...)
+      if (!is.null(body)) body <- check_args_search(body, keys_all, ...)
+
+      if (!is.null(body)) {
+        res <- solr_POST_body(
+          self$make_url(),
+          if (!is.null(name)) url_handle(name) else self$path,
+          body, params, ctype_json(), callopts, self$proxy)
+        out <- structure(res, class = "sr_all", wt = body$wt)
+      } else {
+        res <- solr_GET(self$make_url(),
+                        if (!is.null(name)) url_handle(name) else self$path,
+                        params, callopts, self$proxy)
+        out <- structure(res, class = "sr_all", wt = params$wt)
+      }
+      if (raw) {
+        return( out )
+      } else {
+        parsed <- cont_parse(out, params$wt %||% body$wt %||% "json")
+        parsed <- structure(parsed, class = c(class(parsed), "sr_all"))
+        solr_parse(parsed, parsetype, concat)
+      }
+    },
+
+
+    # documents methods
+    get = function(ids, name, fl = NULL, wt = 'json', raw = FALSE, ...) {
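+      # uses Solr's real-time get handler at solr/<name>/get, which looks
+      # up documents by id (and can see not-yet-committed updates)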
+      if (!is.null(fl)) fl <- paste0(fl, collapse = ",")
+      args <- sc(list(ids = paste0(ids, collapse = ","), fl = fl, wt = wt))
+      res <- solr_GET(self$make_url(), sprintf('solr/%s/get', name),
+                      args, list(...), self$proxy)
+      config_parse(res, wt = wt, raw = raw)
+    },
+
+    add = function(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
+                    boost = NULL, wt = 'json', raw = FALSE, ...) {
+      args <- sc(list(commit = asl(commit), commitWithin = commit_within,
+                      overwrite = asl(overwrite), wt = wt))
+      obj_proc(self$make_url(), sprintf('solr/%s/update/json/docs', name),
+               x, args, raw, self$proxy, ...)
+    },
+
+    delete_by_id = function(ids, name, commit = TRUE, commit_within = NULL,
+                            overwrite = TRUE, boost = NULL, wt = 'json',
+                            raw = FALSE, ...) {
+      args <- sc(list(commit = asl(commit), wt = wt))
+      body <- list(delete = lapply(ids, function(z) list(id = z)))
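+      # serializes to a JSON update body like
+      # {"delete": [{"id": "1"}, {"id": "2"}]}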
+      obj_proc(self$make_url(), sprintf('solr/%s/update/json', name), body,
+               args, raw, self$proxy, ...)
+    },
+
+    delete_by_query = function(query, name, commit = TRUE, commit_within = NULL,
+                               overwrite = TRUE, boost = NULL, wt = 'json',
+                               raw = FALSE, ...) {
+      args <- sc(list(commit = asl(commit), wt = wt))
+      body <- list(delete = list(query = query))
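+      # serializes to a JSON update body like {"delete": {"query": "*:*"}}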
+      obj_proc(self$make_url(), sprintf('solr/%s/update/json', name), body,
+               args, raw, self$proxy, ...)
+    },
+
+    update_json = function(files, name, commit = TRUE, optimize = FALSE,
+      max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+      soft_commit = FALSE, prepare_commit = NULL, wt = 'json',
+      raw = FALSE, ...) {
+
+      private$stop_if_absent(name)
+      args <- sc(list(commit = asl(commit), optimize = asl(optimize),
+                      maxSegments = max_segments,
+                      expungeDeletes = asl(expunge_deletes),
+                      waitSearcher = asl(wait_searcher),
+                      softCommit = asl(soft_commit),
+                      prepareCommit = prepare_commit, wt = wt))
+      docreate(self$make_url(), sprintf('solr/%s/update/json/docs', name),
+               crul::upload(files), args, ctype_json(), raw, self$proxy,
+               ...)
+    },
+
+    update_xml = function(files, name, commit = TRUE, optimize = FALSE,
+      max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+      soft_commit = FALSE, prepare_commit = NULL, wt = 'json',
+      raw = FALSE, ...) {
+
+      private$stop_if_absent(name)
+      args <- sc(
+        list(commit = asl(commit), optimize = asl(optimize),
+             maxSegments = max_segments, expungeDeletes = asl(expunge_deletes),
+             waitSearcher = asl(wait_searcher), softCommit = asl(soft_commit),
+             prepareCommit = prepare_commit, wt = wt))
+      docreate(self$make_url(), sprintf('solr/%s/update', name),
+               crul::upload(files), args, ctype_xml(), raw, self$proxy, ...)
+    },
+
+    update_csv = function(files, name, separator = ',', header = TRUE,
+      fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE,
+      encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL,
+      map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL,
+      overwrite = NULL, commit = NULL, wt = 'json', raw = FALSE, ...) {
+
+      private$stop_if_absent(name)
+      if (!is.null(fieldnames)) fieldnames <- paste0(fieldnames, collapse = ",")
+      args <- sc(
+        list(separator = separator, header = asl(header), fieldnames = fieldnames,
+             skip = skip, skipLines = skipLines, trim = asl(trim),
+             encapsulator = encapsulator, escape = escape, keepEmpty = asl(keepEmpty),
+             literal = literal, map = map, split = split, rowid = rowid,
+             rowidOffset = rowidOffset, overwrite = overwrite,
+             commit = commit, wt = wt))
+      docreate(self$make_url(), sprintf('solr/%s/update/csv', name),
+               crul::upload(files), args, ctype_csv(), raw, self$proxy, ...)
+    },
+
+    update_atomic_json = function(body, name, wt = 'json', raw = FALSE, ...) {
+      private$stop_if_absent(name)
+      doatomiccreate(self$make_url(), sprintf('solr/%s/update', name),
+                     body, list(wt = wt), "json", raw, self$proxy, ...)
+    },
+
+    update_atomic_xml = function(body, name, wt = 'json', raw = FALSE, ...) {
+      private$stop_if_absent(name)
+      doatomiccreate(self$make_url(), sprintf('solr/%s/update', name),
+                     body, list(wt = wt), "xml", raw, self$proxy, ...)
+    },
+
+
+
+    # utility functions
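+    # builds only the scheme://host[:port] base (e.g. http://localhost:8983);
+    # the handler path (cores, collections, search, update) is supplied by
+    # each method per request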
+    make_url = function() {
+      if (is.null(self$port)) {
+        #sprintf("%s://%s/%s", self$scheme, self$host, self$path)
+        sprintf("%s://%s", self$scheme, self$host)
+      } else {
+        #sprintf("%s://%s:%s/%s", self$scheme, self$host, self$port, self$path)
+        sprintf("%s://%s:%s", self$scheme, self$host, self$port)
+      }
+    }
+  ),
+
+  private = list(
+    stop_if_absent = function(x) {
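+      # probe both core and collection existence; whichever check errors
+      # (e.g. in the Solr mode where it doesn't apply) counts as FALSE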
+      tmp <- vapply(list(self$core_exists, self$collection_exists), function(z) {
+        tmp <- tryCatch(z(x), error = function(e) e)
+        if (inherits(tmp, "error")) FALSE else tmp
+      }, logical(1))
+      if (!any(tmp)) {
+        stop(
+          x,
+          " doesn't exist - create it first.\n See core_create()/collection_create()",
+          call. = FALSE)
+      }
+    },
+
+    give_data = function(x, y) {
+      if (x) return(y) else jsonlite::fromJSON(y)
+    },
+
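+    # shared handler: every Collections API method above funnels its args
+    # through a single GET to solr/admin/collections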
+    coll_h = function(args, callopts = list(), raw) {
+      res <- solr_GET(self$make_url(), 'solr/admin/collections', args,
+                      callopts,  self$proxy)
+      private$give_data(raw, res)
+    },
+
+    make_proxy = function(args) {
+      if (is.null(args)) {
+        NULL
+      } else {
+        crul::proxy(url = args$url, user = args$user,
+                    pwd = args$pwd, auth = args$auth %||% "basic")
+      }
+    },
+
+    lint_errors = function(x) {
+      if (!x %in% c('simple', 'complete')) {
+        stop("errors must be one of 'simple' or 'complete'")
+      }
+      return(x)
+    },
+
+    adjust_rows = function(x, optimizeMaxRows, minOptimizedRows, name) {
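+      # only bothers Solr with a rows = 0 count query when the requested
+      # rows is negative or exceeds minOptimizedRows; the result is then
+      # capped at numFound so huge page sizes are never requested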
+      rows <- x$rows %||% NULL
+      rows <- cn(rows)
+      if (!is.null(rows) && optimizeMaxRows) {
+        if (rows > minOptimizedRows || rows < 0) {
+          out <- self$search(
+            name = name,
+            params = list(q = x$q %||% NULL, rows = 0, wt = 'json'),
+            raw = TRUE, optimizeMaxRows = FALSE)
+          oj <- jsonlite::fromJSON(out)
+          if (rows > oj$response$numFound || rows < 0) {
+            rows <- as.double(oj$response$numFound)
+          }
+        }
+      }
+
+      return(rows)
+    }
+
+  )
+)
diff --git a/R/add.R b/R/add.R
index ccc2ba0..93dfe3e 100644
--- a/R/add.R
+++ b/R/add.R
@@ -1,47 +1,49 @@
 #' Add documents from R objects
-#' 
+#'
 #' @export
 #' @param x Documents, either as rows in a data.frame, or a list.
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) A collection or core name. Required.
-#' @param commit (logical) If \code{TRUE}, documents immediately searchable. 
-#' Default: \code{TRUE}
-#' @param commit_within (numeric) Milliseconds to commit the change, the 
+#' @param commit (logical) If `TRUE`, documents immediately searchable.
+#' Default: `TRUE`
+#' @param commit_within (numeric) Milliseconds to commit the change, the
 #' document will be added within that time. Default: NULL
-#' @param overwrite (logical) Overwrite documents with matching keys. 
-#' Default: \code{TRUE}
+#' @param overwrite (logical) Overwrite documents with matching keys.
+#' Default: `TRUE`
 #' @param boost (numeric) Boost factor. Default: NULL
-#' @param wt (character) One of json (default) or xml. If json, uses 
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to 
+#' @param wt (character) One of json (default) or xml. If json, uses
+#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
 #' parse
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by 
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
 #' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
-#' 
-#' @details Works for Collections as well as Cores (in SolrCloud and Standalone 
+#' @param ... curl options passed on to [crul::HttpClient]
+#'
+#' @details Works for Collections as well as Cores (in SolrCloud and Standalone
 #' modes, respectively)
-#' 
-#' @seealso \code{\link{update_json}}, \code{\link{update_xml}}, 
+#'
+#' @seealso \code{\link{update_json}}, \code{\link{update_xml}},
 #' \code{\link{update_csv}} for adding documents from files
-#' 
+#'
 #' @examples \dontrun{
-#' solr_connect()
-#' 
+#' (cli <- SolrClient$new())
+#'
 #' # create the books collection
-#' if (!collection_exists("books")) {
-#'   collection_create(name = "books", numShards = 2)
+#' if (!collection_exists(cli, "books")) {
+#'   collection_create(cli, name = "books", numShards = 1)
 #' }
-#' 
+#'
 #' # Documents in a list
 #' ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-#' add(ss, name = "books")
-#' 
+#' add(ss, cli, name = "books")
+#' cli$get(c(1, 2), "books")
+#'
 #' # Documents in a data.frame
 #' ## Simple example
 #' df <- data.frame(id = c(67, 68), price = c(1000, 500000000))
-#' add(x = df, "books")
+#' add(df, cli, "books")
 #' df <- data.frame(id = c(77, 78), price = c(1, 2.40))
-#' add(x = df, "books")
-#' 
+#' add(df, "books")
+#'
 #' ## More complex example, get file from package examples
 #' # start Solr in Schemaless mode first: bin/solr start -e schemaless
 #' file <- system.file("examples", "books.csv", package = "solrium")
@@ -52,10 +54,10 @@
 #'   collection_create(name = "mybooks", numShards = 2)
 #' }
 #' add(x, "mybooks")
-#' 
+#'
 #' # Use modifiers
 #' add(x, "mybooks", commit_within = 5000)
-#' 
+#'
 #' # Get back XML instead of a list
 #' ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
 #' # parsed XML
@@ -63,36 +65,28 @@
 #' # raw XML
 #' add(ss, name = "books", wt = "xml", raw = TRUE)
 #' }
-add <- function(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
-                boost = NULL, wt = 'json', raw = FALSE, ...) {
+add <- function(x, conn, name, commit = TRUE, commit_within = NULL,
+                overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
   UseMethod("add")
 }
 
 #' @export
-add.list <- function(x, name, commit = TRUE, commit_within = NULL, 
-                     overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
-  
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(commit = asl(commit), commitWithin = commit_within, 
-                  overwrite = asl(overwrite), wt = wt))
+add.list <- function(x, conn, name, commit = TRUE, commit_within = NULL,
+  overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
+
+  check_sr(conn)
   if (!is.null(boost)) {
     x <- lapply(x, function(z) modifyList(z, list(boost = boost)))
   }
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json/docs', name)), x, args, raw, conn$proxy, ...)
+  conn$add(x, name, commit, commit_within, overwrite, boost, wt, raw, ...)
 }
 
 #' @export
-add.data.frame <- function(x, name, commit = TRUE, commit_within = NULL, 
-                           overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
-  
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(commit = asl(commit), commitWithin = commit_within, 
-                  overwrite = asl(overwrite), wt = wt))
-  if (!is.null(boost)) {
-    x$boost <- boost
-  }
+add.data.frame <- function(x, conn, name, commit = TRUE, commit_within = NULL,
+  overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
+
+  check_sr(conn)
+  if (!is.null(boost)) x$boost <- boost
   x <- apply(x, 1, as.list)
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json/docs', name)), x, args, raw, conn$proxy, ...)
+  conn$add(x, name, commit, commit_within, overwrite, boost, wt, raw, ...)
 }
diff --git a/R/check_args_helpers.R b/R/check_args_helpers.R
new file mode 100644
index 0000000..efe3ef8
--- /dev/null
+++ b/R/check_args_helpers.R
@@ -0,0 +1,102 @@
+check_args_search <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
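+  # note: the deparse(substitute(x)) check above means wt is defaulted and
+  # validated only when the caller's argument was literally named 'params'
+  # (the query-string route); request bodies are left untouched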
+  if (!is.null(x$fl)) x$fl <- paste0(x$fl, collapse = ",")
+  # args that can be repeated
+  tmp <- x
+  for (i in reps) tmp[[i]] <- NULL
+  x <- c(tmp, collectargs(z = reps, lst = x))
+  # additional parameters
+  x <- c(x, list(...))
+  return(x)
+}
+
+check_args_facet <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
+  if (!is.null(x$fl)) x$fl <- paste0(x$fl, collapse = ",")
+  # args that can be repeated
+  x <- collectargs(reps, x)
+  # additional parameters
+  x <- c(x, list(...))
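+  # fl is pointed at a non-existent field so matched docs come back empty;
+  # only the facet counts matter here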
+  x$fl <- 'DOES_NOT_EXIST'
+  x$facet <- 'true'
+  if (length(x[names(x) %in% "facet.pivot"]) > 1) {
+    xx <- paste0(unlist(unname(x[names(x) %in% "facet.pivot"])),
+                 collapse = ",")
+    x[names(x) %in% "facet.pivot"] <- NULL
+    x$facet.pivot <- xx
+  }
+  # check if any facet.* fields - if none, stop with message
+  if (!any(grepl("facet\\.", names(x)))) {
+    stop("didn't detect any facet. fields - at least 1 required")
+  }
+  return(x)
+}
+
+check_args_stats <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
+  if (!is.null(x$fl)) x$fl <- paste0(x$fl, collapse = ",")
+  # args that can be repeated
+  x <- collectargs(reps, x)
+  # additional parameters
+  x <- c(x, list(...))
+  x$stats <- 'true'
+  return(x)
+}
+
+check_args_high <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
+  if (!is.null(x$fl)) x$fl <- paste0(x$fl, collapse = ",")
+  if (!is.null(x$hl.fl)) names(x$hl.fl) <- rep("hl.fl", length(x$hl.fl))
+  x <- c(popp(x, "hl.fl"), x$hl.fl)
+  # additional parameters
+  x <- c(x, list(...))
+  x$hl <- 'true'
+  # check that args are in acceptable set
+  if (!all(names(x) %in% reps)) stop("some keys not in acceptable set for highlight")
+  return(x)
+}
+
+check_args_mlt <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
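+  # ensure an id field comes back so MLT matches can be tied to documents;
+  # note grepl() does a substring match, so any fl entry containing 'id'
+  # (e.g. 'guid') also passes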
+  fl_str <- paste0(x$fl, collapse = ",")
+  if (any(grepl('id', x$fl))) {
+    x$fl <- fl_str
+  } else {
+    x$fl <- sprintf('id,%s', fl_str)
+  }
+  # additional parameters
+  x <- c(x, list(...))
+  x$mlt <- 'true'
+  # check that args are in acceptable set
+  if (!all(names(x) %in% reps)) stop("some keys not in acceptable set for mlt")
+  return(x)
+}
+
+check_args_group <- function(x, reps, ...) {
+  if (deparse(substitute(x)) == "params") {
+    if (is.null(x$wt)) x$wt <- "json"
+    check_wt(x$wt)
+  }
+  if (!is.null(x$fl)) x$fl <- paste0(x$fl, collapse = ",")
+  # args that can be repeated
+  x <- collectargs(reps, x)
+  # additional parameters
+  x <- c(x, list(...))
+  x$group <- 'true'
+  return(x)
+}
diff --git a/R/collection_addreplica.R b/R/collection_addreplica.R
index 1a6cac1..8829e6c 100644
--- a/R/collection_addreplica.R
+++ b/R/collection_addreplica.R
@@ -4,7 +4,7 @@
 #' specified if the replica is to be created in a specific node
 #'
 #' @export
-#' @param name (character) The name of the collection. Required
+#' @inheritParams collection_create
 #' @param shard (character) The name of the shard to which replica is to be added.
 #' If \code{shard} is not given, then \code{route} must be.
 #' @param route (character) If the exact shard name is not known, users may pass
@@ -15,46 +15,36 @@
 #' @param dataDir	(character)	The directory in which the core should be created
 #' @param async	(character) Request ID to track this action which will be processed
 #' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
 #' @param ... You can pass in parameters like \code{property.name=value}	to set
 #' core property name to value. See the section Defining core.properties for details on
 #' supported properties and values.
 #' (https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' if (!collection_exists("foobar")) {
-#'   collection_create(name = "foobar", numShards = 2) # bin/solr create -c foobar
+#' if (!conn$collection_exists("foobar")) {
+#'   conn$collection_create(name = "foobar", numShards = 2)
+#'   # OR bin/solr create -c foobar
 #' }
 #'
 #' # status
-#' collection_clusterstatus()$cluster$collections$foobar
+#' conn$collection_clusterstatus()$cluster$collections$foobar
 #'
 #' # add replica
-#' if (!collection_exists("foobar")) {
-#'   collection_addreplica(name = "foobar", shard = "shard1")
+#' if (!conn$collection_exists("foobar")) {
+#'   conn$collection_addreplica(name = "foobar", shard = "shard1")
 #' }
 #'
 #' # status again
-#' collection_clusterstatus()$cluster$collections$foobar
-#' collection_clusterstatus()$cluster$collections$foobar$shards
-#' collection_clusterstatus()$cluster$collections$foobar$shards$shard1
+#' conn$collection_clusterstatus()$cluster$collections$foobar
+#' conn$collection_clusterstatus()$cluster$collections$foobar$shards
+#' conn$collection_clusterstatus()$cluster$collections$foobar$shards$shard1
 #' }
-collection_addreplica <- function(name, shard = NULL, route = NULL, node = NULL,
+collection_addreplica <- function(conn, name, shard = NULL, route = NULL, node = NULL,
                               instanceDir = NULL, dataDir = NULL, async = NULL,
                               raw = FALSE, callopts=list(), ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'ADDREPLICA', collection = name, shard = shard, route = route,
-                  node = node, instanceDir = instanceDir, dataDir = dataDir,
-                  async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$collection_addreplica(name, shard, route, node, instanceDir, dataDir,
+                             async, raw, callopts, ...)
 }
diff --git a/R/collection_addreplicaprop.R b/R/collection_addreplicaprop.R
index 18e316b..b5616b9 100644
--- a/R/collection_addreplicaprop.R
+++ b/R/collection_addreplicaprop.R
@@ -5,48 +5,42 @@
 #' with the new value.
 #'
 #' @export
-#' @param name (character) Required. The name of the collection this replica belongs to.
-#' @param shard (character) Required. The name of the shard the replica belongs to.
+#' @inheritParams collection_create
+#' @param shard (character) Required. The name of the shard the replica
+#' belongs to
 #' @param replica (character) Required. The replica, e.g. core_node1.
-#' @param property (character) Required. The property to add. Note: this will have the
-#' literal 'property.' prepended to distinguish it from system-maintained properties.
-#' So these two forms are equivalent: \code{property=special} and
-#' \code{property=property.special}
-#' @param property.value (character) Required. The value to assign to the property.
-#' @param shardUnique (logical) If \code{TRUE}, then setting this property in one
-#' replica will (1) remove the property from all other replicas in that shard.
-#' Default: \code{FALSE}
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param property (character) Required. The property to add. Note: this will
+#' have the literal 'property.' prepended to distinguish it from
+#' system-maintained properties. So these two forms are equivalent:
+#' `property=special` and `property=property.special`
+#' @param property.value (character) Required. The value to assign to
+#' the property
+#' @param shardUnique (logical) If `TRUE`, then setting this property in one
+#' replica will remove the property from all other replicas in that shard.
+#' Default: `FALSE`
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "addrep", numShards = 2) # bin/solr create -c addrep
+#' if (!conn$collection_exists("addrep")) {
+#'   conn$collection_create(name = "addrep", numShards = 1)
+#'   # OR bin/solr create -c addrep
+#' }
 #'
 #' # status
-#' collection_clusterstatus()$cluster$collections$addrep$shards
+#' conn$collection_clusterstatus()$cluster$collections$addrep$shards
 #'
 #' # add the value world to the property hello
-#' collection_addreplicaprop(name = "addrep", shard = "shard1", replica = "core_node1",
-#'    property = "hello", property.value = "world")
+#' conn$collection_addreplicaprop(name = "addrep", shard = "shard1",
+#'   replica = "core_node1", property = "hello", property.value = "world")
 #'
 #' # check status
-#' collection_clusterstatus()$cluster$collections$addrep$shards
-#' collection_clusterstatus()$cluster$collections$addrep$shards$shard1$replicas$core_node1
+#' conn$collection_clusterstatus()$cluster$collections$addrep$shards
+#' conn$collection_clusterstatus()$cluster$collections$addrep$shards$shard1$replicas$core_node1
 #' }
-collection_addreplicaprop <- function(name, shard, replica, property, property.value,
-                                      shardUnique = FALSE, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'ADDREPLICAPROP', collection = name, shard = shard,
-                  replica = replica, property = property,
-                  property.value = property.value,
-                  shardUnique = asl(shardUnique), wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_addreplicaprop <- function(conn, name, shard, replica, property,
+  property.value, shardUnique = FALSE, raw = FALSE, callopts=list()) {
+
+  conn$collection_addreplicaprop(name, shard, replica, property,
+                                 property.value, shardUnique, raw, callopts)
 }
diff --git a/R/collection_addrole.R b/R/collection_addrole.R
index 2f21b6d..fc77b24 100644
--- a/R/collection_addrole.R
+++ b/R/collection_addrole.R
@@ -9,27 +9,20 @@
 #' are up and running
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param role (character) Required. The name of the role. The only supported role
 #' as of now is overseer (set as default).
 #' @param node (character) Required. The name of the node. It is possible to assign a
 #' role even before that node is started.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # get list of nodes
-#' nodes <- collection_clusterstatus()$cluster$live_nodes
-#' collection_addrole(node = nodes[1])
+#' nodes <- conn$collection_clusterstatus()$cluster$live_nodes
+#' collection_addrole(conn, node = nodes[1])
 #' }
-collection_addrole <- function(role = "overseer", node, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'ADDROLE', role = role, node = node, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_addrole <- function(conn, role = "overseer", node, raw = FALSE, ...) {
+  conn$collection_addrole(role, node, raw, ...)
 }
diff --git a/R/collection_balanceshardunique.R b/R/collection_balanceshardunique.R
index da44e22..d45cc41 100644
--- a/R/collection_balanceshardunique.R
+++ b/R/collection_balanceshardunique.R
@@ -1,46 +1,39 @@
 #' @title Balance a property
 #'
-#' @description Insures that a particular property is distributed evenly amongst the
-#' physical nodes that make up a collection. If the property already exists on a replica,
-#' every effort is made to leave it there. If the property is not on any replica on a
-#' shard one is chosen and the property is added.
+#' @description Ensures that a particular property is distributed evenly
+#' amongst the physical nodes that make up a collection. If the property
+#' already exists on a replica, every effort is made to leave it there. If the
+#' property is not on any replica on a shard one is chosen and the property
+#' is added.
 #'
 #' @export
-#' @param name (character) Required. The name of the collection to balance the property in
-#' @param property (character) Required. The property to balance. The literal "property."
-#' is prepended to this property if not specified explicitly.
-#' @param onlyactivenodes (logical) Normally, the property is instantiated on active
-#' nodes only. If \code{FALSE}, then inactive nodes are also included for distribution.
-#' Default: \code{TRUE}
-#' @param shardUnique (logical) Something of a safety valve. There is one pre-defined
-#' property (preferredLeader) that defaults this value to \code{TRUE}. For all other
-#' properties that are balanced, this must be set to \code{TRUE} or an error message is
-#' returned
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
+#' @param property (character) Required. The property to balance. The literal
+#' "property." is prepended to this property if not specified explicitly.
+#' @param onlyactivenodes (logical) Normally, the property is instantiated
+#' on active nodes only. If `FALSE`, then inactive nodes are also included
+#' for distribution. Default: `TRUE`
+#' @param shardUnique (logical) Something of a safety valve. There is one
+#' pre-defined property (preferredLeader) that defaults this value to `TRUE`.
+#' For all other properties that are balanced, this must be set to `TRUE` or
+#' an error message is returned
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "mycollection") # bin/solr create -c mycollection
+#' if (!conn$collection_exists("addrep")) {
+#'   conn$collection_create(name = "mycollection")
+#'   # OR: bin/solr create -c mycollection
+#' }
 #'
 #' # balance preferredLeader property
-#' collection_balanceshardunique("mycollection", property = "preferredLeader")
+#' conn$collection_balanceshardunique("mycollection", property = "preferredLeader")
 #'
 #' # examine cluster status
-#' collection_clusterstatus()$cluster$collections$mycollection
+#' conn$collection_clusterstatus()$cluster$collections$mycollection
 #' }
-collection_balanceshardunique <- function(name, property, onlyactivenodes = TRUE,
+collection_balanceshardunique <- function(conn, name, property, onlyactivenodes = TRUE,
                                           shardUnique = NULL, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'BALANCESHARDUNIQUE', collection = name, property = property,
-                  onlyactivenodes = asl(onlyactivenodes), shardUnique = asl(shardUnique),
-                  wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$collection_balanceshardunique(name, property, onlyactivenodes,
+                                     shardUnique, raw, ...)
 }
diff --git a/R/collection_clusterprop.R b/R/collection_clusterprop.R
index c2753a1..da3cc7f 100644
--- a/R/collection_clusterprop.R
+++ b/R/collection_clusterprop.R
@@ -1,40 +1,32 @@
 #' @title Add, edit, delete a cluster-wide property
 #'
-#' @description Important: whether add, edit, or delete is used is determined by
-#' the value passed to the \code{val} parameter. If the property name is
-#' new, it will be added. If the property name exists, and the value is different,
-#' it will be edited. If the property name exists, and the value is NULL or empty
-#' the property is deleted (unset).
+#' @description Important: whether add, edit, or delete is used is determined
+#' by the value passed to the \code{val} parameter. If the property name is
+#' new, it will be added. If the property name exists, and the value is
+#' different, it will be edited. If the property name exists, and the value
+#' is `NULL` or empty the property is deleted (unset).
 #'
 #' @export
-#' @param name (character) Required. The name of the property. The two supported
-#' properties names are urlScheme and autoAddReplicas. Other names are rejected
-#' with an error
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param name (character) Required. The name of the property. The two
+#' supported property names are urlScheme and autoAddReplicas; other names
+#' are rejected with an error
 #' @param val (character) Required. The value of the property. If the value is
 #' empty or null, the property is unset.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param raw (logical) If \code{TRUE}, returns raw data
+#' @param callopts curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # add the value https to the property urlScheme
-#' collection_clusterprop(name = "urlScheme", val = "https")
+#' collection_clusterprop(conn, name = "urlScheme", val = "https")
 #'
 #' # status again
-#' collection_clusterstatus()$cluster$properties
+#' collection_clusterstatus(conn)$cluster$properties
 #'
 #' # delete the property urlScheme by setting val to NULL or a 0 length string
-#' collection_clusterprop(name = "urlScheme", val = "")
+#' collection_clusterprop(conn, name = "urlScheme", val = "")
 #' }
-collection_clusterprop <- function(name, val, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  val <- if (is.null(val)) "" else val
-  args <- sc(list(action = 'CLUSTERPROP', name = name, val = val, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_clusterprop <- function(conn, name, val, raw = FALSE,
+                                   callopts=list()) {
+  conn$collection_clusterprop(name, val, raw, callopts)
 }
diff --git a/R/collection_clusterstatus.R b/R/collection_clusterstatus.R
index a5ce9da..d654654 100644
--- a/R/collection_clusterstatus.R
+++ b/R/collection_clusterstatus.R
@@ -1,36 +1,26 @@
 #' @title Get cluster status
 #'
-#' @description Fetch the cluster status including collections, shards, replicas,
-#' configuration name as well as collection aliases and cluster properties.
+#' @description Fetch the cluster status including collections, shards, 
+#' replicas, configuration name as well as collection aliases and cluster 
+#' properties.
 #'
 #' @export
-#' @param name (character) The collection name for which information is requested.
-#' If omitted, information on all collections in the cluster will be returned.
-#' @param shard (character) The shard(s) for which information is requested. Multiple
-#' shard names can be specified as a character vector.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
+#' @param shard (character) The shard(s) for which information is requested. 
+#' Multiple shard names can be specified as a character vector.
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_clusterstatus()
-#' res <- collection_clusterstatus()
+#' (conn <- SolrClient$new())
+#' conn$collection_clusterstatus()
+#' res <- conn$collection_clusterstatus()
 #' res$responseHeader
 #' res$cluster
 #' res$cluster$collections
 #' res$cluster$collections$gettingstarted
 #' res$cluster$live_nodes
 #' }
-collection_clusterstatus <- function(name = NULL, shard = NULL, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  shard <- check_shard(shard)
-  args <- sc(list(action = 'CLUSTERSTATUS', collection = name, shard = shard, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_clusterstatus <- function(conn, name = NULL, shard = NULL, 
+                                     raw = FALSE, ...) {
+  conn$collection_clusterstatus(name, shard, raw, ...)
 }
 
 check_shard <- function(x) {
diff --git a/R/collection_create.R b/R/collection_create.R
index 73f9a92..b69d321 100644
--- a/R/collection_create.R
+++ b/R/collection_create.R
@@ -1,7 +1,8 @@
 #' Add a collection
 #'
 #' @export
-#' @param name The name of the collection to be created. Required
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param name (character) The name of the collection to be created. Required
 #' @param numShards (integer) The number of shards to be created as part of the
 #' collection. This is a required parameter when using the 'compositeId' router.
 #' @param maxShardsPerNode (integer) When creating collections, the shards and/or replicas
@@ -14,18 +15,18 @@
 #' @param createNodeSet (character) Allows defining the nodes to spread the new collection
 #' across. If not provided, the CREATE operation will create shard-replica spread across all
 #' live Solr nodes. The format is a comma-separated list of node_names, such as
-#' localhost:8983_solr, localhost:8984_solr, localhost:8985_solr. Default: \code{NULL}
+#' localhost:8983_solr, localhost:8984_solr, localhost:8985_solr. Default: `NULL`
 #' @param collection.configName (character) Defines the name of the configurations (which
 #' must already be stored in ZooKeeper) to use for this collection. If not provided, Solr
-#' will default to the collection name as the configuration name. Default: \code{compositeId}
+#' will default to the collection name as the configuration name. Default: `compositeId`
 #' @param replicationFactor (integer) The number of replicas to be created for each shard.
 #' Default: 1
 #' @param router.name (character) The router name that will be used. The router defines
-#' how documents will be distributed among the shards. The value can be either \code{implicit},
-#' which uses an internal default hash, or \code{compositeId}, which allows defining the specific
+#' how documents will be distributed among the shards. The value can be either `implicit`,
+#' which uses an internal default hash, or `compositeId`, which allows defining the specific
 #' shard to assign documents to. When using the 'implicit' router, the shards parameter is
 #' required. When using the 'compositeId' router, the numShards parameter is required.
-#' For more information, see also the section Document Routing. Default: \code{compositeId}
+#' For more information, see also the section Document Routing. Default: `compositeId`
 #' @param shards (character) A comma separated list of shard names, e.g.,
 #' shard-x,shard-y,shard-z . This is a required parameter when using the 'implicit' router.
 #' @param createNodeSet.shuffle (logical) Controls whether or not the shard-replicas created
@@ -34,7 +35,7 @@
 #' replicas. A 'false' value makes the results of a collection creation predictable and
 #' gives more exact control over the location of the individual shard-replicas, but 'true'
 #' can be a better choice for ensuring replicas are distributed evenly across nodes. Ignored
-#' if createNodeSet is not also specified. Default: \code{TRUE}
+#' if createNodeSet is not also specified. Default: `TRUE`
 #' @param router.field (character) If this field is specified, the router will look at the
 #' value of the field in an input document to compute the hash and identify a shard instead of
 #' looking at the uniqueKey field. If the field specified is null in the document, the document
@@ -42,57 +43,38 @@
 #' parameter _route_ (or shard.keys) to avoid a distributed search.
 #' @param autoAddReplicas	(logical)	When set to true, enables auto addition of replicas on
 #' shared file systems. See the section autoAddReplicas Settings for more details on settings
-#' and overrides. Default: \code{FALSE}
+#' and overrides. Default: `FALSE`
 #' @param async	(character) Request ID to track this action which will be processed
 #' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
-#' @param ... You can pass in parameters like \code{property.name=value}	to set
-#' core property name to value. See the section Defining core.properties for details on
-#' supported properties and values.
-#' (https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param callopts curl options passed on to [crul::HttpClient]
+#' @param ... You can pass in parameters like `property.name=value`	to set
+#' core property name to value. See the section Defining core.properties for
+#' details on supported properties and values.
+#' (https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)
 #' @examples \dontrun{
-#' solr_connect()
-#' 
-#' if (!collection_exists("foobar")) {
-#'   collection_delete(name = "helloWorld")
-#'   collection_create(name = "helloWorld", numShards = 2)
+#' # connect
+#' (cli <- SolrClient$new())
+#'
+#' if (!cli$collection_exists("helloWorld")) {
+#'   cli$collection_create(name = "helloWorld")
 #' }
-#' if (!collection_exists("foobar")) {
-#'   collection_delete(name = "tablesChairs")
-#'   collection_create(name = "tablesChairs")
+#' if (!cli$collection_exists("tablesChairs")) {
+#'   cli$collection_create(name = "tablesChairs")
 #' }
-#' 
-#' # you may have to do this if you don't want to use 
-#' # bin/solr or use zookeeper directly
-#' path <- "~/solr-5.4.1/server/solr/newcore/conf"
-#' dir.create(path, recursive = TRUE)
-#' files <- list.files("~/solr-5.4.1/server/solr/configsets/data_driven_schema_configs/conf/",
-#' full.names = TRUE)
-#' invisible(file.copy(files, path, recursive = TRUE))
-#' collection_create(name = "newcore", collection.configName = "newcore")
 #' }
-collection_create <- function(name, numShards = 2, maxShardsPerNode = 1,
+collection_create <- function(conn, name, numShards = 1, maxShardsPerNode = 1,
                        createNodeSet = NULL, collection.configName = NULL,
                        replicationFactor = 1, router.name = NULL, shards = NULL,
                        createNodeSet.shuffle = TRUE, router.field = NULL,
                        autoAddReplicas = FALSE, async = NULL,
                        raw = FALSE, callopts=list(), ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'CREATE', name = name, numShards = numShards,
-                  replicationFactor = replicationFactor,
-                  maxShardsPerNode = maxShardsPerNode, createNodeSet = createNodeSet,
-                  collection.configName = collection.configName,
-                  router.name = router.name, shards = shards,
-                  createNodeSet.shuffle = asl(createNodeSet.shuffle),
-                  router.field = router.field, autoAddReplicas = asl(autoAddReplicas),
-                  async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$collection_create(
+    name, numShards = numShards, maxShardsPerNode = maxShardsPerNode,
+    createNodeSet = createNodeSet, collection.configName = collection.configName,
+    replicationFactor = replicationFactor, router.name = router.name,
+    shards = shards, createNodeSet.shuffle = createNodeSet.shuffle,
+    router.field = router.field, autoAddReplicas = autoAddReplicas, async = async,
+    raw = raw, callopts = callopts, ...)
 }
diff --git a/R/collection_createalias.R b/R/collection_createalias.R
index 44174c6..e0d7d2f 100644
--- a/R/collection_createalias.R
+++ b/R/collection_createalias.R
@@ -5,25 +5,22 @@
 #' alias, effectively acting like an atomic "MOVE" command.
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param alias (character) Required. The alias name to be created
-#' @param collections (character) Required. A character vector of collections to be aliased
+#' @param collections (character) Required. A character vector of collections
+#' to be aliased
 #' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param callopts curl options passed on to \code{\link[crul]{HttpClient}}
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_create(name = "thingsstuff", numShards = 2)
-#' collection_createalias("tstuff", "thingsstuff")
-#' collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
+#' (conn <- SolrClient$new())
+#'
+#' if (!conn$collection_exists("thingsstuff")) {
+#'   conn$collection_create(name = "thingsstuff")
+#' }
+#'
+#' conn$collection_createalias("tstuff", "thingsstuff")
+#' conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases
 #' }
-collection_createalias <- function(alias, collections, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  collections <- check_shard(collections)
-  args <- sc(list(action = 'CREATEALIAS', name = alias, collections = collections, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_createalias <- function(conn, alias, collections, raw = FALSE, callopts = list()) {
+  conn$collection_createalias(alias, collections, raw, callopts)
 }
diff --git a/R/collection_createshard.R b/R/collection_createshard.R
index 7e0c084..ca40884 100644
--- a/R/collection_createshard.R
+++ b/R/collection_createshard.R
@@ -1,30 +1,20 @@
 #' Create a shard
 #'
 #' @export
-#' @param name (character) Required. The name of the collection that includes the shard
-#' that will be splitted.
+#' @inheritParams collection_create
 #' @param shard (character) Required. The name of the shard to be created.
 #' @param createNodeSet (character) Allows defining the nodes to spread the new
-#' collection across. If not provided, the CREATE operation will create shard-replica
-#' spread across all live Solr nodes. The format is a comma-separated list of
-#' node_names, such as localhost:8983_solr, localhost:8984_s olr, localhost:8985_solr.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' collection across. If not provided, the CREATE operation will create
+#' shard-replicas spread across all live Solr nodes. The format is a
+#' comma-separated list of node_names, such as localhost:8983_solr,
+#' localhost:8984_solr, localhost:8985_solr.
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #' ## FIXME - doesn't work right now
-#' # collection_create(name = "trees")
-#' # collection_createshard(name = "trees", shard = "newshard")
+#' # conn$collection_create(name = "trees")
+#' # conn$collection_createshard(name = "trees", shard = "newshard")
 #' }
-collection_createshard <- function(name, shard, createNodeSet = NULL, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'CREATESHARD', collection = name, shard = shard,
-                  createNodeSet = createNodeSet, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_createshard <- function(conn, name, shard, createNodeSet = NULL, 
+                                   raw = FALSE, ...) {
+  conn$collection_createshard(name, shard, createNodeSet, raw, ...)
 }
diff --git a/R/collection_delete.R b/R/collection_delete.R
index de773d3..22f1f11 100644
--- a/R/collection_delete.R
+++ b/R/collection_delete.R
@@ -1,22 +1,19 @@
 #' Delete a collection
 #'
 #' @export
-#' @param name The name of the collection to be created. Required
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param name (character) The name of the collection to be deleted. Required
 #' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param callopts curl options passed on to \code{\link[crul]{HttpClient}}
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_create(name = "helloWorld")
-#' collection_delete(name = "helloWorld")
+#' (conn <- SolrClient$new())
+#'
+#' if (!conn$collection_exists("helloWorld")) {
+#'   conn$collection_create(name = "helloWorld")
+#' }
+#'
+#' collection_delete(conn, name = "helloWorld")
 #' }
-collection_delete <- function(name, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'DELETE', name = name, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_delete <- function(conn, name, raw = FALSE, callopts = list()) {
+  conn$collection_delete(name, raw, callopts)
 }
diff --git a/R/collection_deletealias.R b/R/collection_deletealias.R
index 8d8f399..fcafaa9 100644
--- a/R/collection_deletealias.R
+++ b/R/collection_deletealias.R
@@ -1,25 +1,22 @@
 #' Delete a collection alias
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param alias (character) Required. The alias name to be deleted
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param callopts curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_create(name = "thingsstuff", numShards = 2)
-#' collection_createalias("tstuff", "thingsstuff")
-#' collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
-#' collection_deletealias("tstuff")
-#' collection_clusterstatus()$cluster$collections$thingsstuff$aliases # gone
+#' (conn <- SolrClient$new())
+#'
+#' if (!conn$collection_exists("thingsstuff")) {
+#'   conn$collection_create(name = "thingsstuff")
+#' }
+#'
+#' conn$collection_createalias("tstuff", "thingsstuff")
+#' conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
+#' conn$collection_deletealias("tstuff")
+#' conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases # gone
 #' }
-collection_deletealias <- function(alias, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'DELETEALIAS', name = alias, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_deletealias <- function(conn, alias, raw = FALSE, callopts = list()) {
+  conn$collection_deletealias(alias, raw, callopts)
 }
diff --git a/R/collection_deletereplica.R b/R/collection_deletereplica.R
index aedf253..0671a4b 100644
--- a/R/collection_deletereplica.R
+++ b/R/collection_deletereplica.R
@@ -7,49 +7,45 @@
 #' unregistered.
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) Required. The name of the collection.
 #' @param shard (character) Required. The name of the shard that includes the replica to
 #' be removed.
 #' @param replica (character) Required. The name of the replica to remove.
-#' @param onlyIfDown (logical) When \code{TRUE} will not take any action if the replica
-#' is active. Default: \code{FALSE}
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param onlyIfDown (logical) When `TRUE` will not take any action if the replica
+#' is active. Default: `FALSE`
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param callopts curl options passed on to [crul::HttpClient]
 #' @param ... You can pass in parameters like \code{property.name=value}	to set
 #' core property name to value. See the section Defining core.properties for details on
 #' supported properties and values.
 #' (https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "foobar2", numShards = 2) # bin/solr create -c foobar2
+#' if (!conn$collection_exists("foobar2")) {
+#'   conn$collection_create(name = "foobar2", maxShardsPerNode = 2)
+#' }
 #'
 #' # status
-#' collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
+#' conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
 #'
 #' # add replica
-#' collection_addreplica(name = "foobar2", shard = "shard1")
+#' conn$collection_addreplica(name = "foobar2", shard = "shard1")
 #'
 #' # delete replica
 #' ## get replica name
-#' nms <- names(collection_clusterstatus()$cluster$collections$foobar2$shards$shard1$replicas)
-#' collection_deletereplica(name = "foobar2", shard = "shard1", replica = nms[1])
+#' nms <- names(conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1$replicas)
+#' conn$collection_deletereplica(name = "foobar2", shard = "shard1", replica = nms[1])
 #'
 #' # status again
-#' collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
+#' conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
 #' }
-collection_deletereplica <- function(name, shard = NULL, replica = NULL, onlyIfDown = FALSE,
-                                  raw = FALSE, callopts=list(), ...) {
+collection_deletereplica <- function(conn, name, shard = NULL, replica = NULL,
+                                     onlyIfDown = FALSE, raw = FALSE,
+                                     callopts=list(), ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'DELETEREPLICA', collection = name, shard = shard, replica = replica,
-                  onlyIfDown = asl(onlyIfDown), wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$collection_deletereplica(name, shard, replica, onlyIfDown, raw,
+                                callopts, ...)
 }
diff --git a/R/collection_deletereplicaprop.R b/R/collection_deletereplicaprop.R
index 315dd3d..8c90dc3 100644
--- a/R/collection_deletereplicaprop.R
+++ b/R/collection_deletereplicaprop.R
@@ -3,49 +3,43 @@
 #' @description Deletes an arbitrary property from a particular replica.
 #'
 #' @export
-#' @param name (character) Required. The name of the collection this replica belongs to.
-#' @param shard (character) Required. The name of the shard the replica belongs to.
+#' @inheritParams collection_create
+#' @param shard (character) Required. The name of the shard the replica
+#' belongs to.
 #' @param replica (character) Required. The replica, e.g. core_node1.
-#' @param property (character) Required. The property to delete. Note: this will have the
-#' literal 'property.' prepended to distinguish it from system-maintained properties.
-#' So these two forms are equivalent: \code{property=special} and
-#' \code{property=property.special}
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param property (character) Required. The property to delete. Note: this
+#' will have the literal 'property.' prepended to distinguish it from
+#' system-maintained properties. So these two forms are equivalent:
+#' `property=special` and `property=property.special`
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "deleterep", numShards = 2) # bin/solr create -c deleterep
+#' if (!conn$collection_exists("deleterep")) {
+#'   conn$collection_create(name = "deleterep")
+#'   # OR: bin/solr create -c deleterep
+#' }
 #'
 #' # status
-#' collection_clusterstatus()$cluster$collections$deleterep$shards
+#' conn$collection_clusterstatus()$cluster$collections$deleterep$shards
 #'
 #' # add the value bar to the property foo
-#' collection_addreplicaprop(name = "deleterep", shard = "shard1", replica = "core_node1",
-#'    property = "foo", property.value = "bar")
+#' conn$collection_addreplicaprop(name = "deleterep", shard = "shard1",
+#'   replica = "core_node1", property = "foo", property.value = "bar")
 #'
 #' # check status
-#' collection_clusterstatus()$cluster$collections$deleterep$shards
-#' collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
+#' conn$collection_clusterstatus()$cluster$collections$deleterep$shards
+#' conn$collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
 #'
 #' # delete replica property
-#' collection_deletereplicaprop(name = "deleterep", shard = "shard1",
+#' conn$collection_deletereplicaprop(name = "deleterep", shard = "shard1",
 #'    replica = "core_node1", property = "foo")
 #'
 #' # check status - foo should be gone
-#' collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
+#' conn$collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
 #' }
-collection_deletereplicaprop <- function(name, shard, replica, property,
+collection_deletereplicaprop <- function(conn, name, shard, replica, property,
                                          raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'DELETEREPLICAPROP', collection = name, shard = shard,
-                  replica = replica, property = property, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$collection_deletereplicaprop(name, shard, replica, property, raw,
+                                    callopts)
 }
diff --git a/R/collection_deleteshard.R b/R/collection_deleteshard.R
index 78dd2e9..3beb37e 100644
--- a/R/collection_deleteshard.R
+++ b/R/collection_deleteshard.R
@@ -5,34 +5,33 @@
 #' which have no range given for custom sharding.
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) Required. The name of the collection that includes the shard
 #' to be deleted
 #' @param shard (character) Required. The name of the shard to be deleted
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
+#'
 #' # create collection
-#' # collection_create(name = "buffalo") # bin/solr create -c buffalo
+#' if (!conn$collection_exists("buffalo")) {
+#'   conn$collection_create(name = "buffalo")
+#'   # OR: bin/solr create -c buffalo
+#' }
 #'
 #' # find shard names
-#' names(collection_clusterstatus()$cluster$collections$buffalo$shards)
+#' names(conn$collection_clusterstatus()$cluster$collections$buffalo$shards)
+#'
 #' # split a shard by name
-#' collection_splitshard(name = "buffalo", shard = "shard1")
+#' collection_splitshard(conn, name = "buffalo", shard = "shard1")
+#'
 #' # now we have three shards
-#' names(collection_clusterstatus()$cluster$collections$buffalo$shards)
+#' names(conn$collection_clusterstatus()$cluster$collections$buffalo$shards)
 #'
 #' # delete shard
-#' collection_deleteshard(name = "buffalo", shard = "shard1_1")
+#' conn$collection_deleteshard(name = "buffalo", shard = "shard1_1")
 #' }
-collection_deleteshard <- function(name, shard, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'DELETESHARD', collection = name, shard = shard, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_deleteshard <- function(conn, name, shard, raw = FALSE, ...) {
+  conn$collection_deleteshard(name, shard, raw, ...)
 }
diff --git a/R/collection_exists.R b/R/collection_exists.R
index a89bced..f458e92 100644
--- a/R/collection_exists.R
+++ b/R/collection_exists.R
@@ -1,30 +1,23 @@
 #' Check if a collection exists
-#' 
+#'
 #' @export
-#' 
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param name (character) Required. The name of the collection
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
-#' @details Simply calls \code{\link{collection_list}} internally
-#' @return A single boolean, \code{TRUE} or \code{FALSE}
+#' @param ... curl options passed on to [crul::HttpClient]
+#' @details Simply calls [collection_list()] internally
+#' @return A single boolean, `TRUE` or `FALSE`
 #' @examples \dontrun{
 #' # start Solr in Cloud mode, e.g.: bin/solr -e cloud
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
-#' 
-#' # connect
-#' solr_connect()
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # exists
-#' collection_exists("gettingstarted")
-#' 
+#' conn$collection_exists("gettingstarted")
+#'
 #' # doesn't exist
-#' collection_exists("hhhhhh")
+#' conn$collection_exists("hhhhhh")
 #' }
-collection_exists <- function(name, ...) {
-  tmp <- suppressMessages(collection_list(...))$collections
-  if (name %in% tmp) {
-    TRUE 
-  } else {
-    FALSE
-  }
+collection_exists <- function(conn, name, ...) {
+  name %in% suppressMessages(conn$collection_list(...))$collections
 }
diff --git a/R/collection_list.R b/R/collection_list.R
index b70c937..c2b2ba3 100644
--- a/R/collection_list.R
+++ b/R/collection_list.R
@@ -1,21 +1,14 @@
 #' List collections
 #'
 #' @export
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams ping
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_list()
-#' collection_list()$collections
+#' (conn <- SolrClient$new())
+#'
+#' conn$collection_list()
+#' conn$collection_list()$collections
+#' collection_list(conn)
 #' }
-collection_list <- function(raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'LIST', wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_list <- function(conn, raw = FALSE, ...) {
+  conn$collection_list(raw = raw, ...)
 }
diff --git a/R/collection_migrate.R b/R/collection_migrate.R
index 6ee23c9..cedecf2 100644
--- a/R/collection_migrate.R
+++ b/R/collection_migrate.R
@@ -1,8 +1,7 @@
 #' Migrate documents to another collection
 #'
 #' @export
-#' @param name (character) Required. The name of the source collection from which
-#' documents will be split
+#' @inheritParams collection_create
 #' @param target.collection (character) Required. The name of the target collection
 #' to which documents will be migrated
 #' @param split.key (character) Required. The routing key prefix. For example, if
@@ -12,37 +11,33 @@
 #' target shard. Default: 60
 #' @param async	(character) Request ID to track this action which will be processed
 #' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "migrate_from") # bin/solr create -c migrate_from
+#' if (!conn$collection_exists("migrate_from")) {
+#'   conn$collection_create(name = "migrate_from")
+#'   # OR: bin/solr create -c migrate_from
+#' }
 #'
 #' # create another collection
-#' collection_create(name = "migrate_to") # bin/solr create -c migrate_to
+#' if (!conn$collection_exists("migrate_to")) {
+#'   conn$collection_create(name = "migrate_to")
+#'   # OR: bin/solr create -c migrate_to
+#' }
 #'
 #' # add some documents
-#' file <- system.file("examples", "books.csv", package = "solr")
+#' file <- system.file("examples", "books.csv", package = "solrium")
 #' x <- read.csv(file, stringsAsFactors = FALSE)
-#' add(x, "migrate_from")
+#' conn$add(x, "migrate_from")
 #'
 #' # migrate some documents from one collection to the other
 #' ## FIXME - not sure if this is actually working....
-#' collection_migrate("migrate_from", "migrate_to", split.key = "05535")
+#' # conn$collection_migrate("migrate_from", "migrate_to", split.key = "05535")
 #' }
-collection_migrate <- function(name, target.collection, split.key, forward.timeout = NULL,
-                               async = NULL, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'MIGRATE', collection = name, target.collection = target.collection,
-                  split.key = split.key, forward.timeout = forward.timeout,
-                  async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_migrate <- function(conn, name, target.collection, split.key,
+                               forward.timeout = NULL, async = NULL,
+                               raw = FALSE, callopts = list()) {
+  conn$collection_migrate(name, target.collection, split.key,
+                          forward.timeout, async, raw = raw, callopts)
 }
diff --git a/R/collection_overseerstatus.R b/R/collection_overseerstatus.R
index 8d888f6..b74f31c 100644
--- a/R/collection_overseerstatus.R
+++ b/R/collection_overseerstatus.R
@@ -1,15 +1,15 @@
 #' @title Get overseer status
 #'
-#' @description Returns the current status of the overseer, performance statistics
-#' of various overseer APIs as well as last 10 failures per operation type.
+#' @description Returns the current status of the overseer, performance
+#' statistics of various overseer APIs, as well as the last 10 failures per
+#' operation type.
 #'
 #' @export
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_overseerstatus()
-#' res <- collection_overseerstatus()
+#' (conn <- SolrClient$new())
+#' conn$collection_overseerstatus()
+#' res <- conn$collection_overseerstatus()
 #' res$responseHeader
 #' res$leader
 #' res$overseer_queue_size
@@ -20,14 +20,6 @@
 #' res$overseer_internal_queue
 #' res$collection_queue
 #' }
-collection_overseerstatus <- function(raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'OVERSEERSTATUS', wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_overseerstatus <- function(conn, raw = FALSE, ...) {
+  conn$collection_overseerstatus(raw, ...)
 }
diff --git a/R/collection_rebalanceleaders.R b/R/collection_rebalanceleaders.R
index a6cdc72..e67bc59 100644
--- a/R/collection_rebalanceleaders.R
+++ b/R/collection_rebalanceleaders.R
@@ -4,43 +4,37 @@
 #' property across active nodes
 #'
 #' @export
-#' @param name (character) Required. The name of the collection rebalance preferredLeaders on.
-#' @param maxAtOnce (integer) The maximum number of reassignments to have queue up at once.
-#' Values <=0 are use the default value Integer.MAX_VALUE. When this number is reached, the
-#' process waits for one or more leaders to be successfully assigned before adding more
-#' to the queue.
-#' @param maxWaitSeconds (integer) Timeout value when waiting for leaders to be reassigned.
-#' NOTE: if maxAtOnce is less than the number of reassignments that will take place,
-#' this is the maximum interval that any single wait for at least one reassignment.
-#' For example, if 10 reassignments are to take place and maxAtOnce is 1 and maxWaitSeconds
-#' is 60, the upper bound on the time that the command may wait is 10 minutes. Default: 60
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
+#' @param maxAtOnce (integer) The maximum number of reassignments to have
+#' queued up at once. Values <=0 use the default value Integer.MAX_VALUE.
+#' When this number is reached, the process waits for one or more leaders to
+#' be successfully assigned before adding more to the queue.
+#' @param maxWaitSeconds (integer) Timeout value when waiting for leaders to
+#' be reassigned. NOTE: if maxAtOnce is less than the number of reassignments
+#' that will take place, this is the maximum interval for any single wait for
+#' at least one reassignment. For example, if 10 reassignments are to take
+#' place and maxAtOnce is 1 and maxWaitSeconds is 60, the upper bound on the
+#' time that the command may wait is 10 minutes. Default: 60
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # create collection
-#' collection_create(name = "mycollection2") # bin/solr create -c mycollection2
+#' if (!conn$collection_exists("mycollection2")) {
+#'   conn$collection_create(name = "mycollection2")
+#'   # OR: bin/solr create -c mycollection2
+#' }
 #'
 #' # balance preferredLeader property
-#' collection_balanceshardunique("mycollection2", property = "preferredLeader")
+#' conn$collection_balanceshardunique("mycollection2", property = "preferredLeader")
 #'
 #' # balance preferredLeader property
-#' collection_rebalanceleaders("mycollection2")
+#' conn$collection_rebalanceleaders("mycollection2")
 #'
 #' # examine cluster status
-#' collection_clusterstatus()$cluster$collections$mycollection2
+#' conn$collection_clusterstatus()$cluster$collections$mycollection2
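+#'
+#' # a sketch of the maxAtOnce/maxWaitSeconds params documented above;
+#' # the values here are illustrative, not defaults
+#' # conn$collection_rebalanceleaders("mycollection2", maxAtOnce = 1,
+#' #   maxWaitSeconds = 120)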
 #' }
-collection_rebalanceleaders <- function(name, maxAtOnce = NULL, maxWaitSeconds = NULL,
-                                          raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'REBALANCELEADERS', collection = name, maxAtOnce = maxAtOnce,
-                  maxWaitSeconds = maxWaitSeconds, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_rebalanceleaders <- function(conn, name, maxAtOnce = NULL,
+  maxWaitSeconds = NULL, raw = FALSE, ...) {
+
+  conn$collection_rebalanceleaders(name, maxAtOnce, maxWaitSeconds, raw, ...)
 }
diff --git a/R/collection_reload.R b/R/collection_reload.R
index 7fe723d..285f4ea 100644
--- a/R/collection_reload.R
+++ b/R/collection_reload.R
@@ -1,22 +1,16 @@
 #' Reload a collection
 #'
 #' @export
-#' @param name The name of the collection to reload. Required
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
 #' @examples \dontrun{
-#' solr_connect()
-#' collection_create(name = "helloWorld")
-#' collection_reload(name = "helloWorld")
+#' (conn <- SolrClient$new())
+#'
+#' if (!conn$collection_exists("helloWorld")) {
+#'   conn$collection_create(name = "helloWorld")
+#' }
+#'
+#' conn$collection_reload(name = "helloWorld")
 #' }
-collection_reload <- function(name, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'RELOAD', name = name, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_reload <- function(conn, name, raw = FALSE, callopts = list()) {
+  conn$collection_reload(name, raw, callopts)
 }
diff --git a/R/collection_removerole.R b/R/collection_removerole.R
index f54ef2f..c4dfb42 100644
--- a/R/collection_removerole.R
+++ b/R/collection_removerole.R
@@ -4,27 +4,19 @@
 #' assigned using \code{\link{collection_addrole}}
 #'
 #' @export
-#' @param role (character) Required. The name of the role. The only supported role
-#' as of now is overseer (set as default).
+#' @param role (character) Required. The name of the role. The only supported
+#' role as of now is overseer (set as default).
 #' @param node (character) Required. The name of the node.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams collection_create
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # get list of nodes
-#' nodes <- collection_clusterstatus()$cluster$live_nodes
-#' collection_addrole(node = nodes[1])
-#' collection_removerole(node = nodes[1])
+#' nodes <- conn$collection_clusterstatus()$cluster$live_nodes
+#' conn$collection_addrole(node = nodes[1])
+#' conn$collection_removerole(node = nodes[1])
 #' }
-collection_removerole <- function(role = "overseer", node, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'REMOVEROLE', role = role, node = node, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_removerole <- function(conn, role = "overseer", node, raw = FALSE, 
+                                  ...) {
+  conn$collection_removerole(role, node, raw, ...)
 }
diff --git a/R/collection_requeststatus.R b/R/collection_requeststatus.R
index c6e646a..9cc9153 100644
--- a/R/collection_requeststatus.R
+++ b/R/collection_requeststatus.R
@@ -1,34 +1,14 @@
 #' @title Get request status
 #'
-#' @description Request the status of an already submitted Asynchronous Collection
-#' API call. This call is also used to clear up the stored statuses.
+#' @description Request the status of an already submitted Asynchronous
+#' Collection API call. This call is also used to clear up the stored statuses.
 #'
 #' @export
-#' @param requestid (character) Required. The user defined request-id for the request.
-#' This can be used to track the status of the submitted asynchronous task. \code{-1}
-#' is a special request id which is used to cleanup the stored states for all of the
-#' already completed/failed tasks.
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
-#' @examples \dontrun{
-#' solr_connect()
-#'
-#' # invalid requestid
-#' collection_requeststatus(requestid = "xxx")
-#'
-#' # valid requestid
-#' collection_requeststatus(requestid = "xxx")
-#' res$responseHeader
-#' res$xxx
-#' }
-collection_requeststatus <- function(requestid, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'REQUESTSTATUS', requestid = requestid, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+#' @param requestid (character) Required. The user-defined request-id for the
+#' request. This can be used to track the status of the submitted asynchronous
+#' task. `-1` is a special request id which is used to clean up the stored
+#' states for all of the already completed/failed tasks.
+#' @inheritParams collection_create
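+#' @examples \dontrun{
+#' # a sketch: assumes a prior Collections API call was submitted with
+#' # async = "xxx" (a user-chosen request id)
+#' # (conn <- SolrClient$new())
+#' # conn$collection_requeststatus(requestid = "xxx")
+#'
+#' # clear stored statuses of all completed/failed tasks
+#' # conn$collection_requeststatus(requestid = "-1")
+#' }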
+collection_requeststatus <- function(conn, requestid, raw = FALSE, ...) {
+  conn$collection_requeststatus(requestid, raw, ...)
 }
diff --git a/R/collection_splitshard.R b/R/collection_splitshard.R
index 2354c4a..69dd263 100644
--- a/R/collection_splitshard.R
+++ b/R/collection_splitshard.R
@@ -1,37 +1,31 @@
 #' Split a shard
 #'
 #' @export
-#' @param name (character) Required. The name of the collection that includes the shard
-#' to be split
+#' @inheritParams collection_create
 #' @param shard (character) Required. The name of the shard to be split
-#' @param ranges (character) A comma-separated list of hash ranges in hexadecimal
-#' e.g. ranges=0-1f4,1f5-3e8,3e9-5dc
+#' @param ranges (character) A comma-separated list of hash ranges in
+#' hexadecimal e.g. ranges=0-1f4,1f5-3e8,3e9-5dc
 #' @param split.key (character) The key to use for splitting the index
-#' @param async	(character) Request ID to track this action which will be processed
-#' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param async	(character) Request ID to track this action which will be
+#' processed asynchronously
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
+#'
 #' # create collection
-#' collection_create(name = "trees")
+#' if (!conn$collection_exists("trees")) {
+#'   conn$collection_create("trees")
+#' }
+#'
 #' # find shard names
-#' names(collection_clusterstatus()$cluster$collections$trees$shards)
+#' names(conn$collection_clusterstatus()$cluster$collections$trees$shards)
+#'
 #' # split a shard by name
-#' collection_splitshard(name = "trees", shard = "shard1")
+#' conn$collection_splitshard(name = "trees", shard = "shard1")
+#'
 #' # now we have three shards
-#' names(collection_clusterstatus()$cluster$collections$trees$shards)
+#' names(conn$collection_clusterstatus()$cluster$collections$trees$shards)
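+#'
+#' # splitting by explicit hash ranges or by key is also possible; the
+#' # values below are illustrative (see the ranges/split.key params above)
+#' # conn$collection_splitshard(name = "trees", shard = "shard2",
+#' #   ranges = "0-1f4,1f5-3e8,3e9-5dc")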
 #' }
-collection_splitshard <- function(name, shard, ranges = NULL, split.key = NULL,
-                                  async = NULL, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'SPLITSHARD', collection = name, shard = shard,
-                  ranges = do_ranges(ranges), split.key = split.key, async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/collections'), args, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+collection_splitshard <- function(conn, name, shard, ranges = NULL, split.key = NULL,
+                                  async = NULL, raw = FALSE, callopts = list()) {
+  conn$collection_splitshard(name, shard, ranges, split.key, async, raw, callopts)
 }
diff --git a/R/collections.R b/R/collections.R
index 0d5734d..171456f 100644
--- a/R/collections.R
+++ b/R/collections.R
@@ -1,33 +1,30 @@
 #' List collections or cores
-#' 
-#' @name collections
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
-#' @details Calls \code{\link{collection_list}} or \code{\link{core_status}} internally, 
+#'
+#' @export
+#' @inheritParams ping
+#' @details Calls [collection_list()] or [core_status()] internally,
 #' and parses out names for you.
-#' @return A character vector
+#' @return character vector
 #' @examples \dontrun{
 #' # connect
-#' solr_connect(verbose = FALSE)
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # list collections
-#' collections()
-#' 
+#' conn$collection_list()
+#' collections(conn)
+#'
 #' # list cores
-#' cores()
-#' 
-#' # curl options
-#' library("httr")
-#' collections(config = verbose())
+#' conn$core_status()
+#' cores(conn)
 #' }
-
-#' @export
-#' @rdname collections
-collections <- function(...) {
-  collection_list(...)$collections
+collections <- function(conn, ...) {
+  check_sr(conn)
+  as.character(conn$collection_list(...)$collections)
 }
 
 #' @export
 #' @rdname collections
-cores <- function(...) {
-  names(core_status(...)$status)
+cores <- function(conn, ...) {
+  check_sr(conn)
+  names(conn$core_status(...)$status)
 }
diff --git a/R/commit.R b/R/commit.R
index e057203..1a38f3d 100644
--- a/R/commit.R
+++ b/R/commit.R
@@ -1,41 +1,33 @@
 #' Commit
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) A collection or core name. Required.
-#' @param expunge_deletes merge segments with deletes away. Default: \code{FALSE}
-#' @param wait_searcher block until a new searcher is opened and registered as the
-#' main query searcher, making the changes visible. Default: \code{TRUE}
-#' @param soft_commit  perform a soft commit - this will refresh the 'view' of the
-#' index in a more performant manner, but without "on-disk" guarantees.
-#' Default: \code{FALSE}
+#' @param expunge_deletes merge segments with deletes away. Default: `FALSE`
+#' @param wait_searcher block until a new searcher is opened and registered as
+#' the main query searcher, making the changes visible. Default: `TRUE`
+#' @param soft_commit  perform a soft commit - this will refresh the 'view' of
+#' the index in a more performant manner, but without "on-disk" guarantees.
+#' Default: `FALSE`
 #' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
-#' parse
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
-#' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' [jsonlite::fromJSON()] to parse. If xml, uses [xml2::read_xml()] to parse
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
+#' `wt` param
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
-#' commit("gettingstarted")
-#' commit("gettingstarted", wait_searcher = FALSE)
+#' conn$commit("gettingstarted")
+#' conn$commit("gettingstarted", wait_searcher = FALSE)
 #'
 #' # get xml back
-#' commit("gettingstarted", wt = "xml")
+#' conn$commit("gettingstarted", wt = "xml")
 #' ## raw xml
-#' commit("gettingstarted", wt = "xml", raw = TRUE)
+#' conn$commit("gettingstarted", wt = "xml", raw = TRUE)
 #' }
-commit <- function(name, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE,
-                   wt = 'json', raw = FALSE, ...) {
+commit <- function(conn, name, expunge_deletes = FALSE, wait_searcher = TRUE,
+                   soft_commit = FALSE, wt = 'json', raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update', name)),
-           body = list(commit =
-                         list(expungeDeletes = asl(expunge_deletes),
-                              waitSearcher = asl(wait_searcher),
-                              softCommit = asl(soft_commit))),
-           args = list(wt = wt),
-           raw = raw,
-           conn$proxy, ...)
+  conn$commit(name, expunge_deletes, wait_searcher, soft_commit, wt, raw, ...)
 }
diff --git a/R/config_get.R b/R/config_get.R
index 14037de..3bffdba 100644
--- a/R/config_get.R
+++ b/R/config_get.R
@@ -2,15 +2,16 @@
 #'
 #' @export
 #'
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) The name of the core. Required
 #' @param what (character) What you want to look at. One of solrconfig or
 #' schema. Default: solrconfig
 #' @param wt (character) One of json (default) or xml. Data type returned.
 #' If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
 #' \code{\link[xml2]{read_xml}} to parse.
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
 #' \code{wt}
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @return A list, \code{xml_document}, or character
 #' @details Note that if \code{raw=TRUE}, \code{what} is ignored. That is,
 #' you get all the data when \code{raw=TRUE}.
@@ -20,37 +21,33 @@
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # all config settings
-#' config_get("gettingstarted")
+#' conn$config_get("gettingstarted")
 #'
 #' # just znodeVersion
-#' config_get("gettingstarted", "znodeVersion")
+#' conn$config_get("gettingstarted", "znodeVersion")
 #'
 #' # just znodeVersion
-#' config_get("gettingstarted", "luceneMatchVersion")
+#' conn$config_get("gettingstarted", "luceneMatchVersion")
 #'
 #' # just updateHandler
-#' config_get("gettingstarted", "updateHandler")
+#' conn$config_get("gettingstarted", "updateHandler")
 #'
 #' # just updateHandler
-#' config_get("gettingstarted", "requestHandler")
+#' conn$config_get("gettingstarted", "requestHandler")
 #'
 #' ## Get XML
-#' config_get("gettingstarted", wt = "xml")
-#' config_get("gettingstarted", "updateHandler", wt = "xml")
-#' config_get("gettingstarted", "requestHandler", wt = "xml")
+#' conn$config_get("gettingstarted", wt = "xml")
+#' conn$config_get("gettingstarted", "updateHandler", wt = "xml")
+#' conn$config_get("gettingstarted", "requestHandler", wt = "xml")
 #'
 #' ## Raw data - what param ignored when raw=TRUE
-#' config_get("gettingstarted", raw = TRUE)
+#' conn$config_get("gettingstarted", raw = TRUE)
 #' }
-config_get <- function(name, what = NULL, wt = "json", raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(wt = wt))
-  res <- solr_GET(file.path(conn$url, sprintf('solr/%s/config', name)), args, conn$proxy, ...)
-  config_parse(res, what, wt, raw)
+config_get <- function(conn, name, what = NULL, wt = "json", raw = FALSE, ...) {
+  conn$config_get(name, what, wt, raw, ...)
 }
 
 config_parse <- function(x, what = NULL, wt, raw) {
diff --git a/R/config_overlay.R b/R/config_overlay.R
index e7ef37a..5374458 100644
--- a/R/config_overlay.R
+++ b/R/config_overlay.R
@@ -1,30 +1,25 @@
 #' Get Solr configuration overlay
-#' 
+#'
 #' @export
-#' 
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) The name of the core. Required
-#' @param omitHeader (logical) If \code{TRUE}, omit header. Default: \code{FALSE}
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param omitHeader (logical) If `TRUE`, omit header. Default: `FALSE`
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @return A list with response from server
 #' @examples \dontrun{
 #' # start Solr in Cloud mode, e.g.: bin/solr -e cloud
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
-#' 
+#'
 #' # connect
-#' solr_connect()
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # get config overlay
-#' config_overlay("gettingstarted")
-#' 
+#' conn$config_overlay("gettingstarted")
+#'
 #' # without header
-#' config_overlay("gettingstarted", omitHeader = TRUE)
+#' conn$config_overlay("gettingstarted", omitHeader = TRUE)
 #' }
-config_overlay <- function(name, omitHeader = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  url <- file.path(conn$url, sprintf('solr/%s/config/overlay', name))
-  args <- sc(list(wt = "json", omitHeader = asl(omitHeader)))
-  res <- solr_GET(url, args, conn$proxy, ...)
-  jsonlite::fromJSON(res)
+config_overlay <- function(conn, name, omitHeader = FALSE, ...) {
+  conn$config_overlay(name, omitHeader, ...)
 }
diff --git a/R/config_params.R b/R/config_params.R
index 76ca0ac..e410ccc 100644
--- a/R/config_params.R
+++ b/R/config_params.R
@@ -1,62 +1,43 @@
 #' Set Solr configuration params
-#' 
+#'
 #' @export
-#' 
+#'
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) The name of the core. Required
 #' @param param (character) Name of a parameter
-#' @param set (list) List of key:value pairs of what to set. Create or overwrite 
+#' @param set (list) List of key:value pairs of what to set. Create or overwrite
 #' a parameter set map. Default: NULL (nothing passed)
-#' @param unset (list) One or more character strings of keys to unset. Default: NULL 
+#' @param unset (list) One or more character strings of keys to unset. Default: NULL
 #' (nothing passed)
-#' @param update (list) List of key:value pairs of what to update. Updates a parameter 
-#' set map. This essentially overwrites the old parameter set, so all parameters must 
+#' @param update (list) List of key:value pairs of what to update. Updates a parameter
+#' set map. This essentially overwrites the old parameter set, so all parameters must
 #' be sent in each update request.
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @return A list with response from server
-#' @details The Request Parameters API allows creating parameter sets that can 
-#' override or take the place of parameters defined in solrconfig.xml. It is 
-#' really another endpoint of the Config API instead of a separate API, and 
-#' has distinct commands. It does not replace or modify any sections of 
-#' solrconfig.xml, but instead provides another approach to handling parameters 
-#' used in requests. It behaves in the same way as the Config API, by storing 
-#' parameters in another file that will be used at runtime. In this case, 
-#' the parameters are stored in a file named params.json. This file is kept in 
+#' @details The Request Parameters API allows creating parameter sets that can
+#' override or take the place of parameters defined in solrconfig.xml. It is
+#' really another endpoint of the Config API instead of a separate API, and
+#' has distinct commands. It does not replace or modify any sections of
+#' solrconfig.xml, but instead provides another approach to handling parameters
+#' used in requests. It behaves in the same way as the Config API, by storing
+#' parameters in another file that will be used at runtime. In this case,
+#' the parameters are stored in a file named params.json. This file is kept in
 #' ZooKeeper or in the conf directory of a standalone Solr instance.
 #' @examples \dontrun{
 #' # start Solr in standard or Cloud mode
 #' # connect
-#' solr_connect()
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # set a parameter set
 #' myFacets <- list(myFacets = list(facet = TRUE, facet.limit = 5))
-#' config_params("gettingstarted", set = myFacets)
-#' 
+#' config_params(conn, "gettingstarted", set = myFacets)
+#'
 #' # check a parameter
-#' config_params("gettingstarted", param = "myFacets")
-#' 
-#' # see all params
-#' config_params("gettingstarted")
+#' config_params(conn, "gettingstarted", param = "myFacets")
 #' }
-config_params <- function(name, param = NULL, set = NULL, 
+config_params <- function(conn, name, param = NULL, set = NULL,
                           unset = NULL, update = NULL, ...) {
-  
-  conn <- solr_settings()
-  check_conn(conn)
-  if (all(vapply(list(set, unset, update), is.null, logical(1)))) {
-    if (is.null(param)) {
-      url <- file.path(conn$url, sprintf('solr/%s/config/params', name))
-    } else {
-      url <- file.path(conn$url, sprintf('solr/%s/config/params/%s', name, param))
-    }
-    res <- solr_GET(url, list(wt = "json"), conn$proxy, ...)
-  } else {
-    url <- file.path(conn$url, sprintf('solr/%s/config/params', name))
-    body <- sc(c(name_by(unbox_if(set, TRUE), "set"), 
-                 name_by(unbox_if(unset, TRUE), "unset"),
-                 name_by(unbox_if(update, TRUE), "update")))
-    res <- solr_POST_body(url, body, list(wt = "json"), conn$proxy, ...)
-  }
-  jsonlite::fromJSON(res)
+  conn$config_params(name, param, set, unset, update, ...)
 }
 
 name_by <- function(x, y) {
diff --git a/R/config_set.R b/R/config_set.R
index 1d009d9..c8fbc92 100644
--- a/R/config_set.R
+++ b/R/config_set.R
@@ -1,44 +1,41 @@
 #' Set Solr configuration details
-#' 
+#'
 #' @export
-#' 
+#'
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) The name of the core. Required
-#' @param set (list) List of key:value pairs of what to set. Default: NULL 
+#' @param set (list) List of key:value pairs of what to set. Default: NULL
 #' (nothing passed)
-#' @param unset (list) One or more character strings of keys to unset. Default: NULL 
+#' @param unset (list) One or more character strings of keys to unset. Default: NULL
 #' (nothing passed)
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @return A list with response from server
 #' @examples \dontrun{
 #' # start Solr in Cloud mode, e.g.: bin/solr -e cloud
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
-#' 
+#'
 #' # connect
-#' solr_connect()
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # set a property
-#' config_set("gettingstarted", set = list(query.filterCache.autowarmCount = 1000))
-#' 
+#' conn$config_set("gettingstarted", 
+#'   set = list(query.filterCache.autowarmCount = 1000))
+#'
 #' # unset a property
-#' config_set("gettingstarted", unset = "query.filterCache.size", config = verbose())
-#' 
+#' conn$config_set("gettingstarted", unset = "query.filterCache.size", 
+#'   verbose = TRUE)
+#'
 #' # both set a property and unset a property
-#' config_set("gettingstarted", unset = "enableLazyFieldLoading")
-#' 
+#' conn$config_set("gettingstarted", unset = "enableLazyFieldLoading")
+#'
 #' # many properties
-#' config_set("gettingstarted", set = list(
+#' conn$config_set("gettingstarted", set = list(
 #'    query.filterCache.autowarmCount = 1000,
 #'    query.commitWithin.softCommit = 'false'
 #'  )
 #' )
 #' }
-config_set <- function(name, set = NULL, unset = NULL, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  url <- file.path(conn$url, sprintf('solr/%s/config', name))
-  body <- sc(list(`set-property` = unbox_if(set), 
-                  `unset-property` = unset))
-  res <- solr_POST_body(url, body, list(wt = "json"), conn$proxy, ...)
-  jsonlite::fromJSON(res)
+config_set <- function(conn, name, set = NULL, unset = NULL, ...) {
+  conn$config_set(name, set, unset, ...)
 }
diff --git a/R/connect.R b/R/connect.R
deleted file mode 100644
index 7c60a6c..0000000
--- a/R/connect.R
+++ /dev/null
@@ -1,164 +0,0 @@
-#' @title Solr connection 
-#' 
-#' @description Set Solr options, including base URL, proxy, and errors
-#' 
-#' @export
-#' @param url Base URL for Solr instance. For a local instance, this is likely going
-#' to be \code{http://localhost:8983} (also the default), or a different port if you
-#' set a different port. 
-#' @param proxy List of arguments for a proxy connection, including one or more of:
-#' url, port, username, password, and auth. See \code{\link[httr]{use_proxy}} for 
-#' help, which is used to construct the proxy connection.
-#' @param errors (character) One of simple or complete. Simple gives http code and 
-#' error message on an error, while complete gives both http code and error message, 
-#' and stack trace, if available.
-#' @param verbose (logical) Whether to print help messages or not. E.g., if 
-#' \code{TRUE}, we print the URL on each request to a Solr server for your 
-#' reference. Default: \code{TRUE}
-#' @details This function sets environment variables that we use internally
-#' within functions in this package to determine the right thing to do given your
-#' inputs. 
-#' 
-#' In addition, \code{solr_connect} does a quick \code{GET} request to the URL you 
-#' provide to make sure the service is up.
-#' @examples \dontrun{
-#' # set solr settings
-#' solr_connect()
-#' 
-#' # set solr settings with a proxy
-#' prox <- list(url = "187.62.207.130", port = 3128)
-#' solr_connect(url = "http://localhost:8983", proxy = prox)
-#' 
-#' # get solr settings
-#' solr_settings()
-#' 
-#' # you can also check your settings via Sys.getenv()
-#' Sys.getenv("SOLR_URL")
-#' Sys.getenv("SOLR_ERRORS")
-#' }
-solr_connect <- function(url = "http://localhost:8983", proxy = NULL, 
-                         errors = "simple", verbose = TRUE) {
-  # checks
-  url <- checkurl(url)
-  errors <- match.arg(errors, c('simple', 'complete'))
-  check_proxy_args(proxy)
-  
-  # set
-  Sys.setenv("SOLR_URL" = url)
-  Sys.setenv("SOLR_ERRORS" = errors)
-  Sys.setenv("SOLR_VERBOSITY" = verbose)
-  options(solr_proxy = proxy)
-  
-  # ping server
-  res <- tryCatch(GET(Sys.getenv("SOLR_URL")), error = function(e) e)
-  if (inherits(res, "error")) {
-    stop(sprintf("\n  Failed to connect to %s\n  Remember to start Solr before connecting",
-                 url), call. = FALSE)
-  }
-  
-  structure(list(url = Sys.getenv("SOLR_URL"), 
-                 proxy = make_proxy(proxy), 
-                 errors = Sys.getenv("SOLR_ERRORS"), 
-                 verbose = Sys.getenv("SOLR_VERBOSITY")), 
-            class = "solr_connection")
-}
-
-#' @export
-#' @rdname solr_connect
-solr_settings <- function() {
-  url <- Sys.getenv("SOLR_URL")
-  err <- Sys.getenv("SOLR_ERRORS")
-  verbose <- Sys.getenv("SOLR_VERBOSITY")
-  proxy <- getOption("solr_proxy")
-  structure(list(url = url, proxy = make_proxy(proxy), errors = err, verbose = verbose), class = "solr_connection")
-}
-
-#' @export
-print.solr_connection <- function(x, ...) {
-  cat("<solr_connection>", sep = "\n")
-  cat(paste0("  url:    ", x$url), sep = "\n")
-  cat(paste0("  errors: ", x$errors), sep = "\n")
-  cat(paste0("  verbose: ", x$verbose), sep = "\n")
-  cat("  proxy:", sep = "\n")
-  if (is.null(x$proxy)) {
-  } else {
-    cat(paste0("      url:     ", x$proxy$options$proxy), sep = "\n")
-    cat(paste0("      port:     ", x$proxy$options$proxyport))
-  }
-}
-
-# cat_proxy <- function(x) {
-#   if (is.null(x)) {
-#     ''
-#   } else {
-#     x$options$proxy
-#   }
-# }
-
-check_proxy_args <- function(x) {
-  if (!all(names(x) %in% c('url', 'port', 'username', 'password', 'auth'))) {
-    stop("Input to proxy can only contain: url, port, username, password, auth", 
-         call. = FALSE)
-  }
-}
-
-make_proxy <- function(args) {
-  if (is.null(args)) {
-    NULL
-  } else {
-    httr::use_proxy(url = args$url, port = args$port, 
-                    username = args$username, password = args$password, 
-                    auth = args$auth)
-  }
-}
-
-is_url <- function(x){
-  grepl("https?://", x, ignore.case = TRUE) || grepl("localhost:[0-9]{4}", x, ignore.case = TRUE)
-}
-
-checkurl <- function(x){
-  if (!is_url(x)) {
-    stop("That does not appear to be a url", call. = FALSE)
-  } else {
-    if (grepl("https?", x)) {
-      x
-    } else {
-      paste0("http://", x)
-    }
-  }
-}
-
-# ### R6 version
-# library("R6")
-# library("httr")
-# 
-# solr_connect <- function(url, proxy = NULL) {
-#   .solr_connection$new(url, proxy)
-# }
-# 
-# .solr_connection <-
-#   R6::R6Class("solr_connection",
-#     public = list(
-#       url = "http://localhost:8983",
-#       proxy = NULL,
-#       initialize = function(url, proxy) {
-#         if (!missing(url)) self$url <- url
-#         if (!missing(proxy)) self$proxy <- proxy
-#       },
-#       status = function(...) {
-#         httr::http_status(httr::HEAD(self$url, ...))$message
-#       }
-#     ),
-#     cloneable = FALSE
-# )
-# 
-# conn <- solr_connect("http://scottchamberlain.info/")
-# # conn <- solr_connect$new(url = "http://localhost:8983")
-# # conn <- solr_connect$new(url = 'http://api.plos.org/search')
-# # conn <- solr_connect$new(proxy = use_proxy("64.251.21.73", 8080))
-# conn
-# conn$url
-# conn$proxy
-# conn$status()
-# conn$status(config = verbose())
-# conn$ping()
diff --git a/R/content_types.R b/R/content_types.R
new file mode 100644
index 0000000..5e4523c
--- /dev/null
+++ b/R/content_types.R
@@ -0,0 +1,12 @@
+ctype_xml <- function() list(`Content-Type` = "application/xml") 
+ctype_json <- function() list(`Content-Type` = "application/json") 
+ctype_csv <- function() list(`Content-Type` = "application/csv")
+ctype <- function(x) list(`Content-Type` = x)
+
+get_ctype <- function(x) {
+  switch(x, 
+         xml = ctype_xml(),
+         json = ctype_json(),
+         csv = ctype("application/csv; charset=utf-8")
+  )
+}
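+
+# usage sketch (internal helpers, not exported): get_ctype() maps a wt value
+# to a Content-Type header list, e.g.
+# get_ctype("json") # list(`Content-Type` = "application/json")
+# get_ctype("csv")  # list(`Content-Type` = "application/csv; charset=utf-8")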
diff --git a/R/core_create.R b/R/core_create.R
index c068cad..820eb33 100644
--- a/R/core_create.R
+++ b/R/core_create.R
@@ -2,58 +2,53 @@
 #'
 #' @export
 #'
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) The name of the core to be created. Required
 #' @param instanceDir (character) Path to instance directory
 #' @param config (character) Path to config file
 #' @param schema (character) Path to schema file
-#' @param dataDir (character) Name of the data directory relative to instanceDir.
-#' @param configSet (character) Name of the configset to use for this core. For more
-#' information, see https://cwiki.apache.org/confluence/display/solr/Config+Sets
-#' @param collection (character) The name of the collection to which this core belongs.
-#' The default is the name of the core. collection.<param>=<val ue> causes a property of
-#' <param>=<value> to be set if a new collection is being created. Use collection.configNa
-#' me=<configname> to point to the configuration for a new collection.
-#' @param shard (character) The shard id this core represents. Normally you want to be
-#' auto-assigned a shard id.
+#' @param dataDir (character) Name of the data directory relative to
+#' instanceDir.
+#' @param configSet (character) Name of the configset to use for this core.
+#' For more information, see
+#' https://lucene.apache.org/solr/guide/6_6/config-sets.html
+#' @param collection (character) The name of the collection to which this core
+#' belongs. The default is the name of the core. collection.<param>=<value>
+#' causes a property of <param>=<value> to be set if a new collection is being
+#' created. Use collection.configName=<configname> to point to the
+#' configuration for a new collection.
+#' @param shard (character) The shard id this core represents. Normally you
+#' want to be auto-assigned a shard id.
 #' @param async	(character) Request ID to track this action which will be
 #' processed asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
-#' @param ... You can pass in parameters like \code{property.name=value}	to set
-#' core property name to value. See the section Defining core.properties for details on
-#' supported properties and values.
-#' (https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)
+#' @param raw (logical) If `TRUE`, returns raw data
+#' @param callopts curl options passed on to [crul::HttpClient]
+#' @param ... You can pass in parameters like `property.name=value`	to set
+#' core property name to value. See the section Defining core.properties for
+#' details on supported properties and values.
+#' (https://lucene.apache.org/solr/guide/6_6/defining-core-properties.html)
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Create a core
-#' path <- "~/solr-5.4.1/server/solr/newcore/conf"
+#' path <- "~/solr-7.0.0/server/solr/newcore/conf"
 #' dir.create(path, recursive = TRUE)
-#' files <- list.files("~/solr-5.4.1/server/solr/configsets/data_driven_schema_configs/conf/",
+#' files <- list.files("~/solr-7.0.0/server/solr/configsets/sample_techproducts_configs/conf/",
 #' full.names = TRUE)
-#' file.copy(files, path, recursive = TRUE)
-#' core_create(name = "newcore", instanceDir = "newcore", configSet = "basic_configs")
+#' invisible(file.copy(files, path, recursive = TRUE))
+#' conn$core_create(name = "newcore", instanceDir = "newcore",
+#'   configSet = "sample_techproducts_configs")
 #' }
-core_create <- function(name, instanceDir = NULL, config = NULL, schema = NULL, dataDir = NULL,
-                        configSet = NULL, collection = NULL, shard = NULL, async = NULL,
-                        raw = FALSE, callopts=list(), ...) {
+core_create <- function(conn, name, instanceDir = NULL, config = NULL,
+  schema = NULL, dataDir = NULL, configSet = NULL, collection = NULL,
+  shard = NULL, async=NULL, raw = FALSE, callopts=list(), ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'CREATE', name = name, instanceDir = instanceDir,
-                  config = config, schema = schema, dataDir = dataDir,
-                  configSet = configSet, collection = collection, shard = shard,
-                  async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$core_create(name, instanceDir, config, schema, dataDir, configSet,
+                   collection, shard, async, raw, callopts, ...)
 }
-
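
Note the pattern repeated throughout this version: each exported function is
now a thin wrapper that forwards to the method of the same name on a
SolrClient object, so the two call styles below should be interchangeable (a
minimal sketch, assuming a local Solr at the default host and port):

    library(solrium)
    conn <- SolrClient$new()
    core_create(conn, name = "newcore", instanceDir = "newcore")  # functional
    conn$core_create(name = "newcore", instanceDir = "newcore")   # R6 method
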
diff --git a/R/core_exists.R b/R/core_exists.R
index b91f11b..0999600 100644
--- a/R/core_exists.R
+++ b/R/core_exists.R
@@ -1,30 +1,26 @@
 #' Check if a core exists
-#' 
+#'
 #' @export
-#' 
-#' @param name (character) The name of the core. If not given, all cores.
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
-#' @details Simply calls \code{\link{core_status}} internally
-#' @return A single boolean, \code{TRUE} or \code{FALSE}
+#'
+#' @inheritParams core_create
+#' @details Simply calls [core_status()] internally
+#' @return A single boolean, `TRUE` or `FALSE`
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
-#' # where <corename> is the name for your core - or creaate as below
-#' 
+#' # where <corename> is the name for your core - or create as below
+#'
 #' # connect
-#' solr_connect()
-#' 
+#' (conn <- SolrClient$new())
+#'
 #' # exists
-#' core_exists("gettingstarted")
-#' 
+#' conn$core_exists("gettingstarted")
+#'
 #' # doesn't exist
-#' core_exists("hhhhhh")
+#' conn$core_exists("hhhhhh")
 #' }
-core_exists <- function(name, callopts=list()) {
-  tmp <- suppressMessages(core_status(name = name, callopts = callopts))
-  if (length(tmp$status[[1]]) > 0) {
-    TRUE 
-  } else {
-    FALSE
-  }
+core_exists <- function(conn, name, callopts=list()) {
+  tmp <- suppressMessages(core_status(conn, name = name, callopts = callopts))
+  length(tmp$status[[1]]) > 0
 }
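
Because core_exists() now returns a bare logical, it slots directly into guard
clauses; a minimal sketch, assuming a running local Solr and a configset
already in place:

    conn <- SolrClient$new()
    if (!core_exists(conn, "mycore")) {
      conn$core_create(name = "mycore", instanceDir = "mycore")
    }
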
diff --git a/R/core_mergeindexes.R b/R/core_mergeindexes.R
index 0d082c6..0f00678 100644
--- a/R/core_mergeindexes.R
+++ b/R/core_mergeindexes.R
@@ -8,39 +8,29 @@
 #'
 #' @export
 #'
-#' @param name The name of the target core/index. Required
+#' @inheritParams core_create
 #' @param indexDir (character)	Multi-valued, directories that would be merged.
 #' @param srcCore	(character)	Multi-valued, source cores that would be merged.
-#' @param async	(character) Request ID to track this action which will be processed
-#' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param async	(character) Request ID to track this action which will be
+#' processed asynchronously
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #  bin/solr start -e schemaless
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' ## FIXME: not tested yet
 #'
 #' # use indexDir parameter
-#' core_mergeindexes(core="new_core_name", indexDir = c("/solr_home/core1/data/index",
+#' conn$core_mergeindexes(core="new_core_name",
+#'    indexDir = c("/solr_home/core1/data/index",
 #'    "/solr_home/core2/data/index"))
 #'
 #' # use srcCore parameter
-#' core_mergeindexes(name = "new_core_name", srcCore = c('core1', 'core2'))
+#' conn$core_mergeindexes(name = "new_core_name", srcCore = c('core1', 'core2'))
 #' }
-core_mergeindexes <- function(name, indexDir = NULL, srcCore = NULL, async = NULL,
-                        raw = FALSE, callopts = list()) {
-
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'MERGEINDEXES', core = name, indexDir = indexDir,
-                  srcCore = srcCore, async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_mergeindexes <- function(conn, name, indexDir = NULL, srcCore = NULL,
+                              async = NULL, raw = FALSE, callopts = list()) {
+  conn$core_mergeindexes(name, indexDir, srcCore, async, raw, callopts)
 }
diff --git a/R/core_reload.R b/R/core_reload.R
index 0589756..d472c14 100644
--- a/R/core_reload.R
+++ b/R/core_reload.R
@@ -2,30 +2,21 @@
 #'
 #' @export
 #'
-#' @param name (character) The name of the core. Required
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams core_create
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #  bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Status of particular cores
-#' core_reload("gettingstarted")
-#' core_status("gettingstarted")
+#' conn$core_reload("gettingstarted")
+#' conn$core_status("gettingstarted")
 #' }
-core_reload <- function(name, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'RELOAD', core = name, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_reload <- function(conn, name, raw = FALSE, callopts=list()) {
+  conn$core_reload(name, raw, callopts)
 }
 
diff --git a/R/core_rename.R b/R/core_rename.R
index e35a669..d34dba2 100644
--- a/R/core_rename.R
+++ b/R/core_rename.R
@@ -2,35 +2,38 @@
 #'
 #' @export
 #'
-#' @param name (character) The name of the core to be renamed. Required
+#' @inheritParams core_create
 #' @param other (character) The new name of the core. Required.
-#' @param async	(character) Request ID to track this action which will be processed
-#' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @param async	(character) Request ID to track this action which will be
+#' processed asynchronously
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Status of particular cores
-#' core_create("testcore") # or create in the CLI: bin/solr create -c testcore
-#' core_rename("testcore", "newtestcore")
-#' core_status("testcore") # core missing
-#' core_status("newtestcore", FALSE) # not missing
+#' path <- "~/solr-7.0.0/server/solr/testcore/conf"
+#' dir.create(path, recursive = TRUE)
+#' files <- list.files(
+#' "~/solr-7.0.0/server/solr/configsets/sample_techproducts_configs/conf/",
+#' full.names = TRUE)
+#' invisible(file.copy(files, path, recursive = TRUE))
+#' conn$core_create("testcore") # or create in CLI: bin/solr create -c testcore
+#'
+#' # rename
+#' conn$core_rename("testcore", "newtestcore")
+#' ## status
+#' conn$core_status("testcore") # core missing
+#' conn$core_status("newtestcore", FALSE) # not missing
+#'
+#' # cleanup
+#' conn$core_unload("newtestcore")
 #' }
-core_rename <- function(name, other, async = NULL, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'RENAME', core = name, other = other, async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_rename <- function(conn, name, other, async = NULL, raw = FALSE, callopts=list()) {
+  conn$core_rename(name, other, async, raw, callopts)
 }
 
diff --git a/R/core_requeststatus.R b/R/core_requeststatus.R
index d4a122c..ccfcba7 100644
--- a/R/core_requeststatus.R
+++ b/R/core_requeststatus.R
@@ -3,23 +3,15 @@
 #' @export
 #'
 #' @param requestid The ID of the asynchronous request whose status you want to check. Required
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams core_create
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #'
 #' # FIXME: not tested yet...
-#' # solr_connect()
-#' # core_requeststatus(requestid = 1)
+#' # (conn <- SolrClient$new())
+#' # conn$core_requeststatus(requestid = 1)
 #' }
-core_requeststatus <- function(requestid, raw = FALSE, callopts = list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'REQUESTSTATUS', requestid = requestid, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_requeststatus <- function(conn, requestid, raw = FALSE, callopts = list()) {
+  conn$core_requeststatus(requestid, raw, callopts)
 }
diff --git a/R/core_split.R b/R/core_split.R
index ae2a5ad..bfb1522 100644
--- a/R/core_split.R
+++ b/R/core_split.R
@@ -7,7 +7,7 @@
 #'
 #' @export
 #'
-#' @param name (character) The name of one of the cores to be swapped. Required
+#' @inheritParams core_create
 #' @param path (character) Two or more target directory paths in which a piece of the
 #' index will be written
 #' @param targetCore (character) Two or more target Solr cores to which a piece
@@ -18,8 +18,6 @@
 #' @param split.key (character) The key to be used for splitting the index
 #' @param async	(character) Request ID to track this action which will be processed
 #' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
 #' @details The core index will be split into as many pieces as the number of \code{path}
 #' or \code{targetCore} parameters.
 #'
@@ -32,49 +30,39 @@
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Swap a core
 #' ## First, create two cores
-#' # core_split("splitcoretest0") # or create in the CLI: bin/solr create -c splitcoretest0
-#' # core_split("splitcoretest1") # or create in the CLI: bin/solr create -c splitcoretest1
-#' # core_split("splitcoretest2") # or create in the CLI: bin/solr create -c splitcoretest2
+#' # conn$core_split("splitcoretest0") # or create in the CLI: bin/solr create -c splitcoretest0
+#' # conn$core_split("splitcoretest1") # or create in the CLI: bin/solr create -c splitcoretest1
+#' # conn$core_split("splitcoretest2") # or create in the CLI: bin/solr create -c splitcoretest2
 #'
 #' ## check status
-#' core_status("splitcoretest0", FALSE)
-#' core_status("splitcoretest1", FALSE)
-#' core_status("splitcoretest2", FALSE)
+#' conn$core_status("splitcoretest0", FALSE)
+#' conn$core_status("splitcoretest1", FALSE)
+#' conn$core_status("splitcoretest2", FALSE)
 #'
 #' ## split core using targetCore parameter
-#' core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"))
+#' conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"))
 #'
 #' ## split core using split.key parameter
 #' ### Here all documents having the same route key as the split.key i.e. 'A!'
 #' ### will be split from the core index and written to the targetCore
-#' core_split("splitcoretest0", targetCore = "splitcoretest1", split.key = "A!")
+#' conn$core_split("splitcoretest0", targetCore = "splitcoretest1", split.key = "A!")
 #'
 #' ## split core using ranges parameter
 #' ### Solr expects hash ranges in hexadecimal, but since we're in R,
 #' ### let's not make our lives any harder, so you can pass in numbers
 #' ### but you can still pass in hexadecimal if you want.
 #' rgs <- c('0-1f4', '1f5-3e8')
-#' core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
+#' conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
 #' rgs <- list(c(0, 500), c(501, 1000))
-#' core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
+#' conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
 #' }
-core_split <- function(name, path = NULL, targetCore = NULL, ranges = NULL, split.key = NULL,
+core_split <- function(conn, name, path = NULL, targetCore = NULL, ranges = NULL, split.key = NULL,
                        async = NULL, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'SPLIT', core = name, ranges = do_ranges(ranges),
-                  split.key = split.key, async = async, wt = 'json'))
-  args <- c(args, make_args(path), make_args(targetCore))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$core_split(name, path, targetCore, ranges, split.key, async, raw, callopts)
 }
 
 make_args <- function(x) {
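
The examples note that Solr expects hash ranges in hexadecimal while plain
numbers are accepted here; presumably do_ranges() (seen in the removed body
above) converts along these lines (an illustrative sketch only, not the
package's actual helper):

    to_hex_range <- function(x) paste(sprintf("%x", x), collapse = "-")
    to_hex_range(c(0, 500))    # "0-1f4"
    to_hex_range(c(501, 1000)) # "1f5-3e8"
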
diff --git a/R/core_status.R b/R/core_status.R
index 22f15c8..eab4dd0 100644
--- a/R/core_status.R
+++ b/R/core_status.R
@@ -2,38 +2,29 @@
 #'
 #' @export
 #'
-#' @param name (character) The name of the core. If not given, all cores.
+#' @inheritParams core_create
 #' @param indexInfo (logical)
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Status of all cores
-#' core_status()
+#' conn$core_status()
 #'
 #' # Status of particular cores
-#' core_status("gettingstarted")
+#' conn$core_status("gettingstarted")
 #'
 #' # Get index info or not
 #' ## Default: TRUE
-#' core_status("gettingstarted", indexInfo = TRUE)
-#' core_status("gettingstarted", indexInfo = FALSE)
+#' conn$core_status("gettingstarted", indexInfo = TRUE)
+#' conn$core_status("gettingstarted", indexInfo = FALSE)
 #' }
-core_status <- function(name = NULL, indexInfo = TRUE, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'STATUS', core = name, indexInfo = asl(indexInfo), wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_status <- function(conn, name = NULL, indexInfo = TRUE, raw = FALSE,
+                        callopts=list()) {
+  conn$core_status(name, indexInfo, raw, callopts)
 }
-
diff --git a/R/core_swap.R b/R/core_swap.R
index b14f1e6..d6b2594 100644
--- a/R/core_swap.R
+++ b/R/core_swap.R
@@ -1,54 +1,49 @@
 #' @title Swap a core
 #'
-#' @description SWAP atomically swaps the names used to access two existing Solr cores.
-#' This can be used to swap new content into production. The prior core remains
-#' available and can be swapped back, if necessary. Each core will be known by
-#' the name of the other, after the swap
+#' @description SWAP atomically swaps the names used to access two existing
+#' Solr cores. This can be used to swap new content into production. The
+#' prior core remains available and can be swapped back, if necessary. Each
+#' core will be known by the name of the other, after the swap
 #'
 #' @export
 #'
-#' @param name (character) The name of one of the cores to be swapped. Required
-#' @param other (character) The name of one of the cores to be swapped. Required.
-#' @param async	(character) Request ID to track this action which will be processed
-#' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
-#' @details Do not use \code{core_swap} with a SolrCloud node. It is not supported and
-#' can result in the core being unusable. We'll try to stop you if you try.
+#' @inheritParams core_create
+#' @param other (character) The name of one of the cores to be swapped.
+#' Required.
+#' @param async	(character) Request ID to track this action which will be
+#' processed asynchronously
+#' @details Do not use `core_swap` with a SolrCloud node. It is not
+#' supported and can result in the core being unusable. We'll try to stop
+#' you if you try.
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #' # you can create a new core like: bin/solr create -c corename
 #' # where <corename> is the name for your core - or create as below
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Swap a core
 #' ## First, create two cores
-#' core_create("swapcoretest") # or create in the CLI: bin/solr create -c swapcoretest
-#' core_create("swapcoretest") # or create in the CLI: bin/solr create -c swapcoretest
+#' conn$core_create("swapcoretest1")
+#' # - or create on CLI: bin/solr create -c swapcoretest1
+#' conn$core_create("swapcoretest2")
+#' # - or create on CLI: bin/solr create -c swapcoretest2
 #'
 #' ## check status
-#' core_status("swapcoretest1", FALSE)
-#' core_status("swapcoretest2", FALSE)
+#' conn$core_status("swapcoretest1", FALSE)
+#' conn$core_status("swapcoretest2", FALSE)
 #'
 #' ## swap core
-#' core_swap("swapcoretest1", "swapcoretest2")
+#' conn$core_swap("swapcoretest1", "swapcoretest2")
 #'
 #' ## check status again
-#' core_status("swapcoretest1", FALSE)
-#' core_status("swapcoretest2", FALSE)
+#' conn$core_status("swapcoretest1", FALSE)
+#' conn$core_status("swapcoretest2", FALSE)
 #' }
-core_swap <- function(name, other, async = NULL, raw = FALSE, callopts=list()) {
-  conn <- solr_settings()
-  check_conn(conn)
-  if (is_in_cloud_mode(conn)) stop("You are in SolrCloud mode, stopping", call. = FALSE)
-  args <- sc(list(action = 'SWAP', core = name, other = other, async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+core_swap <- function(conn, name, other, async = NULL, raw = FALSE,
+                      callopts=list()) {
+  conn$core_swap(name, other, async, raw, callopts)
 }
 
diff --git a/R/core_unload.R b/R/core_unload.R
index bbd5e54..a6d8136 100644
--- a/R/core_unload.R
+++ b/R/core_unload.R
@@ -2,43 +2,36 @@
 #'
 #' @export
 #'
-#' @param name The name of one of the cores to be removed. Required
-#' @param deleteIndex	(logical)	If \code{TRUE}, will remove the index when unloading
-#' the core. Default: \code{FALSE}
-#' @param deleteDataDir	(logical)	If \code{TRUE}, removes the data directory and all
-#' sub-directories. Default: \code{FALSE}
-#' @param deleteInstanceDir	(logical)	If \code{TRUE}, removes everything related to
-#' the core, including the index directory, configuration files and other related
-#' files. Default: \code{FALSE}
-#' @param async	(character) Request ID to track this action which will be processed
-#' asynchronously
-#' @param raw (logical) If \code{TRUE}, returns raw data
-#' @param callopts curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams core_create
+#' @param deleteIndex	(logical)	If `TRUE`, will remove the index when unloading
+#' the core. Default: `FALSE`
+#' @param deleteDataDir	(logical)	If `TRUE`, removes the data directory and all
+#' sub-directories. Default: `FALSE`
+#' @param deleteInstanceDir	(logical)	If `TRUE`, removes everything related to
+#' the core, including the index directory, configuration files and other 
+#' related files. Default: `FALSE`
+#' @param async	(character) Request ID to track this action which will be 
+#' processed asynchronously
 #' @examples \dontrun{
-#' # start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+#' # start Solr with Schemaless mode via the schemaless eg:
+#' #   bin/solr start -e schemaless
 #'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Create a core
-#' core_create(name = "thingsstuff")
+#' conn$core_create(name = "books")
 #'
 #' # Unload a core
-#' core_unload(name = "fart")
+#' conn$core_unload(name = "books")
+#' ## not found
+#' # conn$core_unload(name = "books")
+#' # > Error: 400 - Cannot unload non-existent core [books]
 #' }
-core_unload <- function(name, deleteIndex = FALSE, deleteDataDir = FALSE,
+core_unload <- function(conn, name, deleteIndex = FALSE, deleteDataDir = FALSE,
                         deleteInstanceDir = FALSE, async = NULL,
                         raw = FALSE, callopts = list()) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(action = 'UNLOAD', core = name, deleteIndex = asl(deleteIndex),
-                  deleteDataDir = asl(deleteDataDir), deleteInstanceDir = asl(deleteInstanceDir),
-                  async = async, wt = 'json'))
-  res <- solr_GET(file.path(conn$url, 'solr/admin/cores'), args, callopts, conn$proxy)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+  conn$core_unload(name, deleteIndex, deleteDataDir, deleteInstanceDir, async,
+                   raw, callopts)
 }
diff --git a/R/delete.R b/R/delete.R
index cd32ed2..f83124d 100644
--- a/R/delete.R
+++ b/R/delete.R
@@ -1,59 +1,58 @@
 #' Delete documents by ID or query
 #'
 #' @name delete
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param ids Document IDs, one or more in a vector or list
 #' @param name (character) A collection or core name. Required.
 #' @param query Query to use to delete documents
-#' @param commit (logical) If \code{TRUE}, documents immediately searchable.
-#' Deafult: \code{TRUE}
-#' @param commit_within (numeric) Milliseconds to commit the change, the document will be added
-#' within that time. Default: NULL
-#' @param overwrite (logical) Overwrite documents with matching keys. Default: \code{TRUE}
-#' @param boost (numeric) Boost factor. Default: NULL
+#' @param commit (logical) If `TRUE`, documents immediately searchable.
+#' Default: `TRUE`
+#' @param commit_within (numeric) Milliseconds to commit the change, the
+#' document will be added within that time. Default: `NULL`
+#' @param overwrite (logical) Overwrite documents with matching keys.
+#' Default: `TRUE`
+#' @param boost (numeric) Boost factor. Default: `NULL`
 #' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
+#' [jsonlite::fromJSON()] to parse. If xml, uses [xml2::read_xml()] to
 #' parse
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
-#' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
+#' `wt` param
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @details We use json internally as the data interchange format for this function.
 #' @examples \dontrun{
-#' solr_connect()
+#' (cli <- SolrClient$new())
 #'
 #' # add some documents first
 #' ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-#' add(ss, name = "gettingstarted")
+#' cli$add(ss, name = "gettingstarted")
 #'
 #' # Now, delete them
 #' # Delete by ID
-#' # delete_by_id(ids = 9)
+#' cli$delete_by_id(ids = 1, "gettingstarted")
 #' ## Many IDs
-#' # delete_by_id(ids = c(3, 4))
+#' cli$delete_by_id(ids = c(3, 4), "gettingstarted")
 #'
 #' # Delete by query
-#' # delete_by_query(query = "manu:bank")
+#' cli$delete_by_query(query = "manu:bank", "gettingstarted")
 #' }
 
 #' @export
 #' @name delete
-delete_by_id <- function(ids, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
-                         boost = NULL, wt = 'json', raw = FALSE, ...) {
+delete_by_id <- function(conn, ids, name, commit = TRUE, commit_within = NULL,
+  overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(commit = asl(commit), wt = wt))
-  body <- list(delete = lapply(ids, function(z) list(id = z)))
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json', name)), body, args, raw, conn$proxy, ...)
+  check_sr(conn)
+  conn$delete_by_id(ids, name, commit, commit_within, overwrite, boost,
+                    wt, raw, ...)
 }
 
 #' @export
 #' @name delete
-delete_by_query <- function(query, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
-                            boost = NULL, wt = 'json', raw = FALSE, ...) {
+delete_by_query <- function(conn, query, name, commit = TRUE,
+  commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json',
+  raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  args <- sc(list(commit = asl(commit), wt = wt))
-  body <- list(delete = list(query = query))
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update/json', name)), body, args, raw, conn$proxy, ...)
+  check_sr(conn)
+  conn$delete_by_query(query, name, commit, commit_within, overwrite, boost,
+                    wt, raw, ...)
 }
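
Per the @details note, JSON is the interchange format; the removed
implementation above shows the bodies being built, which serialize roughly as:

    jsonlite::toJSON(list(delete = list(list(id = 1), list(id = 2))),
                     auto_unbox = TRUE)
    #> {"delete":[{"id":1},{"id":2}]}
    jsonlite::toJSON(list(delete = list(query = "manu:bank")),
                     auto_unbox = TRUE)
    #> {"delete":{"query":"manu:bank"}}
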
diff --git a/R/optimize.R b/R/optimize.R
index 76aa74b..ac2bf99 100644
--- a/R/optimize.R
+++ b/R/optimize.R
@@ -1,42 +1,34 @@
 #' Optimize
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) A collection or core name. Required.
-#' @param max_segments optimizes down to at most this number of segments. Default: 1
-#' @param wait_searcher block until a new searcher is opened and registered as the
-#' main query searcher, making the changes visible. Default: \code{TRUE}
-#' @param soft_commit  perform a soft commit - this will refresh the 'view' of the
-#' index in a more performant manner, but without "on-disk" guarantees.
-#' Default: \code{FALSE}
+#' @param max_segments optimizes down to at most this number of segments.
+#' Default: 1
+#' @param wait_searcher block until a new searcher is opened and registered
+#' as the main query searcher, making the changes visible. Default: `TRUE`
+#' @param soft_commit  perform a soft commit - this will refresh the 'view'
+#' of the index in a more performant manner, but without "on-disk" guarantees.
+#' Default: `FALSE`
 #' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
+#' [jsonlite::fromJSON()] to parse. If xml, uses [xml2::read_xml()] to
 #' parse
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
 #' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @examples \dontrun{
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
-#' optimize("gettingstarted")
-#' optimize("gettingstarted", max_segments = 2)
-#' optimize("gettingstarted", wait_searcher = FALSE)
+#' solr_optimize(conn, "gettingstarted")
+#' solr_optimize(conn, "gettingstarted", max_segments = 2)
+#' solr_optimize(conn, "gettingstarted", wait_searcher = FALSE)
 #'
 #' # get xml back
-#' optimize("gettingstarted", wt = "xml")
+#' solr_optimize(conn, "gettingstarted", wt = "xml")
 #' ## raw xml
-#' optimize("gettingstarted", wt = "xml", raw = TRUE)
+#' solr_optimize(conn, "gettingstarted", wt = "xml", raw = TRUE)
 #' }
-optimize <- function(name, max_segments = 1, wait_searcher = TRUE, soft_commit = FALSE,
-                     wt = 'json', raw = FALSE, ...) {
-
-  conn <- solr_settings()
-  check_conn(conn)
-  obj_proc(file.path(conn$url, sprintf('solr/%s/update', name)),
-           body = list(optimize =
-                         list(maxSegments = max_segments,
-                              waitSearcher = asl(wait_searcher),
-                              softCommit = asl(soft_commit))),
-           args = list(wt = wt),
-           raw = raw,
-           conn$proxy, ...)
+solr_optimize <- function(conn, name, max_segments = 1, wait_searcher = TRUE,
+                     soft_commit = FALSE, wt = 'json', raw = FALSE, ...) {
+  conn$optimize(name, max_segments, wait_searcher, soft_commit, wt, raw, ...)
 }
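
For reference, the removed body above shows what solr_optimize() sends to the
update handler; as JSON it is roughly the following (the package's asl()
helper may serialize the logical flags as lowercase strings):

    jsonlite::toJSON(
      list(optimize = list(maxSegments = 1, waitSearcher = TRUE,
                           softCommit = FALSE)),
      auto_unbox = TRUE)
    #> {"optimize":{"maxSegments":1,"waitSearcher":true,"softCommit":false}}
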
diff --git a/R/parsers.R b/R/parsers.R
index 9181127..56303d8 100644
--- a/R/parsers.R
+++ b/R/parsers.R
@@ -2,13 +2,13 @@
 #'
 #' @param input Output from solr_facet
 #' @param parsetype One of 'list' or 'df' (data.frame)
-#' @param concat Character to conactenate strings by, e.g,. ',' (character). Used
-#' in solr_parse.sr_search only.
-#' @details This is the parser used internally in solr_facet, but if you output raw
-#' data from solr_facet using raw=TRUE, then you can use this function to parse that
-#' data (a sr_facet S3 object) after the fact to a list of data.frame's for easier
-#' consumption. The data format type is detected from the attribute "wt" on the
-#' sr_facet object.
+#' @param concat Character to concatenate strings by, e.g., ',' (character).
+#' Used in solr_parse.sr_search only.
+#' @details This is the parser used internally in solr_facet, but if you
+#' output raw data from solr_facet using raw=TRUE, then you can use this
+#' function to parse that data (a sr_facet S3 object) after the fact to a
+#' list of data.frames for easier consumption. The data format type is
+#' detected from the attribute "wt" on the sr_facet object.
 #' @export
 solr_parse <- function(input, parsetype = NULL, concat) {
   UseMethod("solr_parse")
@@ -30,16 +30,20 @@ solr_parse.update <- function(input, parsetype=NULL, concat=',') {
   wt <- attributes(input)$wt
   switch(wt,
          xml = xml2::read_xml(unclass(input)),
-         json = jsonlite::fromJSON(input, simplifyDataFrame = FALSE, simplifyMatrix = FALSE),
-         csv = jsonlite::fromJSON(input, simplifyDataFrame = FALSE, simplifyMatrix = FALSE)
+         json = jsonlite::fromJSON(input, simplifyDataFrame = FALSE,
+                                   simplifyMatrix = FALSE),
+         csv = jsonlite::fromJSON(input, simplifyDataFrame = FALSE,
+                                  simplifyMatrix = FALSE)
   )
 }
 
 #' @export
 solr_parse.sr_facet <- function(input, parsetype = NULL, concat = ',') {
-  if (inherits(unclass(input), "character")) input <- parse_ch(input, parsetype, concat)
+  if (inherits(unclass(input), "character")) {
+    input <- parse_ch(input, parsetype, concat)
+  }
   wt <- attributes(input)$wt
-  
+
   # Facet queries
   if (wt == 'json') {
     fqdat <- input$facet_counts$facet_queries
@@ -175,7 +179,11 @@ solr_parse.sr_high <- function(input, parsetype='list', concat=',') {
   if (wt == 'json') {
     if (parsetype == 'df') {
       dat <- input$highlight
-      df <- dplyr::bind_rows(lapply(dat, as_data_frame))
+      df <- dplyr::bind_rows(lapply(dat, function(z) {
+        dplyr::as_data_frame(lapply(z, function(w) {
+          if (length(w) > 1) paste0(w, collapse = "") else w
+        }))
+      }))
       if (NROW(df) == 0) {
         highout <- tibble::data_frame()
       } else {
@@ -195,7 +203,7 @@ solr_parse.sr_high <- function(input, parsetype='list', concat=',') {
       )
     })
     if (parsetype == 'df') {
-      highout <- bind_rows(lapply(tmptmp, as_data_frame))
+      highout <- dplyr::bind_rows(lapply(tmptmp, dplyr::as_data_frame))
     } else {
       highout <- tmptmp
     }
@@ -222,7 +230,7 @@ solr_parse.sr_search <- function(input, parsetype = 'list', concat = ',') {
           if (inherits(y, "list")) unlist(tmp) else tmp
         })
       })
-      datout <- bind_rows(lapply(dat2, as_data_frame))
+      datout <- dplyr::bind_rows(lapply(dat2, as_data_frame))
     } else {
       datout <- input$response$docs
     }
@@ -233,7 +241,7 @@ solr_parse.sr_search <- function(input, parsetype = 'list', concat = ',') {
       sapply(xml2::xml_children(x), nmtxt)
     })
     if (parsetype == 'df') {
-      datout <- bind_rows(lapply(tmptmp, as_data_frame))
+      datout <- dplyr::bind_rows(lapply(tmptmp, as_data_frame))
     } else {
       datout <- tmptmp
     }
@@ -275,7 +283,7 @@ solr_parse.sr_mlt <- function(input, parsetype = 'list', concat = ',') {
           }
         })
       })
-      resdat <- bind_rows(lapply(reslist, as_data_frame))
+      resdat <- dplyr::bind_rows(lapply(reslist, as_data_frame))
 
       dat <- input$moreLikeThis
       dat2 <- lapply(dat, function(x){
@@ -293,7 +301,7 @@ solr_parse.sr_mlt <- function(input, parsetype = 'list', concat = ',') {
       datmlt <- list()
       for (i in seq_along(dat)) {
         attsdf <- as_data_frame(popp(dat[[i]], "docs"))
-        df <- bind_rows(lapply(dat[[i]]$docs, function(y) {
+        df <- dplyr::bind_rows(lapply(dat[[i]]$docs, function(y) {
           as_data_frame(lapply(y, function(z) {
             if (length(z) > 1) {
               paste(z, collapse = concat)
@@ -316,7 +324,7 @@ solr_parse.sr_mlt <- function(input, parsetype = 'list', concat = ',') {
     }
   } else {
     res <- xml_find_all(input, '//result[@name="response"]//doc')
-    resdat <- bind_rows(lapply(res, function(x){
+    resdat <- dplyr::bind_rows(lapply(res, function(x){
       tmp <- sapply(xml_children(x), nmtxt)
       as_data_frame(tmp)
     }))
@@ -340,7 +348,7 @@ solr_parse.sr_mlt <- function(input, parsetype = 'list', concat = ',') {
 
     if (parsetype == 'df') {
       datmlt <- lapply(tmptmp, function(z) {
-        df <- bind_rows(lapply(z, as_data_frame))
+        df <- dplyr::bind_rows(lapply(z, as_data_frame))
         atts <- attributes(z)
         attsdf <- as_data_frame(atts)
         if (NROW(df) == 0) {
@@ -379,7 +387,7 @@ solr_parse.sr_stats <- function(input, parsetype = 'list', concat = ',') {
         dat_facet <- lapply(dat, function(x){
           facetted <- x[names(x) %in% 'facets'][[1]]
           if (length(facetted) == 1) {
-            df <- bind_rows(
+            df <- dplyr::bind_rows(
               lapply(facetted[[1]], function(z) {
                 as_data_frame(
                   lapply(z[!names(z) %in% 'facets'], function(w) {
@@ -390,7 +398,7 @@ solr_parse.sr_stats <- function(input, parsetype = 'list', concat = ',') {
             , .id = names(facetted))
           } else {
             df <- stats::setNames(lapply(seq.int(length(facetted)), function(n) {
-              bind_rows(lapply(facetted[[n]], function(b) {
+              dplyr::bind_rows(lapply(facetted[[n]], function(b) {
                 as_data_frame(
                   lapply(b[!names(b) %in% 'facets'], function(w) {
                     if (length(w) == 0) "" else w
@@ -429,14 +437,14 @@ solr_parse.sr_stats <- function(input, parsetype = 'list', concat = ',') {
     temp <- xml_find_all(input, '//lst/lst[@name="stats_fields"]/lst')
     if (parsetype == 'df') {
       # w/o facets
-      dat_reg <- bind_rows(stats::setNames(lapply(temp, function(h){
+      dat_reg <- dplyr::bind_rows(stats::setNames(lapply(temp, function(h){
         as_data_frame(popp(sapply(xml_children(h), nmtxt), "facets"))
       }), xml_attr(temp, "name")), .id = "stat")
       # just facets
       dat_facet <- stats::setNames(lapply(temp, function(e){
         tt <- xml_find_first(e, 'lst[@name="facets"]')
         stats::setNames(lapply(xml_children(tt), function(f){
-          bind_rows(stats::setNames(lapply(xml_children(f), function(g){
+          dplyr::bind_rows(stats::setNames(lapply(xml_children(f), function(g){
             as_data_frame(popp(sapply(xml_children(g), nmtxt), "facets"))
           }), xml_attr(xml_children(f), "name")), .id = xml_attr(f, "name"))
         }), xml_attr(xml_children(tt), "name"))
@@ -459,7 +467,7 @@ solr_parse.sr_stats <- function(input, parsetype = 'list', concat = ',') {
       datout <- list(data = dat_reg, facet = dat_facet)
     }
   }
-  
+
   datout <- if (length(Filter(length, datout)) == 0) NULL else datout
   return( datout )
 }
@@ -546,9 +554,9 @@ solr_parse.sr_group <- function(input, parsetype = 'list', concat = ',') {
     if (parsetype == 'df') {
       datout <- stats::setNames(lapply(temp, function(e){
         tt <- xml_find_first(e, 'arr[@name="groups"]')
-        bind_rows(stats::setNames(lapply(xml_children(tt), function(f){
+        dplyr::bind_rows(stats::setNames(lapply(xml_children(tt), function(f){
           docc <- xml_find_all(f, 'result[@name="doclist"]/doc')
-          df <- bind_rows(lapply(docc, function(g){
+          df <- dplyr::bind_rows(lapply(docc, function(g){
             as_data_frame(sapply(xml_children(g), nmtxt))
           }))
           add_column(
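
As the reworked @details put it, solr_parse() can be applied after the fact to
raw output; a minimal round trip, assuming the PLOS endpoint used elsewhere in
these examples:

    cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL)
    raw <- solr_facet(cli, params = list(q = "*:*", facet.field = "journal"),
                      raw = TRUE)
    solr_parse(raw, parsetype = "df")
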
diff --git a/R/ping.R b/R/ping.R
index 3304048..f049bf2 100644
--- a/R/ping.R
+++ b/R/ping.R
@@ -1,17 +1,16 @@
 #' Ping a Solr instance
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param name (character) Name of a collection or core. Required.
 #' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-#' \code{\link[xml2]{read_xml}} to parse
-#' @param verbose If TRUE (default) the url call used printed to console.
-#' @param raw (logical) If TRUE, returns raw data in format specified by
-#' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' [jsonlite::fromJSON()] to parse. If xml, uses [xml2::read_xml()] to parse
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
+#' `wt` param
+#' @param ... curl options passed on to [crul::HttpClient]
 #'
-#' @return if \code{wt="xml"} an object of class \code{xml_document}, if
-#' \code{wt="json"} an object of class \code{list}
+#' @return if `wt="xml"` an object of class `xml_document`, if
+#' `wt="json"` an object of class `list`
 #'
 #' @details You likely may not be able to run this function against many public
 #' Solr services as they hopefully don't expose their admin interface to the
@@ -23,31 +22,17 @@
 #' # do so
 #'
 #' # connect: by default we connect to localhost, port 8983
-#' solr_connect()
+#' (cli <- SolrClient$new())
 #'
 #' # ping the gettingstarted index
-#' ping("gettingstarted")
-#' ping("gettingstarted", wt = "xml")
-#' ping("gettingstarted", verbose = FALSE)
-#' ping("gettingstarted", raw = TRUE)
+#' cli$ping("gettingstarted")
+#' ping(cli, "gettingstarted")
+#' ping(cli, "gettingstarted", wt = "xml")
+#' ping(cli, "gettingstarted", verbose = FALSE)
+#' ping(cli, "gettingstarted", raw = TRUE)
 #'
-#' library("httr")
-#' ping("gettingstarted", wt="xml", config = verbose())
+#' ping(cli, "gettingstarted", wt="xml", verbose = TRUE)
 #' }
-
-ping <- function(name, wt = 'json', verbose = TRUE, raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  res <- tryCatch(solr_GET(file.path(conn$url, sprintf('solr/%s/admin/ping', name)),
-           args = list(wt = wt), verbose = verbose, conn$proxy, ...), error = function(e) e)
-  if (inherits(res, "error")) {
-    return(list(status = "not found"))
-  } else {
-    out <- structure(res, class = "ping", wt = wt)
-    if (raw) {
-      return( out )
-    } else {
-      solr_parse(out)
-    }
-  }
+ping <- function(conn, name, wt = 'json', raw = FALSE, ...) {
+  conn$ping(name = name, wt = wt, raw = raw, ...)
 }
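
With the verbose parameter dropped from the signature, extra arguments flow
through ... to the HTTP client, so (assuming they map onto curl options, as
the updated examples suggest) the old behavior is recovered with:

    cli <- SolrClient$new()
    ping(cli, "gettingstarted", verbose = TRUE)  # curl-level verbosity via ...
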
diff --git a/R/schema.R b/R/schema.R
index 2eca2b3..eb69d16 100644
--- a/R/schema.R
+++ b/R/schema.R
@@ -1,53 +1,42 @@
 #' Get the schema for a collection or core
 #' 
 #' @export
-#' @param name (character) Name of collection or core
 #' @param what (character) What to retrieve. By default, we retrieve the entire
 #' schema. Options include: fields, dynamicfields, fieldtypes, copyfields, name,
 #' version, uniquekey, similarity, "solrqueryparser/defaultoperator"
-#' @param raw (logical) If \code{TRUE}, returns raw data 
-#' @param verbose If TRUE (default) the url call used printed to console.
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' @inheritParams ping
 #' @examples \dontrun{
 #' # start Solr, in your CLI, run: `bin/solr start -e cloud -noprompt`
 #' # after that, if you haven't run `bin/post -c gettingstarted docs/` yet, do so
 #' 
 #' # connect: by default we connect to localhost, port 8983
-#' solr_connect()
+#' (cli <- SolrClient$new())
 #' 
 #' # get the schema for the gettingstarted index
-#' schema(name = "gettingstarted")
+#' schema(cli, name = "gettingstarted")
 #' 
 #' # Get parts of the schema
-#' schema(name = "gettingstarted", "fields")
-#' schema(name = "gettingstarted", "dynamicfields")
-#' schema(name = "gettingstarted", "fieldtypes")
-#' schema(name = "gettingstarted", "copyfields")
-#' schema(name = "gettingstarted", "name")
-#' schema(name = "gettingstarted", "version")
-#' schema(name = "gettingstarted", "uniquekey")
-#' schema(name = "gettingstarted", "similarity")
-#' schema(name = "gettingstarted", "solrqueryparser/defaultoperator")
+#' schema(cli, name = "gettingstarted", "fields")
+#' schema(cli, name = "gettingstarted", "dynamicfields")
+#' schema(cli, name = "gettingstarted", "fieldtypes")
+#' schema(cli, name = "gettingstarted", "copyfields")
+#' schema(cli, name = "gettingstarted", "name")
+#' schema(cli, name = "gettingstarted", "version")
+#' schema(cli, name = "gettingstarted", "uniquekey")
+#' schema(cli, name = "gettingstarted", "similarity")
+#' schema(cli, name = "gettingstarted", "solrqueryparser/defaultoperator")
 #' 
 #' # get raw data
-#' schema(name = "gettingstarted", "similarity", raw = TRUE)
-#' schema(name = "gettingstarted", "uniquekey", raw = TRUE)
+#' schema(cli, name = "gettingstarted", "similarity", raw = TRUE)
+#' schema(cli, name = "gettingstarted", "uniquekey", raw = TRUE)
 #' 
 #' # start Solr in Schemaless mode: bin/solr start -e schemaless
-#' # schema("gettingstarted")
+#' # schema(cli, "gettingstarted")
 #' 
 #' # start Solr in Standalone mode: bin/solr start
 #' # then add a core: bin/solr create -c helloWorld
-#' # schema("helloWorld")
+#' # schema(cli, "helloWorld")
 #' }
-schema <- function(name, what = '', raw = FALSE, verbose = TRUE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  res <- solr_GET(file.path(conn$url, sprintf('solr/%s/schema', name), what), 
-                  list(wt = "json"), verbose = verbose, conn$proxy, ...)
-  if (raw) {
-    return(res)
-  } else {
-    jsonlite::fromJSON(res)
-  }
+schema <- function(conn, name, what = '', raw = FALSE, ...) {
+  conn$schema(name = name, what = what, raw = raw, ...)
 }
diff --git a/R/search_route_keys.R b/R/search_route_keys.R
new file mode 100644
index 0000000..978f189
--- /dev/null
+++ b/R/search_route_keys.R
@@ -0,0 +1,36 @@
+filter_keys <- function(lst, keys) lst[names(lst) %in% keys] # nocov start
+keys_search <- c("q", "sort", "start", "rows", "pageDoc", "pageScore", "fl",
+                 "defType", "timeAllowed", "qt", "wt", "NOW", "TZ",
+                 "echoHandler", "echoParams")
+keys_facet <- c(
+  "q",  "facet.query",  "facet.field",
+  "facet.prefix", "facet.sort", "facet.limit", "facet.offset",
+  "facet.mincount", "facet.missing", "facet.method", "facet.enum.cache.minDf",
+  "facet.threads", "facet.date", "facet.date.start", "facet.date.end",
+  "facet.date.gap", "facet.date.hardend", "facet.date.other",
+  "facet.date.include", "facet.range", "facet.range.start", "facet.range.end",
+  "facet.range.gap", "facet.range.hardend", "facet.range.other",
+  "facet.range.include", "facet.pivot", "facet.pivot.mincount",
+  "start", "rows", "key", "wt")
+keys_stats <- c("q", "stats.field", "stats.facet", "start", "rows", "key", "wt")
+keys_high <- c("fl", "fq", "hl", "hl.fl", "hl.alternateField",
+               "hl.boundaryScanner", "hl.bs.chars", "hl.bs.country",
+               "hl.bs.language", "hl.bs.maxScan", "hl.bs.type",
+               "hl.formatter", "hl.fragListBuilder", "hl.fragmenter",
+               "hl.fragmentsBuilder", "hl.fragsize", "hl.highlightMultiTerm",
+               "hl.maxAlternateFieldLength", "hl.maxAnalyzedChars",
+               "hl.maxMultiValuedToExamine", "hl.maxMultiValuedToMatch",
+               "hl.mergeContiguous", "hl.preserveMulti",
+               "hl.regex.maxAnalyzedChars", "hl.regex.pattern",
+               "hl.regex.slop", "hl.requireFieldMatch", "hl.simple.post",
+               "hl.simple.pre", "hl.snippets", "hl.useFastVectorHighlighter",
+               "hl.usePhraseHighlighter", "q", "rows", "start", "wt")
+keys_group <- c("group.query", "group.field", "q", "start", "rows", "sort",
+                "fq", "wt", "group.limit", "group.offset", "group.sort",
+                "group.format", "group.func", "group.main", "group.ngroups",
+                "group.cache.percent", "fl")
+keys_all <- c("q", "sort", "start", "rows", "pageDoc", "pageScore", "fl", "fq",
+              "defType", "timeAllowed", "qt", "wt", "NOW", "TZ", "echoHandler")
+keys_mlt <- c("q", "fq", "fl", "mlt.count", "mlt.fl", "mlt.mintf", "mlt.mindf",
+              "mlt.minwl", "mlt.maxwl", "mlt.maxqt", "mlt.maxntp", "mlt.boost",
+              "mlt.qf", "start", "rows", "wt", "mlt") # nocov end
diff --git a/R/solr_all.r b/R/solr_all.r
index 4724a45..237245d 100644
--- a/R/solr_all.r
+++ b/R/solr_all.r
@@ -1,77 +1,68 @@
 #' @title All purpose search
 #'
-#' @description Includes documents, facets, groups, mlt, stats, and highlights.
+#' @description Includes documents, facets, groups, mlt, stats, and highlights
 #'
+#' @export
 #' @template search
-#' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}}
-#' to parse. You can't use \code{csv} because the point of this function
+#' @template optimizerows
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
 #' @return XML, JSON, a list, or data.frame
-#' @seealso \code{\link{solr_highlight}}, \code{\link{solr_facet}}
-#' @references See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for
+#' @seealso [solr_highlight()], [solr_facet()]
+#' @references See <http://wiki.apache.org/solr/#Search_and_Indexing> for
 #' more information.
-#' @export
 #' @examples \dontrun{
 #' # connect
-#' solr_connect('http://api.plos.org/search')
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
-#' solr_all(q='*:*', rows=2, fl='id')
+#' solr_all(cli, params = list(q='*:*', rows=2, fl='id'))
 #'
 #' # facets
-#' solr_all(q='*:*', rows=2, fl='id', facet="true", facet.field="journal")
+#' solr_all(cli, params = list(q='*:*', rows=2, fl='id', facet="true",
+#'   facet.field="journal"))
 #'
 #' # mlt
-#' solr_all(q='ecology', rows=2, fl='id', mlt='true', mlt.count=2, mlt.fl='abstract')
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', mlt='true',
+#'   mlt.count=2, mlt.fl='abstract'))
 #'
 #' # facets and mlt
-#' solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-#' mlt='true', mlt.count=2, mlt.fl='abstract')
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+#'   facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract'))
 #'
 #' # stats
-#' solr_all(q='ecology', rows=2, fl='id', stats='true', stats.field='counter_total_all')
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', stats='true',
+#'   stats.field='counter_total_all'))
 #'
 #' # facets, mlt, and stats
-#' solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-#' mlt='true', mlt.count=2, mlt.fl='abstract', stats='true', stats.field='counter_total_all')
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+#'   facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract',
+#'   stats='true', stats.field='counter_total_all'))
 #'
 #' # group
-#' solr_all(q='ecology', rows=2, fl='id', group='true',
-#'    group.field='journal', group.limit=3)
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', group='true',
+#'  group.field='journal', group.limit=3))
 #'
 #' # facets, mlt, stats, and groups
-#' solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-#'    mlt='true', mlt.count=2, mlt.fl='abstract', stats='true', stats.field='counter_total_all',
-#'    group='true', group.field='journal', group.limit=3)
+#' solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+#'  facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract',
+#'  stats='true', stats.field='counter_total_all', group='true',
+#'  group.field='journal', group.limit=3))
 #'
 #' # using wt = xml
-#' solr_all(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full', wt="xml", raw=TRUE)
+#' solr_all(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+#'   fq='doc_type:full', wt="xml"), raw=TRUE)
 #' }
 
-solr_all <- function(name = NULL, q='*:*', sort=NULL, start=0, rows=NULL, pageDoc=NULL,
-  pageScore=NULL, fq=NULL, fl=NULL, defType=NULL, timeAllowed=NULL, qt=NULL,
-  wt='json', NOW=NULL, TZ=NULL, echoHandler=NULL, echoParams=NULL, key = NULL,
-  callopts=list(), raw=FALSE, parsetype='df', concat=',', ...) {
-
-  check_defunct(...)
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  if (!is.null(fl)) fl <- paste0(fl, collapse = ",")
-  args <- sc(list(q = q, sort = sort, start = start, rows = rows, pageDoc = pageDoc,
-                       pageScore = pageScore, fl = fl, fq = fq, defType = defType,
-                       timeAllowed = timeAllowed, qt = qt, wt = wt, NOW = NOW, TZ = TZ,
-                       echoHandler = echoHandler, echoParams = echoParams))
-
-  # additional parameters
-  args <- c(args, list(...))
+solr_all <- function(conn, name = NULL, params = NULL, body = NULL,
+                     callopts=list(), raw=FALSE, parsetype='df',
+                     concat=',', optimizeMaxRows = TRUE,
+                     minOptimizedRows = 50000L, ...) {
 
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_all", wt = wt)
-  if (raw) {
-    return( out )
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_all"))
-    solr_parse(parsed, parsetype, concat)
-  }
+  conn$all(name = name, params = params, body = body, callopts = callopts,
+             raw = raw, parsetype = parsetype, concat = concat,
+             optimizeMaxRows = optimizeMaxRows,
+             minOptimizedRows = minOptimizedRows, ...)
 }
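
The params/body split decides the HTTP verb: params alone results in a GET,
while a non-NULL body triggers a POST. A sketch against the PLOS endpoint from
the examples (assuming that endpoint accepts POSTed parameters):

    cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL)
    solr_all(cli, params = list(q = "ecology", rows = 2, fl = "id"))  # GET
    solr_all(cli, body = list(q = "ecology", rows = 2, fl = "id"))    # POST
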
diff --git a/R/solr_facet.r b/R/solr_facet.r
index c8c199d..b3026f8 100644
--- a/R/solr_facet.r
+++ b/R/solr_facet.r
@@ -2,125 +2,118 @@
 #'
 #' @description Returns only facet items
 #'
+#' @export
 #' @template facet
-#' @return Raw json or xml, or a list of length 4 parsed elements (usually data.frame's).
-#' @seealso \code{\link{solr_search}}, \code{\link{solr_highlight}}, \code{\link{solr_parse}}
-#' @references See \url{http://wiki.apache.org/solr/SimpleFacetParameters} for
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
+#' @return Raw json or xml, or a list of length 4 parsed elements
+#' (usually data.frames).
+#' @seealso [solr_search()], [solr_highlight()], [solr_parse()]
+#' @references See <http://wiki.apache.org/solr/SimpleFacetParameters> for
 #' more information on faceting.
-#' @export
 #' @examples \dontrun{
-#' # connect
-#' solr_connect('http://api.plos.org/search')
+#' # connect - local Solr instance
+#' (cli <- SolrClient$new())
+#' cli$facet("gettingstarted", params = list(q="*:*", facet.field='name'))
+#' cli$facet("gettingstarted", params = list(q="*:*", facet.field='name'),
+#'   callopts = list(verbose = TRUE))
+#' cli$facet("gettingstarted", body = list(q="*:*", facet.field='name'),
+#'   callopts = list(verbose = TRUE))
 #'
 #' # Facet on a single field
-#' solr_facet(q='*:*', facet.field='journal')
+#' solr_facet(cli, "gettingstarted", params = list(q='*:*', facet.field='name'))
+#'
+#' # Remote instance
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
 #' # Facet on multiple fields
-#' solr_facet(q='alcohol', facet.field=c('journal','subject'))
+#' solr_facet(cli, params = list(q='alcohol',
+#'   facet.field = c('journal','subject')))
 #'
 #' # Using mincount
-#' solr_facet(q='alcohol', facet.field='journal', facet.mincount='500')
+#' solr_facet(cli, params = list(q='alcohol', facet.field='journal',
+#'   facet.mincount='500'))
 #'
 #' # Using facet.query to get counts
-#' solr_facet(q='*:*', facet.field='journal', facet.query=c('cell','bird'))
+#' solr_facet(cli, params = list(q='*:*', facet.field='journal',
+#'   facet.query=c('cell','bird')))
 #'
 #' # Using facet.pivot to simulate SQL group by counts
-#' solr_facet(q='alcohol', facet.pivot='journal,subject',
-#'              facet.pivot.mincount=10)
-#' ## two or more fields are required - you can pass in as a single character string
-#' solr_facet(facet.pivot = "journal,subject", facet.limit =  3)
+#' solr_facet(cli, params = list(q='alcohol', facet.pivot='journal,subject',
+#'              facet.pivot.mincount=10))
+#' ## two or more fields are required - you can pass in as a single
+#' ## character string
+#' solr_facet(cli, params = list(q='*:*', facet.pivot = "journal,subject",
+#'   facet.limit =  3))
 #' ## Or, pass in as a vector of length 2 or greater
-#' solr_facet(facet.pivot = c("journal", "subject"), facet.limit =  3)
+#' solr_facet(cli, params = list(q='*:*', facet.pivot = c("journal", "subject"),
+#'   facet.limit =  3))
 #'
 #' # Date faceting
-#' solr_facet(q='*:*', facet.date='publication_date',
-#' facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW', facet.date.gap='+1DAY')
+#' solr_facet(cli, params = list(q='*:*', facet.date='publication_date',
+#'   facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW',
+#'   facet.date.gap='+1DAY'))
 #' ## two variables
-#' solr_facet(q='*:*', facet.date=c('publication_date', 'timestamp'),
-#' facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW', facet.date.gap='+1DAY')
+#' solr_facet(cli, params = list(q='*:*',
+#'   facet.date=c('publication_date', 'timestamp'),
+#'   facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW',
+#'   facet.date.gap='+1DAY'))
 #'
 #' # Range faceting
-#' solr_facet(q='*:*', facet.range='counter_total_all',
-#' facet.range.start=5, facet.range.end=1000, facet.range.gap=10)
+#' solr_facet(cli, params = list(q='*:*', facet.range='counter_total_all',
+#'   facet.range.start=5, facet.range.end=1000, facet.range.gap=10))
 #'
 #' # Range faceting with > 1 field, same settings
-#' solr_facet(q='*:*', facet.range=c('counter_total_all','alm_twitterCount'),
-#' facet.range.start=5, facet.range.end=1000, facet.range.gap=10)
+#' solr_facet(cli, params = list(q='*:*',
+#'   facet.range=c('counter_total_all','alm_twitterCount'),
+#'   facet.range.start=5, facet.range.end=1000, facet.range.gap=10))
 #'
 #' # Range faceting with > 1 field, different settings
-#' solr_facet(q='*:*', facet.range=c('counter_total_all','alm_twitterCount'),
-#' f.counter_total_all.facet.range.start=5, f.counter_total_all.facet.range.end=1000,
-#' f.counter_total_all.facet.range.gap=10, f.alm_twitterCount.facet.range.start=5,
-#' f.alm_twitterCount.facet.range.end=1000, f.alm_twitterCount.facet.range.gap=10)
+#' solr_facet(cli, params = list(q='*:*',
+#'   facet.range=c('counter_total_all','alm_twitterCount'),
+#'   f.counter_total_all.facet.range.start=5,
+#'   f.counter_total_all.facet.range.end=1000,
+#'   f.counter_total_all.facet.range.gap=10,
+#'   f.alm_twitterCount.facet.range.start=5,
+#'   f.alm_twitterCount.facet.range.end=1000,
+#'   f.alm_twitterCount.facet.range.gap=10))
 #'
 #' # Get raw json or xml
 #' ## json
-#' solr_facet(q='*:*', facet.field='journal', raw=TRUE)
+#' solr_facet(cli, params = list(q='*:*', facet.field='journal'), raw=TRUE)
 #' ## xml
-#' solr_facet(q='*:*', facet.field='journal', raw=TRUE, wt='xml')
+#' solr_facet(cli, params = list(q='*:*', facet.field='journal', wt='xml'),
+#'   raw=TRUE)
 #'
 #' # Get raw data back, and parse later, same as what goes on internally if
 #' # raw=FALSE (Default)
-#' out <- solr_facet(q='*:*', facet.field='journal', raw=TRUE)
+#' out <- solr_facet(cli, params = list(q='*:*', facet.field='journal'),
+#'   raw=TRUE)
 #' solr_parse(out)
-#' out <- solr_facet(q='*:*', facet.field='journal', raw=TRUE,
-#'    wt='xml')
+#' out <- solr_facet(cli, params = list(q='*:*', facet.field='journal',
+#'   wt = 'xml'), raw=TRUE)
 #' solr_parse(out)
 #'
 #' # Using the USGS BISON API (https://bison.usgs.gov/#solr)
 #' ## The occurrence endpoint
-#' solr_connect("https://bison.usgs.gov/solr/occurrences/select")
-#' solr_facet(q='*:*', facet.field='year')
-#' solr_facet(q='*:*', facet.field='computedStateFips')
+#' (cli <- SolrClient$new(host = "bison.usgs.gov", scheme = "https",
+#'   path = "solr/occurrences/select", port = NULL))
+#' solr_facet(cli, params = list(q='*:*', facet.field='year'))
+#' solr_facet(cli, params = list(q='*:*', facet.field='computedStateFips'))
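+#'
+#' # params vs body (a hedged sketch): `params` sends a GET request;
+#' # `body` sends the same query as a POST request (if the server allows)
+#' solr_facet(cli, params = list(q='*:*', facet.field='year'))
+#' # solr_facet(cli, body = list(q='*:*', facet.field='year'))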
 #'
 #' # using a proxy
-#' # prox <- list(url = "54.195.48.153", port = 8888)
-#' # solr_connect(url = 'http://api.plos.org/search', proxy = prox)
-#' # solr_facet(facet.field='journal', callopts=verbose())
+#' # cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL,
+#' #   proxy = list(url = "http://54.195.48.153:8888"))
+#' # solr_facet(cli, params = list(facet.field='journal'),
+#' #   callopts=list(verbose=TRUE))
 #' }
+solr_facet <- function(conn, name = NULL, params = list(q = '*:*'),
+  body = NULL, callopts = list(), raw = FALSE, parsetype = 'df',
+  concat = ',', ...) {
 
-solr_facet <- function(name = NULL, q="*:*", facet.query=NA, facet.field=NA,
-   facet.prefix = NA, facet.sort = NA, facet.limit = NA, facet.offset = NA,
-   facet.mincount = NA, facet.missing = NA, facet.method = NA, facet.enum.cache.minDf = NA,
-   facet.threads = NA, facet.date = NA, facet.date.start = NA, facet.date.end = NA,
-   facet.date.gap = NA, facet.date.hardend = NA, facet.date.other = NA,
-   facet.date.include = NA, facet.range = NA, facet.range.start = NA, facet.range.end = NA,
-   facet.range.gap = NA, facet.range.hardend = NA, facet.range.other = NA, facet.range.include = NA,
-   facet.pivot = NA, facet.pivot.mincount = NA, start=NA, rows=NA, key=NA, wt='json',
-   raw=FALSE, callopts=list(), ...) {
-
-  check_defunct(...)
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  todonames <- c("q",  "facet.query",  "facet.field",
-     "facet.prefix", "facet.sort", "facet.limit", "facet.offset",
-     "facet.mincount", "facet.missing", "facet.method", "facet.enum.cache.minDf",
-     "facet.threads", "facet.date", "facet.date.start", "facet.date.end",
-     "facet.date.gap", "facet.date.hardend", "facet.date.other",
-     "facet.date.include", "facet.range", "facet.range.start", "facet.range.end",
-     "facet.range.gap", "facet.range.hardend", "facet.range.other",
-     "facet.range.include", "facet.pivot", "facet.pivot.mincount",
-     "start", "rows", "key", "wt")
-  args <- collectargs(todonames)
-  args$fl <- 'DOES_NOT_EXIST'
-  args$facet <- 'true'
-
-  # additional parameters
-  args <- c(args, list(...))
-  if (length(args[names(args) %in% "facet.pivot"]) > 1) {
-    xx <- paste0(unlist(unname(args[names(args) %in% "facet.pivot"])), collapse = ",")
-    args[names(args) %in% "facet.pivot"] <- NULL
-    args$facet.pivot <- xx
-  }
-
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_facet", wt = wt)
-  if (raw) {
-    return( out )
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_facet"))
-    solr_parse(parsed)
-  }
+  conn$facet(name = name, params = params, body = body, callopts = callopts,
+             raw = raw, parsetype = parsetype, concat = concat, ...)
 }
diff --git a/R/solr_get.R b/R/solr_get.R
index 52d395a..8467064 100644
--- a/R/solr_get.R
+++ b/R/solr_get.R
@@ -3,40 +3,37 @@
 #' @description Get documents by id
 #'
 #' @export
+#' @param conn A solrium connection object, see [SolrClient]
 #' @param ids Document IDs, one or more in a vector or list
 #' @param name (character) A collection or core name. Required.
-#' @param fl Fields to return, can be a character vector like \code{c('id', 'title')},
-#' or a single character vector with one or more comma separated names, like
-#' \code{'id,title'}
+#' @param fl Fields to return, can be a character vector like
+#' `c('id', 'title')`, or a single character vector with one or more
+#' comma separated names, like `'id,title'`
 #' @param wt (character) One of json (default) or xml. Data type returned.
-#' If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-#' \code{\link[xml2]{read_xml}} to parse.
-#' @param raw (logical) If \code{TRUE}, returns raw data in format specified by
-#' \code{wt} param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' If json, uses [jsonlite::fromJSON()] to parse. If xml, uses
+#' [xml2::read_xml()] to parse.
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
+#' `wt` param
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @details We use json internally as the data interchange format for this function.
 #' @examples \dontrun{
-#' solr_connect()
+#' (cli <- SolrClient$new())
 #'
 #' # add some documents first
 #' ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-#' add(ss, name = "gettingstarted")
+#' add(cli, ss, name = "gettingstarted")
 #'
 #' # Now, get documents by id
-#' solr_get(ids = 1, "gettingstarted")
-#' solr_get(ids = 2, "gettingstarted")
-#' solr_get(ids = c(1, 2), "gettingstarted")
-#' solr_get(ids = "1,2", "gettingstarted")
+#' solr_get(cli, ids = 1, "gettingstarted")
+#' solr_get(cli, ids = 2, "gettingstarted")
+#' solr_get(cli, ids = c(1, 2), "gettingstarted")
+#' solr_get(cli, ids = "1,2", "gettingstarted")
 #'
 #' # Get raw JSON
-#' solr_get(ids = 1, "gettingstarted", raw = TRUE, wt = "json")
-#' solr_get(ids = 1, "gettingstarted", raw = TRUE, wt = "xml")
+#' solr_get(cli, ids = 1, "gettingstarted", raw = TRUE, wt = "json")
+#' solr_get(cli, ids = 1, "gettingstarted", raw = TRUE, wt = "xml")
 #' }
-solr_get <- function(ids, name, fl = NULL, wt = 'json', raw = FALSE, ...) {
-  conn <- solr_settings()
-  check_conn(conn)
-  if (!is.null(fl)) fl <- paste0(fl, collapse = ",")
-  args <- sc(list(ids = paste0(ids, collapse = ","), fl = fl, wt = wt))
-  res <- solr_GET(file.path(conn$url, sprintf('solr/%s/get', name)), args, conn$proxy, ...)
-  config_parse(res, wt = wt, raw = raw)
+solr_get <- function(conn, ids, name, fl = NULL, wt = 'json', raw = FALSE, ...) {
+  check_sr(conn)
+  conn$get(ids = ids, name = name, fl = fl, wt = wt, raw = raw, ...)
+}
diff --git a/R/solr_group.r b/R/solr_group.r
index b6095d9..00ebb5d 100644
--- a/R/solr_group.r
+++ b/R/solr_group.r
@@ -2,106 +2,88 @@
 #'
 #' @description Returns only group items
 #'
+#' @export
 #' @template group
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
 #' @return XML, JSON, a list, or data.frame
-#' @seealso \code{\link{solr_highlight}}, \code{\link{solr_facet}}
-#' @references See \url{http://wiki.apache.org/solr/FieldCollapsing} for more
+#' @seealso [solr_highlight()], [solr_facet()]
+#' @references See <http://wiki.apache.org/solr/FieldCollapsing> for more
 #' information.
-#' @export
 #' @examples \dontrun{
 #' # connect
-#' solr_connect('http://api.plos.org/search')
+#' (cli <- SolrClient$new())
+#'
+#' # by default we do a GET request
+#' cli$group("gettingstarted",
+#'   params = list(q='*:*', group.field='compName_s'))
+#' # OR
+#' solr_group(cli, "gettingstarted",
+#'   params = list(q='*:*', group.field='compName_s'))
+#'
+#' # connect
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
 #' # Basic group query
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'))
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl='article_type')
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score')))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl='article_type'))
 #'
 #' # Different ways to sort (notice diff btw sort of group.sort)
 #' # note that you can only sort on a field if you return that field
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'    fl=c('id','score'))
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'    fl=c('id','score','alm_twitterCount'), group.sort='alm_twitterCount desc')
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'    group.limit=3, fl=c('id','score')))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'    group.limit=3, fl=c('id','score','alm_twitterCount'),
+#'    group.sort='alm_twitterCount desc'))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'    group.limit=3,
 #'    fl=c('id','score','alm_twitterCount'), sort='score asc',
-#'    group.sort='alm_twitterCount desc')
+#'    group.sort='alm_twitterCount desc'))
 #'
 #' # Two group.field values
-#' out <- solr_group(q='ecology', group.field=c('journal','article_type'),
-#'   group.limit=3,
-#'   fl='id', raw=TRUE)
+#' out <- solr_group(cli, params = list(q='ecology',
+#'   group.field=c('journal','article_type'), group.limit=3, fl='id'),
+#'   raw=TRUE)
 #' solr_parse(out)
 #' solr_parse(out, 'df')
 #'
 #' # Get two groups, one with alm_twitterCount of 0-10, and another group
 #' # with 10 to infinity
-#' solr_group(q='ecology', group.limit=3, fl=c('id','alm_twitterCount'),
-#'  group.query=c('alm_twitterCount:[0 TO 10]','alm_twitterCount:[10 TO *]'))
+#' solr_group(cli, params = list(q='ecology', group.limit=3,
+#'   fl=c('id','alm_twitterCount'),
+#'   group.query=c('alm_twitterCount:[0 TO 10]','alm_twitterCount:[10 TO *]')))
 #'
 #' # Use of group.format and group.simple.
 #' ## The raw data structure of these two calls are slightly different, but
 #' ## the parsing inside the function outputs the same results. You can
 #' ## of course set raw=TRUE to get back what the data actually look like
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), group.format='simple')
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), group.format='grouped')
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), group.format='grouped', group.main='true')
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), group.format='simple'))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), group.format='grouped'))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), group.format='grouped',
+#'   group.main='true'))
 #'
 #' # xml back
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), wt = "xml")
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), wt = "xml", parsetype = "list")
-#' res <- solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl=c('id','score'), wt = "xml", raw = TRUE)
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), wt = "xml"))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), wt = "xml"), parsetype = "list")
+#' res <- solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl=c('id','score'), wt = "xml"), raw = TRUE)
 #' library("xml2")
 #' xml2::read_xml(unclass(res))
 #'
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl='article_type', wt = "xml")
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl='article_type', wt = "xml", parsetype = "list")
-#'
-#' # examples with Dryad's Solr instance
-#' solr_connect("http://datadryad.org/solr/search/select")
-#' solr_group(q='ecology', group.field='journal', group.limit=3,
-#'   fl='article_type')
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl='article_type', wt = "xml"))
+#' solr_group(cli, params = list(q='ecology', group.field='journal',
+#'   group.limit=3, fl='article_type', wt = "xml"), parsetype = "list")
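+#'
+#' # a hedged sketch: the same grouped query sent as a POST request via
+#' # `body` instead of `params` (if the server allows POST)
+#' # solr_group(cli, body = list(q='ecology', group.field='journal',
+#' #   group.limit=3, fl='id'))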
 #' }
+solr_group <- function(conn, name = NULL, params = NULL, body = NULL,
+                       callopts=list(), raw=FALSE, parsetype='df',
+                       concat=',', ...) {
 
-solr_group <- function(name = NULL, q='*:*', start=0, rows = NA, sort = NA, fq = NA, fl = NULL,
-  wt='json', key = NA, group.field = NA, group.limit = NA, group.offset = NA,
-  group.sort = NA, group.main = NA, group.ngroups = NA,
-  group.cache.percent = NA, group.query = NA, group.format = NA,
-  group.func = NA, callopts=list(), raw=FALSE, parsetype='df',
-  concat=',', ...) {
-
-  check_defunct(...)
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  if (!is.null(fl)) fl <- paste0(fl, collapse = ",")
-  todonames <- c("group.query","group.field", 'q', 'start', 'rows', 'sort',
-    'fq', 'wt', 'group.limit', 'group.offset', 'group.sort', 'group.sort',
-    'group.format', 'group.func', 'group.main', 'group.ngroups',
-    'group.cache.percent', 'group.cache.percent', 'fl')
-  args <- collectargs(todonames)
-  args$group <- 'true'
-
-  # additional parameters
-  args <- c(args, list(...))
-
-  out <- structure(solr_GET(base = handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_group", wt = wt)
-
-  if (raw) {
-    return(out)
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_group"))
-    solr_parse(out, parsetype)
-  }
+  conn$group(name = name, params = params, body = body, callopts = callopts,
+             raw = raw, parsetype = parsetype, concat = concat, ...)
 }
diff --git a/R/solr_highlight.r b/R/solr_highlight.r
index 967f28c..1b0d151 100644
--- a/R/solr_highlight.r
+++ b/R/solr_highlight.r
@@ -4,73 +4,41 @@
 #'
 #' @export
 #' @template high
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
 #' @return XML, JSON, a list, or data.frame
-#' @seealso \code{\link{solr_search}}, \code{\link{solr_facet}}
-#' @references See \url{http://wiki.apache.org/solr/HighlightingParameters} for
+#' @seealso [solr_search()], [solr_facet()]
+#' @references See <http://wiki.apache.org/solr/HighlightingParameters> for
 #' more information on highlighting.
 #' @examples \dontrun{
 #' # connect
-#' solr_connect('http://api.plos.org/search')
+#' (conn <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
 #' # highlight search
-#' solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10)
-#' solr_highlight(q='alcohol', hl.fl = c('abstract','title'), rows=3)
+#' solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10),
+#'   parsetype = "list")
+#' solr_highlight(conn, params = list(q='alcohol', hl.fl = c('abstract','title'),
+#'   rows=3), parsetype = "list")
 #'
 #' # Raw data back
 #' ## json
-#' solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10,
+#' solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10),
 #'    raw=TRUE)
 #' ## xml
-#' solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10,
-#'    raw=TRUE, wt='xml')
+#' solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10,
+#'    wt='xml'), raw=TRUE)
 #' ## parse after getting data back
-#' out <- solr_highlight(q='alcohol', hl.fl = c('abstract','title'), hl.fragsize=30,
-#'    rows=10, raw=TRUE, wt='xml')
-#' solr_parse(out, parsetype='df')
+#' out <- solr_highlight(conn, params = list(q='theoretical math',
+#'    hl.fl = c('abstract','title'), hl.fragsize=30, rows=10, wt='xml'),
+#'    raw=TRUE)
+#' solr_parse(out, parsetype='list')
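+#'
+#' ## a hedged sketch: custom highlight tags via hl.simple.pre/hl.simple.post
+#' ## (standard Solr params, formerly arguments of this function)
+#' solr_highlight(conn, params = list(q='alcohol', hl.fl='abstract', rows=3,
+#'   hl.simple.pre='<b>', hl.simple.post='</b>'), parsetype='list')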
 #' }
+solr_highlight <- function(conn, name = NULL, params = NULL, body = NULL,
+                           callopts=list(), raw=FALSE, parsetype='df', ...) {
 
-solr_highlight <- function(name = NULL, q, hl.fl = NULL, hl.snippets = NULL, hl.fragsize = NULL,
-     hl.q = NULL, hl.mergeContiguous = NULL, hl.requireFieldMatch = NULL,
-     hl.maxAnalyzedChars = NULL, hl.alternateField = NULL, hl.maxAlternateFieldLength = NULL,
-     hl.preserveMulti = NULL, hl.maxMultiValuedToExamine = NULL,
-     hl.maxMultiValuedToMatch = NULL, hl.formatter = NULL, hl.simple.pre = NULL,
-     hl.simple.post = NULL, hl.fragmenter = NULL, hl.fragListBuilder = NULL,
-     hl.fragmentsBuilder = NULL, hl.boundaryScanner = NULL, hl.bs.maxScan = NULL,
-     hl.bs.chars = NULL, hl.bs.type = NULL, hl.bs.language = NULL, hl.bs.country = NULL,
-     hl.useFastVectorHighlighter = NULL, hl.usePhraseHighlighter = NULL,
-     hl.highlightMultiTerm = NULL, hl.regex.slop = NULL, hl.regex.pattern = NULL,
-     hl.regex.maxAnalyzedChars = NULL, start = 0, rows = NULL,
-     wt='json', raw = FALSE, key = NULL, callopts=list(),
-     fl='DOES_NOT_EXIST', fq=NULL, parsetype='list') {
-
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  if(!is.null(hl.fl)) names(hl.fl) <- rep("hl.fl", length(hl.fl))
-  args <- sc(list(wt=wt, q=q, start=start, rows=rows, hl='true',
-     hl.snippets=hl.snippets, hl.fragsize=hl.fragsize, fl=fl, fq=fq,
-     hl.mergeContiguous = hl.mergeContiguous, hl.requireFieldMatch = hl.requireFieldMatch,
-     hl.maxAnalyzedChars = hl.maxAnalyzedChars, hl.alternateField = hl.alternateField,
-     hl.maxAlternateFieldLength = hl.maxAlternateFieldLength, hl.preserveMulti = hl.preserveMulti,
-     hl.maxMultiValuedToExamine = hl.maxMultiValuedToExamine, hl.maxMultiValuedToMatch = hl.maxMultiValuedToMatch,
-     hl.formatter = hl.formatter, hl.simple.pre = hl.simple.pre, hl.simple.post = hl.simple.post,
-     hl.fragmenter = hl.fragmenter, hl.fragListBuilder = hl.fragListBuilder,
-     hl.fragmentsBuilder = hl.fragmentsBuilder, hl.boundaryScanner = hl.boundaryScanner,
-     hl.bs.maxScan = hl.bs.maxScan, hl.bs.chars = hl.bs.chars, hl.bs.type = hl.bs.type,
-     hl.bs.language = hl.bs.language, hl.bs.country = hl.bs.country,
-     hl.useFastVectorHighlighter = hl.useFastVectorHighlighter,
-     hl.usePhraseHighlighter = hl.usePhraseHighlighter, hl.highlightMultiTerm = hl.highlightMultiTerm,
-     hl.regex.slop = hl.regex.slop, hl.regex.pattern = hl.regex.pattern,
-     hl.regex.maxAnalyzedChars = hl.regex.maxAnalyzedChars))
-  args <- c(args, hl.fl)
-
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_high", wt = wt)
-  if (raw) {
-    return(out)
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_high"))
-    solr_parse(out, parsetype)
-  }
+  conn$highlight(name = name, params = params, body = body, callopts = callopts,
+             raw = raw, parsetype = parsetype, ...)
 }
diff --git a/R/solr_mlt.r b/R/solr_mlt.r
index 91b9218..c4c460e 100644
--- a/R/solr_mlt.r
+++ b/R/solr_mlt.r
@@ -4,59 +4,43 @@
 #'
 #' @export
 #' @template mlt
+#' @template optimizerows
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
 #' @return XML, JSON, a list, or data.frame
 #' @references See \url{http://wiki.apache.org/solr/MoreLikeThis} for more
 #' information.
 #' @examples \dontrun{
 #' # connect
-#' solr_connect('http://api.plos.org/search')
+#' (conn <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
 #' # more like this search
-#' solr_mlt(q='*:*', mlt.count=2, mlt.fl='abstract', fl='score',
-#'   fq="doc_type:full")
-#' solr_mlt(q='*:*', rows=2, mlt.fl='title', mlt.mindf=1, mlt.mintf=1,
-#'   fl='alm_twitterCount')
-#' solr_mlt(q='title:"ecology" AND body:"cell"', mlt.fl='title', mlt.mindf=1,
-#'   mlt.mintf=1, fl='counter_total_all', rows=5)
-#' solr_mlt(q='ecology', mlt.fl='abstract', fl='title', rows=5)
-#' solr_mlt(q='ecology', mlt.fl='abstract', fl=c('score','eissn'),
-#'   rows=5)
-#' solr_mlt(q='ecology', mlt.fl='abstract', fl=c('score','eissn'),
-#'   rows=5, wt = "xml")
+#' conn$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', fl='score',
+#'   fq="doc_type:full"))
+#' conn$mlt(params = list(q='*:*', rows=2, mlt.fl='title', mlt.mindf=1,
+#'   mlt.mintf=1, fl='alm_twitterCount'))
+#' conn$mlt(params = list(q='title:"ecology" AND body:"cell"', mlt.fl='title',
+#'   mlt.mindf=1, mlt.mintf=1, fl='counter_total_all', rows=5))
+#' conn$mlt(params = list(q='ecology', mlt.fl='abstract', fl='title', rows=5))
+#' solr_mlt(conn, params = list(q='ecology', mlt.fl='abstract',
+#'   fl=c('score','eissn'), rows=5))
+#' solr_mlt(conn, params = list(q='ecology', mlt.fl='abstract',
+#'   fl=c('score','eissn'), rows=5, wt = "xml"))
 #'
 #' # get raw data, and parse later if needed
-#' out <- solr_mlt(q='ecology', mlt.fl='abstract', fl='title',
-#'  rows=2, raw=TRUE)
-#' library('jsonlite')
+#' out <- solr_mlt(conn, params=list(q='ecology', mlt.fl='abstract', fl='title',
+#'  rows=2), raw=TRUE)
 #' solr_parse(out, "df")
 #' }
+solr_mlt <- function(conn, name = NULL, params = NULL, body = NULL,
+                     callopts=list(), raw=FALSE, parsetype='df', concat=',',
+                     optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...) {
 
-solr_mlt <- function(name = NULL, q='*:*', fq = NULL, mlt.count=NULL, mlt.fl=NULL, mlt.mintf=NULL,
-  mlt.mindf=NULL, mlt.minwl=NULL, mlt.maxwl=NULL, mlt.maxqt=NULL, mlt.maxntp=NULL,
-  mlt.boost=NULL, mlt.qf=NULL, fl=NULL, wt='json', start=0, rows=NULL, key = NULL,
-  callopts=list(), raw=FALSE, parsetype='df', concat=',') {
-
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  fl_str <- paste0(fl, collapse = ",")
-  if (any(grepl('id', fl))) {
-    fl2 <- fl_str
-  } else {
-    fl2 <- sprintf('id,%s',fl_str)
-  }
-  args <- sc(list(q = q, fq = fq, mlt = 'true', fl = fl2, mlt.count = mlt.count, mlt.fl = mlt.fl,
-    mlt.mintf = mlt.mintf, mlt.mindf = mlt.mindf, mlt.minwl = mlt.minwl,
-    mlt.maxwl = mlt.maxwl, mlt.maxqt = mlt.maxqt, mlt.maxntp = mlt.maxntp,
-    mlt.boost = mlt.boost, mlt.qf = mlt.qf, start = start, rows = rows, wt = wt))
-
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_mlt", wt = wt)
-  if (raw) {
-    return( out )
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_mlt"))
-    solr_parse(parsed, parsetype, concat)
-  }
+  conn$mlt(name = name, params = params, body = body, callopts = callopts,
+           raw = raw, parsetype = parsetype, concat = concat,
+           optimizeMaxRows = optimizeMaxRows,
+           minOptimizedRows = minOptimizedRows, ...)
 }
diff --git a/R/solr_search.r b/R/solr_search.r
index b6b8a44..4b9b1e7 100644
--- a/R/solr_search.r
+++ b/R/solr_search.r
@@ -3,149 +3,127 @@
 #' @description Returns only matched documents, and doesn't return other items,
 #' including facets, groups, mlt, stats, and highlights.
 #'
+#' @export
 #' @template search
+#' @template optimizerows
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
+#'
 #' @return XML, JSON, a list, or data.frame
-#' @param wt (character) One of json, xml, or csv. Data type returned, defaults to 'csv'.
-#' If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-#' \code{\link[xml2]{read_xml}} to parse. If csv, uses \code{\link{read.table}} to parse.
-#' \code{wt=csv} gives the fastest performance at least in all the cases we have
-#' tested in, thus it's the default value for \code{wt}.
-#' @seealso \code{\link{solr_highlight}}, \code{\link{solr_facet}}
-#' @references See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for more information.
+#' @seealso [solr_highlight()], [solr_facet()]
+#' @references See <http://wiki.apache.org/solr/#Search_and_Indexing>
+#' for more information.
 #' @note SOLR v1.2 was the first version to support csv. See
-#' \url{https://issues.apache.org/jira/browse/SOLR-66}
-#' @export
+#' <https://issues.apache.org/jira/browse/SOLR-66>
 #' @examples \dontrun{
-#' # connect
-#' solr_connect('http://api.plos.org/search')
+#' # Connect to a local Solr instance
+#' (cli <- SolrClient$new())
+#' cli$search("gettingstarted", params = list(q = "features:notes"))
+#'
+#' solr_search(cli, "gettingstarted")
+#' solr_search(cli, "gettingstarted", params = list(q = "features:notes"))
+#' solr_search(cli, "gettingstarted", body = list(query = "features:notes"))
+#'
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
+#' cli$search(params = list(q = "*:*"))
+#' cli$search(params = list(q = "title:golgi", fl = c('id', 'title')))
+#'
+#' cli$search(params = list(q = "*:*", facet = "true"))
 #'
 #' # search
-#' solr_search(q='*:*', rows=2, fl='id')
+#' solr_search(cli, params = list(q='*:*', rows=2, fl='id'))
+#'
+#' # search and return all rows
+#' solr_search(cli, params = list(q='*:*', rows=-1, fl='id'))
 #'
 #' # Search for word ecology in title and cell in the body
-#' solr_search(q='title:"ecology" AND body:"cell"', fl='title', rows=5)
+#' solr_search(cli, params = list(q='title:"ecology" AND body:"cell"',
+#'   fl='title', rows=5))
 #'
 #' # Search for word "cell" and not "body" in the title field
-#' solr_search(q='title:"cell" -title:"lines"', fl='title', rows=5)
+#' solr_search(cli, params = list(q='title:"cell" -title:"lines"', fl='title',
+#'   rows=5))
 #'
 #' # Wildcards
 #' ## Search for word that starts with "cell" in the title field
-#' solr_search(q='title:"cell*"', fl='title', rows=5)
+#' solr_search(cli, params = list(q='title:"cell*"', fl='title', rows=5))
 #'
 #' # Proximity searching
 #' ## Search for words "sports" and "alcohol" within seven words of each other
-#' solr_search(q='everything:"sports alcohol"~7', fl='abstract', rows=3)
+#' solr_search(cli, params = list(q='everything:"sports alcohol"~7',
+#'   fl='abstract', rows=3))
 #'
 #' # Range searches
 #' ## Search for articles with Twitter count between 5 and 10
-#' solr_search(q='*:*', fl=c('alm_twitterCount','id'), fq='alm_twitterCount:[5 TO 50]',
-#' rows=10)
+#' solr_search(cli, params = list(q='*:*', fl=c('alm_twitterCount','id'),
+#'   fq='alm_twitterCount:[5 TO 50]', rows=10))
 #'
 #' # Boosts
-#' ## Assign higher boost to title matches than to body matches (compare the two calls)
-#' solr_search(q='title:"cell" abstract:"science"', fl='title', rows=3)
-#' solr_search(q='title:"cell"^1.5 AND abstract:"science"', fl='title', rows=3)
+#' ## Assign higher boost to title matches than to body matches
+#' ## (compare the two calls)
+#' solr_search(cli, params = list(q='title:"cell" abstract:"science"',
+#'   fl='title', rows=3))
+#' solr_search(cli, params = list(q='title:"cell"^1.5 AND abstract:"science"',
+#'   fl='title', rows=3))
 #'
 #' # FunctionQuery queries
-#' ## This kind of query allows you to use the actual values of fields to calculate
-#' ## relevancy scores for returned documents
+#' ## This kind of query allows you to use the actual values of fields to
+#' ## calculate relevancy scores for returned documents
 #'
 #' ## Here, we search on the product of counter_total_all and alm_twitterCount
 #' ## metrics for articles in PLOS Journals
-#' solr_search(q="{!func}product($v1,$v2)", v1 = 'sqrt(counter_total_all)',
-#'    v2 = 'log(alm_twitterCount)', rows=5, fl=c('id','title'), fq='doc_type:full')
+#' solr_search(cli, params = list(q="{!func}product($v1,$v2)",
+#'   v1 = 'sqrt(counter_total_all)',
+#'   v2 = 'log(alm_twitterCount)', rows=5, fl=c('id','title'),
+#'   fq='doc_type:full'))
 #'
-#' ## here, search on the product of counter_total_all and alm_twitterCount, using
-#' ## a new temporary field "_val_"
-#' solr_search(q='_val_:"product(counter_total_all,alm_twitterCount)"',
-#'    rows=5, fl=c('id','title'), fq='doc_type:full')
+#' ## here, search on the product of counter_total_all and alm_twitterCount,
+#' ## using a new temporary field "_val_"
+#' solr_search(cli,
+#'   params = list(q='_val_:"product(counter_total_all,alm_twitterCount)"',
+#'   rows=5, fl=c('id','title'), fq='doc_type:full'))
 #'
 #' ## papers with most citations
-#' solr_search(q='_val_:"max(counter_total_all)"',
-#'    rows=5, fl=c('id','counter_total_all'), fq='doc_type:full')
+#' solr_search(cli, params = list(q='_val_:"max(counter_total_all)"',
+#'    rows=5, fl=c('id','counter_total_all'), fq='doc_type:full'))
 #'
 #' ## papers with most tweets
-#' solr_search(q='_val_:"max(alm_twitterCount)"',
-#'    rows=5, fl=c('id','alm_twitterCount'), fq='doc_type:full')
+#' solr_search(cli, params = list(q='_val_:"max(alm_twitterCount)"',
+#'    rows=5, fl=c('id','alm_twitterCount'), fq='doc_type:full'))
+#'
+#' ## many fq values
+#' solr_search(cli, params = list(q="*:*", fl=c('id','alm_twitterCount'),
+#'    fq=list('doc_type:full','subject:"Social networks"',
+#'            'alm_twitterCount:[100 TO 10000]'),
+#'    sort='counter_total_month desc'))
 #'
 #' ## using wt = csv
-#' solr_search(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full', wt="csv")
-#' solr_search(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full')
+#' solr_search(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+#'   fq='doc_type:full', wt="csv"))
+#' solr_search(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+#'   fq='doc_type:full'))
 #'
 #' # using a proxy
-#' # prox <- list(url = "186.249.1.146", port = 80)
-#' # solr_connect(url = 'http://api.plos.org/search', proxy = prox)
-#' # solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
-#' ## vs. w/o a proxy
-#' # solr_connect(url = 'http://api.plos.org/search')
-#' # solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
+#' # cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL,
+#' #   proxy = list(url = "http://186.249.1.146:80"))
+#' # solr_search(cli, params = list(q='*:*', rows=2, fl='id'),
+#' #   callopts = list(verbose = TRUE))
 #'
 #' # Pass on curl options to modify request
-#' solr_connect(url = 'http://api.plos.org/search')
 #' ## verbose
-#' solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
-#' ## progress
-#' res <- solr_search(q='*:*', rows=200, fl='id', callopts=progress())
-#' ## timeout
-#' # solr_search(q='*:*', rows=200, fl='id', callopts=timeout(0.01))
-#' ## combine curl options using the c() function
-#' opts <- c(verbose(), progress())
-#' res <- solr_search(q='*:*', rows=200, fl='id', callopts=opts)
-#'
-#' ## Searching Europeana
-#' ### They don't return the expected Solr output, so we can get raw data, then parse separately
-#' solr_connect('http://europeana.eu/api/v2/search.json')
-#' key <- getOption("eu_key")
-#' dat <- solr_search(query='*:*', rows=5, wskey = key, raw=TRUE)
-#' library('jsonlite')
-#' head( jsonlite::fromJSON(dat)$items )
-#'
-#' # Connect to a local Solr instance
-#' ## not run - replace with your local Solr URL and collection/core name
-#' # solr_connect("localhost:8889")
-#' # solr_search("gettingstarted")
+#' solr_search(cli, params = list(q='*:*', rows=2, fl='id'),
+#'   callopts = list(verbose=TRUE))
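+#'
+#' # a hedged sketch: the JSON request API via `body` sends a POST request;
+#' # note `query` (not `q`) in the body, as in the gettingstarted example
+#' # solr_search(cli, body = list(query = "ecology"))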
 #' }
 
-solr_search <- function(name = NULL, q='*:*', sort=NULL, start=NULL, rows=NULL, pageDoc=NULL,
-  pageScore=NULL, fq=NULL, fl=NULL, defType=NULL, timeAllowed=NULL, qt=NULL,
-  wt='json', NOW=NULL, TZ=NULL, echoHandler=NULL, echoParams=NULL, key = NULL,
-  callopts=list(), raw=FALSE, parsetype='df', concat=',', ...) {
-
-  check_defunct(...)
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  if (!is.null(fl)) fl <- paste0(fl, collapse = ",")
-  args <- sc(list(q = q, sort = sort, start = start, rows = rows, pageDoc = pageDoc,
-      pageScore = pageScore, fl = fl, defType = defType,
-      timeAllowed = timeAllowed, qt = qt, wt = wt, NOW = NOW, TZ = TZ,
-      echoHandler = echoHandler, echoParams = echoParams))
-
-  # args that can be repeated
-  todonames <- "fq"
-  args <- c(args, collectargs(todonames))
-
-  # additional parameters
-  args <- c(args, list(...))
-  if ('query' %in% names(args)) {
-    args <- args[!names(args) %in% "q"]
-  }
-
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_search", wt = wt)
-  if (raw) {
-    return( out )
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_search"))
-    solr_parse(parsed, parsetype, concat)
-  }
-}
+solr_search <- function(conn, name = NULL, params = list(q = '*:*'),
+  body = NULL, callopts = list(), raw = FALSE, parsetype = 'df',
+  concat = ',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...) {
 
-handle_url <- function(conn, name) {
-  if (is.null(name)) {
-    conn$url
-  } else {
-    file.path(conn$url, "solr", name, "select")
-  }
+  conn$search(name = name, params = params, body = body, callopts = callopts,
+              raw = raw, parsetype = parsetype, concat = concat,
+              optimizeMaxRows = optimizeMaxRows,
+              minOptimizedRows = minOptimizedRows, ...)
 }
diff --git a/R/solr_stats.r b/R/solr_stats.r
index beda65d..d5fdd33 100644
--- a/R/solr_stats.r
+++ b/R/solr_stats.r
@@ -2,66 +2,60 @@
 #'
 #' @description Returns only stat items
 #'
+#' @export
 #' @template stats
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param params (list) a named list of parameters; results in a GET request
+#' as long as no body parameters are given
+#' @param body (list) a named list of parameters; if given, a POST request
+#' will be performed
 #' @return XML, JSON, a list, or data.frame
-#' @seealso \code{\link{solr_highlight}}, \code{\link{solr_facet}},
-#' \code{\link{solr_search}}, \code{\link{solr_mlt}}
-#' @references See \url{http://wiki.apache.org/solr/StatsComponent} for
+#' @seealso [solr_highlight()], [solr_facet()], [solr_search()], [solr_mlt()]
+#' @references See <http://wiki.apache.org/solr/StatsComponent> for
 #' more information on Solr stats.
-#' @export
 #' @examples \dontrun{
 #' # connect
-#' solr_connect('http://api.plos.org/search')
+#' (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 #'
 #' # get stats
-#' solr_stats(q='science', stats.field='counter_total_all', raw=TRUE)
-#' solr_stats(q='title:"ecology" AND body:"cell"',
-#'    stats.field=c('counter_total_all','alm_twitterCount'))
-#' solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-#'    stats.facet='journal')
-#' solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-#'    stats.facet=c('journal','volume'))
+#' solr_stats(cli, params = list(q='science', stats.field='counter_total_all'),
+#'   raw=TRUE)
+#' solr_stats(cli, params = list(q='title:"ecology" AND body:"cell"',
+#'    stats.field=c('counter_total_all','alm_twitterCount')))
+#' solr_stats(cli, params = list(q='ecology',
+#'   stats.field=c('counter_total_all','alm_twitterCount'),
+#'   stats.facet='journal'))
+#' solr_stats(cli, params = list(q='ecology',
+#'   stats.field=c('counter_total_all','alm_twitterCount'),
+#'   stats.facet=c('journal','volume')))
 #'
 #' # Get raw data, then parse later if you feel like it
 #' ## json
-#' out <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-#'    stats.facet=c('journal','volume'), raw=TRUE)
+#' out <- solr_stats(cli, params = list(q='ecology',
+#'   stats.field=c('counter_total_all','alm_twitterCount'),
+#'   stats.facet=c('journal','volume')), raw=TRUE)
 #' library("jsonlite")
 #' jsonlite::fromJSON(out)
 #' solr_parse(out) # list
 #' solr_parse(out, 'df') # data.frame
 #'
 #' ## xml
-#' out <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-#'    stats.facet=c('journal','volume'), raw=TRUE, wt="xml")
+#' out <- solr_stats(cli, params = list(q='ecology',
+#'   stats.field=c('counter_total_all','alm_twitterCount'),
+#'   stats.facet=c('journal','volume'), wt="xml"), raw=TRUE)
 #' library("xml2")
 #' xml2::read_xml(unclass(out))
 #' solr_parse(out) # list
 #' solr_parse(out, 'df') # data.frame
 #'
 #' # Get verbose http call information
-#' library("httr")
-#' solr_stats(q='ecology', stats.field='alm_twitterCount',
-#'    callopts=verbose())
+#' solr_stats(cli, params = list(q='ecology', stats.field='alm_twitterCount'),
+#'    callopts=list(verbose=TRUE))
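+#'
+#' # a hedged sketch: stats.field and stats.facet combine freely in `params`
+#' solr_stats(cli, params = list(q='*:*', stats.field='alm_twitterCount',
+#'   stats.facet='journal'))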
 #' }
+solr_stats <- function(conn, name = NULL, params = list(q = '*:*',
+  stats.field = NULL, stats.facet = NULL), body = NULL, callopts=list(),
+  raw=FALSE, parsetype='df', ...) {
 
-solr_stats <- function(name = NULL, q='*:*', stats.field=NULL, stats.facet=NULL,
-  wt='json', start=0, rows=0, key = NULL, callopts=list(), raw=FALSE, parsetype='df') {
-
-  conn <- solr_settings()
-  check_conn(conn)
-  check_wt(wt)
-  todonames <- c("q", "stats.field", "stats.facet", "start", "rows", "key", "wt")
-  args <- collectargs(todonames)
-  args$stats <- 'true'
-
-  out <- structure(solr_GET(handle_url(conn, name), args, callopts, conn$proxy),
-                   class = "sr_stats", wt = wt)
-  if (raw) {
-    return( out )
-  } else {
-    parsed <- cont_parse(out, wt)
-    parsed <- structure(parsed, class = c(class(parsed), "sr_stats"))
-    solr_parse(out, parsetype)
-  }
+  conn$stats(name = name, params = params, body = body, callopts = callopts,
+             raw = raw, parsetype = parsetype, ...)
 }
diff --git a/R/solrium-package.R b/R/solrium-package.R
index 6465207..759f26c 100644
--- a/R/solrium-package.R
+++ b/R/solrium-package.R
@@ -1,8 +1,8 @@
 #' General purpose R interface to Solr.
-#' 
+#'
 #' This package has support for all the search endpoints, as well as a suite
-#' of functions for managing a Solr database, including adding and deleting 
-#' documents. 
+#' of functions for managing a Solr database, including adding and deleting
+#' documents.
 #'
 #' @section Important search functions:
 #'
@@ -16,16 +16,16 @@
 #'   \item \code{\link{solr_group}} - Group search (w/o general search)
 #'   \item \code{\link{solr_stats}} - Stats search (w/o general search)
 #' }
-#' 
+#'
 #' @section Important Solr management functions:
 #'
 #' \itemize{
-#'   \item \code{\link{update_json}} - Add or delete documents using json in a 
+#'   \item \code{\link{update_json}} - Add or delete documents using json in a
 #'   file
 #'   \item \code{\link{add}} - Add documents via an R list or data.frame
 #'   \item \code{\link{delete_by_id}} - Delete documents by ID
 #'   \item \code{\link{delete_by_query}} - Delete documents by query
-#' } 
+#' }
 #'
 #' @section Vignettes:
 #'
@@ -34,33 +34,33 @@
 #' @section Performance:
 #'
 #' \code{v0.2} and above of this package will have \code{wt=csv} as the default.
-#' This  should give significant performance improvement over the previous 
-#' default of \code{wt=json}, which pulled down json, parsed to an R list, 
-#' then to a data.frame. With \code{wt=csv}, we pull down csv, and read that 
+#' This should give a significant performance improvement over the previous
+#' default of \code{wt=json}, which pulled down json, parsed to an R list,
+#' then to a data.frame. With \code{wt=csv}, we pull down csv, and read that
 #' in directly to a data.frame.
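+#'
+#' A hedged sketch (assumes a \code{SolrClient} \code{cli} for the PLOS
+#' search API): \code{solr_search(cli, params = list(q='*:*', rows=10,
+#' fl='id', wt='csv'))}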
 #'
-#' The http library we use, \pkg{httr}, sets gzip compression header by 
-#' default. As long as compression is used server side, you're good to go on 
+#' The http library we use, \pkg{crul}, sets the gzip compression header by
+#' default. As long as compression is used server side, you're good to go on
 #' compression, which should be a good performance boost. See
 #' \url{https://wiki.apache.org/solr/SolrPerformanceFactors#Query_Response_Compression}
 #' for notes on how to enable compression.
 #'
 #' There are other notes about Solr performance at
-#' \url{https://wiki.apache.org/solr/SolrPerformanceFactors} that can be 
-#' used server side/in your Solr config, but aren't things to tune here in 
+#' \url{https://wiki.apache.org/solr/SolrPerformanceFactors} that can be
+#' used server side/in your Solr config, but aren't things to tune here in
 #' this R client.
 #'
 #' Let us know if there's any further performance improvements we can make.
 #'
 #' @importFrom utils URLdecode head modifyList read.table
-#' @importFrom httr GET POST stop_for_status content content_type_json
-#' content_type_xml content_type upload_file http_condition http_status
+#' @importFrom crul HttpClient
 #' @importFrom xml2 read_xml xml_children xml_find_first xml_find_all
 #' xml_name xml_text xml_attr xml_attrs
 #' @importFrom jsonlite fromJSON
 #' @importFrom plyr rbind.fill
 #' @importFrom dplyr bind_rows
 #' @importFrom tibble data_frame as_data_frame as_tibble add_column
+#' @importFrom R6 R6Class
 #' @name solrium-package
 #' @aliases solrium
 #' @docType package
diff --git a/R/update_atomic_json.R b/R/update_atomic_json.R
new file mode 100644
index 0000000..228ca5d
--- /dev/null
+++ b/R/update_atomic_json.R
@@ -0,0 +1,56 @@
+#' Atomic updates with JSON data
+#'
+#' Atomic updates to parts of Solr documents
+#'
+#' @export
+#' @param body (character) JSON as a character string
+#' @inheritParams update_atomic_xml
+#' @references
+#' <https://lucene.apache.org/solr/guide/7_0/updating-parts-of-documents.html>
+#' @examples \dontrun{
+#' # start Solr in Cloud mode: bin/solr start -e cloud -noprompt
+#'
+#' # connect
+#' (conn <- SolrClient$new())
+#'
+#' # create a collection
+#' if (!conn$collection_exists("books")) {
+#'   conn$collection_create("books")
+#' }
+#'
+#' # Add documents
+#' file <- system.file("examples", "books2.json", package = "solrium")
+#' cat(readLines(file), sep = "\n")
+#' conn$update_json(file, "books")
+#'
+#' # get a document
+#' conn$get(ids = 343334534545, "books")
+#'
+#' # atomic update
+#' body <- '[{
+#'  "id": "343334534545",
+#'  "genre_s": {"set": "mystery" },
+#'  "pages_i": {"inc": 1 }
+#' }]'
+#' conn$update_atomic_json(body, "books")
+#'
+#' # get the document again
+#' conn$get(ids = 343334534545, "books")
+#'
+#' # another atomic update
+#' body <- '[{
+#'  "id": "343334534545",
+#'  "price": {"remove": "12.5" }
+#' }]'
+#' conn$update_atomic_json(body, "books")
+#'
+#' # get the document again
+#' conn$get(ids = 343334534545, "books")
+#' }
+update_atomic_json <- function(conn, body, name, wt = 'json',
+  raw = FALSE, ...) {
+
+  check_sr(conn)
+  conn$update_atomic_json(body, name, wt, raw, ...)
+}
diff --git a/R/update_atomic_xml.R b/R/update_atomic_xml.R
new file mode 100644
index 0000000..33f69dd
--- /dev/null
+++ b/R/update_atomic_xml.R
@@ -0,0 +1,66 @@
+#' Atomic updates with XML data
+#'
+#' Atomic updates to parts of Solr documents
+#'
+#' @export
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param body (character) XML as a character string
+#' @param name (character) Name of the core or collection
+#' @param wt (character) One of json (default) or xml. If json, uses
+#' [jsonlite::fromJSON()] to parse. If xml, uses [xml2::read_xml()] to parse
+#' @param raw (logical) If `TRUE`, returns raw data in format specified by
+#' `wt` param
+#' @param ... curl options passed on to [crul::HttpClient]
+#' @references
+#' <https://lucene.apache.org/solr/guide/7_0/updating-parts-of-documents.html>
+#' @examples \dontrun{
+#' # start Solr in Cloud mode: bin/solr start -e cloud -noprompt
+#'
+#' # connect
+#' (conn <- SolrClient$new())
+#'
+#' # create a collection
+#' if (!conn$collection_exists("books")) {
+#'   conn$collection_create("books")
+#' }
+#'
+#' # Add documents
+#' file <- system.file("examples", "books.xml", package = "solrium")
+#' cat(readLines(file), sep = "\n")
+#' conn$update_xml(file, "books")
+#'
+#' # get a document
+#' conn$get(ids = '978-0641723445', "books", wt = "xml")
+#'
+#' # atomic update
+#' body <- '
+#' <add>
+#'  <doc>
+#'    <field name="id">978-0641723445</field>
+#'    <field name="genre_s" update="set">mystery</field>
+#'    <field name="pages_i" update="inc">1</field>
+#'  </doc>
+#' </add>'
+#' conn$update_atomic_xml(body, name="books")
+#'
+#' # get the document again
+#' conn$get(ids = '978-0641723445', "books", wt = "xml")
+#'
+#' # another atomic update
+#' body <- '
+#' <add>
+#'  <doc>
+#'    <field name="id">978-0641723445</field>
+#'    <field name="price" update="remove">12.5</field>
+#'  </doc>
+#' </add>'
+#' conn$update_atomic_xml(body, "books")
+#'
+#' # get the document again
+#' conn$get(ids = '978-0641723445', "books", wt = "xml")
+#' }
+update_atomic_xml <- function(conn, body, name, wt = 'json', raw = FALSE, ...) {
+  check_sr(conn)
+  conn$update_atomic_xml(body, name, wt, raw, ...)
+}
diff --git a/R/update_csv.R b/R/update_csv.R
index 7f85218..6553a3c 100644
--- a/R/update_csv.R
+++ b/R/update_csv.R
@@ -1,45 +1,47 @@
-#' Update documents using CSV
+#' Update documents with CSV data
 #'
 #' @export
 #' @family update
 #' @template csvcreate
-#' @param files Path to file to load into Solr
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param files Path to a single file to load into Solr
 #' @param name (character) Name of the core or collection
 #' @param wt (character) One of json (default) or xml. If json, uses
-#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to parse
-#' @param raw (logical) If TRUE, returns raw data in format specified by wt param
-#' @param ... curl options passed on to \code{\link[httr]{GET}}
+#' \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
+#' \code{\link[xml2]{read_xml}} to parse
+#' @param raw (logical) If TRUE, returns raw data in format specified by
+#' \code{wt} param
+#' @param ... curl options passed on to [crul::HttpClient]
 #' @note SOLR v1.2 was the first version to support csv. See
 #' \url{https://issues.apache.org/jira/browse/SOLR-66}
 #' @examples \dontrun{
-#' # start Solr in Schemaless mode: bin/solr start -e schemaless
+#' # start Solr: bin/solr start -f -c -p 8983
 #'
 #' # connect
-#' solr_connect()
+#' (cli <- SolrClient$new())
+#'
+#' if (!cli$collection_exists("helloWorld")) {
+#'   cli$collection_create(name = "helloWorld", numShards = 2)
+#' }
 #'
 #' df <- data.frame(id=1:3, name=c('red', 'blue', 'green'))
 #' write.csv(df, file="df.csv", row.names=FALSE, quote = FALSE)
-#' update_csv("df.csv", "books")
+#' conn$update_csv("df.csv", "helloWorld", verbose = TRUE)
 #'
-#' # give back xml
-#' update_csv("df.csv", "books", wt = "xml")
-#' ## raw xml
-#' update_csv("df.csv", "books", wt = "xml", raw = FALSE)
+#' # give back xml
+#' cli$update_csv("df.csv", "helloWorld", wt = "xml")
+#' ## raw json
+#' cli$update_csv("df.csv", "helloWorld", wt = "json", raw = TRUE)
 #' }
-update_csv <- function(files, name, separator = ',', header = TRUE,
-                       fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE,
-                       encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL,
-                       map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL, overwrite = NULL,
-                       commit = NULL, wt = 'json', raw = FALSE, ...) {
+update_csv <- function(conn, files, name, separator = ',', header = TRUE,
+  fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE,
+  encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL,
+  map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL, overwrite = NULL,
+  commit = NULL, wt = 'json', raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  stop_if_absent(name)
-  if (!is.null(fieldnames)) fieldnames <- paste0(fieldnames, collapse = ",")
-  args <- sc(list(separator = separator, header = header, fieldnames = fieldnames, skip = skip,
-                  skipLines = skipLines, trim = trim, encapsulator = encapsulator, escape = escape,
-                  keepEmpty = keepEmpty, literal = literal, map = map, split = split,
-                  rowid = rowid, rowidOffset = rowidOffset, overwrite = overwrite,
-                  commit = commit, wt = wt))
-  docreate(file.path(conn$url, sprintf('solr/%s/update/csv', name)), files, args, content = "csv", raw, ...)
+  check_sr(conn)
+  conn$update_csv(files, name, separator, header, fieldnames, skip,
+                   skipLines, trim, encapsulator, escape, keepEmpty, literal,
+                   map, split, rowid, rowidOffset, overwrite, commit,
+                   wt, raw, ...)
 }
diff --git a/R/update_json.R b/R/update_json.R
index 0a3def8..9010003 100644
--- a/R/update_json.R
+++ b/R/update_json.R
@@ -1,50 +1,47 @@
-#' Update documents using JSON
+#' Update documents with JSON data
 #'
 #' @export
 #' @family update
 #' @template update
 #' @template commitcontrol
-#' @param files Path to file to load into Solr
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param files Path to a single file to load into Solr
 #' @examples \dontrun{
-#' # start Solr in Schemaless mode: bin/solr start -e schemaless
-#' 
+#' # start Solr: bin/solr start -f -c -p 8983
+#'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
 #'
 #' # Add documents
 #' file <- system.file("examples", "books2.json", package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_json(file, "books")
+#' conn$update_json(files = file, name = "books")
+#' update_json(conn, files = file, name = "books")
 #'
 #' # Update commands - can include many varying commands
 #' ## Add file
-#' file <- system.file("examples", "updatecommands_add.json", package = "solrium")
+#' file <- system.file("examples", "updatecommands_add.json",
+#'   package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_json(file, "books")
+#' conn$update_json(file, "books")
 #'
 #' ## Delete file
-#' file <- system.file("examples", "updatecommands_delete.json", package = "solrium")
+#' file <- system.file("examples", "updatecommands_delete.json",
+#'   package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_json(file, "books")
+#' conn$update_json(file, "books")
 #'
 #' # Add and delete in the same document
 #' ## Add a document first, that we can later delete
 #' ss <- list(list(id = 456, name = "cat"))
-#' add(ss, "books")
-#' ## Now add a new document, and delete the one we just made
-#' file <- system.file("examples", "add_delete.json", package = "solrium")
-#' cat(readLines(file), sep = "\n")
-#' update_json(file, "books")
+#' conn$add(ss, "books")
 #' }
-update_json <- function(files, name, commit = TRUE, optimize = FALSE, max_segments = 1,
-                        expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE,
-                        prepare_commit = NULL, wt = 'json', raw = FALSE, ...) {
+update_json <- function(conn, files, name, commit = TRUE, optimize = FALSE,
+  max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+  soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  #stop_if_absent(name)
-  args <- sc(list(commit = asl(commit), optimize = asl(optimize), maxSegments = max_segments,
-                  expungeDeletes = asl(expunge_deletes), waitSearcher = asl(wait_searcher),
-                  softCommit = asl(soft_commit), prepareCommit = prepare_commit, wt = wt))
-  docreate(file.path(conn$url, sprintf('solr/%s/update/json/docs', name)), files, args, 'json', raw, ...)
+  check_sr(conn)
+  conn$update_json(files, name, commit, optimize, max_segments,
+                   expunge_deletes, wait_searcher, soft_commit, prepare_commit,
+                   wt, raw, ...)
 }
diff --git a/R/update_xml.R b/R/update_xml.R
index 7eb646f..a1cad6b 100644
--- a/R/update_xml.R
+++ b/R/update_xml.R
@@ -1,50 +1,54 @@
-#' Update documents using XML
+#' Update documents with XML data
 #'
 #' @export
 #' @family update
 #' @template update
 #' @template commitcontrol
-#' @param files Path to file to load into Solr
+#' @param conn A solrium connection object, see [SolrClient]
+#' @param files Path to a single file to load into Solr
 #' @examples \dontrun{
-#' # start Solr in Schemaless mode: bin/solr start -e schemaless
-#' 
+#' # start Solr: bin/solr start -f -c -p 8983
+#'
 #' # connect
-#' solr_connect()
+#' (conn <- SolrClient$new())
+#'
+#' # create a collection
+#' if (!conn$collection_exists("books")) {
+#'   conn$collection_create(name = "books", numShards = 2)
+#' }
 #'
 #' # Add documents
 #' file <- system.file("examples", "books.xml", package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_xml(file, "books")
+#' conn$update_xml(file, "books")
 #'
 #' # Update commands - can include many varying commands
 #' ## Add files
 #' file <- system.file("examples", "books2_delete.xml", package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_xml(file, "books")
+#' conn$update_xml(file, "books")
 #'
 #' ## Delete files
-#' file <- system.file("examples", "updatecommands_delete.xml", package = "solrium")
+#' file <- system.file("examples", "updatecommands_delete.xml",
+#'   package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_xml(file, "books")
+#' conn$update_xml(file, "books")
 #'
 #' ## Add and delete in the same document
 #' ## Add a document first, that we can later delete
 #' ss <- list(list(id = 456, name = "cat"))
-#' add(ss, "books")
+#' conn$add(ss, "books")
 #' ## Now add a new document, and delete the one we just made
 #' file <- system.file("examples", "add_delete.xml", package = "solrium")
 #' cat(readLines(file), sep = "\n")
-#' update_xml(file, "books")
+#' conn$update_xml(file, "books")
 #' }
-update_xml <- function(files, name, commit = TRUE, optimize = FALSE, max_segments = 1,
-                       expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE,
-                       prepare_commit = NULL, wt = 'json', raw = FALSE, ...) {
+update_xml <- function(conn, files, name, commit = TRUE, optimize = FALSE,
+  max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+  soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...) {
 
-  conn <- solr_settings()
-  check_conn(conn)
-  stop_if_absent(name)
-  args <- sc(list(commit = asl(commit), optimize = asl(optimize), maxSegments = max_segments,
-                  expungeDeletes = asl(expunge_deletes), waitSearcher = asl(wait_searcher),
-                  softCommit = asl(soft_commit), prepareCommit = prepare_commit, wt = wt))
-  docreate(file.path(conn$url, sprintf('solr/%s/update', name)), files, args, content = 'xml', raw, ...)
+  check_sr(conn)
+  conn$update_xml(files, name, commit, optimize, max_segments,
+                  expunge_deletes, wait_searcher, soft_commit, prepare_commit,
+                  wt, raw, ...)
 }
diff --git a/R/zzz.r b/R/zzz.r
index aa5e719..a7583f0 100644
--- a/R/zzz.r
+++ b/R/zzz.r
@@ -1,15 +1,15 @@
-#' Function to make make multiple args of the same name from a 
+#' Function to make multiple args of the same name from a
 #' single input with length > 1
 #' @param x Value
 makemultiargs <- function(x){
   value <- get(x, envir = parent.frame(n = 2))
-  if ( length(value) == 0 ) { 
-    NULL 
+  if ( length(value) == 0 ) {
+    NULL
   } else {
-    if ( any(sapply(value, is.na)) ) { 
-      NULL 
+    if ( any(sapply(value, is.na)) ) {
+      NULL
     } else {
-      if ( !is.character(value) ) { 
+      if ( !is.character(value) ) {
         value <- as.character(value)
       }
       names(value) <- rep(x, length(value))
@@ -18,42 +18,60 @@ makemultiargs <- function(x){
   }
 }
 
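+# like makemultiargs() above, but takes values from a list rather than the
+# calling frame; e.g. make_multiargs("fq", list(fq = c("a:1", "b:2"))) gives
+# c(fq = "a:1", fq = "b:2"), i.e. a repeated query parameter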
+make_multiargs <- function(z, lst) {
+  value <- lst[[z]]
+  if (length(value) == 0) {
+    return(NULL)
+  } else {
+    if (any(sapply(value, is.na))) {
+      return(NULL)
+    } else {
+      if ( !is.character(value) ) {
+        value <- as.character(value)
+      }
+      names(value) <- rep(z, length(value))
+      value
+    }
+  }
+}
+
 popp <- function(x, nms) {
   x[!names(x) %in% nms]
 }
 
-#' Function to make a list of args passing arg names through multiargs function.
-#' @param x Value
-collectargs <- function(x){
+# Function to make a list of args passing arg names through multiargs function
+collectargs <- function(z, lst){
   outlist <- list()
-  for (i in seq_along(x)) {
-    outlist[[i]] <- makemultiargs(x[[i]])
+  for (i in seq_along(z)) {
+    outlist[[i]] <- make_multiargs(z[[i]], lst)
   }
   as.list(unlist(sc(outlist)))
 }
 
-# GET helper fxn
-solr_GET <- function(base, args, callopts = NULL, ...){
-  tt <- GET(base, query = args, callopts, ...)
-  if (solr_settings()$verbose) message(URLdecode(tt$url))
-  if (tt$status_code > 201) {
-    solr_error(tt)
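+# GET helper: build a crul client (honoring any proxy), make the request, and
+# return the response body as UTF-8 text, raising an error on failure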
+solr_GET <- function(base, path, args, callopts = NULL, proxy = NULL) {
+  cli <- crul::HttpClient$new(url = base, opts = callopts)
+  if (inherits(proxy, "proxy")) cli$proxies <- proxy
+  res <- cli$get(path = path, query = args)
+  if (res$status_code > 201) {
+    solr_error(res)
   } else {
-    content(tt, as = "text", encoding = "UTF-8")
+    res$parse("UTF-8")
   }
 }
 
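+# turn a failed Solr HTTP response into an R error; set the SOLR_ERRORS env
+# var to anything other than "simple" (e.g. "complete") to include the
+# server-side stack trace in the error message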
 solr_error <- function(x) {
-  if (grepl("html", x$headers$`content-type`)) {
-    stop(http_status(x)$message, call. = FALSE)
-  } else { 
-    err <- jsonlite::fromJSON(content(x, "text", encoding = "UTF-8"))
+  if (grepl("html", x$response_headers$`content-type`)) {
+    stat <- x$status_http()
+    stop(sprintf('(%s) %s - %s',
+                 stat$status_code, stat$message, stat$explanation))
+  } else {
+    err <- jsonlite::fromJSON(x$parse("UTF-8"))
     erropt <- Sys.getenv("SOLR_ERRORS")
     if (erropt == "simple" || erropt == "") {
       stop(err$error$code, " - ", err$error$msg, call. = FALSE)
     } else {
-      stop(err$error$code, " - ", err$error$msg, 
-           "\nAPI stack trace\n", 
+      stop(err$error$code, " - ", err$error$msg,
+           "\nAPI stack trace\n",
            pluck_trace(err$error$trace), call. = FALSE)
     }
   }
@@ -68,28 +86,36 @@ pluck_trace <- function(x) {
 }
 
 # POST helper fxn
-solr_POST <- function(base, body, args, content, ...) {
+solr_POST <- function(base, path, body, args, ctype, proxy, ...) {
   invisible(match.arg(args$wt, c("xml", "json", "csv")))
-  ctype <- get_ctype(content)
   args <- lapply(args, function(x) if (is.logical(x)) tolower(x) else x)
-  tt <- POST(base, query = args, body = upload_file(path = body), ctype)
+  cli <- crul::HttpClient$new(url = base, headers = ctype, opts = list(...))
+  if (inherits(proxy, "proxy")) cli$proxies <- proxy
+  tt <- cli$post(path, query = args, body = body)
   get_response(tt)
 }
 
 # POST helper fxn - just a body
-solr_POST_body <- function(base, body, args, ...) {
+solr_POST_body <- function(base, path, body, args, ctype, callopts = list(), proxy) {
   invisible(match.arg(args$wt, c("xml", "json")))
-  tt <- POST(base, query = args, body = body, 
-             content_type_json(), encode = "json", ...)
-  get_response(tt)
+  httpcli <- crul::HttpClient$new(url = base, headers = ctype, opts = callopts)
+  if (inherits(proxy, "proxy")) httpcli$proxies <- proxy
+  res <- httpcli$post(path = path, query = args, body = body, encode = "json")
+  if (res$status_code > 201) solr_error(res) else res$parse("UTF-8")
 }
 
 # POST helper fxn for R objects
-obj_POST <- function(base, body, args, ...) {
+obj_POST <- function(base, path, body, args, proxy, ...) {
   invisible(match.arg(args$wt, c("xml", "json", "csv")))
   args <- lapply(args, function(x) if (is.logical(x)) tolower(x) else x)
   body <- jsonlite::toJSON(body, auto_unbox = TRUE)
-  tt <- POST(base, query = args, body = body, content_type_json(), ...)
+  cli <- crul::HttpClient$new(
+    url = base,
+    headers = list(`Content-Type` = "application/json"),
+    opts = list(...)
+  )
+  if (inherits(proxy, "proxy")) cli$proxies <- proxy
+  tt <- cli$post(path, query = args, body = body, encode = "form", ...)
   get_response(tt)
 }
 
@@ -100,47 +126,42 @@ stop_if_absent <- function(x) {
     if (inherits(tmp, "error")) FALSE else tmp
   }, logical(1))
   if (!any(tmp)) {
-    stop(x, " doesn't exist - create it first.\n See core_create() or collection_create()", 
-         call. = FALSE)
+    stop(
+      x,
+      " doesn't exist - create it first.\n See core_create()/collection_create()",
+      call. = FALSE)
   }
 }
 
 # helper for POSTing from R objects
-obj_proc <- function(url, body, args, raw, ...) {
-  out <- structure(obj_POST(url, body, args, ...), class = "update", wt = args$wt)
+obj_proc <- function(base, path, body, args, raw, proxy, ...) {
+  out <- structure(obj_POST(base, path, body, args, proxy, ...), class = "update",
+                   wt = args$wt)
   if (raw) {
     out
   } else {
-    solr_parse(out) 
+    solr_parse(out)
   }
 }
 
-get_ctype <- function(x) {
-  switch(x, 
-         xml = content_type_xml(),
-         json = content_type_json(),
-         csv = content_type("application/csv; charset=utf-8")
-  )
-}
-
-get_response <- function(x, as = "text") {
+get_response <- function(x) {
   if (x$status_code > 201) {
-    err <- jsonlite::fromJSON(httr::content(x, "text", encoding = "UTF-8"))$error
+    err <- jsonlite::fromJSON(x$parse("UTF-8"))$error
     stop(sprintf("%s: %s", err$code, err$msg), call. = FALSE)
   } else {
-    content(x, as = as, encoding = "UTF-8")
+    x$parse("UTF-8")
   }
 }
 
 # small function to replace elements of length 0 with NULL
 replacelen0 <- function(x) {
-  if (length(x) < 1) { 
-    NULL 
-  } else { 
-    x 
+  if (length(x) < 1) {
+    NULL
+  } else {
+    x
   }
 }
-  
+
 sc <- function(l) Filter(Negate(is.null), l)
 
 asl <- function(z) {
@@ -159,63 +180,58 @@ asl <- function(z) {
   }
 }
 
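+# POST one or more files to a Solr update endpoint; returns the raw response
+# body when raw = TRUE, otherwise the solr_parse()d result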
-docreate <- function(base, files, args, content, raw, ...) {
-  out <- structure(solr_POST(base, files, args, content, ...), class = "update", wt = args$wt)
-  if (raw) { 
-    return(out) 
-  } else { 
-    solr_parse(out) 
-  } 
+docreate <- function(base, path, files, args, ctype, raw, proxy, ...) {
+  out <- structure(solr_POST(base, path, files, args, ctype, proxy, ...),
+                   class = "update", wt = args$wt)
+  if (raw) return(out)
+  solr_parse(out)
 }
 
-objcreate <- function(base, dat, args, raw, ...) {
-  out <- structure(solr_POST(base, dat, args, "json", ...), class = "update", wt = args$wt)
-  if (raw) { 
-    return(out) 
-  } else { 
-    solr_parse(out) 
-  } 
+doatomiccreate <- function(base, path, body, args, content, raw, proxy, ...) {
+  ctype <- get_ctype(content)
+  out <- structure(solr_POST_body(base, path, body, args, ctype, list(...), proxy),
+                   class = "update", wt = args$wt)
+  if (raw) return(out)
+  solr_parse(out)
 }
 
-check_conn <- function(x) {
-  if (!inherits(x, "solr_connection")) {
-    stop("Input to conn parameter must be an object of class solr_connection", 
-         call. = FALSE)
-  }
-  if (is.null(x)) {
-    stop("You must provide a connection object", 
-         call. = FALSE)
-  }
+objcreate <- function(base, path, dat, args, raw, ...) {
+  out <- structure(solr_POST(base, path, dat, args, "json", ...),
+                   class = "update", wt = args$wt)
+  if (raw) return(out)
+  solr_parse(out)
 }
 
 check_wt <- function(x) {
-  if (!x %in% c('json', 'xml', 'csv')) {
-    stop("wt must be one of: json, xml, csv", 
-         call. = FALSE)
-  }  
+  if (!is.null(x)) {
+    if (!x %in% c('json', 'xml', 'csv')) {
+      stop("wt must be one of: json, xml, csv",
+           call. = FALSE)
+    }
+  }
 }
 
 check_defunct <- function(...) {
   calls <- names(sapply(match.call(), deparse))[-1]
   calls_vec <- "verbose" %in% calls
   if (any(calls_vec)) {
-    stop("The parameter verbose has been removed - see ?solr_connect", 
+    stop("The parameter verbose has been removed - see ?SolrClient",
          call. = FALSE)
   }
 }
 
 is_in_cloud_mode <- function(x) {
-  res <- GET(file.path(x$url, "solr/admin/collections"), 
-             query = list(wt = 'json'))
+  xx <- crul::HttpClient$new(url = x$make_url())
+  res <- xx$get("solr/admin/collections",
+                query = list(action = 'LIST', wt = 'json'))
   if (res$status_code > 201) return(FALSE)
-  msg <- jsonlite::fromJSON(content(res, "text", encoding = "UTF-8"))$error$msg
-  if (grepl("not running", msg)) {
-    FALSE
-  } else {
-    TRUE
-  }
+  msg <- jsonlite::fromJSON(res$parse("UTF-8"))$error$msg
+  if (is.null(msg)) return(TRUE)
+  !grepl("not running", msg)
 }
 
+is_not_in_cloud_mode <- function(x) !is_in_cloud_mode(x)
+
 json_parse <- function(x, raw) {
   if (raw) {
     x
@@ -236,4 +252,33 @@ unbox_if <- function(x, recursive = FALSE) {
   }
 }
 
-`%||%` <- function(x, y) if (is.na(x) || is.null(x)) y else x
+`%||%` <- function(x, y) if (suppressWarnings(is.na(x)) || is.null(x)) y else x
+
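+# build the request path for a core/collection: "solr/<name>/select", or an
+# empty string when no name is given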
+url_handle <- function(name) {
+  if (is.null(name)) {
+    ""
+  } else {
+    file.path("solr", name, "select")
+  }
+}
+
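+# assert that the input is a SolrClient connection object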
+check_sr <- function(x) {
+  if (!inherits(x, "SolrClient")) {
+    stop("conn must be a SolrClient object, see ?SolrClient")
+  }
+}
+
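+# validate that x is numeric-like and format it as a plain (non-scientific)
+# string for use as a query parameter; NULL passes through as NULL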
+cn <- function(x) {
+  name <- substitute(x)
+  if (!is.null(x)) {
+    tryx <- tryCatch(as.numeric(as.character(x)), warning = function(e) e)
+    if ("warning" %in% class(tryx)) {
+      stop(name, " should be a numeric or integer class value", call. = FALSE)
+    }
+    if (!inherits(tryx, "numeric") || is.na(tryx))
+      stop(name, " should be a numeric or integer class value", call. = FALSE)
+    return( format(x, digits = 22, scientific = FALSE) )
+  } else {
+    NULL
+  }
+}
diff --git a/README.md b/README.md
index a479cbd..18d138b 100644
--- a/README.md
+++ b/README.md
@@ -3,30 +3,91 @@ solrium
 
 
 
-[![Build Status](https://api.travis-ci.org/ropensci/solrium.png)](https://travis-ci.org/ropensci/solrium)
+[![Build Status](https://travis-ci.org/ropensci/solrium.svg?branch=master)](https://travis-ci.org/ropensci/solrium)
 [![codecov.io](https://codecov.io/github/ropensci/solrium/coverage.svg?branch=master)](https://codecov.io/github/ropensci/solrium?branch=master)
-[![rstudio mirror downloads](http://cranlogs.r-pkg.org/badges/solrium?color=2ED968)](https://github.com/metacran/cranlogs.app)
+[![rstudio mirror downloads](https://cranlogs.r-pkg.org/badges/solrium?color=2ED968)](https://github.com/metacran/cranlogs.app)
+[![cran version](https://www.r-pkg.org/badges/version/solrium)](https://cran.r-project.org/package=solrium)
 
-**A general purpose R interface to [Solr](http://lucene.apache.org/solr/)**
+**A general purpose R interface to [Solr](https://lucene.apache.org/solr/)**
 
-Development is now following Solr v5 and greater - which introduced many changes, which means many functions here may not work with your Solr installation older than v5.
+Development now follows Solr v7 and greater, which introduced many changes; as a result, many functions here may not work with Solr installations older than v7.
 
 Be aware that some functions currently only work in certain Solr modes, e.g., `collection_create()` won't work when you are not in SolrCloud mode. You should, however, get an error message saying so.
 
-> Currently developing against Solr `v5.4.1`
+> Currently developing against Solr `v7.0.0`
 
 > Note that we recently changed the package name to `solrium`. A previous version of this package is on CRAN as `solr`, but the next version will be up as `solrium`.
 
 ## Solr info
 
 + [Solr home page](http://lucene.apache.org/solr/)
-+ [Highlighting help](http://wiki.apache.org/solr/HighlightingParameters)
++ [Highlighting help](https://lucene.apache.org/solr/guide/7_0/highlighting.html)
 + [Faceting help](http://wiki.apache.org/solr/SimpleFacetParameters)
 + [Solr stats](http://wiki.apache.org/solr/StatsComponent)
 + ['More like this' searches](http://wiki.apache.org/solr/MoreLikeThis)
 + [Grouping/Field collapsing](http://wiki.apache.org/solr/FieldCollapsing)
 + [Install and Setup SOLR in OSX, including running Solr](http://risnandar.wordpress.com/2013/09/08/how-to-install-and-setup-apache-lucene-solr-in-osx/)
-+ [Solr csv writer](http://wiki.apache.org/solr/CSVResponseWriter)
++ [Solr csv writer](https://lucene.apache.org/solr/guide/7_0/response-writers.html#ResponseWriters-CSVResponseWriter)
+
+## Package API and ways of using the package
+
+The first thing to look at is `SolrClient` to instantiate a client connection
+to your Solr instance. `ping` and `schema` are helpful functions to look
+at after instantiating your client.
+
+There are two ways to use `solrium`:
+
+1. Call functions on the `SolrClient` object
+2. Pass the `SolrClient` object to functions
+
+For example, if we instantiate a client like `conn <- SolrClient$new()`, we
+can use the first way via `conn$search(...)` and the second way via
+`solr_search(conn, ...)`. Supporting both styles hopefully makes the package
+friendlier for more people: those who prefer an object-oriented approach and
+those who prefer a functional one.
+
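+As a minimal sketch of the two styles (assuming a local Solr instance is
+running with a `gettingstarted` collection):
+
+```r
+conn <- SolrClient$new()
+# method style: call search() on the client object
+conn$search("gettingstarted", params = list(q = "*:*"))
+# functional style: pass the client as the first argument
+solr_search(conn, "gettingstarted", params = list(q = "*:*"))
+```
+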
+**Collections**
+
+Functions that start with `collection` work with Solr collections when in
+cloud mode. Note that these functions won't work when in Solr standalone mode.
+
+**Cores**
+
+Functions that start with `core` work with Solr cores when in standalone
+Solr mode. Note that these functions won't work when in SolrCloud mode.
+
+**Documents**
+
+The following functions work with documents in Solr
+
+```
+#>  - add
+#>  - delete_by_id
+#>  - delete_by_query
+#>  - update_atomic_json
+#>  - update_atomic_xml
+#>  - update_csv
+#>  - update_json
+#>  - update_xml
+```
+
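+A minimal round trip with documents, as a sketch (assumes a local client and
+an existing `books` collection):
+
+```r
+conn <- SolrClient$new()
+# add a document, fetch it back by id, then delete it
+conn$add(list(list(id = 1, price = 100)), name = "books")
+conn$get(ids = 1, name = "books")
+conn$delete_by_id(ids = 1, name = "books")
+```
+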
+**Search**
+
+Search functions, including `solr_parse` for parsing results from different
+functions appropriately.
+
+```
+#>  - solr_all
+#>  - solr_facet
+#>  - solr_get
+#>  - solr_group
+#>  - solr_highlight
+#>  - solr_mlt
+#>  - solr_parse
+#>  - solr_search
+#>  - solr_stats
+```
+
 
 ## Install
 
@@ -51,43 +112,59 @@ library("solrium")
 
 ## Setup
 
-Use `solr_connect()` to initialize your connection. These examples use a remote Solr server, but work on any local Solr server.
+Use `SolrClient$new()` to initialize your connection. These examples use a remote Solr server, but work on any local Solr server.
 
 
 ```r
-invisible(solr_connect('http://api.plos.org/search'))
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
+#> <Solr Client>
+#>   host: api.plos.org
+#>   path: search
+#>   port: 
+#>   scheme: http
+#>   errors: simple
+#>   proxy:
 ```
 
 You can also set whether you want simple or detailed error messages (via `errors`), and your proxy settings (via `proxy`) if needed. For example:
 
 
 ```r
-solr_connect("localhost:8983", errors = "complete", verbose = FALSE)
+SolrClient$new(errors = "complete")
 ```
 
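+Proxy settings, as a sketch (the proxy address below is a placeholder, not a
+real proxy):
+
+```r
+SolrClient$new(proxy = list(url = "127.0.0.1:3128"))
+```
+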
-Then you can get your settings like
+Your settings are printed in the print method for the connection object
 
 
 ```r
-solr_settings()
-#> <solr_connection>
-#>   url:    localhost:8983
-#>   errors: complete
-#>   verbose: FALSE
+cli
+#> <Solr Client>
+#>   host: api.plos.org
+#>   path: search
+#>   port: 
+#>   scheme: http
+#>   errors: simple
 #>   proxy:
 ```
 
+For local Solr server setup:
+
+```
+bin/solr start -e cloud -noprompt
+bin/post -c gettingstarted example/exampledocs/*.xml
+```
+
+
 ## Search
 
 
 ```r
-solr_search(q='*:*', rows=2, fl='id')
-#> Source: local data frame [2 x 1]
-#>
-#>                                                              id
-#>                                                           (chr)
-#> 1       10.1371/annotation/d090733e-1f34-43c5-a06a-255456946303
-#> 2 10.1371/annotation/d090733e-1f34-43c5-a06a-255456946303/title
+cli$search(params = list(q='*:*', rows=2, fl='id'))
+#> # A tibble: 2 x 1
+#>                                      id
+#>                                   <chr>
+#> 1    10.1371/journal.pone.0079536/title
+#> 2 10.1371/journal.pone.0079536/abstract
 ```
 
 ### Search grouped data
@@ -96,13 +173,15 @@ Most recent publication by journal
 
 
 ```r
-solr_group(q='*:*', group.field='journal', rows=5, group.limit=1, group.sort='publication_date desc', fl='publication_date, score')
+cli$group(params = list(q='*:*', group.field='journal', rows=5, group.limit=1,
+                        group.sort='publication_date desc',
+                        fl='publication_date, score'))
 #>                         groupValue numFound start     publication_date
-#> 1                         plos one  1233651     0 2016-02-05T00:00:00Z
-#> 2                   plos pathogens    42827     0 2016-02-05T00:00:00Z
-#> 3                     plos biology    28755     0 2016-02-04T00:00:00Z
-#> 4 plos neglected tropical diseases    33921     0 2016-02-05T00:00:00Z
-#> 5                    plos genetics    49295     0 2016-02-05T00:00:00Z
+#> 1                         plos one  1572163     0 2017-11-01T00:00:00Z
+#> 2 plos neglected tropical diseases    47510     0 2017-11-01T00:00:00Z
+#> 3                    plos genetics    59871     0 2017-11-01T00:00:00Z
+#> 4                   plos pathogens    53246     0 2017-11-01T00:00:00Z
+#> 5                             none    63561     0 2012-10-23T00:00:00Z
 #>   score
 #> 1     1
 #> 2     1
@@ -115,18 +194,21 @@ First publication by journal
 
 
 ```r
-solr_group(q='*:*', group.field='journal', group.limit=1, group.sort='publication_date asc', fl='publication_date, score', fq="publication_date:[1900-01-01T00:00:00Z TO *]")
+cli$group(params = list(q = '*:*', group.field = 'journal', group.limit = 1,
+                        group.sort = 'publication_date asc',
+                        fl = c('publication_date', 'score'),
+                        fq = "publication_date:[1900-01-01T00:00:00Z TO *]"))
 #>                          groupValue numFound start     publication_date
-#> 1                          plos one  1233651     0 2006-12-20T00:00:00Z
-#> 2                    plos pathogens    42827     0 2005-07-22T00:00:00Z
-#> 3                      plos biology    28755     0 2003-08-18T00:00:00Z
-#> 4  plos neglected tropical diseases    33921     0 2007-08-30T00:00:00Z
-#> 5                     plos genetics    49295     0 2005-06-17T00:00:00Z
-#> 6                     plos medicine    19944     0 2004-09-07T00:00:00Z
-#> 7        plos computational biology    36383     0 2005-06-24T00:00:00Z
-#> 8                              none    57557     0 2005-08-23T00:00:00Z
-#> 9              plos clinical trials      521     0 2006-04-21T00:00:00Z
-#> 10                     plos medicin        9     0 2012-04-17T00:00:00Z
+#> 1                          plos one  1572163     0 2006-12-20T00:00:00Z
+#> 2  plos neglected tropical diseases    47510     0 2007-08-30T00:00:00Z
+#> 3                    plos pathogens    53246     0 2005-07-22T00:00:00Z
+#> 4        plos computational biology    45582     0 2005-06-24T00:00:00Z
+#> 5                              none    57532     0 2005-08-23T00:00:00Z
+#> 6              plos clinical trials      521     0 2006-04-21T00:00:00Z
+#> 7                     plos genetics    59871     0 2005-06-17T00:00:00Z
+#> 8                     plos medicine    23519     0 2004-09-07T00:00:00Z
+#> 9                      plos medicin        9     0 2012-04-17T00:00:00Z
+#> 10                     plos biology    32513     0 2003-08-18T00:00:00Z
 #>    score
 #> 1      1
 #> 2      1
@@ -144,55 +226,66 @@ Search group query : Last 3 publications of 2013.
 
 
 ```r
-solr_group(q='*:*', group.query='publication_date:[2013-01-01T00:00:00Z TO 2013-12-31T00:00:00Z]', group.limit = 3, group.sort='publication_date desc', fl='publication_date')
+gq <- 'publication_date:[2013-01-01T00:00:00Z TO 2013-12-31T00:00:00Z]'
+cli$group(
+  params = list(q='*:*', group.query = gq,
+                group.limit = 3, group.sort = 'publication_date desc',
+                fl = 'publication_date'))
 #>   numFound start     publication_date
-#> 1   307081     0 2013-12-31T00:00:00Z
-#> 2   307081     0 2013-12-31T00:00:00Z
-#> 3   307081     0 2013-12-31T00:00:00Z
+#> 1   307076     0 2013-12-31T00:00:00Z
+#> 2   307076     0 2013-12-31T00:00:00Z
+#> 3   307076     0 2013-12-31T00:00:00Z
 ```
 
 Search group with format simple
 
 
 ```r
-solr_group(q='*:*', group.field='journal', rows=5, group.limit=3, group.sort='publication_date desc', group.format='simple', fl='journal, publication_date')
-#>   numFound start        journal     publication_date
-#> 1  1508973     0       PLOS ONE 2016-02-05T00:00:00Z
-#> 2  1508973     0       PLOS ONE 2016-02-05T00:00:00Z
-#> 3  1508973     0       PLOS ONE 2016-02-05T00:00:00Z
-#> 4  1508973     0 PLOS Pathogens 2016-02-05T00:00:00Z
-#> 5  1508973     0 PLOS Pathogens 2016-02-05T00:00:00Z
+cli$group(params = list(q='*:*', group.field='journal', rows=5,
+                        group.limit=3, group.sort='publication_date desc',
+                        group.format='simple', fl='journal, publication_date'))
+#>   numFound start     publication_date  journal
+#> 1  1898495     0 2012-10-23T00:00:00Z     <NA>
+#> 2  1898495     0 2012-10-23T00:00:00Z     <NA>
+#> 3  1898495     0 2012-10-23T00:00:00Z     <NA>
+#> 4  1898495     0 2017-11-01T00:00:00Z PLOS ONE
+#> 5  1898495     0 2017-11-01T00:00:00Z PLOS ONE
 ```
 
 ### Facet
 
 
 ```r
-solr_facet(q='*:*', facet.field='journal', facet.query='cell,bird')
+cli$facet(params = list(q='*:*', facet.field='journal', facet.query=c('cell', 'bird')))
 #> $facet_queries
-#>        term value
-#> 1 cell,bird    24
-#>
+#> # A tibble: 2 x 2
+#>    term  value
+#>   <chr>  <int>
+#> 1  cell 157652
+#> 2  bird  16385
+#> 
 #> $facet_fields
 #> $facet_fields$journal
-#>                                 X1      X2
-#> 1                         plos one 1233651
-#> 2                    plos genetics   49295
-#> 3                   plos pathogens   42827
-#> 4       plos computational biology   36383
-#> 5 plos neglected tropical diseases   33921
-#> 6                     plos biology   28755
-#> 7                    plos medicine   19944
+#> # A tibble: 9 x 2
+#>                               term   value
+#>                              <chr>   <chr>
+#> 1                         plos one 1572163
+#> 2                    plos genetics   59871
+#> 3                   plos pathogens   53246
+#> 4 plos neglected tropical diseases   47510
+#> 5       plos computational biology   45582
+#> 6                     plos biology   32513
+#> 7                    plos medicine   23519
 #> 8             plos clinical trials     521
 #> 9                     plos medicin       9
-#>
-#>
+#> 
+#> 
 #> $facet_pivot
 #> NULL
-#>
+#> 
 #> $facet_dates
 #> NULL
-#>
+#> 
 #> $facet_ranges
 #> NULL
 ```
@@ -201,33 +294,31 @@ solr_facet(q='*:*', facet.field='journal', facet.query='cell,bird')
 
 
 ```r
-solr_highlight(q='alcohol', hl.fl = 'abstract', rows=2)
-#> $`10.1371/journal.pmed.0040151`
-#> $`10.1371/journal.pmed.0040151`$abstract
-#> [1] "Background: <em>Alcohol</em> consumption causes an estimated 4% of the global disease burden, prompting"
-#>
-#>
-#> $`10.1371/journal.pone.0027752`
-#> $`10.1371/journal.pone.0027752`$abstract
-#> [1] "Background: The negative influences of <em>alcohol</em> on TB management with regard to delays in seeking"
+cli$highlight(params = list(q='alcohol', hl.fl = 'abstract', rows=2))
+#> # A tibble: 2 x 2
+#>                          names
+#>                          <chr>
+#> 1 10.1371/journal.pone.0185457
+#> 2 10.1371/journal.pone.0071284
+#> # ... with 1 more variables: abstract <chr>
 ```
 
 ### Stats
 
 
 ```r
-out <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'), stats.facet='journal')
+out <- cli$stats(params = list(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'), stats.facet='journal'))
 ```
 
 
 ```r
 out$data
 #>                   min    max count missing       sum sumOfSquares
-#> counter_total_all   0 366453 31467       0 140736717 3.127644e+12
-#> alm_twitterCount    0   1753 31467       0    166651 3.225792e+07
-#>                          mean     stddev
-#> counter_total_all 4472.517781 8910.30381
-#> alm_twitterCount     5.296056   31.57718
+#> counter_total_all   0 920716 40497       0 219020039 7.604567e+12
+#> alm_twitterCount    0   3401 40497       0    281128 7.300081e+07
+#>                          mean      stddev
+#> counter_total_all 5408.302813 12591.07462
+#> alm_twitterCount     6.941946    41.88646
 ```
 
 ### More like this
@@ -236,65 +327,74 @@ out$data
 
 
 ```r
-out <- solr_mlt(q='title:"ecology" AND body:"cell"', mlt.fl='title', mlt.mindf=1, mlt.mintf=1, fl='counter_total_all', rows=5)
+out <- cli$mlt(params = list(q='title:"ecology" AND body:"cell"', mlt.fl='title', mlt.mindf=1, mlt.mintf=1, fl='counter_total_all', rows=5))
 ```
 
 
 ```r
 out$docs
-#> Source: local data frame [5 x 2]
-#>
+#> # A tibble: 5 x 2
 #>                             id counter_total_all
-#>                          (chr)             (int)
-#> 1 10.1371/journal.pbio.1001805             17004
-#> 2 10.1371/journal.pbio.0020440             23871
-#> 3 10.1371/journal.pone.0087217              5904
-#> 4 10.1371/journal.pbio.1002191             12846
-#> 5 10.1371/journal.pone.0040117              4294
+#>                          <chr>             <int>
+#> 1 10.1371/journal.pbio.1001805             21824
+#> 2 10.1371/journal.pbio.0020440             25424
+#> 3 10.1371/journal.pbio.1002559              9746
+#> 4 10.1371/journal.pone.0087217             11502
+#> 5 10.1371/journal.pbio.1002191             22013
 ```
 
 
 ```r
 out$mlt
 #> $`10.1371/journal.pbio.1001805`
-#>                             id counter_total_all
-#> 1 10.1371/journal.pone.0082578              2192
-#> 2 10.1371/journal.pone.0098876              2434
-#> 3 10.1371/journal.pone.0102159              1166
-#> 4 10.1371/journal.pone.0076063              3217
-#> 5 10.1371/journal.pone.0087380              1883
-#>
+#> # A tibble: 5 x 4
+#>   numFound start                           id counter_total_all
+#>      <int> <int>                        <chr>             <int>
+#> 1     3822     0 10.1371/journal.pone.0098876              3590
+#> 2     3822     0 10.1371/journal.pone.0082578              2893
+#> 3     3822     0 10.1371/journal.pone.0102159              2028
+#> 4     3822     0 10.1371/journal.pcbi.1002652              3819
+#> 5     3822     0 10.1371/journal.pcbi.1003408              9920
+#> 
 #> $`10.1371/journal.pbio.0020440`
-#>                             id counter_total_all
-#> 1 10.1371/journal.pone.0035964              5524
-#> 2 10.1371/journal.pone.0102679              3085
-#> 3 10.1371/journal.pone.0003259              2784
-#> 4 10.1371/journal.pone.0068814              7503
-#> 5 10.1371/journal.pone.0101568              2648
-#>
+#> # A tibble: 5 x 4
+#>   numFound start                           id counter_total_all
+#>      <int> <int>                        <chr>             <int>
+#> 1     1115     0 10.1371/journal.pone.0162651              2828
+#> 2     1115     0 10.1371/journal.pone.0003259              3225
+#> 3     1115     0 10.1371/journal.pntd.0003377              4267
+#> 4     1115     0 10.1371/journal.pone.0101568              4603
+#> 5     1115     0 10.1371/journal.pone.0068814              9042
+#> 
+#> $`10.1371/journal.pbio.1002559`
+#> # A tibble: 5 x 4
+#>   numFound start                           id counter_total_all
+#>      <int> <int>                        <chr>             <int>
+#> 1     5482     0 10.1371/journal.pone.0155989              2519
+#> 2     5482     0 10.1371/journal.pone.0023086              8442
+#> 3     5482     0 10.1371/journal.pone.0155028              1547
+#> 4     5482     0 10.1371/journal.pone.0041684             22057
+#> 5     5482     0 10.1371/journal.pone.0164330               969
+#> 
 #> $`10.1371/journal.pone.0087217`
-#>                             id counter_total_all
-#> 1 10.1371/journal.pone.0131665               403
-#> 2 10.1371/journal.pcbi.0020092             19563
-#> 3 10.1371/journal.pone.0133941               463
-#> 4 10.1371/journal.pone.0123774               990
-#> 5 10.1371/journal.pone.0140306               321
-#>
+#> # A tibble: 5 x 4
+#>   numFound start                           id counter_total_all
+#>      <int> <int>                        <chr>             <int>
+#> 1     4576     0 10.1371/journal.pone.0175497              1088
+#> 2     4576     0 10.1371/journal.pone.0159131              4937
+#> 3     4576     0 10.1371/journal.pcbi.0020092             24786
+#> 4     4576     0 10.1371/journal.pone.0133941              1336
+#> 5     4576     0 10.1371/journal.pone.0131665              1207
+#> 
 #> $`10.1371/journal.pbio.1002191`
-#>                             id counter_total_all
-#> 1 10.1371/journal.pbio.1002232              1936
-#> 2 10.1371/journal.pone.0131700               972
-#> 3 10.1371/journal.pone.0070448              1607
-#> 4 10.1371/journal.pone.0144763               483
-#> 5 10.1371/journal.pone.0062824              2531
-#>
-#> $`10.1371/journal.pone.0040117`
-#>                             id counter_total_all
-#> 1 10.1371/journal.pone.0069352              2743
-#> 2 10.1371/journal.pone.0148280                 0
-#> 3 10.1371/journal.pone.0035502              4016
-#> 4 10.1371/journal.pone.0014065              5744
-#> 5 10.1371/journal.pone.0113280              1977
+#> # A tibble: 5 x 4
+#>   numFound start                           id counter_total_all
+#>      <int> <int>                        <chr>             <int>
+#> 1    12585     0 10.1371/journal.pbio.1002232              3055
+#> 2    12585     0 10.1371/journal.pone.0070448              2203
+#> 3    12585     0 10.1371/journal.pone.0131700              2493
+#> 4    12585     0 10.1371/journal.pone.0121680              4980
+#> 5    12585     0 10.1371/journal.pone.0041534              5701
 ```
 
 ### Parsing
@@ -305,8 +405,9 @@ For example:
 
 
 ```r
-(out <- solr_highlight(q='alcohol', hl.fl = 'abstract', rows=2, raw=TRUE))
-#> [1] "{\"response\":{\"numFound\":20268,\"start\":0,\"docs\":[{},{}]},\"highlighting\":{\"10.1371/journal.pmed.0040151\":{\"abstract\":[\"Background: <em>Alcohol</em> consumption causes an estimated 4% of the global disease burden, prompting\"]},\"10.1371/journal.pone.0027752\":{\"abstract\":[\"Background: The negative influences of <em>alcohol</em> on TB management with regard to delays in seeking\"]}}}\n"
+(out <- cli$highlight(params = list(q='alcohol', hl.fl = 'abstract', rows=2),
+                      raw=TRUE))
+#> [1] "{\"response\":{\"numFound\":25987,\"start\":0,\"maxScore\":4.705177,\"docs\":[{\"id\":\"10.1371/journal.pone.0185457\",\"journal\":\"PLOS ONE\",\"eissn\":\"1932-6203\",\"publication_date\":\"2017-09-28T00:00:00Z\",\"article_type\":\"Research Article\",\"author_display\":[\"Jacqueline Willmore\",\"Terry-Lynne Marko\",\"Darcie Taing\",\"Hugues Sampasa-Kanyinga\"],\"abstract\":[\"Objectives: Alcohol-related morbidity and mortality are significant public health issues. The purpose of [...]
 #> attr(,"class")
 #> [1] "sr_high"
 #> attr(,"wt")
@@ -318,12 +419,12 @@ Then parse
 
 ```r
 solr_parse(out, 'df')
+#> # A tibble: 2 x 2
 #>                          names
-#> 1 10.1371/journal.pmed.0040151
-#> 2 10.1371/journal.pone.0027752
-#>                                                                                                    abstract
-#> 1   Background: <em>Alcohol</em> consumption causes an estimated 4% of the global disease burden, prompting
-#> 2 Background: The negative influences of <em>alcohol</em> on TB management with regard to delays in seeking
+#>                          <chr>
+#> 1 10.1371/journal.pone.0185457
+#> 2 10.1371/journal.pone.0071284
+#> # ... with 1 more variables: abstract <chr>
 ```
 
 ### Advanced: Function Queries
@@ -332,52 +433,50 @@ Function Queries allow you to query on actual numeric fields in the SOLR databas
 
 
 ```r
-solr_search(q='_val_:"product(counter_total_all,alm_twitterCount)"',
-  rows=5, fl='id,title', fq='doc_type:full')
-#> Source: local data frame [5 x 2]
-#>
+cli$search(params = list(q='_val_:"product(counter_total_all,alm_twitterCount)"',
+  rows=5, fl='id,title', fq='doc_type:full'))
+#> # A tibble: 5 x 2
 #>                             id
-#>                          (chr)
+#>                          <chr>
 #> 1 10.1371/journal.pmed.0020124
-#> 2 10.1371/journal.pone.0073791
-#> 3 10.1371/journal.pone.0115069
-#> 4 10.1371/journal.pone.0046362
-#> 5 10.1371/journal.pone.0069841
-#> Variables not shown: title (chr)
+#> 2 10.1371/journal.pone.0141854
+#> 3 10.1371/journal.pone.0073791
+#> 4 10.1371/journal.pone.0153419
+#> 5 10.1371/journal.pone.0115069
+#> # ... with 1 more variables: title <chr>
 ```
 
 Here, we search for the papers with the most citations
 
 
 ```r
-solr_search(q='_val_:"max(counter_total_all)"',
-    rows=5, fl='id,counter_total_all', fq='doc_type:full')
-#> Source: local data frame [5 x 2]
-#>
-#>                             id counter_total_all
-#>                          (chr)             (int)
-#> 1 10.1371/journal.pmed.0020124           1553063
-#> 2 10.1371/journal.pmed.0050045            378855
-#> 3 10.1371/journal.pcbi.0030102            374783
-#> 4 10.1371/journal.pone.0069841            366453
-#> 5 10.1371/journal.pone.0007595            362047
+cli$search(params = list(q='_val_:"max(counter_total_all)"',
+    rows=5, fl='id,counter_total_all', fq='doc_type:full'))
+#> # A tibble: 5 x 2
+#>                                                        id
+#>                                                     <chr>
+#> 1                            10.1371/journal.pmed.0020124
+#> 2 10.1371/annotation/80bd7285-9d2d-403a-8e6f-9c375bf977ca
+#> 3                            10.1371/journal.pcbi.1003149
+#> 4                            10.1371/journal.pone.0141854
+#> 5                            10.1371/journal.pcbi.0030102
+#> # ... with 1 more variables: counter_total_all <int>
 ```
 
 Or with the most tweets
 
 
 ```r
-solr_search(q='_val_:"max(alm_twitterCount)"',
-    rows=5, fl='id,alm_twitterCount', fq='doc_type:full')
-#> Source: local data frame [5 x 2]
-#>
+cli$search(params = list(q='_val_:"max(alm_twitterCount)"',
+    rows=5, fl='id,alm_twitterCount', fq='doc_type:full'))
+#> # A tibble: 5 x 2
 #>                             id alm_twitterCount
-#>                          (chr)            (int)
-#> 1 10.1371/journal.pone.0061981             2383
-#> 2 10.1371/journal.pone.0115069             2338
-#> 3 10.1371/journal.pmed.0020124             2169
-#> 4 10.1371/journal.pbio.1001535             1753
-#> 5 10.1371/journal.pone.0073791             1624
+#>                          <chr>            <int>
+#> 1 10.1371/journal.pone.0141854             3401
+#> 2 10.1371/journal.pmed.0020124             3207
+#> 3 10.1371/journal.pone.0115069             2873
+#> 4 10.1371/journal.pmed.1001953             2821
+#> 5 10.1371/journal.pone.0061981             2392
 ```
 
 ### Using specific data sources
@@ -388,27 +487,34 @@ The occurrences service
 
 
 ```r
-invisible(solr_connect("http://bison.usgs.ornl.gov/solrstaging/occurrences/select"))
-solr_search(q='*:*', fl=c('decimalLatitude','decimalLongitude','scientificName'), rows=2)
-#> Source: local data frame [2 x 3]
-#>
-#>   decimalLongitude decimalLatitude        scientificName
-#>              (dbl)           (dbl)                 (chr)
-#> 1         -98.2376         29.5502   Nyctanassa violacea
-#> 2         -98.2376         29.5502 Myiarchus cinerascens
+conn <- SolrClient$new(scheme = "https", host = "bison.usgs.gov", path = "solr/occurrences/select", port = NULL)
+conn$search(params = list(q = '*:*', fl = c('decimalLatitude','decimalLongitude','scientificName'), rows = 2))
+#> # A tibble: 2 x 3
+#>   decimalLongitude         scientificName decimalLatitude
+#>              <dbl>                  <chr>           <dbl>
+#> 1        -116.5694 Zonotrichia leucophrys        34.05072
+#> 2        -116.5694    Tyrannus vociferans        34.05072
 ```
 
 The species names service
 
 
 ```r
-invisible(solr_connect("http://bisonapi.usgs.ornl.gov/solr/scientificName/select"))
-solr_search(q='*:*', raw=TRUE)
-#> [1] "{\"responseHeader\":{\"status\":0,\"QTime\":12},\"response\":{\"numFound\":401329,\"start\":0,\"docs\":[{\"scientificName\":\"Catocala editha\",\"_version_\":1518645306257833984},{\"scientificName\":\"Dictyopteris polypodioides\",\"_version_\":1518645306259931136},{\"scientificName\":\"Lonicera iberica\",\"_version_\":1518645306259931137},{\"scientificName\":\"Pseudopomala brachyptera\",\"_version_\":1518645306259931138},{\"scientificName\":\"Lycopodium cernuum ingens\",\"_versio [...]
-#> attr(,"class")
-#> [1] "sr_search"
-#> attr(,"wt")
-#> [1] "json"
+conn <- SolrClient$new(scheme = "https", host = "bison.usgs.gov", path = "solr/scientificName/select", port = NULL)
+conn$search(params = list(q = '*:*'))
+#> # A tibble: 10 x 2
+#>                scientificName  `_version_`
+#>                         <chr>        <dbl>
+#>  1 Dictyopteris polypodioides 1.565325e+18
+#>  2           Lonicera iberica 1.565325e+18
+#>  3            Epuraea ambigua 1.565325e+18
+#>  4   Pseudopomala brachyptera 1.565325e+18
+#>  5    Didymosphaeria populina 1.565325e+18
+#>  6                   Sanoarca 1.565325e+18
+#>  7     Celleporina ventricosa 1.565325e+18
+#>  8         Trigonurus crotchi 1.565325e+18
+#>  9       Ceraticelus laticeps 1.565325e+18
+#> 10           Micraster acutus 1.565325e+18
 ```
 
 __PLOS Search API__
@@ -421,6 +527,11 @@ This isn't as complete as searching functions show above, but we're getting ther
 
 ### Cores
 
+
+```r
+conn <- SolrClient$new()
+```
+
 Many functions, e.g.:
 
 * `core_create()`
@@ -432,7 +543,7 @@ Create a core
 
 
 ```r
-core_create(name = "foo_bar")
+conn$core_create(name = "foo_bar")
 ```
 
 ### Collections
@@ -448,7 +559,7 @@ Create a collection
 
 
 ```r
-collection_create(name = "hello_world")
+conn$collection_create(name = "hello_world")
 ```
 
 ### Add documents
@@ -458,21 +569,21 @@ Add documents, supports adding from files (json, xml, or csv format), and from R
 
 ```r
 df <- data.frame(id = c(67, 68), price = c(1000, 500000000))
-add(df, name = "books")
+conn$add(df, name = "books")
 ```
 
 Delete documents, by id
 
 
 ```r
-delete_by_id(ids = c(3, 4))
+conn$delete_by_id(name = "books", ids = c(3, 4))
 ```
 
 Or by query
 
 
 ```r
-delete_by_query(query = "manu:bank")
+conn$delete_by_query(name = "books", query = "manu:bank")
 ```
 
 ## Meta
@@ -482,4 +593,4 @@ delete_by_query(query = "manu:bank")
 * Get citation information for `solrium` in R doing `citation(package = 'solrium')`
 * Please note that this project is released with a [Contributor Code of Conduct](CONDUCT.md). By participating in this project you agree to abide by its terms.
 
-[![ropensci_footer](http://ropensci.org/public_images/github_footer.png)](http://ropensci.org)
+[![ropensci_footer](https://ropensci.org/public_images/github_footer.png)](https://ropensci.org)
diff --git a/build/vignette.rds b/build/vignette.rds
index 8c713a4..e1ba240 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/inst/doc/local_setup.Rmd b/inst/doc/local_setup.Rmd
index 290ff07..0320cda 100644
--- a/inst/doc/local_setup.Rmd
+++ b/inst/doc/local_setup.Rmd
@@ -4,7 +4,7 @@
 %\VignetteEncoding{UTF-8}
 -->
 
-Local Solr setup 
+Local Solr setup
 ======
 
 ### OSX
@@ -29,7 +29,7 @@ bunch of documents
 
 #### Linuxbrew
 
-[Linuxbrew](http://brew.sh/linuxbrew/) is a port of Mac OS homebrew to linux.  Operation is essentially the same as for homebrew.  Follow the [installation instructions for linuxbrew](http://brew.sh/linuxbrew/#installation) and then the instructions for using homebrew (above) should work without modification.
+[Linuxbrew](http://linuxbrew.sh/) is a port of Mac OS Homebrew to Linux. Operation is essentially the same as for Homebrew. Follow the installation instructions for Linuxbrew, and then the instructions for using Homebrew (above) should work without modification.
 
 ### Windows
 
diff --git a/inst/doc/local_setup.html b/inst/doc/local_setup.html
index ea12978..c2097e2 100644
--- a/inst/doc/local_setup.html
+++ b/inst/doc/local_setup.html
@@ -3,7 +3,7 @@
 <head>
 <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
 
-<title>Local Solr setup </title>
+<title>Local Solr setup</title>
 
 <script type="text/javascript">
 window.onload = function() {
@@ -207,7 +207,7 @@ hr {
 %\VignetteEncoding{UTF-8}
 -->
 
-<h1>Local Solr setup </h1>
+<h1>Local Solr setup</h1>
 
 <h3>OSX</h3>
 
@@ -234,7 +234,7 @@ bunch of documents</li>
 
 <h4>Linuxbrew</h4>
 
-<p><a href="http://brew.sh/linuxbrew/">Linuxbrew</a> is a port of Mac OS homebrew to linux.  Operation is essentially the same as for homebrew.  Follow the <a href="http://brew.sh/linuxbrew/#installation">installation instructions for linuxbrew</a> and then the instructions for using homebrew (above) should work without modification.</p>
+<p><a href="http://linuxbrew.sh/">Linuxbrew</a> is a port of Mac OS Homebrew to Linux.  Operation is essentially the same as for Homebrew.  Follow the installation instructions for Linuxbrew, and then the instructions for using Homebrew (above) should work without modification.</p>
 
 <h3>Windows</h3>
 
diff --git a/man/SolrClient.Rd b/man/SolrClient.Rd
new file mode 100644
index 0000000..8715874
--- /dev/null
+++ b/man/SolrClient.Rd
@@ -0,0 +1,155 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SolrClient.R
+\docType{data}
+\name{SolrClient}
+\alias{SolrClient}
+\title{Solr connection client}
+\arguments{
+\item{host}{(character) Host URL. Default: 127.0.0.1}
+
+\item{path}{(character) url path.}
+
+\item{port}{(character/numeric) Port. Default: 8983}
+
+\item{scheme}{(character) http scheme, one of http or https. Default: http}
+
+\item{proxy}{List of arguments for a proxy connection, including one or
+more of: url, port, username, password, and auth. See
+\link[crul:proxy]{crul::proxy} for  help, which is used to construct the
+proxy connection.}
+
+\item{errors}{(character) One of \code{"simple"} or \code{"complete"}. Simple gives
+http code and error message on an error, while complete gives both http
+code and error message, and stack trace, if available.}
+}
+\value{
+Various output, see help files for each grouping of methods.
+}
+\description{
+Solr connection client
+}
+\details{
+\code{SolrClient} creates an R6 class object. The object is
+not cloneable and is portable, so it can be inherited across packages
+without complication.
+
+\code{SolrClient} is used to initialize a client that knows about your
+Solr instance, with options for setting host, port, http scheme,
+and simple vs. complete error reporting
+}
+\section{SolrClient methods}{
+
+
+Each of these methods also has a matching standalone exported
+function that you can use by passing in the connection object made
+by calling \code{SolrClient$new()}. Also, see the docs for each method for
+parameter definitions and their default values.
+\itemize{
+\item \code{ping(name, wt = 'json', raw = FALSE, ...)}
+\item \code{schema(name, what = '', raw = FALSE, ...)}
+\item \code{commit(name, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, wt = 'json', raw = FALSE, ...)}
+\item \code{optimize(name, max_segments = 1, wait_searcher = TRUE, soft_commit = FALSE, wt = 'json', raw = FALSE, ...)}
+\item \code{config_get(name, what = NULL, wt = "json", raw = FALSE, ...)}
+\item \code{config_params(name, param = NULL, set = NULL, unset = NULL, update = NULL, ...)}
+\item \code{config_overlay(name, omitHeader = FALSE, ...)}
+\item \code{config_set(name, set = NULL, unset = NULL, ...)}
+\item \code{collection_exists(name, ...)}
+\item \code{collection_list(raw = FALSE, ...)}
+\item \code{collection_create(name, numShards = 1, maxShardsPerNode = 1, createNodeSet = NULL, collection.configName = NULL, replicationFactor = 1, router.name = NULL, shards = NULL, createNodeSet.shuffle = TRUE, router.field = NULL, autoAddReplicas = FALSE, async = NULL, raw = FALSE, callopts=list(), ...)}
+\item \code{collection_addreplica(name, shard = NULL, route = NULL, node = NULL, instanceDir = NULL, dataDir = NULL, async = NULL, raw = FALSE, callopts=list(), ...)}
+\item \code{collection_addreplicaprop(name, shard, replica, property, property.value, shardUnique = FALSE, raw = FALSE, callopts=list())}
+\item \code{collection_addrole(role = "overseer", node, raw = FALSE, ...)}
+\item \code{collection_balanceshardunique(name, property, onlyactivenodes = TRUE, shardUnique = NULL, raw = FALSE, ...)}
+\item \code{collection_clusterprop(name, val, raw = FALSE, callopts=list())}
+\item \code{collection_clusterstatus(name = NULL, shard = NULL, raw = FALSE, ...)}
+\item \code{collection_createalias(alias, collections, raw = FALSE, ...)}
+\item \code{collection_createshard(name, shard, createNodeSet = NULL, raw = FALSE, ...)}
+\item \code{collection_delete(name, raw = FALSE, ...)}
+\item \code{collection_deletealias(alias, raw = FALSE, ...)}
+\item \code{collection_deletereplica(name, shard = NULL, replica = NULL, onlyIfDown = FALSE, raw = FALSE, callopts=list(), ...)}
+\item \code{collection_deletereplicaprop(name, shard, replica, property, raw = FALSE, callopts=list())}
+\item \code{collection_deleteshard(name, shard, raw = FALSE, ...)}
+\item \code{collection_migrate(name, target.collection, split.key, forward.timeout = NULL, async = NULL, raw = FALSE, ...)}
+\item \code{collection_overseerstatus(raw = FALSE, ...)}
+\item \code{collection_rebalanceleaders(name, maxAtOnce = NULL, maxWaitSeconds = NULL, raw = FALSE, ...)}
+\item \code{collection_reload(name, raw = FALSE, ...)}
+\item \code{collection_removerole(role = "overseer", node, raw = FALSE, ...)}
+\item \code{collection_requeststatus(requestid, raw = FALSE, ...)}
+\item \code{collection_splitshard(name, shard, ranges = NULL, split.key = NULL, async = NULL, raw = FALSE, ...)}
+\item \code{core_status(name = NULL, indexInfo = TRUE, raw = FALSE, callopts=list())}
+\item \code{core_exists(name, callopts = list())}
+\item \code{core_create(name, instanceDir = NULL, config = NULL, schema = NULL, dataDir = NULL, configSet = NULL, collection = NULL, shard = NULL, async=NULL, raw = FALSE, callopts=list(), ...)}
+\item \code{core_unload(name, deleteIndex = FALSE, deleteDataDir = FALSE, deleteInstanceDir = FALSE, async = NULL, raw = FALSE, callopts = list())}
+\item \code{core_rename(name, other, async = NULL, raw = FALSE, callopts=list())}
+\item \code{core_reload(name, raw = FALSE, callopts=list())}
+\item \code{core_swap(name, other, async = NULL, raw = FALSE, callopts=list())}
+\item \code{core_mergeindexes(name, indexDir = NULL, srcCore = NULL, async = NULL, raw = FALSE, callopts = list())}
+\item \code{core_requeststatus(requestid, raw = FALSE, callopts = list())}
+\item \code{core_split(name, path = NULL, targetCore = NULL, ranges = NULL, split.key = NULL, async = NULL, raw = FALSE, callopts=list())}
+\item \code{search(name = NULL, params = NULL, body = NULL, callopts = list(), raw = FALSE, parsetype = 'df', concat = ',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)}
+\item \code{facet(name = NULL, params = NULL, body = NULL, callopts = list(), raw = FALSE, parsetype = 'df', concat = ',', ...)}
+\item \code{stats(name = NULL, params = list(q = '*:*', stats.field = NULL, stats.facet = NULL), body = NULL, callopts=list(), raw = FALSE, parsetype = 'df', ...)}
+\item \code{highlight(name = NULL, params = NULL, body = NULL, callopts=list(), raw = FALSE, parsetype = 'df', ...)}
+\item \code{group(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', ...)}
+\item \code{mlt(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)}
+\item \code{all(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)}
+\item \code{get(ids, name, fl = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{add(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{delete_by_id(ids, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{delete_by_query(query, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{update_json(files, name, commit = TRUE, optimize = FALSE, max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{update_xml(files, name, commit = TRUE, optimize = FALSE, max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{update_csv(files, name, separator = ',', header = TRUE, fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE, encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL, map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL, overwrite = NULL, commit = NULL, wt = 'json', raw = FALSE, ...)}
+\item \code{update_atomic_json(body, name, wt = 'json', raw = FALSE, ...)}
+\item \code{update_atomic_xml(body, name, wt = 'json', raw = FALSE, ...)}
+}
+}
+
+\examples{
+\dontrun{
+# make a client
+(cli <- SolrClient$new())
+
+# variables
+cli$host
+cli$port
+cli$path
+cli$scheme
+
+# ping
+## ping to make sure it's up
+cli$ping("gettingstarted")
+
+# version
+## get Solr version information
+cli$schema("gettingstarted")
+cli$schema("gettingstarted", "fields")
+cli$schema("gettingstarted", "name")
+cli$schema("gettingstarted", "version")$version
+
+# Search
+cli$search("gettingstarted", params = list(q = "*:*"))
+cli$search("gettingstarted", body = list(query = "*:*"))
+
+# set a different host
+SolrClient$new(host = 'stuff.com')
+
+# set a different port
+SolrClient$new(port = 3456)
+
+# set a different http scheme
+SolrClient$new(scheme = 'https')
+
+# set a proxy
+SolrClient$new(proxy = list(url = "187.62.207.130:3128"))
+
+prox <- list(url = "187.62.207.130:3128", user = "foo", pwd = "bar")
+cli <- SolrClient$new(proxy = prox)
+cli$proxy
+
+# A remote Solr instance to which you don't have admin access
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
+cli$search(params = list(q = "memory"))
+}
+}
+\keyword{datasets}
diff --git a/man/add.Rd b/man/add.Rd
index acc0704..8367ec5 100644
--- a/man/add.Rd
+++ b/man/add.Rd
@@ -4,60 +4,63 @@
 \alias{add}
 \title{Add documents from R objects}
 \usage{
-add(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
+add(x, conn, name, commit = TRUE, commit_within = NULL, overwrite = TRUE,
   boost = NULL, wt = "json", raw = FALSE, ...)
 }
 \arguments{
 \item{x}{Documents, either as rows in a data.frame, or a list.}
 
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) A collection or core name. Required.}
 
-\item{commit}{(logical) If \code{TRUE}, documents immediately searchable. 
+\item{commit}{(logical) If \code{TRUE}, documents immediately searchable.
 Default: \code{TRUE}}
 
-\item{commit_within}{(numeric) Milliseconds to commit the change, the 
+\item{commit_within}{(numeric) Milliseconds to commit the change, the
 document will be added within that time. Default: NULL}
 
-\item{overwrite}{(logical) Overwrite documents with matching keys. 
+\item{overwrite}{(logical) Overwrite documents with matching keys.
 Default: \code{TRUE}}
 
 \item{boost}{(numeric) Boost factor. Default: NULL}
 
-\item{wt}{(character) One of json (default) or xml. If json, uses 
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to 
+\item{wt}{(character) One of json (default) or xml. If json, uses
+\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
 parse}
 
-\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by 
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Add documents from R objects
 }
 \details{
-Works for Collections as well as Cores (in SolrCloud and Standalone 
+Works for Collections as well as Cores (in SolrCloud and Standalone
 modes, respectively)
 }
 \examples{
 \dontrun{
-solr_connect()
+(cli <- SolrClient$new())
 
 # create the boooks collection
-if (!collection_exists("books")) {
-  collection_create(name = "books", numShards = 2)
+if (!collection_exists(cli, "books")) {
+  collection_create(cli, name = "books", numShards = 1)
 }
 
 # Documents in a list
 ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-add(ss, name = "books")
+add(ss, cli, name = "books")
+cli$get(c(1, 2), "books")
 
 # Documents in a data.frame
 ## Simple example
 df <- data.frame(id = c(67, 68), price = c(1000, 500000000))
-add(x = df, "books")
+add(df, cli, "books")
 df <- data.frame(id = c(77, 78), price = c(1, 2.40))
-add(x = df, "books")
+add(df, "books")
 
 ## More complex example, get file from package examples
 # start Solr in Schemaless mode first: bin/solr start -e schemaless
@@ -82,7 +85,6 @@ add(ss, name = "books", wt = "xml", raw = TRUE)
 }
 }
 \seealso{
-\code{\link{update_json}}, \code{\link{update_xml}}, 
+\code{\link{update_json}}, \code{\link{update_xml}},
 \code{\link{update_csv}} for adding documents from files
 }
-
diff --git a/man/collapse_pivot_names.Rd b/man/collapse_pivot_names.Rd
index fa326d3..43bc93a 100644
--- a/man/collapse_pivot_names.Rd
+++ b/man/collapse_pivot_names.Rd
@@ -21,4 +21,3 @@ into 2 columns assuming that the first column of every set of 3
 This type of structure is usually returned by facet.pivot responses.
 }
 \keyword{internal}
-
diff --git a/man/collectargs.Rd b/man/collectargs.Rd
deleted file mode 100644
index 1bd81b9..0000000
--- a/man/collectargs.Rd
+++ /dev/null
@@ -1,15 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/zzz.r
-\name{collectargs}
-\alias{collectargs}
-\title{Function to make a list of args passing arg names through multiargs function.}
-\usage{
-collectargs(x)
-}
-\arguments{
-\item{x}{Value}
-}
-\description{
-Function to make a list of args passing arg names through multiargs function.
-}
-
diff --git a/man/collection_addreplica.Rd b/man/collection_addreplica.Rd
index 49c3a3d..e4bd33d 100644
--- a/man/collection_addreplica.Rd
+++ b/man/collection_addreplica.Rd
@@ -4,12 +4,14 @@
 \alias{collection_addreplica}
 \title{Add a replica}
 \usage{
-collection_addreplica(name, shard = NULL, route = NULL, node = NULL,
+collection_addreplica(conn, name, shard = NULL, route = NULL, node = NULL,
   instanceDir = NULL, dataDir = NULL, async = NULL, raw = FALSE,
   callopts = list(), ...)
 }
 \arguments{
-\item{name}{(character) The name of the collection. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the collection. Required}
 
 \item{shard}{(character) The name of the shard to which replica is to be added.
 If \code{shard} is not given, then \code{route} must be.}
@@ -29,7 +31,7 @@ asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{...}{You can pass in parameters like \code{property.name=value} to set
 core property name to value. See the section Defining core.properties for details on
@@ -42,25 +44,25 @@ specified if the replica is to be created in a specific node
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-if (!collection_exists("foobar")) {
-  collection_create(name = "foobar", numShards = 2) # bin/solr create -c foobar
+if (!conn$collection_exists("foobar")) {
+  conn$collection_create(name = "foobar", numShards = 2)
+  # OR bin/solr create -c foobar
 }
 
 # status
-collection_clusterstatus()$cluster$collections$foobar
+conn$collection_clusterstatus()$cluster$collections$foobar
 
 # add replica
-if (!collection_exists("foobar")) {
-  collection_addreplica(name = "foobar", shard = "shard1")
+if (!conn$collection_exists("foobar")) {
+  conn$collection_addreplica(name = "foobar", shard = "shard1")
 }
 
 # status again
-collection_clusterstatus()$cluster$collections$foobar
-collection_clusterstatus()$cluster$collections$foobar$shards
-collection_clusterstatus()$cluster$collections$foobar$shards$shard1
+conn$collection_clusterstatus()$cluster$collections$foobar
+conn$collection_clusterstatus()$cluster$collections$foobar$shards
+conn$collection_clusterstatus()$cluster$collections$foobar$shards$shard1
 }
 }
-
diff --git a/man/collection_addreplicaprop.Rd b/man/collection_addreplicaprop.Rd
index a26a1c4..7f2708b 100644
--- a/man/collection_addreplicaprop.Rd
+++ b/man/collection_addreplicaprop.Rd
@@ -4,30 +4,34 @@
 \alias{collection_addreplicaprop}
 \title{Add a replica property}
 \usage{
-collection_addreplicaprop(name, shard, replica, property, property.value,
+collection_addreplicaprop(conn, name, shard, replica, property, property.value,
   shardUnique = FALSE, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection this replica belongs to.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{shard}{(character) Required. The name of the shard the replica belongs to.}
+\item{name}{(character) Required. The name of the collection this replica
+belongs to}
+
+\item{shard}{(character) Required. The name of the shard the replica
+belongs to}
 
 \item{replica}{(character) Required. The replica, e.g. core_node1.}
 
-\item{property}{(character) Required. The property to add. Note: this will have the
-literal 'property.' prepended to distinguish it from system-maintained properties.
-So these two forms are equivalent: \code{property=special} and
-\code{property=property.special}}
+\item{property}{(character) Required. The property to add. Note: this will
+have the literal 'property.' prepended to distinguish it from
+system-maintained properties. So these two forms are equivalent:
+\code{property=special} and \code{property=property.special}}
 
-\item{property.value}{(character) Required. The value to assign to the property.}
+\item{property.value}{(character) Required. The value to assign to
+the property}
 
 \item{shardUnique}{(logical) If \code{TRUE}, then setting this property in one
-replica will (1) remove the property from all other replicas in that shard.
+replica will remove the property from all other replicas in that shard.
 Default: \code{FALSE}}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Assign an arbitrary property to a particular replica and give it
@@ -36,21 +40,23 @@ with the new value.
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "addrep", numShards = 2) # bin/solr create -c addrep
+if (!conn$collection_exists("addrep")) {
+  conn$collection_create(name = "addrep", numShards = 1)
+  # OR bin/solr create -c addrep
+}
 
 # status
-collection_clusterstatus()$cluster$collections$addrep$shards
+conn$collection_clusterstatus()$cluster$collections$addrep$shards
 
 # add the value world to the property hello
-collection_addreplicaprop(name = "addrep", shard = "shard1", replica = "core_node1",
-   property = "hello", property.value = "world")
+conn$collection_addreplicaprop(name = "addrep", shard = "shard1",
+  replica = "core_node1", property = "hello", property.value = "world")
 
 # check status
-collection_clusterstatus()$cluster$collections$addrep$shards
-collection_clusterstatus()$cluster$collections$addrep$shards$shard1$replicas$core_node1
+conn$collection_clusterstatus()$cluster$collections$addrep$shards
+conn$collection_clusterstatus()$cluster$collections$addrep$shards$shard1$replicas$core_node1
 }
 }
-
diff --git a/man/collection_addrole.Rd b/man/collection_addrole.Rd
index 4f1d475..877a61f 100644
--- a/man/collection_addrole.Rd
+++ b/man/collection_addrole.Rd
@@ -4,9 +4,11 @@
 \alias{collection_addrole}
 \title{Add a role to a node}
 \usage{
-collection_addrole(role = "overseer", node, raw = FALSE, ...)
+collection_addrole(conn, role = "overseer", node, raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{role}{(character) Required. The name of the role. The only supported role
 as of now is overseer (set as default).}
 
@@ -15,7 +17,7 @@ role even before that node is started.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Assign a role to a given node in the cluster. The only supported role
@@ -28,11 +30,10 @@ are up and running
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # get list of nodes
-nodes <- collection_clusterstatus()$cluster$live_nodes
-collection_addrole(node = nodes[1])
+nodes <- conn$collection_clusterstatus()$cluster$live_nodes
+collection_addrole(conn, node = nodes[1])
 }
 }
-
diff --git a/man/collection_balanceshardunique.Rd b/man/collection_balanceshardunique.Rd
index b9d0fc1..8239b2c 100644
--- a/man/collection_balanceshardunique.Rd
+++ b/man/collection_balanceshardunique.Rd
@@ -4,46 +4,54 @@
 \alias{collection_balanceshardunique}
 \title{Balance a property}
 \usage{
-collection_balanceshardunique(name, property, onlyactivenodes = TRUE,
+collection_balanceshardunique(conn, name, property, onlyactivenodes = TRUE,
   shardUnique = NULL, raw = FALSE, ...)
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection to balance the property in}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{property}{(character) Required. The property to balance. The literal "property."
-is prepended to this property if not specified explicitly.}
+\item{name}{(character) Required. The name of the collection to balance
+the property in}
 
-\item{onlyactivenodes}{(logical) Normally, the property is instantiated on active
-nodes only. If \code{FALSE}, then inactive nodes are also included for distribution.
-Default: \code{TRUE}}
+\item{property}{(character) Required. The property to balance. The literal
+"property." is prepended to this property if not specified explicitly.}
 
-\item{shardUnique}{(logical) Something of a safety valve. There is one pre-defined
-property (preferredLeader) that defaults this value to \code{TRUE}. For all other
-properties that are balanced, this must be set to \code{TRUE} or an error message is
-returned}
+\item{onlyactivenodes}{(logical) Normally, the property is instantiated
+on active nodes only. If \code{FALSE}, then inactive nodes are also included
+for distribution. Default: \code{TRUE}}
+
+\item{shardUnique}{(logical) Something of a safety valve. There is one
+pre-defined property (preferredLeader) that defaults this value to \code{TRUE}.
+For all other properties that are balanced, this must be set to \code{TRUE} or
+an error message is returned}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
-Insures that a particular property is distributed evenly amongst the
-physical nodes that make up a collection. If the property already exists on a replica,
-every effort is made to leave it there. If the property is not on any replica on a
-shard one is chosen and the property is added.
+Ensures that a particular property is distributed evenly
+amongst the physical nodes that make up a collection. If the property
+already exists on a replica, every effort is made to leave it there. If the
+property is not on any replica on a shard one is chosen and the property
+is added.
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "mycollection") # bin/solr create -c mycollection
+if (!conn$collection_exists("addrep")) {
+  conn$collection_create(name = "mycollection")
+  # OR: bin/solr create -c mycollection
+}
 
 # balance preferredLeader property
-collection_balanceshardunique("mycollection", property = "preferredLeader")
+conn$collection_balanceshardunique("mycollection", property = "preferredLeader")
 
 # examine cluster status
-collection_clusterstatus()$cluster$collections$mycollection
+conn$collection_clusterstatus()$cluster$collections$mycollection
 }
 }
-
diff --git a/man/collection_clusterprop.Rd b/man/collection_clusterprop.Rd
index 2cb2122..f0e0fa5 100644
--- a/man/collection_clusterprop.Rd
+++ b/man/collection_clusterprop.Rd
@@ -4,39 +4,39 @@
 \alias{collection_clusterprop}
 \title{Add, edit, delete a cluster-wide property}
 \usage{
-collection_clusterprop(name, val, raw = FALSE, callopts = list())
+collection_clusterprop(conn, name, val, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) Required. The name of the property. The two supported
-properties names are urlScheme and autoAddReplicas. Other names are rejected
-with an error}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) Required. The name of the property. The two
+supported property names are urlScheme and autoAddReplicas; other names
+are rejected with an error}
 
 \item{val}{(character) Required. The value of the property. If the value is
 empty or null, the property is unset.}
 
-\item{raw}{(logical) If \code{TRUE}, returns raw data}
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
+\code{wt} param}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
-Important: whether add, edit, or delete is used is determined by
-the value passed to the \code{val} parameter. If the property name is
-new, it will be added. If the property name exists, and the value is different,
-it will be edited. If the property name exists, and the value is NULL or empty
-the property is deleted (unset).
+Important: whether add, edit, or delete is used is determined
+by the value passed to the \code{val} parameter. If the property name is
+new, it will be added. If the property name exists, and the value is
+different, it will be edited. If the property name exists, and the value
+is \code{NULL} or empty the property is deleted (unset).
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # add the value https to the property urlScheme
-collection_clusterprop(name = "urlScheme", val = "https")
+collection_clusterprop(conn, name = "urlScheme", val = "https")
 
 # status again
-collection_clusterstatus()$cluster$properties
+collection_clusterstatus(conn)$cluster$properties
 
 # delete the property urlScheme by setting val to NULL or a 0 length string
-collection_clusterprop(name = "urlScheme", val = "")
+collection_clusterprop(conn, name = "urlScheme", val = "")
 }
 }
-
diff --git a/man/collection_clusterstatus.Rd b/man/collection_clusterstatus.Rd
index bb86d8a..e212149 100644
--- a/man/collection_clusterstatus.Rd
+++ b/man/collection_clusterstatus.Rd
@@ -4,28 +4,34 @@
 \alias{collection_clusterstatus}
 \title{Get cluster status}
 \usage{
-collection_clusterstatus(name = NULL, shard = NULL, raw = FALSE, ...)
+collection_clusterstatus(conn, name = NULL, shard = NULL, raw = FALSE,
+  ...)
 }
 \arguments{
-\item{name}{(character) The collection name for which information is requested.
-If omitted, information on all collections in the cluster will be returned.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{shard}{(character) The shard(s) for which information is requested. Multiple
-shard names can be specified as a character vector.}
+\item{name}{(character) The collection name for which information is
+requested. If omitted, information on all collections in the cluster will
+be returned}
+
+\item{shard}{(character) The shard(s) for which information is requested.
+Multiple shard names can be specified as a character vector.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
-Fetch the cluster status including collections, shards, replicas,
-configuration name as well as collection aliases and cluster properties.
+Fetch the cluster status including collections, shards,
+replicas, configuration name as well as collection aliases and cluster
+properties.
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_clusterstatus()
-res <- collection_clusterstatus()
+(conn <- SolrClient$new())
+conn$collection_clusterstatus()
+res <- conn$collection_clusterstatus()
 res$responseHeader
 res$cluster
 res$cluster$collections
@@ -33,4 +39,3 @@ res$cluster$collections$gettingstarted
 res$cluster$live_nodes
 }
 }
-
diff --git a/man/collection_create.Rd b/man/collection_create.Rd
index bd0c14c..67a1cd0 100644
--- a/man/collection_create.Rd
+++ b/man/collection_create.Rd
@@ -4,7 +4,7 @@
 \alias{collection_create}
 \title{Add a collection}
 \usage{
-collection_create(name, numShards = 2, maxShardsPerNode = 1,
+collection_create(conn, name, numShards = 1, maxShardsPerNode = 1,
   createNodeSet = NULL, collection.configName = NULL,
   replicationFactor = 1, router.name = NULL, shards = NULL,
   createNodeSet.shuffle = TRUE, router.field = NULL,
@@ -12,7 +12,9 @@ collection_create(name, numShards = 2, maxShardsPerNode = 1,
   ...)
 }
 \arguments{
-\item{name}{The name of the collection to be created. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the collection to be created. Required}
 
 \item{numShards}{(integer) The number of shards to be created as part of the
 collection. This is a required parameter when using the 'compositeId' router.}
@@ -59,7 +61,7 @@ if createNodeSet is not also specified. Default: \code{TRUE}}
 value of the field in an input document to compute the hash and identify a shard instead of
 looking at the uniqueKey field. If the field specified is null in the document, the document
 will be rejected. Please note that RealTime Get or retrieval by id would also require the
-parameter _route_ (or shard.keys) to avoid a distributed search.}
+parameter \code{_route_} (or shard.keys) to avoid a distributed search.}
 
 \item{autoAddReplicas}{(logical) When set to true, enables auto addition of replicas on
 shared file systems. See the section autoAddReplicas Settings for more details on settings
@@ -70,37 +72,26 @@ asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{...}{You can pass in parameters like \code{property.name=value} to set
-core property name to value. See the section Defining core.properties for details on
-supported properties and values.
-(https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)}
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
 Add a collection
 }
 \examples{
 \dontrun{
-solr_connect()
+# connect
+(cli <- SolrClient$new())
 
-if (!collection_exists("foobar")) {
-  collection_delete(name = "helloWorld")
-  collection_create(name = "helloWorld", numShards = 2)
+if (!cli$collection_exists("helloWorld")) {
+  cli$collection_create(name = "helloWorld")
 }
-if (!collection_exists("foobar")) {
-  collection_delete(name = "tablesChairs")
-  collection_create(name = "tablesChairs")
+if (!cli$collection_exists("tablesChairs")) {
+  cli$collection_create(name = "tablesChairs")
 }
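+
+# core properties can be passed through the ... argument (a sketch;
+# the property value shown is hypothetical)
+# cli$collection_create(name = "tablesChairs2",
+#   property.dataDir = "/tmp/tablesChairs2")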
-
-# you may have to do this if you don't want to use 
-# bin/solr or use zookeeper directly
-path <- "~/solr-5.4.1/server/solr/newcore/conf"
-dir.create(path, recursive = TRUE)
-files <- list.files("~/solr-5.4.1/server/solr/configsets/data_driven_schema_configs/conf/",
-full.names = TRUE)
-invisible(file.copy(files, path, recursive = TRUE))
-collection_create(name = "newcore", collection.configName = "newcore")
 }
 }
-
diff --git a/man/collection_createalias.Rd b/man/collection_createalias.Rd
index c586352..5f8efab 100644
--- a/man/collection_createalias.Rd
+++ b/man/collection_createalias.Rd
@@ -4,16 +4,20 @@
 \alias{collection_createalias}
 \title{Create an alias for a collection}
 \usage{
-collection_createalias(alias, collections, raw = FALSE, ...)
+collection_createalias(conn, alias, collections, raw = FALSE,
+  callopts = list())
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{alias}{(character) Required. The alias name to be created}
 
-\item{collections}{(character) Required. A character vector of collections to be aliased}
+\item{collections}{(character) Required. A character vector of collections
+to be aliased}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \code{\link[crul]{HttpClient}}}
 }
 \description{
 Create a new alias pointing to one or more collections. If an
@@ -22,10 +26,13 @@ alias, effectively acting like an atomic "MOVE" command.
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_create(name = "thingsstuff", numShards = 2)
-collection_createalias("tstuff", "thingsstuff")
-collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
-}
+(conn <- SolrClient$new())
+
+if (!conn$collection_exists("thingsstuff")) {
+  conn$collection_create(name = "thingsstuff")
 }
 
+conn$collection_createalias("tstuff", "thingsstuff")
+conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases
+}
+}
diff --git a/man/collection_createshard.Rd b/man/collection_createshard.Rd
index 9ed74ac..6d30c51 100644
--- a/man/collection_createshard.Rd
+++ b/man/collection_createshard.Rd
@@ -4,32 +4,37 @@
 \alias{collection_createshard}
 \title{Create a shard}
 \usage{
-collection_createshard(name, shard, createNodeSet = NULL, raw = FALSE, ...)
+collection_createshard(conn, name, shard, createNodeSet = NULL, raw = FALSE,
+  ...)
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection that includes the shard
-that will be splitted.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) Required. The name of the collection in which the
+shard is to be created}
 
 \item{shard}{(character) Required. The name of the shard to be created.}
 
 \item{createNodeSet}{(character) Allows defining the nodes to spread the new
-collection across. If not provided, the CREATE operation will create shard-replica
-spread across all live Solr nodes. The format is a comma-separated list of
-node_names, such as localhost:8983_solr, localhost:8984_s olr, localhost:8985_solr.}
+collection across. If not provided, the CREATE operation will create
+shard-replica spread across all live Solr nodes. The format is a
+comma-separated list of node_names, such as localhost:8983_solr,
+localhost:8984_solr, localhost:8985_solr.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
 Create a shard
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 ## FIXME - doesn't work right now
-# collection_create(name = "trees")
-# collection_createshard(name = "trees", shard = "newshard")
+# conn$collection_create(name = "trees")
+# conn$collection_createshard(name = "trees", shard = "newshard")
 }
 }
-
diff --git a/man/collection_delete.Rd b/man/collection_delete.Rd
index 9a1000f..8c1f959 100644
--- a/man/collection_delete.Rd
+++ b/man/collection_delete.Rd
@@ -4,23 +4,28 @@
 \alias{collection_delete}
 \title{Delete a collection}
 \usage{
-collection_delete(name, raw = FALSE, ...)
+collection_delete(conn, name, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{The name of the collection to be created. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the collection to be deleted. Required}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \code{\link[crul]{HttpClient}}}
 }
 \description{
 Delete a collection
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_create(name = "helloWorld")
-collection_delete(name = "helloWorld")
-}
+(conn <- SolrClient$new())
+
+if (!conn$collection_exists("helloWorld")) {
+  conn$collection_create(name = "helloWorld")
 }
 
+collection_delete(conn, name = "helloWorld")
+}
+}
diff --git a/man/collection_deletealias.Rd b/man/collection_deletealias.Rd
index 1fbf925..a9a6457 100644
--- a/man/collection_deletealias.Rd
+++ b/man/collection_deletealias.Rd
@@ -4,26 +4,31 @@
 \alias{collection_deletealias}
 \title{Delete a collection alias}
 \usage{
-collection_deletealias(alias, raw = FALSE, ...)
+collection_deletealias(conn, alias, raw = FALSE, callopts = list())
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{alias}{(character) Required. The alias name to be deleted}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Delete a collection alias
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_create(name = "thingsstuff", numShards = 2)
-collection_createalias("tstuff", "thingsstuff")
-collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
-collection_deletealias("tstuff")
-collection_clusterstatus()$cluster$collections$thingsstuff$aliases # gone
-}
+(conn <- SolrClient$new())
+
+if (!conn$collection_exists("thingsstuff")) {
+  conn$collection_create(name = "thingsstuff")
 }
 
+conn$collection_createalias("tstuff", "thingsstuff")
+conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases # new alias
+conn$collection_deletealias("tstuff")
+conn$collection_clusterstatus()$cluster$collections$thingsstuff$aliases # gone
+}
+}
diff --git a/man/collection_deletereplica.Rd b/man/collection_deletereplica.Rd
index 03198ac..e10a31f 100644
--- a/man/collection_deletereplica.Rd
+++ b/man/collection_deletereplica.Rd
@@ -4,10 +4,12 @@
 \alias{collection_deletereplica}
 \title{Delete a replica}
 \usage{
-collection_deletereplica(name, shard = NULL, replica = NULL,
+collection_deletereplica(conn, name, shard = NULL, replica = NULL,
   onlyIfDown = FALSE, raw = FALSE, callopts = list(), ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) Required. The name of the collection.}
 
 \item{shard}{(character) Required. The name of the shard that includes the replica to
@@ -20,7 +22,7 @@ is active. Default: \code{FALSE}}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{...}{You can pass in parameters like \code{property.name=value} to set
 core property name to value. See the section Defining core.properties for details on
@@ -36,24 +38,25 @@ unregistered.
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "foobar2", numShards = 2) # bin/solr create -c foobar2
+if (!conn$collection_exists("foobar2")) {
+  conn$collection_create(name = "foobar2", maxShardsPerNode = 2)
+}
 
 # status
-collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
+conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
 
 # add replica
-collection_addreplica(name = "foobar2", shard = "shard1")
+conn$collection_addreplica(name = "foobar2", shard = "shard1")
 
 # delete replica
 ## get replica name
-nms <- names(collection_clusterstatus()$cluster$collections$foobar2$shards$shard1$replicas)
-collection_deletereplica(name = "foobar2", shard = "shard1", replica = nms[1])
+nms <- names(conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1$replicas)
+conn$collection_deletereplica(name = "foobar2", shard = "shard1", replica = nms[1])
 
 # status again
-collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
+conn$collection_clusterstatus()$cluster$collections$foobar2$shards$shard1
 }
 }
-
diff --git a/man/collection_deletereplicaprop.Rd b/man/collection_deletereplicaprop.Rd
index d1a9666..575d736 100644
--- a/man/collection_deletereplicaprop.Rd
+++ b/man/collection_deletereplicaprop.Rd
@@ -4,52 +4,57 @@
 \alias{collection_deletereplicaprop}
 \title{Delete a replica property}
 \usage{
-collection_deletereplicaprop(name, shard, replica, property, raw = FALSE,
-  callopts = list())
+collection_deletereplicaprop(conn, name, shard, replica, property,
+  raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection this replica belongs to.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{shard}{(character) Required. The name of the shard the replica belongs to.}
+\item{name}{(character) Required. The name of the collection this replica
+belongs to}
+
+\item{shard}{(character) Required. The name of the shard the replica
+belongs to.}
 
 \item{replica}{(character) Required. The replica, e.g. core_node1.}
 
-\item{property}{(character) Required. The property to delete. Note: this will have the
-literal 'property.' prepended to distinguish it from system-maintained properties.
-So these two forms are equivalent: \code{property=special} and
-\code{property=property.special}}
+\item{property}{(character) Required. The property to delete. Note: this
+will have the literal 'property.' prepended to distinguish it from
+system-maintained properties. So these two forms are equivalent:
+\code{property=special} and \code{property=property.special}}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Deletes an arbitrary property from a particular replica.
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "deleterep", numShards = 2) # bin/solr create -c deleterep
+if (!conn$collection_exists("deleterep")) {
+  conn$collection_create(name = "deleterep")
+  # OR bin/solr create -c deleterep
+}
 
 # status
-collection_clusterstatus()$cluster$collections$deleterep$shards
+conn$collection_clusterstatus()$cluster$collections$deleterep$shards
 
 # add the value bar to the property foo
-collection_addreplicaprop(name = "deleterep", shard = "shard1", replica = "core_node1",
-   property = "foo", property.value = "bar")
+conn$collection_addreplicaprop(name = "deleterep", shard = "shard1",
+  replica = "core_node1", property = "foo", property.value = "bar")
 
 # check status
-collection_clusterstatus()$cluster$collections$deleterep$shards
-collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
+conn$collection_clusterstatus()$cluster$collections$deleterep$shards
+conn$collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
 
 # delete replica property
-collection_deletereplicaprop(name = "deleterep", shard = "shard1",
+conn$collection_deletereplicaprop(name = "deleterep", shard = "shard1",
    replica = "core_node1", property = "foo")
 
 # check status - foo should be gone
-collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
+conn$collection_clusterstatus()$cluster$collections$deleterep$shards$shard1$replicas$core_node1
 }
 }
-
diff --git a/man/collection_deleteshard.Rd b/man/collection_deleteshard.Rd
index e9fe3da..6f108d1 100644
--- a/man/collection_deleteshard.Rd
+++ b/man/collection_deleteshard.Rd
@@ -4,9 +4,11 @@
 \alias{collection_deleteshard}
 \title{Delete a shard}
 \usage{
-collection_deleteshard(name, shard, raw = FALSE, ...)
+collection_deleteshard(conn, name, shard, raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) Required. The name of the collection that includes the shard
 to be deleted}
 
@@ -14,7 +16,7 @@ to be deleted}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Deleting a shard will unload all replicas of the shard and remove
@@ -23,19 +25,24 @@ which have no range given for custom sharding.
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
+
 # create collection
-# collection_create(name = "buffalo") # bin/solr create -c buffalo
+if (!conn$collection_exists("buffalo")) {
+  conn$collection_create(name = "buffalo")
+  # OR: bin/solr create -c buffalo
+}
 
 # find shard names
-names(collection_clusterstatus()$cluster$collections$buffalo$shards)
+names(conn$collection_clusterstatus()$cluster$collections$buffalo$shards)
+
 # split a shard by name
-collection_splitshard(name = "buffalo", shard = "shard1")
+collection_splitshard(conn, name = "buffalo", shard = "shard1")
+
 # now we have three shards
-names(collection_clusterstatus()$cluster$collections$buffalo$shards)
+names(conn$collection_clusterstatus()$cluster$collections$buffalo$shards)
 
 # delete shard
-collection_deleteshard(name = "buffalo", shard = "shard1_1")
+conn$collection_deleteshard(name = "buffalo", shard = "shard1_1")
 }
 }
-
diff --git a/man/collection_exists.Rd b/man/collection_exists.Rd
index 8bf9682..0435b2a 100644
--- a/man/collection_exists.Rd
+++ b/man/collection_exists.Rd
@@ -4,12 +4,14 @@
 \alias{collection_exists}
 \title{Check if a collection exists}
 \usage{
-collection_exists(name, ...)
+collection_exists(conn, name, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the collection. Required}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A single boolean, \code{TRUE} or \code{FALSE}
@@ -18,22 +20,19 @@ A single boolean, \code{TRUE} or \code{FALSE}
 Check if a collection exists
 }
 \details{
-Simply calls \code{\link{collection_list}} internally
+Simply calls \code{\link[=collection_list]{collection_list()}} internally
 }
 \examples{
 \dontrun{
 # start Solr with Cloud mode via the schemaless eg: bin/solr -e cloud
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
-
-# connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # exists
-collection_exists("gettingstarted")
+conn$collection_exists("gettingstarted")
 
 # doesn't exist
-collection_exists("hhhhhh")
+conn$collection_exists("hhhhhh")
 }
 }
-
diff --git a/man/collection_list.Rd b/man/collection_list.Rd
index 0a270f4..4257eeb 100644
--- a/man/collection_list.Rd
+++ b/man/collection_list.Rd
@@ -4,21 +4,25 @@
 \alias{collection_list}
 \title{List collections}
 \usage{
-collection_list(raw = FALSE, ...)
+collection_list(conn, raw = FALSE, ...)
 }
 \arguments{
-\item{raw}{(logical) If \code{TRUE}, returns raw data}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
+\code{wt} param}
+
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 List collections
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_list()
-collection_list()$collections
+(conn <- SolrClient$new())
+
+conn$collection_list()
+conn$collection_list()$collections
+collection_list(conn)
 }
 }
-
diff --git a/man/collection_migrate.Rd b/man/collection_migrate.Rd
index a13e3c3..4201d54 100644
--- a/man/collection_migrate.Rd
+++ b/man/collection_migrate.Rd
@@ -4,12 +4,13 @@
 \alias{collection_migrate}
 \title{Migrate documents to another collection}
 \usage{
-collection_migrate(name, target.collection, split.key, forward.timeout = NULL,
-  async = NULL, raw = FALSE, ...)
+collection_migrate(conn, name, target.collection, split.key,
+  forward.timeout = NULL, async = NULL, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) Required. The name of the source collection from which
-documents will be split}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) Required. The name of the source collection from
+which documents will be split}
 
 \item{target.collection}{(character) Required. The name of the target collection
 to which documents will be migrated}
@@ -26,29 +27,34 @@ asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Migrate documents to another collection
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "migrate_from") # bin/solr create -c migrate_from
+if (!conn$collection_exists("migrate_from")) {
+  conn$collection_create(name = "migrate_from")
+  # OR: bin/solr create -c migrate_from
+}
 
 # create another collection
-collection_create(name = "migrate_to") # bin/solr create -c migrate_to
+if (!conn$collection_exists("migrate_to")) {
+  conn$collection_create(name = "migrate_to")
+  # OR bin/solr create -c migrate_to
+}
 
 # add some documents
-file <- system.file("examples", "books.csv", package = "solr")
+file <- system.file("examples", "books.csv", package = "solrium")
 x <- read.csv(file, stringsAsFactors = FALSE)
-add(x, "migrate_from")
+conn$add(x, "migrate_from")
 
 # migrate some documents from one collection to the other
 ## FIXME - not sure if this is actually working....
-collection_migrate("migrate_from", "migrate_to", split.key = "05535")
+# conn$collection_migrate("migrate_from", "migrate_to", split.key = "05535")
 }
 }
-
diff --git a/man/collection_overseerstatus.Rd b/man/collection_overseerstatus.Rd
index 6aff27b..7d22710 100644
--- a/man/collection_overseerstatus.Rd
+++ b/man/collection_overseerstatus.Rd
@@ -4,22 +4,28 @@
 \alias{collection_overseerstatus}
 \title{Get overseer status}
 \usage{
-collection_overseerstatus(raw = FALSE, ...)
+collection_overseerstatus(conn, raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
-Returns the current status of the overseer, performance statistics
-of various overseer APIs as well as last 10 failures per operation type.
+Returns the current status of the overseer, performance
+statistics of various overseer APIs as well as last 10 failures per
+operation type.
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_overseerstatus()
-res <- collection_overseerstatus()
+(conn <- SolrClient$new())
+conn$collection_overseerstatus()
+res <- conn$collection_overseerstatus()
 res$responseHeader
 res$leader
 res$overseer_queue_size
@@ -31,4 +37,3 @@ res$overseer_internal_queue
 res$collection_queue
 }
 }
-
diff --git a/man/collection_rebalanceleaders.Rd b/man/collection_rebalanceleaders.Rd
index 7eefdfd..bf43e1d 100644
--- a/man/collection_rebalanceleaders.Rd
+++ b/man/collection_rebalanceleaders.Rd
@@ -4,26 +4,32 @@
 \alias{collection_rebalanceleaders}
 \title{Rebalance leaders}
 \usage{
-collection_rebalanceleaders(name, maxAtOnce = NULL, maxWaitSeconds = NULL,
-  raw = FALSE, ...)
+collection_rebalanceleaders(conn, name, maxAtOnce = NULL,
+  maxWaitSeconds = NULL, raw = FALSE, ...)
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection rebalance preferredLeaders on.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{maxAtOnce}{(integer) The maximum number of reassignments to have queue up at once.
-Values <=0 are use the default value Integer.MAX_VALUE. When this number is reached, the
-process waits for one or more leaders to be successfully assigned before adding more
-to the queue.}
+\item{name}{(character) Required. The name of the collection to rebalance
+preferredLeaders on}
 
-\item{maxWaitSeconds}{(integer) Timeout value when waiting for leaders to be reassigned.
-NOTE: if maxAtOnce is less than the number of reassignments that will take place,
-this is the maximum interval that any single wait for at least one reassignment.
-For example, if 10 reassignments are to take place and maxAtOnce is 1 and maxWaitSeconds
-is 60, the upper bound on the time that the command may wait is 10 minutes. Default: 60}
+\item{maxAtOnce}{(integer) The maximum number of reassignments to have
+queued up at once. Values <= 0 use the default value Integer.MAX_VALUE. When
+this number is reached, the process waits for one or more leaders to be
+successfully assigned before adding more to the queue.}
+
+\item{maxWaitSeconds}{(integer) Timeout value when waiting for leaders to
+be reassigned. NOTE: if maxAtOnce is less than the number of reassignments
+that will take place, this is the maximum interval of any single wait for
+at least one reassignment. For example, if 10 reassignments are to take
+place and maxAtOnce is 1 and maxWaitSeconds is 60, the upper bound on the
+time that the command may wait is 10 minutes. Default: 60}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
 Reassign leaders in a collection according to the preferredLeader
@@ -31,19 +37,21 @@ property across active nodes
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # create collection
-collection_create(name = "mycollection2") # bin/solr create -c mycollection2
+if (!conn$collection_exists("mycollection2")) {
+  conn$collection_create(name = "mycollection2")
+  # OR: bin/solr create -c mycollection2
+}
 
 # balance preferredLeader property
-collection_balanceshardunique("mycollection2", property = "preferredLeader")
+conn$collection_balanceshardunique("mycollection2", property = "preferredLeader")
 
 # rebalance leaders according to the preferredLeader property
-collection_rebalanceleaders("mycollection2")
+conn$collection_rebalanceleaders("mycollection2")
 
 # examine cluster status
-collection_clusterstatus()$cluster$collections$mycollection2
+conn$collection_clusterstatus()$cluster$collections$mycollection2
 }
 }
-
diff --git a/man/collection_reload.Rd b/man/collection_reload.Rd
index c624173..866ed9f 100644
--- a/man/collection_reload.Rd
+++ b/man/collection_reload.Rd
@@ -4,23 +4,28 @@
 \alias{collection_reload}
 \title{Reload a collection}
 \usage{
-collection_reload(name, raw = FALSE, ...)
+collection_reload(conn, name, raw = FALSE, callopts)
 }
 \arguments{
-\item{name}{The name of the collection to reload. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the collection to reload. Required}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Reload a collection
 }
 \examples{
 \dontrun{
-solr_connect()
-collection_create(name = "helloWorld")
-collection_reload(name = "helloWorld")
-}
+(conn <- SolrClient$new())
+
+if (!conn$collection_exists("helloWorld")) {
+  conn$collection_create(name = "helloWorld")
 }
 
+conn$collection_reload(name = "helloWorld")
+}
+}
diff --git a/man/collection_removerole.Rd b/man/collection_removerole.Rd
index 3917d21..968948a 100644
--- a/man/collection_removerole.Rd
+++ b/man/collection_removerole.Rd
@@ -4,17 +4,22 @@
 \alias{collection_removerole}
 \title{Remove a role from a node}
 \usage{
-collection_removerole(role = "overseer", node, raw = FALSE, ...)
+collection_removerole(conn, role = "overseer", node, raw = FALSE, ...)
 }
 \arguments{
-\item{role}{(character) Required. The name of the role. The only supported role
-as of now is overseer (set as default).}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{role}{(character) Required. The name of the role. The only supported
+role as of now is overseer (set as default).}
 
 \item{node}{(character) Required. The name of the node.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
 Remove an assigned role. This API is used to undo the roles
@@ -22,12 +27,11 @@ assigned using \code{\link{collection_addrole}}
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
 # get list of nodes
-nodes <- collection_clusterstatus()$cluster$live_nodes
-collection_addrole(node = nodes[1])
-collection_removerole(node = nodes[1])
+nodes <- conn$collection_clusterstatus()$cluster$live_nodes
+conn$collection_addrole(node = nodes[1])
+conn$collection_removerole(node = nodes[1])
 }
 }
-
diff --git a/man/collection_requeststatus.Rd b/man/collection_requeststatus.Rd
index 94e46c6..8bc7327 100644
--- a/man/collection_requeststatus.Rd
+++ b/man/collection_requeststatus.Rd
@@ -4,33 +4,24 @@
 \alias{collection_requeststatus}
 \title{Get request status}
 \usage{
-collection_requeststatus(requestid, raw = FALSE, ...)
+collection_requeststatus(conn, requestid, raw = FALSE, ...)
 }
 \arguments{
-\item{requestid}{(character) Required. The user defined request-id for the request.
-This can be used to track the status of the submitted asynchronous task. \code{-1}
-is a special request id which is used to cleanup the stored states for all of the
-already completed/failed tasks.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{requestid}{(character) Required. The user defined request-id for the
+request. This can be used to track the status of the submitted asynchronous
+task. \code{-1} is a special request id which is used to cleanup the stored
+states for all of the already completed/failed tasks.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{You can pass in parameters like \code{property.name=value} to set
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/7_0/defining-core-properties.html)}
 }
 \description{
-Request the status of an already submitted Asynchronous Collection
-API call. This call is also used to clear up the stored statuses.
-}
-\examples{
-\dontrun{
-solr_connect()
-
-# invalid requestid
-collection_requeststatus(requestid = "xxx")
-
-# valid requestid
-collection_requeststatus(requestid = "xxx")
-res$responseHeader
-res$xxx
-}
+Request the status of an already submitted Asynchronous
+Collection API call. This call is also used to clear up the stored statuses.
 }
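+\examples{
+\dontrun{
+(conn <- SolrClient$new())
+
+# a sketch: "xxx" stands in for a request id from a prior async call
+# conn$collection_requeststatus(requestid = "xxx")
+}
+}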
-
diff --git a/man/collection_splitshard.Rd b/man/collection_splitshard.Rd
index e761487..a6a0ed3 100644
--- a/man/collection_splitshard.Rd
+++ b/man/collection_splitshard.Rd
@@ -4,41 +4,47 @@
 \alias{collection_splitshard}
 \title{Split a shard}
 \usage{
-collection_splitshard(name, shard, ranges = NULL, split.key = NULL,
-  async = NULL, raw = FALSE, ...)
+collection_splitshard(conn, name, shard, ranges = NULL, split.key = NULL,
+  async = NULL, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) Required. The name of the collection that includes the shard
-to be split}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) Required. The name of the collection that includes
+the shard to be split}
 
 \item{shard}{(character) Required. The name of the shard to be split}
 
-\item{ranges}{(character) A comma-separated list of hash ranges in hexadecimal
-e.g. ranges=0-1f4,1f5-3e8,3e9-5dc}
+\item{ranges}{(character) A comma-separated list of hash ranges in
+hexadecimal e.g. ranges=0-1f4,1f5-3e8,3e9-5dc}
 
 \item{split.key}{(character) The key to use for splitting the index}
 
-\item{async}{(character) Request ID to track this action which will be processed
-asynchronously}
+\item{async}{(character) Request ID to track this action which will be
+processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Split a shard
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
+
 # create collection
-collection_create(name = "trees")
+if (!conn$collection_exists("trees")) {
+  conn$collection_create("trees")
+}
+
 # find shard names
-names(collection_clusterstatus()$cluster$collections$trees$shards)
+names(conn$collection_clusterstatus()$cluster$collections$trees$shards)
+
 # split a shard by name
-collection_splitshard(name = "trees", shard = "shard1")
+conn$collection_splitshard(name = "trees", shard = "shard1")
+
 # now we have three shards
-names(collection_clusterstatus()$cluster$collections$trees$shards)
+names(conn$collection_clusterstatus()$cluster$collections$trees$shards)
 }
 }
-
diff --git a/man/collections.Rd b/man/collections.Rd
index fcc8135..b3983c4 100644
--- a/man/collections.Rd
+++ b/man/collections.Rd
@@ -5,37 +5,36 @@
 \alias{cores}
 \title{List collections or cores}
 \usage{
-collections(...)
+collections(conn, ...)
 
-cores(...)
+cores(conn, ...)
 }
 \arguments{
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
-A character vector
+character vector
 }
 \description{
 List collections or cores
 }
 \details{
-Calls \code{\link{collection_list}} or \code{\link{core_status}} internally, 
+Calls \code{\link[=collection_list]{collection_list()}} or \code{\link[=core_status]{core_status()}} internally,
 and parses out names for you.
 }
 \examples{
 \dontrun{
 # connect
-solr_connect(verbose = FALSE)
+(conn <- SolrClient$new())
 
 # list collections
-collections()
+conn$collection_list()
+collections(conn)
 
 # list cores
-cores()
-
-# curl options
-library("httr")
-collections(config = verbose())
+conn$core_status()
+cores(conn)
 }
 }
-
diff --git a/man/commit.Rd b/man/commit.Rd
index f7f619d..c4a9774 100644
--- a/man/commit.Rd
+++ b/man/commit.Rd
@@ -4,44 +4,47 @@
 \alias{commit}
 \title{Commit}
 \usage{
-commit(name, expunge_deletes = FALSE, wait_searcher = TRUE,
+commit(conn, name, expunge_deletes = FALSE, wait_searcher = TRUE,
   soft_commit = FALSE, wt = "json", raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) A collection or core name. Required.}
 
 \item{expunge_deletes}{merge segments with deletes away. Default: \code{FALSE}}
 
-\item{wait_searcher}{block until a new searcher is opened and registered as the
-main query searcher, making the changes visible. Default: \code{TRUE}}
+\item{wait_searcher}{block until a new searcher is opened and registered as
+the main query searcher, making the changes visible. Default: \code{TRUE}}
 
-\item{soft_commit}{perform a soft commit - this will refresh the 'view' of the
-index in a more performant manner, but without "on-disk" guarantees.
+\item{soft_commit}{perform a soft commit - this will refresh the 'view' of
+the index in a more performant manner, but without "on-disk" guarantees.
 Default: \code{FALSE}}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
-parse}
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to parse}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Commit
 }
 \examples{
 \dontrun{
-solr_connect()
+(conn <- SolrClient$new())
 
-commit("gettingstarted")
-commit("gettingstarted", wait_searcher = FALSE)
+conn$commit("gettingstarted")
+conn$commit("gettingstarted", wait_searcher = FALSE)
 
 # get xml back
-commit("gettingstarted", wt = "xml")
+conn$commit("gettingstarted", wt = "xml")
 ## raw xml
-commit("gettingstarted", wt = "xml", raw = TRUE)
+conn$commit("gettingstarted", wt = "xml", raw = TRUE)
 }
 }
-
diff --git a/man/config_get.Rd b/man/config_get.Rd
index 27ce7f6..d0378c7 100644
--- a/man/config_get.Rd
+++ b/man/config_get.Rd
@@ -4,9 +4,11 @@
 \alias{config_get}
 \title{Get Solr configuration details}
 \usage{
-config_get(name, what = NULL, wt = "json", raw = FALSE, ...)
+config_get(conn, name, what = NULL, wt = "json", raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the core. If not given, all cores.}
 
 \item{what}{(character) What you want to look at. One of solrconfig or
@@ -19,7 +21,7 @@ If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt}}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A list, \code{xml_document}, or character
@@ -38,30 +40,29 @@ you get all the data when \code{raw=TRUE}.
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # all config settings
-config_get("gettingstarted")
+conn$config_get("gettingstarted")
 
 # just znodeVersion
-config_get("gettingstarted", "znodeVersion")
+conn$config_get("gettingstarted", "znodeVersion")
 
 # just luceneMatchVersion
-config_get("gettingstarted", "luceneMatchVersion")
+conn$config_get("gettingstarted", "luceneMatchVersion")
 
 # just updateHandler
-config_get("gettingstarted", "updateHandler")
+conn$config_get("gettingstarted", "updateHandler")
 
 # just requestHandler
-config_get("gettingstarted", "requestHandler")
+conn$config_get("gettingstarted", "requestHandler")
 
 ## Get XML
-config_get("gettingstarted", wt = "xml")
-config_get("gettingstarted", "updateHandler", wt = "xml")
-config_get("gettingstarted", "requestHandler", wt = "xml")
+conn$config_get("gettingstarted", wt = "xml")
+conn$config_get("gettingstarted", "updateHandler", wt = "xml")
+conn$config_get("gettingstarted", "requestHandler", wt = "xml")
 
 ## Raw data - what param ignored when raw=TRUE
-config_get("gettingstarted", raw = TRUE)
+conn$config_get("gettingstarted", raw = TRUE)
 }
 }
-
diff --git a/man/config_overlay.Rd b/man/config_overlay.Rd
index f98efc1..2d6f019 100644
--- a/man/config_overlay.Rd
+++ b/man/config_overlay.Rd
@@ -4,14 +4,16 @@
 \alias{config_overlay}
 \title{Get Solr configuration overlay}
 \usage{
-config_overlay(name, omitHeader = FALSE, ...)
+config_overlay(conn, name, omitHeader = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the core. If not given, all cores.}
 
 \item{omitHeader}{(logical) If \code{TRUE}, omit header. Default: \code{FALSE}}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A list with response from server
@@ -26,13 +28,12 @@ Get Solr configuration overlay
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # get config overlay
-config_overlay("gettingstarted")
+conn$config_overlay("gettingstarted")
 
 # without header
-config_overlay("gettingstarted", omitHeader = TRUE)
+conn$config_overlay("gettingstarted", omitHeader = TRUE)
 }
 }
-
diff --git a/man/config_params.Rd b/man/config_params.Rd
index b63e481..5f1c1c5 100644
--- a/man/config_params.Rd
+++ b/man/config_params.Rd
@@ -4,25 +4,27 @@
 \alias{config_params}
 \title{Set Solr configuration params}
 \usage{
-config_params(name, param = NULL, set = NULL, unset = NULL,
+config_params(conn, name, param = NULL, set = NULL, unset = NULL,
   update = NULL, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the core. If not given, all cores.}
 
 \item{param}{(character) Name of a parameter}
 
-\item{set}{(list) List of key:value pairs of what to set. Create or overwrite 
+\item{set}{(list) List of key:value pairs of what to set. Create or overwrite
 a parameter set map. Default: NULL (nothing passed)}
 
-\item{unset}{(list) One or more character strings of keys to unset. Default: NULL 
+\item{unset}{(list) One or more character strings of keys to unset. Default: NULL
 (nothing passed)}
 
-\item{update}{(list) List of key:value pairs of what to update. Updates a parameter 
-set map. This essentially overwrites the old parameter set, so all parameters must 
+\item{update}{(list) List of key:value pairs of what to update. Updates a parameter
+set map. This essentially overwrites the old parameter set, so all parameters must
 be sent in each update request.}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A list with response from server
@@ -31,31 +33,27 @@ A list with response from server
 Set Solr configuration params
 }
 \details{
-The Request Parameters API allows creating parameter sets that can 
-override or take the place of parameters defined in solrconfig.xml. It is 
-really another endpoint of the Config API instead of a separate API, and 
-has distinct commands. It does not replace or modify any sections of 
-solrconfig.xml, but instead provides another approach to handling parameters 
-used in requests. It behaves in the same way as the Config API, by storing 
-parameters in another file that will be used at runtime. In this case, 
-the parameters are stored in a file named params.json. This file is kept in 
+The Request Parameters API allows creating parameter sets that can
+override or take the place of parameters defined in solrconfig.xml. It is
+really another endpoint of the Config API instead of a separate API, and
+has distinct commands. It does not replace or modify any sections of
+solrconfig.xml, but instead provides another approach to handling parameters
+used in requests. It behaves in the same way as the Config API, by storing
+parameters in another file that will be used at runtime. In this case,
+the parameters are stored in a file named params.json. This file is kept in
 ZooKeeper or in the conf directory of a standalone Solr instance.
 }
 \examples{
 \dontrun{
 # start Solr in standard or Cloud mode
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # set a parameter set
 myFacets <- list(myFacets = list(facet = TRUE, facet.limit = 5))
-config_params("gettingstarted", set = myFacets)
+config_params(conn, "gettingstarted", set = myFacets)
 
 # check a parameter
-config_params("gettingstarted", param = "myFacets")
-
-# see all params
-config_params("gettingstarted")
+config_params(conn, "gettingstarted", param = "myFacets")
 }
 }
-
diff --git a/man/config_set.Rd b/man/config_set.Rd
index e22881d..ae4da20 100644
--- a/man/config_set.Rd
+++ b/man/config_set.Rd
@@ -4,18 +4,20 @@
 \alias{config_set}
 \title{Set Solr configuration details}
 \usage{
-config_set(name, set = NULL, unset = NULL, ...)
+config_set(conn, name, set = NULL, unset = NULL, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the core. If not given, all cores.}
 
-\item{set}{(list) List of key:value pairs of what to set. Default: NULL 
+\item{set}{(list) List of key:value pairs of what to set. Default: NULL
 (nothing passed)}
 
-\item{unset}{(list) One or more character strings of keys to unset. Default: NULL 
+\item{unset}{(list) One or more character strings of keys to unset. Default: NULL
 (nothing passed)}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A list with response from server
@@ -30,23 +32,24 @@ Set Solr configuration details
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # set a property
-config_set("gettingstarted", set = list(query.filterCache.autowarmCount = 1000))
+conn$config_set("gettingstarted", 
+  set = list(query.filterCache.autowarmCount = 1000))
 
 # unset a property
-config_set("gettingstarted", unset = "query.filterCache.size", config = verbose())
+conn$config_set("gettingstarted", unset = "query.filterCache.size", 
+  verbose = TRUE)
 
 # both set a property and unset a property
-config_set("gettingstarted", unset = "enableLazyFieldLoading")
+conn$config_set("gettingstarted", unset = "enableLazyFieldLoading")
 
 # many properties
-config_set("gettingstarted", set = list(
+conn$config_set("gettingstarted", set = list(
    query.filterCache.autowarmCount = 1000,
    query.commitWithin.softCommit = 'false'
  )
 )
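+
+# unset can also take more than one key at once (a sketch, reusing the
+# properties set above)
+conn$config_set("gettingstarted", unset = c(
+  "query.filterCache.autowarmCount",
+  "query.commitWithin.softCommit"
+))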
 }
 }
-
diff --git a/man/core_create.Rd b/man/core_create.Rd
index 9b364d9..fcb45a4 100644
--- a/man/core_create.Rd
+++ b/man/core_create.Rd
@@ -4,11 +4,13 @@
 \alias{core_create}
 \title{Create a core}
 \usage{
-core_create(name, instanceDir = NULL, config = NULL, schema = NULL,
+core_create(conn, name, instanceDir = NULL, config = NULL, schema = NULL,
   dataDir = NULL, configSet = NULL, collection = NULL, shard = NULL,
   async = NULL, raw = FALSE, callopts = list(), ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) The name of the core to be created. Required}
 
 \item{instanceDir}{(character) Path to instance directory}
@@ -17,50 +19,54 @@ core_create(name, instanceDir = NULL, config = NULL, schema = NULL,
 
 \item{schema}{(character) Path to schema file}
 
-\item{dataDir}{(character) Name of the data directory relative to instanceDir.}
+\item{dataDir}{(character) Name of the data directory relative to
+instanceDir.}
 
-\item{configSet}{(character) Name of the configset to use for this core. For more
-information, see https://cwiki.apache.org/confluence/display/solr/Config+Sets}
+\item{configSet}{(character) Name of the configset to use for this core.
+For more information, see
+https://lucene.apache.org/solr/guide/6_6/config-sets.html}
 
-\item{collection}{(character) The name of the collection to which this core belongs.
-The default is the name of the core. collection.<param>=<val ue> causes a property of
-<param>=<value> to be set if a new collection is being created. Use collection.configNa
-me=<configname> to point to the configuration for a new collection.}
+\item{collection}{(character) The name of the collection to which this core
+belongs. The default is the name of the core. collection.<param>=<value>
+causes a property of <param>=<value> to be set if a new collection is being
+created. Use collection.configName=<configname> to point to the
+configuration for a new collection.}
 
-\item{shard}{(character) The shard id this core represents. Normally you want to be
-auto-assigned a shard id.}
+\item{shard}{(character) The shard id this core represents. Normally you
+want to be auto-assigned a shard id.}
 
 \item{async}{(character) Request ID to track this action which will be
 processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{...}{You can pass in parameters like \code{property.name=value} to set
-core property name to value. See the section Defining core.properties for details on
-supported properties and values.
-(https://cwiki.apache.org/confluence/display/solr/Defining+core.properties)}
+core property name to value. See the section Defining core.properties for
+details on supported properties and values.
+(https://lucene.apache.org/solr/guide/6_6/defining-core-properties.html)}
 }
 \description{
 Create a core
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Create a core
-path <- "~/solr-5.4.1/server/solr/newcore/conf"
+path <- "~/solr-7.0.0/server/solr/newcore/conf"
 dir.create(path, recursive = TRUE)
-files <- list.files("~/solr-5.4.1/server/solr/configsets/data_driven_schema_configs/conf/",
+files <- list.files("~/solr-7.0.0/server/solr/configsets/sample_techproducts_configs/conf/",
 full.names = TRUE)
-file.copy(files, path, recursive = TRUE)
-core_create(name = "newcore", instanceDir = "newcore", configSet = "basic_configs")
+invisible(file.copy(files, path, recursive = TRUE))
+conn$core_create(name = "newcore", instanceDir = "newcore",
+  configSet = "sample_techproducts_configs")
 }
 }
-
diff --git a/man/core_exists.Rd b/man/core_exists.Rd
index 0157624..099a818 100644
--- a/man/core_exists.Rd
+++ b/man/core_exists.Rd
@@ -4,12 +4,14 @@
 \alias{core_exists}
 \title{Check if a core exists}
 \usage{
-core_exists(name, callopts = list())
+core_exists(conn, name, callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of the core. If not given, all cores.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{name}{(character) The name of the core to check. Required}
+
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 A single boolean, \code{TRUE} or \code{FALSE}
@@ -18,22 +20,22 @@ A single boolean, \code{TRUE} or \code{FALSE}
 Check if a core exists
 }
 \details{
-Simply calls \code{\link{core_status}} internally
+Simply calls \code{\link[=core_status]{core_status()}} internally
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
-# where <corename> is the name for your core - or creaate as below
+# where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # exists
-core_exists("gettingstarted")
+conn$core_exists("gettingstarted")
 
 # doesn't exist
-core_exists("hhhhhh")
+conn$core_exists("hhhhhh")
 }
 }
-
diff --git a/man/core_mergeindexes.Rd b/man/core_mergeindexes.Rd
index ab7dd6d..db1af1f 100644
--- a/man/core_mergeindexes.Rd
+++ b/man/core_mergeindexes.Rd
@@ -4,22 +4,24 @@
 \alias{core_mergeindexes}
 \title{Merge indexes (cores)}
 \usage{
-core_mergeindexes(name, indexDir = NULL, srcCore = NULL, async = NULL,
-  raw = FALSE, callopts = list())
+core_mergeindexes(conn, name, indexDir = NULL, srcCore = NULL,
+  async = NULL, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{The name of the target core/index. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the target core/index. Required}
 
 \item{indexDir}{(character) Multi-valued, directories that would be merged.}
 
 \item{srcCore}{(character) Multi-valued, source cores that would be merged.}
 
-\item{async}{(character) Request ID to track this action which will be processed
-asynchronously}
+\item{async}{(character) Request ID to track this action which will be
+processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Merges one or more indexes to another index. The indexes must
@@ -30,19 +32,20 @@ more indexes that will be merged to it.
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#  bin/solr start -e schemaless
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 ## FIXME: not tested yet
 
 # use indexDir parameter
-core_mergeindexes(core="new_core_name", indexDir = c("/solr_home/core1/data/index",
+conn$core_mergeindexes(core="new_core_name",
+   indexDir = c("/solr_home/core1/data/index",
    "/solr_home/core2/data/index"))
 
 # use srcCore parameter
-core_mergeindexes(name = "new_core_name", srcCore = c('core1', 'core2'))
+conn$core_mergeindexes(name = "new_core_name", srcCore = c('core1', 'core2'))
 }
 }
-
diff --git a/man/core_reload.Rd b/man/core_reload.Rd
index 8f40784..f88adc4 100644
--- a/man/core_reload.Rd
+++ b/man/core_reload.Rd
@@ -4,30 +4,32 @@
 \alias{core_reload}
 \title{Reload a core}
 \usage{
-core_reload(name, raw = FALSE, callopts = list())
+core_reload(conn, name, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of the core. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the core to reload. Required}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Reload a core
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#  bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Status of particular cores
-core_reload("gettingstarted")
-core_status("gettingstarted")
+conn$core_reload("gettingstarted")
+conn$core_status("gettingstarted")
 }
 }
-
diff --git a/man/core_rename.Rd b/man/core_rename.Rd
index ca6d036..3517698 100644
--- a/man/core_rename.Rd
+++ b/man/core_rename.Rd
@@ -4,37 +4,52 @@
 \alias{core_rename}
 \title{Rename a core}
 \usage{
-core_rename(name, other, async = NULL, raw = FALSE, callopts = list())
+core_rename(conn, name, other, async = NULL, raw = FALSE,
+  callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of the core to be renamed. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the core to be renamed. Required}
 
 \item{other}{(character) The new name of the core. Required.}
 
-\item{async}{(character) Request ID to track this action which will be processed
-asynchronously}
+\item{async}{(character) Request ID to track this action which will be
+processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Rename a core
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Status of particular cores
-core_create("testcore") # or create in the CLI: bin/solr create -c testcore
-core_rename("testcore", "newtestcore")
-core_status("testcore") # core missing
-core_status("newtestcore", FALSE) # not missing
+path <- "~/solr-7.0.0/server/solr/testcore/conf"
+dir.create(path, recursive = TRUE)
+files <- list.files(
+"~/solr-7.0.0/server/solr/configsets/sample_techproducts_configs/conf/",
+full.names = TRUE)
+invisible(file.copy(files, path, recursive = TRUE))
+conn$core_create("testcore") # or create in CLI: bin/solr create -c testcore
+
+# rename
+conn$core_rename("testcore", "newtestcore")
+## status
+conn$core_status("testcore") # core missing
+conn$core_status("newtestcore", FALSE) # not missing
+
+# cleanup
+conn$core_unload("newtestcore")
 }
 }
-
diff --git a/man/core_requeststatus.Rd b/man/core_requeststatus.Rd
index 3f4a4ca..6bc69cb 100644
--- a/man/core_requeststatus.Rd
+++ b/man/core_requeststatus.Rd
@@ -4,25 +4,27 @@
 \alias{core_requeststatus}
 \title{Request status of asynchronous CoreAdmin API call}
 \usage{
-core_requeststatus(requestid, raw = FALSE, callopts = list())
+core_requeststatus(conn, requestid, raw = FALSE, callopts = list())
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{requestid}{(character) The ID of the asynchronous request to check
 the status of. Required}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Request status of asynchronous CoreAdmin API call
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 
 # FIXME: not tested yet...
-# solr_connect()
-# core_requeststatus(requestid = 1)
+# (conn <- SolrClient$new())
+# conn$core_requeststatus(requestid = 1)
 }
 }
-
diff --git a/man/core_split.Rd b/man/core_split.Rd
index 12ac1ae..d4eddd1 100644
--- a/man/core_split.Rd
+++ b/man/core_split.Rd
@@ -4,11 +4,13 @@
 \alias{core_split}
 \title{Split a core}
 \usage{
-core_split(name, path = NULL, targetCore = NULL, ranges = NULL,
+core_split(conn, name, path = NULL, targetCore = NULL, ranges = NULL,
   split.key = NULL, async = NULL, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of one of the cores to be swapped. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the core to be split. Required}
 
 \item{path}{(character) Two or more target directory paths in which a piece of the
 index will be written}
@@ -27,7 +29,7 @@ asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 SPLIT splits an index into two or more indexes. The index being
@@ -50,35 +52,34 @@ the two should be specified, if at all required.
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Swap a core
 ## First, create two cores
-# core_split("splitcoretest0") # or create in the CLI: bin/solr create -c splitcoretest0
-# core_split("splitcoretest1") # or create in the CLI: bin/solr create -c splitcoretest1
-# core_split("splitcoretest2") # or create in the CLI: bin/solr create -c splitcoretest2
+# conn$core_split("splitcoretest0") # or create in the CLI: bin/solr create -c splitcoretest0
+# conn$core_split("splitcoretest1") # or create in the CLI: bin/solr create -c splitcoretest1
+# conn$core_split("splitcoretest2") # or create in the CLI: bin/solr create -c splitcoretest2
 
 ## check status
-core_status("splitcoretest0", FALSE)
-core_status("splitcoretest1", FALSE)
-core_status("splitcoretest2", FALSE)
+conn$core_status("splitcoretest0", FALSE)
+conn$core_status("splitcoretest1", FALSE)
+conn$core_status("splitcoretest2", FALSE)
 
 ## split core using targetCore parameter
-core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"))
+conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"))
 
 ## split core using split.key parameter
 ### Here all documents having the same route key as the split.key i.e. 'A!'
 ### will be split from the core index and written to the targetCore
-core_split("splitcoretest0", targetCore = "splitcoretest1", split.key = "A!")
+conn$core_split("splitcoretest0", targetCore = "splitcoretest1", split.key = "A!")
 
 ## split core using ranges parameter
 ### Solr expects hash ranges in hexadecimal, but since we're in R,
 ### let's not make our lives any harder, so you can pass in numbers
 ### but you can still pass in hexadecimal if you want.
 rgs <- c('0-1f4', '1f5-3e8')
-core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
+conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
 rgs <- list(c(0, 500), c(501, 1000))
-core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
+conn$core_split("splitcoretest0", targetCore = c("splitcoretest1", "splitcoretest2"), ranges = rgs)
 }
 }
-
diff --git a/man/core_status.Rd b/man/core_status.Rd
index 3a0aaef..79d60a0 100644
--- a/man/core_status.Rd
+++ b/man/core_status.Rd
@@ -4,40 +4,42 @@
 \alias{core_status}
 \title{Get core status}
 \usage{
-core_status(name = NULL, indexInfo = TRUE, raw = FALSE,
+core_status(conn, name = NULL, indexInfo = TRUE, raw = FALSE,
   callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of the core. If not given, all cores.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of the core. If not given, all cores.}
 
 \item{indexInfo}{(logical) If \code{FALSE}, index information is not
 returned with the core status. Default: \code{TRUE}}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Get core status
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Status of all cores
-core_status()
+conn$core_status()
 
 # Status of particular cores
-core_status("gettingstarted")
+conn$core_status("gettingstarted")
 
 # Get index info or not
 ## Default: TRUE
-core_status("gettingstarted", indexInfo = TRUE)
-core_status("gettingstarted", indexInfo = FALSE)
+conn$core_status("gettingstarted", indexInfo = TRUE)
+conn$core_status("gettingstarted", indexInfo = FALSE)
 }
 }
-
diff --git a/man/core_swap.Rd b/man/core_swap.Rd
index 5fb0868..8c48171 100644
--- a/man/core_swap.Rd
+++ b/man/core_swap.Rd
@@ -4,54 +4,60 @@
 \alias{core_swap}
 \title{Swap a core}
 \usage{
-core_swap(name, other, async = NULL, raw = FALSE, callopts = list())
+core_swap(conn, name, other, async = NULL, raw = FALSE, callopts = list())
 }
 \arguments{
-\item{name}{(character) The name of one of the cores to be swapped. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{other}{(character) The name of one of the cores to be swapped. Required.}
+\item{name}{(character) The name of one of the cores to be swapped.
+Required}
 
-\item{async}{(character) Request ID to track this action which will be processed
-asynchronously}
+\item{other}{(character) The name of one of the cores to be swapped.
+Required.}
+
+\item{async}{(character) Request ID to track this action which will be
+processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
-SWAP atomically swaps the names used to access two existing Solr cores.
-This can be used to swap new content into production. The prior core remains
-available and can be swapped back, if necessary. Each core will be known by
-the name of the other, after the swap
+SWAP atomically swaps the names used to access two existing
+Solr cores. This can be used to swap new content into production. The
+prior core remains available and can be swapped back, if necessary. Each
+core will be known by the name of the other, after the swap
 }
 \details{
-Do not use \code{core_swap} with a SolrCloud node. It is not supported and
-can result in the core being unusable. We'll try to stop you if you try.
+Do not use \code{core_swap} with a SolrCloud node. It is not
+supported and can result in the core being unusable. We'll try to stop
+you if you try.
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 # you can create a new core like: bin/solr create -c corename
 # where <corename> is the name for your core - or create as below
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Swap a core
 ## First, create two cores
-core_create("swapcoretest") # or create in the CLI: bin/solr create -c swapcoretest
-core_create("swapcoretest") # or create in the CLI: bin/solr create -c swapcoretest
+conn$core_create("swapcoretest1")
+# - or create on CLI: bin/solr create -c swapcoretest1
+conn$core_create("swapcoretest2")
+# - or create on CLI: bin/solr create -c swapcoretest2
 
 ## check status
-core_status("swapcoretest1", FALSE)
-core_status("swapcoretest2", FALSE)
+conn$core_status("swapcoretest1", FALSE)
+conn$core_status("swapcoretest2", FALSE)
 
 ## swap core
-core_swap("swapcoretest1", "swapcoretest2")
+conn$core_swap("swapcoretest1", "swapcoretest2")
 
 ## check status again
-core_status("swapcoretest1", FALSE)
-core_status("swapcoretest2", FALSE)
+conn$core_status("swapcoretest1", FALSE)
+conn$core_status("swapcoretest2", FALSE)
 }
 }
-
diff --git a/man/core_unload.Rd b/man/core_unload.Rd
index 66b5a52..fbec6b3 100644
--- a/man/core_unload.Rd
+++ b/man/core_unload.Rd
@@ -4,12 +4,14 @@
 \alias{core_unload}
 \title{Unload (delete) a core}
 \usage{
-core_unload(name, deleteIndex = FALSE, deleteDataDir = FALSE,
+core_unload(conn, name, deleteIndex = FALSE, deleteDataDir = FALSE,
   deleteInstanceDir = FALSE, async = NULL, raw = FALSE,
   callopts = list())
 }
 \arguments{
-\item{name}{The name of one of the cores to be removed. Required}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) The name of one of the cores to be removed.
+Required}
 
 \item{deleteIndex}{(logical) If \code{TRUE}, will remove the index when unloading
 the core. Default: \code{FALSE}}
@@ -18,31 +20,34 @@ the core. Default: \code{FALSE}}
 sub-directories. Default: \code{FALSE}}
 
 \item{deleteInstanceDir}{(logical) If \code{TRUE}, removes everything related to
-the core, including the index directory, configuration files and other related
-files. Default: \code{FALSE}}
+the core, including the index directory, configuration files and other
+related files. Default: \code{FALSE}}
 
-\item{async}{(character) Request ID to track this action which will be processed
-asynchronously}
+\item{async}{(character) Request ID to track this action which will be
+processed asynchronously}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{callopts}{curl options passed on to \code{\link[httr]{GET}}}
+\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Unload (delete) a core
 }
 \examples{
 \dontrun{
-# start Solr with Schemaless mode via the schemaless eg: bin/solr start -e schemaless
+# start Solr with Schemaless mode via the schemaless eg:
+#   bin/solr start -e schemaless
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Create a core
-core_create(name = "thingsstuff")
+conn$core_create(name = "books")
 
 # Unload a core
-core_unload(name = "fart")
+conn$core_unload(name = "books")
+## not found
+# conn$core_unload(name = "books")
+# > Error: 400 - Cannot unload non-existent core [books]
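+
+# also remove the index and data dir while unloading (a sketch; assumes
+# the core still exists)
+# conn$core_unload(name = "books", deleteIndex = TRUE, deleteDataDir = TRUE)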
 }
 }
-
diff --git a/man/delete.Rd b/man/delete.Rd
index 6932225..c114040 100644
--- a/man/delete.Rd
+++ b/man/delete.Rd
@@ -3,16 +3,19 @@
 \name{delete}
 \alias{delete}
 \alias{delete_by_id}
 \alias{delete_by_query}
 \title{Delete documents by ID or query}
 \usage{
-delete_by_id(ids, name, commit = TRUE, commit_within = NULL,
+delete_by_id(conn, ids, name, commit = TRUE, commit_within = NULL,
   overwrite = TRUE, boost = NULL, wt = "json", raw = FALSE, ...)
 
-delete_by_query(query, name, commit = TRUE, commit_within = NULL,
+delete_by_query(conn, query, name, commit = TRUE, commit_within = NULL,
   overwrite = TRUE, boost = NULL, wt = "json", raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{ids}{Document IDs, one or more in a vector or list}
 
 \item{name}{(character) A collection or core name. Required.}
@@ -20,21 +23,22 @@ delete_by_query(query, name, commit = TRUE, commit_within = NULL,
 \item{commit}{(logical) If \code{TRUE}, documents immediately searchable.
 Default: \code{TRUE}}
 
-\item{commit_within}{(numeric) Milliseconds to commit the change, the document will be added
-within that time. Default: NULL}
+\item{commit_within}{(numeric) Milliseconds to commit the change; the
+deletion will be applied within that time. Default: \code{NULL}}
 
-\item{overwrite}{(logical) Overwrite documents with matching keys. Default: \code{TRUE}}
+\item{overwrite}{(logical) Overwrite documents with matching keys.
+Default: \code{TRUE}}
 
-\item{boost}{(numeric) Boost factor. Default: NULL}
+\item{boost}{(numeric) Boost factor. Default: \code{NULL}}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to
 parse}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{query}{Query to use to delete documents}
 }
@@ -46,20 +50,19 @@ We use json internally as data interchange format for this function.
 }
 \examples{
 \dontrun{
-solr_connect()
+(cli <- SolrClient$new())
 
 # add some documents first
 ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-add(ss, name = "gettingstarted")
+cli$add(ss, name = "gettingstarted")
 
 # Now, delete them
 # Delete by ID
-# delete_by_id(ids = 9)
+cli$delete_by_id(ids = 1, "gettingstarted")
 ## Many IDs
-# delete_by_id(ids = c(3, 4))
+cli$delete_by_id(ids = c(3, 4), "gettingstarted")
 
 # Delete by query
-# delete_by_query(query = "manu:bank")
+cli$delete_by_query(query = "manu:bank", "gettingstarted")
 }
 }
-
diff --git a/man/is-sr.Rd b/man/is-sr.Rd
index bc7f5b1..d84102f 100644
--- a/man/is-sr.Rd
+++ b/man/is-sr.Rd
@@ -22,4 +22,3 @@ Test for sr_high class
 
 Test for sr_search class
 }
-
diff --git a/man/makemultiargs.Rd b/man/makemultiargs.Rd
index 4a96fc8..17f3ba3 100644
--- a/man/makemultiargs.Rd
+++ b/man/makemultiargs.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/zzz.r
 \name{makemultiargs}
 \alias{makemultiargs}
-\title{Function to make make multiple args of the same name from a 
+\title{Function to make multiple args of the same name from a
 single input with length > 1}
 \usage{
 makemultiargs(x)
@@ -11,7 +11,6 @@ makemultiargs(x)
 \item{x}{Value}
 }
 \description{
-Function to make make multiple args of the same name from a 
+Function to make multiple args of the same name from a
 single input with length > 1
 }
-
diff --git a/man/optimize.Rd b/man/optimize.Rd
deleted file mode 100644
index 67f1ca0..0000000
--- a/man/optimize.Rd
+++ /dev/null
@@ -1,48 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/optimize.R
-\name{optimize}
-\alias{optimize}
-\title{Optimize}
-\usage{
-optimize(name, max_segments = 1, wait_searcher = TRUE,
-  soft_commit = FALSE, wt = "json", raw = FALSE, ...)
-}
-\arguments{
-\item{name}{(character) A collection or core name. Required.}
-
-\item{max_segments}{optimizes down to at most this number of segments. Default: 1}
-
-\item{wait_searcher}{block until a new searcher is opened and registered as the
-main query searcher, making the changes visible. Default: \code{TRUE}}
-
-\item{soft_commit}{perform a soft commit - this will refresh the 'view' of the
-index in a more performant manner, but without "on-disk" guarantees.
-Default: \code{FALSE}}
-
-\item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to
-parse}
-
-\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
-\code{wt} param}
-
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
-}
-\description{
-Optimize
-}
-\examples{
-\dontrun{
-solr_connect()
-
-optimize("gettingstarted")
-optimize("gettingstarted", max_segments = 2)
-optimize("gettingstarted", wait_searcher = FALSE)
-
-# get xml back
-optimize("gettingstarted", wt = "xml")
-## raw xml
-optimize("gettingstarted", wt = "xml", raw = TRUE)
-}
-}
-
diff --git a/man/ping.Rd b/man/ping.Rd
index 8dfdb1a..504af48 100644
--- a/man/ping.Rd
+++ b/man/ping.Rd
@@ -4,21 +4,22 @@
 \alias{ping}
 \title{Ping a Solr instance}
 \usage{
-ping(name, wt = "json", verbose = TRUE, raw = FALSE, ...)
+ping(conn, name, wt = "json", raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{name}{(character) Name of a collection or core. Required.}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-\code{\link[xml2]{read_xml}} to parse}
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses
+\code{\link[xml2:read_xml]{xml2::read_xml()}} to parse}
 
-\item{verbose}{If TRUE (default) the url call used printed to console.}
 
-\item{raw}{(logical) If TRUE, returns raw data in format specified by
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \value{
 if \code{wt="xml"} an object of class \code{xml_document}, if
@@ -39,16 +40,15 @@ public, but works locally.
 # do so
 
 # connect: by default we connect to localhost, port 8983
-solr_connect()
+(cli <- SolrClient$new())
 
 # ping the gettingstarted index
-ping("gettingstarted")
-ping("gettingstarted", wt = "xml")
-ping("gettingstarted", verbose = FALSE)
-ping("gettingstarted", raw = TRUE)
+cli$ping("gettingstarted")
+ping(cli, "gettingstarted")
+ping(cli, "gettingstarted", wt = "xml")
+ping(cli, "gettingstarted", verbose = FALSE)
+ping(cli, "gettingstarted", raw = TRUE)
 
-library("httr")
-ping("gettingstarted", wt="xml", config = verbose())
+ping(cli, "gettingstarted", wt="xml", verbose = TRUE)
 }
 }
-
diff --git a/man/pivot_flatten_tabular.Rd b/man/pivot_flatten_tabular.Rd
index 34a6a8d..02b894e 100644
--- a/man/pivot_flatten_tabular.Rd
+++ b/man/pivot_flatten_tabular.Rd
@@ -19,4 +19,3 @@ Convert a nested hierarchy of facet.pivot elements
 to tabular data (rows and columns)
 }
 \keyword{internal}
-
diff --git a/man/schema.Rd b/man/schema.Rd
index f1e7c77..2b5e7cf 100644
--- a/man/schema.Rd
+++ b/man/schema.Rd
@@ -4,20 +4,21 @@
 \alias{schema}
 \title{Get the schema for a collection or core}
 \usage{
-schema(name, what = "", raw = FALSE, verbose = TRUE, ...)
+schema(conn, name, what = "", raw = FALSE, ...)
 }
 \arguments{
-\item{name}{(character) Name of collection or core}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) Name of a collection or core. Required.}
 
 \item{what}{(character) What to retrieve. By default, we retrieve the entire
 schema. Options include: fields, dynamicfields, fieldtypes, copyfields, name,
 version, uniquekey, similarity, "solrqueryparser/defaultoperator"}
 
-\item{raw}{(logical) If \code{TRUE}, returns raw data}
-
-\item{verbose}{If TRUE (default) the url call used printed to console.}
+\item{raw}{(logical) If \code{TRUE}, returns raw data}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Get the schema for a collection or core
@@ -28,32 +29,31 @@ Get the schema for a collection or core
 # after that, if you haven't run `bin/post -c gettingstarted docs/` yet, do so
 
 # connect: by default we connect to localhost, port 8983
-solr_connect()
+(cli <- SolrClient$new())
 
 # get the schema for the gettingstarted index
-schema(name = "gettingstarted")
+schema(cli, name = "gettingstarted")
 
 # Get parts of the schema
-schema(name = "gettingstarted", "fields")
-schema(name = "gettingstarted", "dynamicfields")
-schema(name = "gettingstarted", "fieldtypes")
-schema(name = "gettingstarted", "copyfields")
-schema(name = "gettingstarted", "name")
-schema(name = "gettingstarted", "version")
-schema(name = "gettingstarted", "uniquekey")
-schema(name = "gettingstarted", "similarity")
-schema(name = "gettingstarted", "solrqueryparser/defaultoperator")
+schema(cli, name = "gettingstarted", "fields")
+schema(cli, name = "gettingstarted", "dynamicfields")
+schema(cli, name = "gettingstarted", "fieldtypes")
+schema(cli, name = "gettingstarted", "copyfields")
+schema(cli, name = "gettingstarted", "name")
+schema(cli, name = "gettingstarted", "version")
+schema(cli, name = "gettingstarted", "uniquekey")
+schema(cli, name = "gettingstarted", "similarity")
+schema(cli, name = "gettingstarted", "solrqueryparser/defaultoperator")
 
 # get raw data
-schema(name = "gettingstarted", "similarity", raw = TRUE)
-schema(name = "gettingstarted", "uniquekey", raw = TRUE)
+schema(cli, name = "gettingstarted", "similarity", raw = TRUE)
+schema(cli, name = "gettingstarted", "uniquekey", raw = TRUE)
 
 # start Solr in Schemaless mode: bin/solr start -e schemaless
-# schema("gettingstarted")
+# schema(cli, "gettingstarted")
 
 # start Solr in Standalone mode: bin/solr start
 # then add a core: bin/solr create -c helloWorld
-# schema("helloWorld")
+# schema(cli, "helloWorld")
 }
 }
-
diff --git a/man/solr_all.Rd b/man/solr_all.Rd
index 149e57b..b5de90e 100644
--- a/man/solr_all.Rd
+++ b/man/solr_all.Rd
@@ -4,131 +4,140 @@
 \alias{solr_all}
 \title{All purpose search}
 \usage{
-solr_all(name = NULL, q = "*:*", sort = NULL, start = 0, rows = NULL,
-  pageDoc = NULL, pageScore = NULL, fq = NULL, fl = NULL,
-  defType = NULL, timeAllowed = NULL, qt = NULL, wt = "json",
-  NOW = NULL, TZ = NULL, echoHandler = NULL, echoParams = NULL,
-  key = NULL, callopts = list(), raw = FALSE, parsetype = "df",
-  concat = ",", ...)
+solr_all(conn, name = NULL, params = NULL, body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", concat = ",",
+  optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms, defaults to '*:*', or everything.}
-
-\item{sort}{Field to sort on. You can specify ascending (e.g., score desc) or 
-descending (e.g., score asc), sort by two fields (e.g., score desc, price asc), 
-or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of 
-x_f and y_f in a descending order).}
-
-\item{start}{Record to start at, default to beginning.}
-
-\item{rows}{Number of records to return. Default: 10.}
-
-\item{pageDoc}{If you expect to be paging deeply into the results (say beyond page 10, 
-assuming rows=10) and you are sorting by score, you may wish to add the pageDoc 
-and pageScore parameters to your request. These two parameters tell Solr (and Lucene) 
-what the last result (Lucene internal docid and score) of the previous page was, 
-so that when scoring the query for the next set of pages, it can ignore any results 
-that occur higher than that item. To get the Lucene internal doc id, you will need 
-to add [docid] to the &fl list. 
-e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score}
-
-\item{pageScore}{See pageDoc notes.}
-
-\item{fq}{Filter query, this does not affect the search, only what gets returned. 
-This parameter can accept multiple items in a lis or vector. You can't pass more than 
-one parameter of the same name, so we get around it by passing multiple queries 
-and we parse internally}
-
-\item{fl}{Fields to return, can be a character vector like \code{c('id', 'title')}, 
-or a single character vector with one or more comma separated names, like 
-\code{'id,title'}}
-
-\item{defType}{Specify the query parser to use with this request.}
-
-\item{timeAllowed}{The time allowed for a search to finish. This value only applies 
-to the search and not to requests in general. Time is in milliseconds. Values <= 0 
-mean no time restriction. Partial results may be returned (if there are any).}
-
-\item{qt}{Which query handler used. Options: dismax, others?}
-
-\item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}}
-to parse. You can't use \code{csv} because the point of this function}
-
-\item{NOW}{Set a fixed time for evaluating Date based expresions}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{TZ}{Time zone, you can override the default.}
-
-\item{echoHandler}{If \code{TRUE}, Solr places the name of the handle used in the 
-response to the client for debugging purposes. Default:}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{echoParams}{The echoParams parameter tells Solr what kinds of Request 
-parameters should be included in the response for debugging purposes, legal values 
-include:
-\itemize{
- \item none - don't include any request parameters for debugging
- \item explicit - include the parameters explicitly specified by the client in the request
- \item all - include all parameters involved in this request, either specified explicitly 
- by the client, or implicit because of the request handler configuration.
-}}
+\item{params}{(list) a named list of parameters; results in a GET request
+as long as no body parameters are given}
 
-\item{key}{API key, if needed.}
+\item{body}{(list) a named list of parameters; if given, a POST request
+will be performed}
 
-\item{callopts}{Call options passed on to httr::GET}
+\item{callopts}{Call options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 
 \item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
 
 \item{parsetype}{(character) One of 'list' or 'df'}
 
-\item{concat}{(character) Character to concatenate elements of longer than length 1. 
+\item{concat}{(character) Character to concatenate elements of longer than length 1.
 Note that this only works reliably when data format is json (wt='json'). The parsing
 is more complicated in XML format, but you can do that on your own.}
 
-\item{...}{Further args.}
+\item{optimizeMaxRows}{(logical) If \code{TRUE}, the rows parameter will be
+adjusted to the number of results actually returned. Only applied when the
+rows parameter is higher than \code{minOptimizedRows}. Default: \code{TRUE}}
+
+\item{minOptimizedRows}{(numeric) used by the \code{optimizeMaxRows}
+parameter, the minimum optimized rows. Default: 50000}
+
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
 }
 \description{
-Includes documents, facets, groups, mlt, stats, and highlights.
+Includes documents, facets, groups, mlt, stats, and highlights
 }
+\section{Parameters}{
+
+\itemize{
+ \item q Query terms, defaults to '*:*', or everything.
+ \item sort Field to sort on. You can specify ascending (e.g., score desc) or
+descending (e.g., score asc), sort by two fields (e.g., score desc, price asc),
+or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of
+x_f and y_f in a descending order).
+ \item start Record to start at, default to beginning.
+ \item rows Number of records to return. Default: 10.
+ \item pageDoc If you expect to be paging deeply into the results (say beyond page 10,
+assuming rows=10) and you are sorting by score, you may wish to add the pageDoc
+and pageScore parameters to your request. These two parameters tell Solr (and Lucene)
+what the last result (Lucene internal docid and score) of the previous page was,
+so that when scoring the query for the next set of pages, it can ignore any results
+that occur higher than that item. To get the Lucene internal doc id, you will need
+to add [docid] to the &fl list.
+e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score
+ \item pageScore See pageDoc notes.
+ \item fq Filter query, this does not affect the search, only what gets returned.
+This parameter can accept multiple items in a list or vector. You can't pass more than
+one parameter of the same name, so we get around it by passing multiple queries
+and we parse internally
+ \item fl Fields to return, can be a character vector like \code{c('id', 'title')},
+or a single character vector with one or more comma separated names, like
+\code{'id,title'}
+ \item defType Specify the query parser to use with this request.
+ \item timeAllowed The time allowed for a search to finish. This value only applies
+to the search and not to requests in general. Time is in milliseconds. Values <= 0
+mean no time restriction. Partial results may be returned (if there are any).
+ \item qt Which query handler to use. Options: dismax, others?
+ \item NOW Set a fixed time for evaluating Date based expressions
+ \item TZ Time zone, you can override the default.
+ \item echoHandler If \code{TRUE}, Solr places the name of the handler used
+in the response to the client for debugging purposes.
+ \item echoParams The echoParams parameter tells Solr what kinds of Request
+parameters should be included in the response for debugging purposes, legal values
+include:
+  \itemize{
+   \item none - don't include any request parameters for debugging
+   \item explicit - include the parameters explicitly specified by the client in the request
+   \item all - include all parameters involved in this request, either specified explicitly
+   by the client, or implicit because of the request handler configuration.
+ }
+\item wt (character) One of json (default) or xml. If json, uses
+  \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml,
+  uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to parse. csv is not
+  supported here, since this function returns several result types at once
+}
+}
+
 \examples{
 \dontrun{
 # connect
-solr_connect('http://api.plos.org/search')
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
-solr_all(q='*:*', rows=2, fl='id')
+solr_all(cli, params = list(q='*:*', rows=2, fl='id'))
 
 # facets
-solr_all(q='*:*', rows=2, fl='id', facet="true", facet.field="journal")
+solr_all(cli, params = list(q='*:*', rows=2, fl='id', facet="true",
+  facet.field="journal"))
 
 # mlt
-solr_all(q='ecology', rows=2, fl='id', mlt='true', mlt.count=2, mlt.fl='abstract')
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', mlt='true',
+  mlt.count=2, mlt.fl='abstract'))
 
 # facets and mlt
-solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-mlt='true', mlt.count=2, mlt.fl='abstract')
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+  facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract'))
 
 # stats
-solr_all(q='ecology', rows=2, fl='id', stats='true', stats.field='counter_total_all')
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', stats='true',
+  stats.field='counter_total_all'))
 
 # facets, mlt, and stats
-solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-mlt='true', mlt.count=2, mlt.fl='abstract', stats='true', stats.field='counter_total_all')
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+  facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract',
+  stats='true', stats.field='counter_total_all'))
 
 # group
-solr_all(q='ecology', rows=2, fl='id', group='true',
-   group.field='journal', group.limit=3)
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', group='true',
+ group.field='journal', group.limit=3))
 
 # facets, mlt, stats, and groups
-solr_all(q='ecology', rows=2, fl='id', facet="true", facet.field="journal",
-   mlt='true', mlt.count=2, mlt.fl='abstract', stats='true', stats.field='counter_total_all',
-   group='true', group.field='journal', group.limit=3)
+solr_all(cli, params = list(q='ecology', rows=2, fl='id', facet="true",
+ facet.field="journal", mlt='true', mlt.count=2, mlt.fl='abstract',
+ stats='true', stats.field='counter_total_all', group='true',
+ group.field='journal', group.limit=3))
 
 # using wt = xml
-solr_all(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full', wt="xml", raw=TRUE)
+solr_all(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+  fq='doc_type:full', wt="xml"), raw=TRUE)
 }
 }
 \references{
@@ -136,6 +145,5 @@ See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for
 more information.
 }
 \seealso{
-\code{\link{solr_highlight}}, \code{\link{solr_facet}}
+\code{\link[=solr_highlight]{solr_highlight()}}, \code{\link[=solr_facet]{solr_facet()}}
 }
-
diff --git a/man/solr_connect.Rd b/man/solr_connect.Rd
deleted file mode 100644
index c30eb5b..0000000
--- a/man/solr_connect.Rd
+++ /dev/null
@@ -1,58 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/connect.R
-\name{solr_connect}
-\alias{solr_connect}
-\alias{solr_settings}
-\title{Solr connection}
-\usage{
-solr_connect(url = "http://localhost:8983", proxy = NULL,
-  errors = "simple", verbose = TRUE)
-
-solr_settings()
-}
-\arguments{
-\item{url}{Base URL for Solr instance. For a local instance, this is likely going
-to be \code{http://localhost:8983} (also the default), or a different port if you
-set a different port.}
-
-\item{proxy}{List of arguments for a proxy connection, including one or more of:
-url, port, username, password, and auth. See \code{\link[httr]{use_proxy}} for 
-help, which is used to construct the proxy connection.}
-
-\item{errors}{(character) One of simple or complete. Simple gives http code and 
-error message on an error, while complete gives both http code and error message, 
-and stack trace, if available.}
-
-\item{verbose}{(logical) Whether to print help messages or not. E.g., if 
-\code{TRUE}, we print the URL on each request to a Solr server for your 
-reference. Default: \code{TRUE}}
-}
-\description{
-Set Solr options, including base URL, proxy, and errors
-}
-\details{
-This function sets environment variables that we use internally
-within functions in this package to determine the right thing to do given your
-inputs. 
-
-In addition, \code{solr_connect} does a quick \code{GET} request to the URL you 
-provide to make sure the service is up.
-}
-\examples{
-\dontrun{
-# set solr settings
-solr_connect()
-
-# set solr settings with a proxy
-prox <- list(url = "187.62.207.130", port = 3128)
-solr_connect(url = "http://localhost:8983", proxy = prox)
-
-# get solr settings
-solr_settings()
-
-# you can also check your settings via Sys.getenv()
-Sys.getenv("SOLR_URL")
-Sys.getenv("SOLR_ERRORS")
-}
-}
-
diff --git a/man/solr_facet.Rd b/man/solr_facet.Rd
index 72a87a3..3f8d03a 100644
--- a/man/solr_facet.Rd
+++ b/man/solr_facet.Rd
@@ -4,164 +4,36 @@
 \alias{solr_facet}
 \title{Faceted search}
 \usage{
-solr_facet(name = NULL, q = "*:*", facet.query = NA, facet.field = NA,
-  facet.prefix = NA, facet.sort = NA, facet.limit = NA,
-  facet.offset = NA, facet.mincount = NA, facet.missing = NA,
-  facet.method = NA, facet.enum.cache.minDf = NA, facet.threads = NA,
-  facet.date = NA, facet.date.start = NA, facet.date.end = NA,
-  facet.date.gap = NA, facet.date.hardend = NA, facet.date.other = NA,
-  facet.date.include = NA, facet.range = NA, facet.range.start = NA,
-  facet.range.end = NA, facet.range.gap = NA, facet.range.hardend = NA,
-  facet.range.other = NA, facet.range.include = NA, facet.pivot = NA,
-  facet.pivot.mincount = NA, start = NA, rows = NA, key = NA,
-  wt = "json", raw = FALSE, callopts = list(), ...)
+solr_facet(conn, name = NULL, params = list(q = "*:*"), body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", concat = ",", ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms. See examples.}
-
-\item{facet.query}{This param allows you to specify an arbitrary query in the
-Lucene default syntax to generate a facet count. By default, faceting returns
-a count of the unique terms for a "field", while facet.query allows you to
-determine counts for arbitrary terms or expressions. This parameter can be
-specified multiple times to indicate that multiple queries should be used as
-separate facet constraints. It can be particularly useful for numeric range
-based facets, or prefix based facets -- see example below (i.e. price:[* TO 500]
-and  price:[501 TO *]).}
-
-\item{facet.field}{This param allows you to specify a field which should be
-treated as a facet. It will iterate over each Term in the field and generate a
-facet count using that Term as the constraint. This parameter can be specified
-multiple times to indicate multiple facet fields. None of the other params in
-this section will have any effect without specifying at least one field name
-using this param.}
-
-\item{facet.prefix}{Limits the terms on which to facet to those starting with
-the given string prefix. Note that unlike fq, this does not change the search
-results -- it merely reduces the facet values returned to those beginning with
-the specified prefix. This parameter can be specified on a per field basis.}
-
-\item{facet.sort}{See Details.}
-
-\item{facet.limit}{This param indicates the maximum number of constraint counts
-that should be returned for the facet fields. A negative value means unlimited.
-Default: 100. Can be specified on a per field basis.}
-
-\item{facet.offset}{This param indicates an offset into the list of constraints
-to allow paging. Default: 0. This parameter can be specified on a per field basis.}
-
-\item{facet.mincount}{This param indicates the minimum counts for facet fields
-should be included in the response. Default: 0. This parameter can be specified
-on a per field basis.}
-
-\item{facet.missing}{Set to "true" this param indicates that in addition to the
-Term based constraints of a facet field, a count of all matching results which
-have no value for the field should be computed. Default: FALSE. This parameter
-can be specified on a per field basis.}
-
-\item{facet.method}{See Details.}
-
-\item{facet.enum.cache.minDf}{This param indicates the minimum document frequency
-(number of documents matching a term) for which the filterCache should be used
-when determining the constraint count for that term. This is only used when
-facet.method=enum method of faceting. A value greater than zero will decrease
-memory usage of the filterCache, but increase the query time. When faceting on
-a field with a very large number of terms, and you wish to decrease memory usage,
-try a low value of 25 to 50 first. Default: 0, causing the filterCache to be used
-for all terms in the field. This parameter can be specified on a per field basis.}
-
-\item{facet.threads}{This param will cause loading the underlying fields used in
-faceting to be executed in parallel with the number of threads specified. Specify
-as facet.threads=# where # is the maximum number of threads used. Omitting this
-parameter or specifying the thread count as 0 will not spawn any threads just as
-before. Specifying a negative number of threads will spin up to Integer.MAX_VALUE
-threads. Currently this is limited to the fields, range and query facets are not
-yet supported. In at least one case this has reduced warmup times from 20 seconds
-to under 5 seconds.}
-
-\item{facet.date}{Specify names of fields (of type DateField) which should be
-treated as date facets. Can be specified multiple times to indicate multiple
-date facet fields.}
-
-\item{facet.date.start}{The lower bound for the first date range for all Date
-Faceting on this field. This should be a single date expression which may use
-the DateMathParser syntax. Can be specified on a per field basis.}
-
-\item{facet.date.end}{The minimum upper bound for the last date range for all
-Date Faceting on this field (see facet.date.hardend for an explanation of what
-the actual end value may be greater). This should be a single date expression
-which may use the DateMathParser syntax. Can be specified on a per field basis.}
-
-\item{facet.date.gap}{The size of each date range expressed as an interval to
-be added to the lower bound using the DateMathParser syntax. Eg:
-facet.date.gap=+1DAY. Can be specified on a per field basis.}
-
-\item{facet.date.hardend}{A Boolean parameter instructing Solr what to do in the
-event that facet.date.gap does not divide evenly between facet.date.start and
-facet.date.end. If this is true, the last date range constraint will have an
-upper bound of facet.date.end; if false, the last date range will have the smallest
-possible upper bound greater then facet.date.end such that the range is exactly
-facet.date.gap wide. Default: FALSE. This parameter can be specified on a per
-field basis.}
-
-\item{facet.date.other}{See Details.}
-
-\item{facet.date.include}{See Details.}
-
-\item{facet.range}{Indicates what field to create range facets for. Example:
-facet.range=price&facet.range=age}
-
-\item{facet.range.start}{The lower bound of the ranges. Can be specified on a
-per field basis. Example: f.price.facet.range.start=0.0&f.age.facet.range.start=10}
-
-\item{facet.range.end}{The upper bound of the ranges. Can be specified on a per
-field basis. Example: f.price.facet.range.end=1000.0&f.age.facet.range.start=99}
-
-\item{facet.range.gap}{The size of each range expressed as a value to be added
-to the lower bound. For date fields, this should be expressed using the
-DateMathParser syntax. (ie: facet.range.gap=+1DAY). Can be specified
-on a per field basis. Example: f.price.facet.range.gap=100&f.age.facet.range.gap=10}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{facet.range.hardend}{A Boolean parameter instructing Solr what to do in the
-event that facet.range.gap does not divide evenly between facet.range.start and
-facet.range.end. If this is true, the last range constraint will have an upper
-bound of facet.range.end; if false, the last range will have the smallest possible
-upper bound greater then facet.range.end such that the range is exactly
-facet.range.gap wide. Default: FALSE. This parameter can be specified on a
-per field basis.}
-
-\item{facet.range.other}{See Details.}
-
-\item{facet.range.include}{See Details.}
-
-\item{facet.pivot}{This param allows you to specify a single comma-separated string 
-of fields to allow you to facet within the results of the parent facet to return 
-counts in the format of SQL group by operation}
-
-\item{facet.pivot.mincount}{This param indicates the minimum counts for facet fields
-to be included in the response. Default: 0. This parameter should only be specified 
-once.}
-
-\item{start}{Record to start at, default to beginning.}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{rows}{Number of records to return.}
+\item{params}{(list) a named list of parameters, resulting in a GET request
+as long as no body parameters are given}
 
-\item{key}{API key, if needed.}
+\item{body}{(list) a named list of parameters; if given, a POST request
+will be performed}
 
-\item{wt}{(character) Data type returned, defaults to 'json'. One of json or xml. If json, 
-uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
 \item{raw}{(logical) If TRUE (default) raw json or xml returned. If FALSE,
 parsed data returned.}
 
-\item{callopts}{Call options passed on to httr::GET}
+\item{parsetype}{(character) One of 'list' or 'df'}
+
+\item{concat}{(character) Character used to concatenate elements longer than length 1.
+Note that this only works reliably when data format is json (wt='json'). The parsing
+is more complicated in XML format, but you can do that on your own.}
 
 \item{...}{Further args, usually per field arguments for faceting.}
 }
 \value{
-Raw json or xml, or a list of length 4 parsed elements (usually data.frame's).
+Raw json or xml, or a list of length 4 parsed elements
+(usually data.frame's).
 }
 \description{
 Returns only facet items
@@ -284,76 +156,211 @@ Can be specified on a per field basis. Can be specified multiple times to indica
 multiple choices. If you want to ensure you don't double-count, don't choose both
 lower & upper, don't choose outer, and don't choose all.
 }
+\section{Facet parameters}{
+
+\itemize{
+ \item name Name of a collection or core. Or leave as \code{NULL} if not needed.
+ \item q Query terms. See examples.
+ \item facet.query This param allows you to specify an arbitrary query in the
+Lucene default syntax to generate a facet count. By default, faceting returns
+a count of the unique terms for a "field", while facet.query allows you to
+determine counts for arbitrary terms or expressions. This parameter can be
+specified multiple times to indicate that multiple queries should be used as
+separate facet constraints. It can be particularly useful for numeric range
+based facets, or prefix based facets -- see example below (i.e. price:[* TO 500]
+and  price:[501 TO *]).
+ \item facet.field This param allows you to specify a field which should be
+treated as a facet. It will iterate over each Term in the field and generate a
+facet count using that Term as the constraint. This parameter can be specified
+multiple times to indicate multiple facet fields. None of the other params in
+this section will have any effect without specifying at least one field name
+using this param.
+ \item facet.prefix Limits the terms on which to facet to those starting with
+the given string prefix. Note that unlike fq, this does not change the search
+results -- it merely reduces the facet values returned to those beginning with
+the specified prefix. This parameter can be specified on a per field basis.
+ \item facet.sort See Details.
+ \item facet.limit This param indicates the maximum number of constraint counts
+that should be returned for the facet fields. A negative value means unlimited.
+Default: 100. Can be specified on a per field basis.
+ \item facet.offset This param indicates an offset into the list of constraints
+to allow paging. Default: 0. This parameter can be specified on a per field basis.
+ \item facet.mincount This param indicates the minimum count required for facet
+field values to be included in the response. Default: 0. This parameter can be
+specified on a per field basis.
+ \item facet.missing Set to "true", this param indicates that in addition to the
+Term based constraints of a facet field, a count of all matching results which
+have no value for the field should be computed. Default: FALSE. This parameter
+can be specified on a per field basis.
+ \item facet.method See Details.
+ \item facet.enum.cache.minDf This param indicates the minimum document frequency
+(number of documents matching a term) for which the filterCache should be used
+when determining the constraint count for that term. This is only used when
+facet.method=enum method of faceting. A value greater than zero will decrease
+memory usage of the filterCache, but increase the query time. When faceting on
+a field with a very large number of terms, and you wish to decrease memory usage,
+try a low value of 25 to 50 first. Default: 0, causing the filterCache to be used
+for all terms in the field. This parameter can be specified on a per field basis.
+ \item facet.threads This param will cause loading the underlying fields used in
+faceting to be executed in parallel with the number of threads specified. Specify
+as facet.threads=# where # is the maximum number of threads used. Omitting this
+parameter or specifying the thread count as 0 will not spawn any threads just as
+before. Specifying a negative number of threads will spin up to Integer.MAX_VALUE
+threads. Currently this is limited to field facets; range and query facets are
+not yet supported. In at least one case this has reduced warmup times from 20 seconds
+to under 5 seconds.
+ \item facet.date Specify names of fields (of type DateField) which should be
+treated as date facets. Can be specified multiple times to indicate multiple
+date facet fields.
+ \item facet.date.start The lower bound for the first date range for all Date
+Faceting on this field. This should be a single date expression which may use
+the DateMathParser syntax. Can be specified on a per field basis.
+ \item facet.date.end The minimum upper bound for the last date range for all
+Date Faceting on this field (see facet.date.hardend for an explanation of what
+the actual end value may be greater). This should be a single date expression
+which may use the DateMathParser syntax. Can be specified on a per field basis.
+ \item facet.date.gap The size of each date range expressed as an interval to
+be added to the lower bound using the DateMathParser syntax. Eg:
+facet.date.gap=+1DAY. Can be specified on a per field basis.
+ \item facet.date.hardend A Boolean parameter instructing Solr what to do in the
+event that facet.date.gap does not divide evenly between facet.date.start and
+facet.date.end. If this is true, the last date range constraint will have an
+upper bound of facet.date.end; if false, the last date range will have the smallest
+possible upper bound greater than facet.date.end such that the range is exactly
+facet.date.gap wide. Default: FALSE. This parameter can be specified on a per
+field basis.
+ \item facet.date.other See Details.
+ \item facet.date.include See Details.
+ \item facet.range Indicates what field to create range facets for. Example:
+facet.range=price&facet.range=age
+ \item facet.range.start The lower bound of the ranges. Can be specified on a
+per field basis. Example: f.price.facet.range.start=0.0&f.age.facet.range.start=10
+ \item facet.range.end The upper bound of the ranges. Can be specified on a per
+field basis. Example: f.price.facet.range.end=1000.0&f.age.facet.range.end=99
+ \item facet.range.gap The size of each range expressed as a value to be added
+to the lower bound. For date fields, this should be expressed using the
+DateMathParser syntax. (ie: facet.range.gap=+1DAY). Can be specified
+on a per field basis. Example: f.price.facet.range.gap=100&f.age.facet.range.gap=10
+ \item facet.range.hardend A Boolean parameter instructing Solr what to do in the
+event that facet.range.gap does not divide evenly between facet.range.start and
+facet.range.end. If this is true, the last range constraint will have an upper
+bound of facet.range.end; if false, the last range will have the smallest possible
+upper bound greater than facet.range.end such that the range is exactly
+facet.range.gap wide. Default: FALSE. This parameter can be specified on a
+per field basis.
+ \item facet.range.other See Details.
+ \item facet.range.include See Details.
+ \item facet.pivot This param allows you to specify a single comma-separated
+string of fields, letting you facet within the results of the parent facet and
+return counts in the manner of an SQL GROUP BY operation.
+ \item facet.pivot.mincount This param indicates the minimum counts for facet fields
+to be included in the response. Default: 0. This parameter should only be specified
+once.
+ \item start Record to start at; defaults to the beginning.
+ \item rows Number of records to return.
+ \item key API key, if needed.
+ \item wt (character) Data type returned, defaults to 'json'. One of json or xml. If json,
+  uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to
+  parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.
+}
+}
+
 \examples{
 \dontrun{
-# connect
-solr_connect('http://api.plos.org/search')
+# connect - local Solr instance
+(cli <- SolrClient$new())
+cli$facet("gettingstarted", params = list(q="*:*", facet.field='name'))
+cli$facet("gettingstarted", params = list(q="*:*", facet.field='name'),
+  callopts = list(verbose = TRUE))
+cli$facet("gettingstarted", body = list(q="*:*", facet.field='name'),
+  callopts = list(verbose = TRUE))
 
 # Facet on a single field
-solr_facet(q='*:*', facet.field='journal')
+solr_facet(cli, "gettingstarted", params = list(q='*:*', facet.field='name'))
+
+# Remote instance
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
 # Facet on multiple fields
-solr_facet(q='alcohol', facet.field=c('journal','subject'))
+solr_facet(cli, params = list(q='alcohol',
+  facet.field = c('journal','subject')))
 
 # Using mincount
-solr_facet(q='alcohol', facet.field='journal', facet.mincount='500')
+solr_facet(cli, params = list(q='alcohol', facet.field='journal',
+  facet.mincount='500'))
 
 # Using facet.query to get counts
-solr_facet(q='*:*', facet.field='journal', facet.query=c('cell','bird'))
+solr_facet(cli, params = list(q='*:*', facet.field='journal',
+  facet.query=c('cell','bird')))
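+
+# Using facet.prefix to restrict facet values to those starting with a string
+# (a sketch; the prefix value here is illustrative, not a known PLOS value)
+solr_facet(cli, params = list(q='*:*', facet.field='journal',
+  facet.prefix='plos'))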
 
 # Using facet.pivot to simulate SQL group by counts
-solr_facet(q='alcohol', facet.pivot='journal,subject',
-             facet.pivot.mincount=10)
-## two or more fields are required - you can pass in as a single character string
-solr_facet(facet.pivot = "journal,subject", facet.limit =  3)
+solr_facet(cli, params = list(q='alcohol', facet.pivot='journal,subject',
+             facet.pivot.mincount=10))
+## two or more fields are required - you can pass in as a single
+## character string
+solr_facet(cli, params = list(q='*:*', facet.pivot = "journal,subject",
+  facet.limit =  3))
 ## Or, pass in as a vector of length 2 or greater
-solr_facet(facet.pivot = c("journal", "subject"), facet.limit =  3)
+solr_facet(cli, params = list(q='*:*', facet.pivot = c("journal", "subject"),
+  facet.limit =  3))
 
 # Date faceting
-solr_facet(q='*:*', facet.date='publication_date',
-facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW', facet.date.gap='+1DAY')
+solr_facet(cli, params = list(q='*:*', facet.date='publication_date',
+  facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW',
+  facet.date.gap='+1DAY'))
 ## two variables
-solr_facet(q='*:*', facet.date=c('publication_date', 'timestamp'),
-facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW', facet.date.gap='+1DAY')
+solr_facet(cli, params = list(q='*:*',
+  facet.date=c('publication_date', 'timestamp'),
+  facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW',
+  facet.date.gap='+1DAY'))
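+## avoid double counting at range boundaries with facet.date.include
+## (a sketch following the Details section above: choose only 'lower')
+solr_facet(cli, params = list(q='*:*', facet.date='publication_date',
+  facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW',
+  facet.date.gap='+1DAY', facet.date.include='lower'))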
 
 # Range faceting
-solr_facet(q='*:*', facet.range='counter_total_all',
-facet.range.start=5, facet.range.end=1000, facet.range.gap=10)
+solr_facet(cli, params = list(q='*:*', facet.range='counter_total_all',
+  facet.range.start=5, facet.range.end=1000, facet.range.gap=10))
 
 # Range faceting with > 1 field, same settings
-solr_facet(q='*:*', facet.range=c('counter_total_all','alm_twitterCount'),
-facet.range.start=5, facet.range.end=1000, facet.range.gap=10)
+solr_facet(cli, params = list(q='*:*',
+  facet.range=c('counter_total_all','alm_twitterCount'),
+  facet.range.start=5, facet.range.end=1000, facet.range.gap=10))
 
 # Range faceting with > 1 field, different settings
-solr_facet(q='*:*', facet.range=c('counter_total_all','alm_twitterCount'),
-f.counter_total_all.facet.range.start=5, f.counter_total_all.facet.range.end=1000,
-f.counter_total_all.facet.range.gap=10, f.alm_twitterCount.facet.range.start=5,
-f.alm_twitterCount.facet.range.end=1000, f.alm_twitterCount.facet.range.gap=10)
+solr_facet(cli, params = list(q='*:*',
+  facet.range=c('counter_total_all','alm_twitterCount'),
+  f.counter_total_all.facet.range.start=5,
+  f.counter_total_all.facet.range.end=1000,
+  f.counter_total_all.facet.range.gap=10,
+  f.alm_twitterCount.facet.range.start=5,
+  f.alm_twitterCount.facet.range.end=1000,
+  f.alm_twitterCount.facet.range.gap=10))
 
 # Get raw json or xml
 ## json
-solr_facet(q='*:*', facet.field='journal', raw=TRUE)
+solr_facet(cli, params = list(q='*:*', facet.field='journal'), raw=TRUE)
 ## xml
-solr_facet(q='*:*', facet.field='journal', raw=TRUE, wt='xml')
+solr_facet(cli, params = list(q='*:*', facet.field='journal', wt='xml'),
+  raw=TRUE)
 
 # Get raw data back, and parse later, same as what goes on internally if
 # raw=FALSE (Default)
-out <- solr_facet(q='*:*', facet.field='journal', raw=TRUE)
+out <- solr_facet(cli, params = list(q='*:*', facet.field='journal'),
+  raw=TRUE)
 solr_parse(out)
-out <- solr_facet(q='*:*', facet.field='journal', raw=TRUE,
-   wt='xml')
+out <- solr_facet(cli, params = list(q='*:*', facet.field='journal',
+  wt = 'xml'), raw=TRUE)
 solr_parse(out)
 
 # Using the USGS BISON API (https://bison.usgs.gov/#solr)
 ## The occurrence endpoint
-solr_connect("https://bison.usgs.gov/solr/occurrences/select")
-solr_facet(q='*:*', facet.field='year')
-solr_facet(q='*:*', facet.field='computedStateFips')
+(cli <- SolrClient$new(host = "bison.usgs.gov", scheme = "https",
+  path = "solr/occurrences/select", port = NULL))
+solr_facet(cli, params = list(q='*:*', facet.field='year'))
+solr_facet(cli, params = list(q='*:*', facet.field='computedStateFips'))
 
 # using a proxy
-# prox <- list(url = "54.195.48.153", port = 8888)
-# solr_connect(url = 'http://api.plos.org/search', proxy = prox)
-# solr_facet(facet.field='journal', callopts=verbose())
+# cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL,
+#   proxy = list(url = "http://54.195.48.153:8888"))
+# solr_facet(cli, params = list(facet.field='journal'),
+#   callopts=list(verbose=TRUE))
 }
 }
 \references{
@@ -361,6 +368,5 @@ See \url{http://wiki.apache.org/solr/SimpleFacetParameters} for
 more information on faceting.
 }
 \seealso{
-\code{\link{solr_search}}, \code{\link{solr_highlight}}, \code{\link{solr_parse}}
+\code{\link[=solr_search]{solr_search()}}, \code{\link[=solr_highlight]{solr_highlight()}}, \code{\link[=solr_parse]{solr_parse()}}
 }
-
diff --git a/man/solr_get.Rd b/man/solr_get.Rd
index 6696787..bb3a14b 100644
--- a/man/solr_get.Rd
+++ b/man/solr_get.Rd
@@ -4,25 +4,27 @@
 \alias{solr_get}
 \title{Real time get}
 \usage{
-solr_get(ids, name, fl = NULL, wt = "json", raw = FALSE, ...)
+solr_get(conn, ids, name, fl = NULL, wt = "json", raw = FALSE, ...)
 }
 \arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
 \item{ids}{Document IDs, one or more in a vector or list}
 
 \item{name}{(character) A collection or core name. Required.}
 
-\item{fl}{Fields to return, can be a character vector like \code{c('id', 'title')},
-or a single character vector with one or more comma separated names, like
-\code{'id,title'}}
+\item{fl}{Fields to return, can be a character vector like
+\code{c('id', 'title')}, or a single character vector with one or more
+comma separated names, like \code{'id,title'}}
 
 \item{wt}{(character) One of json (default) or xml. Data type returned.
-If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-\code{\link[xml2]{read_xml}} to parse.}
+If json, uses \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses
+\code{\link[xml2:read_xml]{xml2::read_xml()}} to parse.}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
 Get documents by id
@@ -32,21 +34,20 @@ We use json internally as data interchange format for this function.
 }
 \examples{
 \dontrun{
-solr_connect()
+(cli <- SolrClient$new())
 
 # add some documents first
 ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
-add(ss, name = "gettingstarted")
+add(cli, ss, name = "gettingstarted")
 
 # Now, get documents by id
-solr_get(ids = 1, "gettingstarted")
-solr_get(ids = 2, "gettingstarted")
-solr_get(ids = c(1, 2), "gettingstarted")
-solr_get(ids = "1,2", "gettingstarted")
+solr_get(cli, ids = 1, "gettingstarted")
+solr_get(cli, ids = 2, "gettingstarted")
+solr_get(cli, ids = c(1, 2), "gettingstarted")
+solr_get(cli, ids = "1,2", "gettingstarted")
 
 # Get raw JSON
-solr_get(ids = 1, "gettingstarted", raw = TRUE, wt = "json")
-solr_get(ids = 1, "gettingstarted", raw = TRUE, wt = "xml")
+solr_get(cli, ids = 1, "gettingstarted", raw = TRUE, wt = "json")
+solr_get(cli, ids = 1, "gettingstarted", raw = TRUE, wt = "xml")
 }
 }
-
diff --git a/man/solr_group.Rd b/man/solr_group.Rd
index c830a79..f17d81e 100644
--- a/man/solr_group.Rd
+++ b/man/solr_group.Rd
@@ -4,86 +4,31 @@
 \alias{solr_group}
 \title{Grouped search}
 \usage{
-solr_group(name = NULL, q = "*:*", start = 0, rows = NA, sort = NA,
-  fq = NA, fl = NULL, wt = "json", key = NA, group.field = NA,
-  group.limit = NA, group.offset = NA, group.sort = NA, group.main = NA,
-  group.ngroups = NA, group.cache.percent = NA, group.query = NA,
-  group.format = NA, group.func = NA, callopts = list(), raw = FALSE,
-  parsetype = "df", concat = ",", ...)
+solr_group(conn, name = NULL, params = NULL, body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", concat = ",", ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms, defaults to '*:*', or everything.}
-
-\item{start}{[number] The offset into the list of groups.}
-
-\item{rows}{[number] The number of groups to return. Defaults to 10.}
-
-\item{sort}{How to sort the groups relative to each other. For example, 
-sort=popularity desc will cause the groups to be sorted according to the highest 
-popularity doc in each group. Defaults to "score desc".}
-
-\item{fq}{Filter query, this does not affect the search, only what gets returned}
-
-\item{fl}{Fields to return}
-
-\item{wt}{(character) Data type returned, defaults to 'json'. One of json or xml. If json, 
-uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.}
-
-\item{key}{API key, if needed.}
-
-\item{group.field}{[fieldname] Group based on the unique values of a field. The 
-field must currently be single-valued and must be either indexed, or be another 
-field type that has a value source and works in a function query - such as 
-ExternalFileField. Note: for Solr 3.x versions the field must by a string like 
-field such as StrField or TextField, otherwise a http status 400 is returned.}
-
-\item{group.limit}{[number] The number of results (documents) to return for each 
-group. Defaults to 1.}
-
-\item{group.offset}{[number] The offset into the document list of each group.}
-
-\item{group.sort}{How to sort documents within a single group. Defaults 
-to the same value as the sort parameter.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{group.main}{(logical) If true, the result of the last field grouping command 
-is used as the main result list in the response, using group.format=simple}
-
-\item{group.ngroups}{(logical) If true, includes the number of groups that have 
-matched the query. Default is false. <!> Solr4.1 WARNING: If this parameter is set 
-to true on a sharded environment, all the documents that belong to the same group 
-have to be located in the same shard, otherwise the count will be incorrect. If you 
-are using SolrCloud, consider using "custom hashing"}
-
-\item{group.cache.percent}{[0-100] If > 0 enables grouping cache. Grouping is executed 
-actual two searches. This option caches the second search. A value of 0 disables 
-grouping caching. Default is 0. Tests have shown that this cache only improves search 
-time with boolean queries, wildcard queries and fuzzy queries. For simple queries like 
-a term query or a match all query this cache has a negative impact on performance}
-
-\item{group.query}{[query] Return a single group of documents that also match the 
-given query.}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{group.format}{One of grouped or simple. If simple, the grouped documents are 
-presented in a single flat list. The start and rows parameters refer to numbers of 
-documents instead of numbers of groups.}
+\item{params}{(list) a named list of parameters, resulting in a GET request
+as long as no body parameters are given}
 
-\item{group.func}{[function query] Group based on the unique values of a function 
-query. <!> Solr4.0 This parameter only is supported on 4.0}
+\item{body}{(list) a named list of parameters; if given, a POST request
+will be performed}
 
-\item{callopts}{Call options passed on to httr::GET}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
 \item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
 
 \item{parsetype}{(character) One of 'list' or 'df'}
 
-\item{concat}{(character) Character to concatenate elements of longer than length 1. 
+\item{concat}{(character) Character used to concatenate elements longer than length 1.
 Note that this only works reliably when data format is json (wt='json'). The parsing
 is more complicated in XML format, but you can do that on your own.}
 
-\item{...}{Further args.}
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
@@ -91,69 +36,120 @@ XML, JSON, a list, or data.frame
 \description{
 Returns only group items
 }
+\section{Group parameters}{
+
+\itemize{
+ \item q Query terms, defaults to '*:*', or everything.
+ \item fq Filter query, this does not affect the search, only what gets returned
+ \item fl Fields to return
+ \item wt (character) Data type returned, defaults to 'json'. One of json or xml. If json,
+uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to
+parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.
+ \item key API key, if needed.
+ \item group.field [fieldname] Group based on the unique values of a field. The
+field must currently be single-valued and must be either indexed, or be another
+field type that has a value source and works in a function query - such as
+ExternalFileField. Note: for Solr 3.x versions the field must be a string-like
+field such as StrField or TextField, otherwise an http status 400 is returned.
+ \item group.func [function query] Group based on the unique values of a function
+query. <!> Solr4.0: this parameter is only supported on 4.0
+ \item group.query [query] Return a single group of documents that also match the
+given query.
+ \item rows [number] The number of groups to return. Defaults to 10.
+ \item start [number] The offset into the list of groups.
+ \item group.limit [number] The number of results (documents) to return for each
+group. Defaults to 1.
+ \item group.offset [number] The offset into the document list of each group.
+ \item sort How to sort the groups relative to each other. For example,
+sort=popularity desc will cause the groups to be sorted according to the highest
+popularity doc in each group. Defaults to "score desc".
+ \item group.sort How to sort documents within a single group. Defaults
+to the same value as the sort parameter.
+ \item group.format One of grouped or simple. If simple, the grouped documents are
+presented in a single flat list. The start and rows parameters refer to numbers of
+documents instead of numbers of groups.
+ \item group.main (logical) If true, the result of the last field grouping command
+is used as the main result list in the response, using group.format=simple
+ \item group.ngroups (logical) If true, includes the number of groups that have
+matched the query. Default is false. <!> Solr4.1 WARNING: If this parameter is set
+to true on a sharded environment, all the documents that belong to the same group
+have to be located in the same shard, otherwise the count will be incorrect. If you
+are using SolrCloud, consider using "custom hashing"
+ \item group.cache.percent [0-100] If > 0 enables grouping cache. Grouping is
+actually executed as two searches; this option caches the second search. A value of 0 disables
+grouping caching. Default is 0. Tests have shown that this cache only improves search
+time with boolean queries, wildcard queries and fuzzy queries. For simple queries like
+a term query or a match all query this cache has a negative impact on performance
+}
+}
+
 \examples{
 \dontrun{
 # connect
-solr_connect('http://api.plos.org/search')
+(cli <- SolrClient$new())
+
+# by default we do a GET request
+cli$group("gettingstarted",
+  params = list(q='*:*', group.field='compName_s'))
+# OR
+solr_group(cli, "gettingstarted",
+  params = list(q='*:*', group.field='compName_s'))
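+# or send the same query in a POST body instead (a sketch; only the
+# params -> body switch changes)
+solr_group(cli, "gettingstarted",
+  body = list(q='*:*', group.field='compName_s'))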
+
+# connect
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
 # Basic group query
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'))
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl='article_type')
+solr_group(cli, params = list(q='ecology', group.field='journal',
+  group.limit=3, fl=c('id','score')))
+solr_group(cli, params = list(q='ecology', group.field='journal',
+  group.limit=3, fl='article_type'))
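+
+# Include the count of matching groups with group.ngroups (a sketch; see
+# the sharding warning in the Group parameters section)
+solr_group(cli, params = list(q='ecology', group.field='journal',
+  group.limit=1, fl='id', group.ngroups='true'))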
 
 # Different ways to sort (notice diff btw sort and group.sort)
 # note that you can only sort on a field if you return that field
-solr_group(q='ecology', group.field='journal', group.limit=3,
-   fl=c('id','score'))
-solr_group(q='ecology', group.field='journal', group.limit=3,
-   fl=c('id','score','alm_twitterCount'), group.sort='alm_twitterCount desc')
-solr_group(q='ecology', group.field='journal', group.limit=3,
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+   fl=c('id','score')))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+   fl=c('id','score','alm_twitterCount'), group.sort='alm_twitterCount desc'))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
    fl=c('id','score','alm_twitterCount'), sort='score asc',
-   group.sort='alm_twitterCount desc')
+   group.sort='alm_twitterCount desc'))
 
 # Two group.field values
-out <- solr_group(q='ecology', group.field=c('journal','article_type'),
-  group.limit=3,
-  fl='id', raw=TRUE)
+out <- solr_group(cli, params = list(q='ecology', group.field=c('journal','article_type'),
+  group.limit=3, fl='id'), raw=TRUE)
 solr_parse(out)
 solr_parse(out, 'df')
 
 # Get two groups, one with alm_twitterCount of 0-10, and another group
 # with 10 to infinity
-solr_group(q='ecology', group.limit=3, fl=c('id','alm_twitterCount'),
- group.query=c('alm_twitterCount:[0 TO 10]','alm_twitterCount:[10 TO *]'))
+solr_group(cli, params = list(q='ecology', group.limit=3, fl=c('id','alm_twitterCount'),
+ group.query=c('alm_twitterCount:[0 TO 10]','alm_twitterCount:[10 TO *]')))
 
 # Use of group.format (simple vs grouped) and group.main.
 ## The raw data structure of these two calls are slightly different, but
 ## the parsing inside the function outputs the same results. You can
 ## of course set raw=TRUE to get back what the data actually look like
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), group.format='simple')
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), group.format='grouped')
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), group.format='grouped', group.main='true')
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), group.format='simple'))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), group.format='grouped'))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), group.format='grouped', group.main='true'))
 
 # xml back
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), wt = "xml")
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), wt = "xml", parsetype = "list")
-res <- solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl=c('id','score'), wt = "xml", raw = TRUE)
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), wt = "xml"))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), wt = "xml"), parsetype = "list")
+res <- solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl=c('id','score'), wt = "xml"), raw = TRUE)
 library("xml2")
 xml2::read_xml(unclass(res))
 
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl='article_type', wt = "xml")
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl='article_type', wt = "xml", parsetype = "list")
-
-# examples with Dryad's Solr instance
-solr_connect("http://datadryad.org/solr/search/select")
-solr_group(q='ecology', group.field='journal', group.limit=3,
-  fl='article_type')
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl='article_type', wt = "xml"))
+solr_group(cli, params = list(q='ecology', group.field='journal', group.limit=3,
+  fl='article_type', wt = "xml"), parsetype = "list")
 }
 }
 \references{
@@ -161,6 +157,5 @@ See \url{http://wiki.apache.org/solr/FieldCollapsing} for more
 information.
 }
 \seealso{
-\code{\link{solr_highlight}}, \code{\link{solr_facet}}
+\code{\link[=solr_highlight]{solr_highlight()}}, \code{\link[=solr_facet]{solr_facet()}}
 }
-
diff --git a/man/solr_highlight.Rd b/man/solr_highlight.Rd
index df696f3..3d019b4 100644
--- a/man/solr_highlight.Rd
+++ b/man/solr_highlight.Rd
@@ -4,180 +4,28 @@
 \alias{solr_highlight}
 \title{Highlighting search}
 \usage{
-solr_highlight(name = NULL, q, hl.fl = NULL, hl.snippets = NULL,
-  hl.fragsize = NULL, hl.q = NULL, hl.mergeContiguous = NULL,
-  hl.requireFieldMatch = NULL, hl.maxAnalyzedChars = NULL,
-  hl.alternateField = NULL, hl.maxAlternateFieldLength = NULL,
-  hl.preserveMulti = NULL, hl.maxMultiValuedToExamine = NULL,
-  hl.maxMultiValuedToMatch = NULL, hl.formatter = NULL,
-  hl.simple.pre = NULL, hl.simple.post = NULL, hl.fragmenter = NULL,
-  hl.fragListBuilder = NULL, hl.fragmentsBuilder = NULL,
-  hl.boundaryScanner = NULL, hl.bs.maxScan = NULL, hl.bs.chars = NULL,
-  hl.bs.type = NULL, hl.bs.language = NULL, hl.bs.country = NULL,
-  hl.useFastVectorHighlighter = NULL, hl.usePhraseHighlighter = NULL,
-  hl.highlightMultiTerm = NULL, hl.regex.slop = NULL,
-  hl.regex.pattern = NULL, hl.regex.maxAnalyzedChars = NULL, start = 0,
-  rows = NULL, wt = "json", raw = FALSE, key = NULL,
-  callopts = list(), fl = "DOES_NOT_EXIST", fq = NULL,
-  parsetype = "list")
+solr_highlight(conn, name = NULL, params = NULL, body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms. See examples.}
-
-\item{hl.fl}{A comma-separated list of fields for which to generate highlighted snippets. 
-If left blank, the fields highlighted for the LuceneQParser are the defaultSearchField 
-(or the df param if used) and for the DisMax parser the qf fields are used. A '*' can 
-be used to match field globs, e.g. 'text_*' or even '*' to highlight on all fields where 
-highlighting is possible. When using '*', consider adding hl.requireFieldMatch=TRUE.}
-
-\item{hl.snippets}{Max no. of highlighted snippets to generate per field. Note: 
-it is possible for any number of snippets from zero to this value to be generated. 
-This parameter accepts per-field overrides. Default: 1.}
-
-\item{hl.fragsize}{The size, in characters, of the snippets (aka fragments) created by 
-the highlighter. In the original Highlighter, "0" indicates that the whole field value 
-should be used with no fragmenting. See 
-\url{http://wiki.apache.org/solr/HighlightingParameters} for more info.}
-
-\item{hl.q}{Set a query request to be highlighted. It overrides q parameter for 
-highlighting. Solr query syntax is acceptable for this parameter.}
-
-\item{hl.mergeContiguous}{Collapse contiguous fragments into a single fragment. "true" 
-indicates contiguous fragments will be collapsed into single fragment. This parameter 
-accepts per-field overrides. This parameter makes sense for the original Highlighter 
-only. Default: FALSE.}
-
-\item{hl.requireFieldMatch}{If TRUE, then a field will only be highlighted if the 
-query matched in this particular field (normally, terms are highlighted in all 
-requested fields regardless of which field matched the query). This only takes effect 
-if "hl.usePhraseHighlighter" is TRUE. Default: FALSE.}
-
-\item{hl.maxAnalyzedChars}{How many characters into a document to look for suitable 
-snippets. This parameter makes sense for the original Highlighter only. Default: 51200. 
-You can assign a large value to this parameter and use hl.fragsize=0 to return 
-highlighting in large fields that have size greater than 51200 characters.}
-
-\item{hl.alternateField}{If a snippet cannot be generated (due to no terms matching), 
-you can specify a field to use as the fallback. This parameter accepts per-field overrides.}
-
-\item{hl.maxAlternateFieldLength}{If hl.alternateField is specified, this parameter 
-specifies the maximum number of characters of the field to return. Any value less than or 
-equal to 0 means unlimited. Default: unlimited.}
-
-\item{hl.preserveMulti}{Preserve order of values in a multiValued list. Default: FALSE.}
-
-\item{hl.maxMultiValuedToExamine}{When highlighting a multiValued field, stop examining 
-the individual entries after looking at this many of them. Will potentially return 0 
-snippets if this limit is reached before any snippets are found. If maxMultiValuedToMatch 
-is also specified, whichever limit is hit first will terminate looking for more. 
-Default: Integer.MAX_VALUE}
-
-\item{hl.maxMultiValuedToMatch}{When highlighting a multiValued field, stop examining 
-the individual entries after looking at this many matches are found. If 
-maxMultiValuedToExamine is also specified, whichever limit is hit first will terminate 
-looking for more. Default: Integer.MAX_VALUE}
-
-\item{hl.formatter}{Specify a formatter for the highlight output. Currently the only 
-legal value is "simple", which surrounds a highlighted term with a customizable pre- and 
-post text snippet. This parameter accepts per-field overrides. This parameter makes 
-sense for the original Highlighter only.}
-
-\item{hl.simple.pre}{The text which appears before and after a highlighted term when using 
-the simple formatter. This parameter accepts per-field overrides. The default values are 
-"<em>" and "</em>" This parameter makes sense for the original Highlighter only. Use 
-hl.tag.pre and hl.tag.post for FastVectorHighlighter (see example under hl.fragmentsBuilder)}
-
-\item{hl.simple.post}{The text which appears before and after a highlighted term when using 
-the simple formatter. This parameter accepts per-field overrides. The default values are 
-"<em>" and "</em>" This parameter makes sense for the original Highlighter only. Use 
-hl.tag.pre and hl.tag.post for FastVectorHighlighter (see example under hl.fragmentsBuilder)}
-
-\item{hl.fragmenter}{Specify a text snippet generator for highlighted text. The standard 
-fragmenter is gap (which is so called because it creates fixed-sized fragments with gaps 
-for multi-valued fields). Another option is regex, which tries to create fragments that 
-"look like" a certain regular expression. This parameter accepts per-field overrides. 
-Default: "gap"}
-
-\item{hl.fragListBuilder}{Specify the name of SolrFragListBuilder.  This parameter 
-makes sense for FastVectorHighlighter only. To create a fragSize=0 with the 
-FastVectorHighlighter, use the SingleFragListBuilder. This field supports per-field 
-overrides.}
-
-\item{hl.fragmentsBuilder}{Specify the name of SolrFragmentsBuilder. This parameter makes 
-sense for FastVectorHighlighter only.}
-
-\item{hl.boundaryScanner}{Configures how the boundaries of fragments are determined. By 
-default, boundaries will split at the character level, creating a fragment such as "uick 
-brown fox jumps over the la". Valid entries are breakIterator or simple, with breakIterator 
-being the most commonly used. This parameter makes sense for FastVectorHighlighter only.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{hl.bs.maxScan}{Specify the length of characters to be scanned by SimpleBoundaryScanner. 
-Default: 10.  This parameter makes sense for FastVectorHighlighter only.}
-
-\item{hl.bs.chars}{Specify the boundary characters, used by SimpleBoundaryScanner. 
-This parameter makes sense for FastVectorHighlighter only.}
-
-\item{hl.bs.type}{Specify one of CHARACTER, WORD, SENTENCE and LINE, used by 
-BreakIteratorBoundaryScanner. Default: WORD. This parameter makes sense for 
-FastVectorHighlighter only.}
-
-\item{hl.bs.language}{Specify the language for Locale that is used by 
-BreakIteratorBoundaryScanner. This parameter makes sense for FastVectorHighlighter only. 
-Valid entries take the form of ISO 639-1 strings.}
-
-\item{hl.bs.country}{Specify the country for Locale that is used by 
-BreakIteratorBoundaryScanner. This parameter makes sense for FastVectorHighlighter only. 
-Valid entries take the form of ISO 3166-1 alpha-2 strings.}
-
-\item{hl.useFastVectorHighlighter}{Use FastVectorHighlighter. FastVectorHighlighter 
-requires the field is termVectors=on, termPositions=on and termOffsets=on. This 
-parameter accepts per-field overrides. Default: FALSE}
-
-\item{hl.usePhraseHighlighter}{Use SpanScorer to highlight phrase terms only when 
-they appear within the query phrase in the document. Default: TRUE.}
-
-\item{hl.highlightMultiTerm}{If the SpanScorer is also being used, enables highlighting 
-for range/wildcard/fuzzy/prefix queries. Default: FALSE. This parameter makes sense 
-for the original Highlighter only.}
-
-\item{hl.regex.slop}{Factor by which the regex fragmenter can stray from the ideal 
-fragment size (given by hl.fragsize) to accomodate the regular expression. For 
-instance, a slop of 0.2 with fragsize of 100 should yield fragments between 80 
-and 120 characters in length. It is usually good to provide a slightly smaller 
-fragsize when using the regex fragmenter. Default: .6. This parameter makes sense 
-for the original Highlighter only.}
-
-\item{hl.regex.pattern}{The regular expression for fragmenting. This could be 
-used to extract sentences (see example solrconfig.xml) This parameter makes sense 
-for the original Highlighter only.}
-
-\item{hl.regex.maxAnalyzedChars}{Only analyze this many characters from a field 
-when using the regex fragmenter (after which, the fragmenter produces fixed-sized 
-fragments). Applying a complicated regex to a huge field is expensive. 
-Default: 10000. This parameter makes sense for the original Highlighter only.}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{start}{Record to start at, default to beginning.}
+\item{params}{(list) a named list of parameters, resulting in a GET request
+as long as no body parameters are given}
 
-\item{rows}{Number of records to return.}
+\item{body}{(list) a named list of parameters; if given, a POST request
+will be performed}
 
-\item{wt}{(character) Data type returned, defaults to 'json'. One of json or xml. If json, 
-uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
 \item{raw}{(logical) If TRUE (default) raw json or xml returned. If FALSE,
 parsed data returned.}
 
-\item{key}{API key, if needed.}
-
-\item{callopts}{Call options passed on to httr::GET}
-
-\item{fl}{Fields to return}
-
-\item{fq}{Filter query, this does not affect the search, only what gets returned}
-
 \item{parsetype}{One of 'list' or 'df' (data.frame)}
+
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
@@ -185,30 +33,145 @@ XML, JSON, a list, or data.frame
 \description{
 Returns only highlight items
 }
-\details{
-The \code{verbose} parameter dropped. See \code{\link{solr_connect}}, which
-can be used to set verbose status.
+\section{Highlighting parameters}{
+
+\itemize{
+ \item q Query terms. See examples.
+ \item hl.fl A comma-separated list of fields for which to generate highlighted snippets.
+If left blank, the fields highlighted for the LuceneQParser are the defaultSearchField
+(or the df param if used) and for the DisMax parser the qf fields are used. A '*' can
+be used to match field globs, e.g. 'text_*' or even '*' to highlight on all fields where
+highlighting is possible. When using '*', consider adding hl.requireFieldMatch=TRUE.
+ \item hl.snippets Max no. of highlighted snippets to generate per field. Note:
+it is possible for any number of snippets from zero to this value to be generated.
+This parameter accepts per-field overrides. Default: 1.
+ \item hl.fragsize The size, in characters, of the snippets (aka fragments) created by
+the highlighter. In the original Highlighter, "0" indicates that the whole field value
+should be used with no fragmenting. See
+\url{http://wiki.apache.org/solr/HighlightingParameters} for more info.
+ \item hl.q Set a query request to be highlighted. It overrides q parameter for
+highlighting. Solr query syntax is acceptable for this parameter.
+ \item hl.mergeContiguous Collapse contiguous fragments into a single fragment. "true"
+indicates contiguous fragments will be collapsed into single fragment. This parameter
+accepts per-field overrides. This parameter makes sense for the original Highlighter
+only. Default: FALSE.
+ \item hl.requireFieldMatch If TRUE, then a field will only be highlighted if the
+query matched in this particular field (normally, terms are highlighted in all
+requested fields regardless of which field matched the query). This only takes effect
+if "hl.usePhraseHighlighter" is TRUE. Default: FALSE.
+ \item hl.maxAnalyzedChars How many characters into a document to look for suitable
+snippets. This parameter makes sense for the original Highlighter only. Default: 51200.
+You can assign a large value to this parameter and use hl.fragsize=0 to return
+highlighting in large fields that have size greater than 51200 characters.
+ \item hl.alternateField If a snippet cannot be generated (due to no terms matching),
+you can specify a field to use as the fallback. This parameter accepts per-field overrides.
+ \item hl.maxAlternateFieldLength If hl.alternateField is specified, this parameter
+specifies the maximum number of characters of the field to return. Any value less than or
+equal to 0 means unlimited. Default: unlimited.
+ \item hl.preserveMulti Preserve order of values in a multiValued list. Default: FALSE.
+ \item hl.maxMultiValuedToExamine When highlighting a multiValued field, stop examining
+the individual entries after looking at this many of them. Will potentially return 0
+snippets if this limit is reached before any snippets are found. If maxMultiValuedToMatch
+is also specified, whichever limit is hit first will terminate looking for more.
+Default: Integer.MAX_VALUE
+ \item hl.maxMultiValuedToMatch When highlighting a multiValued field, stop examining
+the individual entries after this many matches are found. If
+maxMultiValuedToExamine is also specified, whichever limit is hit first will terminate
+looking for more. Default: Integer.MAX_VALUE
+ \item hl.formatter Specify a formatter for the highlight output. Currently the only
+legal value is "simple", which surrounds a highlighted term with a customizable pre- and
+post text snippet. This parameter accepts per-field overrides. This parameter makes
+sense for the original Highlighter only.
+ \item hl.simple.pre The text which appears before a highlighted term when using
+the simple formatter. This parameter accepts per-field overrides. The default
+value is "<em>". This parameter makes sense for the original Highlighter only. Use
+hl.tag.pre and hl.tag.post for FastVectorHighlighter (see example under hl.fragmentsBuilder)
+ \item hl.simple.post The text which appears after a highlighted term when using
+the simple formatter. This parameter accepts per-field overrides. The default
+value is "</em>". This parameter makes sense for the original Highlighter only. Use
+hl.tag.pre and hl.tag.post for FastVectorHighlighter (see example under hl.fragmentsBuilder)
+ \item hl.fragmenter Specify a text snippet generator for highlighted text. The standard
+fragmenter is gap (which is so called because it creates fixed-sized fragments with gaps
+for multi-valued fields). Another option is regex, which tries to create fragments that
+"look like" a certain regular expression. This parameter accepts per-field overrides.
+Default: "gap"
+ \item hl.fragListBuilder Specify the name of SolrFragListBuilder.  This parameter
+makes sense for FastVectorHighlighter only. To create a fragSize=0 with the
+FastVectorHighlighter, use the SingleFragListBuilder. This field supports per-field
+overrides.
+ \item hl.fragmentsBuilder Specify the name of SolrFragmentsBuilder. This parameter makes
+sense for FastVectorHighlighter only.
+ \item hl.boundaryScanner Configures how the boundaries of fragments are determined. By
+default, boundaries will split at the character level, creating a fragment such as "uick
+brown fox jumps over the la". Valid entries are breakIterator or simple, with breakIterator
+being the most commonly used. This parameter makes sense for FastVectorHighlighter only.
+ \item hl.bs.maxScan Specify the length of characters to be scanned by SimpleBoundaryScanner.
+Default: 10.  This parameter makes sense for FastVectorHighlighter only.
+ \item hl.bs.chars Specify the boundary characters, used by SimpleBoundaryScanner.
+This parameter makes sense for FastVectorHighlighter only.
+ \item hl.bs.type Specify one of CHARACTER, WORD, SENTENCE and LINE, used by
+BreakIteratorBoundaryScanner. Default: WORD. This parameter makes sense for
+FastVectorHighlighter only.
+ \item hl.bs.language Specify the language for Locale that is used by
+BreakIteratorBoundaryScanner. This parameter makes sense for FastVectorHighlighter only.
+Valid entries take the form of ISO 639-1 strings.
+ \item hl.bs.country Specify the country for Locale that is used by
+BreakIteratorBoundaryScanner. This parameter makes sense for FastVectorHighlighter only.
+Valid entries take the form of ISO 3166-1 alpha-2 strings.
+ \item hl.useFastVectorHighlighter Use FastVectorHighlighter. FastVectorHighlighter
+requires the field to have termVectors=on, termPositions=on and termOffsets=on. This
+parameter accepts per-field overrides. Default: FALSE
+ \item hl.usePhraseHighlighter Use SpanScorer to highlight phrase terms only when
+they appear within the query phrase in the document. Default: TRUE.
+ \item hl.highlightMultiTerm If the SpanScorer is also being used, enables highlighting
+for range/wildcard/fuzzy/prefix queries. Default: FALSE. This parameter makes sense
+for the original Highlighter only.
+ \item hl.regex.slop Factor by which the regex fragmenter can stray from the ideal
+fragment size (given by hl.fragsize) to accommodate the regular expression. For
+instance, a slop of 0.2 with fragsize of 100 should yield fragments between 80
+and 120 characters in length. It is usually good to provide a slightly smaller
+fragsize when using the regex fragmenter. Default: .6. This parameter makes sense
+for the original Highlighter only.
+ \item hl.regex.pattern The regular expression for fragmenting. This could be
+used to extract sentences (see example solrconfig.xml). This parameter makes sense
+for the original Highlighter only.
+ \item hl.regex.maxAnalyzedChars Only analyze this many characters from a field
+when using the regex fragmenter (after which, the fragmenter produces fixed-sized
+fragments). Applying a complicated regex to a huge field is expensive.
+Default: 10000. This parameter makes sense for the original Highlighter only.
+ \item start Record to start at; defaults to the beginning.
+ \item rows Number of records to return.
+ \item wt (character) Data type returned, defaults to 'json'. One of json or xml. If json,
+uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to
+parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.
+ \item fl Fields to return
+ \item fq Filter query, this does not affect the search, only what gets returned
 }
+}
+
 \examples{
 \dontrun{
 # connect
-solr_connect('http://api.plos.org/search')
+(conn <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
 # highlight search
-solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10)
-solr_highlight(q='alcohol', hl.fl = c('abstract','title'), rows=3)
+solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10),
+  parsetype = "list")
+solr_highlight(conn, params = list(q='alcohol', hl.fl = c('abstract','title'),
+  rows=3), parsetype = "list")
 
 # Raw data back
 ## json
-solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10,
+solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10),
    raw=TRUE)
 ## xml
-solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10,
-   raw=TRUE, wt='xml')
+solr_highlight(conn, params = list(q='alcohol', hl.fl = 'abstract', rows=10,
+   wt='xml'), raw=TRUE)
 ## parse after getting data back
-out <- solr_highlight(q='alcohol', hl.fl = c('abstract','title'), hl.fragsize=30,
-   rows=10, raw=TRUE, wt='xml')
-solr_parse(out, parsetype='df')
+out <- solr_highlight(conn, params = list(q='theoretical math',
+   hl.fl = c('abstract','title'), hl.fragsize=30, rows=10, wt='xml'),
+   raw=TRUE)
+solr_parse(out, parsetype='list')
 }
 }
 \references{
@@ -216,6 +179,5 @@ See \url{http://wiki.apache.org/solr/HighlightingParameters} for
 more information on highlighting.
 }
 \seealso{
-\code{\link{solr_search}}, \code{\link{solr_facet}}
+\code{\link[=solr_search]{solr_search()}}, \code{\link[=solr_facet]{solr_facet()}}
 }
-
diff --git a/man/solr_mlt.Rd b/man/solr_mlt.Rd
index 9206857..28f6141 100644
--- a/man/solr_mlt.Rd
+++ b/man/solr_mlt.Rd
@@ -4,71 +4,40 @@
 \alias{solr_mlt}
 \title{"more like this" search}
 \usage{
-solr_mlt(name = NULL, q = "*:*", fq = NULL, mlt.count = NULL,
-  mlt.fl = NULL, mlt.mintf = NULL, mlt.mindf = NULL, mlt.minwl = NULL,
-  mlt.maxwl = NULL, mlt.maxqt = NULL, mlt.maxntp = NULL,
-  mlt.boost = NULL, mlt.qf = NULL, fl = NULL, wt = "json", start = 0,
-  rows = NULL, key = NULL, callopts = list(), raw = FALSE,
-  parsetype = "df", concat = ",")
+solr_mlt(conn, name = NULL, params = NULL, body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", concat = ",",
+  optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms, defaults to '*:*', or everything.}
-
-\item{fq}{Filter query, this does not affect the search, only what gets returned}
-
-\item{mlt.count}{The number of similar documents to return for each result. Default is 5.}
-
-\item{mlt.fl}{The fields to use for similarity. NOTE: if possible these should have a stored 
-TermVector DEFAULT_FIELD_NAMES = new String[] {"contents"}}
-
-\item{mlt.mintf}{Minimum Term Frequency - the frequency below which terms will be ignored in 
-the source doc. DEFAULT_MIN_TERM_FREQ = 2}
-
-\item{mlt.mindf}{Minimum Document Frequency - the frequency at which words will be ignored which 
-do not occur in at least this many docs. DEFAULT_MIN_DOC_FREQ = 5}
-
-\item{mlt.minwl}{minimum word length below which words will be ignored. 
-DEFAULT_MIN_WORD_LENGTH = 0}
-
-\item{mlt.maxwl}{maximum word length above which words will be ignored. 
-DEFAULT_MAX_WORD_LENGTH = 0}
-
-\item{mlt.maxqt}{maximum number of query terms that will be included in any generated query. 
-DEFAULT_MAX_QUERY_TERMS = 25}
-
-\item{mlt.maxntp}{maximum number of tokens to parse in each example doc field that is not stored 
-with TermVector support. DEFAULT_MAX_NUM_TOKENS_PARSED = 5000}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{mlt.boost}{[true/false] set if the query will be boosted by the interesting term relevance. 
-DEFAULT_BOOST = false}
-
-\item{mlt.qf}{Query fields and their boosts using the same format as that used in 
-DisMaxQParserPlugin. These fields must also be specified in mlt.fl.}
-
-\item{fl}{Fields to return. We force 'id' to be returned so that there is a unique identifier 
-with each record.}
-
-\item{wt}{(character) Data type returned, defaults to 'json'. One of json or xml. If json, 
-uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.}
-
-\item{start}{Record to start at, default to beginning.}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{rows}{Number of records to return. Defaults to 10.}
+\item{params}{(list) a named list of parameters; results in a GET request
+as long as no body parameters are given}
 
-\item{key}{API key, if needed.}
+\item{body}{(list) a named list of parameters, if given a POST request
+will be performed}
 
-\item{callopts}{Call options passed on to httr::GET}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
 \item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
 
 \item{parsetype}{(character) One of 'list' or 'df'}
 
-\item{concat}{(character) Character to concatenate elements of longer than length 1. 
+\item{concat}{(character) Character to concatenate elements of longer than length 1.
 Note that this only works reliably when data format is json (wt='json'). The parsing
 is more complicated in XML format, but you can do that on your own.}
+
+\item{optimizeMaxRows}{(logical) If \code{TRUE}, the rows parameter is
+adjusted to the number of results actually returned. This adjustment is
+only applied when the rows parameter is higher than
+\code{minOptimizedRows}. Default: \code{TRUE}}
+
+\item{minOptimizedRows}{(numeric) used by the \code{optimizeMaxRows}
+parameter; the minimum optimized rows. Default: 50000}
+
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
@@ -76,32 +45,62 @@ XML, JSON, a list, or data.frame
 \description{
 Returns only more like this items
 }
-\details{
-The \code{verbose} parameter dropped. See \code{\link{solr_connect}}, which
-can be used to set verbose status.
+\section{More like this parameters}{
+
+\itemize{
+ \item q Query terms, defaults to '*:*', or everything.
+ \item fq Filter query, this does not affect the search, only what gets returned
+ \item mlt.count The number of similar documents to return for each result. Default is 5.
+ \item mlt.fl The fields to use for similarity. NOTE: if possible these should have a stored
+TermVector DEFAULT_FIELD_NAMES = new String[] {"contents"}
+ \item mlt.mintf Minimum Term Frequency - the frequency below which terms will be ignored in
+the source doc. DEFAULT_MIN_TERM_FREQ = 2
+ \item mlt.mindf Minimum Document Frequency - the frequency at which words will be ignored which
+do not occur in at least this many docs. DEFAULT_MIN_DOC_FREQ = 5
+ \item mlt.minwl minimum word length below which words will be ignored.
+DEFAULT_MIN_WORD_LENGTH = 0
+ \item mlt.maxwl maximum word length above which words will be ignored.
+DEFAULT_MAX_WORD_LENGTH = 0
+ \item mlt.maxqt maximum number of query terms that will be included in any generated query.
+DEFAULT_MAX_QUERY_TERMS = 25
+ \item mlt.maxntp maximum number of tokens to parse in each example doc field that is not stored
+with TermVector support. DEFAULT_MAX_NUM_TOKENS_PARSED = 5000
+ \item mlt.boost [true/false] set if the query will be boosted by the interesting term relevance.
+DEFAULT_BOOST = false
+ \item mlt.qf Query fields and their boosts using the same format as that used in
+DisMaxQParserPlugin. These fields must also be specified in mlt.fl.
+ \item fl Fields to return. We force 'id' to be returned so that there is a unique identifier
+with each record.
+ \item wt (character) Data type returned, defaults to 'json'. One of json or xml. If json,
+uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to
+parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.
+ \item start Record to start at; defaults to the beginning.
+ \item rows Number of records to return. Defaults to 10.
+ \item key API key, if needed.
+}
 }
+
 \examples{
 \dontrun{
 # connect
-solr_connect('http://api.plos.org/search')
+(conn <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
 # more like this search
-solr_mlt(q='*:*', mlt.count=2, mlt.fl='abstract', fl='score',
-  fq="doc_type:full")
-solr_mlt(q='*:*', rows=2, mlt.fl='title', mlt.mindf=1, mlt.mintf=1,
-  fl='alm_twitterCount')
-solr_mlt(q='title:"ecology" AND body:"cell"', mlt.fl='title', mlt.mindf=1,
-  mlt.mintf=1, fl='counter_total_all', rows=5)
-solr_mlt(q='ecology', mlt.fl='abstract', fl='title', rows=5)
-solr_mlt(q='ecology', mlt.fl='abstract', fl=c('score','eissn'),
-  rows=5)
-solr_mlt(q='ecology', mlt.fl='abstract', fl=c('score','eissn'),
-  rows=5, wt = "xml")
+conn$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', fl='score',
+  fq="doc_type:full"))
+conn$mlt(params = list(q='*:*', rows=2, mlt.fl='title', mlt.mindf=1,
+  mlt.mintf=1, fl='alm_twitterCount'))
+conn$mlt(params = list(q='title:"ecology" AND body:"cell"', mlt.fl='title',
+  mlt.mindf=1, mlt.mintf=1, fl='counter_total_all', rows=5))
+conn$mlt(params = list(q='ecology', mlt.fl='abstract', fl='title', rows=5))
+solr_mlt(conn, params = list(q='ecology', mlt.fl='abstract',
+  fl=c('score','eissn'), rows=5))
+solr_mlt(conn, params = list(q='ecology', mlt.fl='abstract',
+  fl=c('score','eissn'), rows=5, wt = "xml"))
 
 # get raw data, and parse later if needed
-out <- solr_mlt(q='ecology', mlt.fl='abstract', fl='title',
- rows=2, raw=TRUE)
-library('jsonlite')
+out <- solr_mlt(conn, params=list(q='ecology', mlt.fl='abstract', fl='title',
+ rows=2), raw=TRUE)
 solr_parse(out, "df")
 }
 }
@@ -109,4 +108,3 @@ solr_parse(out, "df")
 See \url{http://wiki.apache.org/solr/MoreLikeThis} for more
 information.
 }
-
diff --git a/man/solr_optimize.Rd b/man/solr_optimize.Rd
new file mode 100644
index 0000000..9d93f43
--- /dev/null
+++ b/man/solr_optimize.Rd
@@ -0,0 +1,50 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/optimize.R
+\name{solr_optimize}
+\alias{solr_optimize}
+\title{Optimize}
+\usage{
+solr_optimize(conn, name, max_segments = 1, wait_searcher = TRUE,
+  soft_commit = FALSE, wt = "json", raw = FALSE, ...)
+}
+\arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{name}{(character) A collection or core name. Required.}
+
+\item{max_segments}{optimizes down to at most this number of segments.
+Default: 1}
+
+\item{wait_searcher}{block until a new searcher is opened and registered
+as the main query searcher, making the changes visible. Default: \code{TRUE}}
+
+\item{soft_commit}{perform a soft commit - this will refresh the 'view'
+of the index in a more performant manner, but without "on-disk" guarantees.
+Default: \code{FALSE}}
+
+\item{wt}{(character) One of json (default) or xml. If json, uses
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to
+parse}
+
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
+\code{wt} param}
+
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
+}
+\description{
+Optimize
+}
+\examples{
+\dontrun{
+(conn <- SolrClient$new())
+
+solr_optimize(conn, "gettingstarted")
+solr_optimize(conn, "gettingstarted", max_segments = 2)
+solr_optimize(conn, "gettingstarted", wait_searcher = FALSE)
+
+# get xml back
+solr_optimize(conn, "gettingstarted", wt = "xml")
+## raw xml
+solr_optimize(conn, "gettingstarted", wt = "xml", raw = TRUE)
+}
+}
diff --git a/man/solr_parse.Rd b/man/solr_parse.Rd
index e5c81db..6beb01c 100644
--- a/man/solr_parse.Rd
+++ b/man/solr_parse.Rd
@@ -2,12 +2,12 @@
 % Please edit documentation in R/parsers.R
 \name{solr_parse}
 \alias{solr_parse}
-\alias{solr_parse.sr_all}
-\alias{solr_parse.sr_group}
 \alias{solr_parse.sr_high}
-\alias{solr_parse.sr_mlt}
 \alias{solr_parse.sr_search}
+\alias{solr_parse.sr_all}
+\alias{solr_parse.sr_mlt}
 \alias{solr_parse.sr_stats}
+\alias{solr_parse.sr_group}
 \title{Parse raw data from solr_search, solr_facet, or solr_highlight.}
 \usage{
 solr_parse(input, parsetype = NULL, concat)
@@ -29,17 +29,16 @@ solr_parse(input, parsetype = NULL, concat)
 
 \item{parsetype}{One of 'list' or 'df' (data.frame)}
 
-\item{concat}{Character to conactenate strings by, e.g,. ',' (character). Used
-in solr_parse.sr_search only.}
+\item{concat}{Character to concatenate strings by, e.g., ',' (character).
+Used in solr_parse.sr_search only.}
 }
 \description{
 Parse raw data from solr_search, solr_facet, or solr_highlight.
 }
 \details{
-This is the parser used internally in solr_facet, but if you output raw
-data from solr_facet using raw=TRUE, then you can use this function to parse that
-data (a sr_facet S3 object) after the fact to a list of data.frame's for easier
-consumption. The data format type is detected from the attribute "wt" on the
-sr_facet object.
+This is the parser used internally in solr_facet, but if you
+output raw data from solr_facet using raw=TRUE, then you can use this
+function to parse that data (a sr_facet S3 object) after the fact to a
+list of data.frame's for easier consumption. The data format type is
+detected from the attribute "wt" on the sr_facet object.
 }
-
diff --git a/man/solr_search.Rd b/man/solr_search.Rd
index cd6c210..a9f785c 100644
--- a/man/solr_search.Rd
+++ b/man/solr_search.Rd
@@ -4,91 +4,40 @@
 \alias{solr_search}
 \title{Solr search}
 \usage{
-solr_search(name = NULL, q = "*:*", sort = NULL, start = NULL,
-  rows = NULL, pageDoc = NULL, pageScore = NULL, fq = NULL, fl = NULL,
-  defType = NULL, timeAllowed = NULL, qt = NULL, wt = "json",
-  NOW = NULL, TZ = NULL, echoHandler = NULL, echoParams = NULL,
-  key = NULL, callopts = list(), raw = FALSE, parsetype = "df",
-  concat = ",", ...)
+solr_search(conn, name = NULL, params = list(q = "*:*"), body = NULL,
+  callopts = list(), raw = FALSE, parsetype = "df", concat = ",",
+  optimizeMaxRows = TRUE, minOptimizedRows = 50000L, ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
-
-\item{q}{Query terms, defaults to '*:*', or everything.}
-
-\item{sort}{Field to sort on. You can specify ascending (e.g., score desc) or 
-descending (e.g., score asc), sort by two fields (e.g., score desc, price asc), 
-or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of 
-x_f and y_f in a descending order).}
-
-\item{start}{Record to start at, default to beginning.}
-
-\item{rows}{Number of records to return. Default: 10.}
-
-\item{pageDoc}{If you expect to be paging deeply into the results (say beyond page 10, 
-assuming rows=10) and you are sorting by score, you may wish to add the pageDoc 
-and pageScore parameters to your request. These two parameters tell Solr (and Lucene) 
-what the last result (Lucene internal docid and score) of the previous page was, 
-so that when scoring the query for the next set of pages, it can ignore any results 
-that occur higher than that item. To get the Lucene internal doc id, you will need 
-to add [docid] to the &fl list. 
-e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score}
-
-\item{pageScore}{See pageDoc notes.}
-
-\item{fq}{Filter query, this does not affect the search, only what gets returned. 
-This parameter can accept multiple items in a lis or vector. You can't pass more than 
-one parameter of the same name, so we get around it by passing multiple queries 
-and we parse internally}
-
-\item{fl}{Fields to return, can be a character vector like \code{c('id', 'title')}, 
-or a single character vector with one or more comma separated names, like 
-\code{'id,title'}}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{defType}{Specify the query parser to use with this request.}
-
-\item{timeAllowed}{The time allowed for a search to finish. This value only applies 
-to the search and not to requests in general. Time is in milliseconds. Values <= 0 
-mean no time restriction. Partial results may be returned (if there are any).}
-
-\item{qt}{Which query handler used. Options: dismax, others?}
-
-\item{wt}{(character) One of json, xml, or csv. Data type returned, defaults to 'csv'.
-If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
-\code{\link[xml2]{read_xml}} to parse. If csv, uses \code{\link{read.table}} to parse.
-\code{wt=csv} gives the fastest performance at least in all the cases we have
-tested in, thus it's the default value for \code{wt}.}
-
-\item{NOW}{Set a fixed time for evaluating Date based expresions}
-
-\item{TZ}{Time zone, you can override the default.}
-
-\item{echoHandler}{If \code{TRUE}, Solr places the name of the handle used in the 
-response to the client for debugging purposes. Default:}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
 
-\item{echoParams}{The echoParams parameter tells Solr what kinds of Request 
-parameters should be included in the response for debugging purposes, legal values 
-include:
-\itemize{
- \item none - don't include any request parameters for debugging
- \item explicit - include the parameters explicitly specified by the client in the request
- \item all - include all parameters involved in this request, either specified explicitly 
- by the client, or implicit because of the request handler configuration.
-}}
+\item{params}{(list) a named list of parameters; results in a GET request
+as long as no body parameters are given}
 
-\item{key}{API key, if needed.}
+\item{body}{(list) a named list of parameters, if given a POST request
+will be performed}
 
-\item{callopts}{Call options passed on to httr::GET}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
 \item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
 
 \item{parsetype}{(character) One of 'list' or 'df'}
 
-\item{concat}{(character) Character to concatenate elements of longer than length 1. 
+\item{concat}{(character) Character to concatenate elements of longer than length 1.
 Note that this only works reliably when data format is json (wt='json'). The parsing
 is more complicated in XML format, but you can do that on your own.}
 
-\item{...}{Further args.}
+\item{optimizeMaxRows}{(logical) If \code{TRUE}, the rows parameter is
+adjusted to the number of results actually returned. This adjustment is
+only applied when the rows parameter is higher than
+\code{minOptimizedRows}. Default: \code{TRUE}}
+
+\item{minOptimizedRows}{(numeric) used by the \code{optimizeMaxRows}
+parameter; the minimum optimized rows. Default: 50000}
+
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
@@ -101,102 +50,163 @@ including facets, groups, mlt, stats, and highlights.
 SOLR v1.2 was first version to support csv. See
 \url{https://issues.apache.org/jira/browse/SOLR-66}
 }
+\section{Parameters}{
+
+\itemize{
+ \item q Query terms, defaults to '*:*', or everything.
+ \item sort Field to sort on. You can specify ascending (e.g., score desc) or
+descending (e.g., score asc), sort by two fields (e.g., score desc, price asc),
+or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of
+x_f and y_f in a descending order).
+ \item start Record to start at; defaults to the beginning.
+ \item rows Number of records to return. Default: 10.
+ \item pageDoc If you expect to be paging deeply into the results (say beyond page 10,
+assuming rows=10) and you are sorting by score, you may wish to add the pageDoc
+and pageScore parameters to your request. These two parameters tell Solr (and Lucene)
+what the last result (Lucene internal docid and score) of the previous page was,
+so that when scoring the query for the next set of pages, it can ignore any results
+that occur higher than that item. To get the Lucene internal doc id, you will need
+to add [docid] to the &fl list.
+e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score
+ \item pageScore See pageDoc notes.
+ \item fq Filter query, this does not affect the search, only what gets returned.
+This parameter can accept multiple items in a list or vector. You can't pass
+more than one parameter of the same name in a single query string, so we get
+around that by passing multiple queries and parsing them internally
+ \item fl Fields to return, can be a character vector like \code{c('id', 'title')},
+or a single character vector with one or more comma separated names, like
+\code{'id,title'}
+ \item defType Specify the query parser to use with this request.
+ \item timeAllowed The time allowed for a search to finish. This value only applies
+to the search and not to requests in general. Time is in milliseconds. Values <= 0
+mean no time restriction. Partial results may be returned (if there are any).
+ \item qt Which query handler to use. Options: dismax, others?
+ \item NOW Set a fixed time for evaluating date-based expressions
+ \item TZ Time zone, you can override the default.
+ \item echoHandler If \code{TRUE}, Solr places the name of the handler used
+in the response to the client for debugging purposes.
+ \item echoParams The echoParams parameter tells Solr what kinds of Request
+parameters should be included in the response for debugging purposes, legal values
+include:
+  \itemize{
+   \item none - don't include any request parameters for debugging
+   \item explicit - include the parameters explicitly specified by the client in the request
+   \item all - include all parameters involved in this request, either specified explicitly
+   by the client, or implicit because of the request handler configuration.
+ }
+\item wt (character) One of json, xml, or csv. Data type returned, defaults
+  to 'csv'. If json, uses [jsonlite::fromJSON()] to parse. If xml,
+  uses [xml2::read_xml()] to parse. If csv, uses [read.table()] to parse.
+  `wt=csv` gives the fastest performance in all the cases we have tested,
+  thus it's the default value for `wt`
+}
+}
+
 \examples{
 \dontrun{
-# connect
-solr_connect('http://api.plos.org/search')
+# Connect to a local Solr instance
+(cli <- SolrClient$new())
+cli$search("gettingstarted", params = list(q = "features:notes"))
+
+solr_search(cli, "gettingstarted")
+solr_search(cli, "gettingstarted", params = list(q = "features:notes"))
+solr_search(cli, "gettingstarted", body = list(query = "features:notes"))
+
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
+cli$search(params = list(q = "*:*"))
+cli$search(params = list(q = "title:golgi", fl = c('id', 'title')))
+
+cli$search(params = list(q = "*:*", facet = "true"))
+
 
 # search
-solr_search(q='*:*', rows=2, fl='id')
+solr_search(cli, params = list(q='*:*', rows=2, fl='id'))
+
+# search and return all rows
+solr_search(cli, params = list(q='*:*', rows=-1, fl='id'))
 
 # Search for word ecology in title and cell in the body
-solr_search(q='title:"ecology" AND body:"cell"', fl='title', rows=5)
+solr_search(cli, params = list(q='title:"ecology" AND body:"cell"',
+  fl='title', rows=5))
 
 # Search for the word "cell" but not "lines" in the title field
-solr_search(q='title:"cell" -title:"lines"', fl='title', rows=5)
+solr_search(cli, params = list(q='title:"cell" -title:"lines"', fl='title',
+  rows=5))
 
 # Wildcards
 ## Search for word that starts with "cell" in the title field
-solr_search(q='title:"cell*"', fl='title', rows=5)
+solr_search(cli, params = list(q='title:"cell*"', fl='title', rows=5))
 
 # Proximity searching
 ## Search for words "sports" and "alcohol" within seven words of each other
-solr_search(q='everything:"sports alcohol"~7', fl='abstract', rows=3)
+solr_search(cli, params = list(q='everything:"sports alcohol"~7',
+  fl='abstract', rows=3))
 
 # Range searches
 ## Search for articles with Twitter count between 5 and 50
-solr_search(q='*:*', fl=c('alm_twitterCount','id'), fq='alm_twitterCount:[5 TO 50]',
-rows=10)
+solr_search(cli, params = list(q='*:*', fl=c('alm_twitterCount','id'),
+  fq='alm_twitterCount:[5 TO 50]', rows=10))
 
 # Boosts
-## Assign higher boost to title matches than to body matches (compare the two calls)
-solr_search(q='title:"cell" abstract:"science"', fl='title', rows=3)
-solr_search(q='title:"cell"^1.5 AND abstract:"science"', fl='title', rows=3)
+## Assign higher boost to title matches than to body matches
+## (compare the two calls)
+solr_search(cli, params = list(q='title:"cell" abstract:"science"',
+  fl='title', rows=3))
+solr_search(cli, params = list(q='title:"cell"^1.5 AND abstract:"science"',
+  fl='title', rows=3))
 
 # FunctionQuery queries
-## This kind of query allows you to use the actual values of fields to calculate
-## relevancy scores for returned documents
+## This kind of query allows you to use the actual values of fields to
+## calculate relevancy scores for returned documents
 
 ## Here, we search on the product of counter_total_all and alm_twitterCount
 ## metrics for articles in PLOS Journals
-solr_search(q="{!func}product($v1,$v2)", v1 = 'sqrt(counter_total_all)',
-   v2 = 'log(alm_twitterCount)', rows=5, fl=c('id','title'), fq='doc_type:full')
+solr_search(cli, params = list(q="{!func}product($v1,$v2)",
+  v1 = 'sqrt(counter_total_all)',
+  v2 = 'log(alm_twitterCount)', rows=5, fl=c('id','title'),
+  fq='doc_type:full'))
 
-## here, search on the product of counter_total_all and alm_twitterCount, using
-## a new temporary field "_val_"
-solr_search(q='_val_:"product(counter_total_all,alm_twitterCount)"',
-   rows=5, fl=c('id','title'), fq='doc_type:full')
+## here, search on the product of counter_total_all and alm_twitterCount,
+## using a new temporary field "_val_"
+solr_search(cli,
+  params = list(q='_val_:"product(counter_total_all,alm_twitterCount)"',
+  rows=5, fl=c('id','title'), fq='doc_type:full'))
 
 ## papers with most citations
-solr_search(q='_val_:"max(counter_total_all)"',
-   rows=5, fl=c('id','counter_total_all'), fq='doc_type:full')
+solr_search(cli, params = list(q='_val_:"max(counter_total_all)"',
+   rows=5, fl=c('id','counter_total_all'), fq='doc_type:full'))
 
 ## papers with most tweets
-solr_search(q='_val_:"max(alm_twitterCount)"',
-   rows=5, fl=c('id','alm_twitterCount'), fq='doc_type:full')
+solr_search(cli, params = list(q='_val_:"max(alm_twitterCount)"',
+   rows=5, fl=c('id','alm_twitterCount'), fq='doc_type:full'))
+
+## many fq values
+solr_search(cli, params = list(q="*:*", fl=c('id','alm_twitterCount'),
+   fq=list('doc_type:full','subject:"Social networks"',
+           'alm_twitterCount:[100 TO 10000]'),
+   sort='counter_total_month desc'))
 
 ## using wt = csv
-solr_search(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full', wt="csv")
-solr_search(q='*:*', rows=50, fl=c('id','score'), fq='doc_type:full')
+solr_search(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+  fq='doc_type:full', wt="csv"))
+solr_search(cli, params = list(q='*:*', rows=50, fl=c('id','score'),
+  fq='doc_type:full'))
 
 # using a proxy
-# prox <- list(url = "186.249.1.146", port = 80)
-# solr_connect(url = 'http://api.plos.org/search', proxy = prox)
-# solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
-## vs. w/o a proxy
-# solr_connect(url = 'http://api.plos.org/search')
-# solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
+# cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL,
+#   proxy = list(url = "http://186.249.1.146:80"))
+# solr_search(cli, q='*:*', rows=2, fl='id', callopts=list(verbose=TRUE))
 
 # Pass on curl options to modify request
-solr_connect(url = 'http://api.plos.org/search')
 ## verbose
-solr_search(q='*:*', rows=2, fl='id', callopts=verbose())
-## progress
-res <- solr_search(q='*:*', rows=200, fl='id', callopts=progress())
-## timeout
-# solr_search(q='*:*', rows=200, fl='id', callopts=timeout(0.01))
-## combine curl options using the c() function
-opts <- c(verbose(), progress())
-res <- solr_search(q='*:*', rows=200, fl='id', callopts=opts)
-
-## Searching Europeana
-### They don't return the expected Solr output, so we can get raw data, then parse separately
-solr_connect('http://europeana.eu/api/v2/search.json')
-key <- getOption("eu_key")
-dat <- solr_search(query='*:*', rows=5, wskey = key, raw=TRUE)
-library('jsonlite')
-head( jsonlite::fromJSON(dat)$items )
-
-# Connect to a local Solr instance
-## not run - replace with your local Solr URL and collection/core name
-# solr_connect("localhost:8889")
-# solr_search("gettingstarted")
+solr_search(cli, params = list(q='*:*', rows=2, fl='id'),
+  callopts = list(verbose=TRUE))
 }
 }
 \references{
-See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for more information.
+See \url{http://wiki.apache.org/solr/#Search_and_Indexing}
+for more information.
 }
 \seealso{
-\code{\link{solr_highlight}}, \code{\link{solr_facet}}
+\code{\link[=solr_highlight]{solr_highlight()}}, \code{\link[=solr_facet]{solr_facet()}}
 }
-
diff --git a/man/solr_stats.Rd b/man/solr_stats.Rd
index 818b518..e5a49f9 100644
--- a/man/solr_stats.Rd
+++ b/man/solr_stats.Rd
@@ -4,34 +4,30 @@
 \alias{solr_stats}
 \title{Solr stats}
 \usage{
-solr_stats(name = NULL, q = "*:*", stats.field = NULL,
-  stats.facet = NULL, wt = "json", start = 0, rows = 0, key = NULL,
-  callopts = list(), raw = FALSE, parsetype = "df")
+solr_stats(conn, name = NULL, params = list(q = "*:*", stats.field = NULL,
+  stats.facet = NULL), body = NULL, callopts = list(), raw = FALSE,
+  parsetype = "df", ...)
 }
 \arguments{
-\item{name}{Name of a collection or core. Or leave as \code{NULL} if not needed.}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
 
-\item{q}{Query terms, defaults to '*:*', or everything.}
+\item{name}{Name of a collection or core. Or leave as \code{NULL} if
+not needed.}
 
-\item{stats.field}{The number of similar documents to return for each result.}
+\item{params}{(list) a named list of parameters; results in a GET request
+as long as no body parameters are given}
 
-\item{stats.facet}{You can not facet on multi-valued fields.}
+\item{body}{(list) a named list of parameters, if given a POST request
+will be performed}
 
-\item{wt}{(character) Data type returned, defaults to 'json'. One of json or xml. If json, 
-uses \code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse. csv is only supported in \code{\link{solr_search}} and \code{\link{solr_all}}.}
+\item{callopts}{Call options passed on to [crul::HttpClient]}
 
-\item{start}{Record to start at, default to beginning.}
-
-\item{rows}{Number of records to return. Defaults to 10.}
-
-\item{key}{API key, if needed.}
-
-\item{callopts}{Call options passed on to httr::GET}
-
-\item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
+\item{raw}{(logical) If TRUE, returns raw data in format specified by
+wt param}
 
 \item{parsetype}{(character) One of 'list' or 'df'}
+
+\item{...}{Further args to be combined into query}
 }
 \value{
 XML, JSON, a list, or data.frame
@@ -39,45 +35,61 @@ XML, JSON, a list, or data.frame
 \description{
 Returns only stat items
 }
-\details{
-The \code{verbose} parameter dropped. See \code{\link{solr_connect}}, which
-can be used to set verbose status.
+\section{Stats parameters}{
+
+\itemize{
+ \item q Query terms, defaults to '*:*', or everything.
+ \item stats.field The field(s) on which to compute statistics.
+ \item stats.facet A field to facet statistics over. Note that you cannot
+facet on multi-valued fields.
+ \item wt (character) Data type returned, defaults to 'json'. One of json
+ or xml. If json, uses \code{\link[jsonlite]{fromJSON}} to parse. If xml,
+ uses \code{\link[XML]{xmlParse}} to parse. csv is only supported in
+ \code{\link{solr_search}} and \code{\link{solr_all}}.
+ \item start Record to start at; defaults to the beginning.
+ \item rows Number of records to return. Defaults to 10.
+ \item key API key, if needed.
+}
 }
+
 \examples{
 \dontrun{
 # connect
-solr_connect('http://api.plos.org/search')
+(cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL))
 
 # get stats
-solr_stats(q='science', stats.field='counter_total_all', raw=TRUE)
-solr_stats(q='title:"ecology" AND body:"cell"',
-   stats.field=c('counter_total_all','alm_twitterCount'))
-solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-   stats.facet='journal')
-solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-   stats.facet=c('journal','volume'))
+solr_stats(cli, params = list(q='science', stats.field='counter_total_all'),
+  raw=TRUE)
+solr_stats(cli, params = list(q='title:"ecology" AND body:"cell"',
+   stats.field=c('counter_total_all','alm_twitterCount')))
+solr_stats(cli, params = list(q='ecology',
+  stats.field=c('counter_total_all','alm_twitterCount'),
+  stats.facet='journal'))
+solr_stats(cli, params = list(q='ecology',
+  stats.field=c('counter_total_all','alm_twitterCount'),
+  stats.facet=c('journal','volume')))
 
 # Get raw data, then parse later if you feel like it
 ## json
-out <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-   stats.facet=c('journal','volume'), raw=TRUE)
+out <- solr_stats(cli, params = list(q='ecology',
+  stats.field=c('counter_total_all','alm_twitterCount'),
+  stats.facet=c('journal','volume')), raw=TRUE)
 library("jsonlite")
 jsonlite::fromJSON(out)
 solr_parse(out) # list
 solr_parse(out, 'df') # data.frame
 
 ## xml
-out <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
-   stats.facet=c('journal','volume'), raw=TRUE, wt="xml")
+out <- solr_stats(cli, params = list(q='ecology',
+  stats.field=c('counter_total_all','alm_twitterCount'),
+  stats.facet=c('journal','volume'), wt="xml"), raw=TRUE)
 library("xml2")
 xml2::read_xml(unclass(out))
 solr_parse(out) # list
 solr_parse(out, 'df') # data.frame
 
 # Get verbose http call information
-library("httr")
-solr_stats(q='ecology', stats.field='alm_twitterCount',
-   callopts=verbose())
+solr_stats(cli, params = list(q='ecology', stats.field='alm_twitterCount'),
+   callopts=list(verbose=TRUE))
 }
 }
 \references{
@@ -85,7 +97,5 @@ See \url{http://wiki.apache.org/solr/StatsComponent} for
 more information on Solr stats.
 }
 \seealso{
-\code{\link{solr_highlight}}, \code{\link{solr_facet}},
-\code{\link{solr_search}}, \code{\link{solr_mlt}}
+\code{\link[=solr_highlight]{solr_highlight()}}, \code{\link[=solr_facet]{solr_facet()}}, \code{\link[=solr_search]{solr_search()}}, \code{\link[=solr_mlt]{solr_mlt()}}
 }
-
diff --git a/man/solrium-package.Rd b/man/solrium-package.Rd
index 1c70739..71a7f51 100644
--- a/man/solrium-package.Rd
+++ b/man/solrium-package.Rd
@@ -2,26 +2,26 @@
 % Please edit documentation in R/solrium-package.R
 \docType{package}
 \name{solrium-package}
-\alias{solrium}
 \alias{solrium-package}
+\alias{solrium}
 \title{General purpose R interface to Solr.}
 \description{
 This package has support for all the search endpoints, as well as a suite
-of functions for managing a Solr database, including adding and deleting 
+of functions for managing a Solr database, including adding and deleting
 documents.
 }
 \section{Important search functions}{
 
 
 \itemize{
-  \item \code{\link{solr_search}} - General search, only returns documents
-  \item \code{\link{solr_all}} - General search, including all non-documents
-  in addition to documents: facets, highlights, groups, mlt, stats.
-  \item \code{\link{solr_facet}} - Faceting only (w/o general search)
-  \item \code{\link{solr_highlight}} - Highlighting only (w/o general search)
-  \item \code{\link{solr_mlt}} - More like this (w/o general search)
-  \item \code{\link{solr_group}} - Group search (w/o general search)
-  \item \code{\link{solr_stats}} - Stats search (w/o general search)
+\item \code{\link{solr_search}} - General search, only returns documents
+\item \code{\link{solr_all}} - General search, including all non-documents
+in addition to documents: facets, highlights, groups, mlt, stats.
+\item \code{\link{solr_facet}} - Faceting only (w/o general search)
+\item \code{\link{solr_highlight}} - Highlighting only (w/o general search)
+\item \code{\link{solr_mlt}} - More like this (w/o general search)
+\item \code{\link{solr_group}} - Group search (w/o general search)
+\item \code{\link{solr_stats}} - Stats search (w/o general search)
 }
 }
 
@@ -29,11 +29,11 @@ documents.
 
 
 \itemize{
-  \item \code{\link{update_json}} - Add or delete documents using json in a 
-  file
-  \item \code{\link{add}} - Add documents via an R list or data.frame
-  \item \code{\link{delete_by_id}} - Delete documents by ID
-  \item \code{\link{delete_by_query}} - Delete documents by query
+\item \code{\link{update_json}} - Add or delete documents using json in a
+file
+\item \code{\link{add}} - Add documents via an R list or data.frame
+\item \code{\link{delete_by_id}} - Delete documents by ID
+\item \code{\link{delete_by_query}} - Delete documents by query
 }
 }
 
@@ -47,26 +47,26 @@ See the vignettes for help \code{browseVignettes(package = "solrium")}
 
 
 \code{v0.2} and above of this package will have \code{wt=csv} as the default.
-This  should give significant performance improvement over the previous 
-default of \code{wt=json}, which pulled down json, parsed to an R list, 
-then to a data.frame. With \code{wt=csv}, we pull down csv, and read that 
+This should give a significant performance improvement over the previous
+default of \code{wt=json}, which pulled down json, parsed to an R list,
+then to a data.frame. With \code{wt=csv}, we pull down csv, and read that
 in directly to a data.frame.
 
-The http library we use, \pkg{httr}, sets gzip compression header by 
-default. As long as compression is used server side, you're good to go on 
+The http library we use, \pkg{crul}, sets gzip compression header by
+default. As long as compression is used server side, you're good to go on
 compression, which should be a good performance boost. See
 \url{https://wiki.apache.org/solr/SolrPerformanceFactors#Query_Response_Compression}
 for notes on how to enable compression.
 
 There are other notes about Solr performance at
-\url{https://wiki.apache.org/solr/SolrPerformanceFactors} that can be 
-used server side/in your Solr config, but aren't things to tune here in 
+\url{https://wiki.apache.org/solr/SolrPerformanceFactors} that can be
+used server side/in your Solr config, but aren't things to tune here in
 this R client.
 
 Let us know if there's any further performance improvements we can make.
 }
+
 \author{
 Scott Chamberlain \email{myrmecocystus at gmail.com}
 }
 \keyword{package}
-
diff --git a/man/update_atomic_json.Rd b/man/update_atomic_json.Rd
new file mode 100644
index 0000000..061fcaf
--- /dev/null
+++ b/man/update_atomic_json.Rd
@@ -0,0 +1,72 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/update_atomic_json.R
+\name{update_atomic_json}
+\alias{update_atomic_json}
+\title{Atomic updates with JSON data}
+\usage{
+update_atomic_json(conn, body, name, wt = "json", raw = FALSE, ...)
+}
+\arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{body}{(character) JSON as a character string}
+
+\item{name}{(character) Name of the core or collection}
+
+\item{wt}{(character) One of json (default) or xml. If json, uses
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to parse}
+
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
+\code{wt} param}
+
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
+}
+\description{
+Atomic updates to parts of Solr documents
+}
+\examples{
+\dontrun{
+# start Solr in Cloud mode: bin/solr start -e cloud -noprompt
+
+# connect
+(conn <- SolrClient$new())
+
+# create a collection
+# delete the collection if it already exists, then create it fresh
+if (conn$collection_exists("books")) {
+  conn$collection_delete("books")
+}
+conn$collection_create("books")
+
+# Add documents
+file <- system.file("examples", "books2.json", package = "solrium")
+cat(readLines(file), sep = "\\n")
+conn$update_json(file, "books")
+
+# get a document
+conn$get(ids = 343334534545, "books")
+
+# atomic update
+body <- '[{
+ "id": "343334534545",
+ "genre_s": {"set": "mystery" },
+ "pages_i": {"inc": 1 }
+}]'
+conn$update_atomic_json(body, "books")
+
+# get the document again
+conn$get(ids = 343334534545, "books")
+
+# another atomic update
+body <- '[{
+ "id": "343334534545",
+ "price": {"remove": "12.5" }
+}]'
+conn$update_atomic_json(body, "books")
+
+# get the document again
+conn$get(ids = 343334534545, "books")
+}
+}
+\references{
+\url{https://lucene.apache.org/solr/guide/7_0/updating-parts-of-documents.html}
+}
diff --git a/man/update_atomic_xml.Rd b/man/update_atomic_xml.Rd
new file mode 100644
index 0000000..377e215
--- /dev/null
+++ b/man/update_atomic_xml.Rd
@@ -0,0 +1,78 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/update_atomic_xml.R
+\name{update_atomic_xml}
+\alias{update_atomic_xml}
+\title{Atomic updates with XML data}
+\usage{
+update_atomic_xml(conn, body, name, wt = "json", raw = FALSE, ...)
+}
+\arguments{
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{body}{(character) XML as a character string}
+
+\item{name}{(character) Name of the core or collection}
+
+\item{wt}{(character) One of json (default) or xml. If json, uses
+\code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} to parse. If xml, uses \code{\link[xml2:read_xml]{xml2::read_xml()}} to parse}
+
+\item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by
+\code{wt} param}
+
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
+}
+\description{
+Atomic updates to parts of Solr documents
+}
+\examples{
+\dontrun{
+# start Solr in Cloud mode: bin/solr start -e cloud -noprompt
+
+# connect
+(conn <- SolrClient$new())
+
+# create a collection
+# delete the collection if it already exists, then create it fresh
+if (conn$collection_exists("books")) {
+  conn$collection_delete("books")
+}
+conn$collection_create("books")
+
+# Add documents
+file <- system.file("examples", "books.xml", package = "solrium")
+cat(readLines(file), sep = "\\n")
+conn$update_xml(file, "books")
+
+# get a document
+conn$get(ids = '978-0641723445', "books", wt = "xml")
+
+# atomic update
+body <- '
+<add>
+ <doc>
+   <field name="id">978-0641723445</field>
+   <field name="genre_s" update="set">mystery</field>
+   <field name="pages_i" update="inc">1</field>
+ </doc>
+</add>'
+conn$update_atomic_xml(body, name="books")
+
+# get the document again
+conn$get(ids = '978-0641723445', "books", wt = "xml")
+
+# another atomic update
+body <- '
+<add>
+ <doc>
+   <field name="id">978-0641723445</field>
+   <field name="price" update="remove">12.5</field>
+ </doc>
+</add>'
+conn$update_atomic_xml(body, "books")
+
+# get the document again
+conn$get(ids = '978-0641723445', "books", wt = "xml")
+}
+}
+\references{
+\url{https://lucene.apache.org/solr/guide/7_0/updating-parts-of-documents.html}
+}
diff --git a/man/update_csv.Rd b/man/update_csv.Rd
index 00926cb..41745d5 100644
--- a/man/update_csv.Rd
+++ b/man/update_csv.Rd
@@ -2,16 +2,18 @@
 % Please edit documentation in R/update_csv.R
 \name{update_csv}
 \alias{update_csv}
-\title{Update documents using CSV}
+\title{Update documents with CSV data}
 \usage{
-update_csv(files, name, separator = ",", header = TRUE, fieldnames = NULL,
-  skip = NULL, skipLines = 0, trim = FALSE, encapsulator = NULL,
-  escape = NULL, keepEmpty = FALSE, literal = NULL, map = NULL,
-  split = NULL, rowid = NULL, rowidOffset = NULL, overwrite = NULL,
-  commit = NULL, wt = "json", raw = FALSE, ...)
+update_csv(conn, files, name, separator = ",", header = TRUE,
+  fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE,
+  encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL,
+  map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL,
+  overwrite = NULL, commit = NULL, wt = "json", raw = FALSE, ...)
 }
 \arguments{
-\item{files}{Path to file to load into Solr}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{files}{Path to a single file to load into Solr}
 
 \item{name}{(character) Name of the core or collection}
 
@@ -83,14 +85,16 @@ with &overwrite=false.}
 default is commit=false to avoid the potential performance impact of frequent commits.}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[xml2]{read_xml}} to parse}
+\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses
+\code{\link[xml2]{read_xml}} to parse}
 
-\item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
+\item{raw}{(logical) If TRUE, returns raw data in format specified by
+\code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
 }
 \description{
-Update documents using CSV
+Update documents with CSV data
 }
 \note{
 SOLR v1.2 was first version to support csv. See
@@ -98,23 +102,26 @@ SOLR v1.2 was first version to support csv. See
 }
 \examples{
 \dontrun{
-# start Solr in Schemaless mode: bin/solr start -e schemaless
+# start Solr: bin/solr start -f -c -p 8983
 
 # connect
-solr_connect()
+(cli <- SolrClient$new())
+
+if (!cli$collection_exists("helloWorld")) {
+  cli$collection_create(name = "helloWorld", numShards = 2)
+}
 
 df <- data.frame(id=1:3, name=c('red', 'blue', 'green'))
 write.csv(df, file="df.csv", row.names=FALSE, quote = FALSE)
-update_csv("df.csv", "books")
+cli$update_csv("df.csv", "helloWorld", verbose = TRUE)
 
-# give back xml
-update_csv("df.csv", "books", wt = "xml")
-## raw xml
-update_csv("df.csv", "books", wt = "xml", raw = FALSE)
+# give back xml
+cli$update_csv("df.csv", "helloWorld", wt = "xml")
+## raw json
+cli$update_csv("df.csv", "helloWorld", wt = "json", raw = TRUE)
 }
 }
 \seealso{
 Other update: \code{\link{update_json}},
   \code{\link{update_xml}}
 }
-
diff --git a/man/update_json.Rd b/man/update_json.Rd
index 5b983e7..32f3f51 100644
--- a/man/update_json.Rd
+++ b/man/update_json.Rd
@@ -2,15 +2,17 @@
 % Please edit documentation in R/update_json.R
 \name{update_json}
 \alias{update_json}
-\title{Update documents using JSON}
+\title{Update documents with JSON data}
 \usage{
-update_json(files, name, commit = TRUE, optimize = FALSE,
+update_json(conn, files, name, commit = TRUE, optimize = FALSE,
   max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
   soft_commit = FALSE, prepare_commit = NULL, wt = "json", raw = FALSE,
   ...)
 }
 \arguments{
-\item{files}{Path to file to load into Solr}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{files}{Path to a single file to load into Solr}
 
 \item{name}{(character) Name of the core or collection}
 
@@ -35,56 +37,54 @@ Default: \code{FALSE}}
 calls Lucene's IndexWriter.prepareCommit(). Not passed by default}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses 
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse}
+\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses 
+\code{\link[xml2]{read_xml}} to parse}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by 
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \code{\link[crul]{HttpClient}}}
 }
 \description{
-Update documents using JSON
+Update documents with JSON data
 }
 \details{
-You likely may not be able to run this function against many public Solr 
-services, but should work locally.
+You likely won't be able to run this function against many
+public Solr services, but it should work locally.
 }
 \examples{
 \dontrun{
-# start Solr in Schemaless mode: bin/solr start -e schemaless
+# start Solr: bin/solr start -f -c -p 8983
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
 
 # Add documents
 file <- system.file("examples", "books2.json", package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_json(file, "books")
+conn$update_json(files = file, name = "books")
+update_json(conn, files = file, name = "books")
 
 # Update commands - can include many varying commands
 ## Add file
-file <- system.file("examples", "updatecommands_add.json", package = "solrium")
+file <- system.file("examples", "updatecommands_add.json",
+  package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_json(file, "books")
+conn$update_json(file, "books")
 
 ## Delete file
-file <- system.file("examples", "updatecommands_delete.json", package = "solrium")
+file <- system.file("examples", "updatecommands_delete.json",
+  package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_json(file, "books")
+conn$update_json(file, "books")
 
 # Add and delete in the same document
 ## Add a document first, that we can later delete
 ss <- list(list(id = 456, name = "cat"))
-add(ss, "books")
-## Now add a new document, and delete the one we just made
-file <- system.file("examples", "add_delete.json", package = "solrium")
-cat(readLines(file), sep = "\\n")
-update_json(file, "books")
+conn$add(ss, "books")
 }
 }
 \seealso{
 Other update: \code{\link{update_csv}},
   \code{\link{update_xml}}
 }
-
diff --git a/man/update_xml.Rd b/man/update_xml.Rd
index a59f55d..156bbb8 100644
--- a/man/update_xml.Rd
+++ b/man/update_xml.Rd
@@ -2,14 +2,17 @@
 % Please edit documentation in R/update_xml.R
 \name{update_xml}
 \alias{update_xml}
-\title{Update documents using XML}
+\title{Update documents with XML data}
 \usage{
-update_xml(files, name, commit = TRUE, optimize = FALSE, max_segments = 1,
-  expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE,
-  prepare_commit = NULL, wt = "json", raw = FALSE, ...)
+update_xml(conn, files, name, commit = TRUE, optimize = FALSE,
+  max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE,
+  soft_commit = FALSE, prepare_commit = NULL, wt = "json", raw = FALSE,
+  ...)
 }
 \arguments{
-\item{files}{Path to file to load into Solr}
+\item{conn}{A solrium connection object, see \link{SolrClient}}
+
+\item{files}{Path to a single file to load into Solr}
 
 \item{name}{(character) Name of the core or collection}
 
@@ -34,56 +37,61 @@ Default: \code{FALSE}}
 calls Lucene's IndexWriter.prepareCommit(). Not passed by default}
 
 \item{wt}{(character) One of json (default) or xml. If json, uses 
-\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses \code{\link[XML]{xmlParse}} to 
-parse}
+\code{\link[jsonlite]{fromJSON}} to parse. If xml, uses 
+\code{\link[xml2]{read_xml}} to parse}
 
 \item{raw}{(logical) If \code{TRUE}, returns raw data in format specified by 
 \code{wt} param}
 
-\item{...}{curl options passed on to \code{\link[httr]{GET}}}
+\item{...}{curl options passed on to \code{\link[crul]{HttpClient}}}
 }
 \description{
-Update documents using XML
+Update documents with XML data
 }
 \details{
-You likely may not be able to run this function against many public Solr 
-services, but should work locally.
+You likely won't be able to run this function against many
+public Solr services, but it should work locally.
 }
 \examples{
 \dontrun{
-# start Solr in Schemaless mode: bin/solr start -e schemaless
+# start Solr: bin/solr start -f -c -p 8983
 
 # connect
-solr_connect()
+(conn <- SolrClient$new())
+
+# create a collection
+if (!conn$collection_exists("books")) {
+  conn$collection_create(name = "books", numShards = 2)
+}
 
 # Add documents
 file <- system.file("examples", "books.xml", package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_xml(file, "books")
+conn$update_xml(file, "books")
 
 # Update commands - can include many varying commands
 ## Add files
 file <- system.file("examples", "books2_delete.xml", package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_xml(file, "books")
+conn$update_xml(file, "books")
 
 ## Delete files
-file <- system.file("examples", "updatecommands_delete.xml", package = "solrium")
+file <- system.file("examples", "updatecommands_delete.xml",
+  package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_xml(file, "books")
+conn$update_xml(file, "books")
 
 ## Add and delete in the same document
 ## Add a document first, that we can later delete
 ss <- list(list(id = 456, name = "cat"))
-add(ss, "books")
+conn$add(ss, "books")
 ## Now add a new document, and delete the one we just made
 file <- system.file("examples", "add_delete.xml", package = "solrium")
 cat(readLines(file), sep = "\\n")
-update_xml(file, "books")
+conn$update_xml(file, "books")
 }
 }
 \seealso{
 Other update: \code{\link{update_csv}},
   \code{\link{update_json}}
 }
-
diff --git a/tests/testthat/helper-solrium.R b/tests/testthat/helper-solrium.R
new file mode 100644
index 0000000..4b917a7
--- /dev/null
+++ b/tests/testthat/helper-solrium.R
@@ -0,0 +1,18 @@
+conn <- SolrClient$new()
+conn_plos <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL)
+conn_simp <- SolrClient$new(host = 'api.plos.org', path = 'search', port = NULL)
+conn_comp <- SolrClient$new(host = 'api.plos.org', path = 'search',
+                            port = NULL, errors = "complete")
+conn_hathi <- SolrClient$new(
+  host = "chinkapin.pti.indiana.edu", path = "solr/meta/select", port = 9994)
+conn_dc <- SolrClient$new(host = "search.datacite.org", path = "api", port = NULL)
+conn_dryad <- SolrClient$new(host = "datadryad.org", path = "solr/search/select",
+                             port = NULL)
+
+# cloud mode: create collection "gettingstarted"
+up <- tryCatch(conn$collection_exists("gettingstarted"), error = function(e) e)
+if (!inherits(up, "error")) {
+  if (!conn$collection_exists("gettingstarted")) {
+    conn$collection_create("gettingstarted")
+  }
+}
diff --git a/tests/testthat/test-add.R b/tests/testthat/test-add.R
new file mode 100644
index 0000000..1c17542
--- /dev/null
+++ b/tests/testthat/test-add.R
@@ -0,0 +1,42 @@
+context("add")
+
+test_that("add works with a list and data.frame", {
+  skip_on_cran()
+
+  if (!collection_exists(conn, "books")) {
+    collection_create(conn, name = "books")
+  }
+
+  ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
+  aa <- add(ss, conn, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_is(conn$get(c(1, 2), "books"), "list")
+  expect_named(conn$get(c(1, 2), "books"), "response")
+
+
+  df <- data.frame(id = c(67, 68), price = c(1000, 500000000))
+  aa <- add(df, conn, "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+})
+
+test_that("add works with new interface", {
+  skip_on_cran()
+
+  ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
+  aa <- conn$add(ss, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+})
+
+test_that("add fails well", {
+  skip_on_cran()
+
+  expect_error(add(), "no applicable method")
+  expect_error(add(5), "no applicable method")
+  expect_error(add(mtcars, 4), "conn must be a SolrClient object")
+})
diff --git a/tests/testthat/test-client.R b/tests/testthat/test-client.R
new file mode 100644
index 0000000..3f282ee
--- /dev/null
+++ b/tests/testthat/test-client.R
@@ -0,0 +1,46 @@
+context("SolrClient")
+
+test_that("SolrClient to remote Solr server works", {
+  skip_on_cran()
+  
+  aa <- SolrClient$new(host = 'api.plos.org', path = 'search', port = NULL)
+  
+  expect_is(aa, "SolrClient")
+  expect_is(aa$host, "character")
+  expect_null(aa$proxy)
+  expect_is(aa$errors, "character")
+  expect_true(all(c('host', 'proxy', 'errors') %in% names(aa)))
+})
+
+test_that("SolrClient to local Solr server works", {
+  skip_on_cran()
+  
+  bb <- SolrClient$new()
+  
+  expect_is(bb, "SolrClient")
+  expect_is(bb$host, "character")
+  expect_null(bb$proxy)
+  expect_is(bb$errors, "character")
+  expect_true(all(c('host', 'proxy', 'errors') %in% names(bb)))
+})
+
+test_that("SolrClient works with a proxy", {
+  skip_on_cran()
+  
+  port <- 3128
+  proxy <- list(url = "187.62.207.130", port = port)
+  cc <- SolrClient$new(proxy = proxy)
+  
+  expect_is(cc, "SolrClient")
+  expect_is(cc$host, "character")
+  expect_is(cc$proxy, "proxy")
+  expect_is(cc$proxy$proxy, "character")
+})
+
+test_that("SolrClient fails well", {
+  skip_on_cran()
+  
+  #expect_error(SolrClient$new(host = "foobar"), "That does not appear to be a url")
+  expect_error(SolrClient$new(errors = 'foo'), "errors must be one of")
+  expect_error(SolrClient$new(proxy = list(foo = "bar")), "proxy URL not")
+})
diff --git a/tests/testthat/test-collections.R b/tests/testthat/test-collections.R
new file mode 100644
index 0000000..8602db8
--- /dev/null
+++ b/tests/testthat/test-collections.R
@@ -0,0 +1,47 @@
+context("collections")
+
+test_that("collections works - no collections", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+
+  if (conn$collection_exists("books")) conn$collection_delete("books")
+  if (conn$collection_exists("gettingstarted")) conn$collection_delete("gettingstarted")
+
+  aa <- collections(conn)
+
+  expect_is(aa, "character")
+  expect_false("books" %in% aa)
+  expect_false("gettingstarted" %in% aa)
+})
+
+test_that("collections works - with some collections", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  if (!conn$collection_exists("gettingstarted")) conn$collection_create("gettingstarted")
+
+  aa <- collections(conn)
+
+  expect_is(aa, "character")
+  expect_true("books" %in% aa)
+  expect_true("gettingstarted" %in% aa)
+})
+
+test_that("collections works - new way of using", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  if (!conn$collection_exists("gettingstarted")) conn$collection_create("gettingstarted")
+
+  aa <- conn$collection_list()
+
+  expect_is(aa, "list")
+  expect_named(aa, c('responseHeader', 'collections'))
+})
+
+test_that("collections fails well", {
+  expect_error(collections(), "argument \"conn\" is missing")
+  expect_error(collections(5), "conn must be a SolrClient")
+})
diff --git a/tests/testthat/test-core_create.R b/tests/testthat/test-core_create.R
index 5c61009..f70a956 100644
--- a/tests/testthat/test-core_create.R
+++ b/tests/testthat/test-core_create.R
@@ -2,24 +2,25 @@ context("core_create")
 
 test_that("core_create works", {
   skip_on_cran()
-  
-  solr_connect(verbose = FALSE)
+  skip_on_travis()
+  skip_if_not(is_not_in_cloud_mode(conn))
   
   core_name <- "slamcore"
 
   # delete if exists
-  if (core_exists(core_name)) {
-    invisible(core_unload(core_name))
+  if (conn$core_exists(core_name)) {
+    invisible(conn$core_unload(core_name))
   }
   
   # write files in preparation
-  path <- sprintf("~/solr-5.4.1/server/solr/%s/conf", core_name)
+  path <- sprintf("~/solr-7.0.0/server/solr/%s/conf", core_name)
   dir.create(path, recursive = TRUE, showWarnings = FALSE)
-  files <- list.files("~/solr-5.4.1/server/solr/configsets/data_driven_schema_configs/conf/", full.names = TRUE)
+  files <- list.files("~/solr-7.0.0/server/solr/configsets/sample_techproducts_configs/conf/", full.names = TRUE)
   invisible(file.copy(files, path, recursive = TRUE))
   
   # create the core
-  aa <- suppressMessages(core_create(name = core_name, instanceDir = core_name, configSet = "basic_configs"))
+  aa <- suppressMessages(conn$core_create(
+    name = core_name, instanceDir = core_name, configSet = "basic_configs"))
 
   expect_is(aa, "list")
   expect_is(aa$responseHeader, "list")
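
The non-cloud core workflow from the hunk above, as a sketch; the config-file
copying is environment-specific and omitted here.

    # drop any existing core, then create it fresh (configs assumed in place)
    if (conn$core_exists("slamcore")) conn$core_unload("slamcore")
    conn$core_create(name = "slamcore", instanceDir = "slamcore",
      configSet = "basic_configs")
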
diff --git a/tests/testthat/test-delete.R b/tests/testthat/test-delete.R
new file mode 100644
index 0000000..01210a3
--- /dev/null
+++ b/tests/testthat/test-delete.R
@@ -0,0 +1,55 @@
+context("delete_by_id")
+
+test_that("delete by ", {
+  skip_on_cran()
+
+  if (!collection_exists(conn, "gettingstarted")) {
+    collection_create(conn, name = "gettingstarted", numShards = 1)
+  }
+  ss <- list(list(id = 1, price = 100), list(id = 2, price = 500),
+            list(id = 3, price = 100), list(id = 4, price = 500))
+  invisible(add(ss, conn, name = "gettingstarted"))
+
+  # single id
+  aa <- conn$delete_by_id(ids = 1, "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+
+  # many ids
+  aa <- conn$delete_by_id(ids = c(3, 4), "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+
+  res <- conn$get(ids = 3:4, "gettingstarted")
+  expect_equal(length(res$response$docs), 0)
+})
+
+context("delete_by_query")
+
+test_that("delete by many ids", {
+  skip_on_cran()
+
+  ss <- list(list(id = 10, title = "adfadsf"), list(id = 12, title = "though"),
+          list(id = 13, title = "cheese"), list(id = 14, title = "animals"))
+  invisible(add(ss, conn, name = "gettingstarted"))
+
+  aa <- conn$delete_by_query(query = "title:cheese", "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+
+  res <- conn$search("gettingstarted", params = list(q = "title:cheese"))
+  expect_equal(NROW(res), 0)
+})
+
+test_that("delete fails well", {
+  skip_on_cran()
+
+  expect_error(delete_by_id(), "argument \"conn\" is missing")
+  expect_error(delete_by_query(), "argument \"conn\" is missing")
+
+  expect_error(delete_by_id(5), "conn must be a SolrClient object")
+  expect_error(delete_by_query(5), "conn must be a SolrClient object")
+})
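
The delete workflow these tests cover, in brief; the documents and the price
query are illustrative.

    # add a few docs, then remove them by id and by query
    docs <- list(list(id = 1, price = 100), list(id = 2, price = 500))
    add(docs, conn, name = "gettingstarted")
    conn$delete_by_id(ids = 1, "gettingstarted")                 # by id
    conn$delete_by_query(query = "price:500", "gettingstarted")  # by query
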
diff --git a/tests/testthat/test-errors.R b/tests/testthat/test-errors.R
index 90130d3..ffe99d7 100644
--- a/tests/testthat/test-errors.R
+++ b/tests/testthat/test-errors.R
@@ -3,48 +3,37 @@ context("errors")
 
 test_that("setting errors level gives correct error classes", {
   skip_on_cran()
-  
-  invisible(aa <- solr_connect('http://api.plos.org/search'))
-  invisible(bb <- solr_connect('http://api.plos.org/search', errors = "simple"))
-  invisible(cc <- solr_connect('http://api.plos.org/search', errors = "complete"))
-  
-  expect_is(aa, "solr_connection")
-  expect_is(bb, "solr_connection")
-  expect_is(cc, "solr_connection")
-  expect_is(aa$errors, "character")
-  expect_is(bb$errors, "character")
-  expect_is(cc$errors, "character")
+
+  expect_is(conn_simp, "SolrClient")
+  expect_is(conn_comp, "SolrClient")
+  expect_is(conn_simp$errors, "character")
+  expect_is(conn_comp$errors, "character")
 })
 
 test_that("setting errors level gives correct error values", {
   skip_on_cran()
-  
-  invisible(aa <- solr_connect('http://api.plos.org/search'))
-  invisible(bb <- solr_connect('http://api.plos.org/search', errors = "simple"))
-  invisible(cc <- solr_connect('http://api.plos.org/search', errors = "complete"))
-  
-  expect_equal(aa$errors, "simple")
-  expect_equal(bb$errors, "simple")
-  expect_equal(cc$errors, "complete")
+
+  expect_equal(conn_plos$errors, "simple")
+  expect_equal(conn_simp$errors, "simple")
+  expect_equal(conn_comp$errors, "complete")
 })
 
 test_that("setting error levels gives correct effect - simple errors", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', errors = "simple", verbose = FALSE))
-  
-  expect_error(solr_search(q = "*:*", rows = "asdf"), "500 - For input string")
-  expect_error(solr_search(q = "*:*", rows = "asdf"), "500 - For input string")
+
+  expect_error(conn_simp$search(params = list(q = "*:*", rows = "asdf")),
+               "rows should be a numeric or integer class value")
+  expect_error(conn_simp$search(params = list(q = "*:*", rows = "asdf")),
+               "rows should be a numeric or integer class value")
 })
 
 test_that("setting error levels gives correct effect - complete errors", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', errors = "complete", verbose = FALSE))
-  
-  errmssg <- "500 - For input string: \"asdf\"\nAPI stack trace"
-  expect_error(solr_search(q = "*:*", rows = "asdf"), errmssg)
-  expect_error(solr_search(q = "*:*", start = "asdf"), errmssg)
-  expect_error(solr_search(q = "*:*", sort = "down"), 
+
+  expect_error(conn_comp$search(params = list(q = "*:*", rows = "asdf")),
+               "rows should be a numeric or integer class value")
+  expect_error(conn_comp$search(params = list(q = "*:*", start = "asdf")),
+               "500 - For input string: \"asdf\"")
+  expect_error(conn_comp$search(params = list(q = "*:*", sort = "down")),
     "400 - Can't determine a Sort Order \\(asc or desc\\) in sort spec 'down'")
 })
diff --git a/tests/testthat/test-ping.R b/tests/testthat/test-ping.R
index 75ce00a..c413fd1 100644
--- a/tests/testthat/test-ping.R
+++ b/tests/testthat/test-ping.R
@@ -1,12 +1,10 @@
-# ping
-context("ping")
-
-test_that("ping works against", {
+context("ping - regular mode")
+test_that("ping works", {
   skip_on_cran()
+  skip_if_not(!is_in_cloud_mode(conn))
 
-  invisible(solr_connect(verbose = FALSE))
-
-  aa <- ping(name = "gettingstarted")
+  if (!conn$core_exists("gettingstarted")) conn$core_create("gettingstarted")
+  aa <- conn$ping(name = "gettingstarted")
 
   expect_is(aa, "list")
   expect_is(aa$responseHeader, "list")
@@ -16,8 +14,7 @@ test_that("ping works against", {
 
 test_that("ping gives raw data correctly", {
   skip_on_cran()
-  
-  solr_connect(verbose = FALSE)
+  skip_if_not(!is_in_cloud_mode(conn))
   
   expect_is(ping("gettingstarted", raw = TRUE), "ping")
   expect_is(ping("gettingstarted", raw = FALSE), "list")
@@ -27,9 +24,45 @@ test_that("ping gives raw data correctly", {
 
 test_that("ping fails well", {
   skip_on_cran()
-
-  solr_connect(verbose = FALSE)
-
+  skip_if_not(!is_in_cloud_mode(conn))
+  
   expect_equal(ping()$status, "not found")
   expect_equal(ping("adfdafs")$status, "not found")
 })
+
+
+
+context("ping - cloud mode")
+test_that("ping works", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+  
+  if (!conn$collection_exists("gettingstarted")) {
+    conn$collection_create("gettingstarted")
+  }
+  
+  aa <- conn$ping(name = "gettingstarted")
+  
+  expect_is(aa, "list")
+  expect_is(aa$responseHeader, "list")
+  expect_equal(aa$responseHeader$status, 0)
+  expect_equal(aa$responseHeader$params$q, "{!lucene}*:*")
+})
+
+test_that("ping gives raw data correctly", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+  
+  expect_is(ping(conn, "gettingstarted", raw = TRUE), "ping")
+  expect_is(ping(conn, "gettingstarted", raw = FALSE), "list")
+  expect_is(ping(conn, "gettingstarted", wt = "xml", raw = TRUE), "ping")
+  expect_is(ping(conn, "gettingstarted", wt = "xml", raw = FALSE), "xml_document")
+})
+
+test_that("ping fails well", {
+  skip_on_cran()
+  skip_if_not(is_in_cloud_mode(conn))
+  
+  expect_error(conn$ping()$status, "argument \"name\" is missing")
+  expect_equal(conn$ping("adfdafs")$status, "not found")
+})
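
Both calling styles of ping appear above; condensed, with "gettingstarted"
standing in for any core or collection.

    conn$ping(name = "gettingstarted")                     # parsed list
    ping(conn, "gettingstarted", raw = TRUE)               # raw "ping" object
    ping(conn, "gettingstarted", wt = "xml", raw = FALSE)  # xml_document
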
diff --git a/tests/testthat/test-schema.R b/tests/testthat/test-schema.R
index 9e4b663..b1a1586 100644
--- a/tests/testthat/test-schema.R
+++ b/tests/testthat/test-schema.R
@@ -1,36 +1,48 @@
-# schema
-context("schema")
+context("schema - cloud mode")
+
+test_that("both R6 and normal function call work", {
+  skip_on_cran()
+
+  expect_is(conn$schema, "function")
+  expect_equal(names(formals(schema))[1], "conn")
+})
 
 test_that("schema works against", {
   skip_on_cran()
+  skip_if_not(!is_in_cloud_mode(conn))
 
-  invisible(solr_connect(verbose = FALSE))
+  aa <- conn$schema(name = "gettingstarted")
+  bb <- conn$schema(name = "gettingstarted", what = "fields")
 
-  aa <- schema(name = "gettingstarted")
-  bb <- schema(name = "gettingstarted", "fields")
-  
-  expect_is(schema(name = "gettingstarted", "dynamicfields"), "list")
-  expect_is(schema(name = "gettingstarted", "fieldtypes"), "list")
-  expect_is(schema(name = "gettingstarted", "copyfields"), "list")
-  expect_is(schema(name = "gettingstarted", "name"), "list")
-  expect_is(schema(name = "gettingstarted", "version"), "list")
-  expect_is(schema(name = "gettingstarted", "uniquekey"), "list")
-  expect_is(schema(name = "gettingstarted", "similarity"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "dynamicfields"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "fieldtypes"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "copyfields"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "name"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "version"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "uniquekey"), "list")
+  expect_is(conn$schema(name = "gettingstarted", "similarity"), "list")
 
   expect_is(aa, "list")
   expect_is(aa$responseHeader, "list")
   expect_is(aa$schema, "list")
   expect_is(aa$schema$name, "character")
-  
+
   expect_is(bb, "list")
   expect_is(bb$fields, "data.frame")
 })
 
 test_that("schema fails well", {
   skip_on_cran()
-  
-  invisible(solr_connect(verbose = FALSE))
-  
-  expect_error(schema(), "argument \"name\" is missing")
-  expect_error(schema(name = "gettingstarted", "stuff"), "Client error")
+  skip_if_not(!is_in_cloud_mode(conn))
+
+  expect_error(conn$schema(), "argument \"name\" is missing")
+  expect_error(conn$schema(name = "gettingstarted", "stuff"), "Not Found")
+})
+
+test_that("schema old style works", {
+  skip_on_cran()
+
+  expect_is(schema(conn, name = "gettingstarted"),
+    "list"
+  )
 })
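
The schema calls under test, condensed into one sketch.

    conn$schema(name = "gettingstarted")                   # entire schema
    conn$schema(name = "gettingstarted", what = "fields")  # one section
    schema(conn, name = "gettingstarted")                  # old-style call
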
diff --git a/tests/testthat/test-solr_all.R b/tests/testthat/test-solr_all.R
index f29ec2d..b4d6c6e 100644
--- a/tests/testthat/test-solr_all.R
+++ b/tests/testthat/test-solr_all.R
@@ -1,86 +1,108 @@
 context("solr_all")
-
 test_that("solr_all works", {
   skip_on_cran()
 
-  solr_connect('http://api.plos.org/search', verbose = FALSE)
-
-  a <- solr_all(q='*:*', rows=2, fl='id')
-  b <- solr_all(q='title:"ecology" AND body:"cell"', fl='title', rows=5)
+  a <- conn_plos$all(params = list(q='*:*', rows=2, fl='id'))
 
   # correct dimensions
   expect_equal(length(a), 6)
-  expect_equal(length(b), 6)
 
   # correct classes
   expect_is(a, "list")
   expect_is(a$search, "tbl_df")
-  expect_is(b, "list")
-  expect_is(b$search, "tbl_df")
-  
+
   # right slot names
   expect_named(a, c('search','facet','high','mlt','group','stats'))
-  expect_named(b, c('search','facet','high','mlt','group','stats'))
 })
 
 test_that("solr_all fails well", {
   skip_on_cran()
 
-  invisible(solr_connect('http://api.plos.org/search', verbose = FALSE))
-
-  expect_error(solr_all(q = "*:*", rows = "asdf"), "500 - For input string")
-  expect_error(solr_all(q = "*:*", sort = "down"),
+  expect_error(conn_plos$all(params = list(q = "*:*", rows = "asdf")),
+    "rows should be a numeric or integer class value")
+  Sys.sleep(2)
+  expect_error(conn_plos$all(params = list(q = "*:*", sort = "down")),
                "400 - Can't determine a Sort Order \\(asc or desc\\) in sort spec 'down'")
-  expect_error(solr_all(q='*:*', fl=c('alm_twitterCount','id'),
-                           fq='alm_notafield:[5 TO 50]', rows=10),
+  Sys.sleep(2)
+  expect_error(conn_plos$all(params = list(q='*:*', fl=c('alm_twitterCount','id'),
+                           fq='alm_notafield:[5 TO 50]', rows=10)),
                "undefined field")
-  expect_error(solr_all(q = "*:*", wt = "foobar"),
+  expect_error(conn_plos$all(params = list(q = "*:*", wt = "foobar")),
                "wt must be one of: json, xml, csv")
 
 })
 
-test_that("solr_all works with HathiTrust", {
+test_that("solr_all works with Datacite", {
   skip_on_cran()
 
-  url_hathi <- "http://chinkapin.pti.indiana.edu:9994/solr/meta/select"
-  invisible(solr_connect(url = url_hathi, verbose = FALSE))
-
-  a <- solr_all(q = '*:*', rows = 2, fl = 'id')
-  b <- solr_all(q = 'language:Spanish', rows = 5)
-
+  a <- conn_dc$all(params = list(q = '*:*', rows = 2))
+  b <- conn_dc$all(params = list(q = 'publisher:Data', rows = 5))
   # correct dimensions
   expect_equal(NROW(a$search), 2)
   expect_equal(NROW(b$search), 5)
+})
 
-  # correct classes
-  expect_is(a, "list")
-  expect_is(a$search, "data.frame")
-  expect_is(a$high, "data.frame")
-  expect_is(a$group, "data.frame")
-  expect_null(b$stats)
-  expect_null(b$facet)
-  
-  expect_is(b, "list")
-  expect_is(a$search, "data.frame")
-  expect_is(b$high, "data.frame")
-  expect_is(b$group, "data.frame")
-  expect_null(b$stats)
-  expect_null(b$facet)
-
-  # names
-  expect_named(a$search, "id")
+
+test_that("solr_all old style works", {
+  skip_on_cran()
+
+  expect_is(solr_all(conn_plos,
+    params = list(q='*:*', rows=2, fl='id')),
+    "list"
+  )
 })
 
-test_that("solr_all works with Datacite", {
+
+test_that("solr_all optimize max rows with lower boundary", {
   skip_on_cran()
 
-  url_dc <- "http://search.datacite.org/api"
-  invisible(solr_connect(url = url_dc, verbose = FALSE))
+  a <- conn_plos$all(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$search$id)
+  b <- conn_plos$all(params = list(q=query, rows=1, fl='id'))
+  cc <- conn_plos$all(params = list(q=query, rows=-1, fl='id'))
 
-  a <- solr_all(q = '*:*', rows = 2)
-  b <- solr_all(q = 'publisher:Data', rows = 5)
+  expect_identical(b, cc)
+})
 
-  # correct dimensions
-  expect_equal(NROW(a$search), 2)
-  expect_equal(NROW(b$search), 5)
+test_that("solr_all optimize max rows with upper boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$all(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$search$id)
+  b <- conn_plos$all(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$all(params = list(q=query, rows=50000, fl='id'))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_all optimize max rows with rows higher than upper boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$all(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$search$id)
+  b <- conn_plos$all(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$all(params = list(q=query, rows=50001, fl='id'))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_all optimize max rows with rows=31 and minOptimizedRows=30", {
+  skip_on_cran()
+
+  a <- conn_plos$all(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$search$id)
+  b <- conn_plos$all(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$all(params = list(q=query, rows=31, fl='id'), optimizeMaxRows=TRUE, minOptimizedRows=30)
+
+  expect_identical(b, c)
+})
+
+
+test_that("solr_all fails if optimize max rows is disabled with rows equal to -1", {
+  skip_on_cran()
+
+  expect_error(
+    conn_plos$all(params = list(q='*:*', rows=-1, fl='id'), optimizeMaxRows=FALSE),
+    "'rows' parameter cannot be negative"
+  )
 })
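
The optimizeMaxRows tests above pin down the rows handling: with the
optimization active (the behavior the tests rely on by default), rows = -1
means "all matches", while with it disabled a negative rows is an error.  A
sketch, assuming the conn_plos helper connection the tests use.

    # rows = -1 retrieves all matches while the optimization is active
    conn_plos$all(params = list(q = '*:*', rows = -1, fl = 'id'))
    # with the optimization off, negative rows errors instead
    # conn_plos$all(params = list(q = '*:*', rows = -1, fl = 'id'),
    #               optimizeMaxRows = FALSE)
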
diff --git a/tests/testthat/test-solr_connect.R b/tests/testthat/test-solr_connect.R
deleted file mode 100644
index a3f0d21..0000000
--- a/tests/testthat/test-solr_connect.R
+++ /dev/null
@@ -1,50 +0,0 @@
-# solr_connect
-context("solr_connect")
-
-test_that("solr_connect to remote Solr server works", {
-  skip_on_cran()
-  
-  invisible(aa <- solr_connect('http://api.plos.org/search'))
-  
-  expect_is(aa, "solr_connection")
-  expect_is(aa$url, "character")
-  expect_null(aa$proxy)
-  expect_is(aa$errors, "character")
-  expect_named(aa, c('url', 'proxy', 'errors', 'verbose'))
-})
-
-test_that("solr_connect to local Solr server works", {
-  skip_on_cran()
-  
-  invisible(bb <- solr_connect())
-  
-  expect_is(bb, "solr_connection")
-  expect_is(bb$url, "character")
-  expect_null(bb$proxy)
-  expect_is(bb$errors, "character")
-  expect_named(bb, c('url', 'proxy', 'errors', 'verbose'))
-})
-
-test_that("solr_connect works with a proxy", {
-  skip_on_cran()
-  
-  port = 3128
-  proxy <- list(url = "187.62.207.130", port = port)
-  invisible(cc <- solr_connect(proxy = proxy))
-  
-  expect_is(cc, "solr_connection")
-  expect_is(cc$url, "character")
-  expect_is(cc$proxy, "request")
-  expect_is(cc$proxy$options, "list")
-  expect_equal(cc$proxy$options$proxyport, port)
-  expect_is(cc$errors, "character")
-})
-
-test_that("solr_connect fails well", {
-  skip_on_cran()
-  
-  expect_error(solr_connect("foobar"), "That does not appear to be a url")
-  expect_error(solr_connect(errors = 'foo'), "should be one of")
-  expect_error(solr_connect(proxy = list(foo = "bar")), 
-               "Input to proxy can only contain")
-})
diff --git a/tests/testthat/test-solr_error.R b/tests/testthat/test-solr_error.R
index 73942b1..6809362 100644
--- a/tests/testthat/test-solr_error.R
+++ b/tests/testthat/test-solr_error.R
@@ -3,10 +3,13 @@ context("solr_error internal function")
 test_that("solr_error works when no errors", {
   skip_on_cran()
 
-  invisible(solr_connect('http://api.plos.org/search', verbose = FALSE))
-  
-  aa <- solr_search(q = '*:*', rows = 2, fl = 'id')
-  expect_equal(solr_settings()$errors, "simple")
+  aa <- conn_simp$search(params = list(q = '*:*', rows = 2, fl = 'id'))
+  expect_equal(conn$errors, "simple")
+  expect_is(aa, "data.frame")
+  expect_is(aa$id, "character")
+
+  aa <- solr_search(conn_simp, params = list(q = '*:*', rows = 2, fl = 'id'))
+  expect_equal(conn$errors, "simple")
   expect_is(aa, "data.frame")
   expect_is(aa$id, "character")
 })
@@ -14,36 +17,24 @@ test_that("solr_error works when no errors", {
 
 test_that("solr_error works when there should be errors - simple errors", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', verbose = FALSE))
-  
-  expect_equal(solr_settings()$errors, "simple")
-  expect_error(solr_search(q = '*:*', rows = 5, sort = "things"), 
+
+  expect_equal(conn_simp$errors, "simple")
+  expect_error(conn_simp$search(params = list(q = '*:*', rows = 5, sort = "things")),
                "Can't determine a Sort Order")
 })
 
 test_that("solr_error works when there should be errors - complete errors", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', 
-                         errors = "complete", 
-                         verbose = FALSE))
-  
-  expect_equal(solr_settings()$errors, "complete")
-  expect_error(solr_search(q = '*:*', rows = 5, sort = "things"), 
+
+  expect_equal(conn_comp$errors, "complete")
+  expect_error(conn_comp$search(params = list(q = '*:*', rows = 5, sort = "things")),
                "Can't determine a Sort Order")
-  expect_error(solr_search(q = '*:*', rows = 5, sort = "things"), 
-               "no stack trace")
 })
 
 test_that("solr_error - test directly", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', 
-                         errors = "complete", 
-                         verbose = FALSE))
-  
-  library("httr")
-  res <- GET("http://api.plos.org/search?wt=json&q=%22synthetic%20biology%22&rows=10&fl=id,title&sort=notasortoption")
+
+  library(crul)
+  res <- crul::HttpClient$new(url = "http://api.plos.org/search?wt=json&q=%22synthetic%20biology%22&rows=10&fl=id,title&sort=notasortoption")$get()
   expect_error(solrium:::solr_error(res), "Can't determine a Sort Order \\(asc or desc\\)")
 })
diff --git a/tests/testthat/test-solr_facet.r b/tests/testthat/test-solr_facet.r
index e72afe2..faad22b 100644
--- a/tests/testthat/test-solr_facet.r
+++ b/tests/testthat/test-solr_facet.r
@@ -3,67 +3,72 @@ context("solr_facet")
 test_that("solr_facet works", {
   skip_on_cran()
 
-  invisible(solr_connect('http://api.plos.org/search', verbose=FALSE))
-
-  a <- solr_facet(q='*:*', facet.field='journal')
-  b <- solr_facet(q='*:*', facet.date='publication_date', 
-                  facet.date.start='NOW/DAY-5DAYS', facet.date.end='NOW', 
-                  facet.date.gap='+1DAY')
-  c <- solr_facet(q='alcohol', facet.pivot='journal,subject', 
-                  facet.pivot.mincount=10)
+  a <- conn_plos$facet(params = list(q='*:*', facet.field='journal'))
+  Sys.sleep(2)
+  # FIXME: this doesn't work anymore
+  # b <- conn_plos$facet(params = list(q='*:*', facet.date='publication_date',
+  #                 facet.date.start='NOW/DAY-90DAYS', facet.date.end='NOW',
+  #                 facet.date.gap='+1DAY'))
+  c <- conn_plos$facet(params = list(q='alcohol', facet.pivot='journal,subject',
+                  facet.pivot.mincount=10))
 
   # correct dimensions
   expect_equal(length(a), 5)
   expect_equal(length(a$facet_queries), 0)
   expect_equal(NCOL(a$facet_fields$journal), 2)
 
-  expect_that(length(b), equals(5))
-  expect_that(length(b$facet_dates), equals(1))
-  expect_that(dim(b$facet_dates$publication_date), equals(c(6,2)))
-  
+  # expect_that(length(b), equals(5))
+  # expect_that(length(b$facet_dates), equals(1))
+  # expect_that(dim(b$facet_dates$publication_date), equals(c(6,2)))
+
   expect_equal(length(c), 5)
   expect_equal(names(c$facet_pivot), c('journal', 'journal,subject'))
   expect_equal(names(c$facet_pivot$journal), c('journal', 'count'))
   expect_equal(names(c$facet_pivot$`journal,subject`), c('journal', 'subject', 'count'))
   expect_true(min(unlist(c$facet_pivot$`journal,subject`$count)) >= 10)
-  
+
   # correct classes
   expect_is(a, "list")
-  expect_is(b, "list")
+  # expect_is(b, "list")
   expect_is(c, "list")
-  expect_is(b$facet_dates, "list")
-  expect_is(b$facet_dates$publication_date, "data.frame")
+  # expect_is(b$facet_dates, "list")
+  # expect_is(b$facet_dates$publication_date, "data.frame")
   expect_is(c$facet_pivot, "list")
   expect_is(c$facet_pivot$journal, "data.frame")
   expect_is(c$facet_pivot$`journal,subject`, "data.frame")
 })
 
 
-test_that("faceting works against HathiTrust", {
-  url_hathi <- "http://chinkapin.pti.indiana.edu:9994/solr/meta/select"
-  invisible(solr_connect(url = url_hathi, verbose = FALSE))
-  
-  # regular facet
-  a <- solr_facet(q = '*:*', facet.field = 'genre')
-  # pivot facet
-  c <- solr_facet(q = '*:*', facet.pivot = 'genre,publisher', 
-                  facet.pivot.mincount = 10)
-  
-  expect_equal(length(a), 5)
-  expect_equal(length(a$facet_queries), 0)
-  expect_equal(NCOL(a$facet_fields$genre), 2)
-  
-  expect_equal(length(c), 5)
-  expect_equal(names(c$facet_pivot), c('genre', 'genre,publisher'))
-  expect_named(c$facet_pivot$genre, c('genre', 'count'))
-  expect_named(c$facet_pivot$`genre,publisher`, c('genre', 'publisher', 'count'))
-  expect_true(min(unlist(c$facet_pivot$`genre,publisher`$count)) >= 10)
-  
-  # correct classes
-  expect_is(a, "list")
-  expect_is(c, "list")
-  expect_is(c$facet_pivot, "list")
-  expect_is(c$facet_pivot$genre, "data.frame")
-  expect_is(c$facet_pivot$`genre,publisher`, "data.frame")  
-})
+# test_that("faceting works against HathiTrust", {
+#   # regular facet
+#   a <- conn_hathi$facet(params = list(q = '*:*', facet.field = 'genre'))
+#   # pivot facet
+#   c <- conn_hathi$facet(params = list(q = '*:*', facet.pivot = 'genre,publisher',
+#                   facet.pivot.mincount = 10))
+
+#   expect_equal(length(a), 5)
+#   expect_equal(length(a$facet_queries), 0)
+#   expect_equal(NCOL(a$facet_fields$genre), 2)
+
+#   expect_equal(length(c), 5)
+#   expect_equal(names(c$facet_pivot), c('genre', 'genre,publisher'))
+#   expect_named(c$facet_pivot$genre, c('genre', 'count'))
+#   expect_named(c$facet_pivot$`genre,publisher`, c('genre', 'publisher', 'count'))
+#   expect_true(min(unlist(c$facet_pivot$`genre,publisher`$count)) >= 10)
 
+#   # correct classes
+#   expect_is(a, "list")
+#   expect_is(c, "list")
+#   expect_is(c$facet_pivot, "list")
+#   expect_is(c$facet_pivot$genre, "data.frame")
+#   expect_is(c$facet_pivot$`genre,publisher`, "data.frame")
+# })
+
+test_that("solr_facet old style works", {
+  skip_on_cran()
+
+  expect_is(solr_facet(conn_plos,
+    params = list(q='*:*', facet.field='journal')),
+    "list"
+  )
+})
diff --git a/tests/testthat/test-solr_get.R b/tests/testthat/test-solr_get.R
new file mode 100644
index 0000000..2315485
--- /dev/null
+++ b/tests/testthat/test-solr_get.R
@@ -0,0 +1,43 @@
+context("get")
+
+test_that("get works with a single id", {
+  skip_on_cran()
+
+  if (!collection_exists(conn, "gettingstarted")) {
+    collection_create(conn, name = "gettingstarted", numShards = 1)
+  }
+  ss <- list(list(id = 1, price = 100), list(id = 2, price = 500))
+  invisible(add(ss, conn, name = "gettingstarted"))
+
+  aa <- solr_get(conn, ids = 1, "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("response"))
+  expect_named(aa$response, c("numFound", "start", "docs"))
+  expect_is(aa$response$docs, "data.frame")
+
+
+  aa <- solr_get(conn, ids = c(1, 2), "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("response"))
+  expect_equal(NROW(aa$response$docs), 2)
+
+  aa <- solr_get(conn, ids = "1,2", "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("response"))
+  expect_equal(NROW(aa$response$docs), 2)
+
+  aa <- conn$get(1, "gettingstarted")
+
+  expect_is(aa, "list")
+  expect_named(aa$response, c("numFound", "start", "docs"))
+})
+
+test_that("get fails well", {
+  skip_on_cran()
+
+  expect_error(solr_get(), "argument \"conn\" is missing")
+  expect_error(solr_get(5), "conn must be a SolrClient object")
+})
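
As the test shows, ids accepts a numeric vector or a comma-separated string,
and both the functional and R6 forms work.

    solr_get(conn, ids = c(1, 2), "gettingstarted")  # vector of ids
    solr_get(conn, ids = "1,2", "gettingstarted")    # equivalent string form
    conn$get(1, "gettingstarted")                    # R6 method form
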
diff --git a/tests/testthat/test-solr_goup.R b/tests/testthat/test-solr_goup.R
new file mode 100644
index 0000000..6ecf521
--- /dev/null
+++ b/tests/testthat/test-solr_goup.R
@@ -0,0 +1,59 @@
+context("solr_group")
+
+test_that("solr_group works", {
+  skip_on_cran()
+
+  a <- conn_plos$group(params = list(q='ecology', group.field='journal',
+    group.limit=3, fl=c('id','score')))
+  Sys.sleep(2)
+  b <- conn_plos$group(params = list(q='ecology', group.field='journal',
+    group.limit=3, fl=c('id','score','alm_twitterCount'),
+    group.sort='alm_twitterCount desc'))
+  Sys.sleep(2)
+  out <- conn_plos$group(params = list(q='ecology',
+    group.field=c('journal','article_type'), group.limit=3, fl='id'),
+    raw=TRUE)
+  Sys.sleep(2)
+  c <- out
+  d <- solr_parse(out, 'df')
+  e <- conn_plos$group(params = list(q='ecology', group.field='journal', group.limit=3, fl=c('id','score'),
+                  group.format='grouped', group.main='true'))
+
+  suppressPackageStartupMessages(library('jsonlite', quietly = TRUE))
+  f <- jsonlite::fromJSON(out, FALSE)
+
+  # correct dimensions
+  expect_equal(NCOL(a), 5)
+  expect_equal(NCOL(b), 6)
+  expect_that(length(c), equals(1))
+  expect_that(length(d), equals(2))
+  expect_equal(NCOL(d$article_type), 4)
+  expect_equal(NCOL(e), 4)
+  expect_that(length(f), equals(1))
+  expect_that(length(f$grouped), equals(2))
+
+  #  correct classes
+  expect_is(a, "data.frame")
+  expect_is(b, "data.frame")
+  expect_is(c, "sr_group")
+  expect_is(d, "list")
+  expect_is(d$journal, "data.frame")
+  expect_is(e, "data.frame")
+})
+
+test_that("solr_group old style works", {
+  skip_on_cran()
+
+  expect_is(solr_group(conn_plos,
+    params = list(q='ecology', group.field='journal',
+      group.limit=3, fl=c('id','score'))),
+    "data.frame"
+  )
+
+  expect_is(solr_group(conn_plos,
+    params = list(q='ecology', group.field='journal', group.limit=3, fl=c('id','score'),
+                  group.format='grouped', group.main='true')),
+    "data.frame"
+  )
+})
+
diff --git a/tests/testthat/test-solr_group.r b/tests/testthat/test-solr_group.r
deleted file mode 100644
index 3657277..0000000
--- a/tests/testthat/test-solr_group.r
+++ /dev/null
@@ -1,39 +0,0 @@
-context("solr_group")
-
-test_that("solr_group works", {
-  skip_on_cran()
-
-  solr_connect('http://api.plos.org/search', verbose=FALSE)
-
-  a <- solr_group(q='ecology', group.field='journal', group.limit=3, fl=c('id','score'))
-  b <- solr_group(q='ecology', group.field='journal', group.limit=3,
-                  fl=c('id','score','alm_twitterCount'),
-                  group.sort='alm_twitterCount desc')
-  out <- solr_group(q='ecology', group.field=c('journal','article_type'), group.limit=3, fl='id',
-                    raw=TRUE)
-  c <- out
-  d <- solr_parse(out, 'df')
-  e <- solr_group(q='ecology', group.field='journal', group.limit=3, fl=c('id','score'),
-                  group.format='grouped', group.main='true')
-
-  suppressPackageStartupMessages(library('jsonlite', quietly = TRUE))
-  f <- jsonlite::fromJSON(out, FALSE)
-
-  # correct dimensions
-  expect_equal(NCOL(a), 5)
-  expect_equal(NCOL(b), 6)
-  expect_that(length(c), equals(1))
-  expect_that(length(d), equals(2))
-  expect_equal(NCOL(d$article_type), 4)
-  expect_equal(NCOL(e), 4)
-  expect_that(length(f), equals(1))
-  expect_that(length(f$grouped), equals(2))
-
-  #  correct classes
-  expect_is(a, "data.frame")
-  expect_is(b, "data.frame")
-  expect_is(c, "sr_group")
-  expect_is(d, "list")
-  expect_is(d$journal, "data.frame")
-  expect_is(e, "data.frame")
-})
diff --git a/tests/testthat/test-solr_highlight.r b/tests/testthat/test-solr_highlight.r
index 0c2a916..495270b 100644
--- a/tests/testthat/test-solr_highlight.r
+++ b/tests/testthat/test-solr_highlight.r
@@ -3,23 +3,39 @@ context("solr_highlight")
 test_that("solr_highlight works", {
   skip_on_cran()
 
-  solr_connect('http://api.plos.org/search', verbose=FALSE)
-
-  a <- solr_highlight(q='alcohol', hl.fl = 'abstract', rows=10)
-  b <- solr_highlight(q='alcohol', hl.fl = c('abstract','title'), rows=3)
+  a <- conn_plos$highlight(params = list(q='alcohol', hl.fl = 'abstract',
+                                         rows=10))
+  Sys.sleep(2)
+  b <- conn_plos$highlight(params = list(q='alcohol',
+                                         hl.fl = c('abstract','title'),
+                                         rows=3))
 
   # correct dimensions
-  expect_that(length(a), equals(10))
-  expect_that(length(a[[1]]), equals(1))
-  expect_that(length(b), equals(3))
-  expect_that(length(b[[3]]), equals(2))
+  expect_that(NROW(a), equals(10))
+  expect_that(NCOL(a), equals(2))
+  expect_that(NROW(b), equals(3))
+  expect_that(NCOL(b), equals(3))
 
   # correct classes
-  expect_is(a, "list")
-  expect_is(a[[1]]$abstract, "character")
+  expect_is(a, "tbl_df")
+  expect_is(a$abstract, "character")
+
+  expect_is(b, "tbl_df")
+  expect_is(b$abstract, "character")
+  expect_is(b$title, "character")
+})
+
+test_that("solr_highlight old style works", {
+  skip_on_cran()
+
+  expect_is(solr_highlight(conn_plos,
+    params = list(q='alcohol', hl.fl = 'abstract', rows=10)),
+    "tbl_df"
+  )
 
-  expect_is(b, "list")
-  expect_is(b[[1]], "list")
-  expect_is(b[[1]]$abstract, "character")
-  expect_is(b[[1]]$title, "character")
+  expect_is(solr_highlight(conn_plos,
+    params = list(q='alcohol',
+      hl.fl = c('abstract','title'), rows=3)),
+    "tbl_df"
+  )
 })
diff --git a/tests/testthat/test-solr_mlt.r b/tests/testthat/test-solr_mlt.r
index a2c0d5e..fc7d311 100644
--- a/tests/testthat/test-solr_mlt.r
+++ b/tests/testthat/test-solr_mlt.r
@@ -3,12 +3,15 @@ context("solr_mlt")
 test_that("solr_mlt works", {
   skip_on_cran()
 
-  solr_connect('http://api.plos.org/search', verbose=FALSE)
+  a <- conn_plos$mlt(params = list(q='*:*', mlt.count=2,
+    mlt.fl='abstract', fl='score', fq="doc_type:full"))
+  Sys.sleep(2)
+  c <- conn_plos$mlt(params = list(q='ecology', mlt.fl='abstract',
+    fl='title', rows=5))
+  Sys.sleep(2)
 
-  a <- solr_mlt(q='*:*', mlt.count=2, mlt.fl='abstract', fl='score', fq="doc_type:full")
-  c <- solr_mlt(q='ecology', mlt.fl='abstract', fl='title', rows=5)
-
-  out <- solr_mlt(q='ecology', mlt.fl='abstract', fl='title', rows=2, raw=TRUE, wt="xml")
+  out <- conn_plos$mlt(params = list(q='ecology', mlt.fl='abstract',
+    fl='title', rows=2, wt="xml"), raw=TRUE)
   library("xml2")
   outxml <- read_xml(unclass(out))
   outdf <- solr_parse(out, "df")
@@ -33,3 +36,81 @@ test_that("solr_mlt works", {
   expect_is(outdf, "list")
   expect_is(outdf$mlt[[1]], "data.frame")
 })
+
+test_that("solr_mlt old style works", {
+  skip_on_cran()
+
+  expect_is(
+    solr_mlt(conn_plos,
+      params = list(q='*:*', mlt.count=2,
+        mlt.fl='abstract', fl='score', fq="doc_type:full")),
+    "list"
+  )
+
+  expect_is(
+    solr_mlt(conn_plos,
+      params = list(q='ecology',
+        mlt.fl='abstract', fl='title', rows=5)),
+    "list"
+  )
+})
+
+
+
+
+
+test_that("solr_mlt optimize max rows with lower boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', rows=1))
+  query <- paste0('id:', a$docs$id)
+  b <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=1))
+  cc <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=-1))
+
+  expect_identical(b, cc)
+})
+
+test_that("solr_mlt optimize max rows with upper boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', rows=1))
+  query <- paste0('id:', a$docs$id)
+  b <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=1))
+  c <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=50000))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_mlt optimize max rows with rows higher than upper boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$mlt(params = list(q='ecology', mlt.count=2, mlt.fl='abstract', rows=1))
+  query <- paste0('id:', a$docs$id)
+  b <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=1))
+  c <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=50001))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_mlt optimize max rows with rows=31 and minOptimizedRows=30", {
+  skip_on_cran()
+
+  a <- conn_plos$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', rows=1))
+  query <- paste0('id:', a$docs$id)
+  b <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=1))
+  c <- conn_plos$mlt(params = list(q=query, mlt.count=2, mlt.fl='abstract', rows=31),
+    optimizeMaxRows=TRUE, minOptimizedRows=30)
+
+  expect_identical(b, c)
+})
+
+
+test_that("solr_mlt fails if optimize max rows is disabled with rows equal to -1", {
+  skip_on_cran()
+
+  expect_error(
+    conn_plos$mlt(params = list(q='*:*', mlt.count=2, mlt.fl='abstract', rows=-1),
+      optimizeMaxRows=FALSE),
+    "'rows' parameter cannot be negative"
+  )
+})
diff --git a/tests/testthat/test-solr_search.r b/tests/testthat/test-solr_search.r
index 96a567f..ca95117 100644
--- a/tests/testthat/test-solr_search.r
+++ b/tests/testthat/test-solr_search.r
@@ -3,10 +3,10 @@ context("solr_search")
 test_that("solr_search works", {
   skip_on_cran()
 
-  solr_connect('http://api.plos.org/search', verbose = FALSE)
-
-  a <- solr_search(q='*:*', rows=2, fl='id')
-  b <- solr_search(q='title:"ecology" AND body:"cell"', fl='title', rows=5)
+  a <- conn_plos$search(params = list(q='*:*', rows=2, fl='id'))
+  Sys.sleep(2)
+  b <- conn_plos$search(params = list(q='title:"ecology" AND body:"cell"', fl='title', rows=5))
+  Sys.sleep(2)
 
   # correct dimensions
   expect_that(length(a), equals(1))
@@ -15,86 +15,113 @@ test_that("solr_search works", {
   # correct classes
   expect_is(a, "data.frame")
   expect_is(b, "data.frame")
+
+  expect_is(
+    solr_search(conn_plos, params = list(q='*:*', rows=2, fl='id')),
+    "tbl_df")
+  expect_is(
+    solr_search(conn_plos, params = list(q='title:"ecology" AND body:"cell"',
+      fl='title', rows=5)), "tbl_df")
 })
 
 test_that("solr_search fails well", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', verbose = FALSE))
-  
-  expect_error(solr_search(q = "*:*", rows = "asdf"), "500 - For input string")
-  expect_error(solr_search(q = "*:*", sort = "down"), 
+
+  expect_error(conn_plos$search(params = list(q = "*:*", rows = "asdf")),
+               "rows should be a numeric or integer")
+  expect_error(solr_search(conn_plos, params = list(q = "*:*", rows = "asdf")),
+               "rows should be a numeric or integer")
+  expect_error(conn_plos$search(params = list(q = "*:*", sort = "down")),
                "400 - Can't determine a Sort Order \\(asc or desc\\) in sort spec 'down'")
-  expect_error(solr_search(q='*:*', fl=c('alm_twitterCount','id'), 
-                           fq='alm_notafield:[5 TO 50]', rows=10), 
+  expect_error(conn_plos$search(params = list(q='*:*', fl=c('alm_twitterCount','id'),
+                           fq='alm_notafield:[5 TO 50]', rows=10)),
                "undefined field")
-  expect_error(solr_search(q = "*:*", wt = "foobar"), 
+  expect_error(conn_plos$search(params = list(q = "*:*", wt = "foobar")),
                "wt must be one of: json, xml, csv")
-  
+
 })
 
-test_that("solr_search works with HathiTrust", {
+
+test_that("solr_search works with Dryad", {
   skip_on_cran()
-  
-  url_hathi <- "http://chinkapin.pti.indiana.edu:9994/solr/meta/select"
-  invisible(solr_connect(url = url_hathi, verbose = FALSE))
-  
-  a <- solr_search(q = '*:*', rows = 2, fl = 'id')
-  b <- solr_search(q = 'language:Spanish', rows = 5)
-  
+
+  a <- conn_dryad$search(params = list(q = '*:*', rows = 2))
+  Sys.sleep(2)
+  b <- conn_dryad$search(params = list(q = 'dc.title.en:ecology', rows = 5))
+
   # correct dimensions
   expect_equal(NROW(a), 2)
   expect_equal(NROW(b), 5)
-  
+
   # correct classes
   expect_is(a, "data.frame")
   expect_is(a, "tbl_df")
   expect_is(b, "data.frame")
   expect_is(b, "tbl_df")
-  
-  # names
-  expect_named(a, "id")
+
+  # correct content
+  expect_true(all(grepl("ecolog", b$dc.title.en, ignore.case = TRUE)))
+
+  # solr_search
+  expect_is(solr_search(conn_dryad, params = list(q = '*:*', rows = 2)),
+    "tbl_df")
+  expect_is(
+    solr_search(conn_dryad, params = list(q = 'dc.title.en:ecology', rows = 5)),
+    "tbl_df")
 })
 
-test_that("solr_search works with Datacite", {
+
+
+test_that("solr_search optimize max rows with lower boundary", {
   skip_on_cran()
-  
-  url_dc <- "http://search.datacite.org/api"
-  invisible(solr_connect(url = url_dc, verbose = FALSE))
-  
-  a <- solr_search(q = '*:*', rows = 2)
-  b <- solr_search(q = 'publisher:Data', rows = 5)
-  
-  # correct dimensions
-  expect_equal(NROW(a), 2)
-  expect_equal(NROW(b), 5)
-  
-  # correct classes
-  expect_is(a, "data.frame")
-  expect_is(a, "tbl_df")
-  expect_is(b, "data.frame")
-  expect_is(b, "tbl_df")
+
+  a <- conn_plos$search(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$id)
+  b <- conn_plos$search(params = list(q=query, rows=1, fl='id'))
+  cc <- conn_plos$search(params = list(q=query, rows=-1, fl='id'))
+
+  expect_identical(b, cc)
 })
 
-test_that("solr_search works with Dryad", {
+test_that("solr_search optimize max rows with upper boundary", {
   skip_on_cran()
-  
-  url_dryad <- "http://datadryad.org/solr/search/select"
-  invisible(solr_connect(url = url_dryad, verbose = FALSE))
-  
-  a <- solr_search(q = '*:*', rows = 2)
-  b <- solr_search(q = 'dc.title.en:ecology', rows = 5)
-  
-  # correct dimensions
-  expect_equal(NROW(a), 2)
-  expect_equal(NROW(b), 5)
-  
-  # correct classes
-  expect_is(a, "data.frame")
-  expect_is(a, "tbl_df")
-  expect_is(b, "data.frame")
-  expect_is(b, "tbl_df")
-  
-  # correct content
-  expect_true(all(grepl("ecolog", b$dc.title.en, ignore.case = TRUE)))
+
+  a <- conn_plos$search(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$id)
+  b <- conn_plos$search(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$search(params = list(q=query, rows=50000, fl='id'))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_search optimize max rows with rows higher than upper boundary", {
+  skip_on_cran()
+
+  a <- conn_plos$search(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$id)
+  b <- conn_plos$search(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$search(params = list(q=query, rows=50001, fl='id'))
+
+  expect_identical(b, c)
+})
+
+test_that("solr_search optimize max rows with rows=31 and minOptimizedRows=30", {
+  skip_on_cran()
+
+  a <- conn_plos$search(params = list(q='*:*', rows=1, fl='id'))
+  query <- paste0('id:', a$id)
+  b <- conn_plos$search(params = list(q=query, rows=1, fl='id'))
+  c <- conn_plos$search(params = list(q=query, rows=31, fl='id'), optimizeMaxRows=TRUE, minOptimizedRows=30)
+
+  expect_identical(b, c)
+})
+
+
+test_that("solr_search fails if optimize max rows is disabled with rows equal to -1", {
+  skip_on_cran()
+
+  expect_error(
+    conn_plos$search(params = list(q='*:*', rows=-1, fl='id'), optimizeMaxRows=FALSE),
+    "'rows' parameter cannot be negative"
+  )
 })
diff --git a/tests/testthat/test-solr_settings.R b/tests/testthat/test-solr_settings.R
deleted file mode 100644
index fdac54e..0000000
--- a/tests/testthat/test-solr_settings.R
+++ /dev/null
@@ -1,31 +0,0 @@
-# solr_settings
-context("solr_settings")
-
-test_that("solr_settings gives right classes", {
-  skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search'))
-  aa <- solr_settings()
-  
-  expect_is(aa, "solr_connection")
-  expect_is(aa$url, "character")
-  expect_null(aa$proxy)
-  expect_is(aa$errors, "character")
-})
-
-
-test_that("solr_settings gives right values", {
-  skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search'))
-  aa <- solr_settings()
-  
-  expect_equal(aa$errors, "simple")
-})
-
-
-test_that("solr_settings fails with a argument passed", {
-  skip_on_cran()
-  
-  expect_error(solr_settings(3), "unused argument")
-})
diff --git a/tests/testthat/test-solr_stats.r b/tests/testthat/test-solr_stats.r
index 90c03d6..67cb6e7 100644
--- a/tests/testthat/test-solr_stats.r
+++ b/tests/testthat/test-solr_stats.r
@@ -3,13 +3,14 @@ context("solr_stats")
 test_that("solr_stats works", {
   skip_on_cran()
 
-  invisible(solr_connect('http://api.plos.org/search', verbose=FALSE))
-
-  a <- solr_stats(q='science', stats.field='counter_total_all', raw=TRUE)
-  b <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'), 
-                  stats.facet=c('journal','volume'))
-  c <- solr_stats(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'), 
-                  stats.facet=c('journal','volume'), raw=TRUE)
+  a <- conn_plos$stats(params = list(q='science', stats.field='counter_total_all'), raw=TRUE)
+  Sys.sleep(2)
+  b <- conn_plos$stats(params = list(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
+                  stats.facet=c('journal','volume')))
+  Sys.sleep(2)
+  c <- conn_plos$stats(params = list(q='ecology', stats.field=c('counter_total_all','alm_twitterCount'),
+                  stats.facet=c('journal','volume')), raw=TRUE)
+  Sys.sleep(2)
   d <- solr_parse(c) # list
   e <- solr_parse(c, 'df') # data.frame
 
@@ -33,25 +34,39 @@ test_that("solr_stats works", {
   expect_equal(attr(c, "wt"), "json")
   expect_is(d, "list")
   expect_is(e, "list")
+
+  # solr_stats
+  expect_is(
+    solr_stats(conn_plos,
+      params = list(q='ecology',
+        stats.field=c('counter_total_all','alm_twitterCount'),
+                  stats.facet=c('journal','volume'))),
+    "list"
+  )
+  expect_is(
+    solr_stats(conn_plos,
+      params = list(q='ecology',
+        stats.field=c('counter_total_all','alm_twitterCount'),
+                  stats.facet=c('journal','volume')), raw=TRUE),
+    "sr_stats"
+  )
 })
 
 test_that("solr_stats works using wt=xml", {
   skip_on_cran()
-  
-  invisible(solr_connect('http://api.plos.org/search', verbose = FALSE))
-  
-  aa <- solr_stats(q='science', wt="xml", stats.field='counter_total_all', raw=TRUE)
-  bb <- solr_stats(q='science', wt="xml", stats.field='counter_total_all')
-  cc <- solr_stats(q='science', wt="xml", stats.field=c('counter_total_all','alm_twitterCount'), 
-                   stats.facet=c('journal','volume'))
-  
+
+  aa <- conn_plos$stats(params = list(q='science', wt="xml", stats.field='counter_total_all'), raw=TRUE)
+  bb <- conn_plos$stats(params = list(q='science', wt="xml", stats.field='counter_total_all'))
+  cc <- conn_plos$stats(params = list(q='science', wt="xml", stats.field=c('counter_total_all','alm_twitterCount'),
+                   stats.facet=c('journal','volume')))
+
+  # correct dimensions
   expect_equal(length(aa), 1)
   expect_equal(length(bb), 2)
   expect_equal(NROW(bb$data), 1)
   expect_named(cc$facet[[1]], c("volume", "journal"))
   expect_equal(length(cc), 2)
-  
+
   # classes
   expect_is(aa, "sr_stats")
   expect_is(bb, "list")
@@ -60,51 +75,3 @@ test_that("solr_stats works using wt=xml", {
   expect_is(cc$facet[[1]][[1]], "data.frame")
   expect_equal(attr(aa, "wt"), "xml")
 })
-
-test_that("solr_stats works with HathiTrust", {
-  skip_on_cran()
-  
-  url_hathi <- "http://chinkapin.pti.indiana.edu:9994/solr/meta/select"
-  invisible(solr_connect(url = url_hathi, verbose = FALSE))
-  
-  a <- solr_stats(q='*:*', stats.field = 'htrc_wordCount', raw = TRUE)
-  b <- solr_stats(q = '*:*', stats.field = c('htrc_wordCount', 'htrc_pageCount'))
-  c <- solr_stats(q = '*:*', stats.field = 'htrc_charCount')
-  d <- solr_parse(a) # list
-  
-  # correct dimenions
-  expect_equal(length(a), 1)
-  expect_equal(length(b), 2)
-  expect_equal(nrow(b$data), 2)
-  expect_equal(length(c), 2)
-  expect_equal(length(d), 2)
-  expect_equal(length(d$data$htrc_wordCount), 8)
-  
-  # classes
-  expect_is(a, "sr_stats")
-  expect_is(b, "list")
-  expect_is(b$data, "data.frame")
-  expect_is(d, "list")
-})
-
-test_that("solr_stats works with Datacite", {
-  skip_on_cran()
-  
-  url_dc <- "http://search.datacite.org/api"
-  invisible(solr_connect(url = url_dc, verbose = FALSE))
-  
-  a <- solr_stats(q='*:*', stats.field='publicationYear', raw=TRUE)
-  b <- solr_stats(q='*:*', stats.field='publicationYear', stats.facet = "prefix")
-  
-  # correct dimenions
-  expect_equal(length(a), 1)
-  expect_equal(length(b), 2)
-  expect_equal(nrow(b$data), 1)
-  expect_equal(NCOL(b$facet$publicationYear), 5)
-  
-  # classes
-  expect_is(a, "sr_stats")
-  expect_is(b, "list")
-  expect_is(b$data, "data.frame")
-  expect_is(b$facet$publicationYear, "data.frame")
-})
diff --git a/tests/testthat/test-update_atomic_json.R b/tests/testthat/test-update_atomic_json.R
new file mode 100644
index 0000000..88182d1
--- /dev/null
+++ b/tests/testthat/test-update_atomic_json.R
@@ -0,0 +1,41 @@
+context("update_atomic_json")
+
+test_that("update_atomic_json works", {
+  skip_on_cran()
+
+  if (conn$collection_exists("books")) {
+    conn$collection_delete("books")
+  }
+  conn$collection_create("books")
+
+  file <- system.file("examples", "books2.json", package = "solrium")
+  invisible(conn$update_json(file, "books"))
+
+  # get a document
+  res1 <- conn$get(ids = 343334534545, "books")
+
+  # atomic update
+  body <- '[{
+   "id": "343334534545",
+   "genre_s": {"set": "mystery" },
+   "pages_i": {"inc": 1 }
+  }]'
+  aa <- conn$update_atomic_json(body, "books")
+
+  # get the document after updating
+  res2 <- conn$get(ids = 343334534545, "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+
+  expect_is(res1$response$docs, "data.frame")
+  expect_equal(res1$response$docs$genre_s, "fantasy")
+  expect_equal(res1$response$docs$pages_i, 384)
+  expect_is(res2$response$docs, "data.frame")
+  expect_equal(res2$response$docs$pages_i, 385)
+})
+
+test_that("update_atomic_json fails well", {
+  expect_error(update_atomic_json(), "argument \"conn\" is missing")
+  expect_error(update_atomic_json(5), "conn must be a SolrClient object")
+})
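
The atomic-update body format, condensed from the test: "set" replaces a
field's value and "inc" increments a numeric field.  The id is the one from
books2.json used above.

    body <- '[{
      "id": "343334534545",
      "genre_s": {"set": "mystery"},
      "pages_i": {"inc": 1}
    }]'
    conn$update_atomic_json(body, "books")
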
diff --git a/tests/testthat/test-update_atomic_xml.R b/tests/testthat/test-update_atomic_xml.R
new file mode 100644
index 0000000..0e47a45
--- /dev/null
+++ b/tests/testthat/test-update_atomic_xml.R
@@ -0,0 +1,56 @@
+context("update_atomic_xml")
+
+library(xml2)
+
+test_that("update_atomic_xml works", {
+  skip_on_cran()
+
+  if (conn$collection_exists("books")) {
+    conn$collection_delete("books")
+  }
+  conn$collection_create("books")
+
+  # Add documents
+  file <- system.file("examples", "books.xml", package = "solrium")
+  invisible(conn$update_xml(file, "books"))
+
+  # get a document
+  res1 <- conn$get(ids = '978-0641723445', "books", wt = "xml")
+  res1_genre <- xml2::xml_text(
+    xml2::xml_find_all(res1, '//doc//str[@name="genre_s"]'))
+  res1_pages <- xml2::xml_text(
+    xml2::xml_find_all(res1, '//doc//int[@name="pages_i"]'))
+
+  # atomic update
+  body <- '
+  <add>
+   <doc>
+     <field name="id">978-0641723445</field>
+     <field name="genre_s" update="set">mystery</field>
+     <field name="pages_i" update="inc">1</field>
+   </doc>
+  </add>'
+  aa <- conn$update_atomic_xml(body, name="books")
+
+  # get the document again
+  res2 <- conn$get(ids = '978-0641723445', "books", wt = "xml")
+  res2_genre <- xml2::xml_text(
+    xml2::xml_find_all(res2, '//doc//str[@name="genre_s"]'))
+  res2_pages <- xml2::xml_text(
+    xml2::xml_find_all(res2, '//doc//int[@name="pages_i"]'))
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_is(res1, "xml_document")
+  expect_equal(res1_genre, "fantasy")
+  expect_equal(res1_pages, "384")
+
+  expect_is(res2, "xml_document")
+  expect_equal(res2_genre, "mystery")
+  expect_equal(res2_pages, "385")
+})
+
+test_that("update_atomic_xml fails well", {
+  expect_error(update_atomic_xml(), "argument \"conn\" is missing")
+  expect_error(update_atomic_xml(5), "conn must be a SolrClient object")
+})
diff --git a/tests/testthat/test-update_csv.R b/tests/testthat/test-update_csv.R
new file mode 100644
index 0000000..5ed98c3
--- /dev/null
+++ b/tests/testthat/test-update_csv.R
@@ -0,0 +1,35 @@
+context("update_csv")
+
+df <- data.frame(id=1:3, name=c('red', 'blue', 'green'))
+write.csv(df, file="df.csv", row.names=FALSE, quote = FALSE)
+
+test_that("update_csv works", {
+  skip_on_cran()
+
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+
+  aa <- conn$update_csv("df.csv", name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_csv works with old format", {
+  skip_on_cran()
+
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  aa <- update_csv(conn, "df.csv", name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_csv fails well", {
+  skip_on_cran()
+
+  expect_error(update_csv(), "argument \"conn\" is missing")
+
+  expect_error(update_csv(5), "conn must be a SolrClient object")
+})
diff --git a/tests/testthat/test-update_json.R b/tests/testthat/test-update_json.R
new file mode 100644
index 0000000..9045bc2
--- /dev/null
+++ b/tests/testthat/test-update_json.R
@@ -0,0 +1,33 @@
+context("update_json")
+
+test_that("update_json works", {
+  skip_on_cran()
+
+  file <- system.file("examples", "books2.json", package = "solrium")
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  aa <- conn$update_json(files = file, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_json works with old format", {
+  skip_on_cran()
+
+  file <- system.file("examples", "books2.json", package = "solrium")
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  aa <- update_json(conn, files = file, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_json fails well", {
+  skip_on_cran()
+
+  expect_error(update_json(), "argument \"conn\" is missing")
+
+  expect_error(update_json(5), "conn must be a SolrClient object")
+})
diff --git a/tests/testthat/test-update_xml.R b/tests/testthat/test-update_xml.R
new file mode 100644
index 0000000..efb996a
--- /dev/null
+++ b/tests/testthat/test-update_xml.R
@@ -0,0 +1,33 @@
+context("update_xml")
+
+test_that("update_xml works", {
+  skip_on_cran()
+
+  file <- system.file("examples", "books.xml", package = "solrium")
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  aa <- conn$update_xml(files = file, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_xml works with old format", {
+  skip_on_cran()
+
+  file <- system.file("examples", "books.xml", package = "solrium")
+  if (!conn$collection_exists("books")) conn$collection_create("books")
+  aa <- update_xml(conn, files = file, name = "books")
+
+  expect_is(aa, "list")
+  expect_named(aa, c("responseHeader"))
+  expect_true(conn$collection_exists("books"))
+})
+
+test_that("update_xml fails well", {
+  skip_on_cran()
+
+  expect_error(update_xml(), "argument \"conn\" is missing")
+
+  expect_error(update_xml(5), "conn must be a SolrClient object")
+})
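
The update_csv/update_json/update_xml tests above all share one calling
pattern; a sketch using the example file bundled with the package.

    file <- system.file("examples", "books.xml", package = "solrium")
    conn$update_xml(files = file, name = "books")   # R6 form
    update_xml(conn, files = file, name = "books")  # old-style form
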
diff --git a/vignettes/local_setup.Rmd b/vignettes/local_setup.Rmd
index 290ff07..0320cda 100644
--- a/vignettes/local_setup.Rmd
+++ b/vignettes/local_setup.Rmd
@@ -4,7 +4,7 @@
 %\VignetteEncoding{UTF-8}
 -->
 
-Local Solr setup 
+Local Solr setup
 ======
 
 ### OSX
@@ -29,7 +29,7 @@ bunch of documents
 
 #### Linuxbrew
 
-[Linuxbrew](http://brew.sh/linuxbrew/) is a port of Mac OS homebrew to linux.  Operation is essentially the same as for homebrew.  Follow the [installation instructions for linuxbrew](http://brew.sh/linuxbrew/#installation) and then the instructions for using homebrew (above) should work without modification.
+[Linuxbrew](http://linuxbrew.sh/) is a port of Mac OS homebrew to linux.  Operation is essentially the same as for homebrew.  Follow the installation instructions for linuxbrew; the homebrew instructions above should then work without modification.
 
 ### Windows
 
